static void _my_create_generation_sorted_member_cache(SG_context * pCtx,
                                                      SG_dagfrag * pFrag)
{
    // everything in the main CACHE is (implicitly) sorted by HID.  there are
    // times when we need this list sorted by generation.
    //
    // here we construct a copy of the CACHE in that alternate ordering.  we
    // simply borrow the associated data pointers of the items in the real
    // CACHE, so we don't own/free them.  we only include items of type
    // START_MEMBER and INTERIOR_MEMBER.
    //
    // WARNING: whenever you change the CACHE (such as during __add_{leaf,leaves}()),
    // WARNING: you must delete/recreate or likewise update this copy.

    SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
    pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &pFrag->m_pRB_GenerationSortedMemberCache)  );
    SG_ERR_CHECK(  SG_rbtree__foreach(pCtx,
                                      pFrag->m_pRB_Cache,
                                      _sg_dagfrag__my_create_generation_sorted_member_cache_callback,
                                      pFrag)  );
    return;

fail:
    SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
    pFrag->m_pRB_GenerationSortedMemberCache = NULL;
}
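// A minimal sketch of what the foreach callback referenced above might look
// like -- NOT the shipped implementation.  It assumes the usual rbtree
// foreach callback shape (pCtx, key, assoc, caller-data), that
// SG_HID_MAX_BUFFER_LENGTH bounds a HID, and that the alternate ordering is
// achieved with a zero-padded "generation.hid" key so that lexicographic
// rbtree order agrees with numeric generation order.  Disabled because the
// key format is an assumption.
#if 0
static void _sg_dagfrag__my_create_generation_sorted_member_cache_callback(SG_context * pCtx,
                                                                           const char * szHid,
                                                                           void * pVoidAssocData,
                                                                           void * pVoidCallerData)
{
    SG_dagfrag * pFrag = (SG_dagfrag *)pVoidCallerData;
    _my_data * pMyData = (_my_data *)pVoidAssocData;
    char bufKey[SG_HID_MAX_BUFFER_LENGTH + 20];

    // only members belong in the generation-sorted copy; fringe nodes are skipped.
    if ((pMyData->m_state != SG_DFS_START_MEMBER) && (pMyData->m_state != SG_DFS_INTERIOR_MEMBER))
        return;

    // hypothetical key format: generation first, so the rbtree sorts by generation.
    SG_ERR_CHECK_RETURN(  SG_sprintf(pCtx, bufKey, sizeof(bufKey), "%08d.%s",
                                     pMyData->m_genDagnode, szHid)  );

    // borrow the assoc pointer; the main CACHE still owns it.
    SG_ERR_CHECK_RETURN(  SG_rbtree__add__with_assoc(pCtx,
                                                     pFrag->m_pRB_GenerationSortedMemberCache,
                                                     bufKey, pMyData)  );
}
#endif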
void u0048_multidag_test__1(SG_context * pCtx)
{
    char bufName[SG_TID_MAX_BUFFER_LENGTH + u0048_multidag__MY_LABEL_LENGTH];
    SG_repo* pRepo = NULL;
    SG_rbtree* prb = NULL;
    SG_uint32 count;
    char* pid1 = NULL;
    char* pid1a = NULL;
    char* pid1b = NULL;
    char* pid1c = NULL;
    char* pid2 = NULL;
    char* pid2a = NULL;
    char* pid2b = NULL;

    VERIFY_ERR_CHECK(  SG_strcpy(pCtx, bufName, sizeof(bufName), u0048_multidag__MY_LABEL)  );
    VERIFY_ERR_CHECK(  SG_tid__generate2(pCtx,
                                         &bufName[u0048_multidag__MY_LABEL_LENGTH],
                                         (sizeof(bufName) - u0048_multidag__MY_LABEL_LENGTH),
                                         32)  );

    /* create the repo */
    VERIFY_ERR_CHECK(  u0048_multidag__new_repo(pCtx, bufName, &pRepo)  );

    VERIFY_ERR_CHECK(  u0048_multidag__add_dagnode(pCtx, &pid1,  NULL, SG_DAGNUM__TESTING__NOTHING, pRepo)  );
    VERIFY_ERR_CHECK(  u0048_multidag__add_dagnode(pCtx, &pid1a, pid1, SG_DAGNUM__TESTING__NOTHING, pRepo)  );
    VERIFY_ERR_CHECK(  u0048_multidag__add_dagnode(pCtx, &pid1b, pid1, SG_DAGNUM__TESTING__NOTHING, pRepo)  );
    VERIFY_ERR_CHECK(  u0048_multidag__add_dagnode(pCtx, &pid1c, pid1, SG_DAGNUM__TESTING__NOTHING, pRepo)  );

    VERIFY_ERR_CHECK(  u0048_multidag__add_dagnode(pCtx, &pid2,  NULL, SG_DAGNUM__TESTING2__NOTHING, pRepo)  );
    VERIFY_ERR_CHECK(  u0048_multidag__add_dagnode(pCtx, &pid2a, pid2, SG_DAGNUM__TESTING2__NOTHING, pRepo)  );
    VERIFY_ERR_CHECK(  u0048_multidag__add_dagnode(pCtx, &pid2b, pid2, SG_DAGNUM__TESTING2__NOTHING, pRepo)  );

    SG_NULLFREE(pCtx, pid1);
    SG_NULLFREE(pCtx, pid1a);
    SG_NULLFREE(pCtx, pid1b);
    SG_NULLFREE(pCtx, pid1c);
    SG_NULLFREE(pCtx, pid2);
    SG_NULLFREE(pCtx, pid2a);
    SG_NULLFREE(pCtx, pid2b);

    // the TESTING dag has three leaves (1a, 1b, 1c); the TESTING2 dag has two (2a, 2b).
    VERIFY_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__TESTING__NOTHING, &prb)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count)  );
    SG_RBTREE_NULLFREE(pCtx, prb);
    VERIFY_COND("count", (3 == count));

    VERIFY_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__TESTING2__NOTHING, &prb)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count)  );
    SG_RBTREE_NULLFREE(pCtx, prb);
    VERIFY_COND("count", (2 == count));

    SG_REPO_NULLFREE(pCtx, pRepo);
    return;

fail:
    // all of these are NULL-safe, so freeing them unconditionally is fine.
    SG_NULLFREE(pCtx, pid1);
    SG_NULLFREE(pCtx, pid1a);
    SG_NULLFREE(pCtx, pid1b);
    SG_NULLFREE(pCtx, pid1c);
    SG_NULLFREE(pCtx, pid2);
    SG_NULLFREE(pCtx, pid2a);
    SG_NULLFREE(pCtx, pid2b);
    SG_RBTREE_NULLFREE(pCtx, prb);
    SG_REPO_NULLFREE(pCtx, pRepo);
}
void SG_dagquery__new_since(SG_context * pCtx,
                            SG_repo* pRepo,
                            SG_uint64 iDagNum,
                            const char* pszOldNodeHid,
                            const char* pszNewNodeHid,
                            SG_rbtree** pprbNewNodeHids)
{
    SG_dagquery_relationship rel;
    new_since_context cbCtx;

    cbCtx.dagnum = iDagNum;
    cbCtx.prbNewNodeHids = NULL;
    cbCtx.pszNewNodeHid = pszNewNodeHid;
    cbCtx.pszOldNodeHid = pszOldNodeHid;

    SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, iDagNum,
                                                         pszNewNodeHid, pszOldNodeHid,
                                                         SG_FALSE, SG_FALSE, &rel)  );

    if (rel != SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
        SG_ERR_THROW2_RETURN(SG_ERR_UNSPECIFIED, (pCtx, "pszNewNodeHid must be a descendant of pszOldNodeHid"));

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &cbCtx.prbNewNodeHids)  );

    SG_ERR_CHECK(  SG_dagwalker__walk_dag_single(pCtx, pRepo, iDagNum, pszNewNodeHid,
                                                 _dagquery__new_since__callback, (void*)&cbCtx)  );

    SG_RETURN_AND_NULL(cbCtx.prbNewNodeHids, pprbNewNodeHids);

    /* fall through to common cleanup */
fail:
    SG_RBTREE_NULLFREE(pCtx, cbCtx.prbNewNodeHids);
}
void SG_repo__free_implementation_plugin_list(SG_context* pCtx)
{
    // TODO do we need to call each one to free up dynamic resources somehow?
    SG_RBTREE_NULLFREE(pCtx, g_prb_repo_vtables);
}
void SG_dagfrag__free(SG_context * pCtx, SG_dagfrag * pFrag)
{
    if (!pFrag)
        return;

    SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, pFrag->m_pRB_Cache, _my_data__free);
    SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);    // we borrowed the assoc data from the real cache, so we don't free them.

    SG_NULLFREE(pCtx, pFrag);
}
static void _loadModuleDir(SG_context *pCtx,
                           const SG_pathname *path,
                           const char *modname,
                           JSContext *cx,
                           JSObject *glob)
{
    SG_rbtree * pJavascriptFiles = NULL;
    SG_rbtree_iterator * pJavascriptFile = NULL;
    const char *szJavascriptFile = NULL;
    SG_pathname *pJavascriptFilePath = NULL;
    SG_bool ok = SG_FALSE;
    SG_bool valid = SG_FALSE;
    char *psz_js = NULL;    // free
    SG_uint32 len_js = 0;
    jsval rval = JSVAL_VOID;

    SG_ERR_CHECK(  SG_dir__list(pCtx, path, NULL, NULL, ".js", &pJavascriptFiles)  );
    SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pJavascriptFile, pJavascriptFiles, &ok, &szJavascriptFile, NULL)  );
    SG_ERR_CHECK(  SG_PATHNAME__ALLOC__COPY(pCtx, &pJavascriptFilePath, path)  );

    while (ok)
    {
        SG_ERR_CHECK(  _isValidJsFile(pCtx, szJavascriptFile, &valid)  );
        if (valid)
        {
            SG_ERR_CHECK(  SG_pathname__append__from_sz(pCtx, pJavascriptFilePath, szJavascriptFile)  );
            SG_ERR_CHECK(  sg_read_entire_file(pCtx, pJavascriptFilePath, &psz_js, &len_js)  );

            if (!JS_EvaluateScript(cx, glob, psz_js, len_js, szJavascriptFile, 1, &rval))
            {
                SG_ERR_CHECK_CURRENT;
                SG_ERR_THROW2(SG_ERR_JS, (pCtx, "An error occurred loading %s javascript file '%s'", modname, szJavascriptFile));
            }

            SG_NULLFREE(pCtx, psz_js);
            SG_ERR_CHECK(  SG_pathname__remove_last(pCtx, pJavascriptFilePath)  );
        }
        SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pJavascriptFile, &ok, &szJavascriptFile, NULL)  );
    }

fail:
    SG_PATHNAME_NULLFREE(pCtx, pJavascriptFilePath);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pJavascriptFile);
    SG_RBTREE_NULLFREE(pCtx, pJavascriptFiles);
    SG_NULLFREE(pCtx, psz_js);
}
void SG_dagquery__find_single_descendant_head(SG_context * pCtx,
                                              SG_repo * pRepo,
                                              SG_uint64 dagnum,
                                              const char * pszHidStart,
                                              SG_dagquery_find_head_status * pdqfhs,
                                              char * bufHidHead,
                                              SG_uint32 lenBufHidHead)
{
    SG_rbtree * prbHeads = NULL;
    const char * pszKey_0;
    SG_dagquery_find_head_status dqfhs;
    SG_bool b;

    SG_NULLARGCHECK_RETURN(pRepo);
    SG_NONEMPTYCHECK_RETURN(pszHidStart);
    SG_NULLARGCHECK_RETURN(pdqfhs);
    SG_NULLARGCHECK_RETURN(bufHidHead);

    SG_ERR_CHECK(  SG_dagquery__find_descendant_heads(pCtx, pRepo, dagnum, pszHidStart, SG_TRUE,
                                                      &dqfhs, &prbHeads)  );
    switch (dqfhs)
    {
    case SG_DAGQUERY_FIND_HEAD_STATUS__IS_LEAF:
    case SG_DAGQUERY_FIND_HEAD_STATUS__UNIQUE:
        SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, prbHeads, &b, &pszKey_0, NULL)  );
        SG_ASSERT(  (b)  );
        SG_ERR_CHECK(  SG_strcpy(pCtx, bufHidHead, lenBufHidHead, pszKey_0)  );
        break;

    case SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE:
    default:
        // don't throw, just return the status
        bufHidHead[0] = 0;
        break;
    }

    *pdqfhs = dqfhs;

fail:
    SG_RBTREE_NULLFREE(pCtx, prbHeads);
}
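// Hypothetical caller sketch (not part of the original source): shows the
// buffer-and-status calling pattern above.  The buffer size macro
// SG_HID_MAX_BUFFER_LENGTH and the example__ function name are assumptions.
#if 0
static void example__advance_to_head(SG_context * pCtx, SG_repo * pRepo, const char * pszHidStart)
{
    char bufHead[SG_HID_MAX_BUFFER_LENGTH];
    SG_dagquery_find_head_status status;

    SG_ERR_CHECK_RETURN(  SG_dagquery__find_single_descendant_head(pCtx, pRepo,
                                                                   SG_DAGNUM__VERSION_CONTROL,
                                                                   pszHidStart,
                                                                   &status,
                                                                   bufHead, sizeof(bufHead))  );
    if (status == SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE)
        return;        // ambiguous: bufHead was set to the empty string; the caller must decide.

    // bufHead now holds the unique head (or pszHidStart itself, if it was already a leaf).
}
#endif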
static void _sg_jscore__install_modules(SG_context * pCtx,
                                        JSContext *cx,
                                        JSObject *glob,
                                        const SG_vhash *pServerConfig)
{
    SG_pathname *pModuleDirPath = NULL;
    SG_rbtree_iterator *pModuleDir = NULL;
    SG_rbtree *pModules = NULL;
    const char *szModuleDir = NULL;
    SG_bool ok = SG_FALSE;
    SG_uint32 len_js = 0;
    jsval rval;
    char *psz_js = NULL;
    jsval fo = JSVAL_VOID;
    JSObject * jsoServerConfig = NULL;

    if (gpJSCoreGlobalState->bSkipModules)
        return;

    SG_ERR_CHECK(  _modulesInstalled(pCtx, cx, glob, &ok)  );
    if (ok)
        return;

    if (! gpJSCoreGlobalState->pPathToDispatchDotJS)
        return;

    SG_ERR_CHECK(  _setModulesInstalled(pCtx, cx, glob)  );

    SG_ERR_CHECK(  sg_read_entire_file(pCtx, gpJSCoreGlobalState->pPathToDispatchDotJS, &psz_js, &len_js)  );
    if (!JS_EvaluateScript(cx, glob, psz_js, len_js, "dispatch.js", 1, &rval))
    {
        SG_ERR_CHECK_CURRENT;
        SG_ERR_THROW2(SG_ERR_JS, (pCtx, "An error occurred evaluating dispatch.js!"));
    }
    SG_NULLFREE(pCtx, psz_js);

    // Call init function in dispatch.js
    if (pServerConfig)
    {
        jsval arg;
        JSBool js_ok;
        jsval rval2;

        SG_JS_NULL_CHECK(  jsoServerConfig = JS_NewObject(cx, NULL, NULL, NULL)  );
        SG_ERR_CHECK(  sg_jsglue__copy_vhash_into_jsobject(pCtx, cx, pServerConfig, jsoServerConfig)  );

        arg = OBJECT_TO_JSVAL(jsoServerConfig);

        js_ok = JS_CallFunctionName(cx, glob, "init", 1, &arg, &rval2);
        SG_ERR_CHECK_CURRENT;
        if (!js_ok)
            SG_ERR_THROW2(SG_ERR_JS, (pCtx, "An error occurred initializing JavaScript framework: call to JavaScript init() failed"));

        jsoServerConfig = NULL;
    }

    // Load core.
    SG_ERR_CHECK(  _loadModuleDir(pCtx, gpJSCoreGlobalState->pPathToCore, "core", cx, glob)  );

    // Load modules.
    SG_ERR_CHECK(  SG_dir__list(pCtx, gpJSCoreGlobalState->pPathToModules, NULL, NULL, NULL, &pModules)  );
    SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pModuleDir, pModules, &ok, &szModuleDir, NULL)  );

    SG_ERR_CHECK(  SG_PATHNAME__ALLOC__COPY(pCtx, &pModuleDirPath, gpJSCoreGlobalState->pPathToModules)  );

    while (ok)
    {
        if (szModuleDir[0] != '.')
        {
            SG_fsobj_stat fss;

            SG_ERR_CHECK(  SG_pathname__append__from_sz(pCtx, pModuleDirPath, szModuleDir)  );
            SG_ERR_CHECK(  SG_fsobj__stat__pathname(pCtx, pModuleDirPath, &fss)  );

            if (fss.type & SG_FSOBJ_TYPE__DIRECTORY)  // dot paths?
            {
                SG_ERR_CHECK(  _loadModuleDir(pCtx, pModuleDirPath, szModuleDir, cx, glob)  );
            }

            SG_ERR_CHECK(  SG_pathname__remove_last(pCtx, pModuleDirPath)  );
        }
        SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pModuleDir, &ok, &szModuleDir, NULL)  );
    }

    if (! JS_LookupProperty(cx, glob, "initModules", &fo))
    {
        SG_ERR_CHECK_CURRENT;
        SG_ERR_THROW2(SG_ERR_JS, (pCtx, "lookup of initModules() failed"));
    }

    if (!JSVAL_IS_VOID(fo))
    {
        if (! JS_CallFunctionName(cx, glob, "initModules", 0, NULL, &rval))
        {
            SG_ERR_CHECK_CURRENT;
            SG_ERR_THROW2(SG_ERR_JS, (pCtx, "Call to initModules() failed"));
        }
    }

fail:
    SG_NULLFREE(pCtx, psz_js);
    SG_PATHNAME_NULLFREE(pCtx, pModuleDirPath);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pModuleDir);
    SG_RBTREE_NULLFREE(pCtx, pModules);
}
int u0050_logstuff_test__1(SG_context * pCtx, SG_pathname* pPathTopDir)
{
    char bufName[SG_TID_MAX_BUFFER_LENGTH];
    SG_pathname* pPathWorkingDir = NULL;
    SG_pathname* pPathFile = NULL;
    SG_vhash* pvh = NULL;            // borrowed reference from pva; do not free
    SG_dagnode* pdn = NULL;
    const char* psz_hid_cs = NULL;
    SG_repo* pRepo = NULL;
    SG_uint32 count;
    SG_rbtree* prb = NULL;
    SG_varray* pva = NULL;
    SG_rbtree* prb_reversed = NULL;
    const char* psz_val = NULL;
    SG_audit q;

    VERIFY_ERR_CHECK(  SG_tid__generate2(pCtx, bufName, sizeof(bufName), 32)  );

    /* create the working dir */
    VERIFY_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, bufName)  );
    VERIFY_ERR_CHECK(  SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir)  );

    /* add stuff */
    VERIFY_ERR_CHECK(  u0050_logstuff__create_file__numbers(pCtx, pPathWorkingDir, "aaa", 20)  );

    /* create the repo */
    VERIFY_ERR_CHECK(  _ut_pt__new_repo(pCtx, bufName, pPathWorkingDir)  );
    VERIFY_ERR_CHECK(  _ut_pt__addremove(pCtx, pPathWorkingDir)  );
    VERIFY_ERR_CHECK(  u0050_logstuff__commit_all(pCtx, pPathWorkingDir, &pdn)  );
    VERIFY_ERR_CHECK(  SG_dagnode__get_id_ref(pCtx, pdn, &psz_hid_cs)  );

    SG_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, bufName, &pRepo)  );

#define MY_COMMENT "The name of this new file sucks! What kind of a name is 'aaa'?"

    VERIFY_ERR_CHECK(  SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS)  );

    VERIFY_ERR_CHECK(  SG_vc_comments__add(pCtx, pRepo, psz_hid_cs, MY_COMMENT, &q)  );
    VERIFY_ERR_CHECK(  SG_vc_stamps__add(pCtx, pRepo, psz_hid_cs, "crap", &q)  );
    VERIFY_ERR_CHECK(  SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "tcrap", &q)  );

    /* comment: exactly one, with the text we wrote */
    VERIFY_ERR_CHECK(  SG_vc_comments__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva, 0, &pvh)  );
    VERIFY_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh, "text", &psz_val)  );
    VERIFY_COND("match", (0 == strcmp(psz_val, MY_COMMENT))  );
    SG_VARRAY_NULLFREE(pCtx, pva);

    /* stamp: exactly one */
    VERIFY_ERR_CHECK(  SG_vc_stamps__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva, 0, &pvh)  );
    VERIFY_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh, "stamp", &psz_val)  );
    VERIFY_COND("match", (0 == strcmp(psz_val, "crap"))  );
    SG_VARRAY_NULLFREE(pCtx, pva);

    /* tag: exactly one */
    VERIFY_ERR_CHECK(  SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva, 0, &pvh)  );
    VERIFY_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh, "tag", &psz_val)  );
    VERIFY_COND("match", (0 == strcmp(psz_val, "tcrap"))  );
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK(  SG_vc_tags__list(pCtx, pRepo, &prb)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count)  );
    VERIFY_COND("count", (1 == count));

    VERIFY_ERR_CHECK(  SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb_reversed, &count)  );
    VERIFY_COND("count", (1 == count));

    {
        const char* psz_my_key = NULL;
        const char* psz_my_val = NULL;
        SG_bool b;

        VERIFY_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val)  );
        VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap"))  );
        VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs))  );
    }

    SG_RBTREE_NULLFREE(pCtx, prb_reversed);
    SG_RBTREE_NULLFREE(pCtx, prb);

    /* add a second tag to the same changeset */
    VERIFY_ERR_CHECK(  SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "whatever", &q)  );

    VERIFY_ERR_CHECK(  SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (2 == count));
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK(  SG_vc_tags__list(pCtx, pRepo, &prb)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count)  );
    VERIFY_COND("count", (2 == count));

    VERIFY_ERR_CHECK(  SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb_reversed, &count)  );
    VERIFY_COND("count", (1 == count));

    {
        const char* psz_my_key = NULL;
        const char* psz_my_val = NULL;
        SG_bool b;

        VERIFY_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val)  );
        VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs))  );
        /* we don't know whether psz_my_val is tcrap or whatever. */
        // VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap"))  );
    }

    SG_RBTREE_NULLFREE(pCtx, prb_reversed);
    SG_RBTREE_NULLFREE(pCtx, prb);

    {
        const char* psz_remove = "whatever";

        VERIFY_ERR_CHECK(  SG_vc_tags__remove(pCtx, pRepo, &q, 1, &psz_remove)  );

        /* Note that by removing whatever, we are bringing the tags list back
         * to a state where it has been before (just tcrap).  This changeset in
         * the tags table will have its own csid, because the parentage is
         * different, but its root idtrie HID will be the same as a previous
         * node. */
    }

    VERIFY_ERR_CHECK(  SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (1 == count));
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK(  SG_vc_tags__list(pCtx, pRepo, &prb)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count)  );
    VERIFY_COND("count", (1 == count));
    SG_RBTREE_NULLFREE(pCtx, prb);

    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathFile);

    return 1;

fail:
    SG_VARRAY_NULLFREE(pCtx, pva);
    SG_RBTREE_NULLFREE(pCtx, prb_reversed);
    SG_RBTREE_NULLFREE(pCtx, prb);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathFile);

    return 0;
}
void SG_dagfrag__add_dagnode(SG_context * pCtx,
                             SG_dagfrag * pFrag,
                             SG_dagnode** ppdn)
{
    _my_data * pMyDataCached = NULL;
    SG_bool bPresent = SG_FALSE;
    const char* psz_id = NULL;
    SG_dagnode* pdn = NULL;

    SG_NULLARGCHECK_RETURN(pFrag);
    SG_NULLARGCHECK_RETURN(ppdn);

    pdn = *ppdn;

    // if we are extending the fragment, delete the generation-sorted
    // member cache copy.  (see __foreach_member()).  it's either that
    // or update it in parallel as we change the real CACHE and that
    // doesn't seem worth the bother.

    SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
    pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    // fetch the starting dagnode and compute the generation bounds.
    // first, see if the cache already has info for this dagnode.
    // if not, fetch it from the source and then add it to the cache.

    SG_ERR_CHECK(  SG_dagnode__get_id_ref(pCtx, pdn, &psz_id)  );

#if DEBUG && TRACE_DAGFRAG && 0
    SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Adding [%s] to frag.\r\n", psz_id)  );
    SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );
#endif

    SG_ERR_CHECK(  _cache__lookup(pCtx, pFrag, psz_id, &pMyDataCached, &bPresent)  );

    if (!bPresent)
    {
        // this dagnode was not already present in the cache.
        // add it to the cache directly and set the state.
        // we don't need to go thru the work queue for it.
        //
        // then add all of its parents.

        SG_ERR_CHECK(  _cache__add__dagnode(pCtx, pFrag, 0, pdn, SG_DFS_START_MEMBER, &pMyDataCached)  );
        *ppdn = NULL;    // the cache now owns the dagnode

        SG_ERR_CHECK(  _add_parents(pCtx, pFrag, pMyDataCached->m_pDagnode)  );
    }
    else
    {
        // the node was already present in the cache, so we have already
        // walked at least part of the graph around it.

        switch (pMyDataCached->m_state)
        {
        case SG_DFS_END_FRINGE:
            if (!pMyDataCached->m_pDagnode)
            {
                pMyDataCached->m_pDagnode = pdn;
                *ppdn = NULL;
            }
            pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
            SG_ERR_CHECK(  _add_parents(pCtx, pFrag, pMyDataCached->m_pDagnode)  );
            break;

        default:
            break;
            /* SG_ASSERT_RELEASE_FAIL2(  (0),
                                         (pCtx, "Invalid state [%d] in DAGFRAG Cache for [%s]",
                                          pMyDataCached->m_state, psz_id)  ); */
        }
    }

    // fall through
fail:
    SG_DAGNODE_NULLFREE(pCtx, *ppdn);
}
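// Hypothetical usage sketch (not part of the original file): the double
// pointer signals an ownership transfer -- on success the fragment owns the
// dagnode and *ppdn is nulled, so the caller's NULLFREE becomes a no-op.
// The 4-argument SG_repo__fetch_dagnode form follows the usage elsewhere in
// this file; the example__ name is an assumption.
#if 0
static void example__add_to_frag(SG_context * pCtx, SG_dagfrag * pFrag, SG_repo * pRepo, const char * pszHid)
{
    SG_dagnode * pdn = NULL;

    SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, pszHid, &pdn)  );
    SG_ERR_CHECK(  SG_dagfrag__add_dagnode(pCtx, pFrag, &pdn)  );    // pdn is NULL on return

fail:
    SG_DAGNODE_NULLFREE(pCtx, pdn);    // safe on both paths: NULLFREE is a no-op on NULL
}
#endif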
void SG_sync_remote__request_fragball(SG_context* pCtx,
                                      SG_repo* pRepo,
                                      const SG_pathname* pFragballDirPathname,
                                      SG_vhash* pvhRequest,
                                      char** ppszFragballName)
{
    SG_pathname* pFragballPathname = NULL;
    SG_uint64* paDagNums = NULL;
    SG_string* pstrFragballName = NULL;
    SG_rbtree* prbDagnodes = NULL;
    SG_rbtree_iterator* pit = NULL;
    SG_rev_spec* pRevSpec = NULL;
    SG_stringarray* psaFullHids = NULL;
    SG_rbtree* prbDagnums = NULL;
    SG_dagfrag* pFrag = NULL;
    char* pszRepoId = NULL;
    char* pszAdminId = NULL;
    SG_fragball_writer* pfb = NULL;

    SG_NULLARGCHECK_RETURN(pRepo);
    SG_NULLARGCHECK_RETURN(pFragballDirPathname);

    {
        char buf_filename[SG_TID_MAX_BUFFER_LENGTH];
        SG_ERR_CHECK(  SG_tid__generate(pCtx, buf_filename, sizeof(buf_filename))  );
        SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pFragballPathname, pFragballDirPathname, buf_filename)  );
    }

    if (!pvhRequest)
    {
        // Add leaves from every dag to the fragball.
        SG_uint32 count_dagnums;
        SG_uint32 i;

        SG_ERR_CHECK(  SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb)  );
        SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums)  );

        for (i=0; i<count_dagnums; i++)
        {
            SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes)  );
            SG_ERR_CHECK(  SG_fragball__write__dagnodes(pCtx, pfb, paDagNums[i], prbDagnodes)  );
            SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
        }

        SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
        SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
        SG_ERR_CHECK(  SG_fragball_writer__close(pCtx, pfb)  );
    }
    else
    {
        // Specific dags/nodes were requested.  Build that fragball.
        SG_bool found;

#if TRACE_SYNC_REMOTE && 0
        SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "fragball request")  );
#endif

        SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found)  );
        if (found)
        {
            // SG_SYNC_STATUS_KEY__CLONE_REQUEST is currently ignored
            SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, 3, pFragballDirPathname, ppszFragballName)  );
        }
        else
        {
            // Not a full clone.
            SG_ERR_CHECK(  SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb)  );

            SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &found)  );
            if (found)
            {
                SG_vhash* pvh_since = NULL;
                SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &pvh_since)  );
                SG_ERR_CHECK(  _do_since(pCtx, pRepo, pvh_since, pfb)  );
            }

            SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
            if (found)
            {
                // Specific dagnodes were requested.  Add just those nodes to our "start from" rbtree.
                SG_vhash* pvhDags;
                SG_uint32 count_requested_dagnums;
                SG_uint32 i;
                const SG_variant* pvRevSpecs = NULL;
                SG_vhash* pvhRevSpec = NULL;

                // For each requested dag, get the rev spec request.
                SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
                SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums)  );
                if (count_requested_dagnums)
                    SG_ERR_CHECK(  SG_repo__list_dags__rbtree(pCtx, pRepo, &prbDagnums)  );

                for (i=0; i<count_requested_dagnums; i++)
                {
                    SG_bool isValidDagnum = SG_FALSE;
                    SG_bool bSpecificNodesRequested = SG_FALSE;
                    const char* pszRefDagNum = NULL;
                    SG_uint64 iDagnum;

                    // Get the dag's rev spec vhash.
                    SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszRefDagNum, &pvRevSpecs)  );

                    // Verify that the requested dagnum exists.
                    SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbDagnums, pszRefDagNum, &isValidDagnum, NULL)  );
                    if (!isValidDagnum)
                        continue;

                    SG_ERR_CHECK(  SG_dagnum__from_sz__hex(pCtx, pszRefDagNum, &iDagnum)  );

                    if (pvRevSpecs && pvRevSpecs->type != SG_VARIANT_TYPE_NULL)
                    {
                        SG_uint32 countRevSpecs = 0;

                        SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pvRevSpecs, &pvhRevSpec)  );
                        SG_ERR_CHECK(  SG_rev_spec__from_vhash(pCtx, pvhRevSpec, &pRevSpec)  );

                        // Process the rev spec for each dag.
                        SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec, &countRevSpecs)  );
                        if (countRevSpecs > 0)
                        {
                            bSpecificNodesRequested = SG_TRUE;

                            SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pRepo, pRevSpec, SG_TRUE, &psaFullHids, NULL)  );
                            SG_ERR_CHECK(  SG_stringarray__to_rbtree_keys(pCtx, psaFullHids, &prbDagnodes)  );
                            SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
                        }
                        SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
                    }

                    if (!bSpecificNodesRequested)
                    {
                        // When no specific nodes are in the request, add all leaves.
                        SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes)  );
                    }

                    if (prbDagnodes)    // can be null when leaves of an empty dag are requested
                    {
                        // Get the leaves of the other repo, which we need to connect to.
                        SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &found)  );
                        if (found)
                        {
                            SG_vhash* pvhRefAllLeaves;
                            SG_vhash* pvhRefDagLeaves;

                            SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &pvhRefAllLeaves)  );
                            SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRefAllLeaves, pszRefDagNum, &found)  );
                            if (found)
                            {
                                SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRefAllLeaves, pszRefDagNum, &pvhRefDagLeaves)  );
                                SG_ERR_CHECK(  SG_sync__build_best_guess_dagfrag(pCtx, pRepo, iDagnum,
                                                                                 prbDagnodes, pvhRefDagLeaves, &pFrag)  );
                            }
                        }
                        else
                        {
                            // The other repo's leaves weren't provided: add just the requested
                            // nodes, and make no attempt to connect them.
                            SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &pszRepoId)  );
                            SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &pszAdminId)  );
                            SG_ERR_CHECK(  SG_dagfrag__alloc(pCtx, &pFrag, pszRepoId, pszAdminId, iDagnum)  );
                            SG_ERR_CHECK(  SG_dagfrag__load_from_repo__simple(pCtx, pFrag, pRepo, prbDagnodes)  );
                            SG_NULLFREE(pCtx, pszRepoId);
                            SG_NULLFREE(pCtx, pszAdminId);
                        }

                        if (pFrag)
                            SG_ERR_CHECK(  SG_fragball__write__frag(pCtx, pfb, pFrag)  );

                        SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
                        SG_DAGFRAG_NULLFREE(pCtx, pFrag);
                    }
                } // dagnum loop
            } // if "dags" exists

            /* Add requested blobs to the fragball */
            SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found)  );
            if (found)
            {
                // Blobs were requested.
                SG_vhash* pvhBlobs;
                SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs)  );
                SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pfb, pvhBlobs)  );
            }

            SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
            SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
        }

        SG_ERR_CHECK(  SG_fragball_writer__close(pCtx, pfb)  );
    }

    /* fall through */
fail:
    // If we had an error, delete the half-baked fragball.
    if (pFragballPathname && SG_context__has_err(pCtx))
        SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pFragballPathname)  );

    SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
    SG_NULLFREE(pCtx, paDagNums);
    SG_STRING_NULLFREE(pCtx, pstrFragballName);
    SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
    SG_RBTREE_NULLFREE(pCtx, prbDagnums);
    SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
    SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
    SG_DAGFRAG_NULLFREE(pCtx, pFrag);
    SG_NULLFREE(pCtx, pszRepoId);
    SG_NULLFREE(pCtx, pszAdminId);
    SG_FRAGBALL_WRITER_NULLFREE(pCtx, pfb);
}
void SG_dagfrag__load_from_repo__one(SG_context * pCtx,
                                     SG_dagfrag * pFrag,
                                     SG_repo* pRepo,
                                     const char * szHidStart,
                                     SG_int32 nGenerations)
{
    // load a fragment of the dag starting with the given dagnode
    // for nGenerations of parents.
    //
    // we add this portion of the graph to whatever we already
    // have in our fragment.  this may either augment (give us
    // a larger connected piece) or it may be an independent
    // subset.
    //
    // if nGenerations <= 0, load everything from this starting point
    // back to the NULL/root.
    //
    // generationStart is the generation of the starting dagnode.
    //
    // the starting dagnode *MAY* be in the final start-fringe.
    // normally, it will be.  but if we are called multiple times
    // (and have more than one start point), it may be the case
    // that this node is a parent of one of the other start points.
    //
    // we compute generationEnd as the generation that we will NOT
    // include in the fragment; nodes of that generation will be in
    // the end-fringe.  that is, we include [start...end) like most
    // C++ iterators.

    _my_data * pMyDataCached = NULL;
    SG_dagnode * pDagnodeAllocated = NULL;
    SG_dagnode * pDagnodeStart;
    SG_int32 generationStart, generationEnd;
    SG_bool bPresent = SG_FALSE;
    SG_rbtree* prb_WorkQueue = NULL;

    SG_NULLARGCHECK_RETURN(pFrag);
    SG_NONEMPTYCHECK_RETURN(szHidStart);

    // if we are extending the fragment, delete the generation-sorted
    // member cache copy.  (see __foreach_member()).  it's either that
    // or update it in parallel as we change the real CACHE and that
    // doesn't seem worth the bother.

    SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
    pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_WorkQueue)  );

    // fetch the starting dagnode and compute the generation bounds.
    // first, see if the cache already has info for this dagnode.
    // if not, fetch it from the source and then add it to the cache.

    SG_ERR_CHECK(  _cache__lookup(pCtx, pFrag, szHidStart, &pMyDataCached, &bPresent)  );
    if (!bPresent)
    {
        if (!pRepo)
            SG_ERR_THROW(  SG_ERR_INVALID_WHILE_FROZEN  );

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, szHidStart, &pDagnodeAllocated)  );
        pDagnodeStart = pDagnodeAllocated;
    }
    else
    {
        pDagnodeStart = pMyDataCached->m_pDagnode;
    }

    SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pDagnodeStart, &generationStart)  );
    SG_ASSERT_RELEASE_FAIL2(  (generationStart > 0),
                              (pCtx, "Invalid generation value [%d] for dagnode [%s]",
                               generationStart, szHidStart)  );

    if ((nGenerations <= 0) || (generationStart <= nGenerations))
        generationEnd = 0;
    else
        generationEnd = generationStart - nGenerations;

    if (!bPresent)
    {
        // this dagnode was not already present in the cache.
        // add it to the cache directly and set the state.
        // we don't need to go thru the work queue for it.
        //
        // then add all of its parents to the work queue.

        SG_ERR_CHECK(  _cache__add__dagnode(pCtx, pFrag, generationStart,
                                            pDagnodeAllocated, SG_DFS_START_MEMBER,
                                            &pMyDataCached)  );
        pDagnodeAllocated = NULL;

        SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode, prb_WorkQueue)  );
    }
    else
    {
        // the node was already present in the cache, so we have already
        // walked at least part of the graph around it.

        switch (pMyDataCached->m_state)
        {
        default:
        //case SG_DFS_UNKNOWN:
            SG_ASSERT_RELEASE_FAIL2(  (0),
                                      (pCtx, "Invalid state [%d] in DAGFRAG Cache for [%s]",
                                       pMyDataCached->m_state, szHidStart)  );

        case SG_DFS_INTERIOR_MEMBER:        // already in fragment
        case SG_DFS_START_MEMBER:           // already in fragment, duplicated leaf?
            if (generationEnd < pMyDataCached->m_genDagnode)
            {
                // they've expanded the bounds of the fragment since we
                // last visited this dagnode.  keep this dagnode in the
                // fragment and revisit the ancestors in case any were
                // put in the end-fringe that should now be included.
                //
                // we leave the state as INCLUDE or INCLUDE_AND_START
                // because a duplicate start point should remain a
                // start point.

                SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode, prb_WorkQueue)  );
            }
            else
            {
                // the current end-generation requested is >= the previous
                // end-generation, so we've completely explored this dagnode
                // already.  that is, a complete walk from this node for
                // nGenerations would not reveal any new information.
            }
            break;

        case SG_DFS_END_FRINGE:
            {
                // they want to start at a dagnode that we put in the
                // end-fringe.  this can happen if they need to expand
                // the bounds of the fragment to include older ancestors.
                //
                // we do not mark this as a start node because someone
                // else already has it as a parent.

                pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
                SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode, prb_WorkQueue)  );
            }
            break;
        }
    }

    // we optionally put the parents of the current node into the work queue.
    //
    // service the work queue until it is empty.  this allows us to walk the graph without
    // recursion.  that is, as we decide what to do with a node, we add the parents
    // to the queue.  we then iterate thru the work queue until we have dealt with
    // everything -- that is, until all parents have been properly placed.
    //
    // we cannot use a standard iterator to drive this loop because we
    // modify the queue.

    while (1)
    {
        _process_work_queue_item(pCtx, pFrag, prb_WorkQueue, generationEnd, pRepo);
        if (!SG_context__has_err(pCtx))
            break;                        // we processed everything in the queue and are done

        if (!SG_context__err_equals(pCtx, SG_ERR_RESTART_FOREACH))
            SG_ERR_RETHROW;

        SG_context__err_reset(pCtx);      // queue changed, restart iteration
    }

    SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);

    /*
    ** we have loaded a piece of the dag (starting with the given start node
    ** and tracing all parent edges back n generations).  we leave with everything
    ** in our progress queues so that other start nodes can be added to the
    ** fragment.  this allows the processing of subsequent start nodes to
    ** override some of the decisions that we made.  for example:
    **
    **           Q_15
    **             |
    **             |
    **           Z_16
    **           /  \
    **          /    \
    **      Y_17      A_17
    **          \    /    \
    **           \  /      \
    **           B_18      C_18
    **            |
    **            |
    **           D_19
    **            |
    **            |
    **           E_20
    **
    ** if we started with the leaf E_20 and requested 3 generations, we would have:
    **     start_set   := { E }
    **     include_set := { B, D, E }
    **     end_set     := { Y, A }
    **
    ** after a subsequent call with the leaf C_18 and 3 generations, we would have:
    **     start_set   := { C, E }
    **     include_set := { Z, A, B, C, D, E }
    **     end_set     := { Q, Y }
    */

    return;

fail:
    SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);
    SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
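// Hypothetical driver (not in the original source) mirroring the worked
// example in the comment above: two successive calls on the same fragment,
// each pulling three generations back from a different leaf.  The "E_20" and
// "C_18" strings are the placeholder node names from the diagram, not real HIDs.
#if 0
static void example__two_start_points(SG_context * pCtx, SG_dagfrag * pFrag, SG_repo * pRepo)
{
    // after this call: start={E}, members={B,D,E}, end-fringe={Y,A}
    SG_ERR_CHECK_RETURN(  SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, "E_20", 3)  );

    // after this call: start={C,E}, members={Z,A,B,C,D,E}, end-fringe={Q,Y}
    // (the second start point promoted A out of the end-fringe.)
    SG_ERR_CHECK_RETURN(  SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, "C_18", 3)  );
}
#endif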
void SG_workingdir__create_and_get(SG_context* pCtx,
                                   const char* pszDescriptorName,
                                   const SG_pathname* pPathDirPutTopLevelDirInHere,
                                   SG_bool bCreateDrawer,
                                   const char* psz_spec_hid_cs_baseline)
{
    SG_repo* pRepo = NULL;
    SG_rbtree* pIdsetLeaves = NULL;
    SG_uint32 count_leaves = 0;
    SG_changeset* pcs = NULL;
    const char* pszidUserSuperRoot = NULL;
    SG_bool b = SG_FALSE;
    char* psz_hid_cs_baseline = NULL;
    SG_pendingtree * pPendingTree = NULL;
    SG_vhash * pvhTimestamps = NULL;

    /*
     * Fetch the descriptor by its given name and use it to connect to
     * the repo.
     */
    SG_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, pszDescriptorName, &pRepo)  );

    if (psz_spec_hid_cs_baseline)
    {
        SG_ERR_CHECK(  SG_strdup(pCtx, psz_spec_hid_cs_baseline, &psz_hid_cs_baseline)  );
    }
    else
    {
        const char* psz_hid = NULL;

        /*
         * If you do not specify a hid to be the baseline, then this routine
         * currently only works if there is exactly one leaf in the repo.
         */
        SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, &pIdsetLeaves)  );
        SG_ERR_CHECK(  SG_rbtree__count(pCtx, pIdsetLeaves, &count_leaves)  );
        if (count_leaves != 1)
            SG_ERR_THROW(  SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE  );

        SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, pIdsetLeaves, &b, &psz_hid, NULL)  );
        SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_hid, &psz_hid_cs_baseline)  );
    }

    /*
     * Load the desired changeset from the repo so we can look up the
     * id of its user root directory.
     */
    SG_ERR_CHECK(  SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs_baseline, &pcs)  );
    SG_ERR_CHECK(  SG_changeset__get_root(pCtx, pcs, &pszidUserSuperRoot)  );

    if (bCreateDrawer)
    {
        SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhTimestamps)  );

        // Retrieve everything into the WD and capture the timestamps on the files that we create.
        SG_ERR_CHECK(  sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere,
                                                      pszidUserSuperRoot, pvhTimestamps)  );

        // this creates "repo.json" with the repo-descriptor.
        SG_ERR_CHECK(  SG_workingdir__set_mapping(pCtx, pPathDirPutTopLevelDirInHere, pszDescriptorName, NULL)  );

        // this creates an empty "wd.json" file (which doesn't know anything).
        SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, pPathDirPutTopLevelDirInHere, SG_TRUE, &pPendingTree)  );

        // force set the initial parents to the current changeset.
        SG_ERR_CHECK(  SG_pendingtree__set_single_wd_parent(pCtx, pPendingTree, psz_hid_cs_baseline)  );

        // force initialize the timestamp cache to the list that we just built; this should
        // be the only timestamps in the cache since we just populated the WD.
        SG_ERR_CHECK(  SG_pendingtree__set_wd_timestamp_cache(pCtx, pPendingTree, &pvhTimestamps)  );    // this steals our vhash

        SG_ERR_CHECK(  SG_pendingtree__save(pCtx, pPendingTree)  );
    }
    else
    {
        // Retrieve everything into the WD but do not create .sgdrawer or record timestamps.
        // This is more like an EXPORT operation.
        SG_ERR_CHECK(  sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere,
                                                      pszidUserSuperRoot, NULL)  );
    }

fail:
    SG_VHASH_NULLFREE(pCtx, pvhTimestamps);
    SG_NULLFREE(pCtx, psz_hid_cs_baseline);
    SG_CHANGESET_NULLFREE(pCtx, pcs);
    SG_RBTREE_NULLFREE(pCtx, pIdsetLeaves);
    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
}
static void _do_since(SG_context* pCtx,
                      SG_repo* pRepo,
                      SG_vhash* pvh_since,
                      SG_fragball_writer* pfb)
{
    SG_uint32 count_dagnums = 0;
    SG_uint32 i_dagnum = 0;
    SG_ihash* pih_new = NULL;
    SG_rbtree* prb = NULL;
    SG_vhash* pvh_blobs = NULL;
    SG_changeset* pcs = NULL;

    SG_NULLARGCHECK_RETURN(pRepo);
    SG_NULLARGCHECK_RETURN(pvh_since);
    SG_NULLARGCHECK_RETURN(pfb);

    // TODO do we need to deal with dags which are present here but not in pvh_since?

    SG_ERR_CHECK(  SG_vhash__count(pCtx, pvh_since, &count_dagnums)  );
    for (i_dagnum=0; i_dagnum<count_dagnums; i_dagnum++)
    {
        const char* psz_dagnum = NULL;
        SG_varray* pva_nodes = NULL;
        SG_uint64 dagnum = 0;

        SG_ERR_CHECK(  SG_vhash__get_nth_pair__varray(pCtx, pvh_since, i_dagnum, &psz_dagnum, &pva_nodes)  );
        SG_ERR_CHECK(  SG_dagnum__from_sz__hex(pCtx, psz_dagnum, &dagnum)  );

        SG_ERR_CHECK(  SG_repo__find_new_dagnodes_since(pCtx, pRepo, dagnum, pva_nodes, &pih_new)  );
        if (pih_new)
        {
            SG_uint32 count = 0;

            SG_ERR_CHECK(  SG_ihash__count(pCtx, pih_new, &count)  );
            if (count)
            {
                SG_uint32 i = 0;

                SG_ERR_CHECK(  SG_rbtree__alloc(pCtx, &prb)  );
                SG_ERR_CHECK(  SG_vhash__alloc(pCtx, &pvh_blobs)  );

                for (i=0; i<count; i++)
                {
                    const char* psz_node = NULL;

                    SG_ERR_CHECK(  SG_ihash__get_nth_pair(pCtx, pih_new, i, &psz_node, NULL)  );
                    SG_ERR_CHECK(  SG_rbtree__add(pCtx, prb, psz_node)  );
                    SG_ERR_CHECK(  SG_vhash__add__null(pCtx, pvh_blobs, psz_node)  );
                    SG_ERR_CHECK(  SG_changeset__load_from_repo(pCtx, pRepo, psz_node, &pcs)  );
                    SG_ERR_CHECK(  _add_necessary_blobs(pCtx, pcs, pvh_blobs)  );
                    SG_CHANGESET_NULLFREE(pCtx, pcs);
                }

                // put all these new nodes in the frag
                SG_ERR_CHECK(  SG_fragball__write__dagnodes(pCtx, pfb, dagnum, prb)  );

                // and the blobs
                SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pfb, pvh_blobs)  );

                SG_VHASH_NULLFREE(pCtx, pvh_blobs);
                SG_RBTREE_NULLFREE(pCtx, prb);
            }
            SG_IHASH_NULLFREE(pCtx, pih_new);
        }
    }

fail:
    SG_CHANGESET_NULLFREE(pCtx, pcs);
    SG_VHASH_NULLFREE(pCtx, pvh_blobs);
    SG_RBTREE_NULLFREE(pCtx, prb);
    SG_IHASH_NULLFREE(pCtx, pih_new);
}
void SG_dagquery__find_new_since_common(SG_context * pCtx,
                                        SG_repo * pRepo,
                                        SG_uint64 dagnum,
                                        const char * pszOldNodeHid,
                                        const char * pszNewNodeHid,
                                        SG_stringarray ** ppResults)
{
    _fnsc_work_queue_t workQueue = {NULL, 0, 0, 0, NULL};
    SG_uint32 i;
    SG_dagnode * pDagnode = NULL;
    SG_stringarray * pResults = NULL;

    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK(pRepo);
    SG_NONEMPTYCHECK(pszOldNodeHid);
    SG_NONEMPTYCHECK(pszNewNodeHid);
    SG_NULLARGCHECK(ppResults);

    SG_ERR_CHECK(  SG_allocN(pCtx, _FNSC_WORK_QUEUE_INIT_LENGTH, workQueue.p)  );
    workQueue.allocatedLength = _FNSC_WORK_QUEUE_INIT_LENGTH;
    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &workQueue.pRevnoCache)  );

    SG_ERR_CHECK(  _fnsc_work_queue__insert(pCtx, &workQueue, pszOldNodeHid, dagnum, pRepo, _ANCESTOR_OF_OLD)  );
    SG_ERR_CHECK(  _fnsc_work_queue__insert(pCtx, &workQueue, pszNewNodeHid, dagnum, pRepo, _ANCESTOR_OF_NEW)  );

    SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &pResults, 32)  );
    while (workQueue.numAncestorsOfNewOnTheQueue > 0)
    {
        const char * pszHidRef = NULL;
        SG_byte isAncestorOf = 0;

        SG_ERR_CHECK(  _fnsc_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &isAncestorOf)  );
        if (isAncestorOf==_ANCESTOR_OF_NEW)
            SG_ERR_CHECK(  SG_stringarray__add(pCtx, pResults, pszHidRef)  );

        {
            SG_uint32 count_parents = 0;
            const char** parents = NULL;

            SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pDagnode, &count_parents, &parents)  );
            for (i=0; i<count_parents; ++i)
                SG_ERR_CHECK(  _fnsc_work_queue__insert(pCtx, &workQueue, parents[i], dagnum, pRepo, isAncestorOf)  );
        }

        SG_DAGNODE_NULLFREE(pCtx, pDagnode);
    }

    for (i=0; i<workQueue.length; ++i)
        SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
    SG_NULLFREE(pCtx, workQueue.p);
    SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);

    *ppResults = pResults;

    return;

fail:
    for (i=0; i<workQueue.length; ++i)
        SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
    SG_NULLFREE(pCtx, workQueue.p);
    SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);
    SG_DAGNODE_NULLFREE(pCtx, pDagnode);
    SG_STRINGARRAY_NULLFREE(pCtx, pResults);
}
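// Hypothetical usage sketch (not from the original source): collect the
// changesets that are ancestors of "new" but not of the common ancestry
// with "old", then free the result.  The example__ name is an assumption.
#if 0
static void example__list_new(SG_context * pCtx, SG_repo * pRepo,
                              const char * pszOld, const char * pszNew)
{
    SG_stringarray * psaNew = NULL;
    SG_uint32 count = 0;

    SG_ERR_CHECK(  SG_dagquery__find_new_since_common(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL,
                                                      pszOld, pszNew, &psaNew)  );
    SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaNew, &count)  );
    // ... consume the count HIDs in psaNew ...

fail:
    SG_STRINGARRAY_NULLFREE(pCtx, psaNew);
}
#endif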
void SG_dagquery__highest_revno_common_ancestor(SG_context * pCtx,
                                                SG_repo * pRepo,
                                                SG_uint64 dagnum,
                                                const SG_stringarray * pInputNodeHids,
                                                char ** ppOutputNodeHid)
{
    const char * const * paszInputNodeHids = NULL;
    SG_uint32 countInputNodes = 0;
    SG_repo_fetch_dagnodes_handle * pDagnodeFetcher = NULL;
    _hrca_work_queue_t workQueue = {NULL, 0, 0, NULL};
    SG_uint32 i;
    SG_dagnode * pDagnode = NULL;
    const char * pszHidRef = NULL;
    SG_bitvector * pIsAncestorOf = NULL;
    SG_uint32 countIsAncestorOf = 0;

    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK(pRepo);
    SG_NULLARGCHECK(pInputNodeHids);
    SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, pInputNodeHids, &paszInputNodeHids, &countInputNodes)  );
    SG_ARGCHECK(countInputNodes>0, pInputNodeHids);
    SG_NULLARGCHECK(ppOutputNodeHid);

    SG_ERR_CHECK(  SG_repo__fetch_dagnodes__begin(pCtx, pRepo, dagnum, &pDagnodeFetcher)  );

    SG_ERR_CHECK(  SG_allocN(pCtx, _HRCA_WORK_QUEUE_INIT_LENGTH, workQueue.p)  );
    workQueue.allocatedLength = _HRCA_WORK_QUEUE_INIT_LENGTH;
    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &workQueue.pRevnoCache)  );

    // seed the queue: input node i is (trivially) an ancestor of itself,
    // so its bitvector has only bit i set.
    SG_ERR_CHECK(  SG_BITVECTOR__ALLOC(pCtx, &pIsAncestorOf, countInputNodes)  );
    for (i=0; i<countInputNodes; ++i)
    {
        SG_ERR_CHECK(  SG_bitvector__zero(pCtx, pIsAncestorOf)  );
        SG_ERR_CHECK(  SG_bitvector__set_bit(pCtx, pIsAncestorOf, i, SG_TRUE)  );
        SG_ERR_CHECK(  _hrca_work_queue__insert(pCtx, &workQueue, paszInputNodeHids[i], pRepo, pDagnodeFetcher, pIsAncestorOf)  );
    }
    SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

    // pop nodes until we find one whose bitvector says it is an
    // ancestor of every input node.
    SG_ERR_CHECK(  _hrca_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &pIsAncestorOf)  );
    SG_ERR_CHECK(  SG_bitvector__count_set_bits(pCtx, pIsAncestorOf, &countIsAncestorOf)  );
    while (countIsAncestorOf < countInputNodes)
    {
        SG_uint32 count_parents = 0;
        const char** parents = NULL;

        SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pDagnode, &count_parents, &parents)  );
        for (i=0; i<count_parents; ++i)
            SG_ERR_CHECK(  _hrca_work_queue__insert(pCtx, &workQueue, parents[i], pRepo, pDagnodeFetcher, pIsAncestorOf)  );

        SG_DAGNODE_NULLFREE(pCtx, pDagnode);
        SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

        SG_ERR_CHECK(  _hrca_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &pIsAncestorOf)  );
        SG_ERR_CHECK(  SG_bitvector__count_set_bits(pCtx, pIsAncestorOf, &countIsAncestorOf)  );
    }

    SG_ERR_CHECK(  SG_strdup(pCtx, pszHidRef, ppOutputNodeHid)  );

    SG_DAGNODE_NULLFREE(pCtx, pDagnode);
    SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

    for (i=0; i<workQueue.length; ++i)
    {
        SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
        SG_BITVECTOR_NULLFREE(pCtx, workQueue.p[i].pIsAncestorOf);
    }
    SG_NULLFREE(pCtx, workQueue.p);
    SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);

    SG_ERR_CHECK(  SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pDagnodeFetcher)  );

    return;

fail:
    for (i=0; i<workQueue.length; ++i)
    {
        SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
        SG_BITVECTOR_NULLFREE(pCtx, workQueue.p[i].pIsAncestorOf);
    }
    SG_NULLFREE(pCtx, workQueue.p);
    SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);
    SG_DAGNODE_NULLFREE(pCtx, pDagnode);
    SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);
    if (pDagnodeFetcher != NULL)
    {
        SG_ERR_IGNORE(  SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pDagnodeFetcher)  );
    }
}
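// Hypothetical merge-base sketch (not in the original source): feed two head
// HIDs in, get one common-ancestor HID out; the caller owns and must free
// the returned string.  The example__ name is an assumption.
#if 0
static void example__merge_base(SG_context * pCtx, SG_repo * pRepo,
                                const char * pszHead1, const char * pszHead2)
{
    SG_stringarray * psaInputs = NULL;
    char * pszAncestor = NULL;

    SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psaInputs, 2)  );
    SG_ERR_CHECK(  SG_stringarray__add(pCtx, psaInputs, pszHead1)  );
    SG_ERR_CHECK(  SG_stringarray__add(pCtx, psaInputs, pszHead2)  );

    SG_ERR_CHECK(  SG_dagquery__highest_revno_common_ancestor(pCtx, pRepo,
                                                              SG_DAGNUM__VERSION_CONTROL,
                                                              psaInputs, &pszAncestor)  );
    // ... use pszAncestor ...

fail:
    SG_STRINGARRAY_NULLFREE(pCtx, psaInputs);
    SG_NULLFREE(pCtx, pszAncestor);
}
#endif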
void SG_repo__pack__vcdiff(SG_context* pCtx, SG_repo * pRepo)
{
    SG_rbtree* prb_leaves = NULL;
    SG_uint32 count_leaves = 0;
    const char* psz_hid_cs = NULL;
    SG_rbtree* prb_blobs = NULL;
    SG_bool b;
    SG_rbtree_iterator* pit = NULL;
    SG_rbtree_iterator* pit_for_gid = NULL;
    SG_bool b_for_gid;
    const char* psz_hid_ref = NULL;
    const char* psz_hid_blob = NULL;
    const char* psz_gid = NULL;
    SG_rbtree* prb = NULL;
    const char* psz_gen = NULL;
    SG_repo_tx_handle* pTx;

    SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, &prb_leaves)  );
    SG_ERR_CHECK(  SG_rbtree__count(pCtx, prb_leaves, &count_leaves)  );
    SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, prb_leaves, &b, &psz_hid_cs, NULL)  );

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_blobs)  );
    SG_ERR_CHECK(  sg_pack__do_changeset(pCtx, pRepo, psz_hid_cs, prb_blobs)  );

    SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prb_blobs, &b, &psz_gid, (void**) &prb)  );
    while (b)
    {
        SG_uint32 count_for_gid = 0;

        SG_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count_for_gid)  );
        if (count_for_gid > 1)
        {
            psz_hid_ref = NULL;
            SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit_for_gid, prb, &b_for_gid, &psz_gen, (void**) &psz_hid_blob)  );
            while (b_for_gid)
            {
                // Not a lot of thought went into doing each of these in its own repo tx.  Consider alternatives.
                SG_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
                if (psz_hid_ref)
                {
                    // delta this version of the blob against the full reference version.
                    SG_ERR_CHECK(  SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid_blob, SG_BLOBENCODING__VCDIFF, psz_hid_ref, NULL, NULL, NULL, NULL)  );
                    // TODO be tolerant here of SG_ERR_REPO_BUSY
                }
                else
                {
                    // the first blob for this gid becomes the full reference version.
                    psz_hid_ref = psz_hid_blob;
                    SG_ERR_CHECK(  SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid_ref, SG_BLOBENCODING__FULL, NULL, NULL, NULL, NULL, NULL)  );
                    // TODO be tolerant here of SG_ERR_REPO_BUSY
                }
                SG_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );

                SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit_for_gid, &b_for_gid, &psz_gen, (void**) &psz_hid_blob)  );
            }
            SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit_for_gid);
            psz_hid_ref = NULL;
        }
        SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &b, &psz_gid, (void**) &prb)  );
    }
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);

    SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, prb_blobs, _sg_repo__free_rbtree);
    SG_RBTREE_NULLFREE(pCtx, prb_leaves);

    return;

fail:
    // free whatever is still live if we errored out mid-walk.
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit_for_gid);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
    SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, prb_blobs, _sg_repo__free_rbtree);
    SG_RBTREE_NULLFREE(pCtx, prb_leaves);
}
static void _advise_after_update(SG_context * pCtx,
                                 SG_option_state * pOptSt,
                                 SG_pathname * pPathCwd,
                                 const char * pszBaselineBeforeUpdate)
{
    SG_pendingtree * pPendingTree = NULL;
    SG_repo * pRepo;            // borrowed from pPendingTree; not freed here
    char * pszBaselineAfterUpdate = NULL;
    SG_rbtree * prbLeaves = NULL;
    SG_uint32 nrLeaves;
    SG_bool bUpdateChangedBaseline;

    // re-open pendingtree to get the now-current baseline (we have to do
    // this in a new instance because the UPDATE saves the pendingtree which
    // frees all of the interesting stuff).

    SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, pPathCwd, pOptSt->bIgnoreWarnings, &pPendingTree)  );
    SG_ERR_CHECK(  SG_pendingtree__get_repo(pCtx, pPendingTree, &pRepo)  );

    SG_ERR_CHECK(  _get_baseline(pCtx, pPendingTree, &pszBaselineAfterUpdate)  );

    // see if the update actually changed the baseline.
    bUpdateChangedBaseline = (strcmp(pszBaselineBeforeUpdate, pszBaselineAfterUpdate) != 0);

    // get the list of all heads/leaves.
    //
    // TODO 2010/06/30 Revisit this when we have NAMED BRANCHES because we
    // TODO            want to filter this list for things within their BRANCH.

    SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, &prbLeaves)  );
#if defined(DEBUG)
    {
        SG_bool bFound = SG_FALSE;
        SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbLeaves, pszBaselineAfterUpdate, &bFound, NULL)  );
        SG_ASSERT(  (bFound)  );
    }
#endif
    SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbLeaves, &nrLeaves)  );

    if (nrLeaves > 1)
    {
        if (bUpdateChangedBaseline)
            SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "Baseline updated to descendant head, but there are multiple heads; consider merging.\n")  );
        else
            SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "Baseline already at head, but there are multiple heads; consider merging.\n")  );
    }
    else
    {
        if (bUpdateChangedBaseline)
            SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "Baseline updated to head.\n")  );
        else
            SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "Baseline already at head.\n")  );
    }

fail:
    SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
    SG_RBTREE_NULLFREE(pCtx, prbLeaves);
    SG_NULLFREE(pCtx, pszBaselineAfterUpdate);
}
void SG_dagquery__find_descendant_heads(SG_context * pCtx,
                                        SG_repo * pRepo,
                                        SG_uint64 iDagNum,
                                        const char * pszHidStart,
                                        SG_bool bStopIfMultiple,
                                        SG_dagquery_find_head_status * pdqfhs,
                                        SG_rbtree ** pprbHeads)
{
    SG_rbtree * prbLeaves = NULL;
    SG_rbtree * prbHeadsFound = NULL;
    SG_rbtree_iterator * pIter = NULL;
    const char * pszKey_k = NULL;
    SG_bool b;
    SG_dagquery_find_head_status dqfhs;
    SG_dagquery_relationship dqRel;
    SG_uint32 nrFound;

    SG_NULLARGCHECK_RETURN(pRepo);
    SG_NONEMPTYCHECK_RETURN(pszHidStart);
    SG_NULLARGCHECK_RETURN(pdqfhs);
    SG_NULLARGCHECK_RETURN(pprbHeads);

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prbHeadsFound)  );

    // fetch a list of all of the LEAVES in the DAG.
    // this rbtree only contains keys; no assoc values.

    SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagNum, &prbLeaves)  );

    // if the starting point is a leaf, then we are done (we don't care how many
    // other leaves are in the rbtree because none will be a child of ours because
    // we are a leaf).

    SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbLeaves, pszHidStart, &b, NULL)  );
    if (b)
    {
        SG_ERR_CHECK(  SG_rbtree__add(pCtx, prbHeadsFound, pszHidStart)  );

        dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__IS_LEAF;
        goto done;
    }

    // inspect each leaf and qualify it; put the ones that pass
    // into the list of actual heads.

    nrFound = 0;

    SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIter, prbLeaves, &b, &pszKey_k, NULL)  );
    while (b)
    {
        // is head[k] a descendant of start?
        SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, iDagNum, pszKey_k, pszHidStart,
                                                             SG_FALSE,    // we care about descendants, so don't skip
                                                             SG_TRUE,     // we don't care about ancestors, so skip them
                                                             &dqRel)  );

        if (dqRel == SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
        {
            nrFound++;

            if (bStopIfMultiple && (nrFound > 1))
            {
                // they wanted a unique answer and we've found too many answers
                // (which they won't be able to use anyway) so just stop and
                // return the status.  (we delete prbHeadsFound because it is
                // incomplete and so that they won't be tempted to use it.)

                SG_RBTREE_NULLFREE(pCtx, prbHeadsFound);
                dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE;
                goto done;
            }

            SG_ERR_CHECK(  SG_rbtree__add(pCtx, prbHeadsFound, pszKey_k)  );
        }

        SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIter, &b, &pszKey_k, NULL)  );
    }

    switch (nrFound)
    {
    case 0:
        // this should NEVER happen.  we should always be able to find a
        // leaf/head for a node.
        //
        // TODO the only case where this might happen is if named branches
        // TODO cause the leaf to be disqualified.  so i'm going to THROW
        // TODO here rather than ASSERT.

        SG_ERR_THROW2(  SG_ERR_DAG_NOT_CONSISTENT,
                        (pCtx, "Could not find head/leaf for changeset [%s]", pszHidStart)  );
        break;

    case 1:
        dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__UNIQUE;
        break;

    default:
        dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE;
        break;
    }

done:
    *pprbHeads = prbHeadsFound;
    prbHeadsFound = NULL;

    *pdqfhs = dqfhs;

fail:
    SG_RBTREE_NULLFREE(pCtx, prbLeaves);
    SG_RBTREE_NULLFREE(pCtx, prbHeadsFound);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);
}
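// Hypothetical caller sketch (not from the original source): enumerate all
// heads descended from a node, tolerating the MULTIPLE case instead of
// requiring uniqueness (bStopIfMultiple = SG_FALSE).
#if 0
static void example__list_heads(SG_context * pCtx, SG_repo * pRepo, const char * pszHidStart)
{
    SG_rbtree * prbHeads = NULL;
    SG_dagquery_find_head_status status;
    SG_uint32 nrHeads = 0;

    SG_ERR_CHECK(  SG_dagquery__find_descendant_heads(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL,
                                                      pszHidStart,
                                                      SG_FALSE,    // don't stop if multiple
                                                      &status, &prbHeads)  );
    SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbHeads, &nrHeads)  );
    // ... iterate prbHeads with SG_rbtree__iterator__first / __next ...

fail:
    SG_RBTREE_NULLFREE(pCtx, prbHeads);
}
#endif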
void SG_repo__db__calc_delta(SG_context * pCtx,
                             SG_repo* pRepo,
                             SG_uint64 dagnum,
                             const char* psz_csid_from,
                             const char* psz_csid_to,
                             SG_uint32 flags,
                             SG_vhash** ppvh_add,
                             SG_vhash** ppvh_remove)
{
    SG_dagnode* pdn_from = NULL;
    SG_dagnode* pdn_to = NULL;
    SG_int32 gen_from = -1;
    SG_int32 gen_to = -1;
    SG_varray* pva_direct_backward_path = NULL;
    SG_varray* pva_direct_forward_path = NULL;
    SG_vhash* pvh_add = NULL;
    SG_vhash* pvh_remove = NULL;
    SG_rbtree* prb_temp = NULL;
    SG_daglca* plca = NULL;
    char* psz_csid_ancestor = NULL;

    SG_NULLARGCHECK_RETURN(psz_csid_from);
    SG_NULLARGCHECK_RETURN(psz_csid_to);
    SG_NULLARGCHECK_RETURN(pRepo);
    SG_NULLARGCHECK_RETURN(ppvh_add);
    SG_NULLARGCHECK_RETURN(ppvh_remove);

    SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_from, &pdn_from)  );
    SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pdn_from, &gen_from)  );
    SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_to, &pdn_to)  );
    SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pdn_to, &gen_to)  );

    if (gen_from > gen_to)
    {
        // "from" is deeper: try a direct walk back from "from" to "to".
        SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(pCtx, pRepo, dagnum,
                                                               psz_csid_from, psz_csid_to,
                                                               &pva_direct_backward_path)  );
        if (pva_direct_backward_path)
        {
            SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
            SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
            SG_ERR_CHECK(  SG_db__make_delta_from_path(pCtx, pRepo, dagnum,
                                                       pva_direct_backward_path,
                                                       flags, pvh_add, pvh_remove)  );
        }
    }
    else if (gen_from < gen_to)
    {
        // "to" is deeper: walk in the other direction and swap add/remove.
        SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(pCtx, pRepo, dagnum,
                                                               psz_csid_to, psz_csid_from,
                                                               &pva_direct_forward_path)  );
        if (pva_direct_forward_path)
        {
            SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
            SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
            SG_ERR_CHECK(  SG_db__make_delta_from_path(pCtx, pRepo, dagnum,
                                                       pva_direct_forward_path,
                                                       flags, pvh_remove, pvh_add)  );
        }
    }

    if (!pvh_add && !pvh_remove)
    {
        // no direct path in either direction: go through the LCA,
        // walking back from each side to the common ancestor.
        SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_temp)  );
        SG_ERR_CHECK(  SG_rbtree__add(pCtx, prb_temp, psz_csid_from)  );
        SG_ERR_CHECK(  SG_rbtree__add(pCtx, prb_temp, psz_csid_to)  );
        SG_ERR_CHECK(  SG_repo__get_dag_lca(pCtx, pRepo, dagnum, prb_temp, &plca)  );
        {
            const char* psz_hid = NULL;
            SG_daglca_node_type node_type = 0;
            SG_int32 gen = -1;

            SG_ERR_CHECK(  SG_daglca__iterator__first(pCtx, NULL, plca, SG_FALSE, &psz_hid, &node_type, &gen, NULL)  );
            SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_hid, &psz_csid_ancestor)  );
        }
        SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(pCtx, pRepo, dagnum,
                                                               psz_csid_from, psz_csid_ancestor,
                                                               &pva_direct_backward_path)  );
        SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(pCtx, pRepo, dagnum,
                                                               psz_csid_to, psz_csid_ancestor,
                                                               &pva_direct_forward_path)  );
        SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
        SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
        SG_ERR_CHECK(  SG_db__make_delta_from_path(pCtx, pRepo, dagnum,
                                                   pva_direct_backward_path,
                                                   flags, pvh_add, pvh_remove)  );
        SG_ERR_CHECK(  SG_db__make_delta_from_path(pCtx, pRepo, dagnum,
                                                   pva_direct_forward_path,
                                                   flags, pvh_remove, pvh_add)  );
    }

    *ppvh_add = pvh_add;
    pvh_add = NULL;

    *ppvh_remove = pvh_remove;
    pvh_remove = NULL;

fail:
    SG_NULLFREE(pCtx, psz_csid_ancestor);
    SG_RBTREE_NULLFREE(pCtx, prb_temp);
    SG_DAGLCA_NULLFREE(pCtx, plca);
    SG_VHASH_NULLFREE(pCtx, pvh_add);
    SG_VHASH_NULLFREE(pCtx, pvh_remove);
    SG_VARRAY_NULLFREE(pCtx, pva_direct_backward_path);
    SG_VARRAY_NULLFREE(pCtx, pva_direct_forward_path);
    SG_DAGNODE_NULLFREE(pCtx, pdn_from);
    SG_DAGNODE_NULLFREE(pCtx, pdn_to);
}
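// Hypothetical usage sketch (not from the original source): diff two db-dag
// states; the add/remove vhashes come back owned by the caller.  Passing 0
// for flags and the example__ name are assumptions.
#if 0
static void example__db_delta(SG_context * pCtx, SG_repo * pRepo, SG_uint64 dagnum,
                              const char * pszCsidFrom, const char * pszCsidTo)
{
    SG_vhash * pvhAdd = NULL;
    SG_vhash * pvhRemove = NULL;

    SG_ERR_CHECK(  SG_repo__db__calc_delta(pCtx, pRepo, dagnum, pszCsidFrom, pszCsidTo,
                                           0 /*flags: assumed default*/, &pvhAdd, &pvhRemove)  );
    // ... consume pvhAdd / pvhRemove ...

fail:
    SG_VHASH_NULLFREE(pCtx, pvhAdd);
    SG_VHASH_NULLFREE(pCtx, pvhRemove);
}
#endif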
/**
 * Compare all the nodes of a single DAG in two repos.
 */
static void _compare_one_dag(SG_context* pCtx,
                             SG_repo* pRepo1,
                             SG_repo* pRepo2,
                             SG_uint32 iDagNum,
                             SG_bool* pbIdentical)
{
    SG_bool bFinalResult = SG_FALSE;
    SG_rbtree* prbRepo1Leaves = NULL;
    SG_rbtree* prbRepo2Leaves = NULL;
    SG_uint32 iRepo1LeafCount, iRepo2LeafCount;
    SG_rbtree_iterator* pIterator = NULL;
    const char* pszId = NULL;
    SG_dagnode* pRepo1Dagnode = NULL;
    SG_dagnode* pRepo2Dagnode = NULL;
    SG_bool bFoundRepo1Leaf = SG_FALSE;
    SG_bool bFoundRepo2Leaf = SG_FALSE;
    SG_bool bDagnodesEqual = SG_FALSE;

    SG_NULLARGCHECK_RETURN(pRepo1);
    SG_NULLARGCHECK_RETURN(pRepo2);
    SG_NULLARGCHECK_RETURN(pbIdentical);

    SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo1, iDagNum, &prbRepo1Leaves)  );
    SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo2, iDagNum, &prbRepo2Leaves)  );

    SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbRepo1Leaves, &iRepo1LeafCount)  );
    SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbRepo2Leaves, &iRepo2LeafCount)  );

    if (iRepo1LeafCount != iRepo2LeafCount)
    {
#if TRACE_SYNC
        SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "leaf count differs\n")  );
#endif
        goto Different;
    }

    SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIterator, prbRepo1Leaves, &bFoundRepo1Leaf, &pszId, NULL)  );
    while (bFoundRepo1Leaf)
    {
        SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbRepo2Leaves, pszId, &bFoundRepo2Leaf, NULL)  );
        if (!bFoundRepo2Leaf)
        {
#if TRACE_SYNC && 0
            SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "couldn't locate leaf\r\n")  );
            SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Repo 1 leaves:\r\n")  );
            SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo1Leaves)  );
            SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Repo 2 leaves:\r\n")  );
            SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo2Leaves)  );
            SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );
#endif
            goto Different;
        }

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo1, pszId, &pRepo1Dagnode)  );
        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo2, pszId, &pRepo2Dagnode)  );

        SG_ERR_CHECK(  _compare_dagnodes(pCtx, pRepo1, pRepo1Dagnode, pRepo2, pRepo2Dagnode, &bDagnodesEqual)  );

        SG_DAGNODE_NULLFREE(pCtx, pRepo1Dagnode);
        SG_DAGNODE_NULLFREE(pCtx, pRepo2Dagnode);

        if (!bDagnodesEqual)
            goto Different;

        SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIterator, &bFoundRepo1Leaf, &pszId, NULL)  );
    }

    bFinalResult = SG_TRUE;

Different:
    *pbIdentical = bFinalResult;

    // fall through
fail:
    SG_RBTREE_NULLFREE(pCtx, prbRepo1Leaves);
    SG_RBTREE_NULLFREE(pCtx, prbRepo2Leaves);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIterator);
}
void SG_server__pull_request_fragball(SG_context* pCtx,
                                      SG_repo* pRepo,
                                      SG_vhash* pvhRequest,
                                      const SG_pathname* pFragballDirPathname,
                                      char** ppszFragballName,
                                      SG_vhash** ppvhStatus)
{
    SG_pathname* pFragballPathname = NULL;
    SG_uint32* paDagNums = NULL;
    SG_rbtree* prbDagnodes = NULL;
    SG_string* pstrFragballName = NULL;
    char* pszRevFullHid = NULL;
    SG_rbtree_iterator* pit = NULL;
    SG_uint32* repoDagnums = NULL;

    SG_NULLARGCHECK_RETURN(pRepo);
    SG_NULLARGCHECK_RETURN(pFragballDirPathname);
    SG_NULLARGCHECK_RETURN(ppvhStatus);

#if TRACE_SERVER
    SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "pull fragball request")  );
#endif

    SG_ERR_CHECK(  SG_fragball__create(pCtx, pFragballDirPathname, &pFragballPathname)  );

    if (!pvhRequest)
    {
        // Add leaves from every dag to the fragball.
        SG_uint32 count_dagnums;
        SG_uint32 i;

        SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums)  );

        for (i=0; i<count_dagnums; i++)
        {
            SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes)  );
            SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, paDagNums[i], prbDagnodes)  );
            SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
        }

        SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
        SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
    }
    else
    {
        // Build the requested fragball.
        SG_bool found;

        SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found)  );
        if (found)
        {
            // Full clone requested.
            SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, pFragballDirPathname, ppszFragballName)  );
        }
        else
        {
            // Not a full clone.
            SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
            if (found)
            {
                // Dagnodes were requested.
                SG_uint32 generations = 0;
                SG_vhash* pvhDags;
                SG_uint32 count_requested_dagnums;
                SG_uint32 count_repo_dagnums = 0;
                SG_uint32 i;
                const char* pszDagNum = NULL;
                const SG_variant* pvRequestedNodes = NULL;
                SG_vhash* pvhRequestedNodes = NULL;
                const char* pszHidRequestedDagnode = NULL;

                // Were additional generations requested?
                SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &found)  );
                if (found)
                    SG_ERR_CHECK(  SG_vhash__get__uint32(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &generations)  );

                SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
                SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums)  );
                if (count_requested_dagnums)
                    SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_repo_dagnums, &repoDagnums)  );

                // For each requested dag, get the requested nodes.
                for (i=0; i<count_requested_dagnums; i++)
                {
                    SG_uint32 iMissingNodeCount;
                    SG_uint32 iDagnum;
                    SG_uint32 j;
                    SG_bool isValidDagnum = SG_FALSE;
                    SG_bool bSpecificNodesRequested = SG_FALSE;

                    // Get the dag's missing node vhash.
                    SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszDagNum, &pvRequestedNodes)  );
                    SG_ERR_CHECK(  SG_dagnum__from_sz__decimal(pCtx, pszDagNum, &iDagnum)  );

                    // Verify that the requested dagnum exists.
                    for (j = 0; j < count_repo_dagnums; j++)
                    {
                        if (repoDagnums[j] == iDagnum)
                        {
                            isValidDagnum = SG_TRUE;
                            break;
                        }
                    }
                    if (!isValidDagnum)
                    {
                        char buf[SG_DAGNUM__BUF_MAX__NAME];
                        SG_ERR_CHECK(  SG_dagnum__to_name(pCtx, iDagnum, buf, sizeof(buf))  );
                        SG_ERR_THROW2(SG_ERR_NO_SUCH_DAG, (pCtx, "%s", buf));
                    }

                    if (pvRequestedNodes)
                    {
                        SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pvRequestedNodes, &pvhRequestedNodes)  );

                        // Get each node listed for the dag.
                        SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhRequestedNodes, &iMissingNodeCount)  );
                        if (iMissingNodeCount > 0)
                        {
                            SG_uint32 k;
                            const SG_variant* pvVal;

                            bSpecificNodesRequested = SG_TRUE;

                            SG_ERR_CHECK(  SG_RBTREE__ALLOC__PARAMS(pCtx, &prbDagnodes, iMissingNodeCount, NULL)  );
                            for (k=0; k<iMissingNodeCount; k++)
                            {
                                SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhRequestedNodes, k, &pszHidRequestedDagnode, &pvVal)  );
                                if (pvVal)
                                {
                                    const char* pszVal;
                                    SG_ERR_CHECK(  SG_variant__get__sz(pCtx, pvVal, &pszVal)  );
                                    if (pszVal)
                                    {
                                        if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_HID_PREFIX))
                                        {
                                            SG_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, iDagnum, pszHidRequestedDagnode, &pszRevFullHid)  );
                                            pszHidRequestedDagnode = pszRevFullHid;
                                        }
                                        else if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_TAG))
                                        {
                                            SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszHidRequestedDagnode, &pszRevFullHid)  );
                                            if (!pszRevFullHid)
                                                SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
                                            pszHidRequestedDagnode = pszRevFullHid;
                                        }
                                        else
                                            SG_ERR_THROW(SG_ERR_PULL_INVALID_FRAGBALL_REQUEST);
                                    }
                                }

                                SG_ERR_CHECK(  SG_rbtree__update(pCtx, prbDagnodes, pszHidRequestedDagnode)  );

                                // Get additional dagnode generations, if requested.
                                SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, pszHidRequestedDagnode, prbDagnodes, generations)  );
                                SG_NULLFREE(pCtx, pszRevFullHid);
                            }
                        }
                    }

                    if (!bSpecificNodesRequested)
                    {
                        // When no specific nodes are in the request, add all leaves.
                        SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes)  );

                        // Get additional dagnode generations, if requested.
                        if (generations)
                        {
                            SG_bool found;
                            const char* hid;

                            SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbDagnodes, &found, &hid, NULL)  );
                            while (found)
                            {
                                SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, hid, prbDagnodes, generations)  );
                                SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &hid, NULL)  );
                            }
                        }
                    }

                    if (prbDagnodes)    // can be null when leaves of an empty dag are requested
                    {
                        SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, iDagnum, prbDagnodes)  );
                        SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
                    }
                } // dagnum loop
            } // if "dags" exists

            /* Add requested blobs to the fragball */
            SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found)  );
            if (found)
            {
                // Blobs were requested.
                SG_vhash* pvhBlobs;
                SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs)  );
                SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pRepo, pFragballPathname, pvhBlobs)  );
            }

            SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
            SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
        }
    }

    /* fall through */
fail:
    // If we had an error, delete the half-baked fragball.
    if (pFragballPathname && SG_context__has_err(pCtx))
        SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pFragballPathname)  );

    SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
    SG_NULLFREE(pCtx, paDagNums);
    SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
    SG_STRING_NULLFREE(pCtx, pstrFragballName);
    SG_NULLFREE(pCtx, pszRevFullHid);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
    SG_NULLFREE(pCtx, repoDagnums);
}
void sg_pack__do_changeset(SG_context* pCtx, SG_repo* pRepo, const char* psz_hid_cs, SG_rbtree* prb_blobs)
{
    SG_changeset* pcs = NULL;
    SG_int32 gen = 0;
    SG_uint32 count_blobs = 0;
    SG_uint32 count_parents = 0;
    SG_varray* pva_parents = NULL;
    SG_uint32 i;
    SG_rbtree* prb_new = NULL;
    const char* psz_hid_root_treenode = NULL;
    const char* psz_key = NULL;
    SG_vhash* pvh_lbl = NULL;
    SG_vhash* pvh_blobs = NULL;

    SG_ERR_CHECK(  SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs, &pcs)  );
    SG_ERR_CHECK(  SG_changeset__get_root(pCtx, pcs, &psz_hid_root_treenode)  );
    SG_ERR_CHECK(  SG_changeset__get_generation(pCtx, pcs, &gen)  );

    SG_ERR_CHECK(  SG_RBTREE__ALLOC__PARAMS(pCtx, &prb_new, count_blobs, NULL)  );

    SG_ERR_CHECK(  SG_changeset__get_list_of_bloblists(pCtx, pcs, &pvh_lbl)  );

    /* add all the tree user file blobs */
    SG_ERR_CHECK(  SG_changeset__get_bloblist_name(pCtx, SG_BLOB_REFTYPE__TREEUSERFILE, &psz_key)  );
    SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvh_lbl, psz_key, &pvh_blobs)  );
    SG_ERR_CHECK(  SG_vhash__count(pCtx, pvh_blobs, &count_blobs)  );

    /* now write all the blobs */
    for (i=0; i<count_blobs; i++)
    {
        const char* psz_hid = NULL;

        SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvh_blobs, i, &psz_hid, NULL)  );
        SG_ERR_CHECK(  SG_rbtree__add(pCtx, prb_new, psz_hid)  );
    }

    /* and the treenode blobs */
    SG_ERR_CHECK(  SG_changeset__get_bloblist_name(pCtx, SG_BLOB_REFTYPE__TREENODE, &psz_key)  );
    SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvh_lbl, psz_key, &pvh_blobs)  );
    SG_ERR_CHECK(  SG_vhash__count(pCtx, pvh_blobs, &count_blobs)  );

    /* now write all the blobs */
    for (i=0; i<count_blobs; i++)
    {
        const char* psz_hid = NULL;

        SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvh_blobs, i, &psz_hid, NULL)  );
        SG_ERR_CHECK(  SG_rbtree__add(pCtx, prb_new, psz_hid)  );
    }

    SG_ERR_CHECK(  sg_pack__do_get_dir__top(pCtx, pRepo, gen, psz_hid_root_treenode, prb_blobs, prb_new)  );
    SG_RBTREE_NULLFREE(pCtx, prb_new);

    /* recurse into each parent changeset */
    SG_ERR_CHECK(  SG_changeset__get_parents(pCtx, pcs, &pva_parents)  );
    if (pva_parents)
    {
        SG_ERR_CHECK(  SG_varray__count(pCtx, pva_parents, &count_parents)  );
        for (i=0; i<count_parents; i++)
        {
            const char* psz_hid = NULL;

            SG_ERR_CHECK(  SG_varray__get__sz(pCtx, pva_parents, i, &psz_hid)  );
            SG_ERR_CHECK(  sg_pack__do_changeset(pCtx, pRepo, psz_hid, prb_blobs)  );
        }
    }

    SG_CHANGESET_NULLFREE(pCtx, pcs);

    return;

fail:
    SG_RBTREE_NULLFREE(pCtx, prb_new);
    SG_CHANGESET_NULLFREE(pCtx, pcs);
}