void u0048_multidag_test__1(SG_context * pCtx)
{
    char bufName[SG_TID_MAX_BUFFER_LENGTH + u0048_multidag__MY_LABEL_LENGTH];
    SG_repo* pRepo = NULL;
    SG_rbtree* prb = NULL;
    SG_uint32 count;
    char* pid1 = NULL;
    char* pid1a = NULL;
    char* pid1b = NULL;
    char* pid1c = NULL;
    char* pid2 = NULL;
    char* pid2a = NULL;
    char* pid2b = NULL;

    VERIFY_ERR_CHECK( SG_strcpy(pCtx, bufName, sizeof(bufName), u0048_multidag__MY_LABEL) );
    VERIFY_ERR_CHECK( SG_tid__generate2(pCtx,
                                        &bufName[u0048_multidag__MY_LABEL_LENGTH],
                                        (sizeof(bufName) - u0048_multidag__MY_LABEL_LENGTH),
                                        32) );

    /* create the repo */
    VERIFY_ERR_CHECK( u0048_multidag__new_repo(pCtx, bufName, &pRepo) );

    VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid1,  NULL, SG_DAGNUM__TESTING__NOTHING,  pRepo) );
    VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid1a, pid1, SG_DAGNUM__TESTING__NOTHING,  pRepo) );
    VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid1b, pid1, SG_DAGNUM__TESTING__NOTHING,  pRepo) );
    VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid1c, pid1, SG_DAGNUM__TESTING__NOTHING,  pRepo) );

    VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid2,  NULL, SG_DAGNUM__TESTING2__NOTHING, pRepo) );
    VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid2a, pid2, SG_DAGNUM__TESTING2__NOTHING, pRepo) );
    VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid2b, pid2, SG_DAGNUM__TESTING2__NOTHING, pRepo) );

    SG_NULLFREE(pCtx, pid1);
    SG_NULLFREE(pCtx, pid1a);
    SG_NULLFREE(pCtx, pid1b);
    SG_NULLFREE(pCtx, pid1c);
    SG_NULLFREE(pCtx, pid2);
    SG_NULLFREE(pCtx, pid2a);
    SG_NULLFREE(pCtx, pid2b);

    /* the first test DAG has one root with three children, so it has three leaves */
    VERIFY_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__TESTING__NOTHING, &prb) );
    VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
    SG_RBTREE_NULLFREE(pCtx, prb);
    VERIFY_COND("count", (3 == count));

    /* the second test DAG has one root with two children, so it has two leaves */
    VERIFY_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__TESTING2__NOTHING, &prb) );
    VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
    SG_RBTREE_NULLFREE(pCtx, prb);
    VERIFY_COND("count", (2 == count));

    SG_REPO_NULLFREE(pCtx, pRepo);
    return;

fail:
    SG_REPO_NULLFREE(pCtx, pRepo);
}

void SG_rbtree_ui64__count(
    SG_context * pCtx,
    const SG_rbtree_ui64 * prb,
    SG_uint32 * pCount)
{
    SG_ERR_CHECK_RETURN( SG_rbtree__count(pCtx, (const SG_rbtree *)prb, pCount) );
}

void sg_repo__bind_vtable(SG_context* pCtx, SG_repo * pRepo, const char * pszStorage)
{
    SG_uint32 count_vtables = 0;

    SG_NULLARGCHECK(pRepo);

    if (pRepo->p_vtable)        // can only be bound once
    {
        SG_ERR_THROW2(SG_ERR_INVALIDARG, (pCtx, "pRepo->p_vtable is already bound"));
    }
    if (pRepo->pvh_descriptor)
    {
        SG_ERR_THROW2(SG_ERR_INVALIDARG, (pCtx, "pRepo->pvh_descriptor is already bound"));
    }

    if (!g_prb_repo_vtables)
    {
        SG_ERR_THROW2(SG_ERR_UNKNOWN_STORAGE_IMPLEMENTATION,
                      (pCtx, "There are no repo storage plugins installed"));
    }

    SG_ERR_CHECK( SG_rbtree__count(pCtx, g_prb_repo_vtables, &count_vtables) );
    if (0 == count_vtables)
    {
        SG_ERR_THROW2(SG_ERR_UNKNOWN_STORAGE_IMPLEMENTATION,
                      (pCtx, "There are no repo storage plugins installed"));
    }

    if (!pszStorage || !*pszStorage)
    {
        if (1 == count_vtables)
        {
            // no storage name given: if exactly one plugin is installed, use it
            SG_bool b = SG_FALSE;
            SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, g_prb_repo_vtables, &b, NULL, (void**) &pRepo->p_vtable) );
            SG_ASSERT(pRepo->p_vtable);
        }
        else
        {
            SG_ERR_THROW2(SG_ERR_UNKNOWN_STORAGE_IMPLEMENTATION,
                          (pCtx, "Multiple repo storage plugins installed. Must specify."));
        }
    }
    else
    {
        // look up the requested storage implementation by name
        SG_bool b = SG_FALSE;
        SG_ERR_CHECK( SG_rbtree__find(pCtx, g_prb_repo_vtables, pszStorage, &b, (void**) &pRepo->p_vtable) );
        if (!b || !pRepo->p_vtable)
        {
            SG_ERR_THROW(SG_ERR_UNKNOWN_STORAGE_IMPLEMENTATION);
        }
    }

fail:
    ;
}

void SG_random__select_2_random_rbtree_nodes(
    SG_context * pCtx,
    SG_rbtree * pRbtree,
    const char ** ppKey1, void ** ppAssocData1,
    const char ** ppKey2, void ** ppAssocData2)
{
    SG_uint32 count, r1, r2, i;
    SG_rbtree_iterator * p = NULL;
    SG_bool ok = SG_FALSE;
    const char * pKey = NULL;
    void * pAssocData = NULL;
    const char * pKey1 = NULL;
    void * pAssocData1 = NULL;
    const char * pKey2 = NULL;
    void * pAssocData2 = NULL;

    SG_NULLARGCHECK_RETURN(pRbtree);

    SG_ERR_CHECK( SG_rbtree__count(pCtx, pRbtree, &count) );
    SG_ARGCHECK(count>=2, pRbtree);

    // pick two distinct indices: r2 is r1 plus an offset in 1..count-1, mod count,
    // so r2 can never equal r1.
    r1 = SG_random_uint32(count);
    r2 = (r1+1+SG_random_uint32(count-1))%count;

    SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &p, pRbtree, &ok, &pKey, &pAssocData) );
    for(i=0; i<=r1 || i<=r2; ++i)
    {
        SG_ASSERT(ok);
        if(i==r1)
        {
            pKey1 = pKey;
            pAssocData1 = pAssocData;
        }
        else if(i==r2)
        {
            pKey2 = pKey;
            pAssocData2 = pAssocData;
        }
        SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, p, &ok, &pKey, &pAssocData) );
    }
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, p);

    if(ppKey1!=NULL)
        *ppKey1 = pKey1;
    if(ppAssocData1!=NULL)
        *ppAssocData1 = pAssocData1;
    if(ppKey2!=NULL)
        *ppKey2 = pKey2;
    if(ppAssocData2!=NULL)
        *ppAssocData2 = pAssocData2;

    return;
fail:
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, p);
}

/**
 * if more than one leaf changed the contents of the file, return the number of
 * unique new values that it was set to.
 */
void SG_mrg_cset_entry_conflict__count_unique_file_hid_blob(SG_context * pCtx,
                                                            SG_mrg_cset_entry_conflict * pMrgCSetEntryConflict,
                                                            SG_uint32 * pCount)
{
    SG_NULLARGCHECK_RETURN(pMrgCSetEntryConflict);

    if (pMrgCSetEntryConflict->prbUnique_File_HidBlob)
        SG_ERR_CHECK_RETURN( SG_rbtree__count(pCtx, pMrgCSetEntryConflict->prbUnique_File_HidBlob, pCount) );
    else
        *pCount = 0;
}

/**
 * if more than one leaf moved the entry (to another directory), return the
 * number of unique new values that it was set to.
 */
void SG_mrg_cset_entry_conflict__count_unique_gid_parent(SG_context * pCtx,
                                                         SG_mrg_cset_entry_conflict * pMrgCSetEntryConflict,
                                                         SG_uint32 * pCount)
{
    SG_NULLARGCHECK_RETURN(pMrgCSetEntryConflict);

    if (pMrgCSetEntryConflict->prbUnique_GidParent)
        SG_ERR_CHECK_RETURN( SG_rbtree__count(pCtx, pMrgCSetEntryConflict->prbUnique_GidParent, pCount) );
    else
        *pCount = 0;
}

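/*
 * The two counting helpers above answer the "how many distinct values did the
 * leaves pick?" question behind a divergent-edit conflict.  The sketch below
 * is a hypothetical caller, not part of the original source: the helper name
 * and the "more than one unique value means divergent" rule are assumptions
 * used only to illustrate the intended call pattern.
 */
static void my_conflict_is_divergent(SG_context * pCtx,
                                     SG_mrg_cset_entry_conflict * pConflict,
                                     SG_bool * pbDivergent)
{
    SG_uint32 nrContents = 0;   /* unique file-content HIDs across the leaves */
    SG_uint32 nrParents = 0;    /* unique parent directories across the leaves */

    SG_NULLARGCHECK_RETURN(pConflict);
    SG_NULLARGCHECK_RETURN(pbDivergent);

    SG_ERR_CHECK_RETURN( SG_mrg_cset_entry_conflict__count_unique_file_hid_blob(pCtx, pConflict, &nrContents) );
    SG_ERR_CHECK_RETURN( SG_mrg_cset_entry_conflict__count_unique_gid_parent(pCtx, pConflict, &nrParents) );

    /* the leaves disagree if they produced more than one content or more than one location */
    *pbDivergent = (nrContents > 1) || (nrParents > 1);
}
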
void SG_random__select_random_rbtree_node(
    SG_context * pCtx,
    SG_rbtree * pRbtree,
    const char ** ppKey,
    void ** ppAssocData)
{
    SG_uint32 count, i;
    SG_rbtree_iterator * p = NULL;
    SG_bool ok;
    const char * pKey;
    void * pAssocData;

    SG_NULLARGCHECK_RETURN(pRbtree);

    SG_ERR_CHECK( SG_rbtree__count(pCtx, pRbtree, &count) );
    SG_ARGCHECK(count!=0, pRbtree);

    i = SG_random_uint32(count);

    SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &p, pRbtree, &ok, &pKey, &pAssocData) );
    SG_ASSERT(ok);
    while(i>0)
    {
        --i;
        SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, p, &ok, &pKey, &pAssocData) );
        SG_ASSERT(ok);
    }
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, p);

    if(ppKey!=NULL)
        *ppKey = pKey;
    if(ppAssocData!=NULL)
        *ppAssocData = pAssocData;

    return;
fail:
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, p);
}

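/*
 * Both selection helpers above walk the tree with an iterator, so each pick
 * costs O(count) rather than O(log count).  The sketch below is a hypothetical
 * helper (not in the original source) showing the expected call pattern: the
 * caller guarantees the tree is non-empty (at least two entries for the pair
 * variant) and treats the returned keys as borrowed from the rbtree.
 */
static void my_pick_one_and_a_pair(SG_context * pCtx, SG_rbtree * prb)
{
    const char * pszKey = NULL;
    const char * pszKeyA = NULL;
    const char * pszKeyB = NULL;

    /* one random node; the helper asserts internally that the tree is non-empty */
    SG_ERR_CHECK_RETURN( SG_random__select_random_rbtree_node(pCtx, prb, &pszKey, NULL) );

    /* two distinct random nodes; requires at least two entries */
    SG_ERR_CHECK_RETURN( SG_random__select_2_random_rbtree_nodes(pCtx, prb, &pszKeyA, NULL, &pszKeyB, NULL) );

    /* the returned keys point into the rbtree and must not be freed here */
    (void)pszKey; (void)pszKeyA; (void)pszKeyB;
}
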
static void _advise_after_update(SG_context * pCtx,
                                 SG_option_state * pOptSt,
                                 SG_pathname * pPathCwd,
                                 const char * pszBaselineBeforeUpdate)
{
    SG_pendingtree * pPendingTree = NULL;
    SG_repo * pRepo;
    char * pszBaselineAfterUpdate = NULL;
    SG_rbtree * prbLeaves = NULL;
    SG_uint32 nrLeaves;
    SG_bool bUpdateChangedBaseline;

    // re-open pendingtree to get the now-current baseline (we have to do
    // this in a new instance because the UPDATE saves the pendingtree which
    // frees all of the interesting stuff).

    SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, pPathCwd, pOptSt->bIgnoreWarnings, &pPendingTree) );
    SG_ERR_CHECK( SG_pendingtree__get_repo(pCtx, pPendingTree, &pRepo) );
    SG_ERR_CHECK( _get_baseline(pCtx, pPendingTree, &pszBaselineAfterUpdate) );

    // see if the update actually changed the baseline.

    bUpdateChangedBaseline = (strcmp(pszBaselineBeforeUpdate, pszBaselineAfterUpdate) != 0);

    // get the list of all heads/leaves.
    //
    // TODO 2010/06/30 Revisit this when we have NAMED BRANCHES because we
    // TODO            want to filter this list for things within their BRANCH.

    SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, &prbLeaves) );

#if defined(DEBUG)
    {
        SG_bool bFound = SG_FALSE;
        SG_ERR_CHECK( SG_rbtree__find(pCtx, prbLeaves, pszBaselineAfterUpdate, &bFound, NULL) );
        SG_ASSERT( (bFound) );
    }
#endif

    SG_ERR_CHECK( SG_rbtree__count(pCtx, prbLeaves, &nrLeaves) );

    if (nrLeaves > 1)
    {
        if (bUpdateChangedBaseline)
        {
            SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT,
                                      "Baseline updated to descendant head, but there are multiple heads; consider merging.\n") );
        }
        else
        {
            SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT,
                                      "Baseline already at head, but there are multiple heads; consider merging.\n") );
        }
    }
    else
    {
        if (bUpdateChangedBaseline)
        {
            SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "Baseline updated to head.\n") );
        }
        else
        {
            SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "Baseline already at head.\n") );
        }
    }

fail:
    SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
    SG_RBTREE_NULLFREE(pCtx, prbLeaves);
    SG_NULLFREE(pCtx, pszBaselineAfterUpdate);
}

int u0050_logstuff_test__1(SG_context * pCtx, SG_pathname* pPathTopDir)
{
    char bufName[SG_TID_MAX_BUFFER_LENGTH];
    SG_pathname* pPathWorkingDir = NULL;
    SG_pathname* pPathFile = NULL;
    SG_vhash* pvh = NULL;
    SG_dagnode* pdn = NULL;
    const char* psz_hid_cs = NULL;
    SG_repo* pRepo = NULL;
    SG_uint32 count;
    SG_rbtree* prb = NULL;
    SG_varray* pva = NULL;
    SG_rbtree* prb_reversed = NULL;
    const char* psz_val = NULL;
    SG_audit q;

    VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, bufName, sizeof(bufName), 32) );

    /* create the working dir */
    VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, bufName) );
    VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir) );

    /* add stuff */
    VERIFY_ERR_CHECK( u0050_logstuff__create_file__numbers(pCtx, pPathWorkingDir, "aaa", 20) );

    /* create the repo */
    VERIFY_ERR_CHECK( _ut_pt__new_repo(pCtx, bufName, pPathWorkingDir) );
    VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
    VERIFY_ERR_CHECK( u0050_logstuff__commit_all(pCtx, pPathWorkingDir, &pdn) );

    VERIFY_ERR_CHECK( SG_dagnode__get_id_ref(pCtx, pdn, &psz_hid_cs) );

    SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, bufName, &pRepo) );

#define MY_COMMENT "The name of this new file sucks! What kind of a name is 'aaa'?"

    VERIFY_ERR_CHECK( SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS) );

    VERIFY_ERR_CHECK( SG_vc_comments__add(pCtx, pRepo, psz_hid_cs, MY_COMMENT, &q) );
    VERIFY_ERR_CHECK( SG_vc_stamps__add(pCtx, pRepo, psz_hid_cs, "crap", &q) );
    VERIFY_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "tcrap", &q) );

    VERIFY_ERR_CHECK( SG_vc_comments__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
    VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
    VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "text", &psz_val) );
    VERIFY_COND("match", (0 == strcmp(psz_val, MY_COMMENT)) );
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK( SG_vc_stamps__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
    VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
    VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "stamp", &psz_val) );
    VERIFY_COND("match", (0 == strcmp(psz_val, "crap")) );
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
    VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
    VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "tag", &psz_val) );
    VERIFY_COND("match", (0 == strcmp(psz_val, "tcrap")) );
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
    VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
    VERIFY_COND("count", (1 == count));

    VERIFY_ERR_CHECK( SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed) );
    VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb_reversed, &count) );
    VERIFY_COND("count", (1 == count));

    {
        const char* psz_my_key = NULL;
        const char* psz_my_val = NULL;
        SG_bool b;

        VERIFY_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val) );
        VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap")) );
        VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs)) );
    }

    SG_RBTREE_NULLFREE(pCtx, prb_reversed);
    SG_RBTREE_NULLFREE(pCtx, prb);

    VERIFY_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "whatever", &q) );

    VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
    VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
    VERIFY_COND("count", (2 == count));
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
    VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
    VERIFY_COND("count", (2 == count));

    VERIFY_ERR_CHECK( SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed) );
    VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb_reversed, &count) );
    VERIFY_COND("count", (1 == count));

    {
        const char* psz_my_key = NULL;
        const char* psz_my_val = NULL;
        SG_bool b;

        VERIFY_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val) );
        VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs)) );
        /* we don't know whether psz_my_val is tcrap or whatever. */
        // VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap")) );
    }

    SG_RBTREE_NULLFREE(pCtx, prb_reversed);
    SG_RBTREE_NULLFREE(pCtx, prb);

    {
        const char* psz_remove = "whatever";

        VERIFY_ERR_CHECK( SG_vc_tags__remove(pCtx, pRepo, &q, 1, &psz_remove) );

        /* Note that by removing "whatever", we are bringing the tags list back
         * to a state it has been in before (just tcrap).  This changeset in
         * the tags table will have its own csid, because the parentage is
         * different, but its root idtrie HID will be the same as a previous
         * node. */
    }

    VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
    VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
    VERIFY_COND("count", (1 == count));
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
    VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
    VERIFY_COND("count", (1 == count));
    SG_RBTREE_NULLFREE(pCtx, prb);

    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathFile);

    return 1;

fail:
    SG_VHASH_NULLFREE(pCtx, pvh);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathFile);

    return 0;
}

void SG_workingdir__create_and_get(
    SG_context* pCtx,
    const char* pszDescriptorName,
    const SG_pathname* pPathDirPutTopLevelDirInHere,
    SG_bool bCreateDrawer,
    const char* psz_spec_hid_cs_baseline
    )
{
    SG_repo* pRepo = NULL;
    SG_rbtree* pIdsetLeaves = NULL;
    SG_uint32 count_leaves = 0;
    SG_changeset* pcs = NULL;
    const char* pszidUserSuperRoot = NULL;
    SG_bool b = SG_FALSE;
    char* psz_hid_cs_baseline = NULL;
    SG_pendingtree * pPendingTree = NULL;
    SG_vhash * pvhTimestamps = NULL;

    /*
     * Fetch the descriptor by its given name and use it to connect to
     * the repo.
     */
    SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, pszDescriptorName, &pRepo) );

    if (psz_spec_hid_cs_baseline)
    {
        SG_ERR_CHECK( SG_strdup(pCtx, psz_spec_hid_cs_baseline, &psz_hid_cs_baseline) );
    }
    else
    {
        const char* psz_hid = NULL;

        /*
         * If you do not specify a hid to be the baseline, then this routine
         * currently only works if there is exactly one leaf in the repo.
         */
        SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, &pIdsetLeaves) );

        SG_ERR_CHECK( SG_rbtree__count(pCtx, pIdsetLeaves, &count_leaves) );

        if (count_leaves != 1)
            SG_ERR_THROW( SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE );

        SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, pIdsetLeaves, &b, &psz_hid, NULL) );

        SG_ERR_CHECK( SG_STRDUP(pCtx, psz_hid, &psz_hid_cs_baseline) );
    }

    /*
     * Load the desired changeset from the repo so we can look up the
     * id of its user root directory
     */
    SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs_baseline, &pcs) );
    SG_ERR_CHECK( SG_changeset__get_root(pCtx, pcs, &pszidUserSuperRoot) );

    if (bCreateDrawer)
    {
        SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhTimestamps) );

        // Retrieve everything into the WD and capture the timestamps on the files that we create.
        SG_ERR_CHECK( sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, pvhTimestamps) );

        // this creates "repo.json" with the repo-descriptor.
        SG_ERR_CHECK( SG_workingdir__set_mapping(pCtx, pPathDirPutTopLevelDirInHere, pszDescriptorName, NULL) );

        // this creates an empty "wd.json" file (which doesn't know anything).
        SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, pPathDirPutTopLevelDirInHere, SG_TRUE, &pPendingTree) );

        // force set the initial parents to the current changeset.
        SG_ERR_CHECK( SG_pendingtree__set_single_wd_parent(pCtx, pPendingTree, psz_hid_cs_baseline) );

        // force initialize the timestamp cache to the list that we just built; this should
        // be the only timestamps in the cache since we just populated the WD.
        SG_ERR_CHECK( SG_pendingtree__set_wd_timestamp_cache(pCtx, pPendingTree, &pvhTimestamps) );    // this steals our vhash

        SG_ERR_CHECK( SG_pendingtree__save(pCtx, pPendingTree) );
    }
    else
    {
        // Retrieve everything into the WD but do not create .sgdrawer or record timestamps.
        // This is more like an EXPORT operation.
        SG_ERR_CHECK( sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, NULL) );
    }

fail:
    SG_VHASH_NULLFREE(pCtx, pvhTimestamps);
    SG_NULLFREE(pCtx, psz_hid_cs_baseline);
    SG_CHANGESET_NULLFREE(pCtx, pcs);
    SG_RBTREE_NULLFREE(pCtx, pIdsetLeaves);
    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
}

/**
 * Compare all the nodes of a single DAG in two repos.
 */
static void _compare_one_dag(SG_context* pCtx,
                             SG_repo* pRepo1,
                             SG_repo* pRepo2,
                             SG_uint32 iDagNum,
                             SG_bool* pbIdentical)
{
    SG_bool bFinalResult = SG_FALSE;
    SG_rbtree* prbRepo1Leaves = NULL;
    SG_rbtree* prbRepo2Leaves = NULL;
    SG_uint32 iRepo1LeafCount, iRepo2LeafCount;
    SG_rbtree_iterator* pIterator = NULL;
    const char* pszId = NULL;
    SG_dagnode* pRepo1Dagnode = NULL;
    SG_dagnode* pRepo2Dagnode = NULL;
    SG_bool bFoundRepo1Leaf = SG_FALSE;
    SG_bool bFoundRepo2Leaf = SG_FALSE;
    SG_bool bDagnodesEqual = SG_FALSE;

    SG_NULLARGCHECK_RETURN(pRepo1);
    SG_NULLARGCHECK_RETURN(pRepo2);
    SG_NULLARGCHECK_RETURN(pbIdentical);

    SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo1, iDagNum, &prbRepo1Leaves) );
    SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo2, iDagNum, &prbRepo2Leaves) );

    SG_ERR_CHECK( SG_rbtree__count(pCtx, prbRepo1Leaves, &iRepo1LeafCount) );
    SG_ERR_CHECK( SG_rbtree__count(pCtx, prbRepo2Leaves, &iRepo2LeafCount) );

    if (iRepo1LeafCount != iRepo2LeafCount)
    {
#if TRACE_SYNC
        SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "leaf count differs\n") );
#endif
        goto Different;
    }

    SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pIterator, prbRepo1Leaves, &bFoundRepo1Leaf, &pszId, NULL) );
    while (bFoundRepo1Leaf)
    {
        SG_ERR_CHECK( SG_rbtree__find(pCtx, prbRepo2Leaves, pszId, &bFoundRepo2Leaf, NULL) );
        if (!bFoundRepo2Leaf)
        {
#if TRACE_SYNC && 0
            SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "couldn't locate leaf\r\n") );
            SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Repo 1 leaves:\r\n") );
            SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo1Leaves) );
            SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Repo 2 leaves:\r\n") );
            SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo2Leaves) );
            SG_ERR_CHECK( SG_console__flush(pCtx, SG_CS_STDERR) );
#endif
            goto Different;
        }

        SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo1, pszId, &pRepo1Dagnode) );
        SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo2, pszId, &pRepo2Dagnode) );

        SG_ERR_CHECK( _compare_dagnodes(pCtx, pRepo1, pRepo1Dagnode, pRepo2, pRepo2Dagnode, &bDagnodesEqual) );

        SG_DAGNODE_NULLFREE(pCtx, pRepo1Dagnode);
        SG_DAGNODE_NULLFREE(pCtx, pRepo2Dagnode);

        if (!bDagnodesEqual)
            goto Different;

        SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pIterator, &bFoundRepo1Leaf, &pszId, NULL) );
    }

    bFinalResult = SG_TRUE;

Different:
    *pbIdentical = bFinalResult;

    // fall through
fail:
    SG_RBTREE_NULLFREE(pCtx, prbRepo1Leaves);
    SG_RBTREE_NULLFREE(pCtx, prbRepo2Leaves);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIterator);
}

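/*
 * Minimal sketch of a caller (hypothetical helper, not part of the original
 * source; a full sync check would presumably repeat this for every DAG number
 * the two repos share).  It compares only the version-control DAG and reports
 * the result.
 */
static void my_report_vc_dag_identical(SG_context* pCtx, SG_repo* pRepo1, SG_repo* pRepo2)
{
    SG_bool bIdentical = SG_FALSE;

    SG_ERR_CHECK( _compare_one_dag(pCtx, pRepo1, pRepo2, SG_DAGNUM__VERSION_CONTROL, &bIdentical) );

    SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT,
                              bIdentical ? "version control dags match\n"
                                         : "version control dags differ\n") );

fail:
    ;
}
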
void SG_sync__build_best_guess_dagfrag(
    SG_context* pCtx,
    SG_repo* pRepo,
    SG_uint64 iDagNum,
    SG_rbtree* prbStartFromHids,
    SG_vhash* pvhConnectToHidsAndGens,
    SG_dagfrag** ppFrag)
{
    SG_uint32 i, countConnectTo;
    SG_rbtree_iterator* pit = NULL;
    SG_dagnode* pdn = NULL;
    SG_dagfrag* pFrag = NULL;
    SG_repo_fetch_dagnodes_handle* pdh = NULL;
    SG_int32 minGen = SG_INT32_MAX;
    SG_int32 maxGen = 0;
    SG_uint32 gensToFetch = 0;

    char* psz_repo_id = NULL;
    char* psz_admin_id = NULL;

    SG_bool bNextHid;
    const char* pszRefHid;
    SG_int32 gen;

#if TRACE_SYNC
    SG_int64 startTime;
    SG_int64 endTime;
#endif

    SG_NULLARGCHECK_RETURN(prbStartFromHids);

    /* Find the minimum generation in pertinent "connect to" nodes. */
    if (pvhConnectToHidsAndGens)
    {
        SG_ERR_CHECK( SG_vhash__count(pCtx, pvhConnectToHidsAndGens, &countConnectTo) );
        for (i = 0; i < countConnectTo; i++)
        {
            SG_ERR_CHECK( SG_vhash__get_nth_pair__int32(pCtx, pvhConnectToHidsAndGens, i, &pszRefHid, &gen) );
            if (gen < minGen)
                minGen = gen;
        }
    }

    /* Happens when pulling into an empty repo, or when an entire dag is specifically requested. */
    if (minGen == SG_INT32_MAX)
        minGen = -1;

    SG_ERR_CHECK( SG_repo__fetch_dagnodes__begin(pCtx, pRepo, iDagNum, &pdh) );

    /* Find the maximum generation in pertinent "start from" nodes. */
    SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pit, prbStartFromHids, &bNextHid, &pszRefHid, NULL) );
    while (bNextHid)
    {
        SG_ERR_CHECK( SG_repo__fetch_dagnodes__one(pCtx, pRepo, pdh, pszRefHid, &pdn) );
        SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn, &gen) );
        if (gen > maxGen)
            maxGen = gen;
        SG_DAGNODE_NULLFREE(pCtx, pdn);

        SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pit, &bNextHid, &pszRefHid, NULL) );
    }
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);

    /* Fetch enough generations to span the gap, e.g. a deepest start-from head at
     * generation 110 and a shallowest connect-to node at generation 100 give a
     * depth of 10.  Fall back to a fixed depth when the gap is unknown. */
    if (maxGen <= minGen)
        gensToFetch = FALLBACK_GENS_PER_ROUNDTRIP;
    else
        gensToFetch = maxGen - minGen;

#if TRACE_SYNC
    {
        char buf_dagnum[SG_DAGNUM__BUF_MAX__HEX];
        SG_uint32 count;

        SG_ERR_CHECK( SG_dagnum__to_sz__hex(pCtx, iDagNum, buf_dagnum, sizeof(buf_dagnum)) );
        SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Building best guess dagfrag for dag %s...\n", buf_dagnum) );
        SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Starting from nodes:\n") );
        SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, prbStartFromHids) );
        SG_ERR_CHECK( SG_vhash_debug__dump_to_console__named(pCtx, pvhConnectToHidsAndGens, "Connecting to nodes") );
        SG_ERR_CHECK( SG_rbtree__count(pCtx, prbStartFromHids, &count) );
        SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "result has %u generations from %u starting nodes.\n", gensToFetch, count) );
        SG_ERR_CHECK( SG_console__flush(pCtx, SG_CS_STDERR) );

        SG_ERR_CHECK( SG_time__get_milliseconds_since_1970_utc(pCtx, &startTime) );
    }
#endif

    /* Return a frag with the corresponding generations filled in. */
    SG_ERR_CHECK( SG_repo__get_repo_id(pCtx, pRepo, &psz_repo_id) );
    SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id) );

    SG_ERR_CHECK( SG_dagfrag__alloc(pCtx, &pFrag, psz_repo_id, psz_admin_id, iDagNum) );
    SG_ERR_CHECK( SG_dagfrag__load_from_repo__multi(pCtx, pFrag, pRepo, prbStartFromHids, gensToFetch) );

#if TRACE_SYNC
    SG_ERR_CHECK( SG_time__get_milliseconds_since_1970_utc(pCtx, &endTime) );
    {
        SG_uint32 dagnodeCount;
        double seconds = ((double)endTime-(double)startTime)/1000;

        SG_ERR_CHECK( SG_dagfrag__dagnode_count(pCtx, pFrag, &dagnodeCount) );
        SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, " - %u nodes in frag, built in %1.3f seconds\n", dagnodeCount, seconds) );
        SG_ERR_CHECK( SG_dagfrag_debug__dump__console(pCtx, pFrag, "best-guess dagfrag", 0, SG_CS_STDERR) );
    }
#endif

    *ppFrag = pFrag;
    pFrag = NULL;

    /* Common cleanup */
fail:
    SG_NULLFREE(pCtx, psz_repo_id);
    SG_NULLFREE(pCtx, psz_admin_id);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_DAGFRAG_NULLFREE(pCtx, pFrag);
    SG_ERR_IGNORE( SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pdh) );
}

void SG_repo__pack__vcdiff(SG_context* pCtx, SG_repo * pRepo)
{
    SG_rbtree* prb_leaves = NULL;
    SG_uint32 count_leaves = 0;
    const char* psz_hid_cs = NULL;
    SG_rbtree* prb_blobs = NULL;
    SG_bool b;
    SG_rbtree_iterator* pit = NULL;
    SG_rbtree_iterator* pit_for_gid = NULL;
    SG_bool b_for_gid;
    const char* psz_hid_ref = NULL;
    const char* psz_hid_blob = NULL;
    const char* psz_gid = NULL;
    SG_rbtree* prb = NULL;
    const char* psz_gen = NULL;
    SG_repo_tx_handle* pTx;

    SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, &prb_leaves) );
    SG_ERR_CHECK( SG_rbtree__count(pCtx, prb_leaves, &count_leaves) );

    SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, prb_leaves, &b, &psz_hid_cs, NULL) );

    SG_ERR_CHECK( SG_RBTREE__ALLOC(pCtx, &prb_blobs) );

    SG_ERR_CHECK( sg_pack__do_changeset(pCtx, pRepo, psz_hid_cs, prb_blobs) );

    SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pit, prb_blobs, &b, &psz_gid, (void**) &prb) );
    while (b)
    {
        SG_uint32 count_for_gid = 0;

        SG_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count_for_gid) );
        if (count_for_gid > 1)
        {
            psz_hid_ref = NULL;
            SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pit_for_gid, prb, &b_for_gid, &psz_gen, (void**) &psz_hid_blob) );
            while (b_for_gid)
            {
                // Not a lot of thought went into doing each of these in its own repo tx.  Consider alternatives.
                SG_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) );
                if (psz_hid_ref)
                {
                    SG_ERR_CHECK( SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid_blob, SG_BLOBENCODING__VCDIFF, psz_hid_ref, NULL, NULL, NULL, NULL) );
                    // TODO be tolerant here of SG_ERR_REPO_BUSY
                }
                else
                {
                    // the first version for this gid is kept as a full blob and used as the delta reference for the rest
                    psz_hid_ref = psz_hid_blob;
                    SG_ERR_CHECK( SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid_ref, SG_BLOBENCODING__FULL, NULL, NULL, NULL, NULL, NULL) );
                    // TODO be tolerant here of SG_ERR_REPO_BUSY
                }
                SG_ERR_CHECK( SG_repo__commit_tx(pCtx, pRepo, &pTx) );

                SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pit_for_gid, &b_for_gid, &psz_gen, (void**) &psz_hid_blob) );
            }
            SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit_for_gid);
            psz_hid_ref = NULL;
        }

        SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pit, &b, &psz_gid, (void**) &prb) );
    }
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);

    SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, prb_blobs, _sg_repo__free_rbtree);
    SG_RBTREE_NULLFREE(pCtx, prb_leaves);

    return;

fail:
    // free whatever was allocated before the error instead of leaking it
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit_for_gid);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
    SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, prb_blobs, _sg_repo__free_rbtree);
    SG_RBTREE_NULLFREE(pCtx, prb_leaves);
}