/**
 * Transfer every member dagnode of *ppFragToBeEaten into pConsumerFrag,
 * then free the eaten frag and NULL the caller's pointer.
 *
 * Both frags must describe the same dag of the same repo (dagnum, repo id
 * and admin id must all match) or SG_ERR_REPO_MISMATCH is thrown.
 *
 * NOTE(review): on an error mid-transfer the eaten frag is NOT freed and
 * *ppFragToBeEaten is left set — the caller still owns a possibly
 * partially-consumed frag.
 */
void SG_dagfrag__eat_other_frag(SG_context * pCtx,
                                SG_dagfrag* pConsumerFrag,
                                SG_dagfrag** ppFragToBeEaten)
{
    SG_rbtree_iterator* pIter = NULL;
    SG_dagfrag* pMeal = NULL;
    _my_data * pData = NULL;
    const char* pszKey = NULL;
    SG_bool bHaveEntry = SG_FALSE;

    SG_NULLARGCHECK_RETURN(pConsumerFrag);
    SG_NULLARGCHECK_RETURN(ppFragToBeEaten);
    SG_NULLARGCHECK_RETURN(*ppFragToBeEaten);

    pMeal = *ppFragToBeEaten;

#if DEBUG && TRACE_DAGFRAG
    SG_ERR_CHECK(  SG_dagfrag_debug__dump__console(pCtx, pMeal, "frag to be eaten", 0, SG_CS_STDOUT)  );
    SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, pMeal->m_pRB_Cache)  );
    SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );
    SG_ERR_CHECK(  SG_dagfrag_debug__dump__console(pCtx, pConsumerFrag, "new frag before meal", 0, SG_CS_STDOUT)  );
    SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, pConsumerFrag->m_pRB_Cache)  );
    SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );
#endif

    // both frags must refer to the same dag of the same repo.
    if (pConsumerFrag->m_iDagNum != pMeal->m_iDagNum)
        SG_ERR_THROW_RETURN(  SG_ERR_REPO_MISMATCH  );
    if (0 != strcmp(pConsumerFrag->m_sz_repo_id, pMeal->m_sz_repo_id))
        SG_ERR_THROW_RETURN(  SG_ERR_REPO_MISMATCH  );
    if (0 != strcmp(pConsumerFrag->m_sz_admin_id, pMeal->m_sz_admin_id))
        SG_ERR_THROW_RETURN(  SG_ERR_REPO_MISMATCH  );

    // walk the meal's cache and hand each actual dagnode to the consumer;
    // __add_dagnode steals the node (it NULLs m_pDagnode) on success.
    SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIter, pMeal->m_pRB_Cache,
                                              &bHaveEntry, &pszKey, (void **)&pData)  );
    while (bHaveEntry)
    {
        if (pData->m_pDagnode)
            SG_ERR_CHECK(  SG_dagfrag__add_dagnode(pCtx, pConsumerFrag, &pData->m_pDagnode)  );

        SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIter, &bHaveEntry, &pszKey, (void **)&pData)  );
    }
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);

    // everything transferred; dispose of the husk and take the caller's pointer.
    SG_DAGFRAG_NULLFREE(pCtx, pMeal);
    *ppFragToBeEaten = NULL;

#if DEBUG && TRACE_DAGFRAG
    SG_ERR_CHECK(  SG_dagfrag_debug__dump__console(pCtx, pConsumerFrag, "new frag after meal", 0, SG_CS_STDOUT)  );
    SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, pConsumerFrag->m_pRB_Cache)  );
    SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );
#endif

    return;

fail:
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);
}
/**
 * Invoke the caller's callback for each START_ and INTERIOR_ member of
 * the frag's cache, presenting ancestors before descendants.
 */
void SG_dagfrag__foreach_member(SG_context * pCtx,
                                SG_dagfrag * pFrag,
                                SG_dagfrag__foreach_member_callback * pcb,
                                void * pVoidCallerData)
{
    struct _fm_data fm_data;

    SG_NULLARGCHECK_RETURN(pFrag);
    SG_NULLARGCHECK_RETURN(pcb);

    // ordering comes from the generation-sorted member cache; build it
    // lazily here (it is discarded whenever the frag is extended).
    if (!pFrag->m_pRB_GenerationSortedMemberCache)
        SG_ERR_CHECK_RETURN(  _my_create_generation_sorted_member_cache(pCtx, pFrag)  );

    fm_data.pFrag = pFrag;
    fm_data.pVoidCallerData = pVoidCallerData;
    fm_data.pcb = pcb;

#if TRACE_DAGFRAG && 0
    SG_ERR_CHECK_RETURN(  SG_console(pCtx, SG_CS_STDERR, "SORTED MEMBER CACHE:\r\n")  );
    SG_ERR_CHECK_RETURN(  SG_rbtree_debug__dump_keys_to_console(pCtx, pFrag->m_pRB_GenerationSortedMemberCache)  );
    SG_ERR_CHECK_RETURN(  SG_console__flush(pCtx, SG_CS_STDERR)  );
#endif

    // wrap the caller's callback with our own so we can munge the
    // arguments it sees.
    SG_ERR_CHECK_RETURN(  SG_rbtree__foreach(pCtx,
                                             pFrag->m_pRB_GenerationSortedMemberCache,
                                             _sg_dagfrag__my_foreach_member_callback,
                                             &fm_data)  );
}
/**
 * Render the frag (via SG_dagfrag_debug__dump) and write it to the given
 * console stream, flushing afterwards.  Errors are cleaned up via the
 * fall-through fail label; the temp string is always freed.
 */
void SG_dagfrag_debug__dump__console(SG_context* pCtx,
                                     SG_dagfrag* pFrag,
                                     const char* szLabel,
                                     SG_uint32 indent,
                                     SG_console_stream cs)
{
    // BUGFIX: pString was uninitialized; if SG_STRING__ALLOC failed,
    // the fail path called SG_STRING_NULLFREE on an indeterminate
    // pointer (undefined behavior).
    SG_string* pString = NULL;

    SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pString)  );
    SG_ERR_CHECK(  SG_dagfrag_debug__dump(pCtx, pFrag, szLabel, indent, pString)  );
    // BUGFIX: pass the dump text as an argument, not as the format
    // string — a '%' in the dump would otherwise be interpreted by the
    // printf-style SG_console.
    SG_ERR_CHECK(  SG_console(pCtx, cs, "%s", SG_string__sz(pString))  );
    SG_ERR_CHECK(  SG_console__flush(pCtx, cs)  );

    // fall through
fail:
    SG_STRING_NULLFREE(pCtx, pString);
}
/** * Compare all the nodes of a single DAG in two repos. */ static void _compare_one_dag(SG_context* pCtx, SG_repo* pRepo1, SG_repo* pRepo2, SG_uint32 iDagNum, SG_bool* pbIdentical) { SG_bool bFinalResult = SG_FALSE; SG_rbtree* prbRepo1Leaves = NULL; SG_rbtree* prbRepo2Leaves = NULL; SG_uint32 iRepo1LeafCount, iRepo2LeafCount; SG_rbtree_iterator* pIterator = NULL; const char* pszId = NULL; SG_dagnode* pRepo1Dagnode = NULL; SG_dagnode* pRepo2Dagnode = NULL; SG_bool bFoundRepo1Leaf = SG_FALSE; SG_bool bFoundRepo2Leaf = SG_FALSE; SG_bool bDagnodesEqual = SG_FALSE; SG_NULLARGCHECK_RETURN(pRepo1); SG_NULLARGCHECK_RETURN(pRepo2); SG_NULLARGCHECK_RETURN(pbIdentical); SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo1, iDagNum, &prbRepo1Leaves) ); SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo2, iDagNum, &prbRepo2Leaves) ); SG_ERR_CHECK( SG_rbtree__count(pCtx, prbRepo1Leaves, &iRepo1LeafCount) ); SG_ERR_CHECK( SG_rbtree__count(pCtx, prbRepo2Leaves, &iRepo2LeafCount) ); if (iRepo1LeafCount != iRepo2LeafCount) { #if TRACE_SYNC SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "leaf count differs\n") ); #endif goto Different; } SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pIterator, prbRepo1Leaves, &bFoundRepo1Leaf, &pszId, NULL) ); while (bFoundRepo1Leaf) { SG_ERR_CHECK( SG_rbtree__find(pCtx, prbRepo2Leaves, pszId, &bFoundRepo2Leaf, NULL) ); if (!bFoundRepo2Leaf) { #if TRACE_SYNC && 0 SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "couldn't locate leaf\r\n") ); SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Repo 1 leaves:\r\n") ); SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo1Leaves) ); SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Repo 2 leaves:\r\n") ); SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo2Leaves) ); SG_ERR_CHECK( SG_console__flush(pCtx, SG_CS_STDERR) ); #endif goto Different; } SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo1, pszId, &pRepo1Dagnode) ); SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo2, pszId, 
&pRepo2Dagnode) ); SG_ERR_CHECK( _compare_dagnodes(pCtx, pRepo1, pRepo1Dagnode, pRepo2, pRepo2Dagnode, &bDagnodesEqual) ); SG_DAGNODE_NULLFREE(pCtx, pRepo1Dagnode); SG_DAGNODE_NULLFREE(pCtx, pRepo2Dagnode); if (!bDagnodesEqual) goto Different; SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pIterator, &bFoundRepo1Leaf, &pszId, NULL) ); } bFinalResult = SG_TRUE; Different: *pbIdentical = bFinalResult; // fall through fail: SG_RBTREE_NULLFREE(pCtx, prbRepo1Leaves); SG_RBTREE_NULLFREE(pCtx, prbRepo2Leaves); SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIterator); }
/**
 * Add the given dagnode to the fragment as a START_MEMBER (or upgrade an
 * existing END_FRINGE entry to INTERIOR_MEMBER) and queue its parents.
 *
 * Ownership: on success this takes ownership of *ppdn and sets it to NULL.
 * On any path where the node is NOT consumed (including errors and the
 * default switch case below), the fall-through fail label frees *ppdn —
 * so the caller never owns the node after this call returns.
 */
void SG_dagfrag__add_dagnode(SG_context * pCtx,
                             SG_dagfrag * pFrag,
                             SG_dagnode** ppdn)
{
    _my_data * pMyDataCached = NULL;
    SG_bool bPresent = SG_FALSE;
    const char* psz_id = NULL;
    SG_dagnode* pdn = NULL;

    SG_NULLARGCHECK_RETURN(pFrag);
    SG_NULLARGCHECK_RETURN(ppdn);
    pdn = *ppdn;

    // if we are extending the fragment, delete the generation-sorted
    // member cache copy. (see __foreach_member()). it's either that
    // or update it in parallel as we change the real CACHE and that
    // doesn't seem worth the bother.
    SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
    pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    // fetch the starting dagnode and compute the generation bounds.
    // first, see if the cache already has info for this dagnode.
    // if not, fetch it from the source and then add it to the cache.
    SG_ERR_CHECK(  SG_dagnode__get_id_ref(pCtx, pdn, &psz_id)  );

#if DEBUG && TRACE_DAGFRAG && 0
    SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Adding [%s] to frag.\r\n", psz_id)  );
    SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );
#endif

    SG_ERR_CHECK(  _cache__lookup(pCtx, pFrag, psz_id, &pMyDataCached, &bPresent)  );

    if (!bPresent)
    {
        // this dagnode was not already present in the cache.
        // add it to the cache directly and set the state.
        // we don't need to go thru the work queue for it.
        //
        // then add all of its parents.

        // ownership of pdn transfers to the cache here; NULL the caller's
        // pointer so the fail label below does not double-free it.
        SG_ERR_CHECK(  _cache__add__dagnode(pCtx, pFrag, 0, pdn, SG_DFS_START_MEMBER, &pMyDataCached)  );
        *ppdn = NULL;

        SG_ERR_CHECK(  _add_parents(pCtx, pFrag, pMyDataCached->m_pDagnode)  );
    }
    else
    {
        // the node was already present in the cache, so we have already
        // walked at least part of the graph around it.
        switch (pMyDataCached->m_state)
        {
        case SG_DFS_END_FRINGE:
            // the node was previously only known as a fringe HID; if the
            // cache has no actual dagnode yet, adopt the caller's copy.
            if (!pMyDataCached->m_pDagnode)
            {
                pMyDataCached->m_pDagnode = pdn;
                *ppdn = NULL;
            }
            pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
            SG_ERR_CHECK(  _add_parents(pCtx, pFrag, pMyDataCached->m_pDagnode)  );
            break;

        default:
            // other states mean the node is already a member; nothing to
            // do — the duplicate pdn is freed at fail below.
            // NOTE(review): an assert for invalid states was disabled:
            break;
            /* SG_ASSERT_RELEASE_FAIL2( (0), (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]", pMyDataCached->m_state,psz_id) ); */
        }
    }

    // fall through
fail:
    // frees the node only when it was not consumed above (no-op on NULL).
    SG_DAGNODE_NULLFREE(pCtx, *ppdn);
}
/**
 * Build a "best guess" dagfrag for one dag: starting from the HIDs in
 * prbStartFromHids, include enough generations to (probably) connect to
 * the nodes described by pvhConnectToHidsAndGens (HID -> generation).
 *
 * The generation span is (max start-node generation) - (min connect-to
 * generation); when that is not positive (or there is nothing to connect
 * to), FALLBACK_GENS_PER_ROUNDTRIP generations are fetched instead.
 *
 * Returns the new frag in *ppFrag; the caller owns it.
 */
void SG_sync__build_best_guess_dagfrag(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_uint64 iDagNum,
	SG_rbtree* prbStartFromHids,
	SG_vhash* pvhConnectToHidsAndGens,
	SG_dagfrag** ppFrag)
{
	SG_uint32 i, countConnectTo;
	SG_rbtree_iterator* pit = NULL;
	SG_dagnode* pdn = NULL;
	SG_dagfrag* pFrag = NULL;
	SG_repo_fetch_dagnodes_handle* pdh = NULL;
	SG_int32 minGen = SG_INT32_MAX;
	SG_int32 maxGen = 0;
	SG_uint32 gensToFetch = 0;
	char* psz_repo_id = NULL;
	char* psz_admin_id = NULL;
	SG_bool bNextHid;
	const char* pszRefHid;
	SG_int32 gen;
#if TRACE_SYNC
	SG_int64 startTime;
	SG_int64 endTime;
#endif

	SG_NULLARGCHECK_RETURN(pRepo);            // added: dereferenced throughout
	SG_NULLARGCHECK_RETURN(prbStartFromHids);
	SG_NULLARGCHECK_RETURN(ppFrag);           // added: written at the end

	/* Find the minimum generation in pertinent "connect to" nodes. */
	if (pvhConnectToHidsAndGens)
	{
		SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhConnectToHidsAndGens, &countConnectTo)  );
		for (i = 0; i < countConnectTo; i++)
		{
			/* BUGFIX: an inner 'SG_int32 gen;' shadowed the outer local;
			 * use the single outer variable. */
			SG_ERR_CHECK(  SG_vhash__get_nth_pair__int32(pCtx, pvhConnectToHidsAndGens, i, &pszRefHid, &gen)  );
			if (gen < minGen)
				minGen = gen;
		}
	}

	/* Happens when pulling into an empty repo, or when an entire dag is specifically requested. */
	if (minGen == SG_INT32_MAX)
		minGen = -1;

	SG_ERR_CHECK(  SG_repo__fetch_dagnodes__begin(pCtx, pRepo, iDagNum, &pdh)  );

	/* Find the maximum generation in pertinent "start from" nodes. */
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbStartFromHids, &bNextHid, &pszRefHid, NULL)  );
	while (bNextHid)
	{
		SG_ERR_CHECK(  SG_repo__fetch_dagnodes__one(pCtx, pRepo, pdh, pszRefHid, &pdn)  );
		SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pdn, &gen)  );
		if (gen > maxGen)
			maxGen = gen;
		SG_DAGNODE_NULLFREE(pCtx, pdn);

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &bNextHid, &pszRefHid, NULL)  );
	}
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);

	if (maxGen <= minGen)
		gensToFetch = FALLBACK_GENS_PER_ROUNDTRIP;
	else
		gensToFetch = maxGen - minGen;

#if TRACE_SYNC
	{
		char buf_dagnum[SG_DAGNUM__BUF_MAX__HEX];
		SG_uint32 count;

		SG_ERR_CHECK(  SG_dagnum__to_sz__hex(pCtx, iDagNum, buf_dagnum, sizeof(buf_dagnum))  );
		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Building best guess dagfrag for dag %s...\n", buf_dagnum)  );
		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Starting from nodes:\n")  );
		SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, prbStartFromHids)  );
		SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhConnectToHidsAndGens, "Connecting to nodes")  );
		SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbStartFromHids, &count)  );
		/* BUGFIX: %u matches SG_uint32 gensToFetch (was %d). */
		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "result has %u generations from %u starting nodes.\n", gensToFetch, count)  );
		SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );

		SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &startTime)  );
	}
#endif

	/* Return a frag with the corresponding generations filled in. */
	SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &psz_repo_id)  );
	SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id)  );
	SG_ERR_CHECK(  SG_dagfrag__alloc(pCtx, &pFrag, psz_repo_id, psz_admin_id, iDagNum)  );
	SG_ERR_CHECK(  SG_dagfrag__load_from_repo__multi(pCtx, pFrag, pRepo, prbStartFromHids, gensToFetch)  );

#if TRACE_SYNC
	SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &endTime)  );
	{
		SG_uint32 dagnodeCount;
		double seconds = ((double)endTime - (double)startTime) / 1000;
		SG_ERR_CHECK(  SG_dagfrag__dagnode_count(pCtx, pFrag, &dagnodeCount)  );
		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, " - %u nodes in frag, built in %1.3f seconds\n", dagnodeCount, seconds)  );
		SG_ERR_CHECK(  SG_dagfrag_debug__dump__console(pCtx, pFrag, "best-guess dagfrag", 0, SG_CS_STDERR)  );
	}
#endif

	/* hand ownership to the caller; NULL pFrag so cleanup won't free it. */
	*ppFrag = pFrag;
	pFrag = NULL;

	/* Common cleanup */
fail:
	SG_NULLFREE(pCtx, psz_repo_id);
	SG_NULLFREE(pCtx, psz_admin_id);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
	SG_ERR_IGNORE(  SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pdh)  );
}