void MyFn(one_dagnode)(SG_context * pCtx, SG_repo* pRepo)
{
    char* pId = NULL;
    SG_dagnode* pdnCreated = NULL;
    SG_dagnode* pdnFetched = NULL;
    SG_repo_tx_handle* pTx = NULL;

    char buf_tid[SG_TID_MAX_BUFFER_LENGTH];

    VERIFY_ERR_CHECK( SG_tid__generate(pCtx, buf_tid, sizeof(buf_tid)) );
    VERIFY_ERR_CHECK( SG_repo__alloc_compute_hash__from_bytes(pCtx, pRepo, sizeof(buf_tid), (SG_byte *)buf_tid, &pId) );

    VERIFY_ERR_CHECK( SG_dagnode__alloc(pCtx, &pdnCreated, pId, 1, 0) );
    VERIFY_ERR_CHECK( SG_dagnode__freeze(pCtx, pdnCreated) );

    // Add dagnode.
    VERIFY_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) );
    VERIFY_ERR_CHECK( SG_repo__store_dagnode(pCtx, pRepo, pTx, SG_DAGNUM__TESTING__NOTHING, pdnCreated) );
    pdnCreated = NULL;

    // Should fail: tx not committed.
    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__fetch_dagnode(pCtx, pRepo, SG_DAGNUM__TESTING__NOTHING, pId, &pdnFetched),
                                         SG_ERR_NOT_FOUND ); // Dag node visible before repo tx committed.

    // Abort repo tx.
    VERIFY_ERR_CHECK( SG_repo__abort_tx(pCtx, pRepo, &pTx) );
    VERIFY_COND("SG_repo__abort_tx should null/free the repo transaction.", !pTx);

    // Should fail: tx aborted.
    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__fetch_dagnode(pCtx, pRepo, SG_DAGNUM__TESTING__NOTHING, pId, &pdnFetched),
                                         SG_ERR_NOT_FOUND ); // Dag node exists after repo tx abort.

    // Write dagnode, commit tx.
    VERIFY_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) );
    VERIFY_ERR_CHECK( SG_dagnode__alloc(pCtx, &pdnCreated, pId, 1, 0) );
    VERIFY_ERR_CHECK( SG_dagnode__freeze(pCtx, pdnCreated) );
    VERIFY_ERR_CHECK( SG_repo__store_dagnode(pCtx, pRepo, pTx, SG_DAGNUM__TESTING__NOTHING, pdnCreated) );
    pdnCreated = NULL;
    VERIFY_ERR_CHECK( SG_repo__commit_tx(pCtx, pRepo, &pTx) );
    VERIFY_COND("SG_repo__commit_tx should null/free the repo transaction.", !pTx);

    // Read back the dagnode. It should exist now.
    VERIFY_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, SG_DAGNUM__TESTING__NOTHING, pId, &pdnFetched) );

    // Fall through to common cleanup.

fail:
    SG_NULLFREE(pCtx, pId);
    SG_DAGNODE_NULLFREE(pCtx, pdnCreated);
    SG_DAGNODE_NULLFREE(pCtx, pdnFetched);
}
// Add a dagnode to the work queue if it's not already on it. If it is already
// on it, this might tell us new information about whether it is a descendant of
// "New" or "Old", so update it with the new isAncestorOf information.
static void _fnsc_work_queue__insert(
    SG_context * pCtx,
    _fnsc_work_queue_t * pWorkQueue,
    const char * pszHid,
    SG_uint64 dagnum,
    SG_repo * pRepo,
    SG_byte isAncestorOf
    )
{
    SG_bool alreadyInTheQueue = SG_FALSE;
    SG_dagnode * pDagnode = NULL;
    SG_uint32 i;
    SG_uint32 revno = 0;
    char * revno__p = NULL;

    // First we check the cache. This will tell us whether the item is
    // already on the queue, and if so what its revno is.
    SG_ERR_CHECK( SG_rbtree__find(pCtx, pWorkQueue->pRevnoCache, pszHid, &alreadyInTheQueue, (void**)&revno__p) );
    if (alreadyInTheQueue)
    {
        // The revno is stored directly in the rbtree's assoc pointer;
        // decode it by measuring the offset from NULL.
        revno = (SG_uint32)(revno__p - (char*)NULL);
    }
    else
    {
        SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, pszHid, &pDagnode) );
        SG_ERR_CHECK( SG_dagnode__get_revno(pCtx, pDagnode, &revno) );
    }

    // The queue is kept sorted by revno; find the insertion point.
    i = pWorkQueue->length;
    while (i > 0 && pWorkQueue->p[i-1].revno > revno)
        --i;

    if (i > 0 && pWorkQueue->p[i-1].revno == revno)
    {
        SG_ASSERT(alreadyInTheQueue);
        if (pWorkQueue->p[i-1].isAncestorOf == _ANCESTOR_OF_NEW && isAncestorOf != _ANCESTOR_OF_NEW)
            --pWorkQueue->numAncestorsOfNewOnTheQueue;
        pWorkQueue->p[i-1].isAncestorOf |= isAncestorOf; // OR in the new isAncestorOf bits
    }
    else
    {
        SG_ASSERT(pDagnode != NULL);
        SG_ERR_CHECK( _fnsc_work_queue__insert_at(pCtx, pWorkQueue, i, pszHid, revno, &pDagnode, isAncestorOf) );
    }

    return;
fail:
    SG_DAGNODE_NULLFREE(pCtx, pDagnode);
}
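/*
 * A minimal sketch of the revno cache convention assumed above: the revno is
 * not boxed in allocated memory but encoded directly in the rbtree's
 * assoc-data pointer as an offset from NULL, mirroring the decode expression
 * in _fnsc_work_queue__insert. The helper names (_revno_cache__encode and
 * _revno_cache__decode) are hypothetical, for illustration only.
 */
static void* _revno_cache__encode(SG_uint32 revno)
{
    // Store the integer in the pointer value itself; no allocation needed.
    return (char*)NULL + revno;
}

static SG_uint32 _revno_cache__decode(void* pAssoc)
{
    // Recover the integer by measuring the pointer's offset from NULL.
    return (SG_uint32)((char*)pAssoc - (char*)NULL);
}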
void SG_sync__add_n_generations(SG_context* pCtx,
                                SG_repo* pRepo,
                                const char* pszDagnodeHid,
                                SG_rbtree* prbDagnodeHids,
                                SG_uint32 generations)
{
    _dagwalk_data dagWalkData;
    SG_dagnode* pStartNode = NULL;
    SG_int32 startGen;

    dagWalkData.pszStartNodeHid = pszDagnodeHid;

    SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, pszDagnodeHid, &pStartNode) );
    SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pStartNode, &startGen) );
    dagWalkData.genLimit = startGen - generations;
    dagWalkData.prbVisitedNodes = prbDagnodeHids;

    SG_ERR_CHECK( SG_dagwalker__walk_dag_single(pCtx, pRepo, pszDagnodeHid, _dagwalk_callback, &dagWalkData) );

    /* fall through */
fail:
    SG_DAGNODE_NULLFREE(pCtx, pStartNode);
}
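/*
 * A hedged usage sketch (not from the original source): collect the HIDs of a
 * changeset plus roughly two generations of its ancestors. Assumes the caller
 * already holds an open SG_repo* and a valid changeset HID; the function and
 * variable names here are illustrative.
 */
static void example__collect_two_generations(SG_context* pCtx, SG_repo* pRepo, const char* pszHid)
{
    SG_rbtree* prbHids = NULL;

    SG_ERR_CHECK( SG_RBTREE__ALLOC(pCtx, &prbHids) );

    // Walk back two generations from pszHid, accumulating visited HIDs.
    SG_ERR_CHECK( SG_sync__add_n_generations(pCtx, pRepo, pszHid, prbHids, 2) );

fail:
    SG_RBTREE_NULLFREE(pCtx, prbHids);
}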
void SG_dagfrag__load_from_repo__simple(SG_context * pCtx,
                                        SG_dagfrag * pFrag,
                                        SG_repo* pRepo,
                                        SG_rbtree * prb_ids)
{
    SG_dagnode* pdn = NULL;
    SG_rbtree_iterator* pit = NULL;
    SG_bool b = SG_FALSE;
    const char* psz_id = NULL;

    SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pit, prb_ids, &b, &psz_id, NULL) );
    while (b)
    {
        SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, psz_id, &pdn) );
        SG_ERR_CHECK( SG_dagfrag__add_dagnode(pCtx, pFrag, &pdn) );
        SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pit, &b, &psz_id, NULL) );
    }

    // fall thru to common cleanup

fail:
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
    SG_DAGNODE_NULLFREE(pCtx, pdn); // free a dagnode fetched but not yet consumed by SG_dagfrag__add_dagnode
}
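/*
 * A minimal usage sketch (illustrative, not from the original source): load a
 * fragment for two known changeset HIDs. Assumes pFrag was allocated by the
 * caller (e.g. via SG_dagfrag__alloc_transient, as seen elsewhere in this
 * code); psz_hid1/psz_hid2 are placeholder names.
 */
static void example__load_two_ids(SG_context* pCtx, SG_dagfrag* pFrag, SG_repo* pRepo,
                                  const char* psz_hid1, const char* psz_hid2)
{
    SG_rbtree* prb_ids = NULL;

    SG_ERR_CHECK( SG_RBTREE__ALLOC(pCtx, &prb_ids) );
    SG_ERR_CHECK( SG_rbtree__add(pCtx, prb_ids, psz_hid1) );
    SG_ERR_CHECK( SG_rbtree__add(pCtx, prb_ids, psz_hid2) );

    // Fetch each listed dagnode and add it to the fragment.
    SG_ERR_CHECK( SG_dagfrag__load_from_repo__simple(pCtx, pFrag, pRepo, prb_ids) );

fail:
    SG_RBTREE_NULLFREE(pCtx, prb_ids);
}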
void SG_repo__dag__find_direct_path_from_root(
    SG_context * pCtx,
    SG_repo* pRepo,
    SG_uint64 dagnum,
    const char* psz_csid,
    SG_varray** ppva
    )
{
    SG_varray* new_pva = NULL;
#if SG_DOUBLE_CHECK__PATH_TO_ROOT
    SG_varray* old_pva = NULL;
    SG_dagnode* pdn = NULL;
    char* psz_cur = NULL;
    SG_string* pstr1 = NULL;
    SG_string* pstr2 = NULL;
#endif

    SG_ERR_CHECK( SG_repo__find_dag_path(pCtx, pRepo, dagnum, NULL, psz_csid, &new_pva) );

#if SG_DOUBLE_CHECK__PATH_TO_ROOT
    SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &old_pva) );
    SG_ERR_CHECK( SG_STRDUP(pCtx, psz_csid, &psz_cur) );
    while (1)
    {
        SG_uint32 count_parents = 0;
        const char** a_parents = NULL;

        SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_cur, &pdn) );
        SG_ERR_CHECK( SG_varray__append__string__sz(pCtx, old_pva, psz_cur) );
        SG_ERR_CHECK( SG_dagnode__get_parents__ref(pCtx, pdn, &count_parents, &a_parents) );
        if (0 == count_parents)
        {
            break;
        }

        SG_NULLFREE(pCtx, psz_cur);
        SG_ERR_CHECK( SG_STRDUP(pCtx, a_parents[0], &psz_cur) );
        SG_DAGNODE_NULLFREE(pCtx, pdn);
    }
    SG_ERR_CHECK( SG_varray__append__string__sz(pCtx, old_pva, "") );

    SG_ERR_CHECK( SG_string__alloc(pCtx, &pstr1) );
    SG_ERR_CHECK( SG_string__alloc(pCtx, &pstr2) );
    SG_ERR_CHECK( SG_varray__to_json(pCtx, old_pva, pstr1) );
    SG_ERR_CHECK( SG_varray__to_json(pCtx, new_pva, pstr2) );

    if (0 != strcmp(SG_string__sz(pstr1), SG_string__sz(pstr2)))
    {
        // a failure here isn't actually ALWAYS bad. there can be more than
        // one path to root.
        fprintf(stderr, "old way:\n");
        SG_VARRAY_STDERR(old_pva);
        fprintf(stderr, "new way:\n");
        SG_VARRAY_STDERR(new_pva);
        SG_ERR_THROW( SG_ERR_UNSPECIFIED );
    }
#endif

    *ppva = new_pva;
    new_pva = NULL;

fail:
    SG_VARRAY_NULLFREE(pCtx, new_pva);
#if SG_DOUBLE_CHECK__PATH_TO_ROOT
    SG_STRING_NULLFREE(pCtx, pstr1);
    SG_STRING_NULLFREE(pCtx, pstr2);
    SG_VARRAY_NULLFREE(pCtx, old_pva);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_NULLFREE(pCtx, psz_cur);
#endif
}
void SG_dagquery__how_are_dagnodes_related(SG_context * pCtx,
                                           SG_repo * pRepo,
                                           SG_uint64 dagnum,
                                           const char * pszHid1,
                                           const char * pszHid2,
                                           SG_bool bSkipDescendantCheck,
                                           SG_bool bSkipAncestorCheck,
                                           SG_dagquery_relationship * pdqRel)
{
    SG_dagnode * pdn1 = NULL;
    SG_dagnode * pdn2 = NULL;
    SG_dagfrag * pFrag = NULL;
    SG_dagquery_relationship dqRel = SG_DAGQUERY_RELATIONSHIP__UNKNOWN;
    SG_int32 gen1, gen2;
    SG_bool bFound;

    SG_NULLARGCHECK_RETURN(pRepo);
    SG_NONEMPTYCHECK_RETURN(pszHid1);
    SG_NONEMPTYCHECK_RETURN(pszHid2);
    SG_NULLARGCHECK_RETURN(pdqRel);

    if (strcmp(pszHid1, pszHid2) == 0)
    {
        dqRel = SG_DAGQUERY_RELATIONSHIP__SAME;
        goto cleanup;
    }

    // fetch the dagnode for both HIDs. this throws when a HID is not found.
    SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, pszHid1, &pdn1) );
    SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, pszHid2, &pdn2) );

    // we say that 2 nodes are either:
    // [1] ancestor/descendant of each other;
    // [2] or that they are peers (cousins) of each other (no matter
    //     how distant in the DAG). (they have an LCA, but we don't
    //     care about it.)

    // get the generation of both dagnodes. if they are the same, then they
    // cannot have an ancestor/descendant relationship and therefore must be
    // peers/cousins (we don't care how close/distant they are).
    SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn1, &gen1) );
    SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn2, &gen2) );
    if (gen1 == gen2)
    {
        dqRel = SG_DAGQUERY_RELATIONSHIP__PEER;
        goto cleanup;
    }

    // see if one is an ancestor of the other. since we only have PARENT
    // edges in our DAG, we start with the deeper one and walk backwards
    // until we've visited all ancestors at the depth of the shallower one.
    //
    // i'm going to be lazy here and not reinvent a recursive-ish parent-edge
    // graph walker. instead, i'm going to create a DAGFRAG using the
    // deeper one and request the generation difference as the "thickness".
    // in theory, if we have an ancestor/descendant relationship, the
    // shallower one should be in the END-FRINGE of the DAGFRAG.
    //
    // i'm going to pick an arbitrary direction "cs1 is R of cs2".

    SG_ERR_CHECK( SG_dagfrag__alloc_transient(pCtx, dagnum, &pFrag) );

    if (gen1 > gen2) // cs1 is *DEEPER* than cs2
    {
        if (bSkipDescendantCheck)
        {
            dqRel = SG_DAGQUERY_RELATIONSHIP__UNKNOWN;
        }
        else
        {
            SG_ERR_CHECK( SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, pszHid1, (gen1 - gen2)) );
            SG_ERR_CHECK( SG_dagfrag__query(pCtx, pFrag, pszHid2, NULL, NULL, &bFound, NULL) );

            if (bFound) // pszHid2 is an ancestor of pszHid1. READ: pszHid1 is a descendant of pszHid2.
                dqRel = SG_DAGQUERY_RELATIONSHIP__DESCENDANT;
            else        // they are *distant* peers.
                dqRel = SG_DAGQUERY_RELATIONSHIP__PEER;
        }
        goto cleanup;
    }
    else
    {
        if (bSkipAncestorCheck)
        {
            dqRel = SG_DAGQUERY_RELATIONSHIP__UNKNOWN;
        }
        else
        {
            SG_ERR_CHECK( SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, pszHid2, (gen2 - gen1)) );
            SG_ERR_CHECK( SG_dagfrag__query(pCtx, pFrag, pszHid1, NULL, NULL, &bFound, NULL) );

            if (bFound) // pszHid1 is an ancestor of pszHid2.
                dqRel = SG_DAGQUERY_RELATIONSHIP__ANCESTOR;
            else        // they are *distant* peers.
                dqRel = SG_DAGQUERY_RELATIONSHIP__PEER;
        }
        goto cleanup;
    }

    /*NOTREACHED*/

cleanup:
    *pdqRel = dqRel;

fail:
    SG_DAGNODE_NULLFREE(pCtx, pdn1);
    SG_DAGNODE_NULLFREE(pCtx, pdn2);
    SG_DAGFRAG_NULLFREE(pCtx, pFrag);
}
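/*
 * A hedged usage sketch (not from the original source): classify the
 * relationship between two changesets and branch on the result. Uses only
 * the SG_DAGQUERY_RELATIONSHIP__* values seen above; psz_a/psz_b are
 * placeholder names.
 */
static void example__check_relationship(SG_context* pCtx, SG_repo* pRepo, SG_uint64 dagnum,
                                        const char* psz_a, const char* psz_b)
{
    SG_dagquery_relationship rel = SG_DAGQUERY_RELATIONSHIP__UNKNOWN;

    // Run both the descendant and ancestor checks (neither is skipped).
    SG_ERR_CHECK( SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, dagnum, psz_a, psz_b,
                                                        SG_FALSE, SG_FALSE, &rel) );

    if (rel == SG_DAGQUERY_RELATIONSHIP__ANCESTOR)
    {
        // psz_a is an ancestor of psz_b; e.g. a fast-forward would be possible.
    }

fail:
    ;
}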
void SG_repo__db__calc_delta(
    SG_context * pCtx,
    SG_repo* pRepo,
    SG_uint64 dagnum,
    const char* psz_csid_from,
    const char* psz_csid_to,
    SG_uint32 flags,
    SG_vhash** ppvh_add,
    SG_vhash** ppvh_remove
    )
{
    SG_dagnode* pdn_from = NULL;
    SG_dagnode* pdn_to = NULL;
    SG_int32 gen_from = -1;
    SG_int32 gen_to = -1;
    SG_varray* pva_direct_backward_path = NULL;
    SG_varray* pva_direct_forward_path = NULL;
    SG_vhash* pvh_add = NULL;
    SG_vhash* pvh_remove = NULL;
    SG_rbtree* prb_temp = NULL;
    SG_daglca* plca = NULL;
    char* psz_csid_ancestor = NULL;

    SG_NULLARGCHECK_RETURN(psz_csid_from);
    SG_NULLARGCHECK_RETURN(psz_csid_to);
    SG_NULLARGCHECK_RETURN(pRepo);
    SG_NULLARGCHECK_RETURN(ppvh_add);
    SG_NULLARGCHECK_RETURN(ppvh_remove);

    SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_from, &pdn_from) );
    SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn_from, &gen_from) );

    SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_to, &pdn_to) );
    SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn_to, &gen_to) );

    if (gen_from > gen_to)
    {
        // "from" is deeper, so look for a direct path walking backward
        // from "from" to "to".
        SG_ERR_CHECK( SG_repo__dag__find_direct_backward_path(
                          pCtx, pRepo, dagnum,
                          psz_csid_from, psz_csid_to,
                          &pva_direct_backward_path) );
        if (pva_direct_backward_path)
        {
            SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_add) );
            SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_remove) );
            SG_ERR_CHECK( SG_db__make_delta_from_path(
                              pCtx, pRepo, dagnum,
                              pva_direct_backward_path, flags,
                              pvh_add, pvh_remove) );
        }
    }
    else if (gen_from < gen_to)
    {
        // "to" is deeper; walk backward from "to" to "from" and apply the
        // resulting delta with the add/remove sets swapped.
        SG_ERR_CHECK( SG_repo__dag__find_direct_backward_path(
                          pCtx, pRepo, dagnum,
                          psz_csid_to, psz_csid_from,
                          &pva_direct_forward_path) );
        if (pva_direct_forward_path)
        {
            SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_add) );
            SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_remove) );
            SG_ERR_CHECK( SG_db__make_delta_from_path(
                              pCtx, pRepo, dagnum,
                              pva_direct_forward_path, flags,
                              pvh_remove, pvh_add) );
        }
    }

    if (!pvh_add && !pvh_remove)
    {
        // No direct path was found in either direction, so go through the
        // least common ancestor: walk from each endpoint back to the LCA
        // and combine the two deltas.
        SG_ERR_CHECK( SG_RBTREE__ALLOC(pCtx, &prb_temp) );
        SG_ERR_CHECK( SG_rbtree__add(pCtx, prb_temp, psz_csid_from) );
        SG_ERR_CHECK( SG_rbtree__add(pCtx, prb_temp, psz_csid_to) );
        SG_ERR_CHECK( SG_repo__get_dag_lca(pCtx, pRepo, dagnum, prb_temp, &plca) );
        {
            const char* psz_hid = NULL;
            SG_daglca_node_type node_type = 0;
            SG_int32 gen = -1;

            SG_ERR_CHECK( SG_daglca__iterator__first(pCtx, NULL, plca, SG_FALSE, &psz_hid, &node_type, &gen, NULL) );
            SG_ERR_CHECK( SG_STRDUP(pCtx, psz_hid, &psz_csid_ancestor) );
        }
        SG_ERR_CHECK( SG_repo__dag__find_direct_backward_path(
                          pCtx, pRepo, dagnum,
                          psz_csid_from, psz_csid_ancestor,
                          &pva_direct_backward_path) );
        SG_ERR_CHECK( SG_repo__dag__find_direct_backward_path(
                          pCtx, pRepo, dagnum,
                          psz_csid_to, psz_csid_ancestor,
                          &pva_direct_forward_path) );
        SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_add) );
        SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_remove) );
        SG_ERR_CHECK( SG_db__make_delta_from_path(
                          pCtx, pRepo, dagnum,
                          pva_direct_backward_path, flags,
                          pvh_add, pvh_remove) );
        SG_ERR_CHECK( SG_db__make_delta_from_path(
                          pCtx, pRepo, dagnum,
                          pva_direct_forward_path, flags,
                          pvh_remove, pvh_add) );
    }

    *ppvh_add = pvh_add;
    pvh_add = NULL;

    *ppvh_remove = pvh_remove;
    pvh_remove = NULL;

fail:
    SG_NULLFREE(pCtx, psz_csid_ancestor);
    SG_RBTREE_NULLFREE(pCtx, prb_temp);
    SG_DAGLCA_NULLFREE(pCtx, plca);
    SG_VHASH_NULLFREE(pCtx, pvh_add);
    SG_VHASH_NULLFREE(pCtx, pvh_remove);
    SG_VARRAY_NULLFREE(pCtx, pva_direct_backward_path);
    SG_VARRAY_NULLFREE(pCtx, pva_direct_forward_path);
    SG_DAGNODE_NULLFREE(pCtx, pdn_from);
    SG_DAGNODE_NULLFREE(pCtx, pdn_to);
}
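/*
 * A hedged usage sketch (illustrative): compute the db delta between two
 * changesets and hand the add/remove sets to the caller, showing the
 * out-parameter ownership convention used above. Passing 0 for flags is an
 * assumption here; the original call sites are not shown.
 */
static void example__delta_between(SG_context* pCtx, SG_repo* pRepo, SG_uint64 dagnum,
                                   const char* psz_from, const char* psz_to,
                                   SG_vhash** ppvh_add, SG_vhash** ppvh_remove)
{
    SG_vhash* pvh_add = NULL;
    SG_vhash* pvh_remove = NULL;

    SG_ERR_CHECK( SG_repo__db__calc_delta(pCtx, pRepo, dagnum, psz_from, psz_to, 0,
                                          &pvh_add, &pvh_remove) );

    // Ownership of both vhashes passes to the caller.
    *ppvh_add = pvh_add;
    pvh_add = NULL;
    *ppvh_remove = pvh_remove;
    pvh_remove = NULL;

fail:
    SG_VHASH_NULLFREE(pCtx, pvh_add);
    SG_VHASH_NULLFREE(pCtx, pvh_remove);
}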
/**
 * Compare all the nodes of a single DAG in two repos.
 */
static void _compare_one_dag(SG_context* pCtx,
                             SG_repo* pRepo1,
                             SG_repo* pRepo2,
                             SG_uint32 iDagNum,
                             SG_bool* pbIdentical)
{
    SG_bool bFinalResult = SG_FALSE;
    SG_rbtree* prbRepo1Leaves = NULL;
    SG_rbtree* prbRepo2Leaves = NULL;
    SG_uint32 iRepo1LeafCount, iRepo2LeafCount;
    SG_rbtree_iterator* pIterator = NULL;
    const char* pszId = NULL;
    SG_dagnode* pRepo1Dagnode = NULL;
    SG_dagnode* pRepo2Dagnode = NULL;
    SG_bool bFoundRepo1Leaf = SG_FALSE;
    SG_bool bFoundRepo2Leaf = SG_FALSE;
    SG_bool bDagnodesEqual = SG_FALSE;

    SG_NULLARGCHECK_RETURN(pRepo1);
    SG_NULLARGCHECK_RETURN(pRepo2);
    SG_NULLARGCHECK_RETURN(pbIdentical);

    SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo1, iDagNum, &prbRepo1Leaves) );
    SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo2, iDagNum, &prbRepo2Leaves) );

    SG_ERR_CHECK( SG_rbtree__count(pCtx, prbRepo1Leaves, &iRepo1LeafCount) );
    SG_ERR_CHECK( SG_rbtree__count(pCtx, prbRepo2Leaves, &iRepo2LeafCount) );
    if (iRepo1LeafCount != iRepo2LeafCount)
    {
#if TRACE_SYNC
        SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "leaf count differs\n") );
#endif
        goto Different;
    }

    SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pIterator, prbRepo1Leaves, &bFoundRepo1Leaf, &pszId, NULL) );
    while (bFoundRepo1Leaf)
    {
        SG_ERR_CHECK( SG_rbtree__find(pCtx, prbRepo2Leaves, pszId, &bFoundRepo2Leaf, NULL) );
        if (!bFoundRepo2Leaf)
        {
#if TRACE_SYNC && 0
            SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "couldn't locate leaf\r\n") );
            SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Repo 1 leaves:\r\n") );
            SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo1Leaves) );
            SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Repo 2 leaves:\r\n") );
            SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo2Leaves) );
            SG_ERR_CHECK( SG_console__flush(pCtx, SG_CS_STDERR) );
#endif
            goto Different;
        }

        SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo1, pszId, &pRepo1Dagnode) );
        SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo2, pszId, &pRepo2Dagnode) );

        SG_ERR_CHECK( _compare_dagnodes(pCtx, pRepo1, pRepo1Dagnode, pRepo2, pRepo2Dagnode, &bDagnodesEqual) );

        SG_DAGNODE_NULLFREE(pCtx, pRepo1Dagnode);
        SG_DAGNODE_NULLFREE(pCtx, pRepo2Dagnode);

        if (!bDagnodesEqual)
            goto Different;

        SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pIterator, &bFoundRepo1Leaf, &pszId, NULL) );
    }

    bFinalResult = SG_TRUE;

Different:
    *pbIdentical = bFinalResult;

    // fall through
fail:
    SG_RBTREE_NULLFREE(pCtx, prbRepo1Leaves);
    SG_RBTREE_NULLFREE(pCtx, prbRepo2Leaves);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIterator);
}
/**
 * Recursively compare dagnodes depth-first.
 */
static void _compare_dagnodes(SG_context* pCtx,
                              SG_repo* pRepo1,
                              SG_dagnode* pDagnode1,
                              SG_repo* pRepo2,
                              SG_dagnode* pDagnode2,
                              SG_bool* pbIdentical)
{
    SG_bool bDagnodesEqual = SG_FALSE;
    SG_uint32 iParentCount1, iParentCount2;
    const char** paParentIds1 = NULL;
    const char** paParentIds2 = NULL;
    SG_dagnode* pParentDagnode1 = NULL;
    SG_dagnode* pParentDagnode2 = NULL;

    SG_NULLARGCHECK_RETURN(pDagnode1);
    SG_NULLARGCHECK_RETURN(pDagnode2);
    SG_NULLARGCHECK_RETURN(pbIdentical);

    *pbIdentical = SG_TRUE;

    // Compare the dagnodes. If they're different, return false.
    SG_ERR_CHECK( SG_dagnode__equal(pCtx, pDagnode1, pDagnode2, &bDagnodesEqual) );
    if (!bDagnodesEqual)
    {
#if TRACE_SYNC
        SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "dagnodes not equal\n") );
#endif
        *pbIdentical = SG_FALSE;
        return;
    }

    // The dagnodes are identical. Look at their parents.
    SG_ERR_CHECK( SG_dagnode__get_parents(pCtx, pDagnode1, &iParentCount1, &paParentIds1) );
    SG_ERR_CHECK( SG_dagnode__get_parents(pCtx, pDagnode2, &iParentCount2, &paParentIds2) );
    if (iParentCount1 == iParentCount2)
    {
        // The dagnodes have the same number of parents. Compare the parents recursively.
        SG_uint32 i;
        for (i = 0; i < iParentCount1; i++)
        {
            SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo1, paParentIds1[i], &pParentDagnode1) );
            SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo2, paParentIds2[i], &pParentDagnode2) );

            SG_ERR_CHECK( _compare_dagnodes(pCtx, pRepo1, pParentDagnode1, pRepo2, pParentDagnode2, pbIdentical) );

            SG_DAGNODE_NULLFREE(pCtx, pParentDagnode1);
            SG_DAGNODE_NULLFREE(pCtx, pParentDagnode2);

            if (!(*pbIdentical))
                break;
        }
    }
    else
    {
        // The dagnodes have a different number of parents.
        *pbIdentical = SG_FALSE;
    }

    // fall through
fail:
    SG_NULLFREE(pCtx, paParentIds1);
    SG_NULLFREE(pCtx, paParentIds2);
    SG_DAGNODE_NULLFREE(pCtx, pParentDagnode1);
    SG_DAGNODE_NULLFREE(pCtx, pParentDagnode2);
}
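/*
 * A hedged driver sketch (not from the original source): compare one DAG of
 * two repos and react to divergence. SG_DAGNUM__VERSION_CONTROL is assumed
 * to be one of the well-known dagnum constants, in the spirit of
 * SG_DAGNUM__TESTING__NOTHING used earlier; treat it as a placeholder.
 */
static void example__compare_version_control_dags(SG_context* pCtx, SG_repo* pRepo1, SG_repo* pRepo2)
{
    SG_bool bIdentical = SG_FALSE;

    SG_ERR_CHECK( _compare_one_dag(pCtx, pRepo1, pRepo2, SG_DAGNUM__VERSION_CONTROL, &bIdentical) );

    if (!bIdentical)
    {
        // The repos have diverged in this DAG; a sync would be needed.
    }

fail:
    ;
}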
void SG_dagfrag__load_from_repo__one(SG_context * pCtx,
                                     SG_dagfrag * pFrag,
                                     SG_repo* pRepo,
                                     const char * szHidStart,
                                     SG_int32 nGenerations)
{
    // load a fragment of the dag starting with the given dagnode
    // for nGenerations of parents.
    //
    // we add this portion of the graph to whatever we already
    // have in our fragment. this may either augment it (giving us
    // a larger connected piece) or it may be an independent subset.
    //
    // if nGenerations <= 0, load everything from this starting point
    // back to the NULL/root.
    //
    // generationStart is the generation of the starting dagnode.
    //
    // the starting dagnode *MAY* be in the final start-fringe.
    // normally, it will be. but if we are called multiple times
    // (and have more than one start point), it may be the case
    // that this node is a parent of one of the other start points.
    //
    // we compute generationEnd as the generation that we will NOT
    // include in the fragment; nodes of that generation will be in
    // the end-fringe. that is, we include [start...end) like most
    // C++ iterators.

    _my_data * pMyDataCached = NULL;
    SG_dagnode * pDagnodeAllocated = NULL;
    SG_dagnode * pDagnodeStart;
    SG_int32 generationStart, generationEnd;
    SG_bool bPresent = SG_FALSE;
    SG_rbtree* prb_WorkQueue = NULL;

    SG_NULLARGCHECK_RETURN(pFrag);
    SG_NONEMPTYCHECK_RETURN(szHidStart);

    // if we are extending the fragment, delete the generation-sorted
    // member cache copy. (see __foreach_member().) it's either that
    // or update it in parallel as we change the real CACHE, and that
    // doesn't seem worth the bother.
    SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
    pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    SG_ERR_CHECK( SG_RBTREE__ALLOC(pCtx, &prb_WorkQueue) );

    // fetch the starting dagnode and compute the generation bounds.
    // first, see if the cache already has info for this dagnode.
    // if not, fetch it from the source and then add it to the cache.
    SG_ERR_CHECK( _cache__lookup(pCtx, pFrag, szHidStart, &pMyDataCached, &bPresent) );
    if (!bPresent)
    {
        if (!pRepo)
            SG_ERR_THROW( SG_ERR_INVALID_WHILE_FROZEN );

        SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, szHidStart, &pDagnodeAllocated) );
        pDagnodeStart = pDagnodeAllocated;
    }
    else
    {
        pDagnodeStart = pMyDataCached->m_pDagnode;
    }

    SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pDagnodeStart, &generationStart) );
    SG_ASSERT_RELEASE_FAIL2( (generationStart > 0),
                             (pCtx, "Invalid generation value [%d] for dagnode [%s]",
                              generationStart, szHidStart) );

    if ((nGenerations <= 0) || (generationStart <= nGenerations))
        generationEnd = 0;
    else
        generationEnd = generationStart - nGenerations;

    if (!bPresent)
    {
        // this dagnode was not already present in the cache.
        // add it to the cache directly and set the state.
        // we don't need to go thru the work queue for it.
        //
        // then add all of its parents to the work queue.
        SG_ERR_CHECK( _cache__add__dagnode(pCtx, pFrag, generationStart,
                                           pDagnodeAllocated, SG_DFS_START_MEMBER,
                                           &pMyDataCached) );
        pDagnodeAllocated = NULL;

        SG_ERR_CHECK( _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode, prb_WorkQueue) );
    }
    else
    {
        // the node was already present in the cache, so we have already
        // walked at least part of the graph around it.
        switch (pMyDataCached->m_state)
        {
        default:
        //case SG_DFS_UNKNOWN:
            SG_ASSERT_RELEASE_FAIL2( (0),
                                     (pCtx, "Invalid state [%d] in DAGFRAG Cache for [%s]",
                                      pMyDataCached->m_state, szHidStart) );

        case SG_DFS_INTERIOR_MEMBER:    // already in fragment
        case SG_DFS_START_MEMBER:       // already in fragment, duplicated leaf?
            if (generationEnd < pMyDataCached->m_genDagnode)
            {
                // they've expanded the bounds of the fragment since we
                // last visited this dagnode. keep this dagnode in the
                // fragment and revisit the ancestors in case any were
                // put in the end-fringe that should now be included.
                //
                // we leave the state as INCLUDE or INCLUDE_AND_START
                // because a duplicate start point should remain a
                // start point.
                SG_ERR_CHECK( _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode, prb_WorkQueue) );
            }
            else
            {
                // the requested end-generation is >= the previous one, so
                // we've already completely explored this dagnode. that is,
                // a complete walk from this node for nGenerations would
                // not reveal any new information.
            }
            break;

        case SG_DFS_END_FRINGE:
            {
                // they want to start at a dagnode that we put in the
                // end-fringe. this can happen if they need to expand
                // the bounds of the fragment to include older ancestors.
                //
                // we do not mark this as a start node because someone
                // else already has it as a parent.
                pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
                SG_ERR_CHECK( _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode, prb_WorkQueue) );
            }
            break;
        }
    }

    // we optionally put the parents of the current node into the work queue.
    //
    // service the work queue until it is empty. this allows us to walk the
    // graph without recursion. that is, as we decide what to do with a node,
    // we add the parents to the queue. we then iterate thru the work queue
    // until we have dealt with everything -- that is, until all parents have
    // been properly placed.
    //
    // we cannot use a standard iterator to drive this loop because we
    // modify the queue.
    while (1)
    {
        _process_work_queue_item(pCtx, pFrag, prb_WorkQueue, generationEnd, pRepo);
        if (!SG_context__has_err(pCtx))
            break;                          // we processed everything in the queue and are done

        if (!SG_context__err_equals(pCtx, SG_ERR_RESTART_FOREACH))
            SG_ERR_RETHROW;

        SG_context__err_reset(pCtx);        // queue changed, restart iteration
    }

    SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);

    /*
    ** we have loaded a piece of the dag (starting with the given start node
    ** and tracing all parent edges back n generations). we leave with everything
    ** in our progress queues so that other start nodes can be added to the
    ** fragment. this allows the processing of subsequent start nodes to
    ** override some of the decisions that we made. for example:
    **
    **           Q_15
    **             |
    **             |
    **           Z_16
    **          /    \
    **         /      \
    **      Y_17      A_17
    **         \     /    \
    **          \   /      \
    **          B_18      C_18
    **            |
    **            |
    **          D_19
    **            |
    **            |
    **          E_20
    **
    ** if we started with the leaf E_20 and requested 3 generations, we would have:
    **     start_set := { E }
    **     include_set := { B, D, E }
    **     end_set := { Y, A }
    **
    ** after a subsequent call with the leaf C_18 and 3 generations, we would have:
    **     start_set := { C, E }
    **     include_set := { Z, A, B, C, D, E }
    **     end_set := { Q, Y }
    */

    return;

fail:
    SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);
    SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
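/*
 * A hedged sketch (illustrative) of the two-call scenario described in the
 * comment above: load 3 generations from E_20, then widen the same fragment
 * from C_18. The quoted HID strings are placeholders for real changeset HIDs.
 */
static void example__two_start_points(SG_context* pCtx, SG_repo* pRepo, SG_uint64 dagnum)
{
    SG_dagfrag* pFrag = NULL;

    SG_ERR_CHECK( SG_dagfrag__alloc_transient(pCtx, dagnum, &pFrag) );

    // First walk: start_set {E}, include_set {B,D,E}, end_set {Y,A}.
    SG_ERR_CHECK( SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, "<hid of E_20>", 3) );

    // Second walk widens the fragment: start_set {C,E}, end_set {Q,Y}.
    SG_ERR_CHECK( SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, "<hid of C_18>", 3) );

fail:
    SG_DAGFRAG_NULLFREE(pCtx, pFrag);
}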
static void _process_work_queue_cb(SG_context * pCtx,
                                   const char * szHid,
                                   SG_UNUSED_PARAM(void * pAssocData),
                                   void * pVoidCallerData)
{
    // we are given a random item in the work_queue.
    //
    // lookup the corresponding DATA node in the Cache, if it has one.
    //
    // and then evaluate where this node belongs:

    struct _work_queue_data * pWorkQueueData = (struct _work_queue_data *)pVoidCallerData;
    _my_data * pDataCached = NULL;
    SG_dagnode * pDagnodeAllocated = NULL;
    SG_bool bPresent = SG_FALSE;

    SG_UNUSED(pAssocData);

    SG_ERR_CHECK( _cache__lookup(pCtx, pWorkQueueData->pFrag, szHid, &pDataCached, &bPresent) );
    if (!bPresent)
    {
        // dagnode is not present in the cache. therefore, we've never visited
        // this dagnode before. add it to the cache with the proper settings
        // and maybe add all of the parents to the work queue.

        SG_int32 myGeneration;

        SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pWorkQueueData->pRepo, szHid, &pDagnodeAllocated) );
        SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pDagnodeAllocated, &myGeneration) );

        if ((myGeneration > pWorkQueueData->generationEnd))
        {
            SG_ERR_CHECK( _cache__add__dagnode(pCtx, pWorkQueueData->pFrag,
                                               myGeneration,
                                               pDagnodeAllocated, SG_DFS_INTERIOR_MEMBER,
                                               &pDataCached) );
            pDagnodeAllocated = NULL;       // cache takes ownership of dagnode
            SG_ERR_CHECK( _add_parents_to_work_queue(pCtx, pDataCached->m_pDagnode,
                                                     pWorkQueueData->prb_WorkQueue) );
        }
        else
        {
            SG_ERR_CHECK( _cache__add__fringe(pCtx, pWorkQueueData->pFrag, szHid) );
            SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
        }
    }
    else
    {
        // dagnode already present in the cache. therefore, we have already
        // visited it before. we can change our minds about the state of this
        // dagnode if something has changed (such as the fragment bounds being
        // widened).

        switch (pDataCached->m_state)
        {
        default:
        //case SG_DFS_UNKNOWN:
            SG_ASSERT_RELEASE_FAIL2( (0),
                                     (pCtx, "Invalid state [%d] in DAGFRAG Cache for [%s]",
                                      pDataCached->m_state, szHid) );

        case SG_DFS_START_MEMBER:
            // a dagnode has a parent that we are considering a START node.
            // this can happen when we were started from a non-leaf node and
            // then a subsequent call to __load is given a true leaf node or
            // a node deeper in the tree that has our original start node as
            // a parent.
            //
            // clear the start bit. (we only want true fragment-terminal
            // nodes marked as start nodes.)
            pDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
            // FALL-THRU-INTENDED

        case SG_DFS_INTERIOR_MEMBER:
            // a dagnode that we have already visited is being re-visited.
            // this happens for a number of reasons, such as when we hit
            // the parent of a branch/fork. we might get visited because
            // we are a parent of each child.
            //
            // we also get revisited when the caller expands the scope of
            // the fragment.
            if (pWorkQueueData->generationEnd < pDataCached->m_genDagnode)
            {
                // the caller has expanded the scope of the fragment to include
                // older generations than the last time we visited this node.
                // this doesn't affect the state of this node, but it could mean
                // that older ancestors of this node should be looked at.
                SG_ERR_CHECK( _add_parents_to_work_queue(pCtx, pDataCached->m_pDagnode,
                                                         pWorkQueueData->prb_WorkQueue) );
            }
            break;

        case SG_DFS_END_FRINGE:
            // a dagnode that was on the end-fringe is being re-evaluated.
            if (pDataCached->m_genDagnode > pWorkQueueData->generationEnd)
            {
                // it looks like the bounds of the fragment were expanded and
                // now include this dagnode.
                //
                // move it from END-FRINGE to INCLUDE state
                // and re-evaluate all of its parents.
                pDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
                SG_ERR_CHECK( _add_parents_to_work_queue(pCtx, pDataCached->m_pDagnode,
                                                         pWorkQueueData->prb_WorkQueue) );
            }
            break;
        }
    }

    // we have completely dealt with this dagnode, so remove it from the work
    // queue and cause our caller to restart the iteration (because we changed
    // the queue).
    SG_ERR_CHECK( SG_rbtree__remove(pCtx, pWorkQueueData->prb_WorkQueue, szHid) );
    SG_ERR_THROW( SG_ERR_RESTART_FOREACH );

fail:
    SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
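/*
 * A minimal sketch of the restart-foreach idiom assumed by the callback
 * above: because the callback mutates the rbtree being iterated, it throws
 * SG_ERR_RESTART_FOREACH after each removal and the driver restarts the
 * iteration. This standalone loop mirrors the driver already seen in
 * SG_dagfrag__load_from_repo__one and is illustrative only.
 */
static void example__drain_work_queue(SG_context* pCtx, SG_dagfrag* pFrag,
                                      SG_rbtree* prb_WorkQueue,
                                      SG_int32 generationEnd, SG_repo* pRepo)
{
    while (1)
    {
        // Processes one item, removes it from the queue, and throws
        // SG_ERR_RESTART_FOREACH; returns cleanly once the queue is empty.
        _process_work_queue_item(pCtx, pFrag, prb_WorkQueue, generationEnd, pRepo);
        if (!SG_context__has_err(pCtx))
            break;                                  // queue fully drained
        if (!SG_context__err_equals(pCtx, SG_ERR_RESTART_FOREACH))
            SG_ERR_RETHROW;                         // a real error: propagate it
        SG_context__err_reset(pCtx);                // expected restart: iterate again
    }

fail:
    ;
}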