// Parse a (possibly comment-bearing) JSON zing template and commit it
// as a new changeset in the given dag.
//
// pjson may contain comments; they are stripped before parsing.
static void do_one_template(
    SG_context* pCtx,
    SG_repo* pRepo,
    SG_uint32 iDagNum,
    SG_audit* pq,
    unsigned char* pjson
    )
{
    SG_vhash* pvh_template = NULL;
    SG_changeset* pcs = NULL;
    SG_dagnode* pdn = NULL;
    SG_zingtx* ptx = NULL;
    SG_string* pstr = NULL;

    // now the main dag
    SG_ERR_CHECK(  SG_zing__begin_tx(pCtx, pRepo, iDagNum, pq->who_szUserId, NULL, &ptx)  );
    SG_ERR_CHECK(  my_strip_comments(pCtx, (char*) pjson, &pstr)  );
    SG_ERR_CHECK(  SG_vhash__alloc__from_json(pCtx, &pvh_template, SG_string__sz(pstr))  );
    // store_template takes ownership of pvh_template (nulls it) on success.
    SG_ERR_CHECK(  SG_zingtx__store_template(pCtx, ptx, &pvh_template)  );
    SG_ERR_CHECK(  SG_zing__commit_tx(pCtx, pq->when_int64, &ptx, &pcs, &pdn, NULL)  );

    // fall thru

fail:
    // BUGFIX: abort the zing tx if we errored before commit completed, and
    // free the template vhash if store_template never consumed it.  Both
    // previously leaked on the error path (the sibling zing functions in
    // this codebase all abort their tx in fail).  The duplicated cleanup
    // that preceded the label was redundant (the NULLFREE macros null the
    // pointers) and has been folded into this single chain.
    if (ptx)
    {
        SG_ERR_IGNORE(  SG_zing__abort_tx(pCtx, &ptx)  );
    }
    SG_VHASH_NULLFREE(pCtx, pvh_template);
    SG_STRING_NULLFREE(pCtx, pstr);
    SG_CHANGESET_NULLFREE(pCtx, pcs);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
}
// Commit everything pending under the working directory.  When ppdn is
// non-NULL the caller receives (and owns) the resulting dagnode;
// otherwise it is freed here.
void MyFn(commit_all)(
    SG_context* pCtx,
    const SG_pathname* pPathWorkingDir,
    SG_dagnode** ppdn
    )
{
    SG_pendingtree* pTree = NULL;
    SG_repo* pRepo = NULL;
    SG_dagnode* pdnResult = NULL;
    SG_audit q;

    SG_ERR_CHECK(  SG_pendingtree__alloc(pCtx, pPathWorkingDir, SG_FALSE, &pTree)  );
    SG_ERR_CHECK(  SG_pendingtree__get_repo(pCtx, pTree, &pRepo)  );
    SG_ERR_CHECK(  SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS)  );
    SG_ERR_CHECK(  unittests_pendingtree__commit(pCtx, pTree, &q, NULL, 0, NULL, NULL, 0, NULL, 0, NULL, 0, &pdnResult)  );

    SG_PENDINGTREE_NULLFREE(pCtx, pTree);

    if (ppdn)
        *ppdn = pdnResult;          // hand ownership to the caller
    else
        SG_DAGNODE_NULLFREE(pCtx, pdnResult);

    return;

fail:
    SG_PENDINGTREE_NULLFREE(pCtx, pTree);
    SG_DAGNODE_NULLFREE(pCtx, pdnResult);
}
// Exercise basic dagnode transaction semantics: a stored dagnode must not
// be visible before its repo tx commits, must disappear if the tx is
// aborted, and must be fetchable after a successful commit.
void MyFn(one_dagnode)(SG_context * pCtx, SG_repo* pRepo)
{
    char* pId = NULL;
    SG_dagnode* pdnCreated = NULL;
    SG_dagnode* pdnFetched = NULL;
    SG_repo_tx_handle* pTx = NULL;
    char buf_tid[SG_TID_MAX_BUFFER_LENGTH];

    // Manufacture a unique node id by hashing a freshly generated TID.
    VERIFY_ERR_CHECK(  SG_tid__generate(pCtx, buf_tid, sizeof(buf_tid))  );
    VERIFY_ERR_CHECK(  SG_repo__alloc_compute_hash__from_bytes(pCtx, pRepo, sizeof(buf_tid), (SG_byte *)buf_tid, &pId)  );

    VERIFY_ERR_CHECK(  SG_dagnode__alloc(pCtx, &pdnCreated, pId, 1, 0)  );
    VERIFY_ERR_CHECK(  SG_dagnode__freeze(pCtx, pdnCreated)  );

    // Add dagnode.
    VERIFY_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
    VERIFY_ERR_CHECK(  SG_repo__store_dagnode(pCtx, pRepo, pTx, SG_DAGNUM__TESTING__NOTHING, pdnCreated)  );
    pdnCreated = NULL;      // store_dagnode took ownership

    // Should fail: tx not committed.
    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD(  SG_repo__fetch_dagnode(pCtx, pRepo, SG_DAGNUM__TESTING__NOTHING, pId, &pdnFetched),
        SG_ERR_NOT_FOUND  ); // Dag node visible before repo tx committed.

    // Abort repo tx.
    VERIFY_ERR_CHECK(  SG_repo__abort_tx(pCtx, pRepo, &pTx)  );
    VERIFY_COND("SG_repo__abort_tx should null/free the repo transaction.", !pTx);

    // Should fail: tx aborted.
    VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD(  SG_repo__fetch_dagnode(pCtx, pRepo, SG_DAGNUM__TESTING__NOTHING, pId, &pdnFetched),
        SG_ERR_NOT_FOUND  ); // Dag node exists after repo tx abort

    // Write dagnode, commit tx.
    VERIFY_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
    VERIFY_ERR_CHECK(  SG_dagnode__alloc(pCtx, &pdnCreated, pId, 1, 0)  );
    VERIFY_ERR_CHECK(  SG_dagnode__freeze(pCtx, pdnCreated)  );
    VERIFY_ERR_CHECK(  SG_repo__store_dagnode(pCtx, pRepo, pTx, SG_DAGNUM__TESTING__NOTHING, pdnCreated)  );
    pdnCreated = NULL;      // ownership transferred again
    VERIFY_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );
    VERIFY_COND("SG_repo__commit_tx should null/free the repo transaction.", !pTx);

    // Read back the dagnode. It should exist now.
    VERIFY_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, SG_DAGNUM__TESTING__NOTHING, pId, &pdnFetched)  );

    // Fall through to common cleanup.

fail:
    SG_NULLFREE(pCtx, pId);
    SG_DAGNODE_NULLFREE(pCtx, pdnCreated);
    SG_DAGNODE_NULLFREE(pCtx, pdnFetched);
}
// Release the heap members of pNode that cannot be reused; the node
// struct itself is left intact (and the freed fields are nulled by the
// NULLFREE macros).
static void _node__free_nonreusable_memory(SG_context * pCtx, _node_t * pNode)
{
    SG_ASSERT(pNode!=NULL);

    // The three members are independent; free them in any order.
    SG_VHASH_NULLFREE(pCtx, pNode->pVcParents);
    SG_VARRAY_NULLFREE(pCtx, pNode->pAudits);
    SG_DAGNODE_NULLFREE(pCtx, pNode->pDagnode);
}
// Generic value destructor (void* signature): frees a _my_data record
// and the dagnode it caches.  A NULL argument is a no-op.
void _my_data__free(SG_context *pCtx, void * pVoidData)
{
    _my_data * pThis = (_my_data *)pVoidData;

    if (!pThis)
        return;

    SG_DAGNODE_NULLFREE(pCtx, pThis->m_pDagnode);
    SG_NULLFREE(pCtx, pThis);
}
// Test partial-hid lookups: commit one changeset, then verify that a
// 10-character prefix of its hid resolves through both
// SG_repo__hidlookup__dagnode and SG_repo__hidlookup__blob, and that a
// tag added to the changeset round-trips through SG_vc_tags__lookup__tag.
void u0051_hidlookup_test__1(SG_context * pCtx, SG_pathname* pPathTopDir)
{
    char bufName[SG_TID_MAX_BUFFER_LENGTH];
    SG_pathname* pPathWorkingDir = NULL;
    SG_pathname* pPathFile = NULL;
    SG_dagnode* pdn = NULL;
    const char* psz_hid_cs = NULL;      // borrowed ref into pdn
    SG_repo* pRepo = NULL;
    char buf_partial[256];
    char* psz_result = NULL;
    SG_audit q;

    VERIFY_ERR_CHECK(  SG_tid__generate2(pCtx, bufName, sizeof(bufName), 32)  );

    /* create the working dir */
    VERIFY_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, bufName)  );
    VERIFY_ERR_CHECK(  SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir)  );

    /* add stuff */
    VERIFY_ERR_CHECK(  u0051_hidlookup__create_file__numbers(pCtx, pPathWorkingDir, "aaa", 20)  );

    /* create the repo */
    VERIFY_ERR_CHECK(  _ut_pt__new_repo(pCtx, bufName, pPathWorkingDir)  );
    VERIFY_ERR_CHECK(  _ut_pt__addremove(pCtx, pPathWorkingDir)  );
    VERIFY_ERR_CHECK(  u0051_hidlookup__commit_all(pCtx, pPathWorkingDir, &pdn)  );
    VERIFY_ERR_CHECK(  SG_dagnode__get_id_ref(pCtx, pdn, &psz_hid_cs)  );

    VERIFY_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, bufName, &pRepo)  );

    VERIFY_ERR_CHECK(  SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS)  );
    VERIFY_ERR_CHECK(  SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "remember", &q)  );

    // Truncate the full changeset hid to a 10-character prefix.
    VERIFY_ERR_CHECK(  SG_strcpy(pCtx, buf_partial, sizeof(buf_partial), psz_hid_cs)  );
    buf_partial[10] = 0;

    VERIFY_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, buf_partial, &psz_result)  );
    VERIFY_COND("found", (0 == strcmp(psz_result, psz_hid_cs)));
    SG_NULLFREE(pCtx, psz_result);

    // NOTE(review): the blob lookup is expected to return the same hid --
    // presumably the changeset is itself stored as a blob under its hid;
    // confirm against the repo storage layer.
    VERIFY_ERR_CHECK(  SG_repo__hidlookup__blob(pCtx, pRepo, buf_partial, &psz_result)  );
    VERIFY_COND("found", (0 == strcmp(psz_result, psz_hid_cs)));
    SG_NULLFREE(pCtx, psz_result);

    VERIFY_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, "remember", &psz_result)  );
    VERIFY_COND("found", (0 == strcmp(psz_result, psz_hid_cs)));
    SG_NULLFREE(pCtx, psz_result);

fail:
    SG_NULLFREE(pCtx, psz_result);
    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathFile);
}
/**
 * Add existing users (looked up by email) to the named group in the
 * USERS dag.  All of the new "member" links are committed in a single
 * zing changeset; on any error the open tx is aborted.
 */
void SG_group__add_users(
    SG_context* pCtx,
    SG_repo* pRepo,
    const char* psz_group_name,
    const char** paszMemberNames,
    SG_uint32 count_names
    )
{
    char* psz_hid_cs_leaf = NULL;
    SG_zingtx* ptx = NULL;
    SG_dagnode* pdn = NULL;
    SG_changeset* pcs = NULL;
    SG_audit audit;
    SG_uint32 ndx = 0;
    char* psz_recid_group = NULL;
    char* psz_recid_user = NULL;

    SG_ERR_CHECK(  SG_zing__get_leaf__fail_if_needs_merge(pCtx, pRepo, SG_DAGNUM__USERS, &psz_hid_cs_leaf)  );
    SG_ERR_CHECK(  SG_audit__init(pCtx, &audit, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS)  );

    // resolve the group's recid from its name
    SG_ERR_CHECK(  SG_zing__lookup_recid(pCtx, pRepo, SG_DAGNUM__USERS, psz_hid_cs_leaf, "group", "name", psz_group_name, &psz_recid_group)  );

    /* start a changeset based on the current leaf */
    SG_ERR_CHECK(  SG_zing__begin_tx(pCtx, pRepo, SG_DAGNUM__USERS, audit.who_szUserId, psz_hid_cs_leaf, &ptx)  );
    SG_ERR_CHECK(  SG_zingtx__add_parent(pCtx, ptx, psz_hid_cs_leaf)  );

    for (ndx=0; ndx<count_names; ndx++)
    {
        // resolve each member's recid by email, then link it to the group
        SG_ERR_CHECK(  SG_zing__lookup_recid(pCtx, pRepo, SG_DAGNUM__USERS, psz_hid_cs_leaf, "user", "email", paszMemberNames[ndx], &psz_recid_user)  );
        SG_ERR_CHECK(  SG_zingtx__add_link__unpacked(pCtx, ptx, psz_recid_user, psz_recid_group, "member")  );
        SG_NULLFREE(pCtx, psz_recid_user);
    }

    /* commit the changes; on success the tx pointer is nulled, making the
     * abort below a no-op */
    SG_ERR_CHECK(  SG_zing__commit_tx(pCtx, audit.when_int64, &ptx, &pcs, &pdn, NULL)  );

    // fall thru

fail:
    if (ptx)
    {
        SG_ERR_IGNORE(  SG_zing__abort_tx(pCtx, &ptx)  );
    }
    SG_CHANGESET_NULLFREE(pCtx, pcs);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_NULLFREE(pCtx, psz_recid_user);
    SG_NULLFREE(pCtx, psz_recid_group);
    SG_NULLFREE(pCtx, psz_hid_cs_leaf);
}
// varray-foreach callback that deserializes one version-1 dagfrag member.
// Each element is a vhash describing either an end-fringe entry (hid only)
// or an actual dagnode plus its generation and DFS state; either way the
// entry is added to the frag's cache.
static void _deserialize_data_ver_1_cb(SG_context * pCtx,
                                       void * pVoidDeserializeData,
                                       SG_UNUSED_PARAM(const SG_varray * pva),
                                       SG_UNUSED_PARAM(SG_uint32 ndx),
                                       const SG_variant * pVariant)
{
    struct _deserialize_data * pDeserializeData = (struct _deserialize_data *)pVoidDeserializeData;
    SG_vhash * pvhMyData;
    SG_vhash * pvhDagnode;
    SG_int64 gen64, state64;
    _my_data * pMyData;
    SG_dagnode * pDagnode = NULL;
    const char* psz_id = NULL;

    SG_UNUSED(pva);
    SG_UNUSED(ndx);

    SG_ERR_CHECK(  SG_variant__get__vhash(pCtx,pVariant,&pvhMyData)  );

#if DEBUG && TRACE_DAGFRAG && 0
    SG_ERR_CHECK(  SG_vhash_debug__dump_to_console(pCtx, pvhMyData)  );
#endif

    SG_ERR_CHECK(  SG_vhash__get__int64(pCtx,pvhMyData,KEY_DFS_STATE,&state64)  );
    if (SG_DFS_END_FRINGE == state64)
    {
        // End-fringe members carry only the dagnode id.
        SG_ERR_CHECK(  SG_vhash__get__sz(pCtx,pvhMyData,KEY_DAGNODE_ID,&psz_id)  );
        SG_ERR_CHECK(  _cache__add__fringe(pCtx,pDeserializeData->pFrag, psz_id)  );
    }
    else
    {
        // Member nodes carry the serialized dagnode itself plus its generation.
        SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx,pvhMyData,KEY_ACTUAL_DAGNODE,&pvhDagnode)  );
        SG_ERR_CHECK(  SG_vhash__get__int64(pCtx,pvhDagnode,KEY_GEN,&gen64)  );
        SG_ERR_CHECK(  SG_dagnode__alloc__from_vhash(pCtx, &pDagnode, pvhDagnode)  );
        SG_ERR_CHECK(  _cache__add__dagnode(pCtx,
                                            pDeserializeData->pFrag,
                                            (SG_int32)gen64,
                                            pDagnode,
                                            (SG_uint32)state64,
                                            &pMyData)  );
        pDagnode = NULL;        // cache owns it now.
    }

    return;

fail:
    SG_DAGNODE_NULLFREE(pCtx, pDagnode);
}
// Add a dagnode to the work queue if it's not already on it. If it is already // on it, this might tell us new information about whether it is a descendent of // "New" or "Old", so update it with the new isAncestorOf information. static void _fnsc_work_queue__insert( SG_context * pCtx, _fnsc_work_queue_t * pWorkQueue, const char * pszHid, SG_uint64 dagnum, SG_repo * pRepo, SG_byte isAncestorOf ) { SG_bool alreadyInTheQueue = SG_FALSE; SG_dagnode * pDagnode = NULL; SG_uint32 i; SG_uint32 revno = 0; char * revno__p = NULL; // First we check the cache. This will tell us whether the item is // already on the queue, and if so what its revno is. SG_ERR_CHECK( SG_rbtree__find(pCtx, pWorkQueue->pRevnoCache, pszHid, &alreadyInTheQueue, (void**)&revno__p) ); if(alreadyInTheQueue) { revno = (SG_uint32)(revno__p - (char*)NULL); } else { SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, pszHid, &pDagnode) ); SG_ERR_CHECK( SG_dagnode__get_revno(pCtx, pDagnode, &revno) ); } i = pWorkQueue->length; while(i>0 && pWorkQueue->p[i-1].revno > revno) --i; if (i>0 && pWorkQueue->p[i-1].revno == revno) { SG_ASSERT(alreadyInTheQueue); if (pWorkQueue->p[i-1].isAncestorOf==_ANCESTOR_OF_NEW && isAncestorOf!=_ANCESTOR_OF_NEW) --pWorkQueue->numAncestorsOfNewOnTheQueue; pWorkQueue->p[i-1].isAncestorOf |= isAncestorOf; // OR in the new isAncestorOfs } else { SG_ASSERT(pDagnode!=NULL); SG_ERR_CHECK( _fnsc_work_queue__insert_at(pCtx, pWorkQueue, i, pszHid, revno, &pDagnode, isAncestorOf) ); } return; fail: SG_DAGNODE_NULLFREE(pCtx, pDagnode); }
// Add a dagnode to the work queue if it's not already on it. If it is already // on it update it with the new pIsAncestorOf information. static void _hrca_work_queue__insert( SG_context * pCtx, _hrca_work_queue_t * pWorkQueue, const char * pszHid, SG_repo * pRepo, SG_repo_fetch_dagnodes_handle * pDagnodeFetcher, SG_bitvector * pIsAncestorOf ) { SG_bool alreadyInTheQueue = SG_FALSE; SG_dagnode * pDagnode = NULL; SG_uint32 i; SG_uint32 revno = 0; char * revno__p = NULL; // First we check the cache. This will tell us whether the item is // already on the queue, and if so what its revno is. SG_ERR_CHECK( SG_rbtree__find(pCtx, pWorkQueue->pRevnoCache, pszHid, &alreadyInTheQueue, (void**)&revno__p) ); if(alreadyInTheQueue) { revno = (SG_uint32)(revno__p - (char*)NULL); } else { SG_ERR_CHECK( SG_repo__fetch_dagnodes__one(pCtx, pRepo, pDagnodeFetcher, pszHid, &pDagnode) ); SG_ERR_CHECK( SG_dagnode__get_revno(pCtx, pDagnode, &revno) ); } i = pWorkQueue->length; while(i>0 && pWorkQueue->p[i-1].revno > revno) --i; if (i>0 && pWorkQueue->p[i-1].revno == revno) { SG_ASSERT(alreadyInTheQueue); // OR in the new pIsAncestorOf SG_ERR_CHECK( SG_bitvector__assign__bv__or_eq__bv(pCtx, pWorkQueue->p[i-1].pIsAncestorOf, pIsAncestorOf) ); } else { SG_ASSERT(pDagnode!=NULL); SG_ERR_CHECK( _hrca_work_queue__insert_at(pCtx, pWorkQueue, i, pszHid, revno, &pDagnode, pIsAncestorOf) ); } return; fail: SG_DAGNODE_NULLFREE(pCtx, pDagnode); }
/**
 * Create a new, empty group record (with the given name) in the USERS
 * dag, committed as a single zing changeset.  On any error the open tx
 * is aborted.
 */
void SG_group__create(
    SG_context* pCtx,
    SG_repo* pRepo,
    const char* psz_name
    )
{
    char* psz_hid_cs_leaf = NULL;
    SG_zingtx* ptx = NULL;
    SG_zingrecord* pRecord = NULL;
    SG_dagnode* pdn = NULL;
    SG_changeset* pcs = NULL;
    SG_zingtemplate* pTemplate = NULL;
    SG_zingfieldattributes* pFieldAttrs = NULL;
    SG_audit audit;

    SG_ERR_CHECK(  SG_zing__get_leaf__fail_if_needs_merge(pCtx, pRepo, SG_DAGNUM__USERS, &psz_hid_cs_leaf)  );
    SG_ERR_CHECK(  SG_audit__init(pCtx, &audit, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS)  );

    /* start a changeset based on the current leaf */
    SG_ERR_CHECK(  SG_zing__begin_tx(pCtx, pRepo, SG_DAGNUM__USERS, audit.who_szUserId, psz_hid_cs_leaf, &ptx)  );
    SG_ERR_CHECK(  SG_zingtx__add_parent(pCtx, ptx, psz_hid_cs_leaf)  );

    // create a fresh "group" record and fill in its "name" field
    SG_ERR_CHECK(  SG_zingtx__get_template(pCtx, ptx, &pTemplate)  );
    SG_ERR_CHECK(  SG_zingtx__create_new_record(pCtx, ptx, "group", &pRecord)  );
    SG_ERR_CHECK(  SG_zingtemplate__get_field_attributes(pCtx, pTemplate, "group", "name", &pFieldAttrs)  );
    SG_ERR_CHECK(  SG_zingrecord__set_field__string(pCtx, pRecord, pFieldAttrs, psz_name)  );

    /* commit the changes; on success the tx pointer is nulled, making the
     * abort below a no-op */
    SG_ERR_CHECK(  SG_zing__commit_tx(pCtx, audit.when_int64, &ptx, &pcs, &pdn, NULL)  );

    // fall thru

fail:
    if (ptx)
    {
        SG_ERR_IGNORE(  SG_zing__abort_tx(pCtx, &ptx)  );
    }
    SG_CHANGESET_NULLFREE(pCtx, pcs);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_NULLFREE(pCtx, psz_hid_cs_leaf);
}
// Walk the dag starting at pszDagnodeHid and add its ancestors (up to
// "generations" generations back) to prbDagnodeHids via the
// _dagwalk_callback / genLimit cutoff.
void SG_sync__add_n_generations(SG_context* pCtx,
                                SG_repo* pRepo,
                                const char* pszDagnodeHid,
                                SG_rbtree* prbDagnodeHids,
                                SG_uint32 generations)
{
    _dagwalk_data dagWalkData;
    SG_dagnode* pStartNode = NULL;
    SG_int32 startGen;

    dagWalkData.pszStartNodeHid = pszDagnodeHid;

    // NOTE(review): this fetch_dagnode call takes no dagnum argument,
    // unlike the 5-argument form used elsewhere in this codebase --
    // presumably a different revision of the SG_repo API; confirm against
    // the current SG_repo prototypes.
    SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, pszDagnodeHid, &pStartNode)  );
    SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pStartNode, &startGen)  );

    // Nodes whose generation falls at or below genLimit are outside the
    // window we collect.  (May go negative when generations > startGen.)
    dagWalkData.genLimit = startGen - generations;
    dagWalkData.prbVisitedNodes = prbDagnodeHids;

    SG_ERR_CHECK(  SG_dagwalker__walk_dag_single(pCtx, pRepo, pszDagnodeHid, _dagwalk_callback, &dagWalkData)  );

    /* fall through */
fail:
    SG_DAGNODE_NULLFREE(pCtx, pStartNode);
}
// Exercise comments/stamps/tags on a single changeset: add one of each
// and verify the lookups; exercise the tag reverse-lookup table; then add
// a second tag, verify the counts, remove it, and verify again.
// Returns 1 on success, 0 on failure.
int u0050_logstuff_test__1(SG_context * pCtx, SG_pathname* pPathTopDir)
{
    char bufName[SG_TID_MAX_BUFFER_LENGTH];
    SG_pathname* pPathWorkingDir = NULL;
    SG_pathname* pPathFile = NULL;
    SG_vhash* pvh = NULL;               // borrowed reference into pva -- never freed directly
    SG_dagnode* pdn = NULL;
    const char* psz_hid_cs = NULL;      // borrowed ref into pdn
    SG_repo* pRepo = NULL;
    SG_uint32 count;
    SG_rbtree* prb = NULL;
    SG_varray* pva = NULL;
    SG_rbtree* prb_reversed = NULL;
    const char* psz_val = NULL;
    SG_audit q;

    VERIFY_ERR_CHECK(  SG_tid__generate2(pCtx, bufName, sizeof(bufName), 32)  );

    /* create the working dir */
    VERIFY_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, bufName)  );
    VERIFY_ERR_CHECK(  SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir)  );

    /* add stuff */
    VERIFY_ERR_CHECK(  u0050_logstuff__create_file__numbers(pCtx, pPathWorkingDir, "aaa", 20)  );

    /* create the repo */
    VERIFY_ERR_CHECK(  _ut_pt__new_repo(pCtx, bufName, pPathWorkingDir)  );
    VERIFY_ERR_CHECK(  _ut_pt__addremove(pCtx, pPathWorkingDir)  );
    VERIFY_ERR_CHECK(  u0050_logstuff__commit_all(pCtx, pPathWorkingDir, &pdn)  );
    VERIFY_ERR_CHECK(  SG_dagnode__get_id_ref(pCtx, pdn, &psz_hid_cs)  );

    // CONSISTENCY: was SG_ERR_CHECK; use VERIFY_ERR_CHECK like every other
    // call in this test so failures are reported the same way.
    VERIFY_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, bufName, &pRepo)  );

#define MY_COMMENT "The name of this new file sucks! What kind of a name is 'aaa'?"

    VERIFY_ERR_CHECK(  SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS)  );
    VERIFY_ERR_CHECK(  SG_vc_comments__add(pCtx, pRepo, psz_hid_cs, MY_COMMENT, &q)  );
    VERIFY_ERR_CHECK(  SG_vc_stamps__add(pCtx, pRepo, psz_hid_cs, "crap", &q)  );
    VERIFY_ERR_CHECK(  SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "tcrap", &q)  );

    /* the comment round-trips */
    VERIFY_ERR_CHECK(  SG_vc_comments__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva, 0, &pvh)  );
    VERIFY_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh, "text", &psz_val)  );
    VERIFY_COND("match", (0 == strcmp(psz_val, MY_COMMENT)) );
    SG_VARRAY_NULLFREE(pCtx, pva);

    /* the stamp round-trips */
    VERIFY_ERR_CHECK(  SG_vc_stamps__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva, 0, &pvh)  );
    VERIFY_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh, "stamp", &psz_val)  );
    VERIFY_COND("match", (0 == strcmp(psz_val, "crap")) );
    SG_VARRAY_NULLFREE(pCtx, pva);

    /* the tag round-trips */
    VERIFY_ERR_CHECK(  SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva, 0, &pvh)  );
    VERIFY_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh, "tag", &psz_val)  );
    VERIFY_COND("match", (0 == strcmp(psz_val, "tcrap")) );
    SG_VARRAY_NULLFREE(pCtx, pva);

    /* tag list and reverse lookup with one tag */
    VERIFY_ERR_CHECK(  SG_vc_tags__list(pCtx, pRepo, &prb)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count)  );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK(  SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb_reversed, &count)  );
    VERIFY_COND("count", (1 == count));
    {
        const char* psz_my_key = NULL;
        const char* psz_my_val = NULL;
        SG_bool b;

        VERIFY_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val)  );
        VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap")) );
        VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs)) );
    }
    SG_RBTREE_NULLFREE(pCtx, prb_reversed);
    SG_RBTREE_NULLFREE(pCtx, prb);

    /* a second tag on the same changeset */
    VERIFY_ERR_CHECK(  SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "whatever", &q)  );
    VERIFY_ERR_CHECK(  SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (2 == count));
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK(  SG_vc_tags__list(pCtx, pRepo, &prb)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count)  );
    VERIFY_COND("count", (2 == count));
    VERIFY_ERR_CHECK(  SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb_reversed, &count)  );
    VERIFY_COND("count", (1 == count));
    {
        const char* psz_my_key = NULL;
        const char* psz_my_val = NULL;
        SG_bool b;

        VERIFY_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val)  );
        VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs)) );
        /* we don't know whether psz_my_val is tcrap or whatever. */
        // VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap")) );
    }
    SG_RBTREE_NULLFREE(pCtx, prb_reversed);
    SG_RBTREE_NULLFREE(pCtx, prb);

    {
        const char* psz_remove = "whatever";
        VERIFY_ERR_CHECK(  SG_vc_tags__remove(pCtx, pRepo, &q, 1, &psz_remove)  );
        /* Note that by removing whatever, we are bringing the tags list back
         * to a state where it has been before (just tcrap).  This changeset in
         * the tags table will have its own csid, because the parentage is
         * different, but it's root idtrie HID will be the same as a previous
         * node.
         */
    }

    VERIFY_ERR_CHECK(  SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (1 == count));
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK(  SG_vc_tags__list(pCtx, pRepo, &prb)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count)  );
    VERIFY_COND("count", (1 == count));
    SG_RBTREE_NULLFREE(pCtx, prb);

    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathFile);

    return 1;

fail:
    // BUGFIX: the failure path previously freed pvh -- a borrowed reference
    // owned by pva (the success path never frees it separately), risking an
    // invalid free -- while leaking pva, prb, prb_reversed, pRepo, and pdn.
    SG_VARRAY_NULLFREE(pCtx, pva);
    SG_RBTREE_NULLFREE(pCtx, prb);
    SG_RBTREE_NULLFREE(pCtx, prb_reversed);
    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathFile);
    return 0;
}
// Install a version-control hook implementation for the given interface.
// The hook record stores the interface name, the JS source, and optionally
// a module name and version.  When replaceOld is set, every existing
// record for the interface is deleted in the same changeset that adds the
// new one.
void SG_vc_hooks__install(
    SG_context* pCtx,
    SG_repo* pRepo,
    const char* psz_interface,
    const char* psz_js,
    const char *module,
    SG_uint32 version,
    SG_bool replaceOld,
    const SG_audit* pq
    )
{
    char* psz_hid_cs_leaf = NULL;
    SG_zingtx* pztx = NULL;
    SG_zingrecord* prec = NULL;
    SG_dagnode* pdn = NULL;
    SG_changeset* pcs = NULL;
    SG_zingtemplate* pzt = NULL;
    SG_zingfieldattributes* pzfa = NULL;
    SG_varray *oldRecs = NULL;

    SG_UNUSED(module);
    SG_UNUSED(version);

    if (replaceOld)
    {
        // Collect the records to delete before opening the tx.
        SG_ERR_CHECK(  SG_vc_hooks__lookup_by_interface(pCtx, pRepo, psz_interface, &oldRecs)  );
    }

    // TODO consider validating the JS by compiling it
    SG_ERR_CHECK(  SG_zing__get_leaf(pCtx, pRepo, NULL, SG_DAGNUM__VC_HOOKS, &psz_hid_cs_leaf)  );

    /* start a changeset */
    SG_ERR_CHECK(  SG_zing__begin_tx(pCtx, pRepo, SG_DAGNUM__VC_HOOKS, pq->who_szUserId, psz_hid_cs_leaf, &pztx)  );
    SG_ERR_CHECK(  SG_zingtx__add_parent(pCtx, pztx, psz_hid_cs_leaf)  );

    if (replaceOld)
    {
        SG_uint32 i, count;

        SG_ERR_CHECK(  SG_varray__count(pCtx, oldRecs, &count)  );
        for ( i = 0; i < count; ++i )
        {
            const char *hidrec = NULL;
            SG_vhash *rec = NULL;       // borrowed reference into oldRecs

            SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, oldRecs, i, &rec)  );
            SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, rec, "hidrec", &hidrec)  );
            SG_ERR_CHECK(  SG_zingtx__delete_record__hid(pCtx, pztx, "hook", hidrec)  );
        }
    }

    // Create the replacement record and fill in its fields.
    SG_ERR_CHECK(  SG_zingtx__get_template(pCtx, pztx, &pzt)  );
    SG_ERR_CHECK(  SG_zingtx__create_new_record(pCtx, pztx, "hook", &prec)  );

    SG_ERR_CHECK(  SG_zingtemplate__get_field_attributes(pCtx, pzt, "hook", "interface", &pzfa)  );
    SG_ERR_CHECK(  SG_zingrecord__set_field__string(pCtx, prec, pzfa, psz_interface)  );

    SG_ERR_CHECK(  SG_zingtemplate__get_field_attributes(pCtx, pzt, "hook", "js", &pzfa)  );
    SG_ERR_CHECK(  SG_zingrecord__set_field__string(pCtx, prec, pzfa, psz_js)  );

    if (module)
    {
        SG_ERR_CHECK(  SG_zingtemplate__get_field_attributes(pCtx, pzt, "hook", "module", &pzfa)  );
        SG_ERR_CHECK(  SG_zingrecord__set_field__string(pCtx, prec, pzfa, module)  );
    }

    if (version)
    {
        // NOTE(review): a version of 0 is silently not recorded -- presumably
        // 0 is reserved to mean "no version"; confirm with callers.
        SG_ERR_CHECK(  SG_zingtemplate__get_field_attributes(pCtx, pzt, "hook", "version", &pzfa)  );
        SG_ERR_CHECK(  SG_zingrecord__set_field__int(pCtx, prec, pzfa, (SG_int64)version)  );
    }

    /* commit the changes */
    SG_ERR_CHECK(  SG_zing__commit_tx(pCtx, pq->when_int64, &pztx, &pcs, &pdn, NULL)  );

    // fall thru

fail:
    // On success commit_tx nulled pztx, so the abort is a no-op.
    if (pztx)
    {
        SG_ERR_IGNORE(  SG_zing__abort_tx(pCtx, &pztx)  );
    }
    SG_VARRAY_NULLFREE(pCtx, oldRecs);
    SG_NULLFREE(pCtx, psz_hid_cs_leaf);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_CHANGESET_NULLFREE(pCtx, pcs);
}
// Return (in *ppva, caller frees) a direct path of csids from the given
// changeset back to the dag root, as computed by SG_repo__find_dag_path.
// When SG_DOUBLE_CHECK__PATH_TO_ROOT is enabled, the result is
// cross-checked against a naive walk that always follows the first parent.
void SG_repo__dag__find_direct_path_from_root(
    SG_context * pCtx,
    SG_repo* pRepo,
    SG_uint64 dagnum,
    const char* psz_csid,
    SG_varray** ppva
    )
{
    SG_varray* new_pva = NULL;
#if SG_DOUBLE_CHECK__PATH_TO_ROOT
    SG_varray* old_pva = NULL;
    SG_dagnode* pdn = NULL;
    char* psz_cur = NULL;
    SG_string* pstr1 = NULL;
    SG_string* pstr2 = NULL;
#endif

    SG_ERR_CHECK(  SG_repo__find_dag_path(pCtx, pRepo, dagnum, NULL, psz_csid, &new_pva)  );

#if SG_DOUBLE_CHECK__PATH_TO_ROOT
    SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &old_pva)  );
    SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_csid, &psz_cur)  );
    // Naive check: follow first-parent links from psz_csid to the root,
    // recording each csid along the way.
    while (1)
    {
        SG_uint32 count_parents = 0;
        const char** a_parents = NULL;

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_cur, &pdn)  );
        SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, old_pva, psz_cur)  );
        SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pdn, &count_parents, &a_parents)  );
        if (0 == count_parents)
        {
            break;      // reached the root
        }
        SG_NULLFREE(pCtx, psz_cur);
        SG_ERR_CHECK(  SG_STRDUP(pCtx, a_parents[0], &psz_cur)  );
        SG_DAGNODE_NULLFREE(pCtx, pdn);
    }
    // Terminate the naive path with an empty entry to match find_dag_path's
    // output shape before comparing the JSON serializations.
    SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, old_pva, "")  );
    SG_ERR_CHECK(  SG_string__alloc(pCtx, &pstr1)  );
    SG_ERR_CHECK(  SG_string__alloc(pCtx, &pstr2)  );
    SG_ERR_CHECK(  SG_varray__to_json(pCtx, old_pva, pstr1)  );
    SG_ERR_CHECK(  SG_varray__to_json(pCtx, new_pva, pstr2)  );
    if (0 != strcmp(SG_string__sz(pstr1), SG_string__sz(pstr2)))
    {
        // a failure here isn't actually ALWAYS bad.  there can be more than
        // one path to root.
        fprintf(stderr, "old way:\n");
        SG_VARRAY_STDERR(old_pva);
        fprintf(stderr, "new way:\n");
        SG_VARRAY_STDERR(new_pva);
        SG_ERR_THROW(  SG_ERR_UNSPECIFIED  );
    }
#endif

    // Hand the result to the caller.
    *ppva = new_pva;
    new_pva = NULL;

fail:
    SG_VARRAY_NULLFREE(pCtx, new_pva);
#if SG_DOUBLE_CHECK__PATH_TO_ROOT
    SG_STRING_NULLFREE(pCtx, pstr1);
    SG_STRING_NULLFREE(pCtx, pstr2);
    SG_VARRAY_NULLFREE(pCtx, old_pva);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_NULLFREE(pCtx, psz_cur);
#endif
}
// Find the common ancestor (with the highest revno) of the given input
// changesets.  Works backward from the inputs: repeatedly pop the
// highest-revno node off a work queue, tagging each visited node with a
// bitvector that records which inputs it is an ancestor of; the first
// popped node whose bitvector covers all inputs is the answer.  The
// caller frees *ppOutputNodeHid.
void SG_dagquery__highest_revno_common_ancestor(
    SG_context * pCtx,
    SG_repo * pRepo,
    SG_uint64 dagnum,
    const SG_stringarray * pInputNodeHids,
    char ** ppOutputNodeHid
    )
{
    const char * const * paszInputNodeHids = NULL;
    SG_uint32 countInputNodes = 0;
    SG_repo_fetch_dagnodes_handle * pDagnodeFetcher = NULL;
    _hrca_work_queue_t workQueue = {NULL, 0, 0, NULL};
    SG_uint32 i;
    SG_dagnode * pDagnode = NULL;
    const char * pszHidRef = NULL;      // borrowed ref; valid while pDagnode lives
    SG_bitvector * pIsAncestorOf = NULL;
    SG_uint32 countIsAncestorOf = 0;

    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK(pRepo);
    SG_NULLARGCHECK(pInputNodeHids);
    SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, pInputNodeHids, &paszInputNodeHids, &countInputNodes)  );
    SG_ARGCHECK(countInputNodes>0, pInputNodeHids);
    SG_NULLARGCHECK(ppOutputNodeHid);

    SG_ERR_CHECK(  SG_repo__fetch_dagnodes__begin(pCtx, pRepo, dagnum, &pDagnodeFetcher)  );

    SG_ERR_CHECK(  SG_allocN(pCtx, _HRCA_WORK_QUEUE_INIT_LENGTH, workQueue.p)  );
    workQueue.allocatedLength = _HRCA_WORK_QUEUE_INIT_LENGTH;
    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &workQueue.pRevnoCache)  );

    // Seed the queue: input i is (trivially) an ancestor of itself, so its
    // bitvector has only bit i set.
    SG_ERR_CHECK(  SG_BITVECTOR__ALLOC(pCtx, &pIsAncestorOf, countInputNodes)  );
    for(i=0; i<countInputNodes; ++i)
    {
        SG_ERR_CHECK(  SG_bitvector__zero(pCtx, pIsAncestorOf)  );
        SG_ERR_CHECK(  SG_bitvector__set_bit(pCtx, pIsAncestorOf, i, SG_TRUE)  );
        SG_ERR_CHECK(  _hrca_work_queue__insert(pCtx, &workQueue, paszInputNodeHids[i], pRepo, pDagnodeFetcher, pIsAncestorOf)  );
    }
    SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

    // Pop nodes (highest revno first) until one is an ancestor of ALL inputs.
    SG_ERR_CHECK(  _hrca_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &pIsAncestorOf)  );
    SG_ERR_CHECK(  SG_bitvector__count_set_bits(pCtx, pIsAncestorOf, &countIsAncestorOf)  );
    while(countIsAncestorOf < countInputNodes)
    {
        SG_uint32 count_parents = 0;
        const char** parents = NULL;
        // Propagate this node's ancestry bits to each of its parents.
        SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pDagnode, &count_parents, &parents)  );
        for(i=0; i<count_parents; ++i)
            SG_ERR_CHECK(  _hrca_work_queue__insert(pCtx, &workQueue, parents[i], pRepo, pDagnodeFetcher, pIsAncestorOf)  );

        SG_DAGNODE_NULLFREE(pCtx, pDagnode);
        SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

        SG_ERR_CHECK(  _hrca_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &pIsAncestorOf)  );
        SG_ERR_CHECK(  SG_bitvector__count_set_bits(pCtx, pIsAncestorOf, &countIsAncestorOf)  );
    }

    // Copy the winning hid out before tearing down the node that owns it.
    SG_ERR_CHECK(  SG_strdup(pCtx, pszHidRef, ppOutputNodeHid)  );

    SG_DAGNODE_NULLFREE(pCtx, pDagnode);
    SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);
    for(i=0; i<workQueue.length; ++i)
    {
        SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
        SG_BITVECTOR_NULLFREE(pCtx, workQueue.p[i].pIsAncestorOf);
    }
    SG_NULLFREE(pCtx, workQueue.p);
    SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);
    SG_ERR_CHECK(  SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pDagnodeFetcher)  );

    return;
fail:
    for(i=0; i<workQueue.length; ++i)
    {
        SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
        SG_BITVECTOR_NULLFREE(pCtx, workQueue.p[i].pIsAncestorOf);
    }
    SG_NULLFREE(pCtx, workQueue.p);
    SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);
    SG_DAGNODE_NULLFREE(pCtx, pDagnode);
    SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);
    if(pDagnodeFetcher!=NULL)
    {
        SG_ERR_IGNORE(  SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pDagnodeFetcher)  );
    }
}
void SG_dagfrag__load_from_repo__one(SG_context * pCtx,
                                     SG_dagfrag * pFrag,
                                     SG_repo* pRepo,
                                     const char * szHidStart,
                                     SG_int32 nGenerations)
{
    // load a fragment of the dag starting with the given dagnode
    // for nGenerations of parents.
    //
    // we add this portion of the graph to whatevery we already
    // have in our fragment.  this may either augment (give us
    // a larger connected piece) or it may be an independent
    // subset.
    //
    // if nGenerations <= 0, load everything from this starting point
    // back to the NULL/root.
    //
    // generationStart is the generation of the starting dagnode.
    //
    // the starting dagnode *MAY* be in the final start-fringe.
    // normally, it will be.  but if we are called multiple times
    // (and have more than one start point), it may be the case
    // that this node is a parent of one of the other start points.
    //
    // we compute generationEnd as the generation that we will NOT
    // include in the fragment; nodes of that generation will be in
    // the end-fringe.  that is, we include [start...end) like most
    // C++ iterators.

    _my_data * pMyDataCached = NULL;
    SG_dagnode * pDagnodeAllocated = NULL;
    SG_dagnode * pDagnodeStart;             // borrowed: either pDagnodeAllocated or the cache's copy
    SG_int32 generationStart, generationEnd;
    SG_bool bPresent = SG_FALSE;
    SG_rbtree* prb_WorkQueue = NULL;

    SG_NULLARGCHECK_RETURN(pFrag);
    SG_NONEMPTYCHECK_RETURN(szHidStart);

    // if we are extending the fragment, delete the generation-sorted
    // member cache copy.  (see __foreach_member()).  it's either that
    // or update it in parallel as we change the real CACHE and that
    // doesn't seem worth the bother.
    SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
    pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_WorkQueue)  );

    // fetch the starting dagnode and compute the generation bounds.
    // first, see if the cache already has info for this dagnode.
    // if not, fetch it from the source and then add it to the cache.
    SG_ERR_CHECK(  _cache__lookup(pCtx, pFrag,szHidStart,&pMyDataCached,&bPresent)  );
    if (!bPresent)
    {
        // a frozen frag has no repo to fetch from.
        if (!pRepo)
            SG_ERR_THROW(  SG_ERR_INVALID_WHILE_FROZEN  );

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, szHidStart, &pDagnodeAllocated)  );
        pDagnodeStart = pDagnodeAllocated;
    }
    else
    {
        pDagnodeStart = pMyDataCached->m_pDagnode;
    }

    SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pDagnodeStart,&generationStart)  );
    SG_ASSERT_RELEASE_FAIL2(  (generationStart > 0),
                              (pCtx,"Invalid generation value [%d] for dagnode [%s]",
                               generationStart,szHidStart)  );

    // clamp the window at the root: [start...end), end excluded.
    if ((nGenerations <= 0)  ||  (generationStart <= nGenerations))
        generationEnd = 0;
    else
        generationEnd = generationStart - nGenerations;

    if (!bPresent)
    {
        // this dagnode was not already present in the cache.
        // add it to the cache directly and set the state.
        // we don't need to go thru the work queue for it.
        //
        // then the add all of its parents to the work queue.

        SG_ERR_CHECK(  _cache__add__dagnode(pCtx,
                                            pFrag,
                                            generationStart,
                                            pDagnodeAllocated,SG_DFS_START_MEMBER,
                                            &pMyDataCached)  );
        pDagnodeAllocated = NULL;       // cache owns it now

        SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
    }
    else
    {
        // the node was already present in the cache, so we have already
        // walked at least part of the graph around it.

        switch (pMyDataCached->m_state)
        {
        default:
        //case SG_DFS_UNKNOWN:
            SG_ASSERT_RELEASE_FAIL2(  (0),
                                      (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
                                       pMyDataCached->m_state,szHidStart)  );

        case SG_DFS_INTERIOR_MEMBER:            // already in fragment
        case SG_DFS_START_MEMBER:               // already in fragment, duplicated leaf?
            if (generationEnd < pMyDataCached->m_genDagnode)
            {
                // they've expanded the bounds of the fragment since we
                // last visited this dagnode.  keep this dagnode in the
                // fragment and revisit the ancestors in case any were
                // put in the end-fringe that should now be included.
                //
                // we leave the state as INCLUDE or INCLUDE_AND_START
                // because a duplicate start point should remain a
                // start point.

                SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
            }
            else
            {
                // the current end-generation requested is >= the previous
                // end-generation, then we've completely explored this dagnode
                // already.  that is, a complete walk from this node for nGenerations
                // would not reveal any new information.
            }
            break;

        case SG_DFS_END_FRINGE:
            {
                // they want to start at a dagnode that we put in the
                // end-fringe.  this can happen if they need to expand
                // the bounds of the fragment to include older ancestors.
                //
                // we do not mark this as a start node because someone
                // else already has it as a parent.

                pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
                SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
            }
            break;
        }
    }

    // we optionally put the parents of the current node into the work queue.
    //
    // service the work queue until it is empty.  this allows us to walk the graph without
    // recursion.  that is, as we decide what to do with a node, we add the parents
    // to the queue.  we then iterate thru the work queue until we have dealt with
    // everything -- that is, until all parents have been properly placed.
    //
    // we cannot use a standard iterator to drive this loop because we
    // modify the queue.

    while (1)
    {
        _process_work_queue_item(pCtx, pFrag,prb_WorkQueue,generationEnd,pRepo);
        if (!SG_context__has_err(pCtx))
            break;                          // we processed everything in the queue and are done

        if (!SG_context__err_equals(pCtx,SG_ERR_RESTART_FOREACH))
            SG_ERR_RETHROW;

        SG_context__err_reset(pCtx);        // queue changed, restart iteration
    }

    SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);

    /*
    ** we have loaded a piece of the dag (starting with the given start node
    ** and tracing all parent edges back n generations).  we leave with everything
    ** in our progress queues so that other start nodes can be added to the
    ** fragment.  this allows the processing of subsequent start nodes to
    ** override some of the decisions that we made.  for example:
    **
    **           Q_15
    **             |
    **             |
    **           Z_16
    **           /  \
    **          /    \
    **      Y_17      A_17
    **          \    /   \
    **           \  /     \
    **           B_18     C_18
    **            |
    **            |
    **           D_19
    **            |
    **            |
    **           E_20
    **
    ** if we started with the leaf E_20 and requested 3 generations, we would have:
    **     start_set := { E }
    **     include_set := { B, D, E }
    **     end_set := { Y, A }
    **
    ** after a subsequent call with the leaf C_18 and 3 generations, we would have:
    **     start_set := { C, E }
    **     include_set := { Z, A, B, C, D, E }
    **     end_set := { Q, Y }
    **
    */

    return;

fail:
    SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);
    SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
/**
 * Build a "best guess" dagfrag for a sync roundtrip.
 *
 * Guesses how many generations of history to include by comparing the
 * deepest "start from" node against the shallowest "connect to" node,
 * then loads a fragment of that thickness starting from prbStartFromHids.
 *
 * pCtx                    - error/context object
 * pRepo                   - repo whose dag is read
 * iDagNum                 - which dag within the repo
 * prbStartFromHids        - required; HIDs the fragment starts from
 * pvhConnectToHidsAndGens - optional; HID -> generation pairs we hope to
 *                           connect to (NULL means no hint available)
 * ppFrag                  - receives the allocated fragment; caller frees
 */
void SG_sync__build_best_guess_dagfrag(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_uint64 iDagNum,
	SG_rbtree* prbStartFromHids,
	SG_vhash* pvhConnectToHidsAndGens,
	SG_dagfrag** ppFrag)
{
	SG_uint32 i, countConnectTo;
	SG_rbtree_iterator* pit = NULL;
	SG_dagnode* pdn = NULL;
	SG_dagfrag* pFrag = NULL;
	SG_repo_fetch_dagnodes_handle* pdh = NULL;
	SG_int32 minGen = SG_INT32_MAX;		// shallowest "connect to" generation seen
	SG_int32 maxGen = 0;				// deepest "start from" generation seen
	SG_uint32 gensToFetch = 0;
	char* psz_repo_id = NULL;
	char* psz_admin_id = NULL;
	SG_bool bNextHid;
	const char* pszRefHid;
	SG_int32 gen;
#if TRACE_SYNC
	SG_int64 startTime;
	SG_int64 endTime;
#endif

	SG_NULLARGCHECK_RETURN(prbStartFromHids);

	/* Find the minimum generation in pertinent "connect to" nodes. */
	if (pvhConnectToHidsAndGens)
	{
		SG_ERR_CHECK( SG_vhash__count(pCtx, pvhConnectToHidsAndGens, &countConnectTo) );
		for (i = 0; i < countConnectTo; i++)
		{
			SG_int32 gen;	// note: intentionally shadows the outer 'gen'
			SG_ERR_CHECK( SG_vhash__get_nth_pair__int32(pCtx, pvhConnectToHidsAndGens, i, &pszRefHid, &gen) );
			if (gen < minGen)
				minGen = gen;
		}
	}

	/* Happens when pulling into an empty repo, or when an entire dag is
	   specifically requested. */
	if (minGen == SG_INT32_MAX)
		minGen = -1;

	SG_ERR_CHECK( SG_repo__fetch_dagnodes__begin(pCtx, pRepo, iDagNum, &pdh) );

	/* Find the maximum generation in pertinent "start from" nodes. */
	SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pit, prbStartFromHids, &bNextHid, &pszRefHid, NULL) );
	while (bNextHid)
	{
		SG_ERR_CHECK( SG_repo__fetch_dagnodes__one(pCtx, pRepo, pdh, pszRefHid, &pdn) );
		SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn, &gen) );
		if (gen > maxGen)
			maxGen = gen;
		SG_DAGNODE_NULLFREE(pCtx, pdn);
		SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pit, &bNextHid, &pszRefHid, NULL) );
	}
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);

	/* When the hint is useless (no connect-to data, or the start nodes are
	   not deeper than it), fall back to a fixed thickness per roundtrip. */
	if (maxGen <= minGen)
		gensToFetch = FALLBACK_GENS_PER_ROUNDTRIP;
	else
		gensToFetch = maxGen - minGen;

#if TRACE_SYNC
	{
		char buf_dagnum[SG_DAGNUM__BUF_MAX__HEX];
		SG_uint32 count;

		SG_ERR_CHECK( SG_dagnum__to_sz__hex(pCtx, iDagNum, buf_dagnum, sizeof(buf_dagnum)) );
		SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Building best guess dagfrag for dag %s...\n", buf_dagnum) );
		SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Starting from nodes:\n") );
		SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, prbStartFromHids) );
		SG_ERR_CHECK( SG_vhash_debug__dump_to_console__named(pCtx, pvhConnectToHidsAndGens, "Connecting to nodes") );
		SG_ERR_CHECK( SG_rbtree__count(pCtx, prbStartFromHids, &count) );
		SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "result has %d generations from %u starting nodes.\n", gensToFetch, count) );
		SG_ERR_CHECK( SG_console__flush(pCtx, SG_CS_STDERR) );
		SG_ERR_CHECK( SG_time__get_milliseconds_since_1970_utc(pCtx, &startTime) );
	}
#endif

	/* Return a frag with the corresponding generations filled in. */
	SG_ERR_CHECK( SG_repo__get_repo_id(pCtx, pRepo, &psz_repo_id) );
	SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id) );
	SG_ERR_CHECK( SG_dagfrag__alloc(pCtx, &pFrag, psz_repo_id, psz_admin_id, iDagNum) );
	SG_ERR_CHECK( SG_dagfrag__load_from_repo__multi(pCtx, pFrag, pRepo, prbStartFromHids, gensToFetch) );

#if TRACE_SYNC
	SG_ERR_CHECK( SG_time__get_milliseconds_since_1970_utc(pCtx, &endTime) );
	{
		SG_uint32 dagnodeCount;
		double seconds = ((double)endTime-(double)startTime)/1000;

		SG_ERR_CHECK( SG_dagfrag__dagnode_count(pCtx, pFrag, &dagnodeCount) );
		SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, " - %u nodes in frag, built in %1.3f seconds\n", dagnodeCount, seconds) );
		SG_ERR_CHECK( SG_dagfrag_debug__dump__console(pCtx, pFrag, "best-guess dagfrag", 0, SG_CS_STDERR) );
	}
#endif

	*ppFrag = pFrag;
	pFrag = NULL;	// ownership transferred to the caller

	/* Common cleanup */
fail:
	SG_NULLFREE(pCtx, psz_repo_id);
	SG_NULLFREE(pCtx, psz_admin_id);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
	SG_ERR_IGNORE( SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pdh) );
}
void SG_dagquery__how_are_dagnodes_related(SG_context * pCtx, SG_repo * pRepo, SG_uint64 dagnum, const char * pszHid1, const char * pszHid2, SG_bool bSkipDescendantCheck, SG_bool bSkipAncestorCheck, SG_dagquery_relationship * pdqRel) { SG_dagnode * pdn1 = NULL; SG_dagnode * pdn2 = NULL; SG_dagfrag * pFrag = NULL; SG_dagquery_relationship dqRel = SG_DAGQUERY_RELATIONSHIP__UNKNOWN; SG_int32 gen1, gen2; SG_bool bFound; SG_NULLARGCHECK_RETURN(pRepo); SG_NONEMPTYCHECK_RETURN(pszHid1); SG_NONEMPTYCHECK_RETURN(pszHid2); SG_NULLARGCHECK_RETURN(pdqRel); if (strcmp(pszHid1, pszHid2) == 0) { dqRel = SG_DAGQUERY_RELATIONSHIP__SAME; goto cleanup; } // fetch the dagnode for both HIDs. this throws when the HID is not found. SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, pszHid1, &pdn1) ); SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, pszHid2, &pdn2) ); // we say that 2 nodes are either: // [1] ancestor/descendant of each other; // [2] or that they are peers (cousins) of each other (no matter // how distant in the DAG). (that have an LCA, but we don't // care about it.) // get the generation of both dagnodes. if they are the same, then they // cannot have an ancestor/descendant relationship and therefore must be // peers/cousins (we don't care how close/distant they are). SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn1, &gen1) ); SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn2, &gen2) ); if (gen1 == gen2) { dqRel = SG_DAGQUERY_RELATIONSHIP__PEER; goto cleanup; } // see if one is an ancestor of the other. since we only have PARENT // edges in our DAG, we start with the deeper one and walk backwards // until we've visited all ancestors at the depth of the shallower one. // // i'm going to be lazy here and not reinvent a recursive-ish parent-edge // graph walker. instead, i'm going to create a DAGFRAG using the // deeper one and request the generation difference as the "thickness". 
// in theory, if we have an ancestor/descendant relationship, the // shallower one should be in the END-FRINGE of the DAGFRAG. // // i'm going to pick an arbitrary direction "cs1 is R of cs2". SG_ERR_CHECK( SG_dagfrag__alloc_transient(pCtx, dagnum, &pFrag) ); if (gen1 > gen2) // cs1 is *DEEPER* than cs2 { if (bSkipDescendantCheck) { dqRel = SG_DAGQUERY_RELATIONSHIP__UNKNOWN; } else { SG_ERR_CHECK( SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, pszHid1, (gen1 - gen2)) ); SG_ERR_CHECK( SG_dagfrag__query(pCtx, pFrag, pszHid2, NULL, NULL, &bFound, NULL) ); if (bFound) // pszHid2 is an ancestor of pszHid1. READ pszHid1 is a descendent of pszHid2. dqRel = SG_DAGQUERY_RELATIONSHIP__DESCENDANT; else // they are *distant* peers. dqRel = SG_DAGQUERY_RELATIONSHIP__PEER; } goto cleanup; } else { if (bSkipAncestorCheck) { dqRel = SG_DAGQUERY_RELATIONSHIP__UNKNOWN; } else { SG_ERR_CHECK( SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, pszHid2, (gen2 - gen1)) ); SG_ERR_CHECK( SG_dagfrag__query(pCtx, pFrag, pszHid1, NULL, NULL, &bFound, NULL) ); if (bFound) // pszHid1 is an ancestor of pszHid2. dqRel = SG_DAGQUERY_RELATIONSHIP__ANCESTOR; else // they are *distant* peers. dqRel = SG_DAGQUERY_RELATIONSHIP__PEER; } goto cleanup; } /*NOTREACHED*/ cleanup: *pdqRel = dqRel; fail: SG_DAGNODE_NULLFREE(pCtx, pdn1); SG_DAGNODE_NULLFREE(pCtx, pdn2); SG_DAGFRAG_NULLFREE(pCtx, pFrag); }
/**
 * Compare all the nodes of a single DAG in two repos.
 *
 * Verifies both repos have the same leaves for dag iDagNum, then compares
 * each leaf's ancestry recursively via _compare_dagnodes().
 *
 * *pbIdentical is set SG_TRUE only when every check passes; any mismatch
 * (or an error) leaves it SG_FALSE.
 */
static void _compare_one_dag(SG_context* pCtx,
							 SG_repo* pRepo1,
							 SG_repo* pRepo2,
							 SG_uint32 iDagNum,
							 SG_bool* pbIdentical)
{
	SG_bool bFinalResult = SG_FALSE;
	SG_rbtree* prbRepo1Leaves = NULL;
	SG_rbtree* prbRepo2Leaves = NULL;
	SG_uint32 iRepo1LeafCount, iRepo2LeafCount;
	SG_rbtree_iterator* pIterator = NULL;
	const char* pszId = NULL;
	SG_dagnode* pRepo1Dagnode = NULL;
	SG_dagnode* pRepo2Dagnode = NULL;
	SG_bool bFoundRepo1Leaf = SG_FALSE;
	SG_bool bFoundRepo2Leaf = SG_FALSE;
	SG_bool bDagnodesEqual = SG_FALSE;

	SG_NULLARGCHECK_RETURN(pRepo1);
	SG_NULLARGCHECK_RETURN(pRepo2);
	SG_NULLARGCHECK_RETURN(pbIdentical);

	SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo1, iDagNum, &prbRepo1Leaves) );
	SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo2, iDagNum, &prbRepo2Leaves) );

	// Quick reject: identical dags must have the same number of leaves.
	SG_ERR_CHECK( SG_rbtree__count(pCtx, prbRepo1Leaves, &iRepo1LeafCount) );
	SG_ERR_CHECK( SG_rbtree__count(pCtx, prbRepo2Leaves, &iRepo2LeafCount) );
	if (iRepo1LeafCount != iRepo2LeafCount)
	{
#if TRACE_SYNC
		SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "leaf count differs\n") );
#endif
		goto Different;
	}

	// Every leaf of repo 1 must exist in repo 2's leaf set, and the two
	// nodes (plus their ancestry) must compare equal.
	SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pIterator, prbRepo1Leaves, &bFoundRepo1Leaf, &pszId, NULL) );
	while (bFoundRepo1Leaf)
	{
		SG_ERR_CHECK( SG_rbtree__find(pCtx, prbRepo2Leaves, pszId, &bFoundRepo2Leaf, NULL) );
		if (!bFoundRepo2Leaf)
		{
#if TRACE_SYNC && 0
			SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "couldn't locate leaf\r\n") );
			SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Repo 1 leaves:\r\n") );
			SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo1Leaves) );
			SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Repo 2 leaves:\r\n") );
			SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo2Leaves) );
			SG_ERR_CHECK( SG_console__flush(pCtx, SG_CS_STDERR) );
#endif
			goto Different;
		}

		// NOTE(review): these calls pass no dagnum, while other call sites
		// in this codebase use SG_repo__fetch_dagnode(pCtx, repo, dagnum,
		// hid, &pdn) -- confirm this 4-argument form matches the current
		// prototype (iDagNum is available here if it is needed).
		SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo1, pszId, &pRepo1Dagnode) );
		SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo2, pszId, &pRepo2Dagnode) );

		SG_ERR_CHECK( _compare_dagnodes(pCtx, pRepo1, pRepo1Dagnode, pRepo2, pRepo2Dagnode, &bDagnodesEqual) );

		SG_DAGNODE_NULLFREE(pCtx, pRepo1Dagnode);
		SG_DAGNODE_NULLFREE(pCtx, pRepo2Dagnode);

		if (!bDagnodesEqual)
			goto Different;

		SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pIterator, &bFoundRepo1Leaf, &pszId, NULL) );
	}

	bFinalResult = SG_TRUE;

Different:
	*pbIdentical = bFinalResult;

	// fall through
fail:
	SG_RBTREE_NULLFREE(pCtx, prbRepo1Leaves);
	SG_RBTREE_NULLFREE(pCtx, prbRepo2Leaves);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIterator);
}
/** * Recursively compare dagnodes depth-first. */ static void _compare_dagnodes(SG_context* pCtx, SG_repo* pRepo1, SG_dagnode* pDagnode1, SG_repo* pRepo2, SG_dagnode* pDagnode2, SG_bool* pbIdentical) { SG_bool bDagnodesEqual = SG_FALSE; SG_uint32 iParentCount1, iParentCount2; const char** paParentIds1 = NULL; const char** paParentIds2 = NULL; SG_dagnode* pParentDagnode1 = NULL; SG_dagnode* pParentDagnode2 = NULL; SG_NULLARGCHECK_RETURN(pDagnode1); SG_NULLARGCHECK_RETURN(pDagnode2); SG_NULLARGCHECK_RETURN(pbIdentical); *pbIdentical = SG_TRUE; // Compare the dagnodes. If they're different, return false. SG_ERR_CHECK( SG_dagnode__equal(pCtx, pDagnode1, pDagnode2, &bDagnodesEqual) ); if (!bDagnodesEqual) { #if TRACE_SYNC SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "dagnodes not equal\n") ); #endif *pbIdentical = SG_FALSE; return; } // The dagnodes are identical. Look at their parents. SG_ERR_CHECK( SG_dagnode__get_parents(pCtx, pDagnode1, &iParentCount1, &paParentIds1) ); SG_ERR_CHECK( SG_dagnode__get_parents(pCtx, pDagnode2, &iParentCount2, &paParentIds2) ); if (iParentCount1 == iParentCount2) { // The dagnodes have the same number of parents. Compare the parents recursively. SG_uint32 i; for (i = 0; i < iParentCount1; i++) { SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo1, paParentIds1[i], &pParentDagnode1) ); SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo2, paParentIds2[i], &pParentDagnode2) ); SG_ERR_CHECK( _compare_dagnodes(pCtx, pRepo1, pParentDagnode1, pRepo2, pParentDagnode2, pbIdentical) ); SG_DAGNODE_NULLFREE(pCtx, pParentDagnode1); SG_DAGNODE_NULLFREE(pCtx, pParentDagnode2); if (!(*pbIdentical)) break; } } else { // The dagnodes have a different number of parents. *pbIdentical = SG_FALSE; } // fall through fail: SG_NULLFREE(pCtx, paParentIds1); SG_NULLFREE(pCtx, paParentIds2); SG_DAGNODE_NULLFREE(pCtx, pParentDagnode1); SG_DAGNODE_NULLFREE(pCtx, pParentDagnode2); }
/**
 * Work-queue callback for the dagfrag graph walk: evaluate one queued HID,
 * decide its placement in the fragment (interior member vs. end-fringe),
 * then remove it from the queue.
 *
 * Always exits by throwing SG_ERR_RESTART_FOREACH on success so the caller
 * restarts its iteration over the (now modified) queue.
 */
static void _process_work_queue_cb(SG_context * pCtx,
								   const char * szHid,
								   SG_UNUSED_PARAM(void * pAssocData),
								   void * pVoidCallerData)
{
	// we are given a random item in the work_queue.
	//
	// lookup the corresponding DATA node in the Cache, if it has one.
	//
	// and then evaluate where this node belongs:

	struct _work_queue_data * pWorkQueueData = (struct _work_queue_data *)pVoidCallerData;
	_my_data * pDataCached = NULL;
	SG_dagnode * pDagnodeAllocated = NULL;
	SG_bool bPresent = SG_FALSE;

	SG_UNUSED(pAssocData);

	SG_ERR_CHECK( _cache__lookup(pCtx, pWorkQueueData->pFrag,szHid,&pDataCached,&bPresent) );
	if (!bPresent)
	{
		// dagnode is not present in the cache. therefore, we've never visited this
		// dagnode before. add it to the cache with proper settings and maybe add
		// all of the parents to the work queue.

		SG_int32 myGeneration;

		SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pWorkQueueData->pRepo, szHid,&pDagnodeAllocated) );
		SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pDagnodeAllocated,&myGeneration) );

		if ((myGeneration > pWorkQueueData->generationEnd))
		{
			// still inside the requested generation bounds: the node is an
			// interior member and its parents need evaluating too.
			SG_ERR_CHECK( _cache__add__dagnode(pCtx,
											   pWorkQueueData->pFrag,
											   myGeneration,
											   pDagnodeAllocated,SG_DFS_INTERIOR_MEMBER,
											   &pDataCached) );
			pDagnodeAllocated = NULL;	// cache takes ownership of dagnode
			SG_ERR_CHECK( _add_parents_to_work_queue(pCtx,
													 pDataCached->m_pDagnode,
													 pWorkQueueData->prb_WorkQueue) );
		}
		else
		{
			// too old for the requested bounds: record it on the end-fringe
			// (by HID only) and discard the fetched dagnode.
			SG_ERR_CHECK( _cache__add__fringe(pCtx, pWorkQueueData->pFrag, szHid) );
			SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
		}
	}
	else
	{
		// dagnode already present in the cache. therefore, we have already visited it
		// before. we can change our minds about the state of this dagnode if something
		// has changed (such as the fragment bounds being widened).

		switch (pDataCached->m_state)
		{
		default:
		//case SG_DFS_UNKNOWN:
			SG_ASSERT_RELEASE_FAIL2(  (0),
							  (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
							   pDataCached->m_state,szHid)  );

		case SG_DFS_START_MEMBER:
			// a dagnode has a parent that we are considering a START node.
			// this can happen when we were started from a non-leaf node and
			// then a subsequent call to __load is given a true leaf node or
			// a node deeper in the tree that has our original start node as
			// a parent.
			//
			// clear the start bit.  (we only want true fragment-terminal
			// nodes marked as start nodes.)

			pDataCached->m_state = SG_DFS_INTERIOR_MEMBER;

			// FALL-THRU-INTENDED

		case SG_DFS_INTERIOR_MEMBER:
			// a dagnode that we have already visited is being re-visited.
			// this happpens for a number of reasons, such as when we hit
			// the parent of a branch/fork.  we might get visisted because
			// we are a parent of each child.
			//
			// we also get revisited when the caller expands the scope of
			// the fragment.

			if (pWorkQueueData->generationEnd < pDataCached->m_genDagnode)
			{
				// the caller has expanded the scope of the fragment to include
				// older generations than the last time we visited this node.
				// this doesn't affect the state of this node, but it could mean
				// that older ancestors of this node should be looked at.

				SG_ERR_CHECK( _add_parents_to_work_queue(pCtx,
														 pDataCached->m_pDagnode,
														 pWorkQueueData->prb_WorkQueue) );
			}
			break;

		case SG_DFS_END_FRINGE:
			// a dagnode that was on the end-fringe is being re-evaluated.

			if (pDataCached->m_genDagnode > pWorkQueueData->generationEnd)
			{
				// it looks like the bounds of the fragment were expanded and
				// now includes this dagnode.
				//
				// move it from END-FRINGE to INCLUDE state.
				// and re-eval all of its parents.

				pDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
				SG_ERR_CHECK( _add_parents_to_work_queue(pCtx,
														 pDataCached->m_pDagnode,
														 pWorkQueueData->prb_WorkQueue) );
			}
			break;
		}
	}

	// we have completely dealt with this dagnode, so remove it from the work queue
	// and cause our caller to restart the iteration (because we changed the queue).

	SG_ERR_CHECK( SG_rbtree__remove(pCtx,pWorkQueueData->prb_WorkQueue,szHid) );
	SG_ERR_THROW( SG_ERR_RESTART_FOREACH );

fail:
	SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
/**
 * Verify that operations requiring a repo transaction fail with
 * SG_ERR_INVALIDARG when the transaction argument is NULL, and that the
 * same operations succeed when a real transaction is supplied.
 */
void MyFn(no_repo_tx)(SG_context * pCtx, SG_repo* pRepo)
{
	SG_repo_tx_handle* pRepoTx = NULL;
	SG_repo_store_blob_handle* pBlobHandle;
	SG_byte* pRandomBuf = NULL;
	SG_uint32 lenRandomBuf = 0;
	char* pszHidStored = NULL;
	char* pszHashOfTid = NULL;
	SG_dagnode* pdn = NULL;
	char bufTid[SG_TID_MAX_BUFFER_LENGTH];

	// Tx lifecycle and blob-begin must reject a missing transaction.
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__commit_tx(pCtx, pRepo, NULL), SG_ERR_INVALIDARG );  // commit_tx without repo tx didn't fail with INVALIDARG.
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__abort_tx(pCtx, pRepo, NULL), SG_ERR_INVALIDARG );  // abort_tx without repo tx didn't fail with INVALIDARG.
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__store_blob__begin(pCtx, pRepo, NULL, SG_BLOBENCODING__FULL, NULL, 10, 0, NULL, &pBlobHandle), SG_ERR_INVALIDARG );  // store_blob__begin without repo tx didn't fail with INVALIDARG.

	// Open a real transaction and start storing a random blob so that the
	// end/abort variants can be probed with a live blob handle.
	VERIFY_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pRepoTx) );
	VERIFY_ERR_CHECK( MyFn(alloc_random_buffer)(pCtx, &pRandomBuf, &lenRandomBuf) );
	VERIFY_ERR_CHECK( SG_repo__store_blob__begin(pCtx, pRepo, pRepoTx, SG_BLOBENCODING__FULL, NULL, lenRandomBuf, 0, NULL, &pBlobHandle) );
	VERIFY_ERR_CHECK( SG_repo__store_blob__chunk(pCtx, pRepo, pBlobHandle, lenRandomBuf, pRandomBuf, NULL) );

	// end/abort must also reject a missing transaction.
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__store_blob__end(pCtx, pRepo, NULL, &pBlobHandle, &pszHidStored), SG_ERR_INVALIDARG );  // store_blob__end without repo tx didn't fail with INVALIDARG.
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__store_blob__abort(pCtx, pRepo, NULL, &pBlobHandle), SG_ERR_INVALIDARG );  // store_blob__abort without repo tx didn't fail with INVALIDARG.

	// Finish the store properly, leaving the repo in a clean state.
	VERIFY_ERR_CHECK( SG_repo__store_blob__end(pCtx, pRepo, pRepoTx, &pBlobHandle, &pszHidStored) );
	VERIFY_ERR_CHECK( SG_repo__commit_tx(pCtx, pRepo, &pRepoTx) );

	// Build a frozen dagnode (a hash of a random TID stands in for a real
	// changeset HID) and confirm store_dagnode also requires a tx.
	VERIFY_ERR_CHECK( SG_tid__generate(pCtx, bufTid, sizeof(bufTid)) );
	VERIFY_ERR_CHECK( SG_repo__alloc_compute_hash__from_bytes(pCtx, pRepo, sizeof(bufTid), (SG_byte *)bufTid, &pszHashOfTid) );
	VERIFY_ERR_CHECK( SG_dagnode__alloc(pCtx, &pdn, pszHashOfTid, 1, 0) );
	VERIFY_ERR_CHECK( SG_dagnode__freeze(pCtx, pdn) );
	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD( SG_repo__store_dagnode(pCtx, pRepo, NULL, SG_DAGNUM__TESTING__NOTHING, pdn), SG_ERR_INVALIDARG );  // store_dagnode without repo tx didn't fail with INVALIDARG.

	// The higher-level store_blob_from_memory/store_blob_from_file wrappers
	// are built on begin/chunk/end, so they are deliberately not re-tested.

	// Fall through to common cleanup.
fail:
	SG_NULLFREE(pCtx, pszHidStored);
	SG_NULLFREE(pCtx, pRandomBuf);
	SG_NULLFREE(pCtx, pszHashOfTid);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
}
/**
 * Look up the hooks registered for psz_interface and return (a copy of)
 * the single installed hook record via ppvh_latest_version.
 *
 * If multiple versions are installed, every record other than the one
 * with the highest "version" is deleted from the VC_HOOKS dag inside a
 * zing transaction.  Throws SG_ERR_VC_HOOK_MISSING when no hook is
 * installed, and SG_ERR_VC_HOOK_AMBIGUOUS when two records share the
 * highest version number.
 *
 * NOTE(review): in the multiple-versions path pvh_latest_hook is never
 * populated, so the caller receives NULL even though a hook survives the
 * cleanup -- confirm callers re-query afterwards, or that this is intended.
 */
void sg_vc_hooks__lookup_by_interface__single_result(
        SG_context* pCtx,
        SG_repo* pRepo,
        const char* psz_interface,
        SG_vhash** ppvh_latest_version
        )
{
	//This version will return only the hook with the largest version.
	//If multiple versions of the hook are defined,
	//all old versions will be deleted.
	SG_varray* pva_hooks = NULL;
	SG_vhash* pvh_latest_hook = NULL;
	SG_zingtx* pztx = NULL;
	char* psz_hid_cs_leaf = NULL;
	SG_dagnode* pdn = NULL;
	SG_changeset* pcs = NULL;

	SG_ERR_CHECK( SG_vc_hooks__lookup_by_interface(
                pCtx,
                pRepo,
                psz_interface,
                &pva_hooks
                ) );
	if (!pva_hooks)
	{
		SG_ERR_THROW2( SG_ERR_VC_HOOK_MISSING, (pCtx, "%s", psz_interface) );
	}
	else
	{
		SG_uint32 count = 0;

		SG_ERR_CHECK( SG_varray__count(pCtx, pva_hooks, &count) );
		if (0 == count)
		{
			SG_ERR_THROW2( SG_ERR_VC_HOOK_MISSING, (pCtx, "%s", psz_interface) );
		}
		else
		{
			if (count > 1)
			{
				SG_uint32 i = 0;
				SG_int32 nVersion = 0;
				SG_int32 nHighestVersion = -1;
				SG_int32 nAmbiguousVersion = -2;	// sentinel: no duplicate version seen yet
				const char * hidRecToSave = NULL;	// hidrec of the highest-versioned hook
				const char * hidrec = NULL;
				SG_vhash * pvh_current_hook = NULL;

				//There are multiple versions installed for this hook.
				//delete the lesser numbered versions.

				// First pass: find the highest version and remember its hidrec.
				// Seeing the running maximum repeated marks it ambiguous.
				for (i=0; i < count; i++)
				{
					SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pva_hooks, i, &pvh_current_hook) );
					SG_ERR_CHECK( SG_vhash__get__int32(pCtx, pvh_current_hook, "version", &nVersion) );
					if (nVersion == nHighestVersion)
					{
						nAmbiguousVersion = nHighestVersion;
					}
					if (nVersion > nHighestVersion)
					{
						nHighestVersion = nVersion;
						SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh_current_hook, "hidrec", &hidRecToSave) );
					}
				}
				if (nAmbiguousVersion == nHighestVersion)
					SG_ERR_THROW2( SG_ERR_VC_HOOK_AMBIGUOUS, (pCtx, "%s defined multiple times at version %d", psz_interface, nHighestVersion) );
				if (nHighestVersion > 0 && hidRecToSave != NULL)
				{
					SG_audit q;

					SG_ERR_CHECK( SG_audit__init(pCtx,&q,pRepo,SG_AUDIT__WHEN__NOW,SG_AUDIT__WHO__FROM_SETTINGS) );
					SG_ERR_CHECK( SG_zing__get_leaf(pCtx, pRepo, NULL, SG_DAGNUM__VC_HOOKS, &psz_hid_cs_leaf) );

					/* start a changeset */
					SG_ERR_CHECK( SG_zing__begin_tx(pCtx, pRepo, SG_DAGNUM__VC_HOOKS, q.who_szUserId, psz_hid_cs_leaf, &pztx) );
					SG_ERR_CHECK( SG_zingtx__add_parent(pCtx, pztx, psz_hid_cs_leaf) );

					// Second pass: delete every record except the winner.
					for (i=0; i < count; i++)
					{
						SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pva_hooks, i, &pvh_current_hook) );
						SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh_current_hook, "hidrec", &hidrec) );
						if (SG_strcmp__null(hidrec, hidRecToSave) != 0)
						{
							//This isn't the recid to save.  Delete it!
							SG_ERR_CHECK( SG_zingtx__delete_record__hid(pCtx, pztx, "hook", hidrec) );
						}
					}

					/* commit the changes */
					SG_ERR_CHECK( SG_zing__commit_tx(pCtx, q.when_int64, &pztx, &pcs, &pdn, NULL) );
				}
			}
			else
			{
				//There's only one hook installed return it.
				SG_vhash * pvh_temp = NULL;

				SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pva_hooks, 0, &pvh_temp) );
				SG_ERR_CHECK( SG_VHASH__ALLOC__COPY(pCtx, &pvh_latest_hook, pvh_temp) );
			}
		}
	}

	// NOTE(review): pvh_latest_hook is not freed on the fail path or when
	// ppvh_latest_version is NULL -- confirm SG_RETURN_AND_NULL's semantics
	// cover those cases.
	SG_RETURN_AND_NULL(pvh_latest_hook, ppvh_latest_version);

fail:
	// abort any zing transaction still open (i.e. we failed before commit).
	if (pztx)
	{
		SG_ERR_IGNORE( SG_zing__abort_tx(pCtx, &pztx) );
	}
	SG_NULLFREE(pCtx, psz_hid_cs_leaf);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_CHANGESET_NULLFREE(pCtx, pcs);
	SG_VARRAY_NULLFREE(pCtx, pva_hooks);
}
/**
 * Find every changeset that is "new" relative to a common point: the set
 * of ancestors of pszNewNodeHid (inclusive) that are not ancestors of
 * pszOldNodeHid.
 *
 * Works with a tagged work queue: both endpoints are inserted, marked by
 * which side they descend from, and ancestors are popped until no
 * new-side-only node remains on the queue.
 *
 * *ppResults receives the HIDs of the new nodes; the caller owns the
 * returned stringarray.
 *
 * Improvement over the previous revision: the success path duplicated the
 * entire fail-path teardown (queue entries, queue storage, revno cache).
 * The cleanup is now a single fall-through block, matching the
 * "// fall through / fail:" convention used by the sibling functions in
 * this file.  Behavior is unchanged: on success pResults is NULLed after
 * the handoff, so the shared frees are no-ops for it.
 */
void SG_dagquery__find_new_since_common(SG_context * pCtx,
										SG_repo * pRepo,
										SG_uint64 dagnum,
										const char * pszOldNodeHid,
										const char * pszNewNodeHid,
										SG_stringarray ** ppResults)
{
	_fnsc_work_queue_t workQueue = {NULL, 0, 0, 0, NULL};
	SG_uint32 i;
	SG_dagnode * pDagnode = NULL;
	SG_stringarray * pResults = NULL;

	SG_ASSERT(pCtx!=NULL);
	SG_NULLARGCHECK(pRepo);
	SG_NONEMPTYCHECK(pszOldNodeHid);
	SG_NONEMPTYCHECK(pszNewNodeHid);
	SG_NULLARGCHECK(ppResults);

	// Seed the work queue with both endpoints, each tagged by which side
	// of the comparison it is an ancestor of.
	SG_ERR_CHECK( SG_allocN(pCtx, _FNSC_WORK_QUEUE_INIT_LENGTH, workQueue.p) );
	workQueue.allocatedLength = _FNSC_WORK_QUEUE_INIT_LENGTH;
	SG_ERR_CHECK( SG_RBTREE__ALLOC(pCtx, &workQueue.pRevnoCache) );
	SG_ERR_CHECK( _fnsc_work_queue__insert(pCtx, &workQueue, pszOldNodeHid, dagnum, pRepo, _ANCESTOR_OF_OLD) );
	SG_ERR_CHECK( _fnsc_work_queue__insert(pCtx, &workQueue, pszNewNodeHid, dagnum, pRepo, _ANCESTOR_OF_NEW) );

	SG_ERR_CHECK( SG_STRINGARRAY__ALLOC(pCtx, &pResults, 32) );

	// Pop until no node tagged as a new-side-only ancestor remains.
	// Every popped node pushes its parents back with the same tag; nodes
	// popped as _ANCESTOR_OF_NEW are part of the answer.
	while(workQueue.numAncestorsOfNewOnTheQueue > 0)
	{
		const char * pszHidRef = NULL;
		SG_byte isAncestorOf = 0;

		SG_ERR_CHECK( _fnsc_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &isAncestorOf) );
		if (isAncestorOf==_ANCESTOR_OF_NEW)
			SG_ERR_CHECK( SG_stringarray__add(pCtx, pResults, pszHidRef) );

		{
			SG_uint32 count_parents = 0;
			const char** parents = NULL;
			SG_ERR_CHECK( SG_dagnode__get_parents__ref(pCtx, pDagnode, &count_parents, &parents) );
			for(i=0; i<count_parents; ++i)
				SG_ERR_CHECK( _fnsc_work_queue__insert(pCtx, &workQueue, parents[i], dagnum, pRepo, isAncestorOf) );
		}

		SG_DAGNODE_NULLFREE(pCtx, pDagnode);
	}

	// Hand the result to the caller; NULLing pResults makes the shared
	// cleanup below a no-op for it.
	*ppResults = pResults;
	pResults = NULL;

	// fall through
fail:
	// Single teardown for both success and failure paths.
	for(i=0; i<workQueue.length; ++i)
		SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
	SG_NULLFREE(pCtx, workQueue.p);
	SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);
	SG_DAGNODE_NULLFREE(pCtx, pDagnode);
	SG_STRINGARRAY_NULLFREE(pCtx, pResults);
}
/**
 * Compute the db-record delta between two changesets of a dag.
 *
 * Three cases, chosen by comparing the generations of the endpoints:
 *   - "from" deeper and a direct parent-path exists: apply that one path.
 *   - "to" deeper and a direct path exists: apply the path with the
 *     add/remove roles swapped.
 *   - otherwise: go through the LCA of the two nodes and combine the two
 *     path deltas (from->ancestor, then ancestor->to reversed).
 *
 * *ppvh_add / *ppvh_remove receive the resulting sets; caller frees both.
 */
void SG_repo__db__calc_delta(
        SG_context * pCtx,
        SG_repo* pRepo,
        SG_uint64 dagnum,
        const char* psz_csid_from,
        const char* psz_csid_to,
        SG_uint32 flags,
        SG_vhash** ppvh_add,
        SG_vhash** ppvh_remove
        )
{
	SG_dagnode* pdn_from = NULL;
	SG_dagnode* pdn_to = NULL;
	SG_int32 gen_from = -1;
	SG_int32 gen_to = -1;
	SG_varray* pva_direct_backward_path = NULL;
	SG_varray* pva_direct_forward_path = NULL;
	SG_vhash* pvh_add = NULL;
	SG_vhash* pvh_remove = NULL;
	SG_rbtree* prb_temp = NULL;
	SG_daglca* plca = NULL;
	char* psz_csid_ancestor = NULL;

	SG_NULLARGCHECK_RETURN(psz_csid_from);
	SG_NULLARGCHECK_RETURN(psz_csid_to);
	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(ppvh_add);
	SG_NULLARGCHECK_RETURN(ppvh_remove);

	// Generations determine which direction (if any) a direct path can run.
	SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_from, &pdn_from) );
	SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn_from, &gen_from) );
	SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_to, &pdn_to) );
	SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn_to, &gen_to) );

	if (gen_from > gen_to)
	{
		// "from" is deeper: try walking straight back from "from" to "to".
		SG_ERR_CHECK( SG_repo__dag__find_direct_backward_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    psz_csid_from,
                    psz_csid_to,
                    &pva_direct_backward_path
                    ) );
		if (pva_direct_backward_path)
		{
			SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_add) );
			SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_remove) );
			SG_ERR_CHECK( SG_db__make_delta_from_path(
                        pCtx,
                        pRepo,
                        dagnum,
                        pva_direct_backward_path,
                        flags,
                        pvh_add,
                        pvh_remove
                        ) );
		}
	}
	else if (gen_from < gen_to)
	{
		// "to" is deeper: look for the path in the opposite direction and,
		// if found, apply it with the add/remove roles swapped.
		SG_ERR_CHECK( SG_repo__dag__find_direct_backward_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    psz_csid_to,
                    psz_csid_from,
                    &pva_direct_forward_path
                    ) );
		if (pva_direct_forward_path)
		{
			SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_add) );
			SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_remove) );
			SG_ERR_CHECK( SG_db__make_delta_from_path(
                        pCtx,
                        pRepo,
                        dagnum,
                        pva_direct_forward_path,
                        flags,
                        pvh_remove,
                        pvh_add
                        ) );
		}
	}

	// No direct path (or equal generations): route through the LCA and
	// combine the deltas of the two legs.
	if (!pvh_add && !pvh_remove)
	{
		SG_ERR_CHECK( SG_RBTREE__ALLOC(pCtx, &prb_temp) );
		SG_ERR_CHECK( SG_rbtree__add(pCtx,prb_temp,psz_csid_from) );
		SG_ERR_CHECK( SG_rbtree__add(pCtx,prb_temp,psz_csid_to) );
		SG_ERR_CHECK( SG_repo__get_dag_lca(pCtx,pRepo,dagnum,prb_temp,&plca) );
		{
			const char* psz_hid = NULL;
			SG_daglca_node_type node_type = 0;
			SG_int32 gen = -1;

			// Take the first node from the LCA iterator as the ancestor.
			// NOTE(review): this relies on the iterator yielding the LCA
			// first -- confirm against the SG_daglca iterator ordering.
			SG_ERR_CHECK( SG_daglca__iterator__first(
                        pCtx,
                        NULL,
                        plca,
                        SG_FALSE,
                        &psz_hid,
                        &node_type,
                        &gen,
                        NULL
                        ) );
			SG_ERR_CHECK( SG_STRDUP(pCtx, psz_hid, &psz_csid_ancestor) );
		}
		SG_ERR_CHECK( SG_repo__dag__find_direct_backward_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    psz_csid_from,
                    psz_csid_ancestor,
                    &pva_direct_backward_path
                    ) );
		SG_ERR_CHECK( SG_repo__dag__find_direct_backward_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    psz_csid_to,
                    psz_csid_ancestor,
                    &pva_direct_forward_path
                    ) );
		SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_add) );
		SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_remove) );
		// Leg 1: from -> ancestor, applied as-is.
		SG_ERR_CHECK( SG_db__make_delta_from_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    pva_direct_backward_path,
                    flags,
                    pvh_add,
                    pvh_remove
                    ) );
		// Leg 2: to -> ancestor, applied with add/remove swapped.
		SG_ERR_CHECK( SG_db__make_delta_from_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    pva_direct_forward_path,
                    flags,
                    pvh_remove,
                    pvh_add
                    ) );
	}

	// Transfer ownership of the result sets to the caller.
	*ppvh_add = pvh_add;
	pvh_add = NULL;

	*ppvh_remove = pvh_remove;
	pvh_remove = NULL;

fail:
	SG_NULLFREE(pCtx, psz_csid_ancestor);
	SG_RBTREE_NULLFREE(pCtx, prb_temp);
	SG_DAGLCA_NULLFREE(pCtx, plca);
	SG_VHASH_NULLFREE(pCtx, pvh_add);
	SG_VHASH_NULLFREE(pCtx, pvh_remove);
	SG_VARRAY_NULLFREE(pCtx, pva_direct_backward_path);
	SG_VARRAY_NULLFREE(pCtx, pva_direct_forward_path);
	SG_DAGNODE_NULLFREE(pCtx, pdn_from);
	SG_DAGNODE_NULLFREE(pCtx, pdn_to);
}
/**
 * Add an already-fetched dagnode to the fragment.
 *
 * Takes ownership of *ppdn in every case: when the node enters the cache,
 * *ppdn is NULLed; otherwise the common cleanup frees the caller's node.
 */
void SG_dagfrag__add_dagnode(SG_context * pCtx,
							 SG_dagfrag * pFrag,
							 SG_dagnode** ppdn)
{
	_my_data * pCacheEntry = NULL;
	SG_bool bInCache = SG_FALSE;
	const char* pszHid = NULL;
	SG_dagnode* pDagnodeRef = NULL;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NULLARGCHECK_RETURN(ppdn);
	pDagnodeRef = *ppdn;

	// Growing the fragment invalidates the generation-sorted member cache
	// (see __foreach_member()); dropping it is cheaper than keeping it in
	// sync with the real cache as we mutate it.
	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;

	SG_ERR_CHECK( SG_dagnode__get_id_ref(pCtx, pDagnodeRef, &pszHid) );

#if DEBUG && TRACE_DAGFRAG && 0
	SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Adding [%s] to frag.\r\n", pszHid) );
	SG_ERR_CHECK( SG_console__flush(pCtx, SG_CS_STDERR) );
#endif

	// Has some earlier walk already put this dagnode in the cache?
	SG_ERR_CHECK( _cache__lookup(pCtx, pFrag, pszHid, &pCacheEntry, &bInCache) );

	if (!bInCache)
	{
		// Never seen before: enter it directly as a START member (no trip
		// through the work queue needed), then queue all of its parents.
		SG_ERR_CHECK( _cache__add__dagnode(pCtx, pFrag, 0, pDagnodeRef, SG_DFS_START_MEMBER, &pCacheEntry) );
		*ppdn = NULL;	// the cache owns the dagnode now
		SG_ERR_CHECK( _add_parents(pCtx, pFrag, pCacheEntry->m_pDagnode) );
	}
	else
	{
		// Already visited during a previous walk; possibly upgrade it.
		switch (pCacheEntry->m_state)
		{
		case SG_DFS_END_FRINGE:
			// Was only on the end-fringe: pull it into the fragment proper
			// and re-evaluate its parents.
			if (!pCacheEntry->m_pDagnode)
			{
				pCacheEntry->m_pDagnode = pDagnodeRef;
				*ppdn = NULL;	// the cache owns the dagnode now
			}
			pCacheEntry->m_state = SG_DFS_INTERIOR_MEMBER;
			SG_ERR_CHECK( _add_parents(pCtx, pFrag, pCacheEntry->m_pDagnode) );
			break;

		default:
			// Any other state: nothing to change for this node.
			break;
		}
	}

	// fall through
fail:
	// If ownership was not transferred above, release the caller's node.
	SG_DAGNODE_NULLFREE(pCtx, *ppdn);
}