/**
 * Dag-walker visitor: records every node visited (other than the starting
 * node itself) in pDagWalkData->prbVisitedNodes, and stops the walk once
 * the generation drops below pDagWalkData->genLimit.
 */
static void _dagwalk_callback(SG_context* pCtx,
							  SG_UNUSED_PARAM(SG_repo* pRepo),
							  void* pData,
							  SG_dagnode* pCurrentNode,
							  SG_UNUSED_PARAM(SG_rbtree* pDagnodeCache),
							  SG_bool* pbContinue)
{
	_dagwalk_data* pWalkData = (_dagwalk_data*)pData;
	const char* szHid = NULL;
	SG_int32 gen;

	SG_UNUSED(pRepo);
	SG_UNUSED(pDagnodeCache);

	// Nothing below the generation limit is of interest: halt the walk.
	SG_ERR_CHECK_RETURN(  SG_dagnode__get_generation(pCtx, pCurrentNode, &gen)  );
	if (gen < pWalkData->genLimit)
	{
		*pbContinue = SG_FALSE;
		return;
	}

	SG_ERR_CHECK_RETURN(  SG_dagnode__get_id_ref(pCtx, pCurrentNode, &szHid)  );

	// The starting node is not recorded as "visited".
	if (strcmp(pWalkData->pszStartNodeHid, szHid) == 0)
		return;

	SG_ERR_CHECK_RETURN(  SG_rbtree__update(pCtx, pWalkData->prbVisitedNodes, szHid)  );

	// TODO: Stop walking when this node and all its siblings are already in prbVisitedNodes?
}
/**
 * Test partial-HID lookup: creates a repo with one committed file, then
 * verifies that a 10-character HID prefix resolves back to the full
 * changeset HID via both the dagnode and blob lookup paths, and that a
 * tag lookup also resolves to the same changeset.
 *
 * Cleanup is shared: the success path falls through into the fail: label.
 */
void u0051_hidlookup_test__1(SG_context * pCtx, SG_pathname* pPathTopDir)
{
	char bufName[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathWorkingDir = NULL;
	SG_pathname* pPathFile = NULL;
	SG_dagnode* pdn = NULL;
	const char* psz_hid_cs = NULL;     // borrowed ref into pdn; do not free
	SG_repo* pRepo = NULL;
	char buf_partial[256];
	char* psz_result = NULL;
	SG_audit q;

	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, bufName, sizeof(bufName), 32) );

	/* create the working dir */
	VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, bufName) );
	VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir) );

	/* add stuff */
	VERIFY_ERR_CHECK( u0051_hidlookup__create_file__numbers(pCtx, pPathWorkingDir, "aaa", 20) );

	/* create the repo */
	VERIFY_ERR_CHECK( _ut_pt__new_repo(pCtx, bufName, pPathWorkingDir) );
	VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
	VERIFY_ERR_CHECK( u0051_hidlookup__commit_all(pCtx, pPathWorkingDir, &pdn) );

	VERIFY_ERR_CHECK( SG_dagnode__get_id_ref(pCtx, pdn, &psz_hid_cs) );
	VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, bufName, &pRepo) );

	// Tag the changeset so the tag-lookup check below has something to find.
	VERIFY_ERR_CHECK( SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS) );
	VERIFY_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "remember", &q) );

	// Build a 10-character prefix of the changeset HID.
	VERIFY_ERR_CHECK( SG_strcpy(pCtx, buf_partial, sizeof(buf_partial), psz_hid_cs) );
	buf_partial[10] = 0;

	// Prefix lookup through the version-control dag.
	VERIFY_ERR_CHECK( SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, buf_partial, &psz_result) );
	VERIFY_COND("found", (0 == strcmp(psz_result, psz_hid_cs)));
	SG_NULLFREE(pCtx, psz_result);

	// Prefix lookup through blob storage (the changeset is itself stored
	// as a blob, so the same prefix resolves to the same full HID).
	VERIFY_ERR_CHECK( SG_repo__hidlookup__blob(pCtx, pRepo, buf_partial, &psz_result) );
	VERIFY_COND("found", (0 == strcmp(psz_result, psz_hid_cs)));
	SG_NULLFREE(pCtx, psz_result);

	// Tag lookup resolves back to the changeset HID.
	VERIFY_ERR_CHECK( SG_vc_tags__lookup__tag(pCtx, pRepo, "remember", &psz_result) );
	VERIFY_COND("found", (0 == strcmp(psz_result, psz_hid_cs)));
	SG_NULLFREE(pCtx, psz_result);

fail:
	// Shared cleanup; the NULLFREE macros are no-ops on NULL pointers.
	SG_NULLFREE(pCtx, psz_result);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathFile);
}
/**
 * Pops the most recently pushed entry off the work queue, removing its
 * HID from the revno cache, and hands the entry's fields to the caller.
 *
 * On error (from the id lookup or the cache removal) the queue is left
 * unmodified and the out-parameters are not written.
 */
static void _hrca_work_queue__pop(
	SG_context * pCtx,
	_hrca_work_queue_t * pWorkQueue,
	SG_dagnode ** ppDagnode,
	const char ** ppszHidRef,
	SG_bitvector ** ppIsAncestorOf
	)
{
	const char * szHid = NULL;

	// Resolve the top entry's HID and drop it from the revno cache before
	// shrinking the queue, so a failure leaves everything untouched.
	SG_ERR_CHECK_RETURN(  SG_dagnode__get_id_ref(pCtx, pWorkQueue->p[pWorkQueue->length-1].pDagnode, &szHid)  );
	SG_ERR_CHECK_RETURN(  SG_rbtree__remove(pCtx, pWorkQueue->pRevnoCache, szHid)  );

	--pWorkQueue->length;

	// Hand the popped entry back to the caller.
	*ppszHidRef = szHid;
	*ppDagnode = pWorkQueue->p[pWorkQueue->length].pDagnode;
	*ppIsAncestorOf = pWorkQueue->p[pWorkQueue->length].pIsAncestorOf;
}
/**
 * Allocates a _my_data record for the given dagnode and inserts it into
 * the fragment's cache, keyed by the dagnode's HID.
 *
 * Ownership: on success the cache owns both the new _my_data and the
 * dagnode; the caller may use *ppData but must not free it. On failure
 * the caller retains ownership of pDagnode.
 */
static void _cache__add__dagnode(SG_context * pCtx,
	SG_dagfrag * pFrag,
	SG_int32 gen,
	SG_dagnode * pDagnode, // if successful, we take ownership of dagnode
	SG_uint32 state,
	_my_data ** ppData) // we retain ownership of DATA (but you may modify non-pointer values within it)
{
	const char * szHid;
	_my_data * pDataAllocated = NULL;   // owned until handed to the cache
	_my_data * pDataCached = NULL;      // alias set only after the cache owns it
	_my_data * pOldData = NULL;

	SG_NULLARGCHECK_RETURN(pFrag); // this is probably not necessary for an internal routine
	SG_NULLARGCHECK_RETURN(pDagnode); // this is probably not necessary for an internal routine

	SG_ERR_CHECK_RETURN( SG_dagnode__get_id_ref(pCtx,pDagnode,&szHid) );

	// End-fringe nodes must never carry a full dagnode payload in the cache.
	SG_ASSERT_RELEASE_RETURN2( (SG_DFS_END_FRINGE != state), (pCtx,"Adding end-fringe dagnode [%s] to dagfrag.",szHid) );

	SG_ERR_CHECK( SG_alloc(pCtx, 1, sizeof(_my_data), &pDataAllocated) );
	pDataAllocated->m_genDagnode = gen;
	pDataAllocated->m_state = state;

	// Insert into the cache; any pre-existing entry for this HID would be
	// returned in pOldData, which indicates a caller bug (see assert below).
	SG_ERR_CHECK( SG_rbtree__update__with_assoc(pCtx,pFrag->m_pRB_Cache,szHid,pDataAllocated,(void **)&pOldData) );
	SG_ASSERT_RELEASE_FAIL2( (!pOldData), (pCtx,"Possible memory leak adding [%s] to dagfrag.",szHid) );

	// if everything is successful, the cache now owns pData and pDagnode.
	pDataCached = pDataAllocated;
	pDataAllocated = NULL;
	pDataCached->m_pDagnode = pDagnode;

	if (ppData)
		*ppData = pDataCached;

	return;

fail:
	// NOTE: pDataCached is only non-NULL once the record is in the cache and
	// no further fallible call follows, so this guard is defensive; it exists
	// to keep the caller's dagnode out of a record we failed to finish.
	if (pDataCached) // caller still owns pDagnode on errors even if we got pData
		pDataCached->m_pDagnode = NULL; // into the cache. This may cause problems later if you keep going.
	SG_ERR_IGNORE( _my_data__free(pCtx, pDataAllocated) ); // free pData if we did not get it stuck into the cache.
}
/**
 * Adds a new node to the tree. The new node is considered 'pending', though
 * it not actually added to the pending list at this point.
 *
 * On failure the node is released and, as a fix over the previous version,
 * *ppNewNodeRef is reset to NULL so the caller is never left holding a
 * dangling pointer to the freed node.
 */
static void _tree__add_new_node(SG_context * pCtx,
	_tree_t * pTree,
	_node_t * pDisplayParent,
	const char * pszHid,
	_node_t ** ppNewNodeRef)
{
	_node_t * pNode = NULL;

	// Get the memory: either a fresh allocation or a node off the free list.
	if(pTree->pFreeList==NULL)
	{
		SG_ERR_CHECK( SG_alloc1(pCtx, pNode) );
		SG_ERR_CHECK( _node_list__init(pCtx, &pNode->displayChildren, 2) );
	}
	else
	{
		// Free-list nodes are chained through displayChildren.p[0].
		pNode = pTree->pFreeList;
		pTree->pFreeList = pNode->displayChildren.p[0];
		pNode->displayChildren.count = 0;
	}
	// NOTE(review): the out-param is assigned before the fallible calls
	// below, presumably because _node_list__append (taking &pNode) may
	// clear our local — confirm before moving this assignment.
	if(ppNewNodeRef!=NULL)
		*ppNewNodeRef = pNode;

	// Populate the node.
	pNode->pDisplayParent = pDisplayParent;
	SG_ERR_CHECK( SG_repo__fetch_dagnodes__one(pCtx, pTree->pRepoRef, pTree->pDagnodeFetcher, pszHid, &pNode->pDagnode) );
	SG_ERR_CHECK( SG_dagnode__get_revno(pCtx, pNode->pDagnode, &pNode->revno) );
	SG_ERR_CHECK( SG_dagnode__get_id_ref(pCtx, pNode->pDagnode, &pNode->pszHidRef) );
	pNode->isPending = SG_TRUE;

	// Add the node to its display parent (or install it as the root).
	if(pDisplayParent!=NULL)
	{
		SG_ERR_CHECK( _node_list__append(pCtx, &pDisplayParent->displayChildren, &pNode) );
	}
	else
	{
		SG_ASSERT(pTree->pRoot==NULL);
		pTree->pRoot = pNode;
	}

	return;
fail:
	// The node is about to be freed: make sure the caller's reference does
	// not dangle (previously it was left pointing at freed memory).
	if(ppNewNodeRef!=NULL)
		*ppNewNodeRef = NULL;
	_node__nullfree(pCtx, &pNode);
}
/**
 * Pops the most recently pushed entry off the work queue, removing its
 * HID from the revno cache and maintaining the running count of
 * _ANCESTOR_OF_NEW entries still queued.
 *
 * On error the queue is left unmodified and no out-parameter is written.
 */
static void _fnsc_work_queue__pop(
	SG_context * pCtx,
	_fnsc_work_queue_t * pWorkQueue,
	SG_dagnode ** ppDagnode,
	const char ** ppszHidRef,
	SG_byte * isAncestorOf
	)
{
	const char * szHid = NULL;

	// Resolve the top entry's HID and drop it from the revno cache before
	// shrinking the queue, so a failure leaves everything untouched.
	SG_ERR_CHECK_RETURN(  SG_dagnode__get_id_ref(pCtx, pWorkQueue->p[pWorkQueue->length-1].pDagnode, &szHid)  );
	SG_ERR_CHECK_RETURN(  SG_rbtree__remove(pCtx, pWorkQueue->pRevnoCache, szHid)  );

	--pWorkQueue->length;

	// Keep the ancestors-of-new bookkeeping in sync with the pop.
	if(pWorkQueue->p[pWorkQueue->length].isAncestorOf==_ANCESTOR_OF_NEW)
		--pWorkQueue->numAncestorsOfNewOnTheQueue;

	// Hand the popped entry back to the caller.
	*ppszHidRef = szHid;
	*ppDagnode = pWorkQueue->p[pWorkQueue->length].pDagnode;
	*isAncestorOf = pWorkQueue->p[pWorkQueue->length].isAncestorOf;
}
/**
 * Dag-walker callback used by the "new since" query: collects the HIDs of
 * nodes encountered during the walk into pcb->prbNewNodeHids, stopping the
 * walk (by draining the walker's queue) when it reaches the "old" node or
 * an ancestor of it.
 */
static void _dagquery__new_since__callback(SG_context* pCtx, SG_repo *pRepo, void *pVoidData, SG_dagnode *pCurrentDagnode, SG_rbtree* pDagnodeCache, SG_dagwalker_continue* pContinue)
{
	SG_dagquery_relationship rel;
	new_since_context* pcb = (new_since_context*)pVoidData;
	const char* szCurrentNodeHid = NULL;

	SG_UNUSED(pDagnodeCache);

	SG_ERR_CHECK( SG_dagnode__get_id_ref(pCtx, pCurrentDagnode, &szCurrentNodeHid) );

	// Reached the old node itself: nothing beyond it is "new".
	if (strcmp(szCurrentNodeHid, pcb->pszOldNodeHid) == 0)
	{
		*pContinue = SG_DAGWALKER_CONTINUE__EMPTY_QUEUE;
		return;
	}

	// TODO: Consider looking at revision numbers to make this faster.
	// NOTE(review): the two SG_FALSE/SG_TRUE arguments select which
	// relationships the query may report — confirm against the
	// SG_dagquery__how_are_dagnodes_related declaration.
	SG_ERR_CHECK( SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, pcb->dagnum, szCurrentNodeHid, pcb->pszOldNodeHid, SG_FALSE, SG_TRUE, &rel) );

	if (rel != SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
	{
		// Not a descendant of the old node: if it is an ancestor, we have
		// walked past the old node and can stop this branch of the walk.
		SG_ERR_CHECK( SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, pcb->dagnum, szCurrentNodeHid, pcb->pszOldNodeHid, SG_TRUE, SG_FALSE, &rel) );
		if (rel == SG_DAGQUERY_RELATIONSHIP__ANCESTOR)
		{
			*pContinue = SG_DAGWALKER_CONTINUE__EMPTY_QUEUE;
			return;
		}
	}

	// Record this node as "new since" the old node.
	SG_ERR_CHECK( SG_rbtree__add(pCtx, pcb->prbNewNodeHids, szCurrentNodeHid) );

fail:
	;
}
/**
 * Tests comments, stamps, and tags on a single changeset: adds one of
 * each, verifies the lookups, exercises the tag reverse-lookup table,
 * then adds and removes a second tag and re-verifies the counts.
 *
 * Returns 1 on success, 0 on failure.
 *
 * Fixes over the previous version:
 *  - the fail path now releases pva/prb/prb_reversed/pRepo/pdn (they leaked);
 *  - the fail path no longer frees pvh: it is a borrowed reference obtained
 *    from SG_varray__get__vhash and is owned by pva, so freeing it was
 *    invalid (and would double-free once pva is released);
 *  - SG_repo__open_repo_instance is checked with VERIFY_ERR_CHECK for
 *    consistency with every other step, so its failure is reported too.
 */
int u0050_logstuff_test__1(SG_context * pCtx, SG_pathname* pPathTopDir)
{
	char bufName[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathWorkingDir = NULL;
	SG_pathname* pPathFile = NULL;
	SG_vhash* pvh = NULL;              // borrowed ref into pva; never freed directly
	SG_dagnode* pdn = NULL;
	const char* psz_hid_cs = NULL;     // borrowed ref into pdn; never freed directly
	SG_repo* pRepo = NULL;
	SG_uint32 count;
	SG_rbtree* prb = NULL;
	SG_varray* pva = NULL;
	SG_rbtree* prb_reversed = NULL;
	const char* psz_val = NULL;        // borrowed ref into pvh; never freed directly
	SG_audit q;

	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, bufName, sizeof(bufName), 32) );

	/* create the working dir */
	VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, bufName) );
	VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir) );

	/* add stuff */
	VERIFY_ERR_CHECK( u0050_logstuff__create_file__numbers(pCtx, pPathWorkingDir, "aaa", 20) );

	/* create the repo */
	VERIFY_ERR_CHECK( _ut_pt__new_repo(pCtx, bufName, pPathWorkingDir) );
	VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
	VERIFY_ERR_CHECK( u0050_logstuff__commit_all(pCtx, pPathWorkingDir, &pdn) );
	VERIFY_ERR_CHECK( SG_dagnode__get_id_ref(pCtx, pdn, &psz_hid_cs) );

	// VERIFY_ERR_CHECK (not SG_ERR_CHECK) so a failure here is reported by
	// the test framework like every other step.
	VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, bufName, &pRepo) );

#define MY_COMMENT "The name of this new file sucks! What kind of a name is 'aaa'?"

	VERIFY_ERR_CHECK( SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS) );
	VERIFY_ERR_CHECK( SG_vc_comments__add(pCtx, pRepo, psz_hid_cs, MY_COMMENT, &q) );
	VERIFY_ERR_CHECK( SG_vc_stamps__add(pCtx, pRepo, psz_hid_cs, "crap", &q) );
	VERIFY_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "tcrap", &q) );

	/* the comment round-trips */
	VERIFY_ERR_CHECK( SG_vc_comments__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (1 == count));
	VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
	VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "text", &psz_val) );
	VERIFY_COND("match", (0 == strcmp(psz_val, MY_COMMENT)) );
	SG_VARRAY_NULLFREE(pCtx, pva);

	/* the stamp round-trips */
	VERIFY_ERR_CHECK( SG_vc_stamps__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (1 == count));
	VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
	VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "stamp", &psz_val) );
	VERIFY_COND("match", (0 == strcmp(psz_val, "crap")) );
	SG_VARRAY_NULLFREE(pCtx, pva);

	/* the tag round-trips */
	VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (1 == count));
	VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
	VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "tag", &psz_val) );
	VERIFY_COND("match", (0 == strcmp(psz_val, "tcrap")) );
	SG_VARRAY_NULLFREE(pCtx, pva);

	/* one tag, and the reverse-lookup maps the changeset back to it */
	VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
	VERIFY_COND("count", (1 == count));

	VERIFY_ERR_CHECK( SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb_reversed, &count) );
	VERIFY_COND("count", (1 == count));

	{
		const char* psz_my_key = NULL;
		const char* psz_my_val = NULL;
		SG_bool b;

		VERIFY_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**)&psz_my_val) );
		VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap")) );
		VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs)) );
	}
	SG_RBTREE_NULLFREE(pCtx, prb_reversed);
	SG_RBTREE_NULLFREE(pCtx, prb);

	/* a second tag on the same changeset */
	VERIFY_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "whatever", &q) );

	VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (2 == count));
	SG_VARRAY_NULLFREE(pCtx, pva);

	VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
	VERIFY_COND("count", (2 == count));

	/* both tags map to the same changeset, so the reverse table has one entry */
	VERIFY_ERR_CHECK( SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb_reversed, &count) );
	VERIFY_COND("count", (1 == count));

	{
		const char* psz_my_key = NULL;
		const char* psz_my_val = NULL;
		SG_bool b;

		VERIFY_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**)&psz_my_val) );
		VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs)) );
		/* we don't know whether psz_my_val is tcrap or whatever. */
		// VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap")) );
	}
	SG_RBTREE_NULLFREE(pCtx, prb_reversed);
	SG_RBTREE_NULLFREE(pCtx, prb);

	{
		const char* psz_remove = "whatever";
		VERIFY_ERR_CHECK( SG_vc_tags__remove(pCtx, pRepo, &q, 1, &psz_remove) );
		/* Note that by removing whatever, we are bringing the tags list back
		 * to a state where it has been before (just tcrap). This changeset in
		 * the tags table will have its own csid, because the parentage is
		 * different, but it's root idtrie HID will be the same as a previous
		 * node.
		 */
	}

	VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (1 == count));
	SG_VARRAY_NULLFREE(pCtx, pva);

	VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
	VERIFY_COND("count", (1 == count));

	SG_RBTREE_NULLFREE(pCtx, prb);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathFile);

	return 1;

fail:
	// Release everything still live; all NULLFREE macros are NULL-safe.
	// pvh is NOT freed here: it points into pva.
	SG_VARRAY_NULLFREE(pCtx, pva);
	SG_RBTREE_NULLFREE(pCtx, prb_reversed);
	SG_RBTREE_NULLFREE(pCtx, prb);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathFile);
	return 0;
}
/**
 * Adds a dagnode (and, transitively, its parents) to the fragment.
 *
 * Ownership: the fragment takes ownership of *ppdn. On every path that
 * consumes the dagnode, *ppdn is set to NULL; the shared fail/fall-through
 * cleanup at the bottom then frees whatever the fragment did not consume,
 * so the caller's pointer is always NULL on return.
 */
void SG_dagfrag__add_dagnode(SG_context * pCtx,
	SG_dagfrag * pFrag,
	SG_dagnode** ppdn)
{
	_my_data * pMyDataCached = NULL;
	SG_bool bPresent = SG_FALSE;
	const char* psz_id = NULL;     // borrowed ref into the dagnode
	SG_dagnode* pdn = NULL;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NULLARGCHECK_RETURN(ppdn);
	pdn = *ppdn;

	// if we are extending the fragment, delete the generation-sorted
	// member cache copy.  (see __foreach_member()).  it's either that
	// or update it in parallel as we change the real CACHE and that
	// doesn't seem worth the bother.
	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;

	// fetch the starting dagnode and compute the generation bounds.
	// first, see if the cache already has info for this dagnode.
	// if not, fetch it from the source and then add it to the cache.
	SG_ERR_CHECK( SG_dagnode__get_id_ref(pCtx, pdn, &psz_id) );
#if DEBUG && TRACE_DAGFRAG && 0
	SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Adding [%s] to frag.\r\n", psz_id) );
	SG_ERR_CHECK( SG_console__flush(pCtx, SG_CS_STDERR) );
#endif
	SG_ERR_CHECK( _cache__lookup(pCtx, pFrag,psz_id,&pMyDataCached,&bPresent) );
	if (!bPresent)
	{
		// this dagnode was not already present in the cache.
		// add it to the cache directly and set the state.
		// we don't need to go thru the work queue for it.
		//
		// then the add all of its parents.

		SG_ERR_CHECK( _cache__add__dagnode(pCtx, pFrag, 0, pdn,SG_DFS_START_MEMBER, &pMyDataCached) );
		// cache took ownership of the dagnode; clear the caller's pointer
		// so the cleanup below does not free it.
		*ppdn = NULL;

		SG_ERR_CHECK( _add_parents(pCtx, pFrag, pMyDataCached->m_pDagnode) );
	}
	else
	{
		// the node was already present in the cache, so we have already
		// walked at least part of the graph around it.
		switch (pMyDataCached->m_state)
		{
		case SG_DFS_END_FRINGE:
			{
				// promote a fringe entry to a full interior member: give it
				// the dagnode payload (if it lacks one) and walk its parents.
				if (!pMyDataCached->m_pDagnode)
				{
					pMyDataCached->m_pDagnode = pdn;
					*ppdn = NULL;
				}
				pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
				SG_ERR_CHECK( _add_parents(pCtx, pFrag, pMyDataCached->m_pDagnode) );
			}
			break;
		default:
			// Any other state means the node is already a member; the
			// caller's dagnode is redundant and freed by the cleanup below.
			break;
			/*
			SG_ASSERT_RELEASE_FAIL2( (0),
							 (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
							  pMyDataCached->m_state,psz_id) );
			*/
		}
	}

	// fall through
fail:
	// Shared success/failure cleanup: frees the dagnode only if no path
	// above transferred ownership (i.e. *ppdn is still non-NULL).
	SG_DAGNODE_NULLFREE(pCtx, *ppdn);
}