// Dag-walker callback: collects the hid of every node that is strictly
// "new since" pcb->pszOldNodeHid into pcb->prbNewNodeHids, and prunes the
// walk once it reaches the old node or one of its ancestors.
static void _dagquery__new_since__callback(SG_context* pCtx,
                                           SG_repo* pRepo,
                                           void* pVoidData,
                                           SG_dagnode* pCurrentDagnode,
                                           SG_rbtree* pDagnodeCache,
                                           SG_dagwalker_continue* pContinue)
{
	new_since_context* pContextData = (new_since_context*)pVoidData;
	const char* pszNodeHid = NULL;
	SG_dagquery_relationship relationship;

	SG_UNUSED(pDagnodeCache);

	SG_ERR_CHECK( SG_dagnode__get_id_ref(pCtx, pCurrentDagnode, &pszNodeHid) );

	// Walked all the way back to the old node itself: nothing below it is new.
	if (0 == strcmp(pszNodeHid, pContextData->pszOldNodeHid))
	{
		*pContinue = SG_DAGWALKER_CONTINUE__EMPTY_QUEUE;
		return;
	}

	// TODO: Consider looking at revision numbers to make this faster.
	SG_ERR_CHECK( SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, pContextData->dagnum,
	                                                    pszNodeHid, pContextData->pszOldNodeHid,
	                                                    SG_FALSE, SG_TRUE, &relationship) );
	if (SG_DAGQUERY_RELATIONSHIP__DESCENDANT != relationship)
	{
		// Not a descendant of the old node; check whether it is an ancestor,
		// in which case this whole branch of the walk can be abandoned.
		SG_ERR_CHECK( SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, pContextData->dagnum,
		                                                    pszNodeHid, pContextData->pszOldNodeHid,
		                                                    SG_TRUE, SG_FALSE, &relationship) );
		if (SG_DAGQUERY_RELATIONSHIP__ANCESTOR == relationship)
		{
			*pContinue = SG_DAGWALKER_CONTINUE__EMPTY_QUEUE;
			return;
		}
	}

	// Current node is newer than the old node: record it.
	SG_ERR_CHECK( SG_rbtree__add(pCtx, pContextData->prbNewNodeHids, pszNodeHid) );

fail:
	;
}
// For each dag listed in pvh_since (dagnum-hex -> varray of already-known
// node hids), find every dagnode newer than those baselines and write both
// the dagnodes and the blobs their changesets reference into the fragball.
//
// pvh_since : map of dagnum (hex string) to varray of baseline node hids
// pfb       : destination fragball writer
// Errors are reported through pCtx; all locals are freed on every path.
static void _do_since(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_vhash* pvh_since,
	SG_fragball_writer* pfb
	)
{
	SG_uint32 count_dagnums = 0;
	SG_uint32 i_dagnum = 0;
	SG_ihash* pih_new = NULL;      // new-node hids returned per dag
	SG_rbtree* prb = NULL;         // new-node hids collected for the fragball
	SG_vhash* pvh_blobs = NULL;    // blob hids needed by the new changesets
	SG_changeset* pcs = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pvh_since);
	SG_NULLARGCHECK_RETURN(pfb);

	// TODO do we need to deal with dags which are present here but not in pvh_since?

	SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_since, &count_dagnums) );
	for (i_dagnum=0; i_dagnum<count_dagnums; i_dagnum++)
	{
		const char* psz_dagnum = NULL;
		SG_varray* pva_nodes = NULL;
		SG_uint64 dagnum = 0;

		SG_ERR_CHECK( SG_vhash__get_nth_pair__varray(pCtx, pvh_since, i_dagnum, &psz_dagnum, &pva_nodes) );
		SG_ERR_CHECK( SG_dagnum__from_sz__hex(pCtx, psz_dagnum, &dagnum) );
		SG_ERR_CHECK( SG_repo__find_new_dagnodes_since(pCtx, pRepo, dagnum, pva_nodes, &pih_new) );
		if (pih_new)
		{
			SG_uint32 count = 0;
			SG_ERR_CHECK( SG_ihash__count(pCtx, pih_new, &count) );
			if (count)
			{
				SG_uint32 i = 0;

				SG_ERR_CHECK( SG_rbtree__alloc(pCtx, &prb) );
				SG_ERR_CHECK( SG_vhash__alloc(pCtx, &pvh_blobs) );
				for (i=0; i<count; i++)
				{
					const char* psz_node = NULL;
					SG_ERR_CHECK( SG_ihash__get_nth_pair(pCtx, pih_new, i, &psz_node, NULL) );
					SG_ERR_CHECK( SG_rbtree__add(pCtx, prb, psz_node) );
					SG_ERR_CHECK( SG_vhash__add__null(pCtx, pvh_blobs, psz_node) );
					// Load the changeset itself so its referenced blobs
					// (trees, files, etc.) can be pulled into pvh_blobs too.
					SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepo, psz_node, &pcs) );
					SG_ERR_CHECK( _add_necessary_blobs(pCtx, pcs, pvh_blobs) );
					SG_CHANGESET_NULLFREE(pCtx, pcs);
				}

				// put all these new nodes in the frag
				SG_ERR_CHECK( SG_fragball__write__dagnodes(pCtx, pfb, dagnum, prb) );

				// and the blobs
				SG_ERR_CHECK( SG_sync__add_blobs_to_fragball(pCtx, pfb, pvh_blobs) );

				SG_VHASH_NULLFREE(pCtx, pvh_blobs);
				SG_RBTREE_NULLFREE(pCtx, prb);
			}
			SG_IHASH_NULLFREE(pCtx, pih_new);
		}
	}

fail:
	// Frees are no-ops on the success path (pointers already NULLed above).
	SG_CHANGESET_NULLFREE(pCtx, pcs);
	SG_VHASH_NULLFREE(pCtx, pvh_blobs);
	SG_RBTREE_NULLFREE(pCtx, prb);
	SG_IHASH_NULLFREE(pCtx, pih_new);
}
// Computes the db delta (records added / removed) between two changesets in
// the given dag, returning it in *ppvh_add / *ppvh_remove (caller frees).
//
// Strategy: if one csid is a direct ancestor of the other (checked via
// generation numbers first, to know which direction to search), walk the
// single direct path between them. Otherwise fall back to the LCA: walk
// from each csid down to the common ancestor and combine the two deltas.
void SG_repo__db__calc_delta(
	SG_context * pCtx,
	SG_repo* pRepo,
	SG_uint64 dagnum,
	const char* psz_csid_from,
	const char* psz_csid_to,
	SG_uint32 flags,
	SG_vhash** ppvh_add,
	SG_vhash** ppvh_remove
	)
{
	SG_dagnode* pdn_from = NULL;
	SG_dagnode* pdn_to = NULL;
	SG_int32 gen_from = -1;
	SG_int32 gen_to = -1;
	SG_varray* pva_direct_backward_path = NULL;   // path from -> to (or from -> ancestor)
	SG_varray* pva_direct_forward_path = NULL;    // path to -> from (or to -> ancestor)
	SG_vhash* pvh_add = NULL;
	SG_vhash* pvh_remove = NULL;
	SG_rbtree* prb_temp = NULL;
	SG_daglca* plca = NULL;
	char* psz_csid_ancestor = NULL;

	SG_NULLARGCHECK_RETURN(psz_csid_from);
	SG_NULLARGCHECK_RETURN(psz_csid_to);
	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(ppvh_add);
	SG_NULLARGCHECK_RETURN(ppvh_remove);

	// Fetch both dagnodes to compare generations; the deeper node can only
	// be a descendant of the shallower one, never the reverse.
	SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_from, &pdn_from) );
	SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn_from, &gen_from) );
	SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_to, &pdn_to) );
	SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pdn_to, &gen_to) );

	if (gen_from > gen_to)
	{
		// "from" is deeper: look for a direct path from -> to.
		SG_ERR_CHECK( SG_repo__dag__find_direct_backward_path(
					pCtx,
					pRepo,
					dagnum,
					psz_csid_from,
					psz_csid_to,
					&pva_direct_backward_path
					) );
		if (pva_direct_backward_path)
		{
			SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_add) );
			SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_remove) );
			SG_ERR_CHECK( SG_db__make_delta_from_path(
						pCtx,
						pRepo,
						dagnum,
						pva_direct_backward_path,
						flags,
						pvh_add,
						pvh_remove
						) );
		}
	}
	else if (gen_from < gen_to)
	{
		// "to" is deeper: look for a direct path to -> from.
		SG_ERR_CHECK( SG_repo__dag__find_direct_backward_path(
					pCtx,
					pRepo,
					dagnum,
					psz_csid_to,
					psz_csid_from,
					&pva_direct_forward_path
					) );
		if (pva_direct_forward_path)
		{
			SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_add) );
			SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_remove) );
			// Walking the path in this direction reverses the sense of the
			// delta, so add/remove are swapped here.
			SG_ERR_CHECK( SG_db__make_delta_from_path(
						pCtx,
						pRepo,
						dagnum,
						pva_direct_forward_path,
						flags,
						pvh_remove,
						pvh_add
						) );
		}
	}

	if (!pvh_add && !pvh_remove)
	{
		// No direct path (equal generations, or neither is an ancestor of
		// the other): find the LCA and combine the deltas of both legs.
		SG_ERR_CHECK( SG_RBTREE__ALLOC(pCtx, &prb_temp) );
		SG_ERR_CHECK( SG_rbtree__add(pCtx,prb_temp,psz_csid_from) );
		SG_ERR_CHECK( SG_rbtree__add(pCtx,prb_temp,psz_csid_to) );
		SG_ERR_CHECK( SG_repo__get_dag_lca(pCtx,pRepo,dagnum,prb_temp,&plca) );
		{
			const char* psz_hid = NULL;
			SG_daglca_node_type node_type = 0;
			SG_int32 gen = -1;

			// The first node in LCA iteration order is the ancestor itself.
			SG_ERR_CHECK( SG_daglca__iterator__first(
						pCtx,
						NULL,
						plca,
						SG_FALSE,
						&psz_hid,
						&node_type,
						&gen,
						NULL
						) );
			SG_ERR_CHECK( SG_STRDUP(pCtx, psz_hid, &psz_csid_ancestor) );
		}
		SG_ERR_CHECK( SG_repo__dag__find_direct_backward_path(
					pCtx,
					pRepo,
					dagnum,
					psz_csid_from,
					psz_csid_ancestor,
					&pva_direct_backward_path
					) );
		SG_ERR_CHECK( SG_repo__dag__find_direct_backward_path(
					pCtx,
					pRepo,
					dagnum,
					psz_csid_to,
					psz_csid_ancestor,
					&pva_direct_forward_path
					) );
		SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_add) );
		SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_remove) );
		// Leg 1: from -> ancestor contributes normally; leg 2: to -> ancestor
		// contributes with add/remove swapped (it is walked "backwards").
		SG_ERR_CHECK( SG_db__make_delta_from_path(
					pCtx,
					pRepo,
					dagnum,
					pva_direct_backward_path,
					flags,
					pvh_add,
					pvh_remove
					) );
		SG_ERR_CHECK( SG_db__make_delta_from_path(
					pCtx,
					pRepo,
					dagnum,
					pva_direct_forward_path,
					flags,
					pvh_remove,
					pvh_add
					) );
	}

	// Transfer ownership to the caller; NULL locals so fail: won't free them.
	*ppvh_add = pvh_add;
	pvh_add = NULL;

	*ppvh_remove = pvh_remove;
	pvh_remove = NULL;

fail:
	SG_NULLFREE(pCtx, psz_csid_ancestor);
	SG_RBTREE_NULLFREE(pCtx, prb_temp);
	SG_DAGLCA_NULLFREE(pCtx, plca);
	SG_VHASH_NULLFREE(pCtx, pvh_add);
	SG_VHASH_NULLFREE(pCtx, pvh_remove);
	SG_VARRAY_NULLFREE(pCtx, pva_direct_backward_path);
	SG_VARRAY_NULLFREE(pCtx, pva_direct_forward_path);
	SG_DAGNODE_NULLFREE(pCtx, pdn_from);
	SG_DAGNODE_NULLFREE(pCtx, pdn_to);
}
// Finds the head(s)/leaf(es) of the dag that are descendants of pszHidStart.
//
// bStopIfMultiple : if true, bail out as soon as a second head is found
//                   (returning MULTIPLE status and a NULL rbtree).
// pdqfhs          : receives the find-head status (LEAF/UNIQUE/MULTIPLE).
// pprbHeads       : receives the rbtree of qualifying head hids (caller
//                   frees); NULL when MULTIPLE and bStopIfMultiple was set.
void SG_dagquery__find_descendant_heads(SG_context * pCtx,
										SG_repo * pRepo,
										SG_uint64 iDagNum,
										const char * pszHidStart,
										SG_bool bStopIfMultiple,
										SG_dagquery_find_head_status * pdqfhs,
										SG_rbtree ** pprbHeads)
{
	SG_rbtree * prbLeaves = NULL;
	SG_rbtree * prbHeadsFound = NULL;
	SG_rbtree_iterator * pIter = NULL;
	const char * pszKey_k = NULL;
	SG_bool b;
	SG_dagquery_find_head_status dqfhs;
	SG_dagquery_relationship dqRel;
	SG_uint32 nrFound;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NONEMPTYCHECK_RETURN(pszHidStart);
	SG_NULLARGCHECK_RETURN(pdqfhs);
	SG_NULLARGCHECK_RETURN(pprbHeads);

	SG_ERR_CHECK( SG_RBTREE__ALLOC(pCtx, &prbHeadsFound) );

	// fetch a list of all of the LEAVES in the DAG.
	// this rbtree only contains keys; no assoc values.
	SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagNum, &prbLeaves) );

	// if the starting point is a leaf, then we are done (we don't care how many
	// other leaves are in the rbtree because none will be a child of ours because
	// we are a leaf).
	SG_ERR_CHECK( SG_rbtree__find(pCtx, prbLeaves, pszHidStart, &b, NULL) );
	if (b)
	{
		SG_ERR_CHECK( SG_rbtree__add(pCtx, prbHeadsFound, pszHidStart) );

		dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__IS_LEAF;
		goto done;
	}

	// inspect each leaf and qualify it; put the ones that pass
	// into the list of actual heads.

	nrFound = 0;

	SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pIter, prbLeaves, &b, &pszKey_k, NULL) );
	while (b)
	{
		// is head[k] a descendant of start?
		SG_ERR_CHECK( SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, iDagNum, pszKey_k, pszHidStart,
															SG_FALSE,	// we care about descendants, so don't skip
															SG_TRUE,	// we don't care about ancestors, so skip them
															&dqRel) );

		if (dqRel == SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
		{
			nrFound++;

			if (bStopIfMultiple && (nrFound > 1))
			{
				// they wanted a unique answer and we've found too many answers
				// (which they won't be able to use anyway) so just stop and
				// return the status.  (we delete prbHeadsFound because it is
				// incomplete and so that they won't be tempted to use it.)

				SG_RBTREE_NULLFREE(pCtx, prbHeadsFound);
				dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE;
				goto done;
			}

			SG_ERR_CHECK( SG_rbtree__add(pCtx, prbHeadsFound, pszKey_k) );
		}

		SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pIter, &b, &pszKey_k, NULL) );
	}

	switch (nrFound)
	{
	case 0:
		// this should NEVER happen.  we should always be able to find a
		// leaf/head for a node.
		//
		// TODO the only case where this might happen is if named branches
		// TODO cause the leaf to be disqualified.  so i'm going to THROW
		// TODO here rather than ASSERT.

		SG_ERR_THROW2( SG_ERR_DAG_NOT_CONSISTENT,
					   (pCtx, "Could not find head/leaf for changeset [%s]", pszHidStart) );
		break;

	case 1:
		dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__UNIQUE;
		break;

	default:
		dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE;
		break;
	}

done:
	// hand the (possibly NULL) result tree to the caller; NULL the local so
	// the fail: cleanup below doesn't free what we just gave away.
	*pprbHeads = prbHeadsFound;
	prbHeadsFound = NULL;

	*pdqfhs = dqfhs;

fail:
	SG_RBTREE_NULLFREE(pCtx, prbLeaves);
	SG_RBTREE_NULLFREE(pCtx, prbHeadsFound);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);
}
// Collects (into prb_blobs) every blob reachable from the changeset
// psz_hid_cs, recursing through its parent changesets.
//
// psz_hid_cs : hid of the changeset to pack
// prb_blobs  : accumulator rbtree owned by the caller
// Errors are reported through pCtx (SG_ERR_CHECK goto-chain).
void sg_pack__do_changeset(SG_context* pCtx, SG_repo* pRepo, const char* psz_hid_cs, SG_rbtree* prb_blobs)
{
	SG_changeset* pcs = NULL;
	SG_int32 gen = 0;
	SG_uint32 count_blobs = 0;
	SG_uint32 count_parents = 0;
	SG_varray* pva_parents = NULL;
	SG_uint32 i;
	SG_rbtree* prb_new = NULL;
	const char* psz_hid_root_treenode = NULL;
	const char* psz_key = NULL;
	SG_vhash* pvh_lbl = NULL;
	SG_vhash* pvh_blobs = NULL;

	SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs, &pcs) );
	SG_ERR_CHECK( SG_changeset__get_root(pCtx, pcs, &psz_hid_root_treenode) );
	SG_ERR_CHECK( SG_changeset__get_generation(pCtx, pcs, &gen) );

	// NOTE: count_blobs is still 0 here, so this is just a size hint of 0;
	// the tree grows as entries are added below.
	SG_ERR_CHECK( SG_RBTREE__ALLOC__PARAMS(pCtx, &prb_new, count_blobs, NULL) );

	SG_ERR_CHECK( SG_changeset__get_list_of_bloblists(pCtx, pcs, &pvh_lbl) );

	/* add all the tree user file blobs */
	SG_ERR_CHECK( SG_changeset__get_bloblist_name(pCtx, SG_BLOB_REFTYPE__TREEUSERFILE, &psz_key) );
	SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_lbl, psz_key, &pvh_blobs) );
	SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_blobs, &count_blobs) );

	for (i=0; i<count_blobs; i++)
	{
		const char* psz_hid = NULL;

		SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_blobs, i, &psz_hid, NULL) );
		SG_ERR_CHECK( SG_rbtree__add(pCtx, prb_new, psz_hid) );
	}

	/* and the treenode blobs */
	SG_ERR_CHECK( SG_changeset__get_bloblist_name(pCtx, SG_BLOB_REFTYPE__TREENODE, &psz_key) );
	SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_lbl, psz_key, &pvh_blobs) );
	SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_blobs, &count_blobs) );

	for (i=0; i<count_blobs; i++)
	{
		const char* psz_hid = NULL;

		// BUG FIX: this loop previously never fetched the nth hid, so it
		// passed a NULL key to SG_rbtree__add on every iteration. Mirror
		// the treeuserfile loop above and fetch the hid first.
		SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_blobs, i, &psz_hid, NULL) );
		SG_ERR_CHECK( SG_rbtree__add(pCtx, prb_new, psz_hid) );
	}

	SG_ERR_CHECK( sg_pack__do_get_dir__top(pCtx, pRepo, gen, psz_hid_root_treenode, prb_blobs, prb_new) );
	SG_RBTREE_NULLFREE(pCtx, prb_new);

	// Recurse into each parent changeset.
	SG_ERR_CHECK( SG_changeset__get_parents(pCtx, pcs, &pva_parents) );
	if (pva_parents)
	{
		SG_ERR_CHECK( SG_varray__count(pCtx, pva_parents, &count_parents) );
		for (i=0; i<count_parents; i++)
		{
			const char* psz_hid = NULL;

			SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva_parents, i, &psz_hid) );
			SG_ERR_CHECK( sg_pack__do_changeset(pCtx, pRepo, psz_hid, prb_blobs) );
		}
	}

	SG_CHANGESET_NULLFREE(pCtx, pcs);

	return;

fail:
	// BUG FIX: pcs was leaked on the error path (only prb_new was freed).
	SG_CHANGESET_NULLFREE(pCtx, pcs);
	SG_RBTREE_NULLFREE(pCtx, prb_new);
}