void SG_dagquery__new_since(SG_context * pCtx,
							SG_repo* pRepo,
							SG_uint64 iDagNum,
							const char* pszOldNodeHid,
							const char* pszNewNodeHid,
							SG_rbtree** pprbNewNodeHids)
{
	SG_dagquery_relationship rel;
	new_since_context cbCtx;

    cbCtx.dagnum = iDagNum;
	cbCtx.prbNewNodeHids = NULL;
	cbCtx.pszNewNodeHid = pszNewNodeHid;
	cbCtx.pszOldNodeHid = pszOldNodeHid;

	SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, iDagNum, pszNewNodeHid, pszOldNodeHid, SG_FALSE, SG_FALSE, &rel)  );

	if (rel != SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
		SG_ERR_THROW2_RETURN(SG_ERR_UNSPECIFIED, (pCtx, "pszNewNodeHid must be a descendant of pszOldNodeHid"));

	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &cbCtx.prbNewNodeHids)  );

	SG_ERR_CHECK(  SG_dagwalker__walk_dag_single(pCtx, pRepo, iDagNum, pszNewNodeHid, _dagquery__new_since__callback, (void*)&cbCtx)  );

	SG_RETURN_AND_NULL(cbCtx.prbNewNodeHids, pprbNewNodeHids);

	/* Fall through to common cleanup */
fail:
	SG_RBTREE_NULLFREE(pCtx, cbCtx.prbNewNodeHids);
}
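A minimal usage sketch for the function above, written as a hypothetical caller: it assumes pCtx and an open pRepo are obtained elsewhere and that the two HIDs name changesets in the version-control DAG. The iteration uses the SG_rbtree iterator calls that appear in the later examples.

static void my_list_new_nodes(SG_context * pCtx, SG_repo * pRepo,
							  const char * pszOldNodeHid, const char * pszNewNodeHid)
{
	SG_rbtree * prbNewNodeHids = NULL;
	SG_rbtree_iterator * pIter = NULL;
	const char * pszHid_k = NULL;
	SG_bool b = SG_FALSE;

	// throws unless pszNewNodeHid is a descendant of pszOldNodeHid.
	SG_ERR_CHECK(  SG_dagquery__new_since(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL,
										  pszOldNodeHid, pszNewNodeHid, &prbNewNodeHids)  );

	// the returned rbtree is keyed by the HIDs of the new nodes.
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIter, prbNewNodeHids, &b, &pszHid_k, NULL)  );
	while (b)
	{
		// ... use pszHid_k ...
		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIter, &b, &pszHid_k, NULL)  );
	}

fail:
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);
	SG_RBTREE_NULLFREE(pCtx, prbNewNodeHids);
}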
Example #2
void SG_dagfrag__alloc(SG_context * pCtx,
					   SG_dagfrag ** ppNew,
					   const char* psz_repo_id,
					   const char* psz_admin_id,
					   SG_uint32 iDagNum)
{
	SG_dagfrag * pFrag = NULL;

	SG_NULLARGCHECK_RETURN(ppNew);
	SG_ARGCHECK_RETURN( (iDagNum != SG_DAGNUM__NONE), iDagNum );
	SG_ERR_CHECK_RETURN(  SG_gid__argcheck(pCtx, psz_repo_id)  );
	SG_ERR_CHECK_RETURN(  SG_gid__argcheck(pCtx, psz_admin_id)  );

    SG_ERR_CHECK(  SG_alloc(pCtx, 1, sizeof(SG_dagfrag), &pFrag)  );

    SG_ERR_CHECK(  SG_strcpy(pCtx, pFrag->m_sz_repo_id, sizeof(pFrag->m_sz_repo_id), psz_repo_id)  );
    SG_ERR_CHECK(  SG_strcpy(pCtx, pFrag->m_sz_admin_id, sizeof(pFrag->m_sz_admin_id), psz_admin_id)  );

    pFrag->m_iDagNum = iDagNum;

	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx,&pFrag->m_pRB_Cache)  );

	pFrag->m_pRB_GenerationSortedMemberCache = NULL;		// only create this if needed

	*ppNew = pFrag;
	return;

fail:
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
}
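A brief, hypothetical caller for the allocator above. psz_repo_id and psz_admin_id are assumed to be the repository's GIDs (they are validated with SG_gid__argcheck), and using the version-control DAG number is an example choice.

static void my_alloc_frag(SG_context * pCtx,
						  const char * psz_repo_id, const char * psz_admin_id)
{
	SG_dagfrag * pFrag = NULL;

	SG_ERR_CHECK(  SG_dagfrag__alloc(pCtx, &pFrag, psz_repo_id, psz_admin_id,
									 SG_DAGNUM__VERSION_CONTROL)  );

	// ... populate the fragment, e.g. with SG_dagfrag__load_from_repo__one() ...

fail:
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
}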
Example #3
static void _my_create_generation_sorted_member_cache(SG_context * pCtx,
													  SG_dagfrag * pFrag)
{
	// everything in the main CACHE is (implicitly) sorted by HID.  there are
	// times when we need this list sorted by generation.
	//
	// here we construct a copy of the CACHE in that alternate ordering.  we
	// simply borrow the associated data pointers of the items in the real
	// CACHE, so we don't own/free them.  we only include items of type
	// START_MEMBER and INTERIOR_MEMBER.
	//
	// WARNING: whenever you change the CACHE (such as during __add_{leaf,leaves}()),
	// WARNING: you must delete/recreate or likewise update this copy.

	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;

	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx,
									&pFrag->m_pRB_GenerationSortedMemberCache)  );
	SG_ERR_CHECK(  SG_rbtree__foreach(pCtx,
									  pFrag->m_pRB_Cache,
									  _sg_dagfrag__my_create_generation_sorted_member_cache_callback,
									  pFrag)  );
	return;

fail:
	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;
}
void SG_repo__install_implementation(
        SG_context* pCtx, 
        sg_repo__vtable* pvtable
        )
{
    if (!g_prb_repo_vtables)
    {
        SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx, &g_prb_repo_vtables)  );
    }

    SG_ERR_CHECK_RETURN(  SG_rbtree__add__with_assoc(pCtx, g_prb_repo_vtables, pvtable->pszStorage, pvtable)  );
}
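A registration sketch for a hypothetical storage backend. Only the pszStorage member is visible in the function above; a real sg_repo__vtable also carries the backend's function pointers, which are elided here.

// hypothetical backend vtable; only the key used above is shown.
static sg_repo__vtable s_my_vtable = { .pszStorage = "my_storage_backend" };

void my_backend__register(SG_context * pCtx)
{
	// adds the vtable to g_prb_repo_vtables, keyed by its storage name.
	SG_ERR_CHECK_RETURN(  SG_repo__install_implementation(pCtx, &s_my_vtable)  );
}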
void SG_jscore__new_runtime(
	SG_context * pCtx,
	JSContextCallback cb,
	JSFunctionSpec *shell_functions,
	SG_bool bSkipModules,
	JSRuntime **ppRt
	)
{
	char * szSsjsMutable = NULL;

	if(gpJSCoreGlobalState != NULL)
		SG_ERR_THROW_RETURN(SG_ERR_ALREADY_INITIALIZED);

	SG_ERR_CHECK(  SG_alloc1(pCtx, gpJSCoreGlobalState)  );

	// Store this for later.
	gpJSCoreGlobalState->cb = cb;
	gpJSCoreGlobalState->shell_functions = shell_functions;
	gpJSCoreGlobalState->bSkipModules = bSkipModules;

	if (! bSkipModules)
		SG_ERR_CHECK(  _sg_jscore_getpaths(pCtx)  );

	SG_ERR_CHECK(  SG_mutex__init(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &gpJSCoreGlobalState->prbJSMutexes)  );

	// Start up SpiderMonkey.
	JS_SetCStringsAreUTF8();
	gpJSCoreGlobalState->rt = JS_NewRuntime(64L * 1024L * 1024L); // TODO decide the right size here
	if(gpJSCoreGlobalState->rt==NULL)
		SG_ERR_THROW2(SG_ERR_JS, (pCtx, "Failed to allocate JS Runtime"));

	if (ppRt)
		*ppRt = gpJSCoreGlobalState->rt;

	return;
fail:
	SG_NULLFREE(pCtx, szSsjsMutable);

	SG_PATHNAME_NULLFREE(pCtx, gpJSCoreGlobalState->pPathToDispatchDotJS);
	SG_PATHNAME_NULLFREE(pCtx, gpJSCoreGlobalState->pPathToModules);
}
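A minimal start-up sketch. Passing NULL for the context callback and shell function table and SG_TRUE for bSkipModules is an assumption about what a caller may omit; the runtime pointer is optional and is requested here only to show the out-parameter.

void my_app__init_js(SG_context * pCtx)
{
	JSRuntime * pRt = NULL;

	// must be called exactly once; a second call throws SG_ERR_ALREADY_INITIALIZED.
	SG_ERR_CHECK_RETURN(  SG_jscore__new_runtime(pCtx, NULL, NULL, SG_TRUE, &pRt)  );

	// pRt now references the shared runtime owned by gpJSCoreGlobalState.
}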
Example #6
void SG_dagfrag__alloc_transient(SG_context * pCtx,
								 SG_dagfrag ** ppNew)
{
	SG_dagfrag * pFrag = NULL;

	SG_NULLARGCHECK_RETURN(ppNew);

    SG_ERR_CHECK(  SG_alloc(pCtx, 1, sizeof(SG_dagfrag), &pFrag)  );

	pFrag->m_sz_repo_id[0] = '*';
	pFrag->m_sz_admin_id[0] = '*';
    pFrag->m_iDagNum = SG_DAGNUM__NONE;

	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx,&pFrag->m_pRB_Cache)  );

	pFrag->m_pRB_GenerationSortedMemberCache = NULL;		// only create this if needed

	*ppNew = pFrag;
	return;

fail:
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
}
Example #7
void sg_pack__do_blob(SG_context* pCtx, const char* psz_gid, const char* psz_hid, SG_int32 gen, SG_rbtree* prb_blobs, SG_rbtree* prb_new)
{
    SG_rbtree* prb = NULL;
    SG_bool b = SG_FALSE;
    char buf[64];

    SG_ERR_CHECK(  SG_rbtree__find(pCtx, prb_new, psz_hid, &b, NULL)  );
    if (b)
    {
        SG_ERR_CHECK(  SG_rbtree__find(pCtx, prb_blobs, psz_gid, &b, (void**) &prb)  );
        if (!b)
        {
            SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb)  );
            SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx, prb_blobs, psz_gid, prb)  );
        }
        SG_ERR_CHECK(  SG_sprintf(pCtx, buf, sizeof(buf), "%05d", (int) gen)  );
        SG_ERR_CHECK(  SG_rbtree__add__with_pooled_sz(pCtx, prb, buf, psz_hid)  );
    }

    return;

fail:
    return;
}
void SG_mrg_cset_entry_conflict__append_change(SG_context * pCtx,
											   SG_mrg_cset_entry_conflict * pMrgCSetEntryConflict,
											   SG_mrg_cset_entry * pMrgCSetEntry_Leaf_k,
											   SG_mrg_cset_entry_neq neq)
{
	SG_NULLARGCHECK_RETURN(pMrgCSetEntryConflict);
	SG_NULLARGCHECK_RETURN(pMrgCSetEntry_Leaf_k);

	if (!pMrgCSetEntryConflict->pVec_MrgCSetEntry_Changes)
		SG_ERR_CHECK_RETURN(  SG_vector__alloc(pCtx,&pMrgCSetEntryConflict->pVec_MrgCSetEntry_Changes,2)  );

	SG_ERR_CHECK_RETURN(  SG_vector__append(pCtx,pMrgCSetEntryConflict->pVec_MrgCSetEntry_Changes,(void *)pMrgCSetEntry_Leaf_k,NULL)  );

	if (!pMrgCSetEntryConflict->pVec_MrgCSetEntryNeq_Changes)
		SG_ERR_CHECK_RETURN(  SG_vector_i64__alloc(pCtx,&pMrgCSetEntryConflict->pVec_MrgCSetEntryNeq_Changes,2)  );

	SG_ERR_CHECK_RETURN(  SG_vector_i64__append(pCtx,pMrgCSetEntryConflict->pVec_MrgCSetEntryNeq_Changes,(SG_int64)neq,NULL)  );

	//////////////////////////////////////////////////////////////////
	// add the value of the changed fields to the prbUnique_ rbtrees so that we can get a count of the unique new values.
	//
	//////////////////////////////////////////////////////////////////
	// the values for RENAME, MOVE, ATTRBITS, SYMLINKS, and SUBMODULES are collapsible.  that is, if we
	// have something like:
	//        A
	//       / \.
	//     L0   a0
	//         /  \.
	//        L1   L2
	//
	// and a rename in each Leaf, then we can either:
	// [a] prompt for them to choose L1 or L2's name and then
	//     prompt for them to choose L0 or the name from step 1.
	//
	// [b] prompt for them to choose L0, L1, or L2 in one question.
	//
	// unlike file-content-merging, the net-net is that we have 1 new value
	// that is one of the inputs (or maybe we let them pick a new one), but
	// it is not a combination of them and so we don't need to display the
	// immediate ancestor in the prompt.
	//
	// so we carry-forward the unique values from the leaves for each of
	// these fields.  so the final merge-result may have more unique values
	// than it has direct parents.
	//////////////////////////////////////////////////////////////////

	if (neq & SG_MRG_CSET_ENTRY_NEQ__ATTRBITS)
	{
		SG_int_to_string_buffer buf;
		SG_int64_to_sz((SG_int64)pMrgCSetEntry_Leaf_k->attrBits, buf);

		if (!pMrgCSetEntryConflict->prbUnique_AttrBits)
			SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx,&pMrgCSetEntryConflict->prbUnique_AttrBits)  );

		SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx,pMrgCSetEntryConflict->prbUnique_AttrBits,buf,pMrgCSetEntry_Leaf_k)  );

		if (pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict && pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_AttrBits)
			SG_ERR_CHECK_RETURN(  _carry_forward_unique_values(pCtx,
															   pMrgCSetEntryConflict->prbUnique_AttrBits,
															   pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_AttrBits)  );
	}

	if (neq & SG_MRG_CSET_ENTRY_NEQ__ENTRYNAME)
	{
		if (!pMrgCSetEntryConflict->prbUnique_Entryname)
			SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx,&pMrgCSetEntryConflict->prbUnique_Entryname)  );

		SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx,
												 pMrgCSetEntryConflict->prbUnique_Entryname,
												 SG_string__sz(pMrgCSetEntry_Leaf_k->pStringEntryname),
												 pMrgCSetEntry_Leaf_k)  );

		if (pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict && pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_Entryname)
			SG_ERR_CHECK_RETURN(  _carry_forward_unique_values(pCtx,
															   pMrgCSetEntryConflict->prbUnique_Entryname,
															   pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_Entryname)  );
	}

	if (neq & SG_MRG_CSET_ENTRY_NEQ__GID_PARENT)
	{
		if (!pMrgCSetEntryConflict->prbUnique_GidParent)
			SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx,&pMrgCSetEntryConflict->prbUnique_GidParent)  );

		SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx,pMrgCSetEntryConflict->prbUnique_GidParent,pMrgCSetEntry_Leaf_k->bufGid_Parent,pMrgCSetEntry_Leaf_k)  );

		if (pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict && pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_GidParent)
			SG_ERR_CHECK_RETURN(  _carry_forward_unique_values(pCtx,
															   pMrgCSetEntryConflict->prbUnique_GidParent,
															   pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_GidParent)  );
	}

	if (neq & SG_MRG_CSET_ENTRY_NEQ__SYMLINK_HID_BLOB)
	{
		if (!pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob)
			SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx,&pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob)  );

		SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx,pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob,pMrgCSetEntry_Leaf_k->bufHid_Blob,pMrgCSetEntry_Leaf_k)  );

		if (pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict && pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob)
			SG_ERR_CHECK_RETURN(  _carry_forward_unique_values(pCtx,
															   pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob,
															   pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob)  );
	}

	if (neq & SG_MRG_CSET_ENTRY_NEQ__SUBMODULE_HID_BLOB)
	{
		if (!pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob)
			SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx,&pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob)  );

		SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx,pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob,pMrgCSetEntry_Leaf_k->bufHid_Blob,pMrgCSetEntry_Leaf_k)  );

		if (pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict && pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob)
			SG_ERR_CHECK_RETURN(  _carry_forward_unique_values(pCtx,
															   pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob,
															   pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob)  );
	}

	// 2010/09/13 Update: we now do the carry-forward on the set of
	//            unique HIDs for the various versions of the file
	//            content from each of the leaves.  This lets us
	//            completely flatten the sub-merges into one final
	//            result (with up to n values).
	//
	//            This means we won't be creating the auto-merge-plan
	//            at this point.
	//
	//            The problem with the auto-merge-plan as originally
	//            designed is that it was being driven based upon
	//            the overall topology of the DAG as a whole rather
	//            than the topology/history of the individual file.
	//            And by respecting the history of the individual
	//            file, I think we can get closer ancestors and better
	//            per-file merging and perhaps fewer criss-crosses
	//            and/or we push all of these issues to RESOLVE.

	if (neq & SG_MRG_CSET_ENTRY_NEQ__FILE_HID_BLOB)
	{
		if (!pMrgCSetEntryConflict->prbUnique_File_HidBlob)
			SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx,&pMrgCSetEntryConflict->prbUnique_File_HidBlob)  );

		SG_ASSERT(  (pMrgCSetEntry_Leaf_k->bufHid_Blob[0])  );

		// TODO 2010/09/13 the code that sets __FILE_HID_BLOB probably cannot tell
		// TODO            whether this branch did not change the file content
		// TODO            relative to the LCA or whether it did change it back to
		// TODO            the original value (an UNDO of the edits).  I would argue
		// TODO            that we should not list the former as a change, but that
		// TODO            we SHOULD list the latter.  The fix doesn't belong here,
		// TODO            but this is just where I was typing when I thought of it.
		
		SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx,pMrgCSetEntryConflict->prbUnique_File_HidBlob,pMrgCSetEntry_Leaf_k->bufHid_Blob,pMrgCSetEntry_Leaf_k)  );

		if (pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict && pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_File_HidBlob)
			SG_ERR_CHECK_RETURN(  _carry_forward_unique_values(pCtx,
															   pMrgCSetEntryConflict->prbUnique_File_HidBlob,
															   pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_File_HidBlob)  );
	}
}
void SG_dagquery__highest_revno_common_ancestor(
	SG_context * pCtx,
	SG_repo * pRepo,
	SG_uint64 dagnum,
	const SG_stringarray * pInputNodeHids,
	char ** ppOutputNodeHid
	)
{
	const char * const * paszInputNodeHids = NULL;
	SG_uint32 countInputNodes = 0;
	SG_repo_fetch_dagnodes_handle * pDagnodeFetcher = NULL;
	_hrca_work_queue_t workQueue = {NULL, 0, 0, NULL};
	SG_uint32 i;
	SG_dagnode * pDagnode = NULL;
	const char * pszHidRef = NULL;
	SG_bitvector * pIsAncestorOf = NULL;
	SG_uint32 countIsAncestorOf = 0;

	SG_ASSERT(pCtx!=NULL);
	SG_NULLARGCHECK(pRepo);
	SG_NULLARGCHECK(pInputNodeHids);
	SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, pInputNodeHids, &paszInputNodeHids, &countInputNodes)  );
	SG_ARGCHECK(countInputNodes>0, pInputNodeHids);
	SG_NULLARGCHECK(ppOutputNodeHid);

	SG_ERR_CHECK(  SG_repo__fetch_dagnodes__begin(pCtx, pRepo, dagnum, &pDagnodeFetcher)  );

	SG_ERR_CHECK(  SG_allocN(pCtx, _HRCA_WORK_QUEUE_INIT_LENGTH, workQueue.p)  );
	workQueue.allocatedLength = _HRCA_WORK_QUEUE_INIT_LENGTH;
	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &workQueue.pRevnoCache)  );

	SG_ERR_CHECK(  SG_BITVECTOR__ALLOC(pCtx, &pIsAncestorOf, countInputNodes)  );
	for(i=0; i<countInputNodes; ++i)
	{
		SG_ERR_CHECK(  SG_bitvector__zero(pCtx, pIsAncestorOf)  );
		SG_ERR_CHECK(  SG_bitvector__set_bit(pCtx, pIsAncestorOf, i, SG_TRUE)  );
		SG_ERR_CHECK(  _hrca_work_queue__insert(pCtx, &workQueue, paszInputNodeHids[i], pRepo, pDagnodeFetcher, pIsAncestorOf)  );
	}
	SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

	SG_ERR_CHECK(  _hrca_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &pIsAncestorOf)  );
	SG_ERR_CHECK(  SG_bitvector__count_set_bits(pCtx, pIsAncestorOf, &countIsAncestorOf)  );
	while(countIsAncestorOf < countInputNodes)
	{
		SG_uint32 count_parents = 0;
		const char** parents = NULL;
		SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pDagnode, &count_parents, &parents)  );
		for(i=0; i<count_parents; ++i)
			SG_ERR_CHECK(  _hrca_work_queue__insert(pCtx, &workQueue, parents[i], pRepo, pDagnodeFetcher, pIsAncestorOf)  );
		
		SG_DAGNODE_NULLFREE(pCtx, pDagnode);
		SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

		SG_ERR_CHECK(  _hrca_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &pIsAncestorOf)  );
		SG_ERR_CHECK(  SG_bitvector__count_set_bits(pCtx, pIsAncestorOf, &countIsAncestorOf)  );
	}

	SG_ERR_CHECK(  SG_strdup(pCtx, pszHidRef, ppOutputNodeHid)  );

	SG_DAGNODE_NULLFREE(pCtx, pDagnode);
	SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

	for(i=0; i<workQueue.length; ++i)
	{
		SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
		SG_BITVECTOR_NULLFREE(pCtx, workQueue.p[i].pIsAncestorOf);
	}
	SG_NULLFREE(pCtx, workQueue.p);
	SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);

	SG_ERR_CHECK(  SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pDagnodeFetcher)  );

	return;
fail:
	for(i=0; i<workQueue.length; ++i)
	{
		SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
		SG_BITVECTOR_NULLFREE(pCtx, workQueue.p[i].pIsAncestorOf);
	}
	SG_NULLFREE(pCtx, workQueue.p);
	SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);

	SG_DAGNODE_NULLFREE(pCtx, pDagnode);
	SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

	if(pDagnodeFetcher!=NULL)
	{
		SG_ERR_IGNORE(  SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pDagnodeFetcher)  );
	}
}
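A usage sketch for the ancestor query above, assuming pszHid1 and pszHid2 name existing changesets in the version-control DAG; the caller owns the returned HID string and frees it with SG_NULLFREE.

static void my_find_ancestor(SG_context * pCtx, SG_repo * pRepo,
							 const char * pszHid1, const char * pszHid2,
							 char ** ppszAncestorHid)
{
	SG_stringarray * psaInputs = NULL;

	SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psaInputs, 2)  );
	SG_ERR_CHECK(  SG_stringarray__add(pCtx, psaInputs, pszHid1)  );
	SG_ERR_CHECK(  SG_stringarray__add(pCtx, psaInputs, pszHid2)  );

	SG_ERR_CHECK(  SG_dagquery__highest_revno_common_ancestor(pCtx, pRepo,
															  SG_DAGNUM__VERSION_CONTROL,
															  psaInputs,
															  ppszAncestorHid)  );

fail:
	SG_STRINGARRAY_NULLFREE(pCtx, psaInputs);
}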
void SG_dagquery__find_new_since_common(
	SG_context * pCtx,
	SG_repo * pRepo,
	SG_uint64 dagnum,
	const char * pszOldNodeHid,
	const char * pszNewNodeHid,
	SG_stringarray ** ppResults
	)
{
	_fnsc_work_queue_t workQueue = {NULL, 0, 0, 0, NULL};
	SG_uint32 i;
	SG_dagnode * pDagnode = NULL;
	SG_stringarray * pResults = NULL;
	
	SG_ASSERT(pCtx!=NULL);
	SG_NULLARGCHECK(pRepo);
	SG_NONEMPTYCHECK(pszOldNodeHid);
	SG_NONEMPTYCHECK(pszNewNodeHid);
	SG_NULLARGCHECK(ppResults);
	
	SG_ERR_CHECK(  SG_allocN(pCtx, _FNSC_WORK_QUEUE_INIT_LENGTH, workQueue.p)  );
	workQueue.allocatedLength = _FNSC_WORK_QUEUE_INIT_LENGTH;
	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &workQueue.pRevnoCache)  );
	
	SG_ERR_CHECK(  _fnsc_work_queue__insert(pCtx, &workQueue, pszOldNodeHid, dagnum, pRepo, _ANCESTOR_OF_OLD)  );
	SG_ERR_CHECK(  _fnsc_work_queue__insert(pCtx, &workQueue, pszNewNodeHid, dagnum, pRepo, _ANCESTOR_OF_NEW)  );
	
	SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &pResults, 32)  );
	while(workQueue.numAncestorsOfNewOnTheQueue > 0)
	{
		const char * pszHidRef = NULL;
		SG_byte isAncestorOf = 0;
		
		SG_ERR_CHECK(  _fnsc_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &isAncestorOf)  );
		if (isAncestorOf==_ANCESTOR_OF_NEW)
			SG_ERR_CHECK(  SG_stringarray__add(pCtx, pResults, pszHidRef)  );
		
		{
			SG_uint32 count_parents = 0;
			const char** parents = NULL;
			SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pDagnode, &count_parents, &parents)  );
			for(i=0; i<count_parents; ++i)
				SG_ERR_CHECK(  _fnsc_work_queue__insert(pCtx, &workQueue, parents[i], dagnum, pRepo, isAncestorOf)  );
		}
		
		SG_DAGNODE_NULLFREE(pCtx, pDagnode);
	}
	
	for(i=0; i<workQueue.length; ++i)
		SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
	SG_NULLFREE(pCtx, workQueue.p);
	SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);
	
	*ppResults = pResults;

	return;
fail:
	for(i=0; i<workQueue.length; ++i)
		SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
	SG_NULLFREE(pCtx, workQueue.p);
	SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);

	SG_DAGNODE_NULLFREE(pCtx, pDagnode);

	SG_STRINGARRAY_NULLFREE(pCtx, pResults);
}
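A sketch of consuming the result, assuming pszOldHead and pszNewHead are valid changeset HIDs; SG_stringarray__sz_array_and_count is used the same way the ancestor query above uses it.

static void my_walk_new_since_common(SG_context * pCtx, SG_repo * pRepo,
									 const char * pszOldHead, const char * pszNewHead)
{
	SG_stringarray * psaResults = NULL;
	const char * const * paszHids = NULL;
	SG_uint32 count = 0;
	SG_uint32 i;

	SG_ERR_CHECK(  SG_dagquery__find_new_since_common(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL,
													   pszOldHead, pszNewHead, &psaResults)  );

	SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, psaResults, &paszHids, &count)  );
	for (i = 0; i < count; i++)
	{
		// ... paszHids[i] is an ancestor of pszNewHead that is not an ancestor of pszOldHead ...
	}

fail:
	SG_STRINGARRAY_NULLFREE(pCtx, psaResults);
}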
void SG_dagquery__find_descendant_heads(SG_context * pCtx,
										SG_repo * pRepo,
                                        SG_uint64 iDagNum,
										const char * pszHidStart,
										SG_bool bStopIfMultiple,
										SG_dagquery_find_head_status * pdqfhs,
										SG_rbtree ** pprbHeads)
{
	SG_rbtree * prbLeaves = NULL;
	SG_rbtree * prbHeadsFound = NULL;
	SG_rbtree_iterator * pIter = NULL;
	const char * pszKey_k = NULL;
	SG_bool b;
	SG_dagquery_find_head_status dqfhs;
	SG_dagquery_relationship dqRel;
	SG_uint32 nrFound;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NONEMPTYCHECK_RETURN(pszHidStart);
	SG_NULLARGCHECK_RETURN(pdqfhs);
	SG_NULLARGCHECK_RETURN(pprbHeads);

	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prbHeadsFound)  );

	// fetch a list of all of the LEAVES in the DAG.
	// this rbtree only contains keys; no assoc values.

	SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagNum, &prbLeaves)  );

	// if the starting point is a leaf, then we are done (we don't care how many
	// other leaves are in the rbtree; none of them can be a descendant of ours,
	// since we are a leaf).

	SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbLeaves, pszHidStart, &b, NULL)  );
	if (b)
	{
		SG_ERR_CHECK(  SG_rbtree__add(pCtx, prbHeadsFound, pszHidStart)  );

		dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__IS_LEAF;
		goto done;
	}

	// inspect each leaf and qualify it; put the ones that pass
	// into the list of actual heads.

	nrFound = 0;
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIter, prbLeaves, &b, &pszKey_k, NULL)  );
	while (b)
	{
		// is head[k] a descendant of start?
		SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, iDagNum, pszKey_k, pszHidStart,
															 SG_FALSE, // we care about descendants, so don't skip
															 SG_TRUE,  // we don't care about ancestors, so skip them
															 &dqRel)  );

		if (dqRel == SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
		{
			nrFound++;

			if (bStopIfMultiple && (nrFound > 1))
			{
				// they wanted a unique answer and we've found too many answers
				// (which they won't be able to use anyway) so just stop and
				// return the status.  (we delete prbHeadsFound because it is
				// incomplete and so that they won't be tempted to use it.)

				SG_RBTREE_NULLFREE(pCtx, prbHeadsFound);
				dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE;
				goto done;
			}

			SG_ERR_CHECK(  SG_rbtree__add(pCtx, prbHeadsFound, pszKey_k)  );
		}

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIter, &b, &pszKey_k, NULL)  );
	}

	switch (nrFound)
	{
	case 0:
		// this should NEVER happen.  we should always be able to find a
		// leaf/head for a node.
		//
		// TODO the only case where this might happen is if named branches
		// TODO cause the leaf to be disqualified.  so i'm going to THROW
		// TODO here rather than ASSERT.

		SG_ERR_THROW2(  SG_ERR_DAG_NOT_CONSISTENT,
						(pCtx, "Could not find head/leaf for changeset [%s]", pszHidStart)  );
		break;

	case 1:
		dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__UNIQUE;
		break;

	default:
		dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE;
		break;
	}

done:
	*pprbHeads = prbHeadsFound;
	prbHeadsFound = NULL;

	*pdqfhs = dqfhs;

fail:
	SG_RBTREE_NULLFREE(pCtx, prbLeaves);
	SG_RBTREE_NULLFREE(pCtx, prbHeadsFound);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);
}
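A hypothetical caller that collects every head above a starting changeset. Passing SG_TRUE for bStopIfMultiple instead would return early with the __MULTIPLE status and no rbtree contents, as the function above describes.

static void my_list_heads(SG_context * pCtx, SG_repo * pRepo, const char * pszHidStart)
{
	SG_rbtree * prbHeads = NULL;
	SG_rbtree_iterator * pIter = NULL;
	SG_dagquery_find_head_status dqfhs;
	const char * pszHead_k = NULL;
	SG_bool b = SG_FALSE;

	// SG_FALSE: collect all descendant heads rather than stopping at the second one.
	SG_ERR_CHECK(  SG_dagquery__find_descendant_heads(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL,
													   pszHidStart, SG_FALSE, &dqfhs, &prbHeads)  );

	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIter, prbHeads, &b, &pszHead_k, NULL)  );
	while (b)
	{
		// ... use pszHead_k; dqfhs tells whether the start was itself a leaf,
		// had a unique head, or had several ...
		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIter, &b, &pszHead_k, NULL)  );
	}

fail:
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);
	SG_RBTREE_NULLFREE(pCtx, prbHeads);
}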
void SG_repo__db__calc_delta(
        SG_context * pCtx,
        SG_repo* pRepo,
        SG_uint64 dagnum,
        const char* psz_csid_from,
        const char* psz_csid_to,
        SG_uint32 flags,
        SG_vhash** ppvh_add,
        SG_vhash** ppvh_remove
        )
{
    SG_dagnode* pdn_from = NULL;
    SG_dagnode* pdn_to = NULL;
    SG_int32 gen_from = -1;
    SG_int32 gen_to = -1;
    SG_varray* pva_direct_backward_path = NULL;
    SG_varray* pva_direct_forward_path = NULL;
    SG_vhash* pvh_add = NULL;
    SG_vhash* pvh_remove = NULL;
    SG_rbtree* prb_temp = NULL;
    SG_daglca* plca = NULL;
    char* psz_csid_ancestor = NULL;

    SG_NULLARGCHECK_RETURN(psz_csid_from);
    SG_NULLARGCHECK_RETURN(psz_csid_to);
    SG_NULLARGCHECK_RETURN(pRepo);
    SG_NULLARGCHECK_RETURN(ppvh_add);
    SG_NULLARGCHECK_RETURN(ppvh_remove);

    SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_from, &pdn_from)  );
    SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pdn_from, &gen_from)  );
    SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_to, &pdn_to)  );
    SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pdn_to, &gen_to)  );

    if (gen_from > gen_to)
    {
        SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    psz_csid_from,
                    psz_csid_to,
                    &pva_direct_backward_path
                    )  );
        if (pva_direct_backward_path)
        {
            SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
            SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
            SG_ERR_CHECK(  SG_db__make_delta_from_path(
                        pCtx,
                        pRepo,
                        dagnum,
                        pva_direct_backward_path,
                        flags,
                        pvh_add,
                        pvh_remove
                        )  );
        }
    }
    else if (gen_from < gen_to)
    {
        SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    psz_csid_to,
                    psz_csid_from,
                    &pva_direct_forward_path
                    )  );
        if (pva_direct_forward_path)
        {
            SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
            SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
            SG_ERR_CHECK(  SG_db__make_delta_from_path(
                        pCtx,
                        pRepo,
                        dagnum,
                        pva_direct_forward_path,
                        flags,
                        pvh_remove,
                        pvh_add
                        )  );
        }
    }

    if (!pvh_add && !pvh_remove)
    {
        SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_temp)  );
        SG_ERR_CHECK(  SG_rbtree__add(pCtx,prb_temp,psz_csid_from)  );
        SG_ERR_CHECK(  SG_rbtree__add(pCtx,prb_temp,psz_csid_to)  );
        SG_ERR_CHECK(  SG_repo__get_dag_lca(pCtx,pRepo,dagnum,prb_temp,&plca)  );
        {
            const char* psz_hid = NULL;
            SG_daglca_node_type node_type = 0;
            SG_int32 gen = -1;

            SG_ERR_CHECK(  SG_daglca__iterator__first(pCtx,
                                                      NULL,
                                                      plca,
                                                      SG_FALSE,
                                                      &psz_hid,
                                                      &node_type,
                                                      &gen,
                                                      NULL)  );
            SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_hid, &psz_csid_ancestor)  );
        }

        SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    psz_csid_from,
                    psz_csid_ancestor,
                    &pva_direct_backward_path
                    )  );
        SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    psz_csid_to,
                    psz_csid_ancestor,
                    &pva_direct_forward_path
                    )  );
        SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
        SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
        SG_ERR_CHECK(  SG_db__make_delta_from_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    pva_direct_backward_path,
                    flags,
                    pvh_add,
                    pvh_remove
                    )  );
        SG_ERR_CHECK(  SG_db__make_delta_from_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    pva_direct_forward_path,
                    flags,
                    pvh_remove,
                    pvh_add
                    )  );
    }

    *ppvh_add = pvh_add;
    pvh_add = NULL;

    *ppvh_remove = pvh_remove;
    pvh_remove = NULL;

fail:
    SG_NULLFREE(pCtx, psz_csid_ancestor);
    SG_RBTREE_NULLFREE(pCtx, prb_temp);
    SG_DAGLCA_NULLFREE(pCtx, plca);
    SG_VHASH_NULLFREE(pCtx, pvh_add);
    SG_VHASH_NULLFREE(pCtx, pvh_remove);
    SG_VARRAY_NULLFREE(pCtx, pva_direct_backward_path);
    SG_VARRAY_NULLFREE(pCtx, pva_direct_forward_path);
    SG_DAGNODE_NULLFREE(pCtx, pdn_from);
    SG_DAGNODE_NULLFREE(pCtx, pdn_to);
}
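A usage sketch for the delta calculation; passing 0 for flags is an assumption (the function above simply forwards flags to SG_db__make_delta_from_path), and both vhashes are owned and freed by the caller.

static void my_db_delta(SG_context * pCtx, SG_repo * pRepo, SG_uint64 dagnum,
						const char * psz_csid_from, const char * psz_csid_to)
{
	SG_vhash * pvh_add = NULL;
	SG_vhash * pvh_remove = NULL;

	SG_ERR_CHECK(  SG_repo__db__calc_delta(pCtx, pRepo, dagnum,
											psz_csid_from, psz_csid_to,
											0 /* flags: assumption */,
											&pvh_add, &pvh_remove)  );

	// ... pvh_add and pvh_remove describe the record-level delta between the two changesets ...

fail:
	SG_VHASH_NULLFREE(pCtx, pvh_add);
	SG_VHASH_NULLFREE(pCtx, pvh_remove);
}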
Example #13
void SG_dagfrag__load_from_repo__one(SG_context * pCtx,
									 SG_dagfrag * pFrag,
									 SG_repo* pRepo,
									 const char * szHidStart,
									 SG_int32 nGenerations)
{
	// load a fragment of the dag starting with the given dagnode
	// for nGenerations of parents.
	//
	// we add this portion of the graph to whatever we already
	// have in our fragment.  this may either augment (give us
	// a larger connected piece) or it may be an independent
	// subset.
	//
	// if nGenerations <= 0, load everything from this starting point
	// back to the NULL/root.
	//
	// generationStart is the generation of the starting dagnode.
	//
	// the starting dagnode *MAY* be in the final start-fringe.
	// normally, it will be.  but if we are called multiple times
	// (and have more than one start point), it may be the case
	// that this node is a parent of one of the other start points.
	//
	// we compute generationEnd as the generation that we will NOT
	// include in the fragment; nodes of that generation will be in
	// the end-fringe.  that is, we include [start...end) like most
	// C++ iterators.

	_my_data * pMyDataCached = NULL;
	SG_dagnode * pDagnodeAllocated = NULL;
	SG_dagnode * pDagnodeStart;
	SG_int32 generationStart, generationEnd;
	SG_bool bPresent = SG_FALSE;
    SG_rbtree* prb_WorkQueue = NULL;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NONEMPTYCHECK_RETURN(szHidStart);

	// if we are extending the fragment, delete the generation-sorted
	// member cache copy.  (see __foreach_member()).  it's either that
	// or update it in parallel as we change the real CACHE and that
	// doesn't seem worth the bother.

	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_WorkQueue)  );

	// fetch the starting dagnode and compute the generation bounds.
	// first, see if the cache already has info for this dagnode.
	// if not, fetch it from the source and then add it to the cache.

	SG_ERR_CHECK(  _cache__lookup(pCtx, pFrag,szHidStart,&pMyDataCached,&bPresent)  );
	if (!bPresent)
	{
		if (!pRepo)
			SG_ERR_THROW(  SG_ERR_INVALID_WHILE_FROZEN  );

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, szHidStart, &pDagnodeAllocated)  );

		pDagnodeStart = pDagnodeAllocated;
	}
	else
	{
		pDagnodeStart = pMyDataCached->m_pDagnode;
	}

	SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pDagnodeStart,&generationStart)  );
	SG_ASSERT_RELEASE_FAIL2(  (generationStart > 0),
					  (pCtx,"Invalid generation value [%d] for dagnode [%s]",
					   generationStart,szHidStart)  );
	if ((nGenerations <= 0)  ||  (generationStart <= nGenerations))
		generationEnd = 0;
	else
		generationEnd = generationStart - nGenerations;

	if (!bPresent)
	{
		// this dagnode was not already present in the cache.
		// add it to the cache directly and set the state.
		// we don't need to go thru the work queue for it.
		//
		// then add all of its parents to the work queue.

		SG_ERR_CHECK(  _cache__add__dagnode(pCtx,
											pFrag,
											generationStart,
											pDagnodeAllocated,SG_DFS_START_MEMBER,
											&pMyDataCached)  );
		pDagnodeAllocated = NULL;

		SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
	}
	else
	{
		// the node was already present in the cache, so we have already
		// walked at least part of the graph around it.

		switch (pMyDataCached->m_state)
		{
		default:
		//case SG_DFS_UNKNOWN:
			SG_ASSERT_RELEASE_FAIL2(  (0),
							  (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
							   pMyDataCached->m_state,szHidStart)  );

		case SG_DFS_INTERIOR_MEMBER:				// already in fragment
		case SG_DFS_START_MEMBER:	// already in fragment, duplicated leaf?
			if (generationEnd < pMyDataCached->m_genDagnode)
			{
				// they've expanded the bounds of the fragment since we
				// last visited this dagnode.  keep this dagnode in the
				// fragment and revisit the ancestors in case any were
				// put in the end-fringe that should now be included.
				//
				// we leave the state as INTERIOR_MEMBER or START_MEMBER
				// because a duplicate start point should remain a
				// start point.

				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
			}
			else
			{
				// if the current end-generation requested is >= the previous
				// end-generation, then we've completely explored this dagnode
				// already.  that is, a complete walk from this node for nGenerations
				// would not reveal any new information.
			}
			break;

		case SG_DFS_END_FRINGE:
			{
				// they want to start at a dagnode that we put in the
				// end-fringe.  this can happen if they need to expand
				// the bounds of the fragment to include older ancestors.
				//
				// we do not mark this as a start node because someone
				// else already has it as a parent.

				pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
			}
			break;
		}
	}

	// we optionally put the parents of the current node into the work queue.
	//
	// service the work queue until it is empty.  this allows us to walk the graph without
	// recursion.  that is, as we decide what to do with a node, we add the parents
	// to the queue.  we then iterate thru the work queue until we have dealt with
	// everything -- that is, until all parents have been properly placed.
	//
	// we cannot use a standard iterator to drive this loop because we
	// modify the queue.

	while (1)
	{
		_process_work_queue_item(pCtx, pFrag,prb_WorkQueue,generationEnd,pRepo);
		if (!SG_context__has_err(pCtx))
			break;							// we processed everything in the queue and are done

		if (!SG_context__err_equals(pCtx,SG_ERR_RESTART_FOREACH))
			SG_ERR_RETHROW;

		SG_context__err_reset(pCtx);		// queue changed, restart iteration
	}

	SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);

	/*
	** we have loaded a piece of the dag (starting with the given start node
	** and tracing all parent edges back n generations).  we leave with everything
	** in our progress queues so that other start nodes can be added to the
	** fragment.  this allows the processing of subsequent start nodes to
	** override some of the decisions that we made.  for example:
	**
	**           Q_15
	**             |
	**             |
	**           Z_16
	**           /  \
	**          /    \
	**      Y_17      A_17
	**          \    /   \
	**           \  /     \
	**           B_18     C_18
	**             |
	**             |
	**           D_19
	**             |
	**             |
	**           E_20
	**
	** if we started with the leaf E_20 and requested 3 generations, we would have:
	**     start_set := { E }
	**     include_set := { B, D, E }
	**     end_set := { Y, A }
	**
	** after a subsequent call with the leaf C_18 and 3 generations, we would have:
	**     start_set := { C, E }
	**     include_set := { Z, A, B, C, D, E }
	**     end_set := { Q, Y }
	**
	*/

	return;

fail:
	SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);
	SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
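A short sketch of extending a fragment that was allocated as in Example #2 above; the 10-generation depth is an arbitrary example value, and a value <= 0 would load all the way back to the root.

static void my_extend_frag(SG_context * pCtx, SG_dagfrag * pFrag, SG_repo * pRepo,
						   const char * pszHidHead)
{
	// pull pszHidHead plus 10 generations of its ancestors into pFrag.
	SG_ERR_CHECK_RETURN(  SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, pszHidHead, 10)  );
}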
Example #14
void SG_repo__pack__vcdiff(SG_context* pCtx, SG_repo * pRepo)
{
	SG_rbtree* prb_leaves = NULL;
	SG_uint32 count_leaves = 0;
    const char* psz_hid_cs = NULL;
    SG_rbtree* prb_blobs = NULL;
    SG_bool b;
    SG_rbtree_iterator* pit = NULL;
    SG_rbtree_iterator* pit_for_gid = NULL;
    SG_bool b_for_gid;
    const char* psz_hid_ref = NULL;
    const char* psz_hid_blob = NULL;
    const char* psz_gid = NULL;
    SG_rbtree* prb = NULL;
    const char* psz_gen = NULL;
	SG_repo_tx_handle* pTx;

    SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo,SG_DAGNUM__VERSION_CONTROL,&prb_leaves)  );
    SG_ERR_CHECK(  SG_rbtree__count(pCtx, prb_leaves, &count_leaves)  );

    SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, prb_leaves, &b, &psz_hid_cs, NULL)  );

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_blobs)  );

    SG_ERR_CHECK(  sg_pack__do_changeset(pCtx, pRepo, psz_hid_cs, prb_blobs)  );

    SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prb_blobs, &b, &psz_gid, (void**) &prb)  );
    while (b)
    {
        SG_uint32 count_for_gid = 0;
        SG_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count_for_gid)  );
        if (count_for_gid > 1)
        {
            psz_hid_ref = NULL;
            SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit_for_gid, prb, &b_for_gid, &psz_gen, (void**) &psz_hid_blob)  );
            while (b_for_gid)
            {
				// Not a lot of thought went into doing each of these in its own repo tx.  Consider alternatives.
				SG_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );

				if (psz_hid_ref)
                {
                    SG_ERR_CHECK(  SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid_blob, SG_BLOBENCODING__VCDIFF, psz_hid_ref, NULL, NULL, NULL, NULL)  );
                    // TODO be tolerant here of SG_ERR_REPO_BUSY
                }
                else
                {
                    psz_hid_ref = psz_hid_blob;

                    SG_ERR_CHECK(  SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid_ref, SG_BLOBENCODING__FULL, NULL, NULL, NULL, NULL, NULL)  );
                    // TODO be tolerant here of SG_ERR_REPO_BUSY
                }

				SG_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );

                SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit_for_gid, &b_for_gid, &psz_gen, (void**) &psz_hid_blob)  );
            }
            SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit_for_gid);
            psz_hid_ref = NULL;
        }
        SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &b, &psz_gid, (void**) &prb)  );
    }
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);

	SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, prb_blobs, _sg_repo__free_rbtree);

    SG_RBTREE_NULLFREE(pCtx, prb_leaves);

    return;

fail:
    return;
}