Code example #1
File: sg_dagfrag.c  Project: avar/veracity
static void _cache__lookup(SG_context * pCtx,
						   SG_dagfrag * pFrag, const char * szHid,
						   _my_data ** ppData,		// you do not own this (but you may modify non-pointer values within)
						   SG_bool * pbPresent)
{
	// see if szHid is present in cache.  return the associated data.

	_my_data * pData;
    SG_bool b = SG_FALSE;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NONEMPTYCHECK_RETURN(szHid);
	SG_NULLARGCHECK_RETURN(ppData);
	SG_NULLARGCHECK_RETURN(pbPresent);	// pbPresent is dereferenced unconditionally below

	SG_ERR_CHECK_RETURN(  SG_rbtree__find(pCtx,pFrag->m_pRB_Cache,szHid,&b,((void **)&pData))  );
	if (b)
	{
		*pbPresent = SG_TRUE;
		*ppData = pData;
	}
    else
    {
		*pbPresent = SG_FALSE;
		*ppData = NULL;
    }
}
Code example #2
/**
 * The values for RENAME, MOVE, ATTRBITS, SYMLINKS, and SUBMODULES are collapsible.  (see below)
 * In the corresponding rbUniques we only need to remember the set of unique values for the
 * field.  THESE ARE THE KEYS IN THE prbUnique.
 *
 * As a convenience, we associate a vector of entries with each key.  These form a many-to-one
 * mapping so that we can report all of the entries that have this value.
 *
 * TODO since we should only process a cset once, we should not get any
 * TODO duplicates in the vector, but it might be possible.  i'm not going
 * TODO to worry about it now.  if this becomes a problem, consider doing
 * TODO a unique-insert into the vector -or- making the vector a sub-rbtree.
 *
 */
static void _update_1_rbUnique(SG_context * pCtx, SG_rbtree * prbUnique, const char * pszKey, SG_mrg_cset_entry * pMrgCSetEntry_Leaf_k)
{
	SG_vector * pVec_Allocated = NULL;
	SG_vector * pVec;
	SG_bool bFound;

	SG_ERR_CHECK(  SG_rbtree__find(pCtx,prbUnique,pszKey,&bFound,(void **)&pVec)  );
	if (!bFound)
	{
		SG_ERR_CHECK(  SG_VECTOR__ALLOC(pCtx,&pVec_Allocated,3)  );
		SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx,prbUnique,pszKey,pVec_Allocated)  );
		pVec = pVec_Allocated;
		pVec_Allocated = NULL;			// rbtree owns this now
	}

	SG_ERR_CHECK(  SG_vector__append(pCtx,pVec,pMrgCSetEntry_Leaf_k,NULL)  );

#if TRACE_WC_MERGE
	SG_ERR_IGNORE(  SG_console(pCtx,SG_CS_STDERR,"_update_1_rbUnique: [%s][%s]\n",
									   pszKey,
									   SG_string__sz(pMrgCSetEntry_Leaf_k->pMrgCSet->pStringCSetLabel))  );
#endif

	return;

fail:
	SG_VECTOR_NULLFREE(pCtx, pVec_Allocated);
}
Code example #3
void sg_repo__bind_vtable(SG_context* pCtx, SG_repo * pRepo, const char * pszStorage)
{
    SG_uint32 count_vtables = 0;

	SG_NULLARGCHECK(pRepo);

	if (pRepo->p_vtable)			// can only be bound once
    {
		SG_ERR_THROW2(SG_ERR_INVALIDARG, (pCtx, "pRepo->p_vtable is already bound"));
    }

	if (pRepo->pvh_descriptor)
    {
		SG_ERR_THROW2(SG_ERR_INVALIDARG, (pCtx, "pRepo->pvh_descriptor is already bound"));
    }

    if (!g_prb_repo_vtables)
    {
		SG_ERR_THROW2(SG_ERR_UNKNOWN_STORAGE_IMPLEMENTATION, (pCtx, "There are no repo storage plugins installed"));
    }

    SG_ERR_CHECK(  SG_rbtree__count(pCtx, g_prb_repo_vtables, &count_vtables)  );

    if (0 == count_vtables)
    {
		SG_ERR_THROW2(SG_ERR_UNKNOWN_STORAGE_IMPLEMENTATION, (pCtx, "There are no repo storage plugins installed"));
    }

	if (!pszStorage || !*pszStorage)
    {
        if (1 == count_vtables)
        {
            SG_bool b = SG_FALSE;

            // Exactly one plugin is registered: bind the first (and only) vtable.
            SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, g_prb_repo_vtables, &b, NULL, (void**) &pRepo->p_vtable)  );
            SG_ASSERT(pRepo->p_vtable);
        }
        else
        {
            SG_ERR_THROW2(SG_ERR_UNKNOWN_STORAGE_IMPLEMENTATION, (pCtx, "Multiple repo storage plugins installed.  Must specify."));
        }
    }
    else
    {
        SG_bool b = SG_FALSE;

        SG_ERR_CHECK(  SG_rbtree__find(pCtx, g_prb_repo_vtables, pszStorage, &b, (void**) &pRepo->p_vtable)  );
        if (!b || !pRepo->p_vtable)
        {
            SG_ERR_THROW(SG_ERR_UNKNOWN_STORAGE_IMPLEMENTATION);
        }
    }

fail:
    ;
}
Code example #4
// Add a dagnode to the work queue if it's not already on it. If it is already
// on it, this might tell us new information about whether it is a descendant of
// "New" or "Old", so update it with the new isAncestorOf information.
static void _fnsc_work_queue__insert(
	SG_context * pCtx,
	_fnsc_work_queue_t * pWorkQueue,
	const char * pszHid,
	SG_uint64 dagnum,
	SG_repo * pRepo,
	SG_byte isAncestorOf
	)
{
	SG_bool alreadyInTheQueue = SG_FALSE;
	SG_dagnode * pDagnode = NULL;
	SG_uint32 i;
	SG_uint32 revno = 0;
	char * revno__p = NULL;
	
	// First we check the cache. This will tell us whether the item is
	// already on the queue, and if so what its revno is.
	SG_ERR_CHECK(  SG_rbtree__find(pCtx, pWorkQueue->pRevnoCache, pszHid, &alreadyInTheQueue, (void**)&revno__p)  );
	if(alreadyInTheQueue)
	{
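		// The revno was stored directly in the rbtree's assoc-pointer slot as
		// an offset from NULL; pointer subtraction recovers the integer value.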
		revno = (SG_uint32)(revno__p - (char*)NULL);
	}
	else
	{
		SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, pszHid, &pDagnode)  );
		SG_ERR_CHECK(  SG_dagnode__get_revno(pCtx, pDagnode, &revno)  );
	}
	
	// The queue is kept sorted by revno in ascending order: scan backward from
	// the tail to find the insertion point for this revno.
	i = pWorkQueue->length;
	while(i>0 && pWorkQueue->p[i-1].revno > revno)
		--i;
	
	if (i>0 && pWorkQueue->p[i-1].revno == revno)
	{
		SG_ASSERT(alreadyInTheQueue);
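		// This entry previously counted as an ancestor of "New" only; the OR
		// below adds other flags, so adjust the running count first.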
		if (pWorkQueue->p[i-1].isAncestorOf==_ANCESTOR_OF_NEW && isAncestorOf!=_ANCESTOR_OF_NEW)
			--pWorkQueue->numAncestorsOfNewOnTheQueue;
		pWorkQueue->p[i-1].isAncestorOf |= isAncestorOf; // OR in the new isAncestorOfs
	}
	else
	{
		SG_ASSERT(pDagnode!=NULL);
		SG_ERR_CHECK(  _fnsc_work_queue__insert_at(pCtx, pWorkQueue, i, pszHid, revno, &pDagnode, isAncestorOf)  );
	}
	
	return;
fail:
	SG_DAGNODE_NULLFREE(pCtx, pDagnode);
}
Code example #5
void sg_pack__do_blob(SG_context* pCtx, const char* psz_gid, const char* psz_hid, SG_int32 gen, SG_rbtree* prb_blobs, SG_rbtree* prb_new)
{
    SG_rbtree* prb = NULL;
    SG_bool b = SG_FALSE;
    char buf[64];

    SG_ERR_CHECK(  SG_rbtree__find(pCtx, prb_new, psz_hid, &b, NULL)  );
    if (b)
    {
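        // This hid is one of the new blobs: record it under its object's gid.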
        SG_ERR_CHECK(  SG_rbtree__find(pCtx, prb_blobs, psz_gid, &b, (void**) &prb)  );
        if (!b)
        {
            SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb)  );
            SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx, prb_blobs, psz_gid, prb)  );
        }
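        // Zero-pad the generation so lexicographic rbtree key order matches
        // numeric generation order.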
        SG_ERR_CHECK(  SG_sprintf(pCtx, buf, sizeof(buf), "%05d", (int) gen)  );
        SG_ERR_CHECK(  SG_rbtree__add__with_pooled_sz(pCtx, prb, buf, psz_hid)  );
    }

    return;

fail:
    return;
}
Code example #6
// Add a dagnode to the work queue if it's not already on it. If it is already
// on it, update it with the new pIsAncestorOf information.
static void _hrca_work_queue__insert(
	SG_context * pCtx,
	_hrca_work_queue_t * pWorkQueue,
	const char * pszHid,
	SG_repo * pRepo,
	SG_repo_fetch_dagnodes_handle * pDagnodeFetcher,
	SG_bitvector * pIsAncestorOf
	)
{
	SG_bool alreadyInTheQueue = SG_FALSE;
	SG_dagnode * pDagnode = NULL;
	SG_uint32 i;
	SG_uint32 revno = 0;
	char * revno__p = NULL;

	// First we check the cache. This will tell us whether the item is
	// already on the queue, and if so what its revno is.
	SG_ERR_CHECK(  SG_rbtree__find(pCtx, pWorkQueue->pRevnoCache, pszHid, &alreadyInTheQueue, (void**)&revno__p)  );
	if(alreadyInTheQueue)
	{
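		// The revno was stored directly in the rbtree's assoc-pointer slot as
		// an offset from NULL; pointer subtraction recovers the integer value.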
		revno = (SG_uint32)(revno__p - (char*)NULL);
	}
	else
	{
		SG_ERR_CHECK(  SG_repo__fetch_dagnodes__one(pCtx, pRepo, pDagnodeFetcher, pszHid, &pDagnode)  );
		SG_ERR_CHECK(  SG_dagnode__get_revno(pCtx, pDagnode, &revno)  );
	}

	// The queue is kept sorted by revno in ascending order: scan backward from
	// the tail to find the insertion point for this revno.
	i = pWorkQueue->length;
	while(i>0 && pWorkQueue->p[i-1].revno > revno)
		--i;

	if (i>0 && pWorkQueue->p[i-1].revno == revno)
	{
		SG_ASSERT(alreadyInTheQueue);
		// OR in the new pIsAncestorOf
		SG_ERR_CHECK(  SG_bitvector__assign__bv__or_eq__bv(pCtx, pWorkQueue->p[i-1].pIsAncestorOf, pIsAncestorOf)  );
	}
	else
	{
		SG_ASSERT(pDagnode!=NULL);
		SG_ERR_CHECK(  _hrca_work_queue__insert_at(pCtx, pWorkQueue, i, pszHid, revno, &pDagnode, pIsAncestorOf)  );
	}

	return;
fail:
	SG_DAGNODE_NULLFREE(pCtx, pDagnode);
}
Code example #7
void SG_rbtree_ui64__find(
	SG_context* pCtx,
	const SG_rbtree_ui64* prb,
	SG_uint64 ui64_key,
    SG_bool* pbFound,
	void** pAssocData
	)
{
	sg_buf_ui64 bufUI64;
	(void)SG_hex__format_uint64(bufUI64, ui64_key);

	SG_ERR_CHECK_RETURN(  SG_rbtree__find(pCtx,
										  (const SG_rbtree *)prb,
										  bufUI64,
										  pbFound,
										  pAssocData)  );
}
Code example #8
/**
 * The values for RENAME, MOVE, ATTRBITS, SYMLINKS, and SUBMODULES are collapsible.  (see below)
 * In the corresponding rbUniques we only need to remember the set of unique values for the
 * field.  THESE ARE THE KEYS IN THE prbUnique.
 *
 * As a convenience, we associate a vector of entries with each key.  These form a many-to-one
 * mapping so that we can report all of the entries that have this value.
 *
 * Here we carry forward the values from a sub-merge to the outer merge by copying the keys
 * from the source rbtree and inserting them into the destination rbtree.
 *
 * NOTE: the term sub-merge here refers to the steps within an n-way merge;
 * it DOES NOT refer to a submodule.
 */
static void _carry_forward_unique_values(SG_context * pCtx,
										 SG_rbtree * prbDest,
										 SG_rbtree * prbSrc)
{
	SG_rbtree_iterator * pIter = NULL;
	SG_vector * pVec_Allocated = NULL;
	const char * pszKey;
	SG_vector * pVec_Src;
	SG_vector * pVec_Dest;
	SG_uint32 j, nr;
	SG_bool bFound;


	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx,&pIter,prbSrc,&bFound,&pszKey,(void **)&pVec_Src)  );
	while (bFound)
	{
		SG_ERR_CHECK(  SG_rbtree__find(pCtx,prbDest,pszKey,&bFound,(void **)&pVec_Dest)  );
		if (!bFound)
		{
			SG_ERR_CHECK(  SG_VECTOR__ALLOC(pCtx,&pVec_Allocated,3)  );
			SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx,prbDest,pszKey,pVec_Allocated)  );
			pVec_Dest = pVec_Allocated;
			pVec_Allocated = NULL;			// rbtree owns this now
		}

		SG_ERR_CHECK(  SG_vector__length(pCtx,pVec_Src,&nr)  );
		for (j=0; j<nr; j++)
		{
			SG_mrg_cset_entry * pMrgCSetEntry_x;

			SG_ERR_CHECK(  SG_vector__get(pCtx,pVec_Src,j,(void **)&pMrgCSetEntry_x)  );
			SG_ERR_CHECK(  SG_vector__append(pCtx,pVec_Dest,pMrgCSetEntry_x,NULL)  );

#if TRACE_WC_MERGE
			SG_ERR_IGNORE(  SG_console(pCtx,SG_CS_STDERR,"_carry_forward_unique_value: [%s][%s]\n",
									   pszKey,
									   SG_string__sz(pMrgCSetEntry_x->pMrgCSet->pStringCSetLabel))  );
#endif
		}

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx,pIter,&bFound,&pszKey,NULL)  );
	}

fail:
	SG_RBTREE_ITERATOR_NULLFREE(pCtx,pIter);
}
Code example #9
void SG_jscore__mutex__unlock(
	SG_context* pCtx,
	const char* pszName)
{
	SG_bool bExists = SG_FALSE;
	_namedMutex* pNamedMutex = NULL;

	SG_ASSERT(gpJSCoreGlobalState);

	/* We always acquire the rbtree mutex first, then the specific named mutex. A deadlock is impossible. */
	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Waiting for JS mutex manager in UNLOCK.")  );
	SG_ERR_CHECK(  SG_mutex__lock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	SG_ERR_CHECK(  SG_rbtree__find(pCtx, gpJSCoreGlobalState->prbJSMutexes, pszName, &bExists, (void**)&pNamedMutex)  );
	if (bExists)
	{
		SG_ASSERT(pNamedMutex);

		//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Releasing named JS mutex: %s", pszName)  );
		SG_ERR_CHECK(  SG_mutex__unlock(pCtx, &pNamedMutex->mutex)  );

		pNamedMutex->count--; // Cannot be touched unless you hold mutexJsNamed. We do here.
		if ( 0 == pNamedMutex->count )
		{
			//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Nobody else is waiting. Removing named JS mutex: %s", pszName)  );
			SG_ERR_CHECK(  SG_rbtree__remove(pCtx, gpJSCoreGlobalState->prbJSMutexes, pszName)  );
			SG_NULLFREE(pCtx, pNamedMutex);
		}
		//else if (pNamedMutex->count > 1) // Cannot be touched unless you hold mutexJsNamed. We do here.
			//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "%u threads are waiting for named JS mutex: %s", pNamedMutex->count-1, pszName)  );

		/* We deliberately unlock the rbtree mutex after the named mutex.
		   Creating a new mutex with the same name can't be allowed until this one is released, 
		   because they are logically the same lock. 
		   Unlocking doesn't block on anything and should be fast. */
		SG_ERR_CHECK(  SG_mutex__unlock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
		//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Released JS mutex manager in UNLOCK.")  );
	}
	else
		SG_ERR_THROW2(  SG_ERR_NOT_FOUND, (pCtx, "Named mutex: %s", pszName)  );

	return;

fail:
	SG_ERR_IGNORE(  SG_mutex__unlock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
}
Code example #10
void SG_jscore__mutex__lock(
	SG_context* pCtx,
	const char* pszName)
{
	SG_bool bExists = SG_FALSE;
	_namedMutex* pFreeThisNamedMutex = NULL;
	_namedMutex* pNamedMutex = NULL;

	SG_ASSERT(gpJSCoreGlobalState);

	/* We always acquire the rbtree mutex first, then the specific named mutex. A deadlock is impossible. */
	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Waiting for JS mutex manager in LOCK.")  );
	SG_ERR_CHECK(  SG_mutex__lock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	SG_ERR_CHECK(  SG_rbtree__find(pCtx, gpJSCoreGlobalState->prbJSMutexes, pszName, &bExists, (void**)&pNamedMutex)  );
	if (!bExists)
	{
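		// First use of this name: allocate the named mutex on demand and
		// register it; the rbtree owns it once the add succeeds.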
		SG_ERR_CHECK(  SG_alloc1(pCtx, pFreeThisNamedMutex)  );
		pNamedMutex = pFreeThisNamedMutex;
		pNamedMutex->count = 0;
		SG_ERR_CHECK(  SG_mutex__init(pCtx, &pNamedMutex->mutex)  );
		SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx, gpJSCoreGlobalState->prbJSMutexes, pszName, pNamedMutex)  );
		pFreeThisNamedMutex = NULL;
	}
	pNamedMutex->count++; // Cannot be touched unless you hold mutexJsNamed. We do here.
	if (pNamedMutex->count > 10)
		SG_ERR_CHECK(  SG_log__report_info(pCtx, "%u threads are waiting for named JS mutex: %s", pNamedMutex->count-1, pszName)  );

	/* We deliberately unlock the rbtree mutex before locking the named mutex.
	 * We want to hold the lock on the rbtree for as little time as possible. Any subsequent
	 * attempts to lock the same name will yield the correct named mutex and correctly block
	 * on it below, without blocking access to the name management rbtree. */
	SG_ERR_CHECK(  SG_mutex__unlock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Released JS mutex manager in LOCK.")  );

	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Waiting for named JS mutex: %s", pszName)  );
	SG_ERR_CHECK(  SG_mutex__lock(pCtx, &pNamedMutex->mutex)  );
	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Acquired named JS mutex: %s", pszName)  );

	return;

fail:
	SG_ERR_IGNORE(  SG_mutex__unlock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	SG_NULLFREE(pCtx, pFreeThisNamedMutex);
}
Code example #11
void sg_vv2__status__assert_in_work_queue(SG_context * pCtx,
										  sg_vv2status * pST,
										  sg_vv2status_od * pOD,
										  SG_uint32 depthInQueue)
{
	char buf[SG_GID_BUFFER_LENGTH + 20];
	SG_bool bFound;
	sg_vv2status_od * pOD_test;

	SG_ERR_CHECK_RETURN(  SG_sprintf(pCtx,
									 buf,SG_NrElements(buf),
									 "%08d.%s",
									 depthInQueue,pOD->bufGidObject)  );

	SG_ERR_CHECK_RETURN(  SG_rbtree__find(pCtx, pST->prbWorkQueue,buf,&bFound,(void **)&pOD_test)  );
	SG_ASSERT_RELEASE_RETURN2(  (bFound),
								(pCtx,
								 "Object [GID %s][depth %d] should have been in work-queue.",
								 pOD->bufGidObject,
								 depthInQueue)  );
}
Code example #12
File: sg__do_cmd_update.c  Project: avar/veracity
static void _advise_after_update(SG_context * pCtx,
								 SG_option_state * pOptSt,
								 SG_pathname * pPathCwd,
								 const char * pszBaselineBeforeUpdate)
{
	SG_pendingtree * pPendingTree = NULL;
	SG_repo * pRepo;
	char * pszBaselineAfterUpdate = NULL;
	SG_rbtree * prbLeaves = NULL;
	SG_uint32 nrLeaves;
	SG_bool bUpdateChangedBaseline;

	// re-open pendingtree to get the now-current baseline (we have to do
	// this in a new instance because the UPDATE saves the pendingtree which
	// frees all of the interesting stuff).

	SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, pPathCwd, pOptSt->bIgnoreWarnings, &pPendingTree)  );
	SG_ERR_CHECK(  SG_pendingtree__get_repo(pCtx, pPendingTree, &pRepo)  );

	SG_ERR_CHECK(  _get_baseline(pCtx, pPendingTree, &pszBaselineAfterUpdate)  );

	// see if the update actually changed the baseline.

	bUpdateChangedBaseline = (strcmp(pszBaselineBeforeUpdate, pszBaselineAfterUpdate) != 0);

	// get the list of all heads/leaves.
	//
	// TODO 2010/06/30 Revisit this when we have NAMED BRANCHES because we
	// TODO            want to filter this list for things within their BRANCH.

	SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx,pRepo,SG_DAGNUM__VERSION_CONTROL,&prbLeaves)  );
#if defined(DEBUG)
	{
		SG_bool bFound = SG_FALSE;
		SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbLeaves, pszBaselineAfterUpdate, &bFound, NULL)  );
		SG_ASSERT(  (bFound)  );
	}
#endif	
	SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbLeaves, &nrLeaves)  );

	if (nrLeaves > 1)
	{
		if (bUpdateChangedBaseline)
		{
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT,
									   "Baseline updated to descendant head, but there are multiple heads; consider merging.\n")  );
		}
		else
		{
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT,
									   "Baseline already at head, but there are multiple heads; consider merging.\n")  );
		}
	}
	else
	{
		if (bUpdateChangedBaseline)
		{
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT,
									   "Baseline updated to head.\n")  );
		}
		else
		{
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT,
									   "Baseline already at head.\n")  );
		}
	}

fail:
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
	SG_RBTREE_NULLFREE(pCtx, prbLeaves);
	SG_NULLFREE(pCtx, pszBaselineAfterUpdate);
}
Code example #13
void SG_sync_remote__request_fragball(
	SG_context* pCtx,
	SG_repo* pRepo,
	const SG_pathname* pFragballDirPathname,
	SG_vhash* pvhRequest,
	char** ppszFragballName)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint64* paDagNums = NULL;
	SG_string* pstrFragballName = NULL;
	SG_rbtree* prbDagnodes = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_rev_spec* pRevSpec = NULL;
	SG_stringarray* psaFullHids = NULL;
	SG_rbtree* prbDagnums = NULL;
	SG_dagfrag* pFrag = NULL;
	char* pszRepoId = NULL;
	char* pszAdminId = NULL;
    SG_fragball_writer* pfb = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);

    {
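        // Generate a unique TID-based filename for the fragball in the target directory.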
        char buf_filename[SG_TID_MAX_BUFFER_LENGTH];
        SG_ERR_CHECK(  SG_tid__generate(pCtx, buf_filename, sizeof(buf_filename))  );
        SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pFragballPathname, pFragballDirPathname, buf_filename)  );
    }

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;

        SG_ERR_CHECK(  SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb)  );
		SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums)  );

		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes)  );
			SG_ERR_CHECK(  SG_fragball__write__dagnodes(pCtx, pfb, paDagNums[i], prbDagnodes)  );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}

		SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
        SG_ERR_CHECK(  SG_fragball_writer__close(pCtx, pfb)  );
	}
	else
	{
		// Specific dags/nodes were requested. Build that fragball.
		SG_bool found;

#if TRACE_SYNC_REMOTE && 0
		SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "fragball request")  );
#endif

		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found)  );
		if (found)
		{
            // SG_SYNC_STATUS_KEY__CLONE_REQUEST is currently ignored
            SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, 3, pFragballDirPathname, ppszFragballName) );
		}
		else
		{
			// Not a full clone.

            SG_ERR_CHECK(  SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb)  );
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &found)  );
			if (found)
			{
                SG_vhash* pvh_since = NULL;

                SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &pvh_since)  );

                SG_ERR_CHECK(  _do_since(pCtx, pRepo, pvh_since, pfb)  );
            }

			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
			if (found)
			{
				// Specific Dagnodes were requested. Add just those nodes to our "start from" rbtree.

				SG_vhash* pvhDags;
				SG_uint32 count_requested_dagnums;
				SG_uint32 i;
				const SG_variant* pvRevSpecs = NULL;
				SG_vhash* pvhRevSpec = NULL;

				// For each requested dag, get rev spec request.
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
				SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums)  );
				if (count_requested_dagnums)
					SG_ERR_CHECK(  SG_repo__list_dags__rbtree(pCtx, pRepo, &prbDagnums)  );
				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;
					const char* pszRefDagNum = NULL;
					SG_uint64 iDagnum;

					// Get the dag's missing node vhash.
					SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszRefDagNum, &pvRevSpecs)  );

					// Verify that requested dagnum exists
					SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbDagnums, pszRefDagNum, &isValidDagnum, NULL)  );
					if (!isValidDagnum)
                        continue;

					SG_ERR_CHECK(  SG_dagnum__from_sz__hex(pCtx, pszRefDagNum, &iDagnum)  );
					
					if (pvRevSpecs && pvRevSpecs->type != SG_VARIANT_TYPE_NULL)
					{
						SG_uint32 countRevSpecs = 0;
						
						SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pvRevSpecs, &pvhRevSpec)  );
						SG_ERR_CHECK(  SG_rev_spec__from_vash(pCtx, pvhRevSpec, &pRevSpec)  );

						// Process the rev spec for each dag
						SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec, &countRevSpecs)  );
						if (countRevSpecs > 0)
						{
							bSpecificNodesRequested = SG_TRUE;

							SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pRepo, pRevSpec, SG_TRUE, &psaFullHids, NULL)  );
							SG_ERR_CHECK(  SG_stringarray__to_rbtree_keys(pCtx, psaFullHids, &prbDagnodes)  );
							SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
						}
						SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes)  );
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						// Get the leaves of the other repo, which we need to connect to.
						SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &found)  );
						if (found)
						{
							SG_vhash* pvhRefAllLeaves;
							SG_vhash* pvhRefDagLeaves;
							SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &pvhRefAllLeaves)  );
							SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, pszRefDagNum, &found)  );
							if (found)
							{
								SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRefAllLeaves, pszRefDagNum, &pvhRefDagLeaves)  );
								SG_ERR_CHECK(  SG_sync__build_best_guess_dagfrag(pCtx, pRepo, iDagnum, 
									prbDagnodes, pvhRefDagLeaves, &pFrag)  );
							}
						}
						else
						{
							// The other repo's leaves weren't provided: add just the requested nodes, make no attempt to connect.
							SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &pszRepoId)  );
							SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &pszAdminId)  );
							SG_ERR_CHECK(  SG_dagfrag__alloc(pCtx, &pFrag, pszRepoId, pszAdminId, iDagnum)  );
							SG_ERR_CHECK(  SG_dagfrag__load_from_repo__simple(pCtx, pFrag, pRepo, prbDagnodes)  );
							SG_NULLFREE(pCtx, pszRepoId);
							SG_NULLFREE(pCtx, pszAdminId);
						}

						SG_ERR_CHECK(  SG_fragball__write__frag(pCtx, pfb, pFrag)  );
						
						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
						SG_DAGFRAG_NULLFREE(pCtx, pFrag);
					}

				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found)  );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs)  );
				SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pfb, pvhBlobs)  );
			}

			SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
		}
        SG_ERR_CHECK(  SG_fragball_writer__close(pCtx, pfb)  );
	}

	/* fallthru */
fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
    {
		SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pFragballPathname)  );
    }

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_RBTREE_NULLFREE(pCtx, prbDagnums);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
	SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
	SG_NULLFREE(pCtx, pszRepoId);
	SG_NULLFREE(pCtx, pszAdminId);
    SG_FRAGBALL_WRITER_NULLFREE(pCtx, pfb);
}
Code example #14
void SG_dagquery__find_descendant_heads(SG_context * pCtx,
										SG_repo * pRepo,
                                        SG_uint64 iDagNum,
										const char * pszHidStart,
										SG_bool bStopIfMultiple,
										SG_dagquery_find_head_status * pdqfhs,
										SG_rbtree ** pprbHeads)
{
	SG_rbtree * prbLeaves = NULL;
	SG_rbtree * prbHeadsFound = NULL;
	SG_rbtree_iterator * pIter = NULL;
	const char * pszKey_k = NULL;
	SG_bool b;
	SG_dagquery_find_head_status dqfhs;
	SG_dagquery_relationship dqRel;
	SG_uint32 nrFound;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NONEMPTYCHECK_RETURN(pszHidStart);
	SG_NULLARGCHECK_RETURN(pdqfhs);
	SG_NULLARGCHECK_RETURN(pprbHeads);

	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prbHeadsFound)  );

	// fetch a list of all of the LEAVES in the DAG.
	// this rbtree only contains keys; no assoc values.

	SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagNum, &prbLeaves)  );

	// if the starting point is a leaf, then we are done (we don't care how many
	// other leaves are in the rbtree because none will be a child of ours because
	// we are a leaf).

	SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbLeaves, pszHidStart, &b, NULL)  );
	if (b)
	{
		SG_ERR_CHECK(  SG_rbtree__add(pCtx, prbHeadsFound, pszHidStart)  );

		dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__IS_LEAF;
		goto done;
	}

	// inspect each leaf and qualify it; put the ones that pass
	// into the list of actual heads.

	nrFound = 0;
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIter, prbLeaves, &b, &pszKey_k, NULL)  );
	while (b)
	{
		// is head[k] a descendant of start?
		SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, iDagNum, pszKey_k, pszHidStart,
															 SG_FALSE, // we care about descendants, so don't skip
															 SG_TRUE,  // we don't care about ancestors, so skip them
															 &dqRel)  );

		if (dqRel == SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
		{
			nrFound++;

			if (bStopIfMultiple && (nrFound > 1))
			{
				// they wanted a unique answer and we've found too many answers
				// (which they won't be able to use anyway) so just stop and
				// return the status.  (we delete prbHeadsFound because it is
				// incomplete and so that they won't be tempted to use it.)

				SG_RBTREE_NULLFREE(pCtx, prbHeadsFound);
				dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE;
				goto done;
			}

			SG_ERR_CHECK(  SG_rbtree__add(pCtx, prbHeadsFound, pszKey_k)  );
		}

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIter, &b, &pszKey_k, NULL)  );
	}

	switch (nrFound)
	{
	case 0:
		// this should NEVER happen.  we should always be able to find a
		// leaf/head for a node.
		//
		// TODO the only case where this might happen is if named branches
		// TODO cause the leaf to be disqualified.  so i'm going to THROW
		// TODO here rather than ASSERT.

		SG_ERR_THROW2(  SG_ERR_DAG_NOT_CONSISTENT,
						(pCtx, "Could not find head/leaf for changeset [%s]", pszHidStart)  );
		break;

	case 1:
		dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__UNIQUE;
		break;

	default:
		dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE;
		break;
	}

done:
	*pprbHeads = prbHeadsFound;
	prbHeadsFound = NULL;

	*pdqfhs = dqfhs;

fail:
	SG_RBTREE_NULLFREE(pCtx, prbLeaves);
	SG_RBTREE_NULLFREE(pCtx, prbHeadsFound);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);
}
Code example #15
/**
 * Compare all the nodes of a single DAG in two repos.
 */
static void _compare_one_dag(SG_context* pCtx,
							 SG_repo* pRepo1,
							 SG_repo* pRepo2,
							 SG_uint32 iDagNum,
							 SG_bool* pbIdentical)
{
	SG_bool bFinalResult = SG_FALSE;
	SG_rbtree* prbRepo1Leaves = NULL;
	SG_rbtree* prbRepo2Leaves = NULL;
	SG_uint32 iRepo1LeafCount, iRepo2LeafCount;
	SG_rbtree_iterator* pIterator = NULL;
	const char* pszId = NULL;
	SG_dagnode* pRepo1Dagnode = NULL;
	SG_dagnode* pRepo2Dagnode = NULL;
	SG_bool bFoundRepo1Leaf = SG_FALSE;
	SG_bool bFoundRepo2Leaf = SG_FALSE;
	SG_bool bDagnodesEqual = SG_FALSE;

	SG_NULLARGCHECK_RETURN(pRepo1);
	SG_NULLARGCHECK_RETURN(pRepo2);
	SG_NULLARGCHECK_RETURN(pbIdentical);

	SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo1, iDagNum, &prbRepo1Leaves)  );
	SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo2, iDagNum, &prbRepo2Leaves)  );

	SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbRepo1Leaves, &iRepo1LeafCount)  );
	SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbRepo2Leaves, &iRepo2LeafCount)  );

	if (iRepo1LeafCount != iRepo2LeafCount)
	{
#if TRACE_SYNC
		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "leaf count differs\n")  );
#endif
		goto Different;
	}

	// The leaf counts match; now verify that each leaf of repo 1 is also a
	// leaf of repo 2 and that the corresponding dagnodes compare equal.
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIterator, prbRepo1Leaves, &bFoundRepo1Leaf, &pszId, NULL)  );
	while (bFoundRepo1Leaf)
	{
		SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbRepo2Leaves, pszId, &bFoundRepo2Leaf, NULL)  );
		if (!bFoundRepo2Leaf)
		{
#if TRACE_SYNC && 0
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "couldn't locate leaf\r\n")  );
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Repo 1 leaves:\r\n")  );
			SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo1Leaves) );
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Repo 2 leaves:\r\n")  );
			SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo2Leaves) );
			SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );
#endif
			goto Different;
		}

		SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo1, pszId, &pRepo1Dagnode)  );
		SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo2, pszId, &pRepo2Dagnode)  );

		SG_ERR_CHECK(  _compare_dagnodes(pCtx, pRepo1, pRepo1Dagnode, pRepo2, pRepo2Dagnode, &bDagnodesEqual)  );

		SG_DAGNODE_NULLFREE(pCtx, pRepo1Dagnode);
		SG_DAGNODE_NULLFREE(pCtx, pRepo2Dagnode);

		if (!bDagnodesEqual)
			goto Different;

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIterator, &bFoundRepo1Leaf, &pszId, NULL)  );
	}

	bFinalResult = SG_TRUE;

Different:
	*pbIdentical = bFinalResult;

	// fall through
fail:
	SG_RBTREE_NULLFREE(pCtx, prbRepo1Leaves);
	SG_RBTREE_NULLFREE(pCtx, prbRepo2Leaves);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIterator);
}