Example no. 1
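/**
 * Compare two dagfrags by walking their RB-Caches in parallel.  Sets
 * *pbResult to SG_TRUE only when both caches contain the same HIDs with
 * matching states (and, for non-end-fringe entries, equal generations and
 * dagnodes).
 */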
void SG_dagfrag__equal(SG_context * pCtx,
					   const SG_dagfrag * pFrag1, const SG_dagfrag * pFrag2, SG_bool * pbResult)
{
	SG_rbtree_iterator * pIter1 = NULL;
	SG_rbtree_iterator * pIter2 = NULL;
	const char * szKey1;
	const char * szKey2;
	_my_data * pMyData1;
	_my_data * pMyData2;
	SG_bool bFound1, bFound2, bEqualDagnodes;
	SG_bool bFinalResult = SG_FALSE;

	SG_NULLARGCHECK_RETURN(pFrag1);
	SG_NULLARGCHECK_RETURN(pFrag2);
	SG_NULLARGCHECK_RETURN(pbResult);

	// we compare the RB-Cache because it has everything in it.
	// the work-queues are transient (used during __add_leaf()).
	// the generation-sorted-member-cache is only around when
	// needed and is a subset of the RB-Cache.

	// since the RB-Cache is ordered by HID, we don't need to do
	// any sorting.  just walk both versions in parallel.

	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx,&pIter1,pFrag1->m_pRB_Cache,&bFound1,&szKey1,(void **)&pMyData1)  );
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx,&pIter2,pFrag2->m_pRB_Cache,&bFound2,&szKey2,(void **)&pMyData2)  );

	while (1)
	{
		if (!bFound1 && !bFound2)
			goto Equal;
		if (!bFound1 || !bFound2)
			goto Different;
		if (strcmp(szKey1,szKey2) != 0)
			goto Different;

		if (pMyData1->m_state != pMyData2->m_state)
			goto Different;

        if (SG_DFS_END_FRINGE != pMyData1->m_state)
        {
            if (pMyData1->m_genDagnode != pMyData2->m_genDagnode)
                goto Different;
            SG_ERR_CHECK(  SG_dagnode__equal(pCtx,pMyData1->m_pDagnode,pMyData2->m_pDagnode,&bEqualDagnodes)  );
            if (!bEqualDagnodes)
                goto Different;
        }

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx,pIter1,&bFound1,&szKey1,(void **)&pMyData1)  );
		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx,pIter2,&bFound2,&szKey2,(void **)&pMyData2)  );
	}

Equal:
	bFinalResult = SG_TRUE;
Different:
	*pbResult = bFinalResult;
fail:
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter1);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter2);
}
Example no. 2
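/**
 * Move every dagnode in *ppFragToBeEaten into pConsumerFrag.  Both frags must
 * refer to the same repo, admin id, and dagnum; on success the eaten frag is
 * freed and the caller's pointer is set to NULL.
 */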
void SG_dagfrag__eat_other_frag(SG_context * pCtx,
								SG_dagfrag* pConsumerFrag,
								SG_dagfrag** ppFragToBeEaten)
{
	SG_rbtree_iterator* pit = NULL;
	SG_bool b = SG_FALSE;
	_my_data * pMyData = NULL;
	const char* psz_id = NULL;
	SG_dagfrag* pFragToBeEaten = NULL;

	SG_NULLARGCHECK_RETURN(pConsumerFrag);
	SG_NULLARGCHECK_RETURN(ppFragToBeEaten);
	SG_NULLARGCHECK_RETURN(*ppFragToBeEaten);

	pFragToBeEaten = *ppFragToBeEaten;

#if DEBUG && TRACE_DAGFRAG
	SG_ERR_CHECK(  SG_dagfrag_debug__dump__console(pCtx, pFragToBeEaten, "frag to be eaten", 0, SG_CS_STDOUT)  );
	SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, pFragToBeEaten->m_pRB_Cache)  );
	SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );
	SG_ERR_CHECK(  SG_dagfrag_debug__dump__console(pCtx, pConsumerFrag, "new frag before meal", 0, SG_CS_STDOUT)  );
	SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, pConsumerFrag->m_pRB_Cache)  );
	SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );
#endif

	if (
		(pConsumerFrag->m_iDagNum != pFragToBeEaten->m_iDagNum)
		|| (0 != strcmp(pConsumerFrag->m_sz_repo_id, pFragToBeEaten->m_sz_repo_id))
		|| (0 != strcmp(pConsumerFrag->m_sz_admin_id, pFragToBeEaten->m_sz_admin_id))
		)
	{
		SG_ERR_THROW_RETURN(  SG_ERR_REPO_MISMATCH  );
	}

	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit,pFragToBeEaten->m_pRB_Cache,&b,&psz_id,(void **)&pMyData)  );
	while (b)
	{
		if (pMyData->m_pDagnode)
		{
			SG_ERR_CHECK(  SG_dagfrag__add_dagnode(pCtx, pConsumerFrag, &pMyData->m_pDagnode)  );
		}
		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit,&b,&psz_id,(void **)&pMyData)  );
	}
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);

	SG_DAGFRAG_NULLFREE(pCtx, pFragToBeEaten);
	*ppFragToBeEaten = NULL;

#if DEBUG && TRACE_DAGFRAG
	SG_ERR_CHECK(  SG_dagfrag_debug__dump__console(pCtx, pConsumerFrag, "new frag after meal", 0, SG_CS_STDOUT)  );
	SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, pConsumerFrag->m_pRB_Cache)  );
	SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );
#endif

	return;

fail:
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
}
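/**
 * Select two distinct nodes from pRbtree uniformly at random.  The rbtree
 * must contain at least two nodes; any of the output pointers may be NULL.
 */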
void SG_random__select_2_random_rbtree_nodes(SG_context * pCtx, SG_rbtree * pRbtree,
	const char ** ppKey1, void ** ppAssocData1,
	const char ** ppKey2, void ** ppAssocData2)
{
	SG_uint32 count, r1, r2, i;
	SG_rbtree_iterator * p = NULL;
	SG_bool ok = SG_FALSE;
	const char * pKey = NULL;
	void * pAssocData = NULL;
	const char * pKey1 = NULL;
	void * pAssocData1 = NULL;
	const char * pKey2 = NULL;
	void * pAssocData2 = NULL;

	SG_NULLARGCHECK_RETURN(pRbtree);

	SG_ERR_CHECK(  SG_rbtree__count(pCtx, pRbtree, &count)  );

	SG_ARGCHECK(count>=2, pRbtree);

	r1 = SG_random_uint32(count);
	r2 = (r1+1+SG_random_uint32(count-1))%count;
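	// Note: assuming SG_random_uint32(n) yields a uniform value in [0, n),
	// r2 is r1 plus a nonzero offset taken modulo count, so it is uniformly
	// distributed over the remaining indices and can never equal r1.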

	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &p, pRbtree, &ok, &pKey, &pAssocData)  );
	for(i=0;i<=r1||i<=r2;++i)
	{
		SG_ASSERT(ok);
		if(i==r1)
		{
			pKey1 = pKey;
			pAssocData1 = pAssocData;
		}
		else if(i==r2)
		{
			pKey2 = pKey;
			pAssocData2 = pAssocData;
		}
		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, p, &ok, &pKey, &pAssocData)  );
	}

	SG_RBTREE_ITERATOR_NULLFREE(pCtx, p);

	if(ppKey1!=NULL)
		*ppKey1 = pKey1;
	if(ppAssocData1!=NULL)
		*ppAssocData1 = pAssocData1;
	if(ppKey2!=NULL)
		*ppKey2 = pKey2;
	if(ppAssocData2!=NULL)
		*ppAssocData2 = pAssocData2;

	return;
fail:
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, p);
}
/**
 * The values for RENAME, MOVE, ATTRBITS, SYMLINKS, and SUBMODULES are collapsible.  (see below)
 * In the corresponding rbUnique's we only need to remember the set of unique values for the
 * field.  THESE ARE THE KEYS IN THE prbUnique.
 *
 * As a convenience, we associate a vector of entries with each key.  These form a many-to-one
 * mapping so that we can report all of the entries that have a given value.
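 *
 * For example (purely illustrative), if two branches of a sub-merge propose
 * different values for one of these fields, the rbtree ends up with two keys,
 * each mapped to the vector of SG_mrg_cset_entry's that carry that value.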
 *
 * Here we carry forward the values from a sub-merge to the outer-merge by copying the keys
 * from the source-rbtree and inserting them into the destination-rbtree.
 *
 * NOTE: the term sub-merge here refers to the steps within an n-way merge;
 * it DOES NOT refer to a submodule.
 */
static void _carry_forward_unique_values(SG_context * pCtx,
										 SG_rbtree * prbDest,
										 SG_rbtree * prbSrc)
{
	SG_rbtree_iterator * pIter = NULL;
	SG_vector * pVec_Allocated = NULL;
	const char * pszKey;
	SG_vector * pVec_Src;
	SG_vector * pVec_Dest;
	SG_uint32 j, nr;
	SG_bool bFound;


	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx,&pIter,prbSrc,&bFound,&pszKey,(void **)&pVec_Src)  );
	while (bFound)
	{
		SG_ERR_CHECK(  SG_rbtree__find(pCtx,prbDest,pszKey,&bFound,(void **)&pVec_Dest)  );
		if (!bFound)
		{
			SG_ERR_CHECK(  SG_VECTOR__ALLOC(pCtx,&pVec_Allocated,3)  );
			SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx,prbDest,pszKey,pVec_Allocated)  );
			pVec_Dest = pVec_Allocated;
			pVec_Allocated = NULL;			// rbtree owns this now
		}

		SG_ERR_CHECK(  SG_vector__length(pCtx,pVec_Src,&nr)  );
		for (j=0; j<nr; j++)
		{
			SG_mrg_cset_entry * pMrgCSetEntry_x;

			SG_ERR_CHECK(  SG_vector__get(pCtx,pVec_Src,j,(void **)&pMrgCSetEntry_x)  );
			SG_ERR_CHECK(  SG_vector__append(pCtx,pVec_Dest,pMrgCSetEntry_x,NULL)  );

#if TRACE_WC_MERGE
			SG_ERR_IGNORE(  SG_console(pCtx,SG_CS_STDERR,"_carry_forward_unique_value: [%s][%s]\n",
									   pszKey,
									   SG_string__sz(pMrgCSetEntry_x->pMrgCSet->pStringCSetLabel))  );
#endif
		}

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx,pIter,&bFound,&pszKey,NULL)  );
	}

fail:
	SG_RBTREE_ITERATOR_NULLFREE(pCtx,pIter);
}
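/**
 * Evaluate each JavaScript file found directly in 'path' (as accepted by
 * _isValidJsFile) against the given global object.  'modname' is used only
 * in error messages.
 */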
static void _loadModuleDir(SG_context *pCtx, const SG_pathname *path, const char *modname, JSContext *cx, JSObject *glob)
{
	SG_rbtree * pJavascriptFiles = NULL;
	SG_rbtree_iterator * pJavascriptFile = NULL;
	const char *szJavascriptFile = NULL; 
	SG_pathname *pJavascriptFilePath = NULL;
	SG_bool ok = SG_FALSE;
	SG_bool valid = SG_FALSE;
	char *psz_js = NULL; // free
	SG_uint32 len_js = 0;
	jsval rval = JSVAL_VOID;
	
	SG_ERR_CHECK(  SG_dir__list(pCtx, path, NULL, NULL, ".js", &pJavascriptFiles)  );
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pJavascriptFile, pJavascriptFiles, &ok, &szJavascriptFile, NULL)  );
	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__COPY(pCtx, &pJavascriptFilePath, path)  );

	while(ok)
	{
		SG_ERR_CHECK(  _isValidJsFile(pCtx, szJavascriptFile, &valid)  );

		if (valid)
		{
			SG_ERR_CHECK(  SG_pathname__append__from_sz(pCtx, pJavascriptFilePath, szJavascriptFile)  );

			SG_ERR_CHECK(  sg_read_entire_file(pCtx, pJavascriptFilePath, &psz_js, &len_js)  );
			if(!JS_EvaluateScript(cx, glob, psz_js, len_js, szJavascriptFile, 1, &rval))
			{
				SG_ERR_CHECK_CURRENT;
				SG_ERR_THROW2(SG_ERR_JS, (pCtx, "An error occurred loading %s javascript file '%s'", modname, szJavascriptFile));
			}
			SG_NULLFREE(pCtx, psz_js);
			SG_ERR_CHECK(  SG_pathname__remove_last(pCtx, pJavascriptFilePath)  );
		}

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pJavascriptFile, &ok, &szJavascriptFile, NULL)  );
	}

fail:
	SG_PATHNAME_NULLFREE(pCtx, pJavascriptFilePath);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pJavascriptFile);
	SG_RBTREE_NULLFREE(pCtx, pJavascriptFiles);
	SG_NULLFREE(pCtx, psz_js);
}
Example no. 6
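/**
 * Fetch each dagnode whose HID appears as a key in prb_ids from pRepo and
 * add it to pFrag.
 */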
void SG_dagfrag__load_from_repo__simple(SG_context * pCtx,
										SG_dagfrag * pFrag,
										SG_repo* pRepo,
										SG_rbtree * prb_ids)
{
	SG_dagnode* pdn = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_bool b = SG_FALSE;
	const char* psz_id = NULL;

	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit,prb_ids,&b,&psz_id,NULL)  );
	while (b)
	{
		SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, psz_id, &pdn)  );
		SG_ERR_CHECK(  SG_dagfrag__add_dagnode(pCtx, pFrag, &pdn)  );
		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit,&b,&psz_id,NULL)  );
	}

	// fall thru to common cleanup

fail:
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
}
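/**
 * Wrapper around SG_rbtree__iterator__next() for rbtrees keyed by SG_uint64
 * values stored as hex strings: parses the string key back into the caller's
 * SG_uint64.
 */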
void SG_rbtree_ui64__iterator__next(
	SG_context* pCtx,
	SG_rbtree_ui64_iterator *pIterator,
	SG_bool* pbOK,
	SG_uint64 * pui64_key,
	void** ppAssocData
	)
{
	SG_uint64 ui;
	const char * psz_key;
	void * pAssoc;
	SG_bool bOK;

	SG_ERR_CHECK_RETURN(  SG_rbtree__iterator__next(pCtx,
													(SG_rbtree_iterator *)pIterator,
													&bOK,
													&psz_key,
													&pAssoc)  );
	if (bOK)
	{
#if TRACE_RBTREE_UI64
		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
								   "Rbtree_ui64_next: %s\n", psz_key)  );
#endif
		SG_ERR_CHECK_RETURN(  SG_hex__parse_hex_uint64(pCtx, psz_key, SG_STRLEN(psz_key), &ui)  );

		*pbOK = SG_TRUE;
		*pui64_key = ui;
		*ppAssocData = pAssoc;
	}
	else
	{
		*pbOK = SG_FALSE;
		*pui64_key = 0ULL;
		*ppAssocData = NULL;
	}
}
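/**
 * Select one node from pRbtree uniformly at random.  The rbtree must be
 * non-empty; ppKey and ppAssocData may be NULL.
 */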
void SG_random__select_random_rbtree_node(SG_context * pCtx, SG_rbtree * pRbtree, const char ** ppKey, void ** ppAssocData)
{
	SG_uint32 count, i;
	SG_rbtree_iterator * p = NULL;
	SG_bool ok;
	const char * pKey;
	void * pAssocData;

	SG_NULLARGCHECK_RETURN(pRbtree);

	SG_ERR_CHECK(  SG_rbtree__count(pCtx, pRbtree, &count)  );

	SG_ARGCHECK(count!=0, pRbtree);

	i = SG_random_uint32(count);

	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &p, pRbtree, &ok, &pKey, &pAssocData)  );
	SG_ASSERT(ok);
	while(i>0)
	{
		--i;
		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, p, &ok, &pKey, &pAssocData)  );
		SG_ASSERT(ok);
	}

	SG_RBTREE_ITERATOR_NULLFREE(pCtx, p);

	if(ppKey!=NULL)
		*ppKey = pKey;
	if(ppAssocData!=NULL)
		*ppAssocData = pAssocData;

	return;
fail:
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, p);
}
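/**
 * Return a vhash whose keys are the names registered in g_prb_repo_vtables
 * (values are null).
 */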
void sg_repo__query_implementation__list_vtables(SG_context * pCtx, SG_vhash ** pp_vhash)
{
	SG_vhash * pvh = NULL;
    SG_rbtree_iterator* pit = NULL;
    SG_bool b = SG_FALSE;
    const char* psz_name = NULL;

	SG_ERR_CHECK_RETURN(  SG_VHASH__ALLOC(pCtx, &pvh)  );

    SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, g_prb_repo_vtables, &b, &psz_name, NULL)  );
    while (b)
    {
        SG_ERR_CHECK(  SG_vhash__add__null(pCtx, pvh, psz_name)  );

        SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &b, &psz_name, NULL)  );
    }

	*pp_vhash = pvh;
    pvh = NULL;

fail:
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_VHASH_NULLFREE(pCtx, pvh);
}
Example no. 10
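/**
 * Build a fragball in pFragballDirPathname that satisfies the pull request
 * described by pvhRequest (or one containing the leaves of every dag when
 * pvhRequest is NULL) and return its filename in ppszFragballName.  On error
 * the partially written fragball is deleted.
 */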
void SG_server__pull_request_fragball(SG_context* pCtx,
									  SG_repo* pRepo,
									  SG_vhash* pvhRequest,
									  const SG_pathname* pFragballDirPathname,
									  char** ppszFragballName,
									  SG_vhash** ppvhStatus)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint32* paDagNums = NULL;
    SG_rbtree* prbDagnodes = NULL;
	SG_string* pstrFragballName = NULL;
	char* pszRevFullHid = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_uint32* repoDagnums = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);
	SG_NULLARGCHECK_RETURN(ppvhStatus);

#if TRACE_SERVER
	SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "pull fragball request")  );
#endif

	SG_ERR_CHECK(  SG_fragball__create(pCtx, pFragballDirPathname, &pFragballPathname)  );

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;
		SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums)  );

		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes)  );
			SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, paDagNums[i], prbDagnodes)  );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}

		SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
	}
	else
	{
		// Build the requested fragball.
		SG_bool found;

		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found)  );
		if (found)
		{
			// Full clone requested.
			SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, pFragballDirPathname, ppszFragballName) );
		}
		else
		{
			// Not a full clone.

			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
			if (found)
			{
				// Dagnodes were requested.

				SG_uint32 generations = 0;
				SG_vhash* pvhDags;
				SG_uint32 count_requested_dagnums;
				SG_uint32 count_repo_dagnums = 0;
				SG_uint32 i;
				const char* pszDagNum = NULL;
				const SG_variant* pvRequestedNodes = NULL;
				SG_vhash* pvhRequestedNodes = NULL;
				const char* pszHidRequestedDagnode = NULL;

				// Were additional generations requested?
				SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &found)  );
				if (found)
					SG_ERR_CHECK(  SG_vhash__get__uint32(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &generations)  );

				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
				SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums)  );
				if (count_requested_dagnums)
					SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_repo_dagnums, &repoDagnums)  );

				// For each requested dag, get the requested nodes.
				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_uint32 iMissingNodeCount;
					SG_uint32 iDagnum;
					SG_uint32 j;
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;

					// Get the dag's missing node vhash.
					SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszDagNum, &pvRequestedNodes)  );
					SG_ERR_CHECK(  SG_dagnum__from_sz__decimal(pCtx, pszDagNum, &iDagnum)  );

					// Verify that requested dagnum exists
					for (j = 0; j < count_repo_dagnums; j++)
					{
						if (repoDagnums[j] == iDagnum)
						{
							isValidDagnum = SG_TRUE;
							break;
						}
					}
					if (!isValidDagnum)
					{
						char buf[SG_DAGNUM__BUF_MAX__NAME];
						SG_ERR_CHECK(  SG_dagnum__to_name(pCtx, iDagnum, buf, sizeof(buf))  );
						SG_ERR_THROW2(SG_ERR_NO_SUCH_DAG, (pCtx, "%s", buf));
					}

					if (pvRequestedNodes)
					{
						SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pvRequestedNodes, &pvhRequestedNodes)  );

						// Get each node listed for the dag
						SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhRequestedNodes, &iMissingNodeCount)  );
						if (iMissingNodeCount > 0)
						{
							SG_uint32 j;
							const SG_variant* pvVal;

							bSpecificNodesRequested = SG_TRUE;

							SG_ERR_CHECK(  SG_RBTREE__ALLOC__PARAMS(pCtx, &prbDagnodes, iMissingNodeCount, NULL)  );
							for (j=0; j<iMissingNodeCount; j++)
							{
								SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhRequestedNodes, j, &pszHidRequestedDagnode, &pvVal)  );

								if (pvVal)
								{
									const char* pszVal;
									SG_ERR_CHECK(  SG_variant__get__sz(pCtx, pvVal, &pszVal)  );
									if (pszVal)
									{
										if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_HID_PREFIX))
										{
											SG_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, iDagnum, pszHidRequestedDagnode, &pszRevFullHid)  );
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_TAG))
										{
											SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszHidRequestedDagnode, &pszRevFullHid)  );
											if (!pszRevFullHid)
												SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else
											SG_ERR_THROW(SG_ERR_PULL_INVALID_FRAGBALL_REQUEST);
									}
								}
								
								SG_ERR_CHECK(  SG_rbtree__update(pCtx, prbDagnodes, pszHidRequestedDagnode)  );
								// Get additional dagnode generations, if requested.
								SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, pszHidRequestedDagnode, prbDagnodes, generations)  );
								SG_NULLFREE(pCtx, pszRevFullHid);
							}
						}
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes)  );

						// Get additional dagnode generations, if requested.
						if (generations)
						{
							SG_bool found;
							const char* hid;
							
							SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbDagnodes, &found, &hid, NULL)  );
							while (found)
							{
								SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, hid, prbDagnodes, generations)  );
								SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &hid, NULL)  );
							}
						}
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, iDagnum, prbDagnodes)  );
						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
					}

				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found)  );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs)  );
				SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pRepo, pFragballPathname, pvhBlobs)  );
			}

			SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
		}
	}
	
	/* fallthru */
fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
		SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pFragballPathname)  );

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_NULLFREE(pCtx, pszRevFullHid);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_NULLFREE(pCtx, repoDagnums);
}
Example no. 11
/**
 * Add to the fragball request vhash (see SG_server_prototypes.h for format).
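 *
 * Roughly, the shape built below is (key names shown via their constants;
 * the literal strings are defined elsewhere, so treat this as a sketch):
 *
 *   { SG_SYNC_STATUS_KEY__DAGS :
 *       { "<decimal dagnum>" :
 *           { "<dagnode hid>" : null,
 *             "<tag>"         : SG_SYNC_REQUEST_VALUE_TAG,
 *             "<hid prefix>"  : SG_SYNC_REQUEST_VALUE_HID_PREFIX } } }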
 */
void SG_pull__add(
	SG_context* pCtx,
	SG_pull* pPull,
	SG_uint32 iDagnum,
	SG_rbtree* prbDagnodes,
	SG_rbtree* prbTags,
	SG_rbtree* prbDagnodePrefixes)
{
	_sg_pull* pMyPull = NULL;
	char bufDagnum[SG_DAGNUM__BUF_MAX__DEC];
	SG_bool found = SG_FALSE;
	SG_vhash* pvhDags = NULL; // Needs to be freed
	SG_vhash* pvhDagsRef = NULL; // Does not need to be freed, owned by parent vhash
	SG_vhash* pvhDagnum = NULL; // Needs to be freed
	SG_vhash* pvhDagnumRef = NULL; // Does not need to be freed, owned by parent vhash
	SG_rbtree_iterator* pit = NULL;

	SG_NULLARGCHECK_RETURN(pPull);
	SG_ARGCHECK_RETURN(iDagnum, iDagnum);
	pMyPull = (_sg_pull*)pPull;

	if (!pMyPull->pvhFragballRequest)
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pMyPull->pvhFragballRequest)  );

	SG_ERR_CHECK(  SG_dagnum__to_sz__decimal(pCtx, iDagnum, bufDagnum, sizeof(bufDagnum))  );

	/* Get dagnum vhash, adding it if necessary. */
	SG_ERR_CHECK(  SG_vhash__has(pCtx, pMyPull->pvhFragballRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
	if (found)
		SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pMyPull->pvhFragballRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDagsRef)  );
	else
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhDags)  );
		pvhDagsRef = pvhDags;
		SG_ERR_CHECK(  SG_vhash__add__vhash(pCtx, pMyPull->pvhFragballRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
	}
	
	SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhDagsRef, bufDagnum, &found)  );
	if (found)
		SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhDagsRef, bufDagnum, &pvhDagnumRef)  );
	
	if (!pvhDagnumRef)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhDagnum)  );
		pvhDagnumRef = pvhDagnum;
		SG_ERR_CHECK(  SG_vhash__add__vhash(pCtx, pvhDagsRef, bufDagnum, &pvhDagnum)  );
	}

	/* If dagnodes were provided, add them to the dagnum vhash */
	if (prbDagnodes)
	{
		const char* pszHid;
		SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbDagnodes, &found, &pszHid, NULL)  );
		while (found)
		{
			SG_ERR_CHECK(  SG_vhash__update__null(pCtx, pvhDagnumRef, pszHid)  );
			SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &pszHid, NULL)  );
		}
		SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	}	
	/* If tags were provided, add them to the dagnum vhash */
	if (prbTags)
	{
		const char* pszTag;
		SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbTags, &found, &pszTag, NULL)  );
		while (found)
		{
			SG_ERR_CHECK(  SG_vhash__update__string__sz(pCtx, pvhDagnumRef, pszTag, SG_SYNC_REQUEST_VALUE_TAG)  );
			SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &pszTag, NULL)  );
		}
		SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	}	
	/* If dagnode hid prefixes were provided, add them to the dagnum vhash */
	if (prbDagnodePrefixes)
	{
		const char* pszHidPrefix;
		SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbDagnodePrefixes, &found, &pszHidPrefix, NULL)  );
		while (found)
		{
			SG_ERR_CHECK(  SG_vhash__update__string__sz(pCtx, pvhDagnumRef, pszHidPrefix, SG_SYNC_REQUEST_VALUE_HID_PREFIX)  );
			SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &pszHidPrefix, NULL)  );
		}
		SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	}	

	return;

fail:
	SG_VHASH_NULLFREE(pCtx, pvhDagnum);
	SG_VHASH_NULLFREE(pCtx, pvhDags);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
}
static void _sg_jscore__install_modules(SG_context * pCtx, JSContext *cx, JSObject *glob,
	const SG_vhash *pServerConfig)
{
	SG_pathname *pModuleDirPath = NULL;
	SG_rbtree_iterator *pModuleDir = NULL;
	SG_rbtree *pModules = NULL;
	const char *szModuleDir = NULL;
	SG_bool ok = SG_FALSE;
	SG_uint32 len_js = 0;
	jsval rval;
	char *psz_js = NULL;
	jsval fo = JSVAL_VOID;
	JSObject * jsoServerConfig = NULL;

	if (gpJSCoreGlobalState->bSkipModules)
		return;

	SG_ERR_CHECK(  _modulesInstalled(pCtx, cx, glob, &ok)  );

	if (ok)
		return;

	if (! gpJSCoreGlobalState->pPathToDispatchDotJS)
		return;
	
	SG_ERR_CHECK(  _setModulesInstalled(pCtx, cx, glob)  );

	SG_ERR_CHECK(  sg_read_entire_file(pCtx, gpJSCoreGlobalState->pPathToDispatchDotJS, &psz_js, &len_js)  );
	if(!JS_EvaluateScript(cx, glob, psz_js, len_js, "dispatch.js", 1, &rval))
	{
		SG_ERR_CHECK_CURRENT;
		SG_ERR_THROW2(SG_ERR_JS, (pCtx, "An error occurred evaluating dispatch.js!"));
	}
	SG_NULLFREE(pCtx, psz_js);

	// Call init function in dispatch.js
	if(pServerConfig)
	{
		jsval arg;
		JSBool js_ok;
		jsval rval2;

		SG_JS_NULL_CHECK(  jsoServerConfig = JS_NewObject(cx, NULL, NULL, NULL)  );
		SG_ERR_CHECK(  sg_jsglue__copy_vhash_into_jsobject(pCtx, cx, pServerConfig, jsoServerConfig)  );
		arg = OBJECT_TO_JSVAL(jsoServerConfig);
		js_ok = JS_CallFunctionName(cx, glob, "init", 1, &arg, &rval2);
		SG_ERR_CHECK_CURRENT;
		if(!js_ok)
			SG_ERR_THROW2(SG_ERR_JS, (pCtx, "An error occurred initializing JavaScript framework: call to JavaScript init() failed"));
		
		jsoServerConfig = NULL;
	}

	// Load core.
	SG_ERR_CHECK(  _loadModuleDir(pCtx, gpJSCoreGlobalState->pPathToCore, "core", cx, glob)  );

	// Load modules.

	SG_ERR_CHECK(  SG_dir__list(pCtx, gpJSCoreGlobalState->pPathToModules, NULL, NULL, NULL, &pModules)  );
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pModuleDir, pModules, &ok, &szModuleDir, NULL)  );
	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__COPY(pCtx, &pModuleDirPath, gpJSCoreGlobalState->pPathToModules)  );
	while(ok)
	{
		if (szModuleDir[0] != '.')
		{
			SG_fsobj_stat fss;

			SG_ERR_CHECK(  SG_pathname__append__from_sz(pCtx, pModuleDirPath, szModuleDir)  );

			SG_ERR_CHECK(  SG_fsobj__stat__pathname(pCtx, pModuleDirPath, &fss)  );

			if (fss.type & SG_FSOBJ_TYPE__DIRECTORY)  // dot paths?
			{
				SG_ERR_CHECK(  _loadModuleDir(pCtx, pModuleDirPath, szModuleDir, cx, glob)  );
			}

			SG_ERR_CHECK(  SG_pathname__remove_last(pCtx, pModuleDirPath)  );
		}

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pModuleDir, &ok, &szModuleDir, NULL)  );
	}
												
	if (! JS_LookupProperty(cx, glob, "initModules", &fo))
	{
		SG_ERR_CHECK_CURRENT;
		SG_ERR_THROW2(SG_ERR_JS, (pCtx, "lookup of initModules() failed"));
	}

	if (!JSVAL_IS_VOID(fo))
		if (! JS_CallFunctionName(cx, glob, "initModules", 0, NULL, &rval))
			{
				SG_ERR_CHECK_CURRENT;
				SG_ERR_THROW2(SG_ERR_JS, (pCtx, "Call to initModules() failed"));
			}

fail:
	SG_NULLFREE(pCtx, psz_js);
	SG_PATHNAME_NULLFREE(pCtx, pModuleDirPath);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pModuleDir);
	SG_RBTREE_NULLFREE(pCtx, pModules);
}
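/**
 * Find the leaves of the DAG that are descendants of pszHidStart (or
 * pszHidStart itself when it is already a leaf) and return them in
 * *pprbHeads.  When bStopIfMultiple is set and more than one head qualifies,
 * *pprbHeads is set to NULL and *pdqfhs reports
 * SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE.
 */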
void SG_dagquery__find_descendant_heads(SG_context * pCtx,
										SG_repo * pRepo,
                                        SG_uint64 iDagNum,
										const char * pszHidStart,
										SG_bool bStopIfMultiple,
										SG_dagquery_find_head_status * pdqfhs,
										SG_rbtree ** pprbHeads)
{
	SG_rbtree * prbLeaves = NULL;
	SG_rbtree * prbHeadsFound = NULL;
	SG_rbtree_iterator * pIter = NULL;
	const char * pszKey_k = NULL;
	SG_bool b;
	SG_dagquery_find_head_status dqfhs;
	SG_dagquery_relationship dqRel;
	SG_uint32 nrFound;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NONEMPTYCHECK_RETURN(pszHidStart);
	SG_NULLARGCHECK_RETURN(pdqfhs);
	SG_NULLARGCHECK_RETURN(pprbHeads);

	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prbHeadsFound)  );

	// fetch a list of all of the LEAVES in the DAG.
	// this rbtree only contains keys; no assoc values.

	SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagNum, &prbLeaves)  );

	// if the starting point is a leaf, then we are done (we don't care how many
	// other leaves are in the rbtree because none will be a child of ours because
	// we are a leaf).

	SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbLeaves, pszHidStart, &b, NULL)  );
	if (b)
	{
		SG_ERR_CHECK(  SG_rbtree__add(pCtx, prbHeadsFound, pszHidStart)  );

		dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__IS_LEAF;
		goto done;
	}

	// inspect each leaf and qualify it; put the ones that pass
	// into the list of actual heads.

	nrFound = 0;
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIter, prbLeaves, &b, &pszKey_k, NULL)  );
	while (b)
	{
		// is head[k] a descendant of start?
		SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, iDagNum, pszKey_k, pszHidStart,
															 SG_FALSE, // we care about descendants, so don't skip
															 SG_TRUE,  // we don't care about ancestors, so skip them
															 &dqRel)  );

		if (dqRel == SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
		{
			nrFound++;

			if (bStopIfMultiple && (nrFound > 1))
			{
				// they wanted a unique answer and we've found too many answers
				// (which they won't be able to use anyway) so just stop and
				// return the status.  (we delete prbHeadsFound because it is
				// incomplete and so that they won't be tempted to use it.)

				SG_RBTREE_NULLFREE(pCtx, prbHeadsFound);
				dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE;
				goto done;
			}

			SG_ERR_CHECK(  SG_rbtree__add(pCtx, prbHeadsFound, pszKey_k)  );
		}

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIter, &b, &pszKey_k, NULL)  );
	}

	switch (nrFound)
	{
	case 0:
		// this should NEVER happen.  we should always be able to find a
		// leaf/head for a node.
		//
		// TODO the only case where this might happen is if named branches
		// TODO cause the leaf to be disqualified.  so i'm going to THROW
		// TODO here rather than ASSERT.

		SG_ERR_THROW2(  SG_ERR_DAG_NOT_CONSISTENT,
						(pCtx, "Could not find head/leaf for changeset [%s]", pszHidStart)  );
		break;

	case 1:
		dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__UNIQUE;
		break;

	default:
		dqfhs = SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE;
		break;
	}

done:
	*pprbHeads = prbHeadsFound;
	prbHeadsFound = NULL;

	*pdqfhs = dqfhs;

fail:
	SG_RBTREE_NULLFREE(pCtx, prbLeaves);
	SG_RBTREE_NULLFREE(pCtx, prbHeadsFound);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);
}
Example no. 14
/**
 * Compare all the nodes of a single DAG in two repos.
 */
static void _compare_one_dag(SG_context* pCtx,
							 SG_repo* pRepo1,
							 SG_repo* pRepo2,
							 SG_uint32 iDagNum,
							 SG_bool* pbIdentical)
{
	SG_bool bFinalResult = SG_FALSE;
	SG_rbtree* prbRepo1Leaves = NULL;
	SG_rbtree* prbRepo2Leaves = NULL;
	SG_uint32 iRepo1LeafCount, iRepo2LeafCount;
	SG_rbtree_iterator* pIterator = NULL;
	const char* pszId = NULL;
	SG_dagnode* pRepo1Dagnode = NULL;
	SG_dagnode* pRepo2Dagnode = NULL;
	SG_bool bFoundRepo1Leaf = SG_FALSE;
	SG_bool bFoundRepo2Leaf = SG_FALSE;
	SG_bool bDagnodesEqual = SG_FALSE;

	SG_NULLARGCHECK_RETURN(pRepo1);
	SG_NULLARGCHECK_RETURN(pRepo2);
	SG_NULLARGCHECK_RETURN(pbIdentical);

	SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo1, iDagNum, &prbRepo1Leaves)  );
	SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo2, iDagNum, &prbRepo2Leaves)  );

	SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbRepo1Leaves, &iRepo1LeafCount)  );
	SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbRepo2Leaves, &iRepo2LeafCount)  );

	if (iRepo1LeafCount != iRepo2LeafCount)
	{
#if TRACE_SYNC
		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "leaf count differs\n")  );
#endif
		goto Different;
	}

	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIterator, prbRepo1Leaves, &bFoundRepo1Leaf, &pszId, NULL)  );
	while (bFoundRepo1Leaf)
	{
		SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbRepo2Leaves, pszId, &bFoundRepo2Leaf, NULL)  );
		if (!bFoundRepo2Leaf)
		{
#if TRACE_SYNC && 0
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "couldn't locate leaf\r\n")  );
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Repo 1 leaves:\r\n")  );
			SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo1Leaves) );
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Repo 2 leaves:\r\n")  );
			SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, prbRepo2Leaves) );
			SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );
#endif
			goto Different;
		}

		SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo1, pszId, &pRepo1Dagnode)  );
		SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo2, pszId, &pRepo2Dagnode)  );

		SG_ERR_CHECK(  _compare_dagnodes(pCtx, pRepo1, pRepo1Dagnode, pRepo2, pRepo2Dagnode, &bDagnodesEqual)  );

		SG_DAGNODE_NULLFREE(pCtx, pRepo1Dagnode);
		SG_DAGNODE_NULLFREE(pCtx, pRepo2Dagnode);

		if (!bDagnodesEqual)
			goto Different;

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIterator, &bFoundRepo1Leaf, &pszId, NULL)  );
	}

	bFinalResult = SG_TRUE;

Different:
	*pbIdentical = bFinalResult;

	// fall through
fail:
	SG_RBTREE_NULLFREE(pCtx, prbRepo1Leaves);
	SG_RBTREE_NULLFREE(pCtx, prbRepo2Leaves);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIterator);
}
Example no. 15
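/**
 * Build a dagfrag rooted at the nodes in prbStartFromHids that reaches back
 * enough generations to (hopefully) connect with the nodes described by
 * pvhConnectToHidsAndGens (HID -> generation pairs).
 */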
void SG_sync__build_best_guess_dagfrag(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_uint64 iDagNum,
	SG_rbtree* prbStartFromHids,
	SG_vhash* pvhConnectToHidsAndGens,
	SG_dagfrag** ppFrag)
{
	SG_uint32 i, countConnectTo;
	SG_rbtree_iterator* pit = NULL;
	SG_dagnode* pdn = NULL;
	SG_dagfrag* pFrag = NULL;
	SG_repo_fetch_dagnodes_handle* pdh = NULL;

	SG_int32 minGen = SG_INT32_MAX;
	SG_int32 maxGen = 0;
	SG_uint32 gensToFetch = 0;

	char* psz_repo_id = NULL;
	char* psz_admin_id = NULL;

	SG_bool bNextHid;
	const char* pszRefHid;
	SG_int32 gen;

#if TRACE_SYNC
	SG_int64 startTime;
	SG_int64 endTime;
#endif

	SG_NULLARGCHECK_RETURN(prbStartFromHids);

	/* Find the minimum generation in pertinent "connect to" nodes. */
	if (pvhConnectToHidsAndGens)
	{
		SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhConnectToHidsAndGens, &countConnectTo)  );
		for (i = 0; i < countConnectTo; i++)
		{
			SG_int32 gen;
			SG_ERR_CHECK(  SG_vhash__get_nth_pair__int32(pCtx, pvhConnectToHidsAndGens, i, &pszRefHid, &gen)  );
			if (gen < minGen)
				minGen = gen;
		}
	}

	/* Happens when pulling into an empty repo, or when an entire dag is specifically requested. */
	if (minGen == SG_INT32_MAX)
		minGen = -1;

	SG_ERR_CHECK(  SG_repo__fetch_dagnodes__begin(pCtx, pRepo, iDagNum, &pdh)  );

	/* Find the maximum generation in pertinent "start from" nodes. */
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbStartFromHids, &bNextHid, &pszRefHid, NULL)  );
	while (bNextHid)
	{
		SG_ERR_CHECK(  SG_repo__fetch_dagnodes__one(pCtx, pRepo, pdh, pszRefHid, &pdn)  );
		SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pdn, &gen)  );
		if (gen > maxGen)
			maxGen = gen;
		SG_DAGNODE_NULLFREE(pCtx, pdn);

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &bNextHid, &pszRefHid, NULL)  );
	}
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);

	if (maxGen <= minGen)
		gensToFetch = FALLBACK_GENS_PER_ROUNDTRIP;
	else
		gensToFetch = maxGen - minGen;
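	/* i.e. fetch enough generations to span from the newest "start from"
	 * node down to the oldest pertinent "connect to" node, falling back to
	 * a fixed per-roundtrip depth when that span is empty. */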

#if TRACE_SYNC
	{
		char buf_dagnum[SG_DAGNUM__BUF_MAX__HEX];
		SG_uint32 count;

		SG_ERR_CHECK(  SG_dagnum__to_sz__hex(pCtx, iDagNum, buf_dagnum, sizeof(buf_dagnum))  );

		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Building best guess dagfrag for dag %s...\n", buf_dagnum)  );
		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "Starting from nodes:\n")  );
		SG_ERR_CHECK(  SG_rbtree_debug__dump_keys_to_console(pCtx, prbStartFromHids)  );
		SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhConnectToHidsAndGens, "Connecting to nodes")  );

		SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbStartFromHids, &count)  );

		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, "result has %u generations from %u starting nodes.\n",
			gensToFetch, count)  );
		SG_ERR_CHECK(  SG_console__flush(pCtx, SG_CS_STDERR)  );

		SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &startTime)  );
	}
#endif

	/* Return a frag with the corresponding generations filled in. */
	SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &psz_repo_id)  );
	SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id)  );
	SG_ERR_CHECK(  SG_dagfrag__alloc(pCtx, &pFrag, psz_repo_id, psz_admin_id, iDagNum)  );

	SG_ERR_CHECK(  SG_dagfrag__load_from_repo__multi(pCtx, pFrag, pRepo, prbStartFromHids, gensToFetch)  );
#if TRACE_SYNC
	SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &endTime)  );
	{
		SG_uint32 dagnodeCount;
		double seconds = ((double)endTime-(double)startTime)/1000;

		SG_ERR_CHECK(  SG_dagfrag__dagnode_count(pCtx, pFrag, &dagnodeCount)  );
		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR, " - %u nodes in frag, built in %1.3f seconds\n", dagnodeCount, seconds)  );
		SG_ERR_CHECK(  SG_dagfrag_debug__dump__console(pCtx, pFrag, "best-guess dagfrag", 0, SG_CS_STDERR)  );
	}
#endif

	*ppFrag = pFrag;
	pFrag = NULL;

	/* Common cleanup */
fail:
	SG_NULLFREE(pCtx, psz_repo_id);
	SG_NULLFREE(pCtx, psz_admin_id);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
	SG_ERR_IGNORE(  SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pdh)  );
}
Example no. 16
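/**
 * Repack version-control blobs: collect candidate blobs (grouped by gid) via
 * sg_pack__do_changeset() starting from one leaf, then, for each gid with
 * more than one blob version, keep one version FULL and re-encode the rest
 * as VCDIFF deltas against it.
 */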
void SG_repo__pack__vcdiff(SG_context* pCtx, SG_repo * pRepo)
{
	SG_rbtree* prb_leaves = NULL;
	SG_uint32 count_leaves = 0;
    const char* psz_hid_cs = NULL;
    SG_rbtree* prb_blobs = NULL;
    SG_bool b;
    SG_rbtree_iterator* pit = NULL;
    SG_rbtree_iterator* pit_for_gid = NULL;
    SG_bool b_for_gid;
    const char* psz_hid_ref = NULL;
    const char* psz_hid_blob = NULL;
    const char* psz_gid = NULL;
    SG_rbtree* prb = NULL;
    const char* psz_gen = NULL;
	SG_repo_tx_handle* pTx;

    SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo,SG_DAGNUM__VERSION_CONTROL,&prb_leaves)  );
    SG_ERR_CHECK(  SG_rbtree__count(pCtx, prb_leaves, &count_leaves)  );

    SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, prb_leaves, &b, &psz_hid_cs, NULL)  );

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_blobs)  );

    SG_ERR_CHECK(  sg_pack__do_changeset(pCtx, pRepo, psz_hid_cs, prb_blobs)  );

    SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prb_blobs, &b, &psz_gid, (void**) &prb)  );
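    // For each gid that has more than one blob version, keep the first
    // version encountered as a FULL blob and re-encode the remaining
    // versions as VCDIFF deltas against it.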
    while (b)
    {
        SG_uint32 count_for_gid = 0;
        SG_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count_for_gid)  );
        if (count_for_gid > 1)
        {
            psz_hid_ref = NULL;
            SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit_for_gid, prb, &b_for_gid, &psz_gen, (void**) &psz_hid_blob)  );
            while (b_for_gid)
            {
				// Not a lot of thought went into doing each of these in its own repo tx.  Consider alternatives.
				SG_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );

				if (psz_hid_ref)
                {
                    SG_ERR_CHECK(  SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid_blob, SG_BLOBENCODING__VCDIFF, psz_hid_ref, NULL, NULL, NULL, NULL)  );
                    // TODO be tolerant here of SG_ERR_REPO_BUSY
                }
                else
                {
                    psz_hid_ref = psz_hid_blob;

                    SG_ERR_CHECK(  SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid_ref, SG_BLOBENCODING__FULL, NULL, NULL, NULL, NULL, NULL)  );
                    // TODO be tolerant here of SG_ERR_REPO_BUSY
                }

				SG_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );

                SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit_for_gid, &b_for_gid, &psz_gen, (void**) &psz_hid_blob)  );
            }
            SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit_for_gid);
            psz_hid_ref = NULL;
        }
        SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &b, &psz_gid, (void**) &prb)  );
    }
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);

	SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, prb_blobs, _sg_repo__free_rbtree);

    SG_RBTREE_NULLFREE(pCtx, prb_leaves);

    return;

fail:
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit_for_gid);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
    SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, prb_blobs, _sg_repo__free_rbtree);
    SG_RBTREE_NULLFREE(pCtx, prb_leaves);
}