Example #1
0
void SG_dagfrag_debug__dump(SG_context * pCtx,
							SG_dagfrag * pFrag,
							const char * szLabel,
							SG_uint32 indent,
							SG_string * pStringOutput)
{
	// Append a human-readable dump of pFrag to pStringOutput: a header
	// line, then one section per node state.  Each section is produced
	// by re-walking the same cache with a different stateWanted filter.

	char szLine[4000];
	struct _dump_data dump_data;

	dump_data.nrDigits = 6;
	dump_data.indent = indent + 4;
	dump_data.pStringOutput = pStringOutput;

	SG_ERR_CHECK_RETURN(  SG_sprintf(pCtx, szLine, SG_NrElements(szLine), "%*cDagFrag[%p] [%s]\n", indent, ' ', pFrag, szLabel)  );
	SG_ERR_CHECK_RETURN(  SG_string__append__sz(pCtx, pStringOutput, szLine)  );

	// Section 1: START members.
	dump_data.stateWanted = SG_DFS_START_MEMBER;
	SG_ERR_CHECK_RETURN(  SG_sprintf(pCtx, szLine, SG_NrElements(szLine), "%*cStartMembers:\n", indent + 2, ' ')  );
	SG_ERR_CHECK_RETURN(  SG_string__append__sz(pCtx, pStringOutput, szLine)  );
	SG_ERR_CHECK_RETURN(  SG_rbtree__foreach(pCtx, pFrag->m_pRB_Cache, _dump_cb, &dump_data)  );

	// Section 2: INTERIOR members.
	dump_data.stateWanted = SG_DFS_INTERIOR_MEMBER;
	SG_ERR_CHECK_RETURN(  SG_sprintf(pCtx, szLine, SG_NrElements(szLine), "%*cInteriorMembers:\n", indent + 2, ' ')  );
	SG_ERR_CHECK_RETURN(  SG_string__append__sz(pCtx, pStringOutput, szLine)  );
	SG_ERR_CHECK_RETURN(  SG_rbtree__foreach(pCtx, pFrag->m_pRB_Cache, _dump_cb, &dump_data)  );

	// Section 3: the END-FRINGE.
	dump_data.stateWanted = SG_DFS_END_FRINGE;
	SG_ERR_CHECK_RETURN(  SG_sprintf(pCtx, szLine, SG_NrElements(szLine), "%*cEndFringe:\n", indent + 2, ' ')  );
	SG_ERR_CHECK_RETURN(  SG_string__append__sz(pCtx, pStringOutput, szLine)  );
	SG_ERR_CHECK_RETURN(  SG_rbtree__foreach(pCtx, pFrag->m_pRB_Cache, _dump_cb, &dump_data)  );
}
Example #2
0
static void _process_work_queue_item(SG_context * pCtx,
									 SG_dagfrag * pFrag, SG_rbtree* prb_WorkQueue, SG_int32 generationEnd, SG_repo* pRepo)
{
	// Process exactly ONE item from the work queue.
	//
	// SG_rbtree__foreach() is used purely to hand us the first item in
	// the queue; we don't actually care which item we get -- a random
	// one would serve just as well.  The iteration is stopped after a
	// single step because handling the item will modify the queue.
	//
	// The caller must invoke us in a loop to achieve a complete
	// traversal of the queue.
	//
	// (This would read a little more clearly if sg_rbtree__first()
	// were public and we could simply do:
	//     while (1) _process(__first)
	// )

	struct _work_queue_data wqd;

	wqd.pFrag = pFrag;
	wqd.pRepo = pRepo;
	wqd.generationEnd = generationEnd;
	wqd.prb_WorkQueue = prb_WorkQueue;

	SG_ERR_CHECK_RETURN(  SG_rbtree__foreach(pCtx, prb_WorkQueue, _process_work_queue_cb, &wqd)  );
}
Example #3
0
void SG_dagfrag__foreach_member(SG_context * pCtx,
								SG_dagfrag * pFrag,
								SG_dagfrag__foreach_member_callback * pcb,
								void * pVoidCallerData)
{
	// Visit the START_ and INTERIOR_ MEMBERS in the CACHE.  We walk
	// the SORTED MEMBER CACHE (ordered by generation) so that
	// ancestors are always presented before descendants.

	struct _fm_data fm;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NULLARGCHECK_RETURN(pcb);

	// Lazily (re)build the generation-sorted cache if it is missing.
	if (!pFrag->m_pRB_GenerationSortedMemberCache)
		SG_ERR_CHECK_RETURN(  _my_create_generation_sorted_member_cache(pCtx,pFrag)  );

	fm.pVoidCallerData = pVoidCallerData;
	fm.pcb = pcb;
	fm.pFrag = pFrag;

#if TRACE_DAGFRAG && 0
	SG_ERR_CHECK_RETURN(  SG_console(pCtx, SG_CS_STDERR, "SORTED MEMBER CACHE:\r\n")  );
	SG_ERR_CHECK_RETURN(  SG_rbtree_debug__dump_keys_to_console(pCtx, pFrag->m_pRB_GenerationSortedMemberCache)  );
	SG_ERR_CHECK_RETURN(  SG_console__flush(pCtx, SG_CS_STDERR)  );
#endif

	// Interpose our own callback around the caller's so that we can
	// munge the arguments it sees.
	SG_ERR_CHECK_RETURN(  SG_rbtree__foreach(pCtx,
											 pFrag->m_pRB_GenerationSortedMemberCache,
											 _sg_dagfrag__my_foreach_member_callback,
											 &fm)  );
}
Example #4
0
static void _my_create_generation_sorted_member_cache(SG_context * pCtx,
													  SG_dagfrag * pFrag)
{
	// everything in the main CACHE is (implicitly) sorted by HID.  there are
	// times when we need this list sorted by generation.
	//
	// here we construct a copy of the CACHE in that alternate ordering.  we
	// simply borrow the associated data pointers of the items in the real
	// CACHE, so we don't own/free them.  we only include items of type
	// START_MEMBER and INTERIOR_MEMBER.
	//
	// WARNING: whenever you change the CACHE (such as during __add_{leaf,leaves}()),
	// WARNING: you must delete/recreate or likewise update this copy.

	// discard any stale copy before rebuilding.  (NOTE(review): the explicit
	// NULL store looks redundant if SG_RBTREE_NULLFREE already nulls its
	// argument -- confirm against the macro; it is harmless either way.)
	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;

	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx,
									&pFrag->m_pRB_GenerationSortedMemberCache)  );
	// populate the new tree by walking the real CACHE; the callback
	// receives pFrag as its caller-data.
	SG_ERR_CHECK(  SG_rbtree__foreach(pCtx,
									  pFrag->m_pRB_Cache,
									  _sg_dagfrag__my_create_generation_sorted_member_cache_callback,
									  pFrag)  );
	return;

fail:
	// on any error, leave the frag with NO sorted cache rather than a
	// partially-populated one.
	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;
}
void SG_rbtree_ui64__foreach(
	SG_context* pCtx,
	const SG_rbtree_ui64* prb,
	SG_rbtree_ui64_foreach_callback* cb,
	void* ctx
	)
{
	// Iterate over a ui64-keyed rbtree by delegating to the plain
	// SG_rbtree__foreach() with an interlude callback that wraps the
	// caller's ui64 callback and its context.

	struct _cb_interlude_data shim;

	shim.pVoidData_ui64 = ctx;
	shim.pfn_cb_ui64 = cb;

	SG_ERR_CHECK_RETURN(  SG_rbtree__foreach(pCtx, (const SG_rbtree *)prb, _foreach_cb_interlude, &shim)  );
}
Example #6
0
void SG_dagfrag__load_from_repo__multi(SG_context * pCtx,
									   SG_dagfrag * pFrag,
									   SG_repo* pRepo,
									   SG_rbtree * prbLeaves,
									   SG_int32 nGenerations)
{
	// Walk prbLeaves and feed each entry to the add-leaves callback,
	// sharing one context (frag, repo, generation count) across the
	// whole iteration.

	struct _add_leaves_data alData;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(prbLeaves);

	alData.pRepo = pRepo;
	alData.pFrag = pFrag;
	alData.nGenerations = nGenerations;

	SG_ERR_CHECK_RETURN(  SG_rbtree__foreach(pCtx, prbLeaves, _sg_dagfrag__add_leaves_callback, &alData)  );
}
Example #7
0
void SG_dagfrag__to_vhash__shared(SG_context * pCtx,
								  SG_dagfrag * pFrag,
								  SG_vhash * pvhShared,
								  SG_vhash ** ppvhNew)
{
	// Serialize pFrag into a newly allocated vhash and return it in
	// *ppvhNew on success; the caller owns the returned vhash.  The new
	// vhash is allocated sharing with pvhShared (via __ALLOC__SHARED).
	// On failure, all locally allocated containers are freed and
	// *ppvhNew is left untouched.

	SG_vhash * pvhFrag = NULL;
	SG_varray * pvaMyData = NULL;
	struct _serialize_data serialize_data;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NULLARGCHECK_RETURN(ppvhNew);

	SG_ASSERT(  (! IS_TRANSIENT(pFrag))  );

	SG_ERR_CHECK(  SG_VHASH__ALLOC__SHARED(pCtx,&pvhFrag,5,pvhShared)  );

	// header fields identifying where this frag came from.
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx,pvhFrag,KEY_REPO_ID,pFrag->m_sz_repo_id)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx,pvhFrag,KEY_ADMIN_ID,pFrag->m_sz_admin_id)  );
	SG_ERR_CHECK(  SG_vhash__add__int64(pCtx,pvhFrag,KEY_DAGNUM,(SG_int64)pFrag->m_iDagNum)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx,pvhFrag,KEY_VERSION,VALUE_VERSION)  );

	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx,&pvaMyData)  );

	serialize_data.pvhFrag = pvhFrag;
	serialize_data.pvaMyData = pvaMyData;

	// walk the complete RB_Cache and add complete info for each my_data item
	// (regardless of whether the dagnode is a START or INTERIOR member or in
	// the END-FRINGE.  These will be in an essentially random order (HID).

	SG_ERR_CHECK(  SG_rbtree__foreach(pCtx,
									  pFrag->m_pRB_Cache,
									  _serialize_data_cb,
									  &serialize_data)  );

	// NOTE(review): __add__varray takes &pvaMyData -- presumably it
	// transfers ownership into pvhFrag and nulls the pointer (making the
	// NULLFREE in the fail path a no-op afterwards) -- confirm against
	// the SG_vhash API.
	SG_ERR_CHECK(  SG_vhash__add__varray(pCtx,pvhFrag,KEY_DATA,&pvaMyData)  );

	*ppvhNew = pvhFrag;
	return;

fail:
	SG_VHASH_NULLFREE(pCtx, pvhFrag);
	SG_VARRAY_NULLFREE(pCtx, pvaMyData);
}
Example #8
0
void SG_dagfrag__foreach_end_fringe(SG_context * pCtx,
									const SG_dagfrag * pFrag,
									SG_dagfrag__foreach_end_fringe_callback * pcb,
									void * pVoidCallerData)
{
	// Iterate over the end-fringe nodes in the CACHE on behalf of the
	// caller.

	struct _fef_data fef;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NULLARGCHECK_RETURN(pcb);

	fef.pVoidCallerData = pVoidCallerData;
	fef.pcb = pcb;

	// Interpose our own callback around the caller's so that we can
	// munge the arguments it sees.
	SG_ERR_CHECK_RETURN(  SG_rbtree__foreach(pCtx,
											 pFrag->m_pRB_Cache,
											 _sg_dagfrag__foreach_end_fringe_callback,
											 &fef)  );
}