/**
 * The values for RENAME, MOVE, ATTRBITS, SYMLINKS, and SUBMODULES are collapsible.  (see below)
 * In the corresponding rbUniques we only need to remember the set of unique values for the
 * field.  THESE ARE THE KEYS IN THE prbUnique.
 *
 * As a convenience, we associate a vector of entries with each key.  These form a many-to-one
 * mapping so that we can report all of the entries that have a given value.
 *
 * TODO Since we should only process a cset once, we should not get any
 * TODO duplicates in the vector, but it might be possible.  I'm not going
 * TODO to worry about it now.  If this becomes a problem, consider doing
 * TODO a unique-insert into the vector -or- making the vector a sub-rbtree.
 *
 */
static void _update_1_rbUnique(SG_context * pCtx, SG_rbtree * prbUnique, const char * pszKey, SG_mrg_cset_entry * pMrgCSetEntry_Leaf_k)
{
	SG_vector * pVec_Allocated = NULL;
	SG_vector * pVec;
	SG_bool bFound;

	SG_ERR_CHECK(  SG_rbtree__find(pCtx,prbUnique,pszKey,&bFound,(void **)&pVec)  );
	if (!bFound)
	{
		SG_ERR_CHECK(  SG_VECTOR__ALLOC(pCtx,&pVec_Allocated,3)  );
		SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx,prbUnique,pszKey,pVec_Allocated)  );
		pVec = pVec_Allocated;
		pVec_Allocated = NULL;			// rbtree owns this now
	}

	SG_ERR_CHECK(  SG_vector__append(pCtx,pVec,pMrgCSetEntry_Leaf_k,NULL)  );

#if TRACE_WC_MERGE
	SG_ERR_IGNORE(  SG_console(pCtx,SG_CS_STDERR,"_update_1_rbUnique: [%s][%s]\n",
									   pszKey,
									   SG_string__sz(pMrgCSetEntry_Leaf_k->pMrgCSet->pStringCSetLabel))  );
#endif

	return;

fail:
	SG_VECTOR_NULLFREE(pCtx, pVec_Allocated);
}
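For context, here is a minimal sketch of how a caller might feed one collapsible field into the rbtree above. The leaf array and the field being keyed (bufHid_Blob) are hypothetical stand-ins for illustration, not the actual merge driver:

static void _example__collect_unique_values(SG_context * pCtx,
											SG_rbtree * prbUnique,
											SG_mrg_cset_entry ** apEntry_Leaves,	// hypothetical leaf array
											SG_uint32 nrLeaves)
{
	SG_uint32 k;

	// Key each leaf by the value of the field being collapsed: duplicate
	// values append to an existing vector; distinct values add new keys.
	for (k = 0; k < nrLeaves; k++)
		SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx, prbUnique,
												 apEntry_Leaves[k]->bufHid_Blob,	// hypothetical field
												 apEntry_Leaves[k])  );
}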
Code example #2
void SG_repo__install_implementation(
        SG_context* pCtx, 
        sg_repo__vtable* pvtable
        )
{
    if (!g_prb_repo_vtables)
    {
        SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx, &g_prb_repo_vtables)  );
    }

    SG_ERR_CHECK_RETURN(  SG_rbtree__add__with_assoc(pCtx, g_prb_repo_vtables, pvtable->pszStorage, pvtable)  );
}
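As a usage sketch, a storage backend would typically register itself once at startup. The backend name and vtable below are illustrative; pszStorage is the one field the registry above demonstrably uses as the key:

static sg_repo__vtable s_example_vtable = { .pszStorage = "example_store" /* , backend function pointers... */ };

void example_store__install(SG_context * pCtx)
{
	// Registers under the key "example_store"; installing a second vtable
	// with the same name would presumably fail inside SG_rbtree__add__with_assoc.
	SG_ERR_CHECK_RETURN(  SG_repo__install_implementation(pCtx, &s_example_vtable)  );
}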
Code example #3
void SG_rbtree_ui64__add__with_assoc(
	SG_context* pCtx,
	SG_rbtree_ui64* prb,
	SG_uint64 ui64_key,
	void* assoc
	)
{
	sg_buf_ui64 bufUI64;
	(void)SG_hex__format_uint64(bufUI64, ui64_key);

	SG_ERR_CHECK_RETURN(  SG_rbtree__add__with_assoc(pCtx,
													 (SG_rbtree *)prb,
													 bufUI64,
													 assoc)  );
}
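A small usage sketch. The point of the wrapper is that the 64-bit key is rendered as hex text (presumably fixed-width, given the sg_buf_ui64 buffer type) so it can live in an ordinary string-keyed rbtree; the tree and payload here are hypothetical:

static void example__cache_by_id(SG_context * pCtx,
								 SG_rbtree_ui64 * prb_ui64,		// hypothetical ui64-keyed tree
								 SG_uint64 ui64Id,
								 void * pAssocData)				// hypothetical payload
{
	// Assuming SG_hex__format_uint64 zero-pads to a fixed width, the
	// rbtree's lexicographic order matches ascending numeric key order.
	SG_ERR_CHECK_RETURN(  SG_rbtree_ui64__add__with_assoc(pCtx, prb_ui64, ui64Id, pAssocData)  );
}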
/**
 * The values for RENAME, MOVE, ATTRBITS, SYMLINKS, and SUBMODULES are collapsible.  (see below)
 * In the corresponding rbUniques we only need to remember the set of unique values for the
 * field.  THESE ARE THE KEYS IN THE prbUnique.
 *
 * As a convenience, we associate a vector of entries with each key.  These form a many-to-one
 * mapping so that we can report all of the entries that have a given value.
 *
 * Here we carry forward the values from a sub-merge to the outer merge by copying the keys
 * from the source rbtree and inserting them into the destination rbtree.
 *
 * NOTE: the term sub-merge here refers to the steps within an n-way merge;
 * it DOES NOT refer to a submodule.
 */
static void _carry_forward_unique_values(SG_context * pCtx,
										 SG_rbtree * prbDest,
										 SG_rbtree * prbSrc)
{
	SG_rbtree_iterator * pIter = NULL;
	SG_vector * pVec_Allocated = NULL;
	const char * pszKey;
	SG_vector * pVec_Src;
	SG_vector * pVec_Dest;
	SG_uint32 j, nr;
	SG_bool bFound;


	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx,&pIter,prbSrc,&bFound,&pszKey,(void **)&pVec_Src)  );
	while (bFound)
	{
		SG_ERR_CHECK(  SG_rbtree__find(pCtx,prbDest,pszKey,&bFound,(void **)&pVec_Dest)  );
		if (!bFound)
		{
			SG_ERR_CHECK(  SG_VECTOR__ALLOC(pCtx,&pVec_Allocated,3)  );
			SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx,prbDest,pszKey,pVec_Allocated)  );
			pVec_Dest = pVec_Allocated;
			pVec_Allocated = NULL;			// rbtree owns this now
		}

		SG_ERR_CHECK(  SG_vector__length(pCtx,pVec_Src,&nr)  );
		for (j=0; j<nr; j++)
		{
			SG_mrg_cset_entry * pMrgCSetEntry_x;

			SG_ERR_CHECK(  SG_vector__get(pCtx,pVec_Src,j,(void **)&pMrgCSetEntry_x)  );
			SG_ERR_CHECK(  SG_vector__append(pCtx,pVec_Dest,pMrgCSetEntry_x,NULL)  );

#if TRACE_WC_MERGE
			SG_ERR_IGNORE(  SG_console(pCtx,SG_CS_STDERR,"_carry_forward_unique_value: [%s][%s]\n",
									   pszKey,
									   SG_string__sz(pMrgCSetEntry_x->pMrgCSet->pStringCSetLabel))  );
#endif
		}

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx,pIter,&bFound,&pszKey,(void **)&pVec_Src)  );
	}

fail:
	SG_VECTOR_NULLFREE(pCtx, pVec_Allocated);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx,pIter);
}
Code example #5
void SG_jscore__mutex__lock(
	SG_context* pCtx,
	const char* pszName)
{
	SG_bool bExists = SG_FALSE;
	_namedMutex* pFreeThisNamedMutex = NULL;
	_namedMutex* pNamedMutex = NULL;

	SG_ASSERT(gpJSCoreGlobalState);

	/* We always acquire the rbtree mutex first, then the specific named mutex,
	 * so the lock ordering is consistent and deadlock is impossible. */
	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Waiting for JS mutex manager in LOCK.")  );
	SG_ERR_CHECK(  SG_mutex__lock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	SG_ERR_CHECK(  SG_rbtree__find(pCtx, gpJSCoreGlobalState->prbJSMutexes, pszName, &bExists, (void**)&pNamedMutex)  );
	if (!bExists)
	{
		SG_ERR_CHECK(  SG_alloc1(pCtx, pFreeThisNamedMutex)  );
		pNamedMutex = pFreeThisNamedMutex;
		pNamedMutex->count = 0;
		SG_ERR_CHECK(  SG_mutex__init(pCtx, &pNamedMutex->mutex)  );
		SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx, gpJSCoreGlobalState->prbJSMutexes, pszName, pNamedMutex)  );
		pFreeThisNamedMutex = NULL;
	}
	pNamedMutex->count++; // Cannot be touched unless you hold mutexJsNamed. We do here.
	if (pNamedMutex->count > 10)
		SG_ERR_CHECK(  SG_log__report_info(pCtx, "%u threads are waiting for named JS mutex: %s", pNamedMutex->count-1, pszName)  );

	/* We deliberately unlock the rbtree mutex before locking the named mutex.
	 * We want to hold the lock on the rbtree for as little time as possible. Any subsequent
	 * attempts to lock the same name will yield the correct named mutex and correctly block
	 * on it below, without blocking access to the name management rbtree. */
	SG_ERR_CHECK(  SG_mutex__unlock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Released JS mutex manager in LOCK.")  );

	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Waiting for named JS mutex: %s", pszName)  );
	SG_ERR_CHECK(  SG_mutex__lock(pCtx, &pNamedMutex->mutex)  );
	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Acquired named JS mutex: %s", pszName)  );

	return;

fail:
	SG_ERR_IGNORE(  SG_mutex__unlock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	SG_NULLFREE(pCtx, pFreeThisNamedMutex);
}
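A sketch of the intended call pattern. This assumes a matching SG_jscore__mutex__unlock that reverses the steps above (re-acquiring the rbtree mutex, decrementing count, and releasing the named mutex); that counterpart is not shown here:

SG_ERR_CHECK(  SG_jscore__mutex__lock(pCtx, "my-named-section")  );

// ... critical section serialized across threads by name ...

SG_ERR_CHECK(  SG_jscore__mutex__unlock(pCtx, "my-named-section")  );	// assumed counterpart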
Code example #6
// Add a new dagnode to the work queue, having already determined that it needs
// to be added at a particular location in the list.
static void _hrca_work_queue__insert_at(
	SG_context * pCtx,
	_hrca_work_queue_t * pWorkQueue,
	SG_uint32 index,
	const char * pszHid,
	SG_uint32 revno,
	SG_dagnode ** ppDagnode, // Takes ownership and nulls the caller's copy.
	SG_bitvector * pIsAncestorOf
	)
{
	SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx, pWorkQueue->pRevnoCache, pszHid, ((char*)NULL)+revno)  );	// stash revno in the assoc pointer (pointer-as-integer trick)

	if (pWorkQueue->length==pWorkQueue->allocatedLength)
	{
		_hrca_work_queue_item_t * tmp = NULL;
		SG_ERR_CHECK(  SG_allocN(pCtx, pWorkQueue->allocatedLength*2, tmp)  );
		(void)memmove(tmp, pWorkQueue->p,
			pWorkQueue->length*sizeof(_hrca_work_queue_item_t));
		SG_NULLFREE(pCtx, pWorkQueue->p);
		pWorkQueue->p = tmp;
		pWorkQueue->allocatedLength *= 2;
	}

	if (index < pWorkQueue->length)
	{
		(void)memmove(&pWorkQueue->p[index+1], &pWorkQueue->p[index],
			(pWorkQueue->length-index)*sizeof(_hrca_work_queue_item_t));
	}

	pWorkQueue->p[index].revno = revno;
	SG_ERR_CHECK(  SG_BITVECTOR__ALLOC__COPY(pCtx, &pWorkQueue->p[index].pIsAncestorOf, pIsAncestorOf)  );
	pWorkQueue->p[index].pDagnode = *ppDagnode;

	*ppDagnode = NULL;

	++pWorkQueue->length;

	return;
fail:
	;
}
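Because the revno is smuggled through the assoc pointer, a cache lookup has to invert the encoding. A minimal sketch of the read side (the lookup site is hypothetical):

SG_bool bFound = SG_FALSE;
void * pAssoc = NULL;
SG_uint32 revno = 0;

SG_ERR_CHECK(  SG_rbtree__find(pCtx, pWorkQueue->pRevnoCache, pszHid, &bFound, &pAssoc)  );
if (bFound)
	revno = (SG_uint32)((char*)pAssoc - (char*)NULL);	// undo the pointer-as-integer encoding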
Code example #7
// Add a new dagnode to the work queue, having already determined that it needs
// to be added at a particular location in the list.
static void _fnsc_work_queue__insert_at(
	SG_context * pCtx,
	_fnsc_work_queue_t * pWorkQueue,
	SG_uint32 index,
	const char * pszHid,
	SG_uint32 revno,
	SG_dagnode ** ppDagnode, // Takes ownership and nulls the caller's copy.
	SG_byte isAncestorOf
	)
{
	SG_ERR_CHECK_RETURN(  SG_rbtree__add__with_assoc(pCtx, pWorkQueue->pRevnoCache, pszHid, ((char*)NULL)+revno)  );	// stash revno in the assoc pointer (pointer-as-integer trick)
	
	if (pWorkQueue->length==pWorkQueue->allocatedLength)
	{
		_fnsc_work_queue_item_t * tmp = NULL;
		SG_ERR_CHECK_RETURN(  SG_allocN(pCtx, pWorkQueue->allocatedLength*2, tmp)  );
		(void)memmove(tmp, pWorkQueue->p,
			pWorkQueue->length*sizeof(_fnsc_work_queue_item_t));
		SG_NULLFREE(pCtx, pWorkQueue->p);
		pWorkQueue->p = tmp;
		pWorkQueue->allocatedLength *= 2;
	}
	
	if (index < pWorkQueue->length)
	{
		(void)memmove(&pWorkQueue->p[index+1], &pWorkQueue->p[index],
			(pWorkQueue->length-index)*sizeof(_fnsc_work_queue_item_t));
	}
	
	pWorkQueue->p[index].revno = revno;
	pWorkQueue->p[index].isAncestorOf = isAncestorOf;
	pWorkQueue->p[index].pDagnode = *ppDagnode;
	
	*ppDagnode = NULL;
	
	++pWorkQueue->length;
	if(isAncestorOf==_ANCESTOR_OF_NEW)
	{
		++pWorkQueue->numAncestorsOfNewOnTheQueue;
	}
}
Code example #8
/**
 * create (depth,ObjectGID) key and add entry to work-queue.
 */
void sg_vv2__status__add_to_work_queue(SG_context * pCtx, sg_vv2status * pST, sg_vv2status_od * pOD)
{
	char buf[SG_GID_BUFFER_LENGTH + 20];

	SG_ERR_CHECK_RETURN(  SG_sprintf(pCtx,
									 buf,SG_NrElements(buf),
									 "%08d.%s",
									 pOD->minDepthInTree,pOD->bufGidObject)  );

#if TRACE_VV2_STATUS
	SG_console(pCtx,
			   SG_CS_STDERR,
			   "TD_ADQUE [GID %s][minDepth %d] type[%d,%d] depth[%d,%d]\n",
			   pOD->bufGidObject,pOD->minDepthInTree,
			   (int)((pOD->apInst[SG_VV2__OD_NDX_ORIG]) ? (int)pOD->apInst[SG_VV2__OD_NDX_ORIG]->typeInst : -1),
			   (int)((pOD->apInst[SG_VV2__OD_NDX_DEST]) ? (int)pOD->apInst[SG_VV2__OD_NDX_DEST]->typeInst : -1),
			   ((pOD->apInst[SG_VV2__OD_NDX_ORIG]) ? pOD->apInst[SG_VV2__OD_NDX_ORIG]->depthInTree : -1),
			   ((pOD->apInst[SG_VV2__OD_NDX_DEST]) ? pOD->apInst[SG_VV2__OD_NDX_DEST]->depthInTree : -1));
	SG_ERR_DISCARD;
#endif

	SG_ERR_CHECK_RETURN(  SG_rbtree__add__with_assoc(pCtx,pST->prbWorkQueue,buf,pOD)  );
}
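The "%08d.%s" format zero-pads the depth, so the rbtree's plain string ordering is depth order: a hypothetical key "00000002.g93ac..." sorts ahead of "00000011.g17f0...". A consumer can therefore take the shallowest pending object by reading the first key; a sketch:

SG_bool bFound = SG_FALSE;
const char * pszKey = NULL;
sg_vv2status_od * pOD = NULL;
SG_rbtree_iterator * pIter = NULL;

// The first key in iteration order is the minimum-depth entry.
SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIter, pST->prbWorkQueue,
										  &bFound, &pszKey, (void **)&pOD)  );
// ... process pOD, then remove its key from the work-queue ...
SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);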
Code example #9
void sg_pack__do_blob(SG_context* pCtx, const char* psz_gid, const char* psz_hid, SG_int32 gen, SG_rbtree* prb_blobs, SG_rbtree* prb_new)
{
    SG_rbtree* prb_allocated = NULL;
    SG_rbtree* prb = NULL;
    SG_bool b = SG_FALSE;
    char buf[64];

    SG_ERR_CHECK(  SG_rbtree__find(pCtx, prb_new, psz_hid, &b, NULL)  );
    if (b)
    {
        SG_ERR_CHECK(  SG_rbtree__find(pCtx, prb_blobs, psz_gid, &b, (void**) &prb)  );
        if (!b)
        {
            SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_allocated)  );
            SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx, prb_blobs, psz_gid, prb_allocated)  );
            prb = prb_allocated;
            prb_allocated = NULL;       // prb_blobs owns this now
        }
        SG_ERR_CHECK(  SG_sprintf(pCtx, buf, sizeof(buf), "%05d", (int) gen)  );
        SG_ERR_CHECK(  SG_rbtree__add__with_pooled_sz(pCtx, prb, buf, psz_hid)  );
    }

    return;

fail:
    SG_RBTREE_NULLFREE(pCtx, prb_allocated);
}