/**
 * Remove the first item in the work queue and return it for processing.
 *
 * The caller DOES NOT own the returned sg_vv2status_od pointer.
 */
void sg_vv2__status__remove_first_from_work_queue(SG_context * pCtx,
												  sg_vv2status * pST,
												  SG_bool * pbFound,
												  sg_vv2status_od ** ppOD)
{
	const char * szKey;
	sg_vv2status_od * pOD;
	SG_bool bFound;

	SG_ERR_CHECK_RETURN(  SG_rbtree__iterator__first(pCtx,
													 NULL,pST->prbWorkQueue,
													 &bFound,
													 &szKey,(void **)&pOD)  );
	if (bFound)
	{
		SG_ERR_CHECK_RETURN(  SG_rbtree__remove(pCtx, pST->prbWorkQueue,szKey)  );

#if TRACE_VV2_STATUS
		SG_console(pCtx,SG_CS_STDERR,"TD_RMQUE %s (head)\n",szKey);
		SG_ERR_DISCARD;
#endif
	}

	*pbFound = bFound;
	*ppOD = ((bFound) ? pOD : NULL);
}
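
/*
 * A minimal caller-side sketch (not part of the original source) showing how
 * sg_vv2__status__remove_first_from_work_queue() is typically driven until the
 * queue is empty. The helper _process_one_item() is hypothetical and stands in
 * for whatever per-item work the real caller performs.
 */
static void _drain_work_queue__sketch(SG_context * pCtx, sg_vv2status * pST)
{
	SG_bool bFound = SG_FALSE;
	sg_vv2status_od * pOD = NULL;	// borrowed pointer; we must not free it

	while (1)
	{
		SG_ERR_CHECK_RETURN(  sg_vv2__status__remove_first_from_work_queue(pCtx, pST, &bFound, &pOD)  );
		if (!bFound)
			break;		// work queue is empty

		SG_ERR_CHECK_RETURN(  _process_one_item(pCtx, pST, pOD)  );	// hypothetical helper
	}
}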

// Pop the last entry off the work queue array (the queue is consumed from the
// end), remove its HID from the revno cache, and hand the dagnode, its HID,
// and its ancestor bitvector back to the caller.
static void _hrca_work_queue__pop(
	SG_context * pCtx,
	_hrca_work_queue_t * pWorkQueue,
	SG_dagnode ** ppDagnode,
	const char ** ppszHidRef,
	SG_bitvector ** ppIsAncestorOf
	)
{
	const char * pszHidRef = NULL;
	SG_ERR_CHECK_RETURN(  SG_dagnode__get_id_ref(pCtx, pWorkQueue->p[pWorkQueue->length-1].pDagnode, &pszHidRef)  );
	SG_ERR_CHECK_RETURN(  SG_rbtree__remove(pCtx, pWorkQueue->pRevnoCache, pszHidRef)  );
	--pWorkQueue->length;
	
	*ppDagnode = pWorkQueue->p[pWorkQueue->length].pDagnode;
	*ppszHidRef = pszHidRef;
	*ppIsAncestorOf = pWorkQueue->p[pWorkQueue->length].pIsAncestorOf;
}
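
/*
 * Sketch (not the original declaration) of the queue layout that
 * _hrca_work_queue__pop() above relies on; the field names come from the
 * accesses in that function, everything else is an assumption. The queue is
 * consumed like a stack: pop takes the entry at p[length-1] and also drops its
 * HID from the revno-cache rbtree.
 */
typedef struct
{
	SG_dagnode * pDagnode;			// node waiting to be processed
	SG_bitvector * pIsAncestorOf;	// ancestry flags handed back to the caller on pop
} _hrca_work_queue_item__sketch;

typedef struct
{
	_hrca_work_queue_item__sketch * p;	// array of pending entries
	SG_uint32 length;					// number of valid entries in p[]
	SG_rbtree * pRevnoCache;			// keyed by HID; the pop above removes the popped HID
} _hrca_work_queue__sketch;
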
void SG_jscore__mutex__unlock(
	SG_context* pCtx,
	const char* pszName)
{
	SG_bool bExists = SG_FALSE;
	_namedMutex* pNamedMutex = NULL;

	SG_ASSERT(gpJSCoreGlobalState);

	/* We always acquire the rbtree mutex first, then the specific named mutex. A deadlock is impossible. */
	//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Waiting for JS mutex manager in UNLOCK.")  );
	SG_ERR_CHECK(  SG_mutex__lock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
	SG_ERR_CHECK(  SG_rbtree__find(pCtx, gpJSCoreGlobalState->prbJSMutexes, pszName, &bExists, (void**)&pNamedMutex)  );
	if (bExists)
	{
		SG_ASSERT(pNamedMutex);

		//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Releasing named JS mutex: %s", pszName)  );
		SG_ERR_CHECK(  SG_mutex__unlock(pCtx, &pNamedMutex->mutex)  );

		pNamedMutex->count--; // Cannot be touched unless you hold mutexJsNamed. We do here.
		if ( 0 == pNamedMutex->count )
		{
			//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Nobody else is waiting. Removing named JS mutex: %s", pszName)  );
			SG_ERR_CHECK(  SG_rbtree__remove(pCtx, gpJSCoreGlobalState->prbJSMutexes, pszName)  );
			SG_NULLFREE(pCtx, pNamedMutex);
		}
		//else if (pNamedMutex->count > 1) // Cannot be touched unless you hold mutexJsNamed. We do here.
			//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "%u threads are waiting for named JS mutex: %s", pNamedMutex->count-1, pszName)  );

		/* We deliberately unlock the rbtree mutex after the named mutex.
		   Creating a new mutex with the same name can't be allowed until this one is released, 
		   because they are logically the same lock. 
		   Unlocking doesn't block on anything and should be fast. */
		SG_ERR_CHECK(  SG_mutex__unlock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
		//SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Released JS mutex manager in UNLOCK.")  );
	}
	else
		SG_ERR_THROW2(  SG_ERR_NOT_FOUND, (pCtx, "Named mutex: %s", pszName)  );

	return;

fail:
	SG_ERR_IGNORE(  SG_mutex__unlock(pCtx, &gpJSCoreGlobalState->mutexJsNamed)  );
}
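
/*
 * A minimal usage sketch (not in the original source): the unlock above pairs
 * with an earlier lock on the same name. This assumes a matching
 * SG_jscore__mutex__lock(pCtx, pszName) counterpart exists; _do_guarded_work()
 * is a hypothetical stand-in for the protected operation.
 */
static void _with_named_mutex__sketch(SG_context * pCtx)
{
	SG_ERR_CHECK(  SG_jscore__mutex__lock(pCtx, "my-named-lock")  );

	// Critical section: only one holder of "my-named-lock" runs this at a time.
	SG_ERR_CHECK(  _do_guarded_work(pCtx)  );

	SG_ERR_CHECK(  SG_jscore__mutex__unlock(pCtx, "my-named-lock")  );
	return;

fail:
	// We may still hold the named mutex here; ignore a secondary failure while releasing it.
	SG_ERR_IGNORE(  SG_jscore__mutex__unlock(pCtx, "my-named-lock")  );
}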

// Same shape as _hrca_work_queue__pop() above: pop the last entry off the work
// queue array, remove its HID from the revno cache, keep the count of
// ancestors-of-new still on the queue current, and return the entry's fields.
static void _fnsc_work_queue__pop(
	SG_context * pCtx,
	_fnsc_work_queue_t * pWorkQueue,
	SG_dagnode ** ppDagnode,
	const char ** ppszHidRef,
	SG_byte * isAncestorOf
	)
{
	const char * pszHidRef = NULL;
	SG_ERR_CHECK_RETURN(  SG_dagnode__get_id_ref(pCtx, pWorkQueue->p[pWorkQueue->length-1].pDagnode, &pszHidRef)  );
	SG_ERR_CHECK_RETURN(  SG_rbtree__remove(pCtx, pWorkQueue->pRevnoCache, pszHidRef)  );
	--pWorkQueue->length;
	if(pWorkQueue->p[pWorkQueue->length].isAncestorOf==_ANCESTOR_OF_NEW)
		--pWorkQueue->numAncestorsOfNewOnTheQueue;
	
	*ppDagnode = pWorkQueue->p[pWorkQueue->length].pDagnode;
	*ppszHidRef = pszHidRef;
	*isAncestorOf = pWorkQueue->p[pWorkQueue->length].isAncestorOf;
}

void sg_vv2__status__remove_from_work_queue(SG_context * pCtx,
											sg_vv2status * pST,
											sg_vv2status_od * pOD,
											SG_uint32 depthInQueue)
{
	char buf[SG_GID_BUFFER_LENGTH + 20];

	SG_ERR_CHECK_RETURN(  SG_sprintf(pCtx,
									 buf,SG_NrElements(buf),
									 "%08d.%s",
									 depthInQueue,pOD->bufGidObject)  );

#if TRACE_VV2_STATUS
	SG_console(pCtx,
			   SG_CS_STDERR,
			   "TD_RMQUE [GID %s][minDepth %d] (short circuit)\n",
			   pOD->bufGidObject,depthInQueue);
	SG_ERR_DISCARD;
#endif

	SG_ERR_CHECK_RETURN(  SG_rbtree__remove(pCtx,pST->prbWorkQueue,buf)  );
}
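
/*
 * Worked example of the key format used above (illustrative values only):
 * with depthInQueue == 3 and pOD->bufGidObject == "gabc123...", SG_sprintf
 * builds "00000003.gabc123..." -- a zero-padded depth prefix followed by the
 * object's GID. The prefix presumably keeps the rbtree sorted shallowest-first
 * (which is what the remove-first routine at the top exploits), while this
 * routine can rebuild the exact key and remove a specific entry directly.
 */
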
static void _process_work_queue_cb(SG_context * pCtx,
								   const char * szHid, SG_UNUSED_PARAM(void * pAssocData), void * pVoidCallerData)
{
	// we are handed an arbitrary item in the work_queue.
	//
	// look up the corresponding DATA node in the Cache, if it has one.
	//
	// then evaluate where this node belongs:

	struct _work_queue_data * pWorkQueueData = (struct _work_queue_data *)pVoidCallerData;
	_my_data * pDataCached = NULL;
	SG_dagnode * pDagnodeAllocated = NULL;
	SG_bool bPresent = SG_FALSE;
	SG_UNUSED(pAssocData);

	SG_ERR_CHECK(  _cache__lookup(pCtx, pWorkQueueData->pFrag,szHid,&pDataCached,&bPresent)  );
	if (!bPresent)
	{
		// dagnode is not present in the cache.  therefore, we've never visited this
		// dagnode before.  add it to the cache with proper settings and maybe add
		// all of the parents to the work queue.

		SG_int32 myGeneration;

		SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pWorkQueueData->pRepo, szHid,&pDagnodeAllocated)  );

		SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pDagnodeAllocated,&myGeneration)  );

		if (myGeneration > pWorkQueueData->generationEnd)
		{
			SG_ERR_CHECK(  _cache__add__dagnode(pCtx,
												pWorkQueueData->pFrag,
												myGeneration,
												pDagnodeAllocated,SG_DFS_INTERIOR_MEMBER,
												&pDataCached)  );
			pDagnodeAllocated = NULL;	// cache takes ownership of dagnode
			SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pDataCached->m_pDagnode, pWorkQueueData->prb_WorkQueue)  );
		}
		else
		{
			SG_ERR_CHECK(  _cache__add__fringe(pCtx, pWorkQueueData->pFrag, szHid)  );
			SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
		}
	}
	else
	{
		// dagnode already present in the cache. therefore, we have already visited it
		// before.  we can change our minds about the state of this dagnode if something
		// has changed (such as the fragment bounds being widened).

		switch (pDataCached->m_state)
		{
		default:
		//case SG_DFS_UNKNOWN:
			SG_ASSERT_RELEASE_FAIL2(  (0),
							  (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
							   pDataCached->m_state,szHid)  );

		case SG_DFS_START_MEMBER:
			// a dagnode has a parent that we are considering a START node.
			// this can happen when we were started from a non-leaf node and
			// then a subsequent call to __load is given a true leaf node or
			// a node deeper in the tree that has our original start node as
			// a parent.
			//
			// clear the start bit.  (we only want true fragment-terminal
			// nodes marked as start nodes.)

			pDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
			// FALL-THRU-INTENDED

		case SG_DFS_INTERIOR_MEMBER:
			// a dagnode that we have already visited is being re-visited.
			// this happens for a number of reasons, such as when we hit
			// the parent of a branch/fork.  we might get visited because
			// we are a parent of each child.
			//
			// we also get revisited when the caller expands the scope of
			// the fragment.

			if (pWorkQueueData->generationEnd < pDataCached->m_genDagnode)
			{
				// the caller has expanded the scope of the fragment to include
				// older generations than the last time we visited this node.
				// this doesn't affect the state of this node, but it could mean
				// that older ancestors of this node should be looked at.

				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx,pDataCached->m_pDagnode,pWorkQueueData->prb_WorkQueue)  );
			}
			break;

		case SG_DFS_END_FRINGE:
			// a dagnode that was on the end-fringe is being re-evaluated.

			if (pDataCached->m_genDagnode > pWorkQueueData->generationEnd)
			{
				// it looks like the bounds of the fragment were expanded and
				// now includes this dagnode.
				//
				// move it from END-FRINGE to INCLUDE state.
				// and re-eval all of its parents.

				pDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx,pDataCached->m_pDagnode,pWorkQueueData->prb_WorkQueue)  );
			}
			break;
		}
	}

	// we have completely dealt with this dagnode, so remove it from the work queue
	// and cause our caller to restart the iteration (because we changed the queue).

	SG_ERR_CHECK(  SG_rbtree__remove(pCtx,pWorkQueueData->prb_WorkQueue,szHid)  );
	SG_ERR_THROW(  SG_ERR_RESTART_FOREACH  );

fail:
	SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
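
/*
 * Caller-side sketch (not in the original source) of the restart-foreach
 * pattern the callback above relies on: each invocation removes the item it
 * just handled and throws SG_ERR_RESTART_FOREACH, so the caller keeps
 * re-running the iteration until the queue drains and the foreach completes
 * without error. Setup of the _work_queue_data is elided; only the loop shape
 * is shown.
 */
static void _run_work_queue__sketch(SG_context * pCtx, struct _work_queue_data * pWorkQueueData)
{
	while (1)
	{
		SG_rbtree__foreach(pCtx, pWorkQueueData->prb_WorkQueue, _process_work_queue_cb, pWorkQueueData);
		if (!SG_context__has_err(pCtx))
			break;								// queue fully processed
		if (!SG_context__err_equals(pCtx, SG_ERR_RESTART_FOREACH))
			return;								// a real error is already set in pCtx; let the caller see it
		SG_context__err_reset(pCtx);			// expected restart: the callback modified the queue
	}
}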