Example #1
static void _cache__add__dagnode(SG_context * pCtx,
								 SG_dagfrag * pFrag,
								 SG_int32 gen,
								 SG_dagnode * pDagnode,		// if successful, we take ownership of dagnode
								 SG_uint32 state,
								 _my_data ** ppData)		// we retain ownership of DATA (but you may modify non-pointer values within it)
{
	const char * szHid;
	_my_data * pDataAllocated = NULL;
	_my_data * pDataCached = NULL;
	_my_data * pOldData = NULL;

	SG_NULLARGCHECK_RETURN(pFrag);				// this is probably not necessary for an internal routine
	SG_NULLARGCHECK_RETURN(pDagnode);			// this is probably not necessary for an internal routine

	SG_ERR_CHECK_RETURN(  SG_dagnode__get_id_ref(pCtx,pDagnode,&szHid)	);

	SG_ASSERT_RELEASE_RETURN2(  (SG_DFS_END_FRINGE != state),
						(pCtx,"Adding end-fringe dagnode [%s] to dagfrag.",szHid)  );

	SG_ERR_CHECK(  SG_alloc(pCtx, 1, sizeof(_my_data), &pDataAllocated)  );

	pDataAllocated->m_genDagnode = gen;
	pDataAllocated->m_state = state;

	SG_ERR_CHECK(  SG_rbtree__update__with_assoc(pCtx,pFrag->m_pRB_Cache,szHid,pDataAllocated,(void **)&pOldData)  );
	SG_ASSERT_RELEASE_FAIL2(  (!pOldData),
					  (pCtx,"Possible memory leak adding [%s] to dagfrag.",szHid)  );

	// if everything is successful, the cache now owns pData and pDagnode.

	pDataCached = pDataAllocated;
	pDataAllocated = NULL;
	pDataCached->m_pDagnode = pDagnode;

	if (ppData)
		*ppData = pDataCached;

	return;

fail:
	if (pDataCached)					// on errors the caller still owns pDagnode, even if pData
		pDataCached->m_pDagnode = NULL;	// made it into the cache.  this may cause problems later if you keep going.

	SG_ERR_IGNORE(  _my_data__free(pCtx, pDataAllocated)  );  // free pData if we did not get it stuck into the cache.
}
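
The SG_ERR_CHECK / fail-label idiom above is the backbone of every routine in these examples: locals that own memory start as NULL, each call is wrapped so that a failure jumps to fail:, and ownership is transferred (and the local NULLed) only once nothing else can fail. A minimal sketch of the shape, with invented names (_widget, _widget__alloc, _widget__free, and _do_work are hypothetical, not part of the library):

static void _example(SG_context * pCtx, _widget ** ppResult)
{
	_widget * pWidget = NULL;			// NULL so cleanup is safe on any path

	SG_ERR_CHECK(  _widget__alloc(pCtx, &pWidget)  );	// jumps to fail on error
	SG_ERR_CHECK(  _do_work(pCtx, pWidget)  );

	*ppResult = pWidget;				// success: caller now owns the widget
	return;

fail:
	_widget__free(pCtx, pWidget);		// reached only on error, before ownership transferred
}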
Example #2
void SG_dagfrag__load_from_repo__one(SG_context * pCtx,
									 SG_dagfrag * pFrag,
									 SG_repo* pRepo,
									 const char * szHidStart,
									 SG_int32 nGenerations)
{
	// load a fragment of the dag starting with the given dagnode
	// for nGenerations of parents.
	//
	// we add this portion of the graph to whatever we already
	// have in our fragment.  this may either augment it (giving
	// us a larger connected piece) or add an independent
	// subset.
	//
	// if nGenerations <= 0, load everything from this starting point
	// back to the NULL/root.
	//
	// generationStart is the generation of the starting dagnode.
	//
	// the starting dagnode *MAY* be in the final start-fringe.
	// normally, it will be.  but if we are called multiple times
	// (and have more than one start point), it may be the case
	// that this node is a parent of one of the other start points.
	//
	// we compute generationEnd as the generation that we will NOT
	// include in the fragment; nodes of that generation will be in
	// the end-fringe.  that is, we include [start...end) like most
	// C++ iterators.
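	//
	// a worked example of the bound (matching the diagram in the
	// closing comment below): starting at a generation-20 node with
	// nGenerations == 3 gives generationEnd == 17, so nodes of
	// generations 20, 19, and 18 become members and any generation-17
	// parents land in the end-fringe.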

	_my_data * pMyDataCached = NULL;
	SG_dagnode * pDagnodeAllocated = NULL;
	SG_dagnode * pDagnodeStart;
	SG_int32 generationStart, generationEnd;
	SG_bool bPresent = SG_FALSE;
    SG_rbtree* prb_WorkQueue = NULL;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NONEMPTYCHECK_RETURN(szHidStart);

	// if we are extending the fragment, delete the generation-sorted
	// member cache copy.  (see __foreach_member()).  it's either that
	// or update it in parallel as we change the real CACHE and that
	// doesn't seem worth the bother.

	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_WorkQueue)  );

	// fetch the starting dagnode and compute the generation bounds.
	// first, see if the cache already has info for this dagnode.
	// if not, fetch it from the source and then add it to the cache.

	SG_ERR_CHECK(  _cache__lookup(pCtx, pFrag,szHidStart,&pMyDataCached,&bPresent)  );
	if (!bPresent)
	{
		if (!pRepo)
			SG_ERR_THROW(  SG_ERR_INVALID_WHILE_FROZEN  );

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, szHidStart, &pDagnodeAllocated)  );

		pDagnodeStart = pDagnodeAllocated;
	}
	else
	{
		pDagnodeStart = pMyDataCached->m_pDagnode;
	}

	SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pDagnodeStart,&generationStart)  );
	SG_ASSERT_RELEASE_FAIL2(  (generationStart > 0),
					  (pCtx,"Invalid generation value [%d] for dagnode [%s]",
					   generationStart,szHidStart)  );
	if ((nGenerations <= 0)  ||  (generationStart <= nGenerations))
		generationEnd = 0;
	else
		generationEnd = generationStart - nGenerations;

	if (!bPresent)
	{
		// this dagnode was not already present in the cache.
		// add it to the cache directly and set the state.
		// we don't need to go thru the work queue for it.
		//
		// then add all of its parents to the work queue.

		SG_ERR_CHECK(  _cache__add__dagnode(pCtx,
											pFrag,
											generationStart,
											pDagnodeAllocated,SG_DFS_START_MEMBER,
											&pMyDataCached)  );
		pDagnodeAllocated = NULL;

		SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
	}
	else
	{
		// the node was already present in the cache, so we have already
		// walked at least part of the graph around it.

		switch (pMyDataCached->m_state)
		{
		default:
		//case SG_DFS_UNKNOWN:
			SG_ASSERT_RELEASE_FAIL2(  (0),
							  (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
							   pMyDataCached->m_state,szHidStart)  );

		case SG_DFS_INTERIOR_MEMBER:				// already in fragment
		case SG_DFS_START_MEMBER:	// already in fragment, duplicated leaf?
			if (generationEnd < pMyDataCached->m_genDagnode)
			{
				// they've expanded the bounds of the fragment since we
				// last visited this dagnode.  keep this dagnode in the
				// fragment and revisit the ancestors in case any were
				// put in the end-fringe that should now be included.
				//
				// we leave the state as INTERIOR_MEMBER or START_MEMBER
				// because a duplicate start point should remain a
				// start point.

				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
			}
			else
			{
				// if the current end-generation requested is >= the previous
				// end-generation, then we've already completely explored this
				// dagnode.  that is, a complete walk from this node for
				// nGenerations would not reveal any new information.
			}
			break;

		case SG_DFS_END_FRINGE:
			{
				// they want to start at a dagnode that we put in the
				// end-fringe.  this can happen if they need to expand
				// the bounds of the fragment to include older ancestors.
				//
				// we do not mark this as a start node because someone
				// else already has it as a parent.

				pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
			}
			break;
		}
	}

	// we optionally put the parents of the current node into the work queue.
	//
	// service the work queue until it is empty.  this allows us to walk the graph without
	// recursion.  that is, as we decide what to do with a node, we add the parents
	// to the queue.  we then iterate thru the work queue until we have dealt with
	// everything -- that is, until all parents have been properly placed.
	//
	// we cannot use a standard iterator to drive this loop because we
	// modify the queue.

	while (1)
	{
		_process_work_queue_item(pCtx, pFrag,prb_WorkQueue,generationEnd,pRepo);
		if (!SG_context__has_err(pCtx))
			break;							// we processed everything in the queue and are done

		if (!SG_context__err_equals(pCtx,SG_ERR_RESTART_FOREACH))
			SG_ERR_RETHROW;

		SG_context__err_reset(pCtx);		// queue changed, restart iteration
	}

	SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);

	/*
	** we have loaded a piece of the dag (starting with the given start node
	** and tracing all parent edges back n generations).  we leave with everything
	** in our progress queues so that other start nodes can be added to the
	** fragment.  this allows the processing of subsequent start nodes to
	** override some of the decisions that we made.  for example:
	**
	**           Q_15
	**             |
	**             |
	**           Z_16
	**           /  \
	**          /    \
	**      Y_17      A_17
	**          \    /   \
	**           \  /     \
	**           B_18     C_18
	**             |
	**             |
	**           D_19
	**             |
	**             |
	**           E_20
	**
	** if we started with the leaf E_20 and requested 3 generations, we would have:
	**     start_set := { E }
	**     include_set := { B, D, E }
	**     end_set := { Y, A }
	**
	** after a subsequent call with the leaf C_18 and 3 generations, we would have:
	**     start_set := { C, E }
	**     include_set := { Z, A, B, C, D, E }
	**     end_set := { Q, Y }
	**
	*/

	return;

fail:
	SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);
	SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
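
A minimal calling sketch of the two-pass scenario walked through in the closing comment above, assuming an open context, repo, and fragment, and the HIDs of E_20 and C_18 (the pszHid_* variable names are invented for illustration):

	SG_ERR_CHECK(  SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, pszHid_E, 3)  );	// start {E}, end-fringe {Y,A}
	SG_ERR_CHECK(  SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, pszHid_C, 3)  );	// start {C,E}, end-fringe {Q,Y}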
Example #3
static void _process_work_queue_cb(SG_context * pCtx,
								   const char * szHid, SG_UNUSED_PARAM(void * pAssocData), void * pVoidCallerData)
{
	// we are given a random item in the work_queue.
	//
	// lookup the corresponding DATA node in the Cache, if it has one.
	//
	// and then evaluate where this node belongs:

	struct _work_queue_data * pWorkQueueData = (struct _work_queue_data *)pVoidCallerData;
	_my_data * pDataCached = NULL;
	SG_dagnode * pDagnodeAllocated = NULL;
	SG_bool bPresent = SG_FALSE;
	SG_UNUSED(pAssocData);

	SG_ERR_CHECK(  _cache__lookup(pCtx, pWorkQueueData->pFrag,szHid,&pDataCached,&bPresent)  );
	if (!bPresent)
	{
		// dagnode is not present in the cache.  therefore, we've never visited this
		// dagnode before.  add it to the cache with proper settings and maybe add
		// all of the parents to the work queue.

		SG_int32 myGeneration;

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pWorkQueueData->pRepo, szHid,&pDagnodeAllocated)  );

		SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pDagnodeAllocated,&myGeneration)  );

        if ((myGeneration > pWorkQueueData->generationEnd))
        {
            SG_ERR_CHECK(  _cache__add__dagnode(pCtx,
												pWorkQueueData->pFrag,
												myGeneration,
												pDagnodeAllocated,SG_DFS_INTERIOR_MEMBER,
												&pDataCached)  );
            pDagnodeAllocated = NULL;	// cache takes ownership of dagnode
			SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pDataCached->m_pDagnode, pWorkQueueData->prb_WorkQueue)  );
        }
        else
        {
            SG_ERR_CHECK(  _cache__add__fringe(pCtx, pWorkQueueData->pFrag, szHid)  );
            SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
        }
	}
	else
	{
		// dagnode already present in the cache. therefore, we have already visited it
		// before.  we can change our minds about the state of this dagnode if something
		// has changed (such as the fragment bounds being widened).

		switch (pDataCached->m_state)
		{
		default:
		//case SG_DFS_UNKNOWN:
			SG_ASSERT_RELEASE_FAIL2(  (0),
							  (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
							   pDataCached->m_state,szHid)  );

		case SG_DFS_START_MEMBER:
			// this dagnode was marked as a START node, but we have just
			// reached it as the parent of another node.
			// this can happen when we were started from a non-leaf node and
			// then a subsequent call to __load is given a true leaf node or
			// a node deeper in the tree that has our original start node as
			// a parent.
			//
			// clear the start bit.  (we only want true fragment-terminal
			// nodes marked as start nodes.)

			pDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
			// FALL-THRU-INTENDED

		case SG_DFS_INTERIOR_MEMBER:
			// a dagnode that we have already visited is being re-visited.
			// this happens for a number of reasons, such as when we hit
			// the parent of a branch/fork.  we get visited once for each
			// child.
			//
			// we also get revisited when the caller expands the scope of
			// the fragment.

			if (pWorkQueueData->generationEnd < pDataCached->m_genDagnode)
			{
				// the caller has expanded the scope of the fragment to include
				// older generations than the last time we visited this node.
				// this doesn't affect the state of this node, but it could mean
				// that older ancestors of this node should be looked at.

				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx,pDataCached->m_pDagnode,pWorkQueueData->prb_WorkQueue)  );
			}
			break;

		case SG_DFS_END_FRINGE:
			// a dagnode that was on the end-fringe is being re-evaluated.

			if (pDataCached->m_genDagnode > pWorkQueueData->generationEnd)
			{
				// it looks like the bounds of the fragment were expanded and
				// now includes this dagnode.
				//
				// move it from END_FRINGE to INTERIOR_MEMBER state
				// and re-evaluate all of its parents.

				pDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx,pDataCached->m_pDagnode,pWorkQueueData->prb_WorkQueue)  );
			}
			break;
		}
	}

	// we have completely dealt with this dagnode, so remove it from the work queue
	// and cause our caller to restart the iteration (because we changed the queue).

	SG_ERR_CHECK(  SG_rbtree__remove(pCtx,pWorkQueueData->prb_WorkQueue,szHid)  );
	SG_ERR_THROW(  SG_ERR_RESTART_FOREACH  );

fail:
	SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
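
The SG_ERR_THROW(SG_ERR_RESTART_FOREACH) at the bottom is half of a two-piece protocol; the while(1) driver loop in Example #2 is the other half. A condensed sketch of the contract, assuming the queue is walked with SG_rbtree__foreach and that the callback (as above) removes each item it finishes before throwing the restart error:

	while (1)
	{
		SG_rbtree__foreach(pCtx, prb_WorkQueue, _process_work_queue_cb, &wq_data);
		if (!SG_context__has_err(pCtx))
			break;							// queue fully drained; we are done
		if (!SG_context__err_equals(pCtx, SG_ERR_RESTART_FOREACH))
			SG_ERR_RETHROW;					// a real error; propagate it
		SG_context__err_reset(pCtx);		// the callback changed the queue; restart the walk
	}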
/**
 * We get called once for each input in the stringarray
 * of files to unlock.
 *
 * Validate it and find the item's GID and add it to
 * the given VHASH.
 *
 * TODO 2012/03/02 This is a little different from the
 * TODO            version in sg_wc__lock.c.  Is this
 * TODO            really necessary?
 *
 */
static void _map_input(SG_context * pCtx,
                       SG_wc_tx * pWcTx,
                       SG_vhash * pvh_gids,
                       const char * pszInput)
{
    SG_string * pStringRepoPath = NULL;
    char * pszGid = NULL;
    sg_wc_liveview_item * pLVI;		// we do not own this
    SG_wc_status_flags statusFlags;
    SG_bool bKnown;
    char cDomain;

    SG_NONEMPTYCHECK_RETURN( pszInput );	// throw if omitted, do not assume "@/".

    SG_ERR_CHECK(  sg_wc_db__path__anything_to_repopath(pCtx, pWcTx->pDb, pszInput,
                   SG_WC_DB__PATH__IMPORT_FLAGS__TREAT_NULL_AS_ERROR,
                   &pStringRepoPath, &cDomain)  );
#if TRACE_WC_LOCK
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               "SG_wc__unlock: '%s' normalized to [domain %c] '%s'\n",
                               pszInput, cDomain, SG_string__sz(pStringRepoPath))  );
#endif

    // find the GID/alias of the named item, taking into
    // account the domain component.  (that is, they could
    // have said "@g12345...." or "@b/....." rather than
    // a live path.)
    //
    // Fetch the LVI for this item.  This may implicitly
    // cause a SCANDIR/READDIR and/or sync up with the DB.
    // This is good because it also means we will get the
    // exact-match stuff on each component within the
    // pathname.

    SG_ERR_CHECK(  sg_wc_tx__liveview__fetch_item__domain(pCtx, pWcTx, pStringRepoPath,
                   &bKnown, &pLVI)  );
    if (!bKnown)
    {
        // We only get this if the path is completely bogus and
        // took us off into the weeds (as opposed to reporting
        // something just not-controlled).
        SG_ERR_THROW2(  SG_ERR_NOT_FOUND,
                        (pCtx, "Unknown item '%s'.", SG_string__sz(pStringRepoPath))  );
    }

    if (pLVI->tneType != SG_TREENODEENTRY_TYPE_REGULAR_FILE)
        SG_ERR_THROW2(  SG_ERR_VC_LOCK_FILES_ONLY,
                        (pCtx, "%s", pszInput)  );

    SG_ERR_CHECK(  sg_wc__status__compute_flags(pCtx, pWcTx, pLVI,
                   SG_TRUE,	// --no-ignores (faster)
                   SG_FALSE,	// trust TSC
                   &statusFlags)  );
    if (statusFlags & (SG_WC_STATUS_FLAGS__U__FOUND
                       |SG_WC_STATUS_FLAGS__U__IGNORED))
        SG_ERR_THROW2(  SG_ERR_ITEM_NOT_UNDER_VERSION_CONTROL,
                        (pCtx, "%s", pszInput)  );
    // TODO 2012/03/02 Not sure checking for __S__ADDED is needed on an unlock.
    if (statusFlags & SG_WC_STATUS_FLAGS__S__ADDED)
        SG_ERR_THROW2(  SG_ERR_VC_LOCK_NOT_COMMITTED_YET,
                        (pCtx, "%s", pszInput)  );

    SG_ERR_CHECK(  sg_wc_db__gid__get_gid_from_alias(pCtx, pWcTx->pDb, pLVI->uiAliasGid, &pszGid)  );
    SG_ASSERT_RELEASE_FAIL2(  (pszGid[0] == 'g'),	// we get 't' domain IDs for uncontrolled items
                              (pCtx, "%s has temp id %s", pszInput, pszGid)  );

    // TODO 2012/03/02 The original PendingTree version of the
    // TODO            code did a SG_vhash__add__null() which
    // TODO            will throw on duplicates.  I'm going to
    // TODO            soften that.

    SG_ERR_CHECK(  SG_vhash__update__null(pCtx, pvh_gids, pszGid)  );

fail:
    SG_STRING_NULLFREE(pCtx, pStringRepoPath);
    SG_NULLFREE(pCtx, pszGid);
}
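
For illustration, the kinds of inputs _map_input is meant to normalize (the paths here are invented; the domain forms follow the comment inside the function):

    SG_ERR_CHECK(  _map_input(pCtx, pWcTx, pvh_gids, "@/src/foo.c")  );	// repo-path
    SG_ERR_CHECK(  _map_input(pCtx, pWcTx, pvh_gids, "@g12345....")  );	// gid-domain input
    SG_ERR_CHECK(  _map_input(pCtx, pWcTx, pvh_gids, "@b/src/foo.c")  );	// baseline-domain path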
/**
 * Request to UNLOCK on one or more files.
 *
 * WARNING: This routine deviates from the model of most
 * WARNING: of the SG_wc__ level-8 and/or SG_wc_tx level-7
 * WARNING: API routines because we cannot just "queue" an
 * WARNING: unlock like we do a RENAME with everything
 * WARNING: contained within the pWcTx; we actually have
 * WARNING: to update the locks dag (which is outside of
 * WARNING: the scope of the WC TX).
 * WARNING:
 * WARNING: So we only have a level-8 API
 * WARNING: so that we can completely control/bound the TX.
 *
 * We also deviate in that we take neither a --test
 * nor a --verbose option, which means we don't have a
 * JOURNAL to mess with.
 *
 */
void SG_wc__unlock(SG_context * pCtx,
                   const SG_pathname* pPathWc,
                   const SG_stringarray * psaInputs,
                   SG_bool bForce,
                   const char * psz_username,
                   const char * psz_password,
                   const char * psz_repo_upstream)
{
    SG_wc_tx * pWcTx = NULL;
    SG_audit q;
    SG_uint32 nrInputs = 0;
    SG_uint32 k;
    char * psz_tied_branch_name = NULL;
    char * psz_repo_upstream_allocated = NULL;
    SG_vhash * pvh_gids = NULL;
    const char * pszRepoDescriptorName = NULL;	// we do not own this

    if (psaInputs)
        SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaInputs, &nrInputs)  );
    if (nrInputs == 0)
        SG_ERR_THROW2(  SG_ERR_INVALIDARG,
                        (pCtx, "Nothing to unlock")  );

    // psz_username is optional (assume no auth required)
    // psz_password is optional (assume no auth required)
    // psz_repo_upstream is optional (assume default server)

    // Begin a WC TX so that we get all of the good stuff
    // (like mapping the CWD into a REPO handle and mapping
    // the inputs into GIDs).
    //
    // At this point I don't believe that releasing a lock
    // will actually make any changes in WC.DB, so I'm
    // making it a READ-ONLY TX.
    //
    // My assumption is that the unlock actually gets
    // written to the Locks DAG and shared with the server.
    // But I still want a TX handle for all of the other stuff.

    SG_ERR_CHECK(  SG_WC_TX__ALLOC__BEGIN(pCtx, &pWcTx, pPathWc, SG_FALSE)  );

    // We need the repo descriptor name later for the push/pull
    // and to optionally look up the default destination for
    // this repo.  The pRepo stores this *IFF* it was properly
    // opened (using a name).

    SG_ERR_CHECK(  SG_repo__get_descriptor_name(pCtx, pWcTx->pDb->pRepo,
                   &pszRepoDescriptorName)  );
    SG_ASSERT_RELEASE_FAIL2(  (pszRepoDescriptorName && *pszRepoDescriptorName),
                              (pCtx, "SG_wc__unlock: Could not get repo descriptor name.")  );

    // now we need to know what branch we are tied to.
    // if we're not tied, fail
    SG_ERR_CHECK(  SG_wc_tx__branch__get(pCtx, pWcTx, &psz_tied_branch_name)  );
    if (!psz_tied_branch_name)
        SG_ERR_THROW(  SG_ERR_NOT_TIED  );

    SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_gids)  );
    for (k=0; k<nrInputs; k++)
    {
        const char * pszInput_k;

        SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaInputs, k, &pszInput_k)  );
        SG_ERR_CHECK(  _map_input(pCtx, pWcTx, pvh_gids, pszInput_k)  );
    }

    if (!psz_repo_upstream)
    {
        SG_localsettings__descriptor__get__sz(pCtx, pszRepoDescriptorName,
                                              "paths/default",
                                              &psz_repo_upstream_allocated);
        if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
            SG_ERR_REPLACE_ANY_RETHROW(  SG_ERR_NO_SERVER_SPECIFIED  );
        else
            SG_ERR_CHECK_CURRENT;

        psz_repo_upstream = psz_repo_upstream_allocated;
    }

    SG_ERR_CHECK(  SG_audit__init(pCtx, &q, pWcTx->pDb->pRepo,
                                  SG_AUDIT__WHEN__NOW,
                                  SG_AUDIT__WHO__FROM_SETTINGS)  );

    // OK, we have all the pieces.  Time to call the unlock code
    SG_ERR_CHECK(  SG_vc_locks__unlock(
                       pCtx,
                       pszRepoDescriptorName,
                       psz_repo_upstream,
                       psz_username,
                       psz_password,
                       psz_tied_branch_name,
                       pvh_gids,
                       bForce,
                       &q
                   )  );

    // Fall through and let the normal fail code discard/cancel
    // the read-only WC TX.  This will not affect the Locks DAG
    // nor the server.

fail:
    SG_ERR_IGNORE(  SG_wc_tx__cancel(pCtx, pWcTx)  );
    SG_WC_TX__NULLFREE(pCtx, pWcTx);
    SG_NULLFREE(pCtx, psz_tied_branch_name);
    SG_NULLFREE(pCtx, psz_repo_upstream_allocated);
    SG_VHASH_NULLFREE(pCtx, pvh_gids);
}
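
A hypothetical end-to-end caller of the level-8 API above; the file name, and passing NULL for pPathWc to mean "the working copy containing the CWD", are assumptions for illustration:

    SG_stringarray * psa = NULL;

    SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa, 1)  );
    SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa, "src/foo.c")  );
    SG_ERR_CHECK(  SG_wc__unlock(pCtx, NULL /*pPathWc*/, psa, SG_FALSE /*bForce*/,
                                 NULL /*username*/, NULL /*password*/,
                                 NULL /*upstream: fall back to paths/default*/)  );

fail:
    SG_STRINGARRAY_NULLFREE(pCtx, psa);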