/**
 * Bind the working directory to the named branch.
 *
 * Name validation is optional so that callers can still attach to
 * legacy (pre-2.0) branches whose names contain punctuation.
 *
 * Verifying whether the branch name does/does not already exist
 * (for --attach vs --attach-new) is likewise optional; both options
 * are provided purely as a service to the caller, who has normally
 * already validated/verified everything while parsing user input.
 */
void sg_wc_db__branch__attach(SG_context * pCtx,
                              sg_wc_db * pDb,
                              const char * pszBranchName,
                              SG_vc_branches__check_attach_name__flags flags,
                              SG_bool bValidateName)
{
    char * pszNormalized = NULL;		// we always normalize the name, however
    sqlite3_stmt * pStmtInsert = NULL;

    SG_NONEMPTYCHECK_RETURN( pszBranchName );

#if TRACE_WC_DB
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               "sg_wc_db__branch__attach: [flags %d][validate %d] %s\n",
                               (SG_uint32)flags, bValidateName, pszBranchName)  );
#endif

    SG_ERR_CHECK(  SG_vc_branches__check_attach_name(pCtx, pDb->pRepo, pszBranchName,
                                                     flags, bValidateName,
                                                     &pszNormalized)  );

    // record the (normalized) branch name in the single-row branch table.
    SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmtInsert,
                                      ("INSERT OR REPLACE INTO tbl_branch ( id, name ) VALUES ( ?, ? )"))  );
    SG_ERR_CHECK(  sg_sqlite__bind_int(pCtx, pStmtInsert, 1, ID_KEY)  );
    // __check_attach_name should already have thrown on a bad name,
    // so the NULL/empty case here is probably unreachable.
    if (pszNormalized && *pszNormalized)
        SG_ERR_CHECK(  sg_sqlite__bind_text(pCtx, pStmtInsert, 2, pszNormalized)  );
    else
        SG_ERR_CHECK(  sg_sqlite__bind_null(pCtx, pStmtInsert, 2)  );

    SG_ERR_CHECK(  sg_sqlite__step(pCtx, pStmtInsert, SQLITE_DONE)  );
    SG_ERR_CHECK(  sg_sqlite__finalize(pCtx, pStmtInsert)  );
    SG_NULLFREE(pCtx, pszNormalized);
    return;

fail:
    SG_ERR_IGNORE(  sg_sqlite__finalize(pCtx, pStmtInsert)  );
    SG_NULLFREE(pCtx, pszNormalized);
}
/**
 * Determine how two changesets are related: the SAME node, an
 * ancestor/descendant pair, or peers/cousins somewhere in the DAG.
 *
 * The direction is reported as "how is cs1 related to cs2"; e.g.
 * __DESCENDANT means pszHid1 is a descendant of pszHid2.
 *
 * Either of the (potentially expensive) dagfrag walks may be
 * skipped; when the walk that would have decided the answer is
 * skipped, __UNKNOWN is returned.
 */
void SG_dagquery__how_are_dagnodes_related(SG_context * pCtx,
										   SG_repo * pRepo,
                                           SG_uint64 dagnum,
										   const char * pszHid1,
										   const char * pszHid2,
										   SG_bool bSkipDescendantCheck,
										   SG_bool bSkipAncestorCheck,
										   SG_dagquery_relationship * pdqRel)
{
	SG_dagnode * pdnFirst = NULL;
	SG_dagnode * pdnSecond = NULL;
	SG_dagfrag * pFrag = NULL;
	SG_dagquery_relationship relAnswer = SG_DAGQUERY_RELATIONSHIP__UNKNOWN;
	SG_int32 genFirst, genSecond;
	SG_bool bInFrag;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NONEMPTYCHECK_RETURN(pszHid1);
	SG_NONEMPTYCHECK_RETURN(pszHid2);
	SG_NULLARGCHECK_RETURN(pdqRel);

	// identical HIDs are trivially the SAME node.
	if (strcmp(pszHid1, pszHid2) == 0)
	{
		relAnswer = SG_DAGQUERY_RELATIONSHIP__SAME;
		goto cleanup;
	}

	// fetch both dagnodes; this throws when a HID is not found.
	SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, pszHid1, &pdnFirst)  );
	SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, pszHid2, &pdnSecond)  );

	// two distinct nodes are either ancestor/descendant of each
	// other or peers/cousins (however distant; they have an LCA but
	// we don't care about it).  nodes at the same generation cannot
	// be ancestor/descendant, so they must be peers.
	SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pdnFirst, &genFirst)  );
	SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pdnSecond, &genSecond)  );
	if (genFirst == genSecond)
	{
		relAnswer = SG_DAGQUERY_RELATIONSHIP__PEER;
		goto cleanup;
	}

	// only PARENT edges exist in the DAG, so walk backwards from the
	// deeper node for exactly the generation difference.  rather
	// than writing a recursive parent-edge walker, build a transient
	// DAGFRAG of that "thickness"; if the nodes are related, the
	// shallower one must show up when we query the fragment.
	SG_ERR_CHECK(  SG_dagfrag__alloc_transient(pCtx, dagnum, &pFrag)  );

	if (genFirst > genSecond)		// cs1 is *DEEPER* than cs2
	{
		if (bSkipDescendantCheck)
		{
			relAnswer = SG_DAGQUERY_RELATIONSHIP__UNKNOWN;
		}
		else
		{
			SG_ERR_CHECK(  SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, pszHid1, (genFirst - genSecond))  );
			SG_ERR_CHECK(  SG_dagfrag__query(pCtx, pFrag, pszHid2, NULL, NULL, &bInFrag, NULL)  );

			// pszHid2 an ancestor of pszHid1  ==>  pszHid1 descends
			// from pszHid2; otherwise they are *distant* peers.
			relAnswer = (bInFrag
						 ? SG_DAGQUERY_RELATIONSHIP__DESCENDANT
						 : SG_DAGQUERY_RELATIONSHIP__PEER);
		}
	}
	else							// cs2 is deeper than cs1
	{
		if (bSkipAncestorCheck)
		{
			relAnswer = SG_DAGQUERY_RELATIONSHIP__UNKNOWN;
		}
		else
		{
			SG_ERR_CHECK(  SG_dagfrag__load_from_repo__one(pCtx, pFrag, pRepo, pszHid2, (genSecond - genFirst))  );
			SG_ERR_CHECK(  SG_dagfrag__query(pCtx, pFrag, pszHid1, NULL, NULL, &bInFrag, NULL)  );

			// pszHid1 an ancestor of pszHid2, else *distant* peers.
			relAnswer = (bInFrag
						 ? SG_DAGQUERY_RELATIONSHIP__ANCESTOR
						 : SG_DAGQUERY_RELATIONSHIP__PEER);
		}
	}

cleanup:
	*pdqRel = relAnswer;

fail:
	SG_DAGNODE_NULLFREE(pCtx, pdnFirst);
	SG_DAGNODE_NULLFREE(pCtx, pdnSecond);
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
}
/**
 * The classic interface for a RENAME.
 *
 * Inputs:
 * [] the repo-path of the item to be renamed;
 * [] the new ENTRYNAME (just a name, never a pathname).
 *
 * This mirrors the usual command-line usage where the destination
 * path is not repeated -- only the entryname -- so it is a pure
 * RENAME rather than a MOVE+RENAME.  (The underlying layers could
 * do both in one step; the split is for user convenience.)
 *
 * After-the-fact renames can optionally be disallowed.
 */
void SG_wc_tx__rename(SG_context * pCtx,
					  SG_wc_tx * pWcTx,
					  const char * pszInput_Src,
					  const char * pszNewEntryname,
					  SG_bool bNoAllowAfterTheFact)
{
	SG_string * pStringRepoSrc = NULL;
	sg_wc_liveview_item * pLviSrc;			// owned by the TX, not us
	sg_wc_liveview_item * pLviParentDir;	// owned by the TX, not us
	char chDomainSrc;
	SG_bool bKnownSrc;
	SG_bool bKnownParentDir;
	SG_wc_status_flags xu_mask = SG_WC_STATUS_FLAGS__ZERO;

	SG_NULLARGCHECK_RETURN( pWcTx );
	SG_NONEMPTYCHECK_RETURN( pszNewEntryname );

	SG_ERR_CHECK(  sg_wc_db__path__anything_to_repopath(pCtx, pWcTx->pDb, pszInput_Src,
														SG_WC_DB__PATH__IMPORT_FLAGS__TREAT_NULL_AS_ERROR,
														&pStringRepoSrc, &chDomainSrc)  );

#if TRACE_WC_TX_MOVE_RENAME
	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
							   "SG_wc_tx__rename: source '%s' normalized to [domain %c] '%s'\n",
							   pszInput_Src, chDomainSrc, SG_string__sz(pStringRepoSrc))  );
	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
							   "SG_wc_tx__rename: new name '%s'\n",
							   pszNewEntryname)  );
#endif

	// the destination must be a bare entryname: no path separator of
	// either flavor and no leading repo-path/domain prefix.
	if ((strpbrk(pszNewEntryname, "/\\") != NULL)
		|| (pszNewEntryname[0] == '@'))
		SG_ERR_THROW2(  SG_ERR_INVALIDARG,
						(pCtx,
						 "The destination of a RENAME must be a simple entryname, not a pathname: '%s'",
						 pszNewEntryname)  );

	// only minimal validation here -- just enough to print better
	// error messages while the raw user input and the normalized
	// domain-aware repo-path are both still in hand.  the serious
	// validation lives in the "move-rename" layer.

	SG_ERR_CHECK(  sg_wc_tx__liveview__fetch_item__domain(pCtx, pWcTx, pStringRepoSrc,
														  &bKnownSrc, &pLviSrc)  );
	if (!bKnownSrc || (SG_WC_PRESCAN_FLAGS__IS_UNCONTROLLED_UNQUALIFIED(pLviSrc->scan_flags_Live)))
		SG_ERR_THROW2(  SG_ERR_ITEM_NOT_UNDER_VERSION_CONTROL,
						(pCtx, "Source '%s' (%s)",
						 pszInput_Src, SG_string__sz(pStringRepoSrc))  );
	if (pLviSrc == pWcTx->pLiveViewItem_Root)
		SG_ERR_THROW2(  SG_ERR_INVALIDARG,
						(pCtx, "Cannot MOVE/RENAME the root directory '%s' (%s)",
						 pszInput_Src, SG_string__sz(pStringRepoSrc))  );

	// the source input/repo-path may have been a GID- or baseline-
	// domain string, so its literal text is unusable here; go through
	// the LVI instead to get the parent directory of the source item.

	SG_ERR_CHECK(  sg_wc_tx__liveview__fetch_random_item(pCtx, pWcTx,
														 pLviSrc->pLiveViewDir->uiAliasGidDir,
														 &bKnownParentDir, &pLviParentDir)  );
	SG_ASSERT_RELEASE_FAIL( (bKnownParentDir) );

	// when the item carries an unresolved issue, let the rename
	// implicitly resolve the existence/name choices.
	if (pLviSrc->pvhIssue && (pLviSrc->statusFlags_x_xr_xu & SG_WC_STATUS_FLAGS__X__UNRESOLVED))
	{
		xu_mask |= (pLviSrc->statusFlags_x_xr_xu & SG_WC_STATUS_FLAGS__XU__EXISTENCE);
		xu_mask |= (pLviSrc->statusFlags_x_xr_xu & SG_WC_STATUS_FLAGS__XU__NAME);
	}

	SG_ERR_CHECK(  sg_wc_tx__rp__move_rename__lvi_lvi_sz(pCtx, pWcTx,
														 pLviSrc,
														 pLviParentDir,
														 pszNewEntryname,
														 bNoAllowAfterTheFact,
														 xu_mask)  );

fail:
	SG_STRING_NULLFREE(pCtx, pStringRepoSrc);
}
/**
 * Find the head(s)/leaf(es) of the DAG that are descendants of the
 * given starting changeset.
 *
 * *pdqfhs reports whether the start was itself a leaf, had a unique
 * descendant head, or had several.  *pprbHeads receives the set of
 * matching heads (keys only).  When bStopIfMultiple is set and more
 * than one head qualifies, the (incomplete) result set is discarded
 * and only the __MULTIPLE status is returned.
 */
void SG_dagquery__find_descendant_heads(SG_context * pCtx,
										SG_repo * pRepo,
                                        SG_uint64 iDagNum,
										const char * pszHidStart,
										SG_bool bStopIfMultiple,
										SG_dagquery_find_head_status * pdqfhs,
										SG_rbtree ** pprbHeads)
{
	SG_rbtree * prbAllLeaves = NULL;
	SG_rbtree * prbMatched = NULL;
	SG_rbtree_iterator * pIter = NULL;
	const char * pszLeaf_k = NULL;
	SG_bool bOk;
	SG_dagquery_find_head_status status;
	SG_dagquery_relationship rel;
	SG_uint32 cMatched;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NONEMPTYCHECK_RETURN(pszHidStart);
	SG_NULLARGCHECK_RETURN(pdqfhs);
	SG_NULLARGCHECK_RETURN(pprbHeads);

	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prbMatched)  );

	// grab every LEAF in the DAG (this rbtree holds keys only; no
	// assoc values).
	SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagNum, &prbAllLeaves)  );

	// if the start node is itself a leaf we are done: being a leaf,
	// no other leaf can possibly be a descendant of it, so the rest
	// of the list doesn't matter.
	SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbAllLeaves, pszHidStart, &bOk, NULL)  );
	if (bOk)
	{
		SG_ERR_CHECK(  SG_rbtree__add(pCtx, prbMatched, pszHidStart)  );

		status = SG_DAGQUERY_FIND_HEAD_STATUS__IS_LEAF;
		goto done;
	}

	// qualify each leaf; keep the ones that descend from the start.
	cMatched = 0;
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIter, prbAllLeaves, &bOk, &pszLeaf_k, NULL)  );
	while (bOk)
	{
		// is leaf[k] a descendant of the start node?
		SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, iDagNum, pszLeaf_k, pszHidStart,
															 SG_FALSE, // we care about descendants, so don't skip
															 SG_TRUE,  // we don't care about ancestors, so skip them
															 &rel)  );

		if (rel == SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
		{
			cMatched++;

			if (bStopIfMultiple && (cMatched > 1))
			{
				// the caller wanted a unique answer and there are too
				// many (which they couldn't use anyway), so stop now
				// and report only the status.  (free the result set
				// because it is incomplete and nobody should be
				// tempted to use it.)
				SG_RBTREE_NULLFREE(pCtx, prbMatched);
				status = SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE;
				goto done;
			}

			SG_ERR_CHECK(  SG_rbtree__add(pCtx, prbMatched, pszLeaf_k)  );
		}

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIter, &bOk, &pszLeaf_k, NULL)  );
	}

	if (cMatched == 0)
	{
		// this should NEVER happen: we should always be able to find
		// a leaf/head for a node.
		//
		// TODO the only case where this might happen is if named
		// TODO branches cause the leaf to be disqualified.  so THROW
		// TODO here rather than ASSERT.
		SG_ERR_THROW2(  SG_ERR_DAG_NOT_CONSISTENT,
						(pCtx, "Could not find head/leaf for changeset [%s]", pszHidStart)  );
	}
	else if (cMatched == 1)
	{
		status = SG_DAGQUERY_FIND_HEAD_STATUS__UNIQUE;
	}
	else
	{
		status = SG_DAGQUERY_FIND_HEAD_STATUS__MULTIPLE;
	}

done:
	// transfer ownership of the result set to the caller.
	*pprbHeads = prbMatched;
	prbMatched = NULL;

	*pdqfhs = status;

fail:
	SG_RBTREE_NULLFREE(pCtx, prbAllLeaves);
	SG_RBTREE_NULLFREE(pCtx, prbMatched);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);
}
/**
 * Queue an UNDO-DELETE: restore the (currently deleted) source item
 * as entry pszEntryname_Dest inside the given destination directory.
 *
 * Throws if the source is not in the DELETED state, if the
 * destination directory is REMOVED or LOST, or if something already
 * exists at the computed destination path.  pArgs is optional.
 */
void sg_wc_tx__rp__undo_delete__lvi_lvi_sz(SG_context * pCtx,
										   SG_wc_tx * pWcTx,
										   sg_wc_liveview_item * pLVI_Src,
										   sg_wc_liveview_item * pLVI_DestDir,
										   const char * pszEntryname_Dest,
										   const SG_wc_undo_delete_args * pArgs,
										   SG_wc_status_flags xu_mask)
{
	SG_string * pStringSrc = NULL;
	SG_string * pStringDestDir = NULL;
	SG_string * pStringDest = NULL;
	sg_wc_liveview_item * pLviDest;		// owned by the TX, not us
	sg_wc_liveview_dir * pLvdDestDir;	// owned by the TX, not us
	SG_bool bDestKnown;

	SG_NULLARGCHECK_RETURN( pWcTx );
	SG_NULLARGCHECK_RETURN( pLVI_Src );
	SG_NULLARGCHECK_RETURN( pLVI_DestDir );
	SG_NONEMPTYCHECK_RETURN( pszEntryname_Dest );
	// pArgs is optional

	// compute live repo-paths for error messages and the destination
	// lookup below.
	SG_ERR_CHECK(  sg_wc_tx__liveview__compute_live_repo_path(pCtx, pWcTx, pLVI_Src,
															  &pStringSrc)  );
	SG_ERR_CHECK(  sg_wc_tx__liveview__compute_live_repo_path(pCtx, pWcTx, pLVI_DestDir,
															  &pStringDestDir)  );

	// the source must currently be in the DELETED state.
	if (SG_WC_PRESCAN_FLAGS__IS_CONTROLLED_DELETED(pLVI_Src->scan_flags_Live) == SG_FALSE)
		SG_ERR_THROW2(  SG_ERR_WC__ITEM_ALREADY_EXISTS,
						(pCtx, "Source '%s'",
						 SG_string__sz(pStringSrc))  );

	// the destination directory must be alive and present.
	if (SG_WC_PRESCAN_FLAGS__IS_CONTROLLED_DELETED(pLVI_DestDir->scan_flags_Live))
		SG_ERR_THROW2(  SG_ERR_NOT_FOUND,
						(pCtx, "The destination directory '%s' has been REMOVED.",
						 SG_string__sz(pStringDestDir))  );
	if (SG_WC_PRESCAN_FLAGS__IS_CONTROLLED_LOST(pLVI_DestDir->scan_flags_Live))
		SG_ERR_THROW2(  SG_ERR_NOT_FOUND,
						(pCtx, "The destination directory '%s' is currently LOST.",
						 SG_string__sz(pStringDestDir))  );

	// build the full destination repo-path and verify nothing is
	// already there.
	SG_ERR_CHECK(  SG_STRING__ALLOC__COPY(pCtx,
										  &pStringDest,
										  pStringDestDir)  );
	SG_ERR_CHECK(  SG_repopath__append_entryname(pCtx,
												 pStringDest,
												 pszEntryname_Dest,
												 SG_FALSE)  );
	SG_ERR_CHECK(  sg_wc_tx__liveview__fetch_item(pCtx, pWcTx,
												  pStringDest,
												  &bDestKnown, &pLviDest)  );
	if (bDestKnown)
		SG_ERR_THROW2(  SG_ERR_WC__ITEM_ALREADY_EXISTS,
						(pCtx, "The destination '%s' already exists.",
						 SG_string__sz(pStringDest))  );

	// no path-cycle check is needed: the source does not exist and
	// the destination directory does, so a cycle is impossible.

	SG_ERR_CHECK(  sg_wc_tx__liveview__fetch_dir(pCtx, pWcTx, pLVI_DestDir, &pLvdDestDir)  );
	SG_ERR_CHECK(  sg_wc_liveview_dir__can_add_new_entryname(pCtx, pWcTx->pDb,
															 pLvdDestDir,
															 NULL,
															 NULL,
															 pszEntryname_Dest,
															 SG_TREENODEENTRY_TYPE__INVALID,
															 SG_FALSE)  );

	SG_ERR_CHECK(  sg_wc_tx__queue__undo_delete(pCtx, pWcTx,
												pStringDest,
												pLVI_Src,
												pLVI_DestDir,
												pszEntryname_Dest,
												pArgs,
												xu_mask)  );

fail:
	SG_STRING_NULLFREE(pCtx, pStringSrc);
	SG_STRING_NULLFREE(pCtx, pStringDestDir);
	SG_STRING_NULLFREE(pCtx, pStringDest);
}
/* (removed stray extraction artifact: "Esempio n. 6" / "0" -- not C code) */
/**
 * Load a fragment of the DAG into pFrag, starting at szHidStart and
 * walking PARENT edges backwards for nGenerations (nGenerations <= 0
 * means all the way back to the NULL/root).  May be called multiple
 * times with different start nodes to grow the same fragment.
 *
 * NOTE(review): pRepo appears to be optional only when the needed
 * nodes are already in the fragment's cache -- a missing repo causes
 * SG_ERR_INVALID_WHILE_FROZEN when the start node must be fetched;
 * confirm the same holds for the work-queue fetches.
 */
void SG_dagfrag__load_from_repo__one(SG_context * pCtx,
									 SG_dagfrag * pFrag,
									 SG_repo* pRepo,
									 const char * szHidStart,
									 SG_int32 nGenerations)
{
	// load a fragment of the dag starting with the given dagnode
	// for nGenerations of parents.
	//
	// we add this portion of the graph to whatevery we already
	// have in our fragment.  this may either augment (give us
	// a larger connected piece) or it may be an independent
	// subset.
	//
	// if nGenerations <= 0, load everything from this starting point
	// back to the NULL/root.
	//
	// generationStart is the generation of the starting dagnode.
	//
	// the starting dagnode *MAY* be in the final start-fringe.
	// normally, it will be.  but if we are called multiple times
	// (and have more than one start point), it may be the case
	// that this node is a parent of one of the other start points.
	//
	// we compute generationEnd as the generation that we will NOT
	// include in the fragment; nodes of that generation will be in
	// the end-fringe.  that is, we include [start...end) like most
	// C++ iterators.

	_my_data * pMyDataCached = NULL;
	SG_dagnode * pDagnodeAllocated = NULL;
	SG_dagnode * pDagnodeStart;
	SG_int32 generationStart, generationEnd;
	SG_bool bPresent = SG_FALSE;
    SG_rbtree* prb_WorkQueue = NULL;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NONEMPTYCHECK_RETURN(szHidStart);

	// if we are extending the fragment, delete the generation-sorted
	// member cache copy.  (see __foreach_member()).  it's either that
	// or update it in parallel as we change the real CACHE and that
	// doesn't seem worth the bother.

	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_WorkQueue)  );

	// fetch the starting dagnode and compute the generation bounds.
	// first, see if the cache already has info for this dagnode.
	// if not, fetch it from the source and then add it to the cache.

	SG_ERR_CHECK(  _cache__lookup(pCtx, pFrag,szHidStart,&pMyDataCached,&bPresent)  );
	if (!bPresent)
	{
		if (!pRepo)
			SG_ERR_THROW(  SG_ERR_INVALID_WHILE_FROZEN  );

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, szHidStart, &pDagnodeAllocated)  );

		pDagnodeStart = pDagnodeAllocated;
	}
	else
	{
		pDagnodeStart = pMyDataCached->m_pDagnode;
	}

	SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pDagnodeStart,&generationStart)  );
	SG_ASSERT_RELEASE_FAIL2(  (generationStart > 0),
					  (pCtx,"Invalid generation value [%d] for dagnode [%s]",
					   generationStart,szHidStart)  );
	// generationEnd is the first generation EXCLUDED from the
	// fragment (the half-open [start...end) bound described above).
	if ((nGenerations <= 0)  ||  (generationStart <= nGenerations))
		generationEnd = 0;
	else
		generationEnd = generationStart - nGenerations;

	if (!bPresent)
	{
		// this dagnode was not already present in the cache.
		// add it to the cache directly and set the state.
		// we don't need to go thru the work queue for it.
		//
		// then the add all of its parents to the work queue.

		SG_ERR_CHECK(  _cache__add__dagnode(pCtx,
											pFrag,
											generationStart,
											pDagnodeAllocated,SG_DFS_START_MEMBER,
											&pMyDataCached)  );
		// ownership of the dagnode passed to the cache.
		pDagnodeAllocated = NULL;

		SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
	}
	else
	{
		// the node was already present in the cache, so we have already
		// walked at least part of the graph around it.

		switch (pMyDataCached->m_state)
		{
		default:
		//case SG_DFS_UNKNOWN:
			SG_ASSERT_RELEASE_FAIL2(  (0),
							  (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
							   pMyDataCached->m_state,szHidStart)  );

		case SG_DFS_INTERIOR_MEMBER:				// already in fragment
		case SG_DFS_START_MEMBER:	// already in fragment, duplicated leaf?
			if (generationEnd < pMyDataCached->m_genDagnode)
			{
				// they've expanded the bounds of the fragment since we
				// last visited this dagnode.  keep this dagnode in the
				// fragment and revisit the ancestors in case any were
				// put in the end-fringe that should now be included.
				//
				// we leave the state as INCLUDE or INCLUDE_AND_START
				// because a duplicate start point should remain a
				// start point.

				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
			}
			else
			{
				// the current end-generation requested is >= the previous
				// end-generation, then we've completely explored this dagnode
				// already.  that is, a complete walk from this node for nGenerations
				// would not reveal any new information.
			}
			break;

		case SG_DFS_END_FRINGE:
			{
				// they want to start at a dagnode that we put in the
				// end-fringe.  this can happen if they need to expand
				// the bounds of the fragment to include older ancestors.
				//
				// we do not mark this as a start node because someone
				// else already has it as a parent.

				pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
			}
			break;
		}
	}

	// we optionally put the parents of the current node into the work queue.
	//
	// service the work queue until it is empty.  this allows us to walk the graph without
	// recursion.  that is, as we decide what to do with a node, we add the parents
	// to the queue.  we then iterate thru the work queue until we have dealt with
	// everything -- that is, until all parents have been properly placed.
	//
	// we cannot use a standard iterator to drive this loop because we
	// modify the queue.

	while (1)
	{
		// note: deliberately NOT wrapped in SG_ERR_CHECK -- we inspect
		// pCtx ourselves so that SG_ERR_RESTART_FOREACH can be
		// intercepted and the iteration restarted.
		_process_work_queue_item(pCtx, pFrag,prb_WorkQueue,generationEnd,pRepo);
		if (!SG_context__has_err(pCtx))
			break;							// we processed everything in the queue and are done

		if (!SG_context__err_equals(pCtx,SG_ERR_RESTART_FOREACH))
			SG_ERR_RETHROW;

		SG_context__err_reset(pCtx);		// queue changed, restart iteration
	}

	SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);

	/*
	** we have loaded a piece of the dag (starting with the given start node
	** and tracing all parent edges back n generations).  we leave with everything
	** in our progress queues so that other start nodes can be added to the
	** fragment.  this allows the processing of subsequent start nodes to
	** override some of the decisions that we made.  for example:
	**
	**           Q_15
	**             |
	**             |
	**           Z_16
	**           /  \
	**          /    \
	**      Y_17      A_17
	**          \    /   \
	**           \  /     \
	**           B_18     C_18
	**             |
	**             |
	**           D_19
	**             |
	**             |
	**           E_20
	**
	** if we started with the leaf E_20 and requested 3 generations, we would have:
	**     start_set := { E }
	**     include_set := { B, D, E }
	**     end_set := { Y, A }
	**
	** after a subsequent call with the leaf C_18 and 3 generations, we would have:
	**     start_set := { C, E }
	**     include_set := { Z, A, B, C, D, E }
	**     end_set := { Q, Y }
	**
	*/

	return;

fail:
	SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);
	SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
/**
 * Called once per input in the stringarray of files to unlock.
 *
 * Validates the input, resolves it to the item's GID, and adds
 * that GID as a key of the given VHASH.
 *
 * TODO 2012/03/02 This is a little different from the
 * TODO            version in sg_wc__lock.c.  Is this
 * TODO            really necessary.
 */
static void _map_input(SG_context * pCtx,
                       SG_wc_tx * pWcTx,
                       SG_vhash * pvh_gids,
                       const char * pszInput)
{
    SG_string * pStringRepo = NULL;
    char * pszGid = NULL;
    sg_wc_liveview_item * pItem;	// owned by the TX, not us
    SG_wc_status_flags sf;
    SG_bool bFound;
    char chDomain;

    SG_NONEMPTYCHECK_RETURN( pszInput );	// throw if omitted; do not assume "@/".

    SG_ERR_CHECK(  sg_wc_db__path__anything_to_repopath(pCtx, pWcTx->pDb, pszInput,
                   SG_WC_DB__PATH__IMPORT_FLAGS__TREAT_NULL_AS_ERROR,
                   &pStringRepo, &chDomain)  );
#if TRACE_WC_LOCK
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               "SG_wc__unlock: '%s' normalized to [domain %c] '%s'\n",
                               pszInput, chDomain, SG_string__sz(pStringRepo))  );
#endif

    // resolve the (possibly domain-qualified, e.g. "@g12345..." or
    // "@b/...") repo-path to a live-view item.  fetching the LVI may
    // implicitly SCANDIR/READDIR and sync up with the DB -- which is
    // good, because it also gives us the exact-match handling on
    // each component within the pathname.

    SG_ERR_CHECK(  sg_wc_tx__liveview__fetch_item__domain(pCtx, pWcTx, pStringRepo,
                   &bFound, &pItem)  );
    if (!bFound)
    {
        // only happens when the path is completely bogus and took us
        // off into the weeds (as opposed to merely reporting
        // something not-controlled).
        SG_ERR_THROW2(  SG_ERR_NOT_FOUND,
                        (pCtx, "Unknown item '%s'.", SG_string__sz(pStringRepo))  );
    }

    // locks apply to regular files only.
    if (pItem->tneType != SG_TREENODEENTRY_TYPE_REGULAR_FILE)
        SG_ERR_THROW2(  SG_ERR_VC_LOCK_FILES_ONLY,
                        (pCtx, "%s", pszInput)  );

    SG_ERR_CHECK(  sg_wc__status__compute_flags(pCtx, pWcTx, pItem,
                   SG_TRUE,	// --no-ignores (faster)
                   SG_FALSE,	// trust TSC
                   &sf)  );
    if (sf & (SG_WC_STATUS_FLAGS__U__FOUND
              |SG_WC_STATUS_FLAGS__U__IGNORED))
        SG_ERR_THROW2(  SG_ERR_ITEM_NOT_UNDER_VERSION_CONTROL,
                        (pCtx, "%s", pszInput)  );
    // TODO 2012/03/02 Not sure checking for __S__ADDED is needed on an unlock.
    if (sf & SG_WC_STATUS_FLAGS__S__ADDED)
        SG_ERR_THROW2(  SG_ERR_VC_LOCK_NOT_COMMITTED_YET,
                        (pCtx, "%s", pszInput)  );

    SG_ERR_CHECK(  sg_wc_db__gid__get_gid_from_alias(pCtx, pWcTx->pDb, pItem->uiAliasGid, &pszGid)  );
    SG_ASSERT_RELEASE_FAIL2(  (pszGid[0] == 'g'),	// uncontrolled items get 't' (temp) domain ids
                              (pCtx, "%s has temp id %s", pszInput, pszGid)  );

    // TODO 2012/03/02 The original PendingTree version of the
    // TODO            code did a SG_vhash__add__null() which
    // TODO            will throw on duplicates.  I'm going to
    // TODO            soften that.

    SG_ERR_CHECK(  SG_vhash__update__null(pCtx, pvh_gids, pszGid)  );

fail:
    SG_STRING_NULLFREE(pCtx, pStringRepo);
    SG_NULLFREE(pCtx, pszGid);
}
/* (removed stray extraction artifact: "Esempio n. 8" / "0" -- not C code) */
void SG_localsettings__get__variant(SG_context * pCtx, const char * psz_path, SG_repo* pRepo, SG_variant** ppv, SG_string** ppstr_where_found)
{
    SG_jsondb* p = NULL;
    SG_string* pstr_path = NULL;
    SG_variant* pv = NULL;
    char* psz_repo_id = NULL;
    char* psz_admin_id = NULL;
    const char* psz_ref_descriptor_name = NULL;

    SG_ASSERT(pCtx);
    SG_NONEMPTYCHECK_RETURN(psz_path);

    SG_ERR_CHECK(  SG_closet__get_localsettings(pCtx, &p)  );

    SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pstr_path)  );
    if ('/' == psz_path[0])
    {
        SG_bool b = SG_FALSE;

        SG_ERR_CHECK(  SG_string__sprintf(pCtx, pstr_path, "%s", psz_path)  );
        SG_ERR_CHECK(  SG_jsondb__has(pCtx, p, SG_string__sz(pstr_path), &b)  );
        if (b)
        {
            SG_ERR_CHECK(  SG_jsondb__get__variant(pCtx, p, SG_string__sz(pstr_path), &pv)  );
        }
    }
    else
    {
        SG_bool b_has_val = SG_FALSE;

        // try the instance of the repo
        if (!b_has_val && pRepo)
        {
            SG_ERR_CHECK(  SG_repo__get_descriptor_name(pCtx, pRepo, &psz_ref_descriptor_name)  );

            SG_ERR_CHECK(  SG_string__sprintf(pCtx, pstr_path, "%s/%s/%s",
                        SG_LOCALSETTING__SCOPE__INSTANCE,
                        psz_ref_descriptor_name,
                        psz_path
                        )  );
            SG_ERR_CHECK(  SG_jsondb__has(pCtx, p, SG_string__sz(pstr_path), &b_has_val)  );
            if (b_has_val)
            {
                SG_ERR_CHECK(  SG_jsondb__get__variant(pCtx, p, SG_string__sz(pstr_path), &pv)  );
            }
        }

        // then the repo
        if (!b_has_val && pRepo)
        {
            SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &psz_repo_id)  );

            SG_ERR_CHECK(  SG_string__sprintf(pCtx, pstr_path, "%s/%s/%s",
                        SG_LOCALSETTING__SCOPE__REPO,
                        psz_repo_id,
                        psz_path
                        )  );
            SG_ERR_CHECK(  SG_jsondb__has(pCtx, p, SG_string__sz(pstr_path), &b_has_val)  );
            if (b_has_val)
            {
                SG_ERR_CHECK(  SG_jsondb__get__variant(pCtx, p, SG_string__sz(pstr_path), &pv)  );
            }
        }

        // then the admin group of repos
        if (!b_has_val && pRepo)
        {
            SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id)  );

            SG_ERR_CHECK(  SG_string__sprintf(pCtx, pstr_path, "%s/%s/%s",
                        SG_LOCALSETTING__SCOPE__ADMIN,
                        psz_admin_id,
                        psz_path
                        )  );
            SG_ERR_CHECK(  SG_jsondb__has(pCtx, p, SG_string__sz(pstr_path), &b_has_val)  );
            if (b_has_val)
            {
                SG_ERR_CHECK(  SG_jsondb__get__variant(pCtx, p, SG_string__sz(pstr_path), &pv)  );
            }
        }

        // then the machine
        if (!b_has_val)
        {
            SG_ERR_CHECK(  SG_string__sprintf(pCtx, pstr_path, "%s/%s", SG_LOCALSETTING__SCOPE__MACHINE, psz_path)  );
            SG_ERR_CHECK(  SG_jsondb__has(pCtx, p, SG_string__sz(pstr_path), &b_has_val)  );
            if (b_has_val)
            {
                SG_ERR_CHECK(  SG_jsondb__get__variant(pCtx, p, SG_string__sz(pstr_path), &pv)  );
            }
        }

        // then the factory default
        if (!b_has_val)
        {
            SG_STRING_NULLFREE(pCtx, pstr_path);
            SG_ERR_CHECK(  SG_localsettings__factory__get__variant(pCtx, psz_path, &pv)  );
        }
    }

    *ppv = pv;
    pv = NULL;

    if (ppstr_where_found)
    {
        *ppstr_where_found = pstr_path;
        pstr_path = NULL;
    }

fail:
    SG_NULLFREE(pCtx, psz_repo_id);
    SG_NULLFREE(pCtx, psz_admin_id);
    SG_VARIANT_NULLFREE(pCtx, pv);
    SG_STRING_NULLFREE(pCtx, pstr_path);
    SG_JSONDB_NULLFREE(pCtx, p);
}