void GoUctDefaultPriorKnowledge::InitializeForGlobalHeuristic(
                                                const GoPointList& empty,
                                                const SgPointSet& pattern,
                                                const SgPointSet& atari,
                                                int nuSimulations)
{
    const GoBoard& bd = Board();
    for (GoPointList::Iterator it(empty); it; ++it)
    {
        const SgPoint p = *it;
        SG_ASSERT(bd.IsEmpty(p));
        if (BadSelfAtari(bd, p))
            Initialize(p, 0.1f, nuSimulations);
        else if (atari[p])
            Initialize(p, 1.0f, 3);
        else if (pattern[p])
            Initialize(p,
                       0.6f + (m_patternGammas[p] / m_maxPatternGamma) * 0.4f,
                       3);
        else
            Initialize(p, 0.5f, 3);
    }
}
static void _sg_jscontext__create(SG_context * pCtx, SG_jscontext ** ppJs)
{
    SG_jscontext * pJs = NULL;

    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK_RETURN(ppJs);

    SG_httprequestprofiler__start(SG_HTTPREQUESTPROFILER_CATEGORY__JSCONTEXT_CREATION);

    SG_ERR_CHECK(  SG_alloc1(pCtx, pJs)  );

    SG_ERR_CHECK(  SG_jscore__new_context(pCtx, &pJs->cx, &pJs->glob,
                                          gpJSContextPoolGlobalState->pServerConfig)  );

    pJs->isInARequest = SG_TRUE;

    *ppJs = pJs;

    SG_httprequestprofiler__stop();
    return;

fail:
    SG_NULLFREE(pCtx, pJs);
    SG_httprequestprofiler__stop();
}
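/*
 * A minimal sketch of the goto-fail idiom used by _sg_jscontext__create()
 * and the other SG_ functions here: every fallible call is wrapped in
 * SG_ERR_CHECK(), which records the error in pCtx and jumps to the local
 * fail: label, where everything allocated so far is released.  The
 * _example_thing type and its field are hypothetical placeholders, not
 * part of the real API.
 */
typedef struct { int placeholder; } _example_thing;

static void _example__create(SG_context * pCtx, _example_thing ** ppThing)
{
    _example_thing * pThing = NULL;

    SG_NULLARGCHECK_RETURN(ppThing);            // early-out on bad args

    SG_ERR_CHECK(  SG_alloc1(pCtx, pThing)  );  // jumps to fail on error
    pThing->placeholder = 1;

    *ppThing = pThing;                          // transfer ownership
    return;

fail:
    SG_NULLFREE(pCtx, pThing);                  // safe on NULL
}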
GoAdditiveKnowledge* GoUctKnowledgeFactory::CreateByType(const GoBoard& bd,
                                                         KnowledgeType type)
{
    switch(type)
    {
    case KNOWLEDGE_NONE:
        return 0;
    case KNOWLEDGE_GREENPEEP:
        return new GoUctAdditiveKnowledgeGreenpeep(bd, GreenpeepParam());
    case KNOWLEDGE_RULEBASED:
        return new GoUctAdditiveKnowledgeFuego(bd);
    case KNOWLEDGE_FEATURES:
        return m_featureKnowledgeFactory.Create(bd);
    case KNOWLEDGE_BOTH:
    {
        GoUctAdditiveKnowledgeFuego* f = new GoUctAdditiveKnowledgeFuego(bd);
        SgUctValue minimum = f->MinValue();
        GoUctAdditiveKnowledgeMultiple* m =
            new GoUctAdditiveKnowledgeMultiple(bd, minimum,
                                               m_param.m_combinationType);
        m->AddKnowledge(f);
        m->AddKnowledge(
            new GoUctAdditiveKnowledgeGreenpeep(bd, GreenpeepParam()));
        return m;
    }
    default:
        SG_ASSERT(false);
        return 0;
    }
}
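// A hypothetical caller sketch (not from the original sources): the factory
// transfers ownership of the returned knowledge object to the caller, and
// KNOWLEDGE_NONE yields a null pointer, so both cases must be handled.
// For KNOWLEDGE_BOTH, the returned GoUctAdditiveKnowledgeMultiple owns the
// two sub-knowledge objects added to it.
void ExampleAttachKnowledge(GoUctKnowledgeFactory& factory, const GoBoard& bd)
{
    GoAdditiveKnowledge* knowledge = factory.CreateByType(bd, KNOWLEDGE_BOTH);
    if (knowledge != 0)
    {
        // ... hand the pointer to an owning search component here ...
        delete knowledge; // caller owns the result unless ownership is passed on
    }
}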
static void on_uv_idle_cb(uv_idle_t * handle)
{
    sg_etp_session_t * session = (sg_etp_session_t *)handle->data;
    int len = 0;

    len = ikcp_peeksize(session->kcp);
    if (len > 0)
    {
        /* grow the receive buffer to fit the next complete message */
        if (len > session->recv_buf_len)
        {
            session->recv_buf = realloc(session->recv_buf, len);
            session->recv_buf_len = len;
        }
        SG_ASSERT(NULL != session->recv_buf, "alloc recv buf failed");

        len = ikcp_recv(session->kcp, session->recv_buf, len);
        SG_CALLBACK(session->on_data, session, session->recv_buf, len);
    }
    else
    {
        /* nothing pending: stop polling until new data arrives */
        uv_idle_stop(&(session->idle));
    }
}
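/*
 * Note on the drain loop above: ikcp_peeksize() returns the size of the
 * next complete message in the KCP receive queue (negative when none is
 * ready), so the callback grows recv_buf to fit, copies one message out
 * with ikcp_recv(), and hands it to on_data; once the queue is empty the
 * idle handle is stopped until new data arrives.  Also note that the
 * realloc() result is assigned to session->recv_buf before being checked,
 * so on allocation failure the old buffer pointer is lost before the
 * SG_ASSERT fires.
 */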
void sg_wc_tx__apply__store_symlink(SG_context * pCtx,
                                    SG_wc_tx * pWcTx,
                                    const SG_vhash * pvh)
{
    SG_pathname * pPath = NULL;
    SG_string * pStringSymlink = NULL;
    const char * pszRepoPath;       // we do not own this
    const char * pszHidExpected;    // we do not own this
    char * pszHidObserved = NULL;
    sg_wc_liveview_item * pLVI;     // we do not own this
    SG_int64 alias;
    SG_bool bKnown;
    SG_bool bDontBother_BlobEncoding;
    SG_bool bSrcIsSparse;

    SG_ERR_CHECK(  SG_vhash__get__sz(   pCtx, pvh, "src",        &pszRepoPath)  );
    SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvh, "alias",      &alias)  );
    SG_ERR_CHECK(  SG_vhash__get__sz(   pCtx, pvh, "hid",        &pszHidExpected)  );
    SG_ERR_CHECK(  SG_vhash__get__bool( pCtx, pvh, "src_sparse", &bSrcIsSparse)  );

#if TRACE_WC_TX_APPLY
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               ("sg_wc_tx__apply__store_symlink: '%s' [src-sparse %d]\n"),
                               pszRepoPath, bSrcIsSparse)  );
#endif

    SG_ERR_CHECK(  sg_wc_tx__liveview__fetch_random_item(pCtx, pWcTx, alias, &bKnown, &pLVI)  );
    SG_ASSERT(  (bSrcIsSparse == SG_WC_PRESCAN_FLAGS__IS_CONTROLLED_SPARSE(pLVI->scan_flags_Live))  );

    if (bSrcIsSparse)
    {
        // We've been asked to store the target of the symlink ***during a COMMIT***
        // and are given the *Expected-HID* (and we need to get the actual target
        // from the WD) and it is assumed that that will generate the same HID
        // that we were given.
        //
        // However, if the symlink is sparse (not populated) we can't do __readlink()
        // to get the (current) target.  So we have to
        // assume that we already have a blob in the repo for it.
        //
        // Since sparse items now have p_d_sparse dynamic data in tbl_PC, we assume
        // that whoever last modified the content of the symlink and set p_d_sparse->pszHid
        // also recorded the blob we need to be present now.  (See __apply__overwrite_symlink())
        //
        // for sanity's sake verify that we already have this blob in the repo.

        SG_uint64 len = 0;
        SG_ERR_CHECK(  SG_repo__fetch_blob__begin(pCtx, pWcTx->pDb->pRepo,
                                                  pszHidExpected, SG_TRUE,
                                                  NULL, NULL, NULL, &len, NULL)  );
        // so we don't need to do anything because we already
        // have a copy of this blob in the repo.
        return;
    }

    // We never bother compressing/encoding the symlink content
    // since it is so short.
    bDontBother_BlobEncoding = SG_TRUE;

    SG_ERR_CHECK(  sg_wc_db__path__sz_repopath_to_absolute(pCtx, pWcTx->pDb,
                                                           pszRepoPath, &pPath)  );
    SG_ERR_CHECK(  SG_fsobj__readlink(pCtx, pPath, &pStringSymlink)  );
    SG_ERR_CHECK(  SG_committing__add_bytes__string(pCtx,
                                                    pWcTx->pCommittingInProgress,
                                                    pStringSymlink,
                                                    bDontBother_BlobEncoding,
                                                    &pszHidObserved)  );

    // See note in __apply__store_file() about race condition.
    // If the HID computed now differs from what we thought
    // it should be, we lost the race.
    if (strcmp(pszHidObserved, pszHidExpected) != 0)
        SG_ERR_THROW2(  SG_ERR_ASSERT,
                        (pCtx, "The symlink '%s' changed during the commit.",
                         pszRepoPath)  );

fail:
    SG_PATHNAME_NULLFREE(pCtx, pPath);
    SG_STRING_NULLFREE(pCtx, pStringSymlink);
    SG_NULLFREE(pCtx, pszHidObserved);
}
void SgMiaiMap::ConvertToSgMiaiStrategy(SgMiaiStrategy* s) const
{
    // Unimplemented stub: asserts if ever called.
    SG_UNUSED(s);
    SG_ASSERT(false);
}
SG_bool SG_context__has_err(const SG_context* pCtx)
{
    SG_ASSERT( pCtx );
    return ( SG_CONTEXT__HAS_ERR(pCtx) );
}
static void _tree__add_next_node_to_results(SG_context * pCtx,
                                            _tree_t * pTree,
                                            SG_varray * pResults,
                                            SG_uint32 * pCountResults,
                                            SG_bool * pbLastResultWasIndented)
{
    SG_vhash * pResult = NULL;
    SG_varray * pChildren = NULL;
    SG_uint32 i;
    SG_varray * pTmp = NULL;

    // Add pTree->pNextResult to results list.
    SG_ERR_CHECK(  SG_varray__appendnew__vhash(pCtx, pResults, &pResult)  );
    SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pResult, "revno", pTree->pNextResult->revno)  );
    SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pResult, "changeset_id", pTree->pNextResult->pszHidRef)  );
    SG_ERR_CHECK(  SG_vhash__add__vhash(pCtx, pResult, "parents", &pTree->pNextResult->pVcParents)  );
    SG_ERR_CHECK(  SG_vhash__addnew__varray(pCtx, pResult, "displayChildren", &pChildren)  );
    for(i=0; i<pTree->pNextResult->displayChildren.count; ++i)
        SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pChildren, pTree->pNextResult->displayChildren.p[i]->revno)  );

    if(pTree->pNextResult->displayChildren.count>1 && pTree->indentLevel>0)
    {
        SG_varray * pContinuationToken = NULL;
        SG_ERR_CHECK(  SG_vhash__addnew__varray(pCtx, pResult, "continuationToken", &pContinuationToken)  );
        SG_ERR_CHECK(  _tree__generate_continuation_token(pCtx, pTree, pContinuationToken)  );
    }

    SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pResult, "indent", pTree->indentLevel)  );
    *pbLastResultWasIndented = (pTree->indentLevel > 0);

    if(pTree->pNextResult->pDisplayParent!=NULL)
    {
        SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pResult, "displayParent", pTree->pNextResult->pDisplayParent->revno)  );
        SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pResult, "indexInParent", pTree->pNextResult->pDisplayParent->displayChildren.count)  );
        SG_ASSERT(*pbLastResultWasIndented);
    }
    else
    {
        SG_ASSERT(!*pbLastResultWasIndented);
    }

    ++(*pCountResults);

    // Advance pTree->pNextResult pointer to next result.
    while(pTree->pNextResult->displayChildren.count==0 && pTree->pNextResult!=pTree->pRoot)
    {
        // We have already added this node and all children to the results.
        // Free the memory that will not be reused, and then put the node
        // on the "free list".
        _node__free_nonreusable_memory(pCtx, pTree->pNextResult);
        pTree->pNextResult->displayChildren.p[0] = pTree->pFreeList;
        pTree->pNextResult->displayChildren.count = 1;
        pTree->pFreeList = pTree->pNextResult;

        // Move back up in the tree.
        pTree->pNextResult = pTree->pNextResult->pDisplayParent;

        if(pTree->pNextResult!=NULL)
        {
            // The node we just freed... Remove it from its display parent too.
            --pTree->pNextResult->displayChildren.count;

            // All children but the leftmost one are indented by 1 from the parent.
            if(pTree->pNextResult->displayChildren.count>0)
                --pTree->indentLevel;
        }
    }
    if(pTree->pNextResult->displayChildren.count>0)
    {
        SG_uint32 i = pTree->pNextResult->displayChildren.count-1;
        pTree->pNextResult = pTree->pNextResult->displayChildren.p[i];
        if(i>=1)
            ++pTree->indentLevel;
    }
    else
    {
        pTree->pNextResult = NULL;
    }

    // If we advanced past root...
    if(pTree->pNextResult==NULL || pTree->pNextResult==pTree->pRoot->displayChildren.p[0])
    {
        // Out with the old root, in with the new...
        _node__free_nonreusable_memory(pCtx, pTree->pRoot);
        pTree->pRoot->displayChildren.p[0] = pTree->pFreeList;
        pTree->pRoot->displayChildren.count = 1;
        pTree->pFreeList = pTree->pRoot;

        pTree->pRoot = pTree->pNextResult;
        if(pTree->pRoot!=NULL)
            pTree->pRoot->pDisplayParent = NULL;
    }

    return;
fail:
    SG_VARRAY_NULLFREE(pCtx, pTmp);
}
bool DfpnSolver::Validate(DfpnHashTable& hashTable, const SgBlackWhite winner,
                          SgSearchTracer& tracer)
{
    SG_ASSERT_BW(winner);

    DfpnData data;
    if (! TTRead(data))
    {
        PointSequence pv;
        StartSearch(hashTable, pv);
        const bool wasRead = TTRead(data);
        SG_DEBUG_ONLY(wasRead);
        SG_ASSERT(wasRead);
    }

    const bool orNode = (winner == GetColorToMove());
    if (orNode)
    {
        if (! data.m_bounds.IsWinning())
        {
            SgWarning() << "OR not winning. DfpnData:" << data << std::endl;
            return false;
        }
    }
    else // AND node
    {
        if (! data.m_bounds.IsLosing())
        {
            SgWarning() << "AND not losing. DfpnData:" << data << std::endl;
            return false;
        }
    }

    SgEmptyBlackWhite currentWinner;
    if (TerminalState(GetColorToMove(), currentWinner))
    {
        if (winner == currentWinner)
            return true;
        else
        {
            SgWarning() << "winner disagreement: "
                        << SgEBW(winner) << ' ' << SgEBW(currentWinner)
                        << std::endl;
            return false;
        }
    }

    std::vector<SgMove> moves;
    if (orNode)
        moves.push_back(data.m_bestMove);
    else // AND node
        GenerateChildren(moves);

    // recurse
    for (std::vector<SgMove>::const_iterator it = moves.begin();
         it != moves.end(); ++it)
    {
        tracer.AddTraceNode(*it, GetColorToMove());
        PlayMove(*it);
        if (! Validate(hashTable, winner, tracer))
            return false;
        UndoMove();
        tracer.TakeBackTraceNode();
    }
    return true;
}
static void _advise_after_update(SG_context * pCtx,
                                 SG_option_state * pOptSt,
                                 SG_pathname * pPathCwd,
                                 const char * pszBaselineBeforeUpdate)
{
    SG_pendingtree * pPendingTree = NULL;
    SG_repo * pRepo;
    char * pszBaselineAfterUpdate = NULL;
    SG_rbtree * prbLeaves = NULL;
    SG_uint32 nrLeaves;
    SG_bool bUpdateChangedBaseline;

    // re-open pendingtree to get the now-current baseline (we have to do
    // this in a new instance because the UPDATE saves the pendingtree which
    // frees all of the interesting stuff).
    SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, pPathCwd, pOptSt->bIgnoreWarnings, &pPendingTree)  );
    SG_ERR_CHECK(  SG_pendingtree__get_repo(pCtx, pPendingTree, &pRepo)  );

    SG_ERR_CHECK(  _get_baseline(pCtx, pPendingTree, &pszBaselineAfterUpdate)  );

    // see if the update actually changed the baseline.
    bUpdateChangedBaseline = (strcmp(pszBaselineBeforeUpdate, pszBaselineAfterUpdate) != 0);

    // get the list of all heads/leaves.
    //
    // TODO 2010/06/30 Revisit this when we have NAMED BRANCHES because we
    // TODO            want to filter this list for things within their BRANCH.
    SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, &prbLeaves)  );
#if defined(DEBUG)
    {
        SG_bool bFound = SG_FALSE;
        SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbLeaves, pszBaselineAfterUpdate, &bFound, NULL)  );
        SG_ASSERT(  (bFound)  );
    }
#endif
    SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbLeaves, &nrLeaves)  );

    if (nrLeaves > 1)
    {
        if (bUpdateChangedBaseline)
        {
            SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT,
                                       "Baseline updated to descendant head, but there are multiple heads; consider merging.\n")  );
        }
        else
        {
            SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT,
                                       "Baseline already at head, but there are multiple heads; consider merging.\n")  );
        }
    }
    else
    {
        if (bUpdateChangedBaseline)
        {
            SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "Baseline updated to head.\n")  );
        }
        else
        {
            SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "Baseline already at head.\n")  );
        }
    }

fail:
    SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
    SG_RBTREE_NULLFREE(pCtx, prbLeaves);
    SG_NULLFREE(pCtx, pszBaselineAfterUpdate);
}
void CBlockManager::_RegisterBlock(s32 id, CBlockBase *block)
{
    SG_ASSERT( id > 0 && id <= CBlockBase::E_Block_Count && block!=NULL );
    m_Blocks[ id-1 ] = block;
}
static void _merge__compute_target_hid(SG_context * pCtx, SG_mrg * pMrg)
{
    const SG_rev_spec * pRevSpec = ((pMrg->pMergeArgs) ? pMrg->pMergeArgs->pRevSpec : NULL);
    SG_stringarray * psaHids = NULL;
    SG_stringarray * psaMissingHids = NULL;
    SG_rev_spec * pRevSpec_Allocated = NULL;
    SG_bool bRequestedAttachedBranch = SG_FALSE;
    SG_stringarray * psaBranchesRequested = NULL;
    const char * pszBranchNameRequested = NULL;
    SG_uint32 nrMatched = 0;
    SG_uint32 nrMatchedExcludingParent = 0;

    if (pRevSpec)
    {
        SG_uint32 uTotal = 0u;
        SG_uint32 uBranches = 0u;

        SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec, &uTotal)  );
        SG_ERR_CHECK(  SG_rev_spec__count_branches(pCtx, pRevSpec, &uBranches)  );
        if (uTotal == 0u)
        {
            // if the rev spec is empty, just pretend it doesn't exist
            pRevSpec = NULL;
        }
        else if (uTotal > 1u)
        {
            // we can only handle a single specification
            SG_ERR_THROW2(SG_ERR_INVALIDARG,
                          (pCtx, "Merge can accept at most one revision/tag/branch specifier."));
        }
        else if (uTotal == 1u && uBranches == 1u)
        {
            SG_ERR_CHECK(  SG_rev_spec__branches(pCtx, (/*const*/ SG_rev_spec *)pRevSpec,
                                                 &psaBranchesRequested)  );
            SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaBranchesRequested, 0,
                                                   &pszBranchNameRequested)  );
            if (pMrg->pszBranchName_Starting)
                bRequestedAttachedBranch = (strcmp(pszBranchNameRequested,
                                                   pMrg->pszBranchName_Starting) == 0);
        }
    }

    if (!pRevSpec)
    {
        if (!pMrg->pszBranchName_Starting)
            SG_ERR_THROW(  SG_ERR_NOT_TIED  );
        SG_ERR_CHECK(  SG_REV_SPEC__ALLOC(pCtx, &pRevSpec_Allocated)  );
        SG_ERR_CHECK(  SG_rev_spec__add_branch(pCtx, pRevSpec_Allocated,
                                               pMrg->pszBranchName_Starting)  );
        pRevSpec = pRevSpec_Allocated;
        pszBranchNameRequested = pMrg->pszBranchName_Starting;
        bRequestedAttachedBranch = SG_TRUE;
    }

    // Lookup the given (or synthesized) --rev/--tag/--branch
    // and see how many csets it refers to.  Disregard/filter-out
    // any that are not present in the local repo.
    SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pMrg->pWcTx->pDb->pRepo, pRevSpec, SG_TRUE,
                                              &psaHids, &psaMissingHids)  );
    SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaHids, &nrMatched)  );
    if (nrMatched == 0)
    {
        SG_uint32 nrMissing = 0;
        SG_ASSERT_RELEASE_FAIL(  (psaMissingHids != NULL)  );
        SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaMissingHids, &nrMissing)  );
        if (nrMissing == 1)
        {
            const char * psz_0;
            SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaMissingHids, 0, &psz_0)  );
            SG_ERR_THROW2(  SG_ERR_BRANCH_HEAD_CHANGESET_NOT_PRESENT,
                            (pCtx, "Branch '%s' refers to changeset '%s'. Consider pulling.",
                             pszBranchNameRequested, psz_0)  );
        }
        else
        {
            SG_ERR_THROW2(  SG_ERR_BRANCH_HEAD_CHANGESET_NOT_PRESENT,
                            (pCtx, "Branch '%s' refers to %d changesets that are not present. Consider pulling.",
                             pszBranchNameRequested, nrMissing)  );
        }
    }
    else if (nrMatched == 1)
    {
        // We found a single unique match for our request.
        //
        // We ***DO NOT*** disqualify the current baseline
        // in this case.  We let routines like do_cmd_merge_preview()
        // report that.
        const char * psz_0;
        SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaHids, 0, &psz_0)  );
        SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_0, &pMrg->pszHidTarget)  );
    }
    else
    {
        // We can only get here if pRevSpec contained a "--branch ..."
        // reference (because the "--rev" lookup throws when given a
        // non-unique prefix and "--tag" can only be bound to a single
        // cset).
        //
        // If they referenced the attached branch (and the baseline is
        // pointing at a head), we'll get our baseline in the result set,
        // so get rid of it.
        SG_ERR_CHECK(  SG_stringarray__remove_all(pCtx, psaHids,
                                                  pMrg->pszHid_StartingBaseline, NULL)  );
        SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaHids, &nrMatchedExcludingParent)  );
        if (nrMatchedExcludingParent == 1)
        {
            // parent may or may not be a head of this branch, but
            // we found a single head or single other head.
            const char * psz_0;
            SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaHids, 0, &psz_0)  );
            SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_0, &pMrg->pszHidTarget)  );
        }
        else if (nrMatchedExcludingParent < nrMatched)
        {
            // There were at least 3 heads of this branch and the baseline
            // is one of them.  Throwing a generic 'needs merge' message is
            // not helpful.
            SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                            (pCtx, "Branch '%s' has %d heads (excluding the baseline). Consider merging one of the other heads using --rev/--tag.",
                             pszBranchNameRequested, nrMatchedExcludingParent)  );
        }
        else //if (nrMatchedExcludingParent == nrMatched)
        {
            // The requested branch has multiple heads and the current
            // baseline is NOT one of them.  The current baseline MAY OR MAY NOT
            // be in that branch.  (And independently, we may or may not be
            // attached to that branch.)
            //
            // See how the heads are related to the current baseline.
            const char * pszDescendant0 = NULL;
            const char * pszAncestor0 = NULL;
            SG_uint32 nrDescendants = 0;
            SG_uint32 nrAncestors = 0;
            SG_uint32 k;

            for (k=0; k<nrMatched; k++)
            {
                const char * psz_k;
                SG_dagquery_relationship dqRel;
                SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaHids, k, &psz_k)  );
                SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pMrg->pWcTx->pDb->pRepo,
                                                                     SG_DAGNUM__VERSION_CONTROL,
                                                                     psz_k,
                                                                     pMrg->pszHid_StartingBaseline,
                                                                     SG_FALSE, SG_FALSE,
                                                                     &dqRel)  );
                if (dqRel == SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
                {
                    pszDescendant0 = psz_k;
                    nrDescendants++;    // target[k] is descendant of baseline
                }
                else if (dqRel == SG_DAGQUERY_RELATIONSHIP__ANCESTOR)
                {
                    pszAncestor0 = psz_k;
                    nrAncestors++;      // target[k] is ancestor of baseline
                }
            }
            SG_ASSERT(  ((nrDescendants == 0) || (nrAncestors == 0))  );

            if (nrDescendants == 1)
            {
                if (bRequestedAttachedBranch)   // The current baseline is attached to the same branch, just not a head.
                    SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                    (pCtx, "Branch '%s' has %d heads. Only changeset '%s' is a descendant of the current baseline. Consider updating to it and then merging the branch.",
                                     pszBranchNameRequested, nrMatched, pszDescendant0)  );
                else if (pMrg->pszBranchName_Starting)  // currently attached to a different branch
                    SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                    (pCtx, "Branch '%s' has %d heads. Only changeset '%s' is a descendant of the current baseline. Consider updating to it. You are attached to branch '%s'.",
                                     pszBranchNameRequested, nrMatched, pszDescendant0, pMrg->pszBranchName_Starting)  );
                else    // currently detached
                    SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                    (pCtx, "Branch '%s' has %d heads. Only changeset '%s' is a descendant of the current baseline. Consider updating to it. You are not attached to a branch.",
                                     pszBranchNameRequested, nrMatched, pszDescendant0)  );
            }
            else if (nrDescendants > 1) // nrDescendants may or may not be equal to nrMatched since there may be peers too.
            {
                if (bRequestedAttachedBranch)   // The current baseline is attached to the same branch, just not a head.
                    SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                    (pCtx, "Branch '%s' has %d heads. %d are descendants of the current baseline. Consider updating to one of them and then merging the branch.",
                                     pszBranchNameRequested, nrMatched, nrDescendants)  );
                else if (pMrg->pszBranchName_Starting)  // currently attached to a different branch
                    SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                    (pCtx, "Branch '%s' has %d heads. %d are descendants of the current baseline. Consider updating to one of them. You are attached to branch '%s'.",
                                     pszBranchNameRequested, nrMatched, nrDescendants, pMrg->pszBranchName_Starting)  );
                else    // currently detached
                    SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                    (pCtx, "Branch '%s' has %d heads. %d are descendants of the current baseline. Consider updating to one of them. You are not attached to a branch.",
                                     pszBranchNameRequested, nrMatched, nrDescendants)  );
            }
            else if (nrAncestors == 1)
            {
                if (bRequestedAttachedBranch)   // The current baseline is attached to the same branch, but the head pointer is not pointing at us.
                    SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                    (pCtx, "Branch '%s' has %d heads. Changeset '%s' is an ancestor of the current baseline. Consider moving that head forward and then merging the branch.",
                                     pszBranchNameRequested, nrMatched, pszAncestor0)  );
                else if (pMrg->pszBranchName_Starting)  // currently attached to a different branch
                    SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                    (pCtx, "Branch '%s' has %d heads. Changeset '%s' is an ancestor of the current baseline. Consider moving that head forward. You are attached to branch '%s'.",
                                     pszBranchNameRequested, nrMatched, pszAncestor0, pMrg->pszBranchName_Starting)  );
                else    // currently detached
                    SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                    (pCtx, "Branch '%s' has %d heads. Changeset '%s' is an ancestor of the current baseline. Consider moving that head forward. You are not attached to a branch.",
                                     pszBranchNameRequested, nrMatched, pszAncestor0)  );
            }
            else if (nrAncestors > 1)   // nrAncestors may or may not be equal to nrMatched since there may be peers too.
            {
                SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                (pCtx, "Branch '%s' has %d heads. All of them are ancestors of the current baseline. Consider moving one of the heads forward and removing the others.",
                                 pszBranchNameRequested, nrMatched)  );
            }
            else    // All of the heads are peers of the current baseline.
            {
                if (bRequestedAttachedBranch)   // The current baseline is attached to the same branch, but the head pointer is not pointing at us.
                    SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                    (pCtx, "Branch '%s' has %d heads. All are peers of the current baseline. Consider merging one of the other heads using --rev/--tag.",
                                     pszBranchNameRequested, nrMatched)  );
                else if (pMrg->pszBranchName_Starting)  // currently attached to a different branch
                    SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                    (pCtx, "Branch '%s' has %d heads. All are peers of the current baseline. Consider merging one of the other heads using --rev/--tag. You are attached to branch '%s'.",
                                     pszBranchNameRequested, nrMatched, pMrg->pszBranchName_Starting)  );
                else    // currently detached
                    SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
                                    (pCtx, "Branch '%s' has %d heads. All are peers of the current baseline. Consider merging one of the other heads using --rev/--tag. You are not attached to a branch.",
                                     pszBranchNameRequested, nrMatched)  );
            }
        }
    }

fail:
    SG_STRINGARRAY_NULLFREE(pCtx, psaBranchesRequested);
    SG_STRINGARRAY_NULLFREE(pCtx, psaHids);
    SG_STRINGARRAY_NULLFREE(pCtx, psaMissingHids);
    SG_REV_SPEC_NULLFREE(pCtx, pRevSpec_Allocated);
}
void SG_mrg_cset_entry_conflict__append_change(SG_context * pCtx,
                                               SG_mrg_cset_entry_conflict * pMrgCSetEntryConflict,
                                               SG_mrg_cset_entry * pMrgCSetEntry_Leaf_k,
                                               SG_mrg_cset_entry_neq neq)
{
    SG_NULLARGCHECK_RETURN(pMrgCSetEntryConflict);
    SG_NULLARGCHECK_RETURN(pMrgCSetEntry_Leaf_k);

    if (!pMrgCSetEntryConflict->pVec_MrgCSetEntry_Changes)
        SG_ERR_CHECK_RETURN(  SG_vector__alloc(pCtx,&pMrgCSetEntryConflict->pVec_MrgCSetEntry_Changes,2)  );
    SG_ERR_CHECK_RETURN(  SG_vector__append(pCtx,pMrgCSetEntryConflict->pVec_MrgCSetEntry_Changes,(void *)pMrgCSetEntry_Leaf_k,NULL)  );

    if (!pMrgCSetEntryConflict->pVec_MrgCSetEntryNeq_Changes)
        SG_ERR_CHECK_RETURN(  SG_vector_i64__alloc(pCtx,&pMrgCSetEntryConflict->pVec_MrgCSetEntryNeq_Changes,2)  );
    SG_ERR_CHECK_RETURN(  SG_vector_i64__append(pCtx,pMrgCSetEntryConflict->pVec_MrgCSetEntryNeq_Changes,(SG_int64)neq,NULL)  );

    //////////////////////////////////////////////////////////////////
    // add the value of the changed fields to the prbUnique_ rbtrees so
    // that we can get a count of the unique new values.
    //////////////////////////////////////////////////////////////////
    // the values for RENAME, MOVE, ATTRBITS, SYMLINKS, and SUBMODULES are
    // collapsible.  that is, if we have something like:
    //
    //          A
    //         / \.
    //       L0   a0
    //           / \.
    //         L1   L2
    //
    // and a rename in each Leaf, then we can either:
    // [a] prompt for them to choose L1 or L2's name and then
    //     prompt for them to choose L0 or the name from step 1.
    //
    // [b] prompt for them to choose L0, L1, or L2 in one question.
    //
    // unlike file-content-merging, the net-net is that we have 1 new value
    // that is one of the inputs (or maybe we let them pick a new one), but
    // it is not a combination of them and so we don't need to display the
    // immediate ancestor in the prompt.
    //
    // so we carry-forward the unique values from the leaves for each of
    // these fields.  so the final merge-result may have more unique values
    // than it has direct parents.
    //////////////////////////////////////////////////////////////////

    if (neq & SG_MRG_CSET_ENTRY_NEQ__ATTRBITS)
    {
        SG_int_to_string_buffer buf;
        SG_int64_to_sz((SG_int64)pMrgCSetEntry_Leaf_k->attrBits, buf);

        if (!pMrgCSetEntryConflict->prbUnique_AttrBits)
            SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx,&pMrgCSetEntryConflict->prbUnique_AttrBits)  );
        SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx,pMrgCSetEntryConflict->prbUnique_AttrBits,buf,pMrgCSetEntry_Leaf_k)  );
        if (pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict && pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_AttrBits)
            SG_ERR_CHECK_RETURN(  _carry_forward_unique_values(pCtx,
                                                               pMrgCSetEntryConflict->prbUnique_AttrBits,
                                                               pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_AttrBits)  );
    }

    if (neq & SG_MRG_CSET_ENTRY_NEQ__ENTRYNAME)
    {
        if (!pMrgCSetEntryConflict->prbUnique_Entryname)
            SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx,&pMrgCSetEntryConflict->prbUnique_Entryname)  );
        SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx,
                                                 pMrgCSetEntryConflict->prbUnique_Entryname,
                                                 SG_string__sz(pMrgCSetEntry_Leaf_k->pStringEntryname),
                                                 pMrgCSetEntry_Leaf_k)  );
        if (pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict && pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_Entryname)
            SG_ERR_CHECK_RETURN(  _carry_forward_unique_values(pCtx,
                                                               pMrgCSetEntryConflict->prbUnique_Entryname,
                                                               pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_Entryname)  );
    }

    if (neq & SG_MRG_CSET_ENTRY_NEQ__GID_PARENT)
    {
        if (!pMrgCSetEntryConflict->prbUnique_GidParent)
            SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx,&pMrgCSetEntryConflict->prbUnique_GidParent)  );
        SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx,pMrgCSetEntryConflict->prbUnique_GidParent,pMrgCSetEntry_Leaf_k->bufGid_Parent,pMrgCSetEntry_Leaf_k)  );
        if (pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict && pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_GidParent)
            SG_ERR_CHECK_RETURN(  _carry_forward_unique_values(pCtx,
                                                               pMrgCSetEntryConflict->prbUnique_GidParent,
                                                               pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_GidParent)  );
    }

    if (neq & SG_MRG_CSET_ENTRY_NEQ__SYMLINK_HID_BLOB)
    {
        if (!pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob)
            SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx,&pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob)  );
        SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx,pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob,pMrgCSetEntry_Leaf_k->bufHid_Blob,pMrgCSetEntry_Leaf_k)  );
        if (pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict && pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob)
            SG_ERR_CHECK_RETURN(  _carry_forward_unique_values(pCtx,
                                                               pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob,
                                                               pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob)  );
    }

    if (neq & SG_MRG_CSET_ENTRY_NEQ__SUBMODULE_HID_BLOB)
    {
        if (!pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob)
            SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx,&pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob)  );
        SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx,pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob,pMrgCSetEntry_Leaf_k->bufHid_Blob,pMrgCSetEntry_Leaf_k)  );
        if (pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict && pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob)
            SG_ERR_CHECK_RETURN(  _carry_forward_unique_values(pCtx,
                                                               pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob,
                                                               pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob)  );
    }

    // 2010/09/13 Update: we now do the carry-forward on the set of
    //            unique HIDs for the various versions of the file
    //            content from each of the leaves.  This lets us
    //            completely flatten the sub-merges into one final
    //            result (with up to n values).
    //
    //            This means we won't be creating the auto-merge-plan
    //            at this point.
    //
    //            The problem with the auto-merge-plan as originally
    //            designed is that it was being driven based upon
    //            the overall topology of the DAG as a whole rather
    //            than the topology/history of the individual file.
    //            And by respecting the history of the individual
    //            file, I think we can get closer ancestors and better
    //            per-file merging and perhaps fewer criss-crosses
    //            and/or we push all of these issues to RESOLVE.

    if (neq & SG_MRG_CSET_ENTRY_NEQ__FILE_HID_BLOB)
    {
        if (!pMrgCSetEntryConflict->prbUnique_File_HidBlob)
            SG_ERR_CHECK_RETURN(  SG_RBTREE__ALLOC(pCtx,&pMrgCSetEntryConflict->prbUnique_File_HidBlob)  );

        SG_ASSERT(  (pMrgCSetEntry_Leaf_k->bufHid_Blob[0])  );

        // TODO 2010/09/13 the code that sets __FILE_HID_BLOB probably cannot tell
        // TODO            whether this branch did not change the file content
        // TODO            relative to the LCA or whether it did change it back to
        // TODO            the original value (an UNDO of the edits).  I would argue
        // TODO            that we should not list the former as a change, but that
        // TODO            we SHOULD list the latter.  The fix doesn't belong here,
        // TODO            but this is just where I was typing when I thought of it.

        SG_ERR_CHECK_RETURN(  _update_1_rbUnique(pCtx,pMrgCSetEntryConflict->prbUnique_File_HidBlob,pMrgCSetEntry_Leaf_k->bufHid_Blob,pMrgCSetEntry_Leaf_k)  );
        if (pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict && pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_File_HidBlob)
            SG_ERR_CHECK_RETURN(  _carry_forward_unique_values(pCtx,
                                                               pMrgCSetEntryConflict->prbUnique_File_HidBlob,
                                                               pMrgCSetEntry_Leaf_k->pMrgCSetEntryConflict->prbUnique_File_HidBlob)  );
    }
}
void SG_dagquery__highest_revno_common_ancestor(
    SG_context * pCtx,
    SG_repo * pRepo,
    SG_uint64 dagnum,
    const SG_stringarray * pInputNodeHids,
    char ** ppOutputNodeHid
    )
{
    const char * const * paszInputNodeHids = NULL;
    SG_uint32 countInputNodes = 0;
    SG_repo_fetch_dagnodes_handle * pDagnodeFetcher = NULL;
    _hrca_work_queue_t workQueue = {NULL, 0, 0, NULL};
    SG_uint32 i;
    SG_dagnode * pDagnode = NULL;
    const char * pszHidRef = NULL;
    SG_bitvector * pIsAncestorOf = NULL;
    SG_uint32 countIsAncestorOf = 0;

    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK(pRepo);
    SG_NULLARGCHECK(pInputNodeHids);
    SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, pInputNodeHids, &paszInputNodeHids, &countInputNodes)  );
    SG_ARGCHECK(countInputNodes>0, pInputNodeHids);
    SG_NULLARGCHECK(ppOutputNodeHid);

    SG_ERR_CHECK(  SG_repo__fetch_dagnodes__begin(pCtx, pRepo, dagnum, &pDagnodeFetcher)  );

    SG_ERR_CHECK(  SG_allocN(pCtx, _HRCA_WORK_QUEUE_INIT_LENGTH, workQueue.p)  );
    workQueue.allocatedLength = _HRCA_WORK_QUEUE_INIT_LENGTH;
    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &workQueue.pRevnoCache)  );

    SG_ERR_CHECK(  SG_BITVECTOR__ALLOC(pCtx, &pIsAncestorOf, countInputNodes)  );
    for(i=0; i<countInputNodes; ++i)
    {
        SG_ERR_CHECK(  SG_bitvector__zero(pCtx, pIsAncestorOf)  );
        SG_ERR_CHECK(  SG_bitvector__set_bit(pCtx, pIsAncestorOf, i, SG_TRUE)  );
        SG_ERR_CHECK(  _hrca_work_queue__insert(pCtx, &workQueue, paszInputNodeHids[i], pRepo, pDagnodeFetcher, pIsAncestorOf)  );
    }
    SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

    SG_ERR_CHECK(  _hrca_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &pIsAncestorOf)  );
    SG_ERR_CHECK(  SG_bitvector__count_set_bits(pCtx, pIsAncestorOf, &countIsAncestorOf)  );
    while(countIsAncestorOf < countInputNodes)
    {
        SG_uint32 count_parents = 0;
        const char** parents = NULL;
        SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pDagnode, &count_parents, &parents)  );
        for(i=0; i<count_parents; ++i)
            SG_ERR_CHECK(  _hrca_work_queue__insert(pCtx, &workQueue, parents[i], pRepo, pDagnodeFetcher, pIsAncestorOf)  );

        SG_DAGNODE_NULLFREE(pCtx, pDagnode);
        SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

        SG_ERR_CHECK(  _hrca_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &pIsAncestorOf)  );
        SG_ERR_CHECK(  SG_bitvector__count_set_bits(pCtx, pIsAncestorOf, &countIsAncestorOf)  );
    }

    SG_ERR_CHECK(  SG_strdup(pCtx, pszHidRef, ppOutputNodeHid)  );

    SG_DAGNODE_NULLFREE(pCtx, pDagnode);
    SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);
    for(i=0; i<workQueue.length; ++i)
    {
        SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
        SG_BITVECTOR_NULLFREE(pCtx, workQueue.p[i].pIsAncestorOf);
    }
    SG_NULLFREE(pCtx, workQueue.p);
    SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);
    SG_ERR_CHECK(  SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pDagnodeFetcher)  );

    return;
fail:
    for(i=0; i<workQueue.length; ++i)
    {
        SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
        SG_BITVECTOR_NULLFREE(pCtx, workQueue.p[i].pIsAncestorOf);
    }
    SG_NULLFREE(pCtx, workQueue.p);
    SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);
    SG_DAGNODE_NULLFREE(pCtx, pDagnode);
    SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);
    if(pDagnodeFetcher!=NULL)
    {
        SG_ERR_IGNORE(  SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pDagnodeFetcher)  );
    }
}
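/*
 * How the walk above converges (as the helper names suggest): the work
 * queue pops nodes in decreasing-revno order, and each queued node carries
 * a bitvector with one bit per input node recording which inputs it is an
 * ancestor of (or is); inserting a node that is already queued ORs the
 * bitvectors together.  The first popped node whose bitvector has all
 * countInputNodes bits set is therefore the highest-revno common ancestor.
 */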
void SG_dagquery__find_new_since_common(
    SG_context * pCtx,
    SG_repo * pRepo,
    SG_uint64 dagnum,
    const char * pszOldNodeHid,
    const char * pszNewNodeHid,
    SG_stringarray ** ppResults
    )
{
    _fnsc_work_queue_t workQueue = {NULL, 0, 0, 0, NULL};
    SG_uint32 i;
    SG_dagnode * pDagnode = NULL;
    SG_stringarray * pResults = NULL;

    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK(pRepo);
    SG_NONEMPTYCHECK(pszOldNodeHid);
    SG_NONEMPTYCHECK(pszNewNodeHid);
    SG_NULLARGCHECK(ppResults);

    SG_ERR_CHECK(  SG_allocN(pCtx, _FNSC_WORK_QUEUE_INIT_LENGTH, workQueue.p)  );
    workQueue.allocatedLength = _FNSC_WORK_QUEUE_INIT_LENGTH;
    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &workQueue.pRevnoCache)  );

    SG_ERR_CHECK(  _fnsc_work_queue__insert(pCtx, &workQueue, pszOldNodeHid, dagnum, pRepo, _ANCESTOR_OF_OLD)  );
    SG_ERR_CHECK(  _fnsc_work_queue__insert(pCtx, &workQueue, pszNewNodeHid, dagnum, pRepo, _ANCESTOR_OF_NEW)  );

    SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &pResults, 32)  );

    while(workQueue.numAncestorsOfNewOnTheQueue > 0)
    {
        const char * pszHidRef = NULL;
        SG_byte isAncestorOf = 0;

        SG_ERR_CHECK(  _fnsc_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &isAncestorOf)  );
        if (isAncestorOf==_ANCESTOR_OF_NEW)
            SG_ERR_CHECK(  SG_stringarray__add(pCtx, pResults, pszHidRef)  );

        {
            SG_uint32 count_parents = 0;
            const char** parents = NULL;
            SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pDagnode, &count_parents, &parents)  );
            for(i=0; i<count_parents; ++i)
                SG_ERR_CHECK(  _fnsc_work_queue__insert(pCtx, &workQueue, parents[i], dagnum, pRepo, isAncestorOf)  );
        }

        SG_DAGNODE_NULLFREE(pCtx, pDagnode);
    }

    for(i=0; i<workQueue.length; ++i)
        SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
    SG_NULLFREE(pCtx, workQueue.p);
    SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);

    *ppResults = pResults;

    return;
fail:
    for(i=0; i<workQueue.length; ++i)
        SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
    SG_NULLFREE(pCtx, workQueue.p);
    SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);
    SG_DAGNODE_NULLFREE(pCtx, pDagnode);
    SG_STRINGARRAY_NULLFREE(pCtx, pResults);
}
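/*
 * The same work-queue machinery, used here to diff ancestries: each node
 * is tagged with which starting point(s) reached it, and a node popped
 * while still marked only _ANCESTOR_OF_NEW is reachable from the new head
 * but not the old one, so it goes into pResults.  The loop can stop as
 * soon as no purely-new ancestors remain queued.
 */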
void SG_password__get(
    SG_context *pCtx,
    const char *szRepoSpec,
    const char *username,
    SG_string **ppstrPassword)
{
    SG_string *password = NULL;
    SG_string *path = NULL;
    SG_string *server = NULL;
    SG_string *proto = NULL;
    SG_uint32 port;
    SG_bool isValid = SG_FALSE;
    GnomeKeyringResult saveRes = 0;
    GList *results = NULL;
    guint count = 0;

    SG_NULLARGCHECK(username);
    SG_NULLARGCHECK(ppstrPassword);
    SG_NULLARGCHECK(szRepoSpec);

    if (! SG_password__supported())
        goto fail;

    SG_ERR_CHECK(  _sg_password__parse_url(pCtx, szRepoSpec,
                                           &isValid, &proto, &server, &path, &port)  );
    if (! isValid)
        SG_ERR_THROW(SG_ERR_NOTIMPLEMENTED);

    saveRes = gnome_keyring_find_network_password_sync(
        username, NULL,
        SG_string__sz(server), SG_string__sz(path),
        SG_string__sz(proto), NULL, (guint32)port,
        &results);

    if ((saveRes != GNOME_KEYRING_RESULT_OK) &&
        (saveRes != GNOME_KEYRING_RESULT_NO_MATCH) &&
        (saveRes != GNOME_KEYRING_RESULT_CANCELLED))
        _SG_THROW_LINUX_SEC_ERROR(saveRes);

    if (results != NULL)
        count = g_list_length(results);

    if (count > 0)
    {
        const char *pw = "";
        GnomeKeyringNetworkPasswordData *entry = g_list_nth_data(results, 0);

        SG_ASSERT(entry != NULL);

        if (entry->password)
            pw = entry->password;

        SG_ERR_CHECK(  SG_string__alloc__sz(pCtx, &password, pw)  );
    }

    *ppstrPassword = password;
    password = NULL;

fail:
    SG_STRING_NULLFREE(pCtx, path);
    SG_STRING_NULLFREE(pCtx, server);
    SG_STRING_NULLFREE(pCtx, proto);
    SG_STRING_NULLFREE(pCtx, password);
    if (results)
        gnome_keyring_network_password_list_free(results);
}
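/*
 * Note: when SG_password__supported() returns false (or parsing throws),
 * control jumps to fail without ever assigning *ppstrPassword, so callers
 * are expected to initialize their output pointer to NULL and/or check
 * SG_context__has_err() before using the result.
 */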
SG_error SG_context__err_to_string(SG_context* pCtx, SG_string** ppErrString)
{
    SG_error err = SG_ERR_OK;
    char szErr[SG_ERROR_BUFFER_SIZE];
    SG_string* pTempStr = NULL;
    SG_uint32 lenRequired;

    SG_ASSERT( pCtx );
    // SG_ASSERT( pCtx->level < SG_CONTEXT_MAX_ERROR_LEVELS );

    if (!ppErrString)
        return SG_ERR_INVALIDARG;

    if (pCtx->level > 0)
    {
        // when in an error-on-error (level > 0), the error string DOES NOT belong to the
        // error context/level that we are in.  The full message is only defined for the
        // original error that triggered things.
        *ppErrString = NULL;
        return SG_ERR_OK;
    }

    SG_error__get_message(pCtx->errValues[pCtx->level], szErr, sizeof(szErr));

    lenRequired = (  strlen(szErr)
                   + strlen(pCtx->szDescription)
                   + pCtx->lenStackTrace
                   + 100);

    // do all of the formatting in an error-on-error context/level
    // so that none of the string allocation/manipulation trashes
    // the current error context.
    //
    // ***DO NOT JUMP OUT OF THIS PUSH..POP BLOCK.***

    SG_context__push_level(pCtx);
    {
        SG_STRING__ALLOC__RESERVE(pCtx, &pTempStr, lenRequired);
        if (SG_IS_OK(pCtx->errValues[pCtx->level]))
        {
            // since we pre-reserved all of the space we require, we
            // don't expect any of the appends to fail.  So we can
            // ignore intermediate errors after each step.  if one of
            // them does have a problem, the subsequent statements will
            // fail because we already have an error at this level.
            // either way, we don't care because we're going to pop the
            // level in a minute anyway.

            if (szErr[0] != 0)
            {
                SG_string__append__sz(pCtx, pTempStr, szErr);
                if (pCtx->szDescription[0] != 0)
                    SG_string__append__sz(pCtx, pTempStr, ": ");
                else
                    SG_string__append__sz(pCtx, pTempStr, ".");
            }
            if (pCtx->szDescription[0] != 0)
                SG_string__append__sz(pCtx, pTempStr, pCtx->szDescription);

            if (szErr[0] != 0 || pCtx->szDescription[0] != 0)
                SG_string__append__sz(pCtx, pTempStr, "\n");

            SG_string__append__sz(pCtx, pTempStr, pCtx->szStackTrace);

            // if all of the formatting works, we return the allocated string.
            // if not, we delete it and return the formatting error.
            if (SG_IS_OK(pCtx->errValues[pCtx->level]))
                *ppErrString = pTempStr;
            else
                SG_STRING_NULLFREE(pCtx, pTempStr);
        }

        err = pCtx->errValues[pCtx->level];     // we return the error value of the allocation/formatting
    }
    SG_context__pop_level(pCtx);

    return err;
}
SG_error SG_context__err_stackframe_add(SG_context* pCtx,
                                        const char* szFilename, SG_uint32 linenum)
{
    SG_error err = SG_ERR_OK;
    SG_uint32 len_needed;
    SG_uint32 len_avail;
    char buf[20];

    SG_ASSERT( pCtx );
    // SG_ASSERT( pCtx->level < SG_CONTEXT_MAX_ERROR_LEVELS );

    if (pCtx->level > 0)    // when in an error-on-error (level > 0),
        return SG_ERR_OK;   // we don't add to the stack trace.

    SG_ASSERT( (szFilename && *szFilename) );
    SG_ASSERT( (linenum) );
    SG_ASSERT( (SG_IS_ERROR(pCtx->errValues[pCtx->level])) );
    SG_ASSERT( (!pCtx->bStackTraceAtLimit) );

    // do all of the formatting in an error-on-error level so that none of
    // the string manipulation trashes the current error context.
    //
    // ***DO NOT JUMP OUT OF THIS PUSH..POP BLOCK.***

    SG_context__push_level(pCtx);
    {
        SG_uint32 lenFilename;
        SG_uint32 lenLineNr;

        SG_sprintf(pCtx, buf, sizeof(buf), "%d", linenum);
        lenLineNr = strlen(buf);
        lenFilename = strlen(szFilename);

        len_needed = lenFilename + lenLineNr + 3;   // 3 == \t, :, and \n.

        // TODO should 5 be 6 to account for the NULL byte?
        // 5 == "\t...\n"
        len_avail = SG_NrElements(pCtx->szStackTrace) - pCtx->lenStackTrace - 5;

        if (len_needed > len_avail)
        {
            pCtx->bStackTraceAtLimit = SG_TRUE;
            memmove(&pCtx->szStackTrace[pCtx->lenStackTrace], "\t...\n", 5);
            pCtx->lenStackTrace += 5;
            pCtx->szStackTrace[pCtx->lenStackTrace] = 0;
        }
        else
        {
            pCtx->szStackTrace[pCtx->lenStackTrace] = '\t';
            pCtx->lenStackTrace++;

            memmove(&pCtx->szStackTrace[pCtx->lenStackTrace], szFilename, lenFilename);
            pCtx->lenStackTrace += lenFilename;

            pCtx->szStackTrace[pCtx->lenStackTrace] = ':';
            pCtx->lenStackTrace++;

            memmove(&pCtx->szStackTrace[pCtx->lenStackTrace], buf, lenLineNr);
            pCtx->lenStackTrace += lenLineNr;

            pCtx->szStackTrace[pCtx->lenStackTrace] = '\n';
            pCtx->lenStackTrace++;

            pCtx->szStackTrace[pCtx->lenStackTrace] = 0;
        }

        err = pCtx->errValues[pCtx->level];     // we return the error value from the formatting.
    }
    SG_context__pop_level(pCtx);

    return err;
}
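/*
 * Illustration of the buffer layout built above: each frame appends
 * "\t<file>:<line>\n", so a trace accumulates as, e.g.:
 *
 *     \tsrc/sg_foo.c:123\n
 *     \tsrc/sg_bar.c:456\n
 *
 * and once the remaining space is too small, bStackTraceAtLimit is latched
 * and a final "\t...\n" marker is written in place of further frames.
 */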
/* ===============================
   static functions
   =============================== */

inline DWORD WINAPI threadproc(LPVOID p)
{
    thread * t = (thread *)p;
    SG_ASSERT(t);
    t->run_loop();
    return 1;
}
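/*
 * A hypothetical usage sketch (not from the original sources) showing how
 * a WinAPI entry point like threadproc() is typically wired up.  Only
 * CreateThread/WaitForSingleObject/CloseHandle are standard WinAPI; the
 * 'thread' class and its run_loop() lifetime rules are assumptions.
 */
static void example_start_and_join(thread * t)
{
    HANDLE h = CreateThread(NULL,        // default security attributes
                            0,           // default stack size
                            threadproc,  // the wrapper above
                            t,           // passed through as LPVOID
                            0,           // run immediately
                            NULL);       // thread id not needed
    if (h != NULL)
    {
        WaitForSingleObject(h, INFINITE); // returns when run_loop() exits
        CloseHandle(h);
    }
}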
/**
 * Return a pathname (live or temp) to a file that contains
 * the CURRENTLY QUEUED content that this item **SHOULD** have
 * at this point in the TX.
 *
 * That is, the caller could be in the middle of a TX and have
 * overwritten the file once or twice and then may now be
 * requesting the path to show a diff.  Or the file content may
 * be unchanged, but we have queued one or more moves/renames to
 * it or parent directories.
 *
 * As a good player INSIDE THE TX, we need to give them a path
 * to a CURRENT IN-TX COPY OF THE ***CONTENT*** (wherever it
 * may be).
 *
 * So the path we return may be to a temp file that was created
 * as a source for a QUEUED overwrite.  Or it may be a path to
 * the unmodified content in the WD -- WHERE IT WAS BEFORE THE
 * TX -- because until APPLY is called, the WD hasn't been
 * changed yet.
 *
 * Regardless of whether the result is a temp file or not, the
 * caller should be careful not to let the user modify the file
 * without participating in the TX.  That is, if we return the
 * actual non-temp working copy of a file and they use it in a
 * DIFF and the user's difftool is interactive and they alter
 * it and then we cancel the TX, what should the WD version of
 * the file contain?
 *
 * See also:
 *     __overwrite_file_from_file()
 *     __overwrite_file_from_repo()
 *     __add_special()
 *     __undo_delete()
 *
 * We return an indication of whether the file is a TEMP file
 * and shouldn't be written to.  It DOES NOT indicate that you
 * can delete it -- it indicates that you should not edit it because
 * *WE* will probably delete the file if the TX is rolled back and so
 * the user would lose their edits.
 */
void sg_wc_liveview_item__get_proxy_file_path(SG_context * pCtx,
                                              sg_wc_liveview_item * pLVI,
                                              SG_wc_tx * pWcTx,
                                              SG_pathname ** ppPath,
                                              SG_bool * pbIsTmp)
{
    SG_string * pStringRepoPath = NULL;
    SG_pathname * pPathAbsolute = NULL;
    char * pszGid = NULL;
    const char * psz;
    SG_bool bIsTmp = SG_TRUE;

    if (pLVI->tneType != SG_TREENODEENTRY_TYPE_REGULAR_FILE)
        SG_ERR_THROW2_RETURN(  SG_ERR_INVALIDARG,
                               (pCtx, "GetProxyFilePath: '%s' is not a file.",
                                SG_string__sz(pLVI->pStringEntryname))  );

    if (pLVI->queuedOverwrites.pvhContent == NULL)
    {
        // No changes to the content yet in this TX.  Return the PRE-TX
        // pathname of this file.  (We may have QUEUED moves/renames on
        // the file or a parent directory, but they haven't been applied
        // yet.)
        SG_ASSERT(  pLVI->pPrescanRow  );
        SG_ASSERT(  pLVI->pPrescanRow->pStringEntryname  );
        SG_ASSERT(  pLVI->pPrescanRow->pPrescanDir_Ref  );
        SG_ASSERT(  pLVI->pPrescanRow->pPrescanDir_Ref->pStringRefRepoPath  );

        SG_ERR_CHECK(  SG_STRING__ALLOC__COPY(pCtx, &pStringRepoPath,
                                              pLVI->pPrescanRow->pPrescanDir_Ref->pStringRefRepoPath)  );
        SG_ERR_CHECK(  SG_repopath__append_entryname(pCtx, pStringRepoPath,
                                                     SG_string__sz(pLVI->pPrescanRow->pStringEntryname),
                                                     SG_FALSE)  );
        SG_ERR_CHECK(  sg_wc_db__path__repopath_to_absolute(pCtx, pWcTx->pDb,
                                                            pStringRepoPath,
                                                            &pPathAbsolute)  );
        bIsTmp = SG_FALSE;  // path is to actual WC file
        goto done;
    }

    SG_ERR_CHECK_RETURN(  SG_vhash__check__sz(pCtx, pLVI->queuedOverwrites.pvhContent, "file", &psz)  );
    if (psz)
    {
        // return path to existing TEMP file.  someone else owns the file.
        SG_ERR_CHECK(  SG_PATHNAME__ALLOC__SZ(pCtx, &pPathAbsolute, psz)  );
        bIsTmp = SG_TRUE;   // path is to a TEMP file (for which an overwrite-from-file has already been scheduled).
        goto done;
    }

    SG_ERR_CHECK_RETURN(  SG_vhash__check__sz(pCtx, pLVI->queuedOverwrites.pvhContent, "hid", &psz)  );
    if (psz)
    {
        // synthesize a TEMP file for this.  caller owns the new temp file.
        SG_ERR_CHECK(  sg_wc_db__gid__get_gid_from_alias(pCtx, pWcTx->pDb, pLVI->uiAliasGid, &pszGid)  );
        SG_ERR_CHECK(  sg_wc_diff_utils__export_to_temp_file(pCtx, pWcTx, "ref", pszGid, psz,
                                                             SG_string__sz(pLVI->pStringEntryname),  // for suffix only
                                                             &pPathAbsolute)  );
        bIsTmp = SG_TRUE;   // path is to a TEMP file that we just created.
        goto done;
    }

    SG_ERR_THROW2_RETURN(  SG_ERR_NOTIMPLEMENTED,
                           (pCtx, "GetProxyFilePath: required field missing from vhash for: %s",
                            SG_string__sz(pLVI->pStringEntryname))  );

done:
#if TRACE_WC_LIE
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               "GetProxyFilePath: '%s' ==> '%s' [bIsTmp %d]\n",
                               SG_string__sz(pLVI->pStringEntryname),
                               SG_pathname__sz(pPathAbsolute),
                               bIsTmp)  );
#endif
    *ppPath = pPathAbsolute;
    pPathAbsolute = NULL;
    *pbIsTmp = bIsTmp;

fail:
    SG_PATHNAME_NULLFREE(pCtx, pPathAbsolute);
    SG_STRING_NULLFREE(pCtx, pStringRepoPath);
    SG_NULLFREE(pCtx, pszGid);
}
size_t DfpnSolver::MID(const DfpnBounds& maxBounds, DfpnHistory& history)
{
    maxBounds.CheckConsistency();
    SG_ASSERT(maxBounds.phi > 1);
    SG_ASSERT(maxBounds.delta > 1);

    ++m_numMIDcalls;
    size_t prevWork = 0;
    SgEmptyBlackWhite colorToMove = GetColorToMove();

    DfpnData data;
    if (TTRead(data))
    {
        prevWork = data.m_work;
        if (! maxBounds.GreaterThan(data.m_bounds))
            // Estimated bounds are larger than we had
            // anticipated. The calling state must have computed
            // the max bounds with out of date information, so just
            // return here without doing anything: the caller will
            // now update to this new info and carry on.
            return 0;
    }
    else
    {
        SgEmptyBlackWhite winner = SG_EMPTY;
        if (TerminalState(colorToMove, winner))
        {
            ++m_numTerminal;
            DfpnBounds terminal;
            if (colorToMove == winner)
                DfpnBounds::SetToWinning(terminal);
            else
            {
                SG_ASSERT(SgOppBW(colorToMove) == winner);
                DfpnBounds::SetToLosing(terminal);
            }
            TTWrite(DfpnData(terminal, SG_NULLMOVE, 1));
            return 1;
        }
    }

    ++m_generateMoves;
    DfpnChildren children;
    GenerateChildren(children.Children());

    // Not thread safe: perhaps move into while loop below later...
    std::vector<DfpnData> childrenData(children.Size());
    for (size_t i = 0; i < children.Size(); ++i)
        LookupData(childrenData[i], children, i);
    // Index used for progressive widening
    size_t maxChildIndex = ComputeMaxChildIndex(childrenData);

    SgHashCode currentHash = Hash();
    SgMove bestMove = SG_NULLMOVE;
    DfpnBounds currentBounds;
    size_t localWork = 1;
    do
    {
        UpdateBounds(currentBounds, childrenData, maxChildIndex);
        if (! maxBounds.GreaterThan(currentBounds))
            break;

        // Select most proving child
        std::size_t bestIndex = 999999;
        DfpnBoundType delta2 = DfpnBounds::INFTY;
        SelectChild(bestIndex, delta2, childrenData, maxChildIndex);
        bestMove = children.MoveAt(bestIndex);

        // Compute maximum bound for child
        const DfpnBounds childBounds(childrenData[bestIndex].m_bounds);
        DfpnBounds childMaxBounds;
        childMaxBounds.phi = maxBounds.delta
            - (currentBounds.delta - childBounds.phi);
        childMaxBounds.delta = delta2 == DfpnBounds::INFTY ? maxBounds.phi :
            std::min(maxBounds.phi,
                     std::max(delta2 + 1,
                              DfpnBoundType(delta2 * (1.0 + m_epsilon))));
        SG_ASSERT(childMaxBounds.GreaterThan(childBounds));
        if (delta2 != DfpnBounds::INFTY)
            m_deltaIncrease.Add(float(childMaxBounds.delta - childBounds.delta));

        // Recurse on best child
        PlayMove(bestMove);
        history.Push(bestMove, currentHash);
        localWork += MID(childMaxBounds, history);
        history.Pop();
        UndoMove();

        // Update bounds for best child
        LookupData(childrenData[bestIndex], children, bestIndex);

        // Compute some stats when we find a winning move
        if (childrenData[bestIndex].m_bounds.IsLosing())
        {
            m_moveOrderingIndex.Add(float(bestIndex));
            m_moveOrderingPercent.Add(float(bestIndex)
                                      / (float)childrenData.size());
            m_totalWastedWork += prevWork + localWork
                - childrenData[bestIndex].m_work;
        }
        else if (childrenData[bestIndex].m_bounds.IsWinning())
            maxChildIndex = ComputeMaxChildIndex(childrenData);

    } while (! CheckAbort());

    // Find the most delaying move for losing states, and the smallest
    // winning move for winning states.
    if (currentBounds.IsSolved())
    {
        if (currentBounds.IsLosing())
        {
            std::size_t maxWork = 0;
            for (std::size_t i = 0; i < children.Size(); ++i)
            {
                if (childrenData[i].m_work > maxWork)
                {
                    maxWork = childrenData[i].m_work;
                    bestMove = children.MoveAt(i);
                }
            }
        }
        else
        {
            std::size_t minWork = DfpnBounds::INFTY;
            for (std::size_t i = 0; i < children.Size(); ++i)
            {
                if (childrenData[i].m_bounds.IsLosing()
                    && childrenData[i].m_work < minWork)
                {
                    minWork = childrenData[i].m_work;
                    bestMove = children.MoveAt(i);
                }
            }
        }
    }

    // Store search results
    TTWrite(DfpnData(currentBounds, bestMove, localWork + prevWork));
    return localWork;
}
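// Context for MID() above: this is the "multiple iterative deepening" step
// of depth-first proof-number (df-pn) search. Each call expands the most
// proving child until the node's (phi, delta) bounds exceed maxBounds,
// then writes the refined bounds plus the accumulated work count back to
// the transposition table and returns the work performed locally.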
void sg_wc_liveview_item__alloc__add_special(SG_context * pCtx,
                                             sg_wc_liveview_item ** ppLVI,
                                             SG_wc_tx * pWcTx,
                                             SG_uint64 uiAliasGid,
                                             SG_uint64 uiAliasGidParent,
                                             const char * pszEntryname,
                                             SG_treenode_entry_type tneType,
                                             const char * pszHidMerge,
                                             SG_int64 attrbits,
                                             SG_wc_status_flags statusFlagsAddSpecialReason)
{
    sg_wc_liveview_item * pLVI = NULL;
    SG_bool bFoundIssue = SG_FALSE;

    SG_ERR_CHECK(  SG_alloc1(pCtx, pLVI)  );

    // caller needs to set the backptr if appropriate
    // if/when it adds this LVI to the LVD's vector.
    pLVI->pLiveViewDir = NULL;

    pLVI->uiAliasGid = uiAliasGid;

    SG_ERR_CHECK(  SG_STRING__ALLOC__SZ(pCtx, &pLVI->pStringEntryname, pszEntryname)  );

    // During the QUEUE phase where we are doing this
    // special-add, we have not yet actually created
    // the item on disk.  So we should indicate that
    // scandir/readdir didn't know anything about it.
    //
    // TODO 2012/01/31 During the APPLY phase, we need to
    // TODO            fix-up this field.  Because all of
    // TODO            the __get_original_ and __get_current_
    // TODO            routines below assume that this field
    // TODO            is set.  That is, if you want to do
    // TODO            additional operations (like status)
    // TODO            after a merge, for example.
    // TODO
    // TODO 2012/04/12 Think about using pLVI->queuedOverwrite
    // TODO            fields for this.
    // NOTE
    // NOTE 2012/05/16 Setting this field to NULL caused a problem
    // NOTE            in _deal_with_moved_out_list() and
    // NOTE            sg_wc_liveview_item__alter_structure__move_rename()
    // NOTE            during UPDATE when an ADD-SPECIAL item was initially
    // NOTE            PARKED because of a transient collision (because the
    // NOTE            final UNPARK step uses move/rename to do the work).
    pLVI->pPrescanRow = NULL;

    // because a liveview_item must start as an
    // exact clone of a scanrow, there cannot be
    // any in-tx changes yet for it.
    SG_ERR_CHECK(  SG_WC_DB__PC_ROW__ALLOC(pCtx, &pLVI->pPcRow_PC)  );

    if (statusFlagsAddSpecialReason & SG_WC_STATUS_FLAGS__S__MERGE_CREATED)
        pLVI->pPcRow_PC->flags_net = SG_WC_DB__PC_ROW__FLAGS_NET__ADD_SPECIAL_M;
    else if (statusFlagsAddSpecialReason & SG_WC_STATUS_FLAGS__S__UPDATE_CREATED)
        pLVI->pPcRow_PC->flags_net = SG_WC_DB__PC_ROW__FLAGS_NET__ADD_SPECIAL_U;
    else
        SG_ERR_THROW2(  SG_ERR_INVALIDARG,
                        (pCtx, "Invalid statusFlagsAddSpecialReason for '%s'",
                         pszEntryname)  );

    pLVI->pPcRow_PC->p_s->uiAliasGid = uiAliasGid;
    pLVI->pPcRow_PC->p_s->uiAliasGidParent = uiAliasGidParent;
    SG_ERR_CHECK(  SG_STRDUP(pCtx, pszEntryname, &pLVI->pPcRow_PC->p_s->pszEntryname)  );
    pLVI->pPcRow_PC->p_s->tneType = tneType;

    if (pszHidMerge)
        SG_ERR_CHECK(  SG_STRDUP(pCtx, pszHidMerge, &pLVI->pPcRow_PC->pszHidMerge)  );

    pLVI->tneType = tneType;

    pLVI->scan_flags_Live = SG_WC_PRESCAN_FLAGS__CONTROLLED_ACTIVE_POSTSCAN;

    if (statusFlagsAddSpecialReason & SG_WC_STATUS_FLAGS__A__SPARSE)
    {
        pLVI->pPcRow_PC->flags_net |= SG_WC_DB__PC_ROW__FLAGS_NET__SPARSE;
        SG_ERR_CHECK(  sg_wc_db__state_dynamic__alloc(pCtx, &pLVI->pPcRow_PC->p_d_sparse)  );
        pLVI->pPcRow_PC->p_d_sparse->attrbits = attrbits;
        if (tneType != SG_TREENODEENTRY_TYPE_DIRECTORY)
        {
            SG_ASSERT(  pszHidMerge && *pszHidMerge  );
            SG_ERR_CHECK(  SG_STRDUP(pCtx, pszHidMerge, &pLVI->pPcRow_PC->p_d_sparse->pszHid)  );
        }

        pLVI->scan_flags_Live = SG_WC_PRESCAN_FLAGS__CONTROLLED_ACTIVE_SPARSE;  // not |=
    }

    pLVI->pPcRow_PC->ref_attrbits = attrbits;

    SG_ERR_CHECK(  sg_wc_db__issue__get_issue(pCtx, pWcTx->pDb,
                                              pLVI->uiAliasGid,
                                              &bFoundIssue,
                                              &pLVI->statusFlags_x_xr_xu,
                                              &pLVI->pvhIssue,
                                              &pLVI->pvhSavedResolutions)  );

    *ppLVI = pLVI;
    return;

fail:
    SG_WC_LIVEVIEW_ITEM__NULLFREE(pCtx, pLVI);
}
CRoomBase* CRoom::Clone(s32 id)
{
    CRoomBase *ret = new CRoom( id );
    SG_ASSERT( ret != NULL );
    return ret;
}
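// Note: with a standard (throwing) operator new, this SG_ASSERT can never
// fire; new CRoom(id) raises std::bad_alloc rather than returning NULL.
// The check only has effect in builds where new is configured nothrow.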
void sg_wc_tx__rp__add__lvi(SG_context * pCtx,
                            SG_wc_tx * pWcTx,
                            sg_wc_liveview_item * pLVI,
                            SG_uint32 depth,
                            SG_bool bNoIgnores)
{
    SG_string * pString = NULL;
    SG_wc_status_flags flags;

    // because we are ADDING things, any FILES we touch
    // are new and therefore are not in the TSC, so it
    // doesn't matter.
    SG_bool bNoTSC_DoesNotApply = SG_TRUE;

    // Go ahead and compute the full status flags for this item.
    // This will consistently do the classification of FOUND vs
    // IGNORED vs whatever.
    SG_ERR_CHECK(  sg_wc__status__compute_flags(pCtx, pWcTx, pLVI,
                                                bNoIgnores,
                                                bNoTSC_DoesNotApply,
                                                &flags)  );

#if TRACE_WC_TX_ADD
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               "SG_wc_tx__rp__add: considering '%s' [status 0x%08x]\n",
                               SG_string__sz(pLVI->pStringEntryname),
                               (SG_uint32)flags)  );
#endif

    if (flags & SG_WC_STATUS_FLAGS__R__RESERVED)
        return;

    if (flags & SG_WC_STATUS_FLAGS__T__DEVICE)
    {
        SG_ASSERT(  (flags & (SG_WC_STATUS_FLAGS__U__FOUND|SG_WC_STATUS_FLAGS__U__IGNORED))  );
        SG_ERR_CHECK(  sg_wc_tx__liveview__compute_live_repo_path(pCtx, pWcTx, pLVI, &pString)  );
        SG_ERR_THROW2(  SG_ERR_UNSUPPORTED_DEVICE_SPECIAL_FILE,
                        (pCtx, "%s", SG_string__sz(pString))  );
    }

    if (flags & SG_WC_STATUS_FLAGS__U__IGNORED)
        return;
    if (flags & SG_WC_STATUS_FLAGS__U__LOST)
        return;

    // If the item is FOUND, we try to add it -- by itself.
    // We don't worry about recursion at this point.
    if (flags & SG_WC_STATUS_FLAGS__U__FOUND)
        SG_ERR_CHECK(  _try_to_add_it(pCtx, pWcTx, pLVI)  );

    // For both FOUND and CONTROLLED (active, non-LOST) directories,
    // we want to dive in and ADD their contents (if they requested it).
    if (flags & SG_WC_STATUS_FLAGS__T__DIRECTORY)
    {
        if (depth > 0)
        {
            sg_wc_liveview_dir * pLVD;  // we do not own this
            struct _dive_data dive_data;

#if TRACE_WC_TX_ADD
            SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                                       "SG_wc_tx__add: diving into '%s'\n",
                                       SG_string__sz(pLVI->pStringEntryname))  );
#endif

            // Get the contents of the directory (implicitly causing
            // scandir/readdir as necessary).
            SG_ERR_CHECK(  sg_wc_tx__liveview__fetch_dir(pCtx, pWcTx, pLVI, &pLVD)  );

            dive_data.pWcTx = pWcTx;
            dive_data.depth = depth - 1;
            dive_data.bNoIgnores = bNoIgnores;

            SG_ERR_CHECK(  sg_wc_liveview_dir__foreach(pCtx, pLVD, _dive_cb, &dive_data)  );
        }
    }

    // Silently disregard items already under version control.
    // This lets them do 'vv add *' without getting a bunch of
    // complaints about duplicate adds.

fail:
    SG_STRING_NULLFREE(pCtx, pString);
}
/**
 * Create a new repo in the closet.
 */
static void _vv_verbs__init_new_repo__do_init(SG_context * pCtx,
                                              const char * pszRepoName,
                                              const char * pszStorage,
                                              const char * pszHashMethod,
                                              const char * psz_shared_users,
                                              SG_bool bFromUserMaster,
                                              char ** ppszGidRepoId,
                                              char ** ppszHidCSetFirst)
{
    SG_repo * pRepo = NULL;
    SG_repo * pRepoUserMaster = NULL;
    char * pszUserMasterAdminId = NULL;
    SG_changeset * pCSetFirst = NULL;
    const char * pszHidCSetFirst_ref;
    char * pszHidCSetFirst = NULL;
    char * pszGidRepoId = NULL;
    char bufAdminId[SG_GID_BUFFER_LENGTH];

    // create a completely new repo in the closet.

    SG_NULLARGCHECK_RETURN( pszRepoName );
    // pszStorage is optional
    // pszHashMethod is optional

    SG_ASSERT(SG_FALSE == (psz_shared_users && bFromUserMaster)); // checked in SG_vv_verbs__init_new_repo

    if (psz_shared_users)
    {
        SG_ERR_CHECK(  _vv_verbs__init_new_repo__get_admin_id(pCtx, psz_shared_users, bufAdminId)  );
    }
    else if (bFromUserMaster)
    {
        SG_ERR_CHECK(  SG_REPO__USER_MASTER__OPEN(pCtx, &pRepoUserMaster)  );
        SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepoUserMaster, &pszUserMasterAdminId)  );
        memcpy(bufAdminId, pszUserMasterAdminId, sizeof(bufAdminId));
        //SG_memcpy2(pszUserMasterAdminId, bufAdminId);
        SG_NULLFREE(pCtx, pszUserMasterAdminId);
    }
    else
    {
        SG_ERR_CHECK(  SG_gid__generate(pCtx, bufAdminId, sizeof(bufAdminId))  );
    }

    SG_ERR_CHECK(  SG_repo__create__completely_new__empty__closet(pCtx,
                                                                  bufAdminId,
                                                                  pszStorage,
                                                                  pszHashMethod,
                                                                  pszRepoName)  );
    SG_ERR_CHECK(  SG_REPO__OPEN_REPO_INSTANCE(pCtx, pszRepoName, &pRepo)  );

    if (!psz_shared_users && !bFromUserMaster)
    {
        SG_ERR_CHECK(  SG_user__create_nobody(pCtx, pRepo)  );
    }

    SG_ERR_CHECK(  SG_repo__setup_basic_stuff(pCtx, pRepo, &pCSetFirst, NULL)  );

    if (psz_shared_users)
    {
        SG_ERR_CHECK(  SG_pull__admin(pCtx, pRepo, psz_shared_users, NULL, NULL, NULL, NULL)  );
    }
    else if (bFromUserMaster)
    {
        SG_ERR_CHECK(  SG_pull__admin__local(pCtx, pRepo, pRepoUserMaster, NULL)  );
    }

    SG_ERR_CHECK(  SG_changeset__get_id_ref(pCtx, pCSetFirst, &pszHidCSetFirst_ref)  );
    SG_ERR_CHECK(  SG_STRDUP(pCtx, pszHidCSetFirst_ref, &pszHidCSetFirst)  );

    SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &pszGidRepoId)  );

    *ppszGidRepoId = pszGidRepoId;
    *ppszHidCSetFirst = pszHidCSetFirst;

    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_REPO_NULLFREE(pCtx, pRepoUserMaster);
    SG_CHANGESET_NULLFREE(pCtx, pCSetFirst);
    return;

fail:
    /* If we fail to pull the admin dags after the repo's been created, delete it. */
    if (pRepo)
    {
        SG_REPO_NULLFREE(pCtx, pRepo);
        if (pszRepoName)
            SG_ERR_IGNORE(  _vv_verbs__init_new_repo__delete_new_repo(pCtx, pszRepoName)  );
    }
    SG_REPO_NULLFREE(pCtx, pRepoUserMaster);
    SG_CHANGESET_NULLFREE(pCtx, pCSetFirst);
    SG_NULLFREE(pCtx, pszGidRepoId);
    SG_NULLFREE(pCtx, pszHidCSetFirst);
}
/**
 * This function is used to get a new tree.  It must be called with the HID
 * of the top-level user root directory (the directory which corresponds
 * to @), not the super-root.
 *
 * Record the file timestamp of each file we fetch in pvhTimestamps; this
 * provides the basis for the timestamp check in scan-dir.
 */
static void sg_workingdir__do_get_dir__top(SG_context* pCtx,
                                           SG_repo* pRepo,
                                           const SG_pathname* pPathLocal,
                                           const char* pszidHidTreeNode,
                                           SG_vhash * pvhTimestamps)
{
    SG_pathname* pPathSub = NULL;
    SG_treenode* pTreenode = NULL;
    SG_string* pstrLink = NULL;
    SG_byte* pBytes = NULL;
    SG_vhash* pvhAttributes = NULL;
    SG_int64 iAttributeBits = 0;
    const SG_treenode_entry* pEntry = NULL;
    const char* pszidHidContent = NULL;
#ifdef SG_BUILD_FLAG_FEATURE_XATTR
    const char* pszidHidXattrs = NULL;
#endif
    SG_bool bExists = SG_FALSE;

    /* Load the treenode.  It should have exactly one entry, a subdirectory,
     * named @ */
    SG_ERR_CHECK( SG_treenode__load_from_repo(pCtx, pRepo, pszidHidTreeNode, &pTreenode) );
    SG_ERR_CHECK( SG_treenode__get_nth_treenode_entry__ref(pCtx, pTreenode, 0, NULL, &pEntry) );
    SG_ERR_CHECK( SG_treenode_entry__get_hid_blob(pCtx, pEntry, &pszidHidContent) );

#ifdef DEBUG
    {
        SG_uint32 count;
        SG_treenode_entry_type type;
        const char* pszName = NULL;

        SG_ERR_CHECK( SG_treenode__count(pCtx, pTreenode, &count) );
        SG_ASSERT(1 == count);

        SG_ERR_CHECK( SG_treenode_entry__get_entry_type(pCtx, pEntry, &type) );
        SG_ASSERT(SG_TREENODEENTRY_TYPE_DIRECTORY == type);

        SG_ERR_CHECK( SG_treenode_entry__get_entry_name(pCtx, pEntry, &pszName) );
        SG_ASSERT(0 == strcmp(pszName, "@"));
    }
#endif

    /* create the directory and then dive into it */
    SG_ERR_CHECK( SG_PATHNAME__ALLOC__COPY(pCtx, &pPathSub, pPathLocal) );
    SG_ERR_CHECK( SG_fsobj__exists__pathname(pCtx, pPathSub, &bExists, NULL, NULL) );
    if (!bExists)
        SG_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathSub) );
    SG_ERR_CHECK( _sg_workingdir__get_dir(pCtx, pRepo, pPathSub, pszidHidContent, pvhTimestamps) );

#ifdef SG_BUILD_FLAG_FEATURE_XATTR
    /* fix up the xattrs on that directory */
    SG_ERR_CHECK( SG_treenode_entry__get_hid_xattrs(pCtx, pEntry, &pszidHidXattrs) );
    if (pszidHidXattrs)
    {
        SG_ERR_CHECK( SG_repo__fetch_vhash(pCtx, pRepo, pszidHidXattrs, &pvhAttributes) );
        SG_ERR_CHECK( SG_attributes__xattrs__apply(pCtx, pPathSub, pvhAttributes, pRepo) );
        SG_VHASH_NULLFREE(pCtx, pvhAttributes);
    }
#endif

    /* and the attrbits too */
    SG_ERR_CHECK( SG_treenode_entry__get_attribute_bits(pCtx, pEntry, &iAttributeBits) );
    SG_ERR_CHECK( SG_attributes__bits__apply(pCtx, pPathSub, iAttributeBits) );

    SG_PATHNAME_NULLFREE(pCtx, pPathSub);
    SG_TREENODE_NULLFREE(pCtx, pTreenode);
    return;

fail:
    SG_PATHNAME_NULLFREE(pCtx, pPathSub);
    SG_TREENODE_NULLFREE(pCtx, pTreenode);
    SG_VHASH_NULLFREE(pCtx, pvhAttributes);
    SG_NULLFREE(pCtx, pBytes);
    SG_STRING_NULLFREE(pCtx, pstrLink);
}
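/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * source): fetching a baseline into a working directory.  pRepo, pPathWD,
 * and pszHidRootTreenode are assumed to come from elsewhere (e.g. a
 * changeset lookup); only the timestamp-vhash handling follows the doc
 * comment above.
 */
static void _example__fetch_baseline(SG_context * pCtx,
                                     SG_repo * pRepo,
                                     const SG_pathname * pPathWD,
                                     const char * pszHidRootTreenode)
{
    SG_vhash * pvhTimestamps = NULL;

    SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhTimestamps) );
    SG_ERR_CHECK( sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathWD,
                                                 pszHidRootTreenode,
                                                 pvhTimestamps) );

    // ... persist pvhTimestamps so scan-dir can do its timestamp check ...

fail:
    SG_VHASH_NULLFREE(pCtx, pvhTimestamps);
}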
static void _tree__process_next_pending_item(SG_context * pCtx,
                                             _tree_t * pTree,
                                             SG_vhash * pMergeBaselines)
{
    SG_uint32 i;
    _node_t * pNode = NULL;  // The node we are processing.
    SG_uint32 iNode = 0;     // Index of pNode in the 'pending' list.
    SG_uint32 countVcParents = 0;
    const char ** paszVcParentHids = NULL;
    SG_uint32 iVcParent;

    // The first pending node that needs to be processed is always the one
    // with the highest revno.  Find it in the list.
    for(i=1; i < pTree->pending.count; ++i)
    {
        if(pTree->pending.p[i]->revno > pTree->pending.p[iNode]->revno)
            iNode = i;
    }
    pNode = pTree->pending.p[iNode];

    // Load in the node's display children/vc parents.
    SG_ASSERT(pNode->displayChildren.count==0);
    SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pNode->pVcParents) );
    SG_ERR_CHECK( SG_dagnode__get_parents__ref(pCtx, pNode->pDagnode, &countVcParents, &paszVcParentHids) );
    for(iVcParent=0; iVcParent<countVcParents; ++iVcParent)
    {
        // Each vc parent is a candidate display child.
        const char * pszHidCandidate = paszVcParentHids[iVcParent];
        _node_t * pNodeRef = NULL;

        // Scan through the list of 'pending' nodes to see if we have
        // already fetched this one...
        SG_uint32 iCandidate = pTree->pending.count;
        for(i=0; i < pTree->pending.count && iCandidate==pTree->pending.count; ++i)
        {
            if(strcmp(pTree->pending.p[i]->pszHidRef, pszHidCandidate)==0)
            {
                iCandidate = i;
                pNodeRef = pTree->pending.p[i];
            }
        }

        if(iCandidate == pTree->pending.count)
        {
            // Node was not found.  Add it new.
            SG_ERR_CHECK( _tree__add_new_node(pCtx, pTree, pNode, pszHidCandidate, &pNodeRef) );
        }
        else if(iCandidate > iNode)
        {
            // Node was found further to the right in the tree.  Steal it.
            SG_ERR_CHECK( _tree__move_node(pCtx, pTree->pending.p[iCandidate], pNode) );

            // Also, remove it from the pending list.  (It gets re-added later.)
            _node_list__remove_at(&pTree->pending, iCandidate);
        }
        else
        {
            // Node was found further to the left.  Do nothing.
        }

        SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pNode->pVcParents, pszHidCandidate, pNodeRef->revno) );
    }

    // We have all this node's display children (still pending--they could
    // get stolen later).  Now we need to sort them.
    if(pNode->displayChildren.count>1)
    {
        // First we pick one to go on the far left, if one stands out as
        // most likely to be the "old"/baseline node into which the others
        // were "brought in".
        SG_uint32 iBaseline = pNode->displayChildren.count;

        // Allow the caller to have hand-picked the baseline node:
        if(pMergeBaselines!=NULL)
        {
            SG_int_to_string_buffer sz;
            SG_int64 baseline = 0;
            SG_ERR_CHECK( SG_vhash__check__int64(pCtx, pMergeBaselines, SG_int64_to_sz(pNode->revno, sz), &baseline) );
            if(baseline!=0)
            {
                for(i=0; i<pNode->displayChildren.count; ++i)
                {
                    if(pNode->displayChildren.p[i]->revno==(SG_uint32)baseline)
                    {
                        iBaseline = i;
                        break;
                    }
                }
            }
        }

        if(iBaseline == pNode->displayChildren.count)
        {
            // No baseline node from the user.  See if there's one unique
            // node whose user *doesn't* match.
            for(i=0; i<pNode->displayChildren.count; ++i)
            {
                SG_bool match = SG_FALSE;
                SG_ERR_CHECK( _user_match_found(pCtx, pTree->pRepoRef, pNode->displayChildren.p[i], pNode, &match) );
                if(!match)
                {
                    if(iBaseline == pNode->displayChildren.count)
                    {
                        iBaseline = i;
                    }
                    else
                    {
                        // Whoops.  "Nevermind."
                        iBaseline = pNode->displayChildren.count;
                        break;
                    }
                }
            }
        }

        // Finally, sort.
        _node_list__sort(&pNode->displayChildren, iBaseline);
    }

    // In the 'pending' list, replace this node with its children.
    if(pNode->displayChildren.count == 0)
        _node_list__remove_at(&pTree->pending, iNode);
    else
    {
        pTree->pending.p[iNode] = pNode->displayChildren.p[0];
        if(pNode->displayChildren.count > 1)
        {
            SG_ERR_CHECK( _node_list__insert_at(pCtx,
                                                &pTree->pending,
                                                iNode+1,
                                                &pNode->displayChildren.p[1],
                                                pNode->displayChildren.count-1) );
        }
    }

    // This node is no longer pending.
    pNode->isPending = SG_FALSE;

    return;
fail:
    ;
}
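/*
 * Worked example (not part of the original source) of the pending-list
 * surgery above: suppose pending = [A(revno 7), B(revno 9)] and processing
 * B yields sorted display children C and D.  B is replaced in place by C
 * and D is inserted after it, giving pending = [A, C, D]; a node with no
 * display children is simply removed.
 *
 * Below is an illustrative sketch (an assumption, not the original
 * implementation) of the _node_list__remove_at helper used above; the
 * p/count layout and the _node_list_t type name are inferred from usage.
 */
static void _node_list__remove_at(_node_list_t * pList, SG_uint32 i)
{
    SG_uint32 j;

    // Shift the tail left over the removed slot.  Nothing is freed here:
    // the tree, not the pending list, is assumed to own the nodes.
    for (j = i; j + 1 < pList->count; ++j)
        pList->p[j] = pList->p[j+1];
    --pList->count;
}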
void GoRegionBoard::Fini()
{
    // Class-level finalization check: every GoRegionBoard that was
    // allocated must also have been freed.
    SG_ASSERT(s_alloc == s_free);
}
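// Illustrative, self-contained sketch (not Fuego code) of the static
// allocation-counter pattern that Fini() checks: the constructor and
// destructor bump matching counters, so a final s_alloc == s_free assert
// proves no instance leaked.  The class name here is made up.
#include <cassert>

class Counted
{
public:
    Counted() { ++s_alloc; }
    ~Counted() { ++s_free; }

    // Call once at shutdown: every allocation must have been freed.
    static void Fini() { assert(s_alloc == s_free); }

private:
    static int s_alloc;
    static int s_free;
};

int Counted::s_alloc = 0;
int Counted::s_free = 0;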
static void _sg_mergereview(SG_context * pCtx,
                            _tree_t * pTree,
                            SG_int32 singleMergeReview,
                            SG_bool firstChunk,
                            SG_vhash * pMergeBaselines,
                            SG_uint32 resultLimit,
                            SG_varray ** ppResults,
                            SG_uint32 * pCountResults,
                            SG_varray ** ppContinuationToken)
{
    SG_varray * pResults = NULL;
    SG_uint32 countResults = 0;
    SG_bool lastResultWasIndented = SG_FALSE;
    SG_varray * pContinuationToken = NULL;

    SG_ASSERT(pCtx!=NULL);
    SG_ASSERT(pTree!=NULL);
    SG_ASSERT(ppResults!=NULL);

    SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &pResults) );

    while(
        // Stop if we have reached the result limit.
        ! (countResults >= resultLimit)

        // Stop if we have completed a "single merge review" that was asked for.
        && ! (pTree->indentLevel == 0 && singleMergeReview && (countResults>1 || !firstChunk))

        // Stop if we've walked all the way back to the root of the vc dag.
        && ! (pTree->pNextResult==NULL)
        )
    {
        // Process the next item in the pending list.  Note that this may
        // cause any number of results to become available.
        SG_ERR_CHECK( _tree__process_next_pending_item(pCtx, pTree, pMergeBaselines) );

        // Fetch results until we don't need any more or none are available.
        while(
            // Stop if we have reached the result limit.
            ! (countResults >= resultLimit)

            // Stop if we have completed a "single merge review" that was asked for.
            && ! (pTree->indentLevel == 0 && singleMergeReview && (countResults>1 || !firstChunk))

            // Stop if we've walked all the way back to the root of the vc dag.
            && ! (pTree->pNextResult==NULL)

            // Stop if the next node is in a pending state.
            // (Can only happen when we were starting from a "continuation token".)
            && ! (pTree->pNextResult->isPending)

            // Stop if the next node has any display children in a pending state.
            && ! (_node__has_pending_children(pTree->pNextResult))
            )
        {
            SG_ERR_CHECK( _tree__add_next_node_to_results(pCtx, pTree, pResults, &countResults, &lastResultWasIndented) );
        }
    }

    if(countResults<resultLimit && lastResultWasIndented)
    {
        SG_ASSERT(pTree->pNextResult!=NULL); // VC root will never be indented.
        SG_ERR_CHECK( _tree__add_next_node_to_results(pCtx, pTree, pResults, &countResults, &lastResultWasIndented) );
    }

    if(ppContinuationToken!=NULL)
    {
        if(pTree->pNextResult==NULL)
        {
            *ppContinuationToken = NULL;
        }
        else
        {
            SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &pContinuationToken) );
            if(pTree->pNextResult==pTree->pRoot)
            {
                SG_ERR_CHECK( SG_varray__append__string__sz(pCtx, pContinuationToken, pTree->pNextResult->pszHidRef) );
            }
            else
            {
                SG_ERR_CHECK( _tree__generate_continuation_token(pCtx, pTree, pContinuationToken) );
            }
            *ppContinuationToken = pContinuationToken;
        }
    }

    *ppResults = pResults;
    if(pCountResults != NULL)
        *pCountResults = countResults;

    return;
fail:
    SG_VARRAY_NULLFREE(pCtx, pResults);
    SG_VARRAY_NULLFREE(pCtx, pContinuationToken);
}
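/*
 * Illustrative sketch (an assumption, not the original implementation) of
 * the _node__has_pending_children helper tested in the inner loop above.
 * The isPending flag and the displayChildren p/count layout are taken from
 * _tree__process_next_pending_item.
 */
static SG_bool _node__has_pending_children(const _node_t * pNode)
{
    SG_uint32 i;
    for (i = 0; i < pNode->displayChildren.count; ++i)
    {
        if (pNode->displayChildren.p[i]->isPending)
            return SG_TRUE;
    }
    return SG_FALSE;
}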
void GoRegionBoard::OnUndoneMove()
// Called after a move has been undone.  The board is guaranteed to be in
// a legal state.
{
    //SG_ASSERT(false); // incremental code is incomplete, do not call
    if (DEBUG_REGION_BOARD)
        SgDebug() << "OnUndoneMove " << '\n';

    const bool IS_UNDO = false;
    SgVectorOf<GoRegion> changed;

    for (int val = m_stack.PopEvent(); val != SG_NEXTMOVE;
         val = m_stack.PopEvent())
    {
        switch (val)
        {
        case REGION_REMOVE:
        {
            GoRegion* r = static_cast<GoRegion*>(m_stack.PopPtr());
            AddRegion(r, IS_UNDO);
            changed.Insert(r);
        }
        break;
        case REGION_ADD:
        {
            GoRegion* r = static_cast<GoRegion*>(m_stack.PopPtr());
            RemoveRegion(r, IS_UNDO);
        }
        break;
        case REGION_REMOVE_BLOCK:
        {
            GoBlock* b = static_cast<GoBlock*>(m_stack.PopPtr());
            AddBlock(b, IS_UNDO);
            for (int nu = m_stack.PopInt(); nu > 0; --nu)
            {
                GoRegion* r = static_cast<GoRegion*>(m_stack.PopPtr());
                if (CHECK)
                    SG_ASSERT(! r->Blocks().Contains(b));
                r->BlocksNonConst().PushBack(b);
                changed.Insert(r);
            }
        }
        break;
        case REGION_ADD_BLOCK:
        {
            GoBlock* b = static_cast<GoBlock*>(m_stack.PopPtr());
            RemoveBlock(b, IS_UNDO, true);
        }
        break;
        case REGION_ADD_STONE:
        {
            GoRegion* r = static_cast<GoRegion*>(m_stack.PopPtr());
            SgPoint p = m_stack.PopInt();
            r->OnRemoveStone(p);
            m_region[r->Color()][p] = r;
            changed.Insert(r);
        }
        break;
        case REGION_ADD_STONE_TO_BLOCK:
        {
            GoBlock* b = static_cast<GoBlock*>(m_stack.PopPtr());
            SgPoint p = m_stack.PopInt();
            b->RemoveStone(p);
            m_block[p] = 0;
        }
        break;
        default:
            SG_ASSERT(false);
        }
    }

    for (SgVectorIteratorOf<GoRegion> it(changed); it; ++it)
    {
        (*it)->ResetNonBlockFlags();
        (*it)->ComputeBasicFlags();
    }

    if (HEAVYCHECK)
    {
        for (SgBWIterator it; it; ++it)
        {
            SgBlackWhite color(*it);
            for (SgVectorIteratorOf<GoRegion> it2(AllRegions(color)); it2; ++it2)
            {
                const GoRegion* r = *it2;
                SG_UNUSED(r);
                SG_ASSERT(r->IsValid());
            }
        }
    }

    m_code = Board().GetHashCode();
    if (HEAVYCHECK)
        CheckConsistency();
}
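// Illustrative, self-contained sketch (not Fuego code) of the marker-based
// event-stack protocol that OnUndoneMove() unwinds: during a move, writers
// push a per-move marker, then data followed by an event tag for each
// change; undo pops tags until the marker and inverts each change in
// reverse order.  All names below are made up for the example.
#include <cassert>
#include <stack>
#include <vector>

enum Event { NEXTMOVE = -1, ITEM_ADD = 1 };

int main()
{
    std::vector<int> board;   // stand-in for the incremental state
    std::stack<int> events;   // stand-in for m_stack

    events.push(NEXTMOVE);    // marker: this move's events start here
    board.push_back(42);      // apply a change...
    events.push(42);          // ...record its data,
    events.push(ITEM_ADD);    // ...then its tag (popped first on undo)

    // Undo: pop tags until the per-move marker, inverting each change.
    for (int val = events.top(); val != NEXTMOVE; val = events.top())
    {
        events.pop();
        if (val == ITEM_ADD)
        {
            int data = events.top();
            events.pop();
            assert(board.back() == data);
            board.pop_back();  // invert the recorded add
        }
    }
    events.pop();             // consume the NEXTMOVE marker
    assert(board.empty());
    return 0;
}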