/**
 * Exercise basic SG_vector operations: alloc, length, append, get, set, clear.
 *
 * We fabricate distinct pointer values from the addresses of two stack
 * variables and verify the vector stores and returns them faithfully.
 * The fabricated pointers are only ever compared, never dereferenced.
 */
void MyFn(test1)(SG_context * pCtx)
{
	SG_vector * pVec = NULL;
	SG_uint32 k, ndx, len;
	void * pValue;
	SG_uint32 variable_1 = 0;
	SG_uint32 variable_2 = 0;

	// Fix: macro arguments are now fully parenthesized so expressions such
	// as ADDR_1(k+1) expand correctly (the original expanded the bare arg).
	// NOTE(review): (&variable_1)+(k) for k>1 is out-of-bounds pointer
	// arithmetic; acceptable here only because the values are never
	// dereferenced — they serve purely as unique tokens.
#define ADDR_1(k) ((void *)((&variable_1)+(k)))
#define ADDR_2(k) ((void *)((&variable_2)+(k)))

	// The alloc argument (20) is an initial capacity hint: the vector starts
	// with length 0 and must grow past the hint as we append 100 items.
	VERIFY_ERR_CHECK( SG_vector__alloc(pCtx,&pVec,20) );
	VERIFY_ERR_CHECK( SG_vector__length(pCtx,pVec,&len) );
	VERIFY_COND("test1",(len==0));

	for (k=0; k<100; k++)
	{
		// fabricate a bogus pointer from a constant and stuff it into the vector.
		VERIFY_ERR_CHECK( SG_vector__append(pCtx,pVec,ADDR_1(k),&ndx) );
		VERIFY_COND("append",(ndx==k));
	}

	// verify each slot reads back exactly what was appended.
	for (k=0; k<100; k++)
	{
		VERIFY_ERR_CHECK( SG_vector__get(pCtx,pVec,k,&pValue) );
		VERIFY_COND("get1",(pValue == ADDR_1(k)));
	}

	// overwrite every slot in place, then verify the new values.
	for (k=0; k<100; k++)
	{
		VERIFY_ERR_CHECK( SG_vector__set(pCtx,pVec,k,ADDR_2(k)) );
	}
	for (k=0; k<100; k++)
	{
		VERIFY_ERR_CHECK( SG_vector__get(pCtx,pVec,k,&pValue) );
		VERIFY_COND("get2",(pValue == ADDR_2(k)));
	}

	VERIFY_ERR_CHECK( SG_vector__length(pCtx,pVec,&len) );
	VERIFY_COND("test1",(len==100));

	// clear must reset the length to zero.
	VERIFY_ERR_CHECK( SG_vector__clear(pCtx,pVec) );
	VERIFY_ERR_CHECK( SG_vector__length(pCtx,pVec,&len) );
	VERIFY_COND("test1",(len==0));

	// fall thru to common cleanup

fail:
	SG_VECTOR_NULLFREE(pCtx, pVec);

	// keep these helper macros local to this test function.
#undef ADDR_1
#undef ADDR_2
}
/**
 * Verify SG_VECTOR__ALLOC__COPY in deep-copy mode: each pointer in the copy
 * must be a distinct allocation whose pointed-to value equals the original's.
 */
void MyFn(alloc__copy__deep)(SG_context * pCtx)
{
	static const SG_uint32 uSize = 100u;
	SG_vector* pVector  = NULL;
	SG_vector* pCopy    = NULL;
	SG_uint32  uIndex   = 0u;
	SG_uint32  uOutput1 = 0u;
	SG_uint32  uOutput2 = 0u;
	void*      pOutput1 = NULL;
	void*      pOutput2 = NULL;

	VERIFY_ERR_CHECK( SG_VECTOR__ALLOC(pCtx, &pVector, uSize) );

	// add some allocated data to the vector
	for (uIndex = 0u; uIndex < uSize; ++uIndex)
	{
		SG_uint32* pValue = NULL;
		VERIFY_ERR_CHECK( SG_alloc1(pCtx, pValue) );
		*pValue = uIndex;
		VERIFY_ERR_CHECK( SG_vector__append(pCtx, pVector, pValue, &uOutput1) );
		VERIFY_COND("Added item has unexpected index.", uOutput1 == uIndex);
	}

	// copy the vector
	VERIFY_ERR_CHECK( SG_VECTOR__ALLOC__COPY(pCtx, pVector, MyFn(copy_uint32), MyFn(free_uint32), &pCopy) );

	// verify that the copy matches the original
	VERIFY_ERR_CHECK( SG_vector__length(pCtx, pVector, &uOutput1) );
	VERIFY_ERR_CHECK( SG_vector__length(pCtx, pCopy, &uOutput2) );
	VERIFY_COND("Copied vector's length doesn't match added item count.", uOutput1 == uSize);
	VERIFY_COND("Copied vector's length doesn't match original.", uOutput1 == uOutput2);
	for (uIndex = 0u; uIndex < uOutput1; ++uIndex)
	{
		// Fix: use dedicated locals for the dereferenced values.  The
		// original reused uOutput1/uOutput2 here, clobbering the loop bound
		// (uOutput1) after the first pass so only index 0 was ever verified.
		SG_uint32 uValue1 = 0u;
		SG_uint32 uValue2 = 0u;

		VERIFY_ERR_CHECK( SG_vector__get(pCtx, pVector, uIndex, &pOutput1) );
		VERIFY_ERR_CHECK( SG_vector__get(pCtx, pCopy, uIndex, &pOutput2) );

		// deep copy => the copy must hold a *different* allocation...
		VERIFYP_COND("Copied vector's pointer value matches original after deep copy.", pOutput1 != pOutput2, ("index(%u)", uIndex));

		// ...whose pointed-to value is equal.  (%u, not %d: index is unsigned.)
		uValue1 = *((SG_uint32*)pOutput1);
		uValue2 = *((SG_uint32*)pOutput2);
		VERIFYP_COND("Copied vector's pointed-to value doesn't match original after deep copy.", uValue1 == uValue2, ("index(%u)", uIndex));
	}

fail:
	// Push/pop an error level so cleanup runs even when pCtx carries an error.
	SG_context__push_level(pCtx);
	SG_vector__free__with_assoc(pCtx, pVector, MyFn(free_uint32));
	SG_vector__free__with_assoc(pCtx, pCopy, MyFn(free_uint32));
	SG_context__pop_level(pCtx);
}
/**
 * Verify SG_VECTOR__ALLOC__COPY in shallow-copy mode (NULL copy/free
 * callbacks): the copy must hold exactly the same pointer values as the
 * original.
 */
void MyFn(alloc__copy__shallow)(SG_context * pCtx)
{
	static const SG_uint32 uSize = 100u;
	SG_vector* pVector  = NULL;
	SG_vector* pCopy    = NULL;
	SG_uint32  uIndex   = 0u;
	SG_uint32  uOutput1 = 0u;
	SG_uint32  uOutput2 = 0u;
	void*      pOutput1 = NULL;
	void*      pOutput2 = NULL;

	VERIFY_ERR_CHECK( SG_VECTOR__ALLOC(pCtx, &pVector, uSize) );

	// add some random stack data to the vector
	// NOTE(review): &uIndex + uIndex fabricates pointers past the variable;
	// they are only compared, never dereferenced.
	for (uIndex = 0u; uIndex < uSize; ++uIndex)
	{
		VERIFY_ERR_CHECK( SG_vector__append(pCtx, pVector, &uIndex + uIndex, &uOutput1) );
		VERIFY_COND("Added item has unexpected index.", uOutput1 == uIndex);
	}

	// copy the vector (NULL callbacks => shallow copy)
	VERIFY_ERR_CHECK( SG_VECTOR__ALLOC__COPY(pCtx, pVector, NULL, NULL, &pCopy) );

	// verify that the copy matches the original
	VERIFY_ERR_CHECK( SG_vector__length(pCtx, pVector, &uOutput1) );
	VERIFY_ERR_CHECK( SG_vector__length(pCtx, pCopy, &uOutput2) );
	VERIFY_COND("Copied vector's length doesn't match added item count.", uOutput1 == uSize);
	VERIFY_COND("Copied vector's length doesn't match original.", uOutput1 == uOutput2);
	for (uIndex = 0u; uIndex < uOutput1; ++uIndex)
	{
		VERIFY_ERR_CHECK( SG_vector__get(pCtx, pVector, uIndex, &pOutput1) );
		VERIFY_ERR_CHECK( SG_vector__get(pCtx, pCopy, uIndex, &pOutput2) );
		// Fix: the original format string was "index(%d" — missing the
		// closing paren and using %d for an unsigned value.
		VERIFYP_COND("Copied vector's value doesn't match original.", pOutput1 == pOutput2, ("index(%u)", uIndex));
	}

fail:
	SG_VECTOR_NULLFREE(pCtx, pVector);
	SG_VECTOR_NULLFREE(pCtx, pCopy);
}
/**
 * The values for RENAME, MOVE, ATTRBITS, SYMLINKS, and SUBMODULES are collapsable. (see below)
 * In the corresponding rbUnique's we only need to remember the set of unique values for the
 * field.  THESE ARE THE KEYS IN THE prbUnique.
 *
 * As a convenience, we associate a vector of entries with each key.  These form a many-to-one
 * thing so that we can report all of the entries that have this value.
 *
 * Here we carry-forward the values from a sub-merge to the outer-merge by copying the keys
 * in the source-rbtree and inserting in the destination-rbtree.
 *
 * NOTE: the term sub-merge here refers to the steps within an n-way merge;
 * it DOES NOT refer to a submodule.
 */
static void _carry_forward_unique_values(SG_context * pCtx,
										 SG_rbtree * prbDest,
										 SG_rbtree * prbSrc)
{
	SG_rbtree_iterator * pIter = NULL;
	SG_vector * pVec_Allocated = NULL;
	const char * pszKey;
	SG_vector * pVec_Src;
	SG_vector * pVec_Dest;
	SG_uint32 j, nr;
	SG_bool bFound;

	SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx,&pIter,prbSrc,&bFound,&pszKey,(void **)&pVec_Src) );
	while (bFound)
	{
		// find (or create) the destination vector for this key.
		SG_ERR_CHECK( SG_rbtree__find(pCtx,prbDest,pszKey,&bFound,(void **)&pVec_Dest) );
		if (!bFound)
		{
			SG_ERR_CHECK( SG_VECTOR__ALLOC(pCtx,&pVec_Allocated,3) );
			SG_ERR_CHECK( SG_rbtree__add__with_assoc(pCtx,prbDest,pszKey,pVec_Allocated) );
			pVec_Dest = pVec_Allocated;
			pVec_Allocated = NULL;			// rbtree owns this now
		}

		// append every source entry for this key onto the destination vector.
		SG_ERR_CHECK( SG_vector__length(pCtx,pVec_Src,&nr) );
		for (j=0; j<nr; j++)
		{
			SG_mrg_cset_entry * pMrgCSetEntry_x;

			SG_ERR_CHECK( SG_vector__get(pCtx,pVec_Src,j,(void **)&pMrgCSetEntry_x) );
			SG_ERR_CHECK( SG_vector__append(pCtx,pVec_Dest,pMrgCSetEntry_x,NULL) );
#if TRACE_WC_MERGE
			SG_ERR_IGNORE( SG_console(pCtx,SG_CS_STDERR,"_carry_forward_unique_value: [%s][%s]\n",
									  pszKey,
									  SG_string__sz(pMrgCSetEntry_x->pMrgCSet->pStringCSetLabel)) );
#endif
		}

		// Fix: the original passed NULL for the assoc-value here, so
		// pVec_Src was never refreshed and every key after the first
		// re-copied the FIRST key's source vector.
		SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx,pIter,&bFound,&pszKey,(void **)&pVec_Src) );
	}

fail:
	SG_RBTREE_ITERATOR_NULLFREE(pCtx,pIter);
}
/**
 * Check that every vector slot in [uBegin, uEnd) points to a SG_uint32
 * whose value equals the slot index shifted by iOffset.
 */
void MyFn(_verify_offset_values)(
	SG_context* pCtx,
	SG_vector*  pVector,
	SG_uint32   uBegin,
	SG_uint32   uEnd,
	SG_int32    iOffset
	)
{
	SG_uint32 uCurrent;

	for (uCurrent = uBegin; uCurrent < uEnd; ++uCurrent)
	{
		SG_uint32* pStored = NULL;
		// unsigned wraparound gives the intended result for negative offsets
		SG_uint32  uWanted = uCurrent + iOffset;

		VERIFY_ERR_CHECK( SG_vector__get(pCtx, pVector, uCurrent, (void**)&pStored) );
		VERIFYP_COND("Incorrect value.", *pStored == uWanted, ("Expected(%u) Actual (%u) Index(%u)", uWanted, *pStored, uCurrent));
	}

fail:
	return;
}
/**
 * Handle the UPDATE command.
 *
 * Locates the pending-tree/repo from the current working directory,
 * resolves an optional target changeset from a --rev or --tag argument,
 * performs the update, and (when no explicit target was given) prints
 * advice about other heads afterwards.
 */
void do_cmd_update(SG_context * pCtx,
				   SG_option_state * pOptSt)
{
	SG_pathname * pPathCwd = NULL;
	SG_pendingtree * pPendingTree = NULL;
	SG_repo * pRepo;					// borrowed from pPendingTree: nulled, never freed, below
	char * pszTargetChangeset = NULL;
	char * pszBaselineBeforeUpdate = NULL;

	// use the current directory to find the pending-tree, the repo, and the current baseline.

	SG_ERR_CHECK(  SG_PATHNAME__ALLOC(pCtx, &pPathCwd)  );
	SG_ERR_CHECK(  SG_pathname__set__from_cwd(pCtx, pPathCwd)  );

	SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, pPathCwd, pOptSt->bIgnoreWarnings, &pPendingTree)  );
	SG_ERR_CHECK(  SG_pendingtree__get_repo(pCtx, pPendingTree, &pRepo)  );

	// remember the baseline as it was before the update, for the advisory message at the end.
	SG_ERR_CHECK(  _get_baseline(pCtx, pPendingTree, &pszBaselineBeforeUpdate)  );

	// determine the target changeset
	// we check that we have at most 1 rev *or* 1 tag up in sg.c
	if (pOptSt->iCountRevs == 1)
	{
		SG_rev_tag_obj* pRTobj = NULL;
		const char * psz_rev_0;

		// resolve the (possibly abbreviated) revision to a full changeset HID.
		SG_ERR_CHECK(  SG_vector__get(pCtx, pOptSt->pvec_rev_tags, 0, (void**)&pRTobj)  );
		psz_rev_0 = pRTobj->pszRevTag;
		SG_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, psz_rev_0, &pszTargetChangeset)  );
	}
	else if (pOptSt->iCountTags == 1)
	{
		SG_rev_tag_obj* pRTobj = NULL;
		const char * psz_tag_0;

		// resolve the tag to the changeset HID it labels.
		SG_ERR_CHECK(  SG_vector__get(pCtx, pOptSt->pvec_rev_tags, 0, (void**)&pRTobj)  );
		psz_tag_0 = pRTobj->pszRevTag;
		SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, psz_tag_0, &pszTargetChangeset)  );
	}
	else
	{
		// pass NULL for target changeset and let the UPDATE code find the proper head/tip.
	}

	SG_ERR_CHECK(  _my_do_cmd_update(pCtx, pOptSt, pPendingTree, pszTargetChangeset)  );

	// NOTE(review): _my_do_cmd_update presumably takes/uses the pending-tree;
	// we free it here and drop our borrowed repo pointer with it.
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
	pRepo = NULL;

	if (pszTargetChangeset == NULL)
	{
		// if they didn't ask for a specific changeset (and we successfully
		// went to the SINGLE/UNIQUE DESCENDANT HEAD from their (then current)
		// BASELINE, we should look around and see if there are other heads/leaves
		// and advise them to MERGE with them.
		//
		// Since we did successfully do the UPDATE we should exit with OK, so
		// I'm going to do all of this advisory stuff in an IGNORE.

		SG_ERR_IGNORE(  _advise_after_update(pCtx, pOptSt, pPathCwd, pszBaselineBeforeUpdate)  );
	}

fail:
	// NULLFREE macros are safe on NULL, so the second pendingtree free on the
	// success path is a no-op.
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
	SG_PATHNAME_NULLFREE(pCtx, pPathCwd);
	SG_NULLFREE(pCtx, pszTargetChangeset);
	SG_NULLFREE(pCtx, pszBaselineBeforeUpdate);
}