void SG_dagfrag__equal(SG_context * pCtx, const SG_dagfrag * pFrag1, const SG_dagfrag * pFrag2, SG_bool * pbResult)
{
    SG_rbtree_iterator * pIter1 = NULL;
    SG_rbtree_iterator * pIter2 = NULL;
    const char * szKey1;
    const char * szKey2;
    _my_data * pMyData1;
    _my_data * pMyData2;
    SG_bool bFound1, bFound2, bEqualDagnodes;
    SG_bool bFinalResult = SG_FALSE;

    SG_NULLARGCHECK_RETURN(pFrag1);
    SG_NULLARGCHECK_RETURN(pFrag2);
    SG_NULLARGCHECK_RETURN(pbResult);

    // we compare the RB-Cache because it has everything in it.
    // the work-queues are transient (used during __add_leaf()).
    // the generation-sorted-member-cache is only around when
    // needed and is a subset of the RB-Cache.
    //
    // since the RB-Cache is ordered by HID, we don't need to do
    // any sorting. just walk both versions in parallel.

    SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx,&pIter1,pFrag1->m_pRB_Cache,&bFound1,&szKey1,(void **)&pMyData1) );
    SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx,&pIter2,pFrag2->m_pRB_Cache,&bFound2,&szKey2,(void **)&pMyData2) );
    while (1)
    {
        if (!bFound1 && !bFound2)
            goto Equal;
        if (!bFound1 || !bFound2)
            goto Different;
        if (strcmp(szKey1,szKey2) != 0)
            goto Different;
        if (pMyData1->m_state != pMyData2->m_state)
            goto Different;
        if (SG_DFS_END_FRINGE != pMyData1->m_state)
        {
            if (pMyData1->m_genDagnode != pMyData2->m_genDagnode)
                goto Different;
            SG_ERR_CHECK( SG_dagnode__equal(pCtx,pMyData1->m_pDagnode,pMyData2->m_pDagnode,&bEqualDagnodes) );
            if (!bEqualDagnodes)
                goto Different;
        }

        SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx,pIter1,&bFound1,&szKey1,(void **)&pMyData1) );
        SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx,pIter2,&bFound2,&szKey2,(void **)&pMyData2) );
    }

Equal:
    bFinalResult = SG_TRUE;

Different:
    *pbResult = bFinalResult;

fail:
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter1);
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter2);
}
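/* A minimal usage sketch, not part of the original source: it assumes the
 * caller already holds two populated SG_dagfrag pointers and simply reports
 * whether SG_dagfrag__equal (above) considers them equal.  The helper name
 * _example__report_frags_equal and the emitted strings are hypothetical. */
static void _example__report_frags_equal(SG_context * pCtx, const SG_dagfrag * pFrag1, const SG_dagfrag * pFrag2)
{
    SG_bool bEqual = SG_FALSE;

    // compare the two fragments; on error the macro returns immediately.
    SG_ERR_CHECK_RETURN( SG_dagfrag__equal(pCtx, pFrag1, pFrag2, &bEqual) );

    // report the result on the message channel.
    SG_ERR_CHECK_RETURN( SG_context__msg__emit(pCtx, (bEqual ? "frags are equal\n" : "frags differ\n")) );
}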
void SG_dagfrag__get_dagnum(SG_context * pCtx, SG_dagfrag* pFrag, SG_uint32* piDagNum)
{
    SG_NULLARGCHECK_RETURN(pFrag);
    SG_NULLARGCHECK_RETURN(piDagNum);

    *piDagNum = pFrag->m_iDagNum;
}
void sg_client__c__pull_clone(
    SG_context* pCtx,
    SG_client* pClient,
    const SG_pathname* pStagingPathname,
    char** ppszFragballName)
{
    sg_client_c_instance_data* pMe = NULL;
    SG_repo* pRepo = NULL;
    SG_vhash* pvhStatus = NULL;

    SG_NULLARGCHECK_RETURN(pClient);
    SG_NULLARGCHECK_RETURN(ppszFragballName);

    pMe = (sg_client_c_instance_data*)pClient->p_vtable_instance_data;

    SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, pClient->psz_remote_repo_spec, &pRepo) );

    SG_ERR_CHECK( SG_context__msg__emit(pCtx, "Copying repository...") );
    SG_ERR_CHECK( SG_repo__fetch_repo__fragball(pCtx, pRepo, pStagingPathname, ppszFragballName) );
    SG_ERR_CHECK( SG_context__msg__emit(pCtx, "done") );

    /* fall through */
fail:
    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_VHASH_NULLFREE(pCtx, pvhStatus);
    SG_ERR_IGNORE( SG_context__msg__emit(pCtx, "\n") );
}
void SG_server__push_add(
    SG_context* pCtx,
    SG_server * pServer,
    const char* pPushId,
    const char* psz_fragball_name,
    SG_vhash** ppResult
    )
{
    SG_staging* pStaging = NULL;
    SG_vhash* pvh_status = NULL;
    SG_repo* pRepo = NULL;

    SG_NULLARGCHECK_RETURN(pServer);
    SG_NULLARGCHECK_RETURN(pPushId);
    SG_NULLARGCHECK_RETURN(ppResult);

    SG_ERR_CHECK( SG_staging__open(pCtx, pPushId, &pStaging) );
    SG_ERR_CHECK( SG_staging__slurp_fragball(pCtx, pStaging, psz_fragball_name) );
    SG_ERR_CHECK( SG_staging__check_status(pCtx, pStaging, SG_TRUE, SG_TRUE, SG_FALSE, SG_TRUE, SG_TRUE, &pvh_status) );

    *ppResult = pvh_status;
    pvh_status = NULL;

    /* fallthru */
fail:
    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_VHASH_NULLFREE(pCtx, pvh_status);
    SG_STAGING_NULLFREE(pCtx, pStaging);
}
void SG_mrg_cset_entry_conflict__alloc(SG_context * pCtx,
                                       SG_mrg_cset * pMrgCSet,
                                       SG_mrg_cset_entry * pMrgCSetEntry_Ancestor,
                                       SG_mrg_cset_entry * pMrgCSetEntry_Baseline,
                                       SG_mrg_cset_entry_conflict ** ppMrgCSetEntryConflict)
{
    SG_mrg_cset_entry_conflict * pMrgCSetEntryConflict = NULL;

    SG_NULLARGCHECK_RETURN(pMrgCSet);
    SG_NULLARGCHECK_RETURN(pMrgCSetEntry_Ancestor);
    // pMrgCSetEntry_Baseline may or may not be null
    SG_NULLARGCHECK_RETURN(ppMrgCSetEntryConflict);

    // we allocate and return this.  we DO NOT automatically add it to
    // the conflict-list in the cset.

    SG_ERR_CHECK_RETURN( SG_alloc1(pCtx,pMrgCSetEntryConflict) );

    pMrgCSetEntryConflict->pMrgCSet = pMrgCSet;
    pMrgCSetEntryConflict->pMrgCSetEntry_Ancestor = pMrgCSetEntry_Ancestor;
    pMrgCSetEntryConflict->pMrgCSetEntry_Baseline = pMrgCSetEntry_Baseline;

    // defer alloc of vectors until we need them.
    // defer alloc of rbtrees until we need them.

    pMrgCSetEntryConflict->pMergeTool = NULL;

    *ppMrgCSetEntryConflict = pMrgCSetEntryConflict;
}
void sg_sync_client__http__get_dagnode_info(
    SG_context* pCtx,
    SG_sync_client* pSyncClient,
    SG_vhash* pvhRequest,
    SG_history_result** ppInfo)
{
    SG_string* pstrRequest = NULL;
    char* pszUrl = NULL;
    SG_string* pstr = NULL;

    SG_NULLARGCHECK_RETURN(pSyncClient);
    SG_NULLARGCHECK_RETURN(pvhRequest);
    SG_NULLARGCHECK_RETURN(ppInfo);

    SG_ERR_CHECK( _get_sync_url(pCtx, pSyncClient->psz_remote_repo_spec, SYNC_URL_SUFFIX "/incoming" JSON_URL_SUFFIX, NULL, NULL, &pszUrl) );

    SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pstrRequest) );
    SG_ERR_CHECK( SG_vhash__to_json(pCtx, pvhRequest, pstrRequest) );

    SG_ERR_CHECK( do_url(pCtx, pszUrl, "GET", SG_string__sz(pstrRequest), pSyncClient->psz_username, pSyncClient->psz_password, &pstr, NULL, SG_TRUE) );

    SG_ERR_CHECK( SG_history_result__from_json(pCtx, SG_string__sz(pstr), ppInfo) );

    /* fall through */
fail:
    SG_NULLFREE(pCtx, pszUrl);
    SG_STRING_NULLFREE(pCtx, pstrRequest);
    SG_STRING_NULLFREE(pCtx, pstr);
}
void _dispatch__todo(SG_context * pCtx,
                     _request_headers * pRequestHeaders,
                     SG_repo **ppRepo,
                     const char ** ppUriSubstrings,
                     SG_uint32 uriSubstringsCount,
                     _response_handle ** ppResponseHandle)
{
    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK_RETURN(pRequestHeaders);
    SG_NULLARGCHECK_RETURN(ppUriSubstrings);
    SG_NULLARGCHECK_RETURN(ppResponseHandle);
    SG_ASSERT(*ppResponseHandle==NULL);
    SG_UNUSED(ppRepo);

    if(uriSubstringsCount==0)
    {
        if(eq(pRequestHeaders->pRequestMethod,"GET"))
        {
            SG_ERR_CHECK_RETURN( _create_response_handle_for_template(pCtx, pRequestHeaders, SG_HTTP_STATUS_OK, "todo.xhtml", _todo_replacer, ppResponseHandle) );
        }
        else
            SG_ERR_THROW_RETURN(SG_ERR_URI_HTTP_405_METHOD_NOT_ALLOWED);
    }
    else
        SG_ERR_THROW_RETURN(SG_ERR_URI_HTTP_404_NOT_FOUND);
}
void SG_sync_remote__push_commit(
    SG_context* pCtx,
    SG_repo* pRepo,
    const char* pszPushId,
    SG_vhash** ppResult
    )
{
    SG_staging* pStaging = NULL;
    SG_vhash* pvh_status = NULL;

    SG_NULLARGCHECK_RETURN(pszPushId);
    SG_NULLARGCHECK_RETURN(ppResult);

    SG_ERR_CHECK( SG_staging__open(pCtx, pszPushId, &pStaging) );
    SG_ERR_CHECK( SG_staging__commit(pCtx, pStaging, pRepo) );
    SG_ERR_CHECK( SG_staging__check_status(pCtx, pStaging, pRepo,
        SG_TRUE, SG_FALSE, SG_TRUE, SG_TRUE, SG_FALSE,
        &pvh_status) );

    *ppResult = pvh_status;
    pvh_status = NULL;

    /* fallthru */
fail:
    SG_VHASH_NULLFREE(pCtx, pvh_status);
    SG_STAGING_NULLFREE(pCtx, pStaging);
}
void SG_stringarray__remove_all(
    SG_context* pCtx,
    SG_stringarray* psa,
    const char* psz,
    SG_uint32 * pNumOccurrencesRemoved
    )
{
    SG_uint32 numOccurrencesRemoved = 0;
    SG_uint32 i;

    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK_RETURN(psa);
    SG_NULLARGCHECK_RETURN(psz);

    for(i=0;i<psa->count;++i)
    {
        if(strcmp(psa->aStrings[i],psz)==0)
            numOccurrencesRemoved += 1;
        else
            psa->aStrings[i-numOccurrencesRemoved] = psa->aStrings[i];
    }
    psa->count -= numOccurrencesRemoved;

    if(pNumOccurrencesRemoved!=NULL)
        *pNumOccurrencesRemoved = numOccurrencesRemoved;
}
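/* A hypothetical usage sketch, not part of the original source: drop every
 * occurrence of one value from an existing string array and report both how
 * many entries were removed and how many remain.  The helper name
 * _example__drop_value is illustrative only; it relies solely on
 * SG_stringarray__remove_all and SG_stringarray__count as defined in this file. */
static void _example__drop_value(SG_context * pCtx,
                                 SG_stringarray * psa,
                                 const char * pszValue,
                                 SG_uint32 * pNumRemoved,
                                 SG_uint32 * pNumRemaining)
{
    SG_NULLARGCHECK_RETURN(pNumRemoved);
    SG_NULLARGCHECK_RETURN(pNumRemaining);

    // remove_all compacts the array in place and reports how many entries matched.
    SG_ERR_CHECK_RETURN( SG_stringarray__remove_all(pCtx, psa, pszValue, pNumRemoved) );

    // the count now reflects the compacted array.
    SG_ERR_CHECK_RETURN( SG_stringarray__count(pCtx, psa, pNumRemaining) );
}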
void SG_curl__set_headers_from_varray(SG_context * pCtx, SG_curl * pCurl, SG_varray * pvaHeaders, struct curl_slist ** ppHeaderList)
{
    CURLcode rc = CURLE_OK;
    _sg_curl* p = (_sg_curl*)pCurl;
    struct curl_slist* pHeaderList = NULL;
    SG_uint32 count = 0;
    SG_uint32 i = 0;

    SG_NULLARGCHECK_RETURN(pCurl);
    SG_NULLARGCHECK_RETURN(pvaHeaders);

    SG_ERR_CHECK( SG_varray__count(pCtx, pvaHeaders, &count) );
    for (i = 0; i < count; i++)
    {
        const char * psz = NULL;
        // use the non-RETURN variant so a failure frees the partially built list below.
        SG_ERR_CHECK( SG_varray__get__sz(pCtx, pvaHeaders, i, &psz) );
        pHeaderList = curl_slist_append(pHeaderList, psz);
        if (!pHeaderList)
            SG_ERR_THROW2(SG_ERR_UNSPECIFIED, (pCtx, "Failed to add HTTP header."));
    }

    rc = curl_easy_setopt(p->pCurl, CURLOPT_HTTPHEADER, pHeaderList);
    if (rc)
        SG_ERR_THROW2(SG_ERR_LIBCURL(rc), (pCtx, "Problem setting HTTP headers" ));

    SG_RETURN_AND_NULL(pHeaderList, ppHeaderList);

fail:
    if (pHeaderList)
        SG_CURL_HEADERS_NULLFREE(pCtx, pHeaderList);
}
void sg_client__c__push_begin(SG_context* pCtx,
                              SG_client * pClient,
                              SG_pathname** ppFragballDirPathname,
                              SG_client_push_handle** ppPush)
{
    sg_client_c_push_handle* pPush = NULL;
    sg_client_c_instance_data* pMe = NULL;

    SG_NULLARGCHECK_RETURN(pClient);
    SG_NULLARGCHECK_RETURN(ppFragballDirPathname);
    SG_NULLARGCHECK_RETURN(ppPush);

    pMe = (sg_client_c_instance_data*)pClient->p_vtable_instance_data;

    // Alloc a push handle.
    SG_ERR_CHECK( SG_alloc(pCtx, 1, sizeof(sg_client_c_push_handle), &pPush) );

    // Tell the server we're about to push.  We get back a push ID, which we save in the push handle.
    SG_ERR_CHECK( SG_server__push_begin(pCtx, pMe->pServer, pClient->psz_remote_repo_spec, (const char **)&pPush->pszPushId) );

    /* This is a local push, so we tell our caller to put fragballs directly in the server's staging area. */
    SG_ERR_CHECK( SG_server__push_get_staging_path(pCtx, pMe->pServer, pPush->pszPushId, ppFragballDirPathname) );

    // Return the new push handle.
    *ppPush = (SG_client_push_handle*)pPush;
    pPush = NULL;

    /* fall through */
fail:
    _NULLFREE_PUSH_HANDLE(pCtx, pPush);
}
static void _cache__lookup(SG_context * pCtx, SG_dagfrag * pFrag, const char * szHid,
                           _my_data ** ppData,    // you do not own this (but you may modify non-pointer values within)
                           SG_bool * pbPresent)
{
    // see if szHid is present in cache.  return the associated data.

    _my_data * pData;
    SG_bool b = SG_FALSE;

    SG_NULLARGCHECK_RETURN(pFrag);
    SG_NONEMPTYCHECK_RETURN(szHid);
    SG_NULLARGCHECK_RETURN(ppData);

    SG_ERR_CHECK_RETURN( SG_rbtree__find(pCtx,pFrag->m_pRB_Cache,szHid,&b,((void **)&pData)) );
    if (b)
    {
        *pbPresent = SG_TRUE;
        *ppData = pData;
    }
    else
    {
        *pbPresent = SG_FALSE;
        *ppData = NULL;
    }
}
/**
 * Queue a request to resolve (or partially resolve) an issue.
 * We assume that the caller has already queued any necessary
 * WD operations (such as moves/renames) and now just needs to
 * mark the item resolved (or partially resolved).
 *
 * Since an item may have multiple conflict bits set, you can
 * use this to mark the specific choices made in the
 * pvhSavedResolutions and only mark it fully resolved when
 * everything has been chosen.
 *
 * We steal the optional/given pvhSavedResolutions.
 *
 */
void sg_wc_tx__queue__resolve_issue__sr(SG_context * pCtx,
                                        SG_wc_tx * pWcTx,
                                        sg_wc_liveview_item * pLVI,
                                        SG_wc_status_flags statusFlags_x_xr_xu,
                                        SG_vhash ** ppvhSavedResolutions)
{
    SG_string * pStringResolveData = NULL;

    SG_NULLARGCHECK_RETURN( pWcTx );
    SG_NULLARGCHECK_RETURN( pLVI );
    // ppvhSavedResolutions is optional.

    if (ppvhSavedResolutions && *ppvhSavedResolutions)
    {
        SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pStringResolveData) );
        SG_ERR_CHECK( SG_vhash__to_json(pCtx, (*ppvhSavedResolutions), pStringResolveData) );
    }

    SG_ERR_CHECK( sg_wc_tx__journal__resolve_issue__sr(pCtx, pWcTx,
                                                       pLVI->uiAliasGid,
                                                       statusFlags_x_xr_xu,
                                                       pStringResolveData) );

    pLVI->statusFlags_x_xr_xu = statusFlags_x_xr_xu;

    SG_VHASH_NULLFREE(pCtx, pLVI->pvhSavedResolutions);
    if (ppvhSavedResolutions && *ppvhSavedResolutions)
    {
        pLVI->pvhSavedResolutions = *ppvhSavedResolutions;
        *ppvhSavedResolutions = NULL;
    }

fail:
    SG_STRING_NULLFREE(pCtx, pStringResolveData);
}
void SG_dagfrag__foreach_member(SG_context * pCtx,
                                SG_dagfrag * pFrag,
                                SG_dagfrag__foreach_member_callback * pcb,
                                void * pVoidCallerData)
{
    // we want to iterate over the START_ and INTERIOR_ MEMBERS in the CACHE.
    // we need to use the SORTED MEMBER CACHE so that ancestors are presented
    // before descendants.

    struct _fm_data fm_data;

    SG_NULLARGCHECK_RETURN(pFrag);
    SG_NULLARGCHECK_RETURN(pcb);

    if (!pFrag->m_pRB_GenerationSortedMemberCache)
        SG_ERR_CHECK_RETURN( _my_create_generation_sorted_member_cache(pCtx,pFrag) );

    fm_data.pFrag = pFrag;
    fm_data.pcb = pcb;
    fm_data.pVoidCallerData = pVoidCallerData;

    // we wrap their callback with our own so that we can munge the arguments
    // that they see.

#if TRACE_DAGFRAG && 0
    SG_ERR_CHECK_RETURN( SG_console(pCtx, SG_CS_STDERR, "SORTED MEMBER CACHE:\r\n") );
    SG_ERR_CHECK_RETURN( SG_rbtree_debug__dump_keys_to_console(pCtx, pFrag->m_pRB_GenerationSortedMemberCache) );
    SG_ERR_CHECK_RETURN( SG_console__flush(pCtx, SG_CS_STDERR) );
#endif

    SG_ERR_CHECK_RETURN( SG_rbtree__foreach(pCtx,
                                            pFrag->m_pRB_GenerationSortedMemberCache,
                                            _sg_dagfrag__my_foreach_member_callback,
                                            &fm_data) );
}
void SG_stringarray__alloc__copy(
    SG_context* pCtx,
    SG_stringarray** ppThis,
    const SG_stringarray* pOther
    )
{
    SG_uint32 count = 0;
    SG_stringarray * pThis = NULL;
    SG_uint32 i;

    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK_RETURN(ppThis);
    SG_NULLARGCHECK_RETURN(pOther);

    SG_ERR_CHECK( SG_stringarray__count(pCtx, pOther, &count) );
    SG_ERR_CHECK( SG_stringarray__alloc(pCtx, &pThis, count) );
    for(i=0;i<count;++i)
    {
        const char * sz = NULL;
        SG_ERR_CHECK( SG_stringarray__get_nth(pCtx, pOther, i, &sz) );
        SG_ERR_CHECK( SG_stringarray__add(pCtx, pThis, sz) );
    }

    *ppThis = pThis;

    return;
fail:
    SG_STRINGARRAY_NULLFREE(pCtx, pThis);
}
void SG_stringarray__add(
    SG_context* pCtx,
    SG_stringarray* psa,
    const char* psz
    )
{
    const char* pCopy = NULL;
    const char** new_aStrings;

    SG_NULLARGCHECK_RETURN( psa );
    SG_NULLARGCHECK_RETURN( psz );

    if ((1 + psa->count) > psa->space)
    {
        SG_uint32 new_space = psa->space * 2;
        SG_ERR_CHECK( SG_alloc(pCtx, new_space, sizeof(const char *), &new_aStrings) );
        memcpy((char **)new_aStrings, psa->aStrings, psa->count * sizeof(const char*));
        SG_NULLFREE(pCtx, psa->aStrings);
        psa->aStrings = new_aStrings;
        psa->space = new_space;
    }

    SG_ERR_CHECK( SG_strpool__add__sz(pCtx, psa->pStrPool, psz, &pCopy) );

    psa->aStrings[psa->count++] = pCopy;

fail:
    return;
}
void SG_stringarray__find(
    SG_context * pCtx,
    const SG_stringarray * psa,
    const char * pszToFind,
    SG_uint32 ndxStart,
    SG_bool * pbFound,
    SG_uint32 * pNdxFound)
{
    SG_uint32 k;

    SG_NULLARGCHECK_RETURN(psa);
    SG_NONEMPTYCHECK_RETURN(pszToFind);
    SG_NULLARGCHECK_RETURN(pbFound);
    // pNdxFound is optional

    for (k=ndxStart; k<psa->count; k++)
    {
        if (strcmp(psa->aStrings[k], pszToFind) == 0)
        {
            *pbFound = SG_TRUE;
            if (pNdxFound)
                *pNdxFound = k;
            return;
        }
    }

    *pbFound = SG_FALSE;
}
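/* A small usage sketch, not part of the original source: allocate a string
 * array, add a couple of entries with SG_stringarray__add, then look one up
 * with SG_stringarray__find starting from index 0.  The helper name and the
 * literal strings are hypothetical; every call used here is defined in this file. */
static void _example__build_and_find(SG_context * pCtx)
{
    SG_stringarray * psa = NULL;
    SG_bool bFound = SG_FALSE;
    SG_uint32 ndx = 0;

    SG_ERR_CHECK( SG_stringarray__alloc(pCtx, &psa, 2) );
    SG_ERR_CHECK( SG_stringarray__add(pCtx, psa, "alpha") );
    SG_ERR_CHECK( SG_stringarray__add(pCtx, psa, "beta") );

    // search the whole array; on success bFound is SG_TRUE and ndx is 1.
    SG_ERR_CHECK( SG_stringarray__find(pCtx, psa, "beta", 0, &bFound, &ndx) );

fail:
    SG_STRINGARRAY_NULLFREE(pCtx, psa);
}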
void SG_pull__clone(
    SG_context* pCtx,
    const char* pszPullIntoRepoDescriptorName,
    SG_client* pClient)
{
    sg_pull_instance_data* pMe = NULL;
    char* pszFragballName = NULL;
    const SG_pathname* pStagingPathname;

    SG_NULLARGCHECK_RETURN(pszPullIntoRepoDescriptorName);
    SG_NULLARGCHECK_RETURN(pClient);

    SG_ERR_CHECK( _pull_init(pCtx, pClient, pszPullIntoRepoDescriptorName, &pMe) );
    SG_ERR_CHECK( SG_staging__get_pathname(pCtx, pMe->pStaging, &pStagingPathname) );

    /* Request a fragball containing the entire repo */
    SG_ERR_CHECK( SG_client__pull_clone(pCtx, pClient, pStagingPathname, &pszFragballName) );

    /* commit and cleanup */
    SG_ERR_CHECK_RETURN( SG_staging__commit_fragball(pCtx, pMe->pStaging, pszFragballName) );

    SG_ERR_CHECK( SG_context__msg__emit(pCtx, "Cleaning up...") );
    SG_ERR_CHECK_RETURN( SG_staging__cleanup(pCtx, &pMe->pStaging) );
    SG_ERR_CHECK( SG_context__msg__emit(pCtx, "done") );

    /* fall through */
fail:
    _NULLFREE_INSTANCE_DATA(pCtx, pMe);
    SG_NULLFREE(pCtx, pszFragballName);
    SG_ERR_IGNORE( SG_context__msg__emit(pCtx, "\n") );
}
void SG_dbrecord__get_value(SG_context* pCtx, const SG_dbrecord* prec, const char* putf8Name, const char** pputf8Value)
{
    SG_NULLARGCHECK_RETURN(prec);
    SG_NULLARGCHECK_RETURN(putf8Name);
    SG_NULLARGCHECK_RETURN(pputf8Value);

    SG_ERR_CHECK_RETURN( SG_vhash__get__sz(pCtx, prec->pvh, putf8Name, pputf8Value) );
}
void SG_vector_i64__length(SG_context* pCtx, const SG_vector_i64 * pVector, SG_uint32 * pLength)
{
    SG_NULLARGCHECK_RETURN(pVector);
    SG_NULLARGCHECK_RETURN(pLength);

    *pLength = pVector->m_uiSizeInUse;
}
void sg_sync_client__http__push_commit(
    SG_context* pCtx,
    SG_sync_client * pSyncClient,
    SG_sync_client_push_handle* pPush,
    SG_vhash** ppResult)
{
    char* pszUrl = NULL;
    sg_sync_client_http_push_handle* pMyPush = (sg_sync_client_http_push_handle*)pPush;
    SG_string* pstr = NULL;

    SG_NULLARGCHECK_RETURN(pSyncClient);
    SG_NULLARGCHECK_RETURN(pPush);
    SG_NULLARGCHECK_RETURN(ppResult);

    SG_ERR_CHECK( _get_sync_url(pCtx, pSyncClient->psz_remote_repo_spec, SYNC_URL_SUFFIX, pMyPush->pszPushId, NULL, &pszUrl) );
    SG_ERR_CHECK( do_url(pCtx, pszUrl, "POST", NULL, pSyncClient->psz_username, pSyncClient->psz_password, &pstr, NULL, SG_TRUE) );

    SG_ERR_CHECK( SG_vhash__alloc__from_json__sz(pCtx, ppResult, SG_string__sz(pstr)) );

    /* fall through */
fail:
    SG_STRING_NULLFREE(pCtx, pstr);
    SG_NULLFREE(pCtx, pszUrl);
}
void SG_dagfrag__eat_other_frag(SG_context * pCtx,
                                SG_dagfrag* pConsumerFrag,
                                SG_dagfrag** ppFragToBeEaten)
{
    SG_rbtree_iterator* pit = NULL;
    SG_bool b = SG_FALSE;
    _my_data * pMyData = NULL;
    const char* psz_id = NULL;
    SG_dagfrag* pFragToBeEaten = NULL;

    SG_NULLARGCHECK_RETURN(pConsumerFrag);
    SG_NULLARGCHECK_RETURN(ppFragToBeEaten);
    SG_NULLARGCHECK_RETURN(*ppFragToBeEaten);

    pFragToBeEaten = *ppFragToBeEaten;

#if DEBUG && TRACE_DAGFRAG
    SG_ERR_CHECK( SG_dagfrag_debug__dump__console(pCtx, pFragToBeEaten, "frag to be eaten", 0, SG_CS_STDOUT) );
    SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, pFragToBeEaten->m_pRB_Cache) );
    SG_ERR_CHECK( SG_console__flush(pCtx, SG_CS_STDERR) );
    SG_ERR_CHECK( SG_dagfrag_debug__dump__console(pCtx, pConsumerFrag, "new frag before meal", 0, SG_CS_STDOUT) );
    SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, pConsumerFrag->m_pRB_Cache) );
    SG_ERR_CHECK( SG_console__flush(pCtx, SG_CS_STDERR) );
#endif

    if (   (pConsumerFrag->m_iDagNum != pFragToBeEaten->m_iDagNum)
        || (0 != strcmp(pConsumerFrag->m_sz_repo_id,  pFragToBeEaten->m_sz_repo_id))
        || (0 != strcmp(pConsumerFrag->m_sz_admin_id, pFragToBeEaten->m_sz_admin_id))   )
    {
        SG_ERR_THROW_RETURN( SG_ERR_REPO_MISMATCH );
    }

    SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pit,pFragToBeEaten->m_pRB_Cache,&b,&psz_id,(void **)&pMyData) );
    while (b)
    {
        if (pMyData->m_pDagnode)
        {
            SG_ERR_CHECK( SG_dagfrag__add_dagnode(pCtx, pConsumerFrag, &pMyData->m_pDagnode) );
        }
        SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pit,&b,&psz_id,(void **)&pMyData) );
    }
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);

    SG_DAGFRAG_NULLFREE(pCtx, pFragToBeEaten);
    *ppFragToBeEaten = NULL;

#if DEBUG && TRACE_DAGFRAG
    SG_ERR_CHECK( SG_dagfrag_debug__dump__console(pCtx, pConsumerFrag, "new frag after meal", 0, SG_CS_STDOUT) );
    SG_ERR_CHECK( SG_rbtree_debug__dump_keys_to_console(pCtx, pConsumerFrag->m_pRB_Cache) );
    SG_ERR_CHECK( SG_console__flush(pCtx, SG_CS_STDERR) );
#endif

    return;

fail:
    SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
}
// TODO consider the possible perf benefits of changing this routine
// to accept lots of changeset ids instead of just one, so it
// can handle them all at once.
void SG_treendx__update__multiple(
    SG_context* pCtx,
    SG_treendx* pTreeNdx,
    SG_stringarray* psa
    )
{
    SG_changeset* pcs = NULL;
    sqlite3_stmt* pStmt = NULL;
    SG_vhash* pvh_treepaths = NULL;
    SG_uint32 count_treepaths = 0;
    SG_uint32 count_changesets = 0;
    SG_uint32 ics = 0;

    SG_NULLARGCHECK_RETURN(psa);
    SG_NULLARGCHECK_RETURN(pTreeNdx);

    SG_ERR_CHECK( SG_stringarray__count(pCtx, psa, &count_changesets) );

    SG_ERR_CHECK( sg_sqlite__exec__va(pCtx, pTreeNdx->psql, "BEGIN TRANSACTION; ") );
    SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pTreeNdx->psql, &pStmt,
        "INSERT OR IGNORE INTO treendx (gid, strpath) VALUES (?, ?)") );

    for (ics=0; ics<count_changesets; ics++)
    {
        const char* psz_hid = NULL;
        SG_uint32 i = 0;

        SG_ERR_CHECK( SG_stringarray__get_nth(pCtx, psa, ics, &psz_hid) );
        SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pTreeNdx->pRepo, psz_hid, &pcs) );
        SG_ERR_CHECK( SG_changeset__get_treepaths(pCtx, pcs, &pvh_treepaths) );

        if (pvh_treepaths)
        {
            SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_treepaths, &count_treepaths) );

            for (i=0; i<count_treepaths; i++)
            {
                const char* psz_gid = NULL;
                const SG_variant* pv = NULL;
                const char* psz_path = NULL;

                SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_treepaths, i, &psz_gid, &pv) );
                SG_ERR_CHECK( SG_variant__get__sz(pCtx, pv, &psz_path) );

                SG_ERR_CHECK( sg_sqlite__reset(pCtx, pStmt) );
                SG_ERR_CHECK( sg_sqlite__clear_bindings(pCtx, pStmt) );
                SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 1, psz_gid) );
                SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 2, psz_path) );
                SG_ERR_CHECK( sg_sqlite__step(pCtx, pStmt, SQLITE_DONE) );
            }
        }
        SG_CHANGESET_NULLFREE(pCtx, pcs);
    }

    SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) );
    SG_ERR_CHECK( sg_sqlite__exec__va(pCtx, pTreeNdx->psql, "COMMIT TRANSACTION; ") );

fail:
    SG_CHANGESET_NULLFREE(pCtx, pcs);
}
void SG_workingdir__set_mapping(
    SG_context* pCtx,
    const SG_pathname* pPathLocalDirectory,
    const char* pszNameRepoInstanceDescriptor, /**< The name of the repo instance descriptor */
    const char* pszidGidAnchorDirectory /**< The GID of the directory within the repo to which this is anchored.  Usually it's user root. */
    )
{
    SG_vhash* pvhNew = NULL;
    SG_vhash* pvh = NULL;
    SG_pathname* pMyPath = NULL;
    SG_pathname* pDrawerPath = NULL;
    SG_pathname* pMappingFilePath = NULL;

    SG_NULLARGCHECK_RETURN(pPathLocalDirectory);
    SG_NULLARGCHECK_RETURN(pszNameRepoInstanceDescriptor);

    /* make a copy of the path so we can modify it (adding the final slash) */
    SG_ERR_CHECK( SG_PATHNAME__ALLOC__COPY(pCtx, &pMyPath, pPathLocalDirectory) );

    /* null the original parameter pointer to make sure we don't use it anymore */
    pPathLocalDirectory = NULL;

    /* make sure the path we were given is a directory that exists */
    SG_ERR_CHECK( SG_fsobj__verify_directory_exists_on_disk__pathname(pCtx, pMyPath) );

    /* it's a directory, so it should have a final slash */
    SG_ERR_CHECK( SG_pathname__add_final_slash(pCtx, pMyPath) );

    /* make sure the name of the repo instance descriptor is valid */
    SG_ERR_CHECK( SG_closet__descriptors__get(pCtx, pszNameRepoInstanceDescriptor, &pvh) );
    SG_VHASH_NULLFREE(pCtx, pvh);

    // TODO verify that the anchor GID is valid for that repo?

    SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhNew) );
    SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhNew, "descriptor", pszNameRepoInstanceDescriptor) );
    SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhNew, "anchor", pszidGidAnchorDirectory) );

    SG_ERR_CHECK( SG_workingdir__verify_drawer_exists(pCtx, pMyPath, &pDrawerPath) );

    SG_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pMappingFilePath, pDrawerPath, "repo.json") );

    SG_ERR_CHECK( SG_vfile__update__vhash(pCtx, pMappingFilePath, "mapping", pvhNew) );

    SG_VHASH_NULLFREE(pCtx, pvhNew);
    SG_PATHNAME_NULLFREE(pCtx, pMyPath);
    SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);
    SG_PATHNAME_NULLFREE(pCtx, pMappingFilePath);

    return;

fail:
    SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);
    SG_PATHNAME_NULLFREE(pCtx, pMappingFilePath);
    SG_PATHNAME_NULLFREE(pCtx, pMyPath);
    SG_VHASH_NULLFREE(pCtx, pvhNew);
    SG_VHASH_NULLFREE(pCtx, pvh);
}
void SG_server__push_end(SG_context* pCtx, SG_server * pServer, const char* pPushId)
{
    SG_NULLARGCHECK_RETURN(pServer);
    SG_NULLARGCHECK_RETURN(pPushId);

    SG_ERR_CHECK_RETURN( SG_staging__cleanup__by_id(pCtx, pPushId) );
}
void SG_dbrecord__get_hid__ref(SG_context* pCtx, const SG_dbrecord* prec, const char** ppid)
{
    SG_NULLARGCHECK_RETURN(prec);
    SG_NULLARGCHECK_RETURN(ppid);

    /* it is legal to call this when the item is not frozen */

    *ppid = prec->pid;
}
void SG_dbrecord__has_value(SG_context* pCtx, const SG_dbrecord* prec, const char* putf8Name, SG_bool* pb)
{
    SG_bool b = SG_FALSE;

    SG_NULLARGCHECK_RETURN(prec);
    SG_NULLARGCHECK_RETURN(putf8Name);
    SG_NULLARGCHECK_RETURN(pb);

    SG_ERR_CHECK_RETURN( SG_vhash__has(pCtx, prec->pvh, putf8Name, &b) );

    *pb = b;
}
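/* A hypothetical sketch, not part of the original source: check whether a
 * field is present on an SG_dbrecord before fetching its value, using
 * SG_dbrecord__has_value and SG_dbrecord__get_value as defined above.  The
 * field name "comment" and the helper name are illustrative only. */
static void _example__read_optional_field(SG_context * pCtx, const SG_dbrecord * prec, const char ** ppszValue)
{
    SG_bool bHas = SG_FALSE;

    SG_NULLARGCHECK_RETURN(ppszValue);
    *ppszValue = NULL;

    // only fetch the value if the field actually exists on this record.
    SG_ERR_CHECK_RETURN( SG_dbrecord__has_value(pCtx, prec, "comment", &bHas) );
    if (bHas)
        SG_ERR_CHECK_RETURN( SG_dbrecord__get_value(pCtx, prec, "comment", ppszValue) );
}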
void SG_vector_i64__get(SG_context* pCtx, const SG_vector_i64 * pVector, SG_uint32 k, SG_int64 * pValue)
{
    SG_NULLARGCHECK_RETURN(pVector);
    SG_NULLARGCHECK_RETURN(pValue);
    SG_ARGCHECK_RETURN(k < pVector->m_uiSizeInUse, pVector->m_uiSizeInUse);

    *pValue = pVector->m_array[k];
}
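/* A hypothetical iteration sketch, not part of the original source: walk an
 * existing SG_vector_i64 using __length and the bounds-checked __get above,
 * summing the values.  The helper name _example__sum_vector is illustrative only. */
static void _example__sum_vector(SG_context * pCtx, const SG_vector_i64 * pVector, SG_int64 * pSum)
{
    SG_uint32 len = 0;
    SG_uint32 k;
    SG_int64 sum = 0;

    SG_NULLARGCHECK_RETURN(pSum);

    SG_ERR_CHECK_RETURN( SG_vector_i64__length(pCtx, pVector, &len) );
    for (k = 0; k < len; k++)
    {
        SG_int64 v = 0;
        // __get validates the index against m_uiSizeInUse before reading.
        SG_ERR_CHECK_RETURN( SG_vector_i64__get(pCtx, pVector, k, &v) );
        sum += v;
    }

    *pSum = sum;
}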
void SG_stringarray__count(
    SG_context* pCtx,
    const SG_stringarray* psa,
    SG_uint32 * pCount)
{
    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK_RETURN(psa);
    SG_NULLARGCHECK_RETURN(pCount);

    *pCount = psa->count;
}
void SG_stringarray__sz_array(
    SG_context* pCtx,
    const SG_stringarray* psa,
    const char * const ** pppStrings)
{
    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK_RETURN(psa);
    SG_NULLARGCHECK_RETURN(pppStrings);

    *pppStrings = psa->aStrings;
}