/**
 * Free an SG_mrg and everything it owns.
 *
 * pCtx  error/context object (not owned; used only for the free calls).
 * pMrg  merge state to destroy; may be NULL (no-op).
 *
 * Borrowed pointers (pMergeArgs, pWcTx) are nulled but NOT freed.
 * The NULLFREE macros tolerate NULL and null the field after freeing.
 */
void SG_mrg__free(SG_context * pCtx, SG_mrg * pMrg)
{
	if (!pMrg)
		return;

	//////////////////////////////////////////////////////////////////

	pMrg->pMergeArgs = NULL;		// we do not own this
	pMrg->pWcTx = NULL;				// we do not own this

	//////////////////////////////////////////////////////////////////

	SG_VHASH_NULLFREE(pCtx, pMrg->pvhPile);
	SG_NULLFREE(pCtx, pMrg->pszHid_StartingBaseline);
	SG_NULLFREE(pCtx, pMrg->pszBranchName_Starting);
	SG_NULLFREE(pCtx, pMrg->pszHidTarget);

	//////////////////////////////////////////////////////////////////

	// DAG least-common-ancestor data and the per-changeset views built from it.
	SG_DAGLCA_NULLFREE(pCtx,pMrg->pDagLca);
	SG_MRG_CSET_NULLFREE(pCtx, pMrg->pMrgCSet_LCA);
	SG_MRG_CSET_NULLFREE(pCtx, pMrg->pMrgCSet_Baseline);
	SG_MRG_CSET_NULLFREE(pCtx, pMrg->pMrgCSet_Other);
	SG_MRG_CSET_NULLFREE(pCtx,pMrg->pMrgCSet_FinalResult);

	// auxiliary cset table owns its values; free each via SG_mrg_cset__free.
	SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx,pMrg->prbCSets_Aux,(SG_free_callback *)SG_mrg_cset__free);

	SG_VECTOR_I64_NULLFREE(pCtx, pMrg->pVecRevertAllKillList);

	SG_NULLFREE(pCtx, pMrg);
}
/**
 * Free an SG_dagfrag and its internal caches.
 *
 * pCtx   error/context object (not owned).
 * pFrag  fragment to destroy; may be NULL (no-op).
 */
void SG_dagfrag__free(SG_context * pCtx, SG_dagfrag * pFrag)
{
	if (!pFrag)
		return;

	// the real cache owns its assoc data; free each entry via _my_data__free.
	SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, pFrag->m_pRB_Cache, _my_data__free);

	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);	// we borrowed the assoc data from the real cache, so we don't free them.

	SG_NULLFREE(pCtx, pFrag);
}
/**
 * Free an SG_mrg_cset_entry_conflict and everything it owns.
 *
 * pCtx                   error/context object (not owned).
 * pMrgCSetEntryConflict  conflict record to destroy; may be NULL (no-op).
 *
 * Ownership notes (see inline comments): several vectors hold borrowed
 * pointers; the rbUnique tables own their value-vectors but not the
 * pointers inside those vectors.
 */
void SG_mrg_cset_entry_conflict__free(SG_context * pCtx, SG_mrg_cset_entry_conflict * pMrgCSetEntryConflict)
{
	if (!pMrgCSetEntryConflict)
		return;

	SG_VECTOR_I64_NULLFREE(pCtx, pMrgCSetEntryConflict->pVec_MrgCSetEntryNeq_Changes);
	SG_VECTOR_NULLFREE(pCtx, pMrgCSetEntryConflict->pVec_MrgCSetEntry_Changes);		// we do not own the pointers within
	SG_VECTOR_NULLFREE(pCtx, pMrgCSetEntryConflict->pVec_MrgCSet_Deletes);			// we do not own the pointers within

	// for the collapsable rbUnique's we own the vectors in the rbtree-values, but not the pointers within the vector
	SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, pMrgCSetEntryConflict->prbUnique_AttrBits, (SG_free_callback *)SG_vector__free);
	SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, pMrgCSetEntryConflict->prbUnique_Entryname, (SG_free_callback *)SG_vector__free);
	SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, pMrgCSetEntryConflict->prbUnique_GidParent, (SG_free_callback *)SG_vector__free);
	SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, pMrgCSetEntryConflict->prbUnique_Symlink_HidBlob, (SG_free_callback *)SG_vector__free);
	SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, pMrgCSetEntryConflict->prbUnique_Submodule_HidBlob, (SG_free_callback *)SG_vector__free);
	SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, pMrgCSetEntryConflict->prbUnique_File_HidBlob, (SG_free_callback *)SG_vector__free);

	SG_VECTOR_NULLFREE(pCtx, pMrgCSetEntryConflict->pVec_MrgCSetEntry_OtherDirsInCycle);	// we do not own the pointers within
	SG_STRING_NULLFREE(pCtx, pMrgCSetEntryConflict->pStringPathCycleHint);

	// merge-tool handle and the temp files/dirs used during the file merge.
	SG_FILETOOL_NULLFREE(pCtx, pMrgCSetEntryConflict->pMergeTool);
	SG_PATHNAME_NULLFREE(pCtx, pMrgCSetEntryConflict->pPathTempDirForFile);
	SG_PATHNAME_NULLFREE(pCtx, pMrgCSetEntryConflict->pPathTempFile_Ancestor);
	SG_PATHNAME_NULLFREE(pCtx, pMrgCSetEntryConflict->pPathTempFile_Result);
	SG_PATHNAME_NULLFREE(pCtx, pMrgCSetEntryConflict->pPathTempFile_Baseline);
	SG_PATHNAME_NULLFREE(pCtx, pMrgCSetEntryConflict->pPathTempFile_Other);

	SG_NULLFREE(pCtx, pMrgCSetEntryConflict->pszHidDisposable);
	SG_NULLFREE(pCtx, pMrgCSetEntryConflict->pszHidGenerated);

	SG_NULLFREE(pCtx, pMrgCSetEntryConflict);
}
/**
 * Tear down the process-wide JS core state.
 *
 * pCtx  error/context object (not owned).
 *
 * No-op if gpJSCoreGlobalState was never initialized.  The SpiderMonkey
 * runtime is destroyed before JS_ShutDown(), then the paths, the JS mutex
 * table, and the state struct itself are freed (SG_NULLFREE nulls the
 * global afterwards, so a second call is safe).
 */
void SG_jscore__shutdown(SG_context * pCtx)
{
	if(gpJSCoreGlobalState!=NULL)
	{
		if (gpJSCoreGlobalState->rt)
			JS_DestroyRuntime(gpJSCoreGlobalState->rt);
		// NOTE(review): JS_ShutDown() is called even when rt was NULL —
		// presumably JS_Init/JS_NewRuntime happened at startup; confirm
		// that shutdown without a live runtime is intended here.
		JS_ShutDown();

		SG_PATHNAME_NULLFREE(pCtx, gpJSCoreGlobalState->pPathToDispatchDotJS);
		SG_PATHNAME_NULLFREE(pCtx, gpJSCoreGlobalState->pPathToCore);
		SG_PATHNAME_NULLFREE(pCtx, gpJSCoreGlobalState->pPathToModules);

		// mutex table owns its values; free each via _free_js_mutex_cb.
		SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, gpJSCoreGlobalState->prbJSMutexes, _free_js_mutex_cb);

		SG_NULLFREE(pCtx, gpJSCoreGlobalState);
	}
}
/**
 * Re-encode version-control blobs in a repo using vcdiff deltas.
 *
 * pCtx   error/context object.
 * pRepo  repo to repack.
 *
 * Walks the changeset reachable from the first VC dag leaf, grouping blob
 * hids by gid.  For each gid with more than one revision, the first blob
 * visited is kept FULL (the delta reference) and every later revision is
 * re-encoded as a VCDIFF delta against it.  Each re-encode runs in its own
 * repo tx.
 *
 * Fixes vs. previous revision:
 *  - pTx is now initialized (it was read indeterminate on some error paths).
 *  - the fail path now frees prb_leaves, prb_blobs and both iterators
 *    (previously `fail:` returned immediately and leaked them all).
 */
void SG_repo__pack__vcdiff(SG_context* pCtx, SG_repo * pRepo)
{
	SG_rbtree* prb_leaves = NULL;
	SG_uint32 count_leaves = 0;
	const char* psz_hid_cs = NULL;
	SG_rbtree* prb_blobs = NULL;
	SG_bool b;
	SG_rbtree_iterator* pit = NULL;
	SG_rbtree_iterator* pit_for_gid = NULL;
	SG_bool b_for_gid;
	const char* psz_hid_ref = NULL;
	const char* psz_hid_blob = NULL;
	const char* psz_gid = NULL;
	SG_rbtree* prb = NULL;
	const char* psz_gen = NULL;
	SG_repo_tx_handle* pTx = NULL;

	SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo,SG_DAGNUM__VERSION_CONTROL,&prb_leaves)  );
	SG_ERR_CHECK(  SG_rbtree__count(pCtx, prb_leaves, &count_leaves)  );

	// NOTE(review): only the first leaf is packed; count_leaves is fetched
	// but a multi-leaf (unmerged) dag silently packs just one head — confirm
	// whether that is intended or should be an error.
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, prb_leaves, &b, &psz_hid_cs, NULL)  );

	// prb_blobs: gid -> (rbtree: generation -> blob hid)
	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_blobs)  );
	SG_ERR_CHECK(  sg_pack__do_changeset(pCtx, pRepo, psz_hid_cs, prb_blobs)  );

	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prb_blobs, &b, &psz_gid, (void**) &prb)  );
	while (b)
	{
		SG_uint32 count_for_gid = 0;

		SG_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count_for_gid)  );
		if (count_for_gid > 1)
		{
			psz_hid_ref = NULL;
			SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit_for_gid, prb, &b_for_gid, &psz_gen, (void**) &psz_hid_blob)  );
			while (b_for_gid)
			{
				// Not a lot of thought went into doing each of these in its own repo tx.  Consider alternatives.
				// NOTE(review): if commit_tx fails, the open tx leaks; an
				// abort-tx call in the fail path would be safer — confirm API.
				SG_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
				if (psz_hid_ref)
				{
					// later revision: delta-encode against the reference blob.
					SG_ERR_CHECK(  SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid_blob, SG_BLOBENCODING__VCDIFF, psz_hid_ref, NULL, NULL, NULL, NULL)  );
					// TODO be tolerant here of SG_ERR_REPO_BUSY
				}
				else
				{
					// first revision: force FULL so it can serve as the reference.
					psz_hid_ref = psz_hid_blob;
					SG_ERR_CHECK(  SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid_ref, SG_BLOBENCODING__FULL, NULL, NULL, NULL, NULL, NULL)  );
					// TODO be tolerant here of SG_ERR_REPO_BUSY
				}
				SG_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );

				SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit_for_gid, &b_for_gid, &psz_gen, (void**) &psz_hid_blob)  );
			}
			SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit_for_gid);
			psz_hid_ref = NULL;
		}

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &b, &psz_gid, (void**) &prb)  );
	}

	// fall through: the NULLFREE macros tolerate NULL and null the pointers,
	// so success and failure share one cleanup block.
fail:
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit_for_gid);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_RBTREE_NULLFREE_WITH_ASSOC(pCtx, prb_blobs, _sg_repo__free_rbtree);
	SG_RBTREE_NULLFREE(pCtx, prb_leaves);
}