/**
 * Attach the HTTP headers listed in pvaHeaders (an array of strings,
 * each a complete header line) to the curl handle via CURLOPT_HTTPHEADER.
 *
 * On success, ownership of the allocated curl_slist is transferred to
 * the caller through ppHeaderList (it must remain alive for the duration
 * of the request and then be freed with SG_CURL_HEADERS_NULLFREE).
 * On error, any partially-built list is freed here and nothing is
 * returned to the caller.
 */
void SG_curl__set_headers_from_varray(SG_context * pCtx, SG_curl * pCurl, SG_varray * pvaHeaders, struct curl_slist ** ppHeaderList)
{
	CURLcode rc = CURLE_OK;
	_sg_curl* p = (_sg_curl*)pCurl;
	struct curl_slist* pHeaderList = NULL;
	SG_uint32 count = 0;
	SG_uint32 i = 0;

	SG_NULLARGCHECK_RETURN(pCurl);
	SG_NULLARGCHECK_RETURN(pvaHeaders);

	SG_ERR_CHECK( SG_varray__count(pCtx, pvaHeaders, &count) );
	for (i = 0; i < count; i++)
	{
		const char * psz = NULL;
		struct curl_slist* pAppended = NULL;

		// BUG FIX: this used SG_ERR_CHECK_RETURN, which returned without
		// running the fail: cleanup and leaked the partially-built list.
		SG_ERR_CHECK( SG_varray__get__sz(pCtx, pvaHeaders, i, &psz) );

		// BUG FIX: curl_slist_append returns NULL on failure WITHOUT
		// freeing the original list.  The old code assigned the result
		// directly to pHeaderList, losing the only pointer to the list
		// (and the _RETURN throw also skipped cleanup).  Keep the old
		// pointer until we know the append succeeded.
		pAppended = curl_slist_append(pHeaderList, psz);
		if (!pAppended)
			SG_ERR_THROW2(SG_ERR_UNSPECIFIED, (pCtx, "Failed to add HTTP header."));
		pHeaderList = pAppended;
	}

	rc = curl_easy_setopt(p->pCurl, CURLOPT_HTTPHEADER, pHeaderList);
	if (rc)
		SG_ERR_THROW2(SG_ERR_LIBCURL(rc), (pCtx, "Problem setting HTTP headers" ));

	// Transfer ownership of the header list to the caller.
	SG_RETURN_AND_NULL(pHeaderList, ppHeaderList);

fail:
	if (pHeaderList)
		SG_CURL_HEADERS_NULLFREE(pCtx, pHeaderList);
}
/**
 * Start or resume a merge-review walk of the version-control dag.
 *
 * pContinuationToken is interpreted by its first element:
 *   - a string: treated as the HID of a head changeset, starting a
 *     fresh review from that head;
 *   - anything else: treated as a continuation token previously
 *     produced through ppContinuationToken, resuming the prior walk.
 *
 * resultLimit==0 means "unlimited" (internally SG_UINT32_MAX).
 * Results come back in *ppResults / *pCountResults; *ppContinuationToken
 * (when produced) can be fed back in to fetch the next batch.
 * NOTE(review): ownership of the returned varrays presumably passes to
 * the caller — confirm against _sg_mergereview's contract.
 */
void SG_mergereview__continue(SG_context * pCtx, SG_repo * pRepo, SG_varray * pContinuationToken, SG_bool singleMergeReview, SG_vhash * pMergeBaselines, SG_uint32 resultLimit, SG_varray ** ppResults, SG_uint32 * pCountResults, SG_varray ** ppContinuationToken)
{
	_tree_t tree;
	SG_uint16 firstVariantType;

	// Zero first so the _tree__uninit at fail: is safe even if we
	// jump there before either _tree__init variant has run.
	SG_zero(tree);

	SG_ASSERT(pCtx!=NULL);
	SG_NULLARGCHECK(pRepo);
	SG_NULLARGCHECK(pContinuationToken);
	if(resultLimit==0)
		resultLimit = SG_UINT32_MAX;
	SG_NULLARGCHECK(ppResults);

	// The type of element 0 of the token decides fresh-start vs resume.
	SG_ERR_CHECK( SG_varray__typeof(pCtx, pContinuationToken, 0, &firstVariantType) );
	if(firstVariantType==SG_VARIANT_TYPE_SZ)
	{
		// Fresh start: element 0 is the head changeset's HID.
		const char * szHead = NULL;
		SG_ERR_CHECK( SG_varray__get__sz(pCtx, pContinuationToken, 0, &szHead) );
		SG_ERR_CHECK( _tree__init(pCtx, &tree, szHead, pRepo) );
	}
	else
	{
		// Resume: rebuild the traversal state from the token.
		SG_ERR_CHECK( _tree__init__continuation(pCtx, &tree, pContinuationToken, pRepo) );
	}

	SG_ERR_CHECK( _sg_mergereview(pCtx, &tree, singleMergeReview, SG_FALSE, pMergeBaselines, resultLimit, ppResults, pCountResults, ppContinuationToken) );

	_tree__uninit(pCtx, &tree);
	return;
fail:
	_tree__uninit(pCtx, &tree);
}
/**
 * Take a domain-specific/relative repo-path and get
 * the GID ALIAS of the item.
 *
 * THIS IS STRICTLY BASED UPON THE FIXED CHANGESET
 * THAT WE ALREADY HAVE IN THE tne_* TABLE.  It does
 * not know about or account for any pending changes
 * in the WD; that is, it DOES NOT know about tbl_PC.
 *
 * We DO NOT know if the given domain is appropriate
 * for the given pCSetRow.  That is up to the caller.
 * For example, we expect them to map:
 *     'b' ==> "L0"
 *     'c' ==> "L1"
 * but we don't enforce that here.
 *
 * Returns *pbFound=SG_FALSE (without throwing) when any path
 * component is not present in the tne_* table for this cset.
 */
void sg_wc_db__tne__get_alias_from_extended_repo_path(SG_context * pCtx, sg_wc_db * pDb, const sg_wc_db__cset_row * pCSetRow, const char * pszBaselineRepoPath, SG_bool * pbFound, SG_uint64 * puiAliasGid)
{
	SG_varray * pva = NULL;
	sg_wc_db__tne_row * pTneRow_k = NULL;
	SG_uint64 uiAlias;
	SG_uint32 k, count;

	// Default to "not found" so the early-out below needs no extra work.
	*pbFound = SG_FALSE;
	*puiAliasGid = 0;

	// TODO 2012/01/04 For now, require that an extended-prefix be
	// TODO            present in the repo-path.
	// TODO
	// TODO            We may relax this to allow a '/' current/live
	// TODO            domain repo-path eventually.
	SG_ASSERT_RELEASE_FAIL( ((pszBaselineRepoPath[0]=='@') && (pszBaselineRepoPath[1]!='/')) );

	SG_ERR_CHECK( SG_repopath__split_into_varray(pCtx, pszBaselineRepoPath, &pva) );

	// the root directory should be "@b" and be contained in pva[0].
	// we have a direct line to its alias.
	SG_ERR_CHECK( sg_wc_db__tne__get_alias_of_root(pCtx, pDb, pCSetRow, &uiAlias) );

	// Walk each remaining path component down the tree, resolving the
	// child's alias from (parent alias, entryname) at each step.
	SG_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	for (k=1; k<count; k++)
	{
		const char * pszEntryname_k;
		SG_bool bFound_k;

		SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva, k, &pszEntryname_k) );
		SG_ERR_CHECK( sg_wc_db__tne__get_row_by_parent_alias_and_entryname(pCtx, pDb, pCSetRow, uiAlias, pszEntryname_k, &bFound_k, &pTneRow_k) );
		if (!bFound_k)
			goto fail;	// not an error: *pbFound stays SG_FALSE; fail: is just cleanup
		uiAlias = pTneRow_k->p_s->uiAliasGid;
		SG_WC_DB__TNE_ROW__NULLFREE(pCtx, pTneRow_k);
	}

	*pbFound = SG_TRUE;
	*puiAliasGid = uiAlias;

fail:
	SG_VARRAY_NULLFREE(pCtx, pva);
	SG_WC_DB__TNE_ROW__NULLFREE(pCtx, pTneRow_k);
}
/**
 * Remove the first element equal to psz_val from the varray setting
 * stored under psz_path in localsettings.
 *
 * If the current value came from factory defaults (no explicit scope
 * path), the default array is first materialized under the MACHINE
 * scope so the removal has something concrete to edit.  If psz_val is
 * not present, the setting is left unchanged.
 */
void SG_localsettings__varray__remove_first_match(SG_context * pCtx, const char* psz_path, const char* psz_val)
{
	SG_jsondb* p = NULL;
	SG_string* pstr_path_element = NULL;
	SG_varray* pva = NULL;
	SG_uint32 ndx = 0;
	SG_uint32 count = 0;
	SG_uint32 i = 0;
	SG_bool b_found = SG_FALSE;
	SG_string* pstr_path_found = NULL;

	SG_ASSERT(pCtx);
	SG_NONEMPTYCHECK_RETURN(psz_path);

	SG_ERR_CHECK( SG_closet__get_localsettings(pCtx, &p) );
	SG_ERR_CHECK( SG_localsettings__get__varray(pCtx, psz_path, NULL, &pva, &pstr_path_found) );
	if (pva)
	{
		if (!pstr_path_found)
		{
			// this came from factory defaults: write the default array
			// into the MACHINE scope so we have a jsondb path to edit.
			SG_ERR_CHECK( SG_string__alloc(pCtx, &pstr_path_found) );
			SG_ERR_CHECK( SG_string__sprintf(pCtx, pstr_path_found, "%s/%s", SG_LOCALSETTING__SCOPE__MACHINE, psz_path) );
			SG_ERR_CHECK( SG_localsettings__update__varray(pCtx, SG_string__sz(pstr_path_found), pva) );
		}

		// Find the index of the first element equal to psz_val.
		SG_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
		for (i=0; i<count; i++)
		{
			const char* psz = NULL;
			SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva, i, &psz) );
			if (0 == strcmp(psz, psz_val))
			{
				b_found = SG_TRUE;
				ndx = i;
				break;
			}
		}

		if (b_found)
		{
			// Remove element ndx by its jsondb path "<scope-path>/<index>".
			// BUG FIX: ndx is SG_uint32; format with %u, not %d.
			SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pstr_path_element) );
			SG_ERR_CHECK( SG_string__sprintf(pCtx, pstr_path_element, "%s/%u", SG_string__sz(pstr_path_found), ndx) );
			SG_ERR_CHECK( SG_jsondb__remove(pCtx, p, SG_string__sz(pstr_path_element)) );
		}
	}

fail:
	SG_VARRAY_NULLFREE(pCtx, pva);
	SG_STRING_NULLFREE(pCtx, pstr_path_found);
	SG_STRING_NULLFREE(pCtx, pstr_path_element);
	SG_JSONDB_NULLFREE(pCtx, p);
}
/**
 * Return, in *ppszBaseline, a freshly-allocated copy of the HID of the
 * BASELINE changeset (PARENT[0] of the working directory's pendingtree).
 * The caller owns the returned string and must SG_NULLFREE it.
 */
static void _get_baseline(SG_context * pCtx, SG_pendingtree * pPendingTree, char ** ppszBaseline)
{
	const SG_varray * pva_wd_parents;	// borrowed from pPendingTree
	const char * psz_hid_parent_0;		// borrowed from pva_wd_parents
	char * psz_copy = NULL;

	// PARENT[0] of the pendingtree is, by definition, the baseline.
	SG_ERR_CHECK( SG_pendingtree__get_wd_parents__ref(pCtx, pPendingTree, &pva_wd_parents) );
	SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva_wd_parents, 0, &psz_hid_parent_0) );

	// Duplicate it so our result outlives the pendingtree's internal data.
	SG_ERR_CHECK( SG_strdup(pCtx, psz_hid_parent_0, &psz_copy) );

	*ppszBaseline = psz_copy;
	return;

fail:
	SG_NULLFREE(pCtx, psz_copy);
}
/**
 * Apply one or more tags to a changeset.
 *
 * The target changeset is chosen by:
 *   - psz_spec_cs with bRev==SG_TRUE : a revision (hid prefix) lookup;
 *   - psz_spec_cs with bRev==SG_FALSE: an existing tag name (throws
 *     SG_ERR_TAG_NOT_FOUND if it resolves to nothing);
 *   - psz_spec_cs==NULL              : the current baseline (PARENT[0]);
 *     throws SG_ERR_CANNOT_DO_WHILE_UNCOMMITTED_MERGE if the WD has
 *     more than one parent.
 *
 * With bForce==SG_FALSE, throws SG_ERR_TAG_ALREADY_EXISTS if any of the
 * requested tags is already attached to a DIFFERENT changeset.  With
 * bForce==SG_TRUE, such tags are moved (removed, then re-added here).
 *
 * pPendingTree may be NULL; one is allocated from the CWD on demand and
 * freed before returning (bFreePendingTree tracks that ownership).
 */
void SG_tag__add_tags(SG_context * pCtx, SG_repo * pRepo, SG_pendingtree * pPendingTree, const char* psz_spec_cs, SG_bool bRev, SG_bool bForce, const char** ppszTags, SG_uint32 count_args)
{
	SG_pathname* pPathCwd = NULL;
	char* psz_hid_cs = NULL;
	SG_audit q;
	SG_uint32 i = 0;
	char * psz_current_hid_with_that_tag = NULL;
	SG_bool bFreePendingTree = SG_FALSE;

	SG_ERR_CHECK( SG_audit__init(pCtx,&q,pRepo,SG_AUDIT__WHEN__NOW,SG_AUDIT__WHO__FROM_SETTINGS) );

	// TODO 4/21/10 pendingtree contains a pRepo inside it.  we should
	// TODO 4/21/10 refactor this to alloc the pendingtree first and then
	// TODO 4/21/10 just borrow the pRepo from it.

	if (psz_spec_cs)
	{
		if (bRev)
		{
			// Resolve a revision spec (hid or hid prefix) to a full HID.
			SG_ERR_CHECK( SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, psz_spec_cs, &psz_hid_cs) );
		}
		else
		{
			// Resolve an existing tag name to the HID it points at.
			SG_ERR_CHECK( SG_vc_tags__lookup__tag(pCtx, pRepo, psz_spec_cs, &psz_hid_cs) );
			if (psz_hid_cs == NULL)
				SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
		}
	}
	else
	{
		// tag the current baseline.
		//
		// when we have an uncomitted merge, we will have more than one parent.
		// what does this command mean then?  It feels like we we should throw
		// an error and say that you have to commit first.

		const SG_varray * pva_wd_parents;		// we do not own this
		const char * psz_hid_parent_0;			// we do not own this
		SG_uint32 nrParents;

		if (pPendingTree == NULL)
		{
			SG_ERR_CHECK( SG_pendingtree__alloc_from_cwd(pCtx, SG_TRUE, &pPendingTree) );
			bFreePendingTree = SG_TRUE;
		}
		SG_ERR_CHECK( SG_pendingtree__get_wd_parents__ref(pCtx, pPendingTree, &pva_wd_parents) );
		SG_ERR_CHECK( SG_varray__count(pCtx, pva_wd_parents, &nrParents) );
		if (nrParents > 1)
			SG_ERR_THROW( SG_ERR_CANNOT_DO_WHILE_UNCOMMITTED_MERGE );
		SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva_wd_parents, 0, &psz_hid_parent_0) );
		SG_ERR_CHECK( SG_strdup(pCtx, psz_hid_parent_0, &psz_hid_cs) );
	}

	if (!bForce)
	{
		//Go through and check all tags to make sure that they are not already applied.
		for (i = 0; i < count_args; i++)
		{
			const char * pszTag = ppszTags[i];
			// SG_ERR_IGNORE: a failed lookup just leaves the hid NULL,
			// which we treat as "tag not in use".
			SG_ERR_IGNORE( SG_vc_tags__lookup__tag(pCtx, pRepo, pszTag, &psz_current_hid_with_that_tag) );
			if (psz_current_hid_with_that_tag != NULL && 0 != strcmp(psz_current_hid_with_that_tag, psz_hid_cs)) //The tag has been applied, but not to the given changeset.
				SG_ERR_THROW(SG_ERR_TAG_ALREADY_EXISTS);
			SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
		}
	}

	// Second pass: actually apply (or move, when forcing) each tag.
	for (i = 0; i < count_args; i++)
	{
		const char * pszTag = ppszTags[i];
		SG_ERR_CHECK( SG_vc_tags__lookup__tag(pCtx, pRepo, pszTag, &psz_current_hid_with_that_tag) );
		if (psz_current_hid_with_that_tag == NULL || 0 != strcmp(psz_current_hid_with_that_tag, psz_hid_cs))
		{
			//The tag has not been applied, or it's been applied to a different dagnode.
			if ( psz_current_hid_with_that_tag != NULL && bForce) //Remove it, if it's already there
				SG_ERR_CHECK( SG_vc_tags__remove(pCtx, pRepo, &q, 1, &pszTag) );
			SG_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, pszTag, &q) );
		}
		SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
	}

	// NOTE: no return before fail: — this cleanup runs on both the
	// success path and the error path.
fail:
	SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
	if (bFreePendingTree == SG_TRUE)
		SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
	SG_NULLFREE(pCtx, psz_hid_cs);
	SG_PATHNAME_NULLFREE(pCtx, pPathCwd);
}
/**
 * Fold one step of a changeset path into a running add/remove delta.
 *
 * pva_path[i_path_step] is the current csid and pva_path[i_path_step+1]
 * its parent on the path.  The changeset's per-parent db changes are
 * looked up and merged into the accumulators pvh_add / pvh_remove:
 * a record cancels out of the opposite set if present, otherwise it is
 * added to its own set.
 *
 * NOTE(review): the "add" bloblist is deliberately(?) read into
 * pvh_cs_remove and "remove" into pvh_cs_add.  Since the path is walked
 * from the current cset back toward its parent, each recorded change is
 * presumably being applied in reverse — confirm before "fixing" the
 * apparent swap.
 */
static void loop_innards_make_delta(SG_context* pCtx, SG_repo* pRepo, SG_varray* pva_path, SG_uint32 i_path_step, SG_vhash* pvh_add, SG_vhash* pvh_remove)
{
	SG_changeset* pcs = NULL;
	const char* psz_csid_cur = NULL;
	const char* psz_csid_parent = NULL;
	SG_vhash* pvh_changes = NULL;
	SG_vhash* pvh_one_parent_changes = NULL;
	SG_vhash* pvh_cs_add = NULL;
	SG_vhash* pvh_cs_remove = NULL;

	// Adjacent pair on the path: current cset and the parent we step to.
	SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva_path, i_path_step, &psz_csid_cur) );
	SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva_path, i_path_step + 1, &psz_csid_parent) );

	SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepo, psz_csid_cur, &pcs) );
	SG_ERR_CHECK( SG_changeset__db__get_changes(pCtx, pcs, &pvh_changes) );

	// Changes are stored per-parent; select the ones relative to this parent.
	SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_changes, psz_csid_parent, &pvh_one_parent_changes) );

	// See NOTE(review) above about the add/remove crossover here.
	// __check__ variants leave the out-pointer NULL if the key is absent.
	SG_ERR_CHECK( SG_vhash__check__vhash(pCtx, pvh_one_parent_changes, "add", &pvh_cs_remove) );
	SG_ERR_CHECK( SG_vhash__check__vhash(pCtx, pvh_one_parent_changes, "remove", &pvh_cs_add) );

	if (pvh_cs_add)
	{
		SG_uint32 count = 0;
		SG_uint32 i = 0;
		SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_cs_add, &count) );
		for (i=0; i<count; i++)
		{
			const char* psz_hid_rec = NULL;
			SG_bool b = SG_FALSE;
			SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_cs_add, i, &psz_hid_rec, NULL) );
			// If it was pending removal, the two cancel; otherwise record the add.
			SG_ERR_CHECK( SG_vhash__remove_if_present(pCtx, pvh_remove, psz_hid_rec, &b) );
			if (!b)
			{
				SG_ERR_CHECK( SG_vhash__update__null(pCtx, pvh_add, psz_hid_rec) );
			}
		}
	}

	if (pvh_cs_remove)
	{
		SG_uint32 count = 0;
		SG_uint32 i = 0;
		SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_cs_remove, &count) );
		for (i=0; i<count; i++)
		{
			const char* psz_hid_rec = NULL;
			SG_bool b = SG_FALSE;
			SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_cs_remove, i, &psz_hid_rec, NULL) );
			// Mirror image: cancel a pending add, else record the remove.
			SG_ERR_CHECK( SG_vhash__remove_if_present(pCtx, pvh_add, psz_hid_rec, &b) );
			if (!b)
			{
				SG_ERR_CHECK( SG_vhash__update__null(pCtx, pvh_remove, psz_hid_rec) );
			}
		}
	}

fail:
	SG_CHANGESET_NULLFREE(pCtx, pcs);
}
/**
 * Run 'vv history' in the context of the current working folder.
 *
 * Resolves the inputs (file/folder paths -> GIDs) and the revision
 * specs into starting changesets, decides whether a full dag walk is
 * needed, and delegates to SG_history__run.
 *
 * On success: *ppResult / *ppHistoryToken (and *pbHasResult) receive
 * the history output; if ppvhBranchPile is non-NULL it receives the
 * repo's branch pile (callers commonly need it to format output).
 *
 * NOTE: this deviates from the usual model in two documented ways:
 * it opens a read-only WC TX just to gather info, and SG_history__run
 * writes directly into the caller's out-pointers (see TODO below about
 * a possible leak if a later step fails).
 */
void sg_vv2__history__working_folder(
	SG_context * pCtx,
	const SG_stringarray * psaInputs,
	const SG_rev_spec* pRevSpec,
	const SG_rev_spec* pRevSpec_single_revisions,
	const char* pszUser,
	const char* pszStamp,
	SG_bool bDetectCurrentBranch,
	SG_uint32 nResultLimit,
	SG_bool bHideObjectMerges,
	SG_int64 nFromDate,
	SG_int64 nToDate,
	SG_bool bListAll,
	SG_bool* pbHasResult,
	SG_vhash** ppvhBranchPile,
	SG_history_result ** ppResult,
	SG_history_token ** ppHistoryToken)
{
	SG_repo * pRepo = NULL;
	SG_stringarray * pStringArrayGIDs = NULL;
	SG_stringarray * pStringArrayChangesets = NULL;
	SG_stringarray * pStringArrayChangesetsMissing = NULL;
	SG_stringarray * pStringArrayChangesets_single_revisions = NULL;
	SG_bool bRecommendDagWalk = SG_FALSE;
	SG_bool bLeaves = SG_FALSE;
	const char * pszBranchName = NULL;		// we do not own this
	SG_vhash* pvhBranchPile = NULL;
	SG_varray* pvaParents = NULL;			// we do not own this
	SG_bool bMyBranchWalkRecommendation = SG_FALSE;
	SG_rev_spec* pRevSpec_Allocated = NULL;
	SG_wc_tx * pWcTx = NULL;
	SG_vhash * pvhInfo = NULL;
	SG_uint32 count_args = 0;
	SG_uint32 countRevsSpecified = 0;

	if (psaInputs)
		SG_ERR_CHECK( SG_stringarray__count(pCtx, psaInputs, &count_args) );

	// Use the WD to try to get the initial info.
	// I'm going to deviate from the model and use
	// a read-only TX here so that I can get a bunch
	// of fields that we need later.
	SG_ERR_CHECK( SG_WC_TX__ALLOC__BEGIN(pCtx, &pWcTx, NULL, SG_TRUE) );

	if (count_args > 0)
		SG_ERR_CHECK( SG_wc_tx__get_item_gid__stringarray(pCtx, pWcTx, psaInputs, &pStringArrayGIDs) );

	SG_ERR_CHECK( SG_wc_tx__get_wc_info(pCtx, pWcTx, &pvhInfo) );
	SG_ERR_CHECK( SG_wc_tx__get_repo_and_wd_top(pCtx, pWcTx, &pRepo, NULL) );

	/* If no revisions were specified, and the caller wants us to use the current branch,
	 * create a revision spec with the current branch.
	 */
	if (pRevSpec)
	{
		SG_ERR_CHECK( SG_REV_SPEC__ALLOC__COPY(pCtx, pRevSpec, &pRevSpec_Allocated) );
		SG_ERR_CHECK( SG_rev_spec__count(pCtx, pRevSpec_Allocated, &countRevsSpecified) );
	}
	else
	{
		SG_ERR_CHECK( SG_REV_SPEC__ALLOC(pCtx, &pRevSpec_Allocated) );
	}

	// Singles count toward "did the user name any revisions?" too.
	if (pRevSpec_single_revisions != NULL)
	{
		SG_uint32 countRevsSpecified_singles = 0;
		SG_ERR_CHECK( SG_rev_spec__count(pCtx, pRevSpec_single_revisions, &countRevsSpecified_singles) );
		countRevsSpecified += countRevsSpecified_singles;
	}

	if (bDetectCurrentBranch && countRevsSpecified == 0)
	{
		SG_ERR_CHECK( SG_vhash__check__sz(pCtx, pvhInfo, "branch", &pszBranchName) );
		if (pszBranchName)
		{
			/* The working folder is attached to a branch. Does it exist? */
			SG_bool bHasBranches = SG_FALSE;
			SG_bool bBranchExists = SG_FALSE;

			SG_ERR_CHECK( SG_vc_branches__cleanup(pCtx, pRepo, &pvhBranchPile) );
			SG_ERR_CHECK( SG_vhash__has(pCtx, pvhBranchPile, "branches", &bHasBranches) );
			if (bHasBranches)
			{
				SG_vhash* pvhRefBranches;
				SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhBranchPile, "branches", &pvhRefBranches) );
				SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRefBranches, pszBranchName, &bBranchExists) );
			}

			if (bBranchExists)
			{
				SG_uint32 numParents, i;
				const char* pszRefParent;

				/* If that branch exists, just add to our rev spec. */
				SG_ERR_CHECK( SG_rev_spec__add_branch(pCtx, pRevSpec_Allocated, pszBranchName) );

				/* Plus, if the working folder's parents are not in the branch (yet), add them as well
				 * (they'll be in it after the user commits something...).
				 */
				SG_ERR_CHECK( SG_vhash__get__varray(pCtx, pvhInfo, "parents", &pvaParents) );
				SG_ERR_CHECK( SG_varray__count(pCtx, pvaParents, &numParents) );
				for (i = 0; i < numParents; i++)
				{
					SG_bool already_in_rev_spec = SG_FALSE;
					SG_ERR_CHECK( SG_varray__get__sz(pCtx, pvaParents, i, &pszRefParent) );
					SG_ERR_CHECK( SG_rev_spec__contains(pCtx, pRepo, pRevSpec_Allocated, pszRefParent, &already_in_rev_spec) );
					if(!already_in_rev_spec)
						SG_ERR_CHECK( SG_rev_spec__add_rev(pCtx, pRevSpec_Allocated, pszRefParent) );
				}
			}
			else
			{
				/* If the branch doesn't exist, add the working folder's baseline(s) to the rev spec
				 * and force a dag walk. */
				SG_uint32 numParents, i;
				const char* pszRefParent;

				SG_ERR_CHECK( SG_vhash__get__varray(pCtx, pvhInfo, "parents", &pvaParents) );
				SG_ERR_CHECK( SG_varray__count(pCtx, pvaParents, &numParents) );
				for (i = 0; i < numParents; i++)
				{
					SG_ERR_CHECK( SG_varray__get__sz(pCtx, pvaParents, i, &pszRefParent) );
					SG_ERR_CHECK( SG_rev_spec__add_rev(pCtx, pRevSpec_Allocated, pszRefParent) );
				}

				bMyBranchWalkRecommendation = SG_TRUE;
			}
		}
	}

	// Determine the starting changeset IDs.  strBranch and bLeaves control this.
	// We do this step here, so that repo paths can be looked up before we call into history__core.
	SG_ERR_CHECK( sg_vv2__history__get_starting_changesets(pCtx, pRepo, pRevSpec_Allocated, &pStringArrayChangesets, &pStringArrayChangesetsMissing, &bRecommendDagWalk, &bLeaves) );

	if (pStringArrayChangesetsMissing)
	{
		// See K2177, K1322, W0836, W8132.  We requested specific starting
		// points and ran into some csets that were referenced (by --tag
		// or --branch) that are not present in the local repo.  Try to
		// silently ignore them.
		SG_uint32 nrFound = 0;
		SG_ERR_CHECK( SG_stringarray__count(pCtx, pStringArrayChangesets, &nrFound) );
		if (nrFound > 0)
		{
			// Yes there were missing csets, but we still found some
			// of the referenced ones.  Just ignore the missing ones.
			// This should behave just like we had the older tag/branch
			// dag prior to the push -r on the vc dag.
		}
		else
		{
			const char * psz_0;

			// TODO 2012/10/19 Do we want a different message if the number of missing is > 1 ?
			SG_ERR_CHECK( SG_stringarray__get_nth(pCtx, pStringArrayChangesetsMissing, 0, &psz_0) );
			SG_ERR_THROW2( SG_ERR_CHANGESET_BLOB_NOT_FOUND, (pCtx, "%s", psz_0) );
		}
	}

	bRecommendDagWalk = bRecommendDagWalk || bMyBranchWalkRecommendation;

	//This hack is here to detect when we're being asked for the parent of a certain
	//object from the sg_parents code.  parents always wants the dag walk.
	//The better solution would be to allow users to pass in a flag about their dagwalk
	//preferences
	if (count_args == 1 && nResultLimit == 1)
		bRecommendDagWalk = SG_TRUE;

	if (bListAll)
	{
		// See W8493.  If they gave us a --list-all along with a --rev or --tag, they
		// want to force us to show the full history rather than just the info for the
		// named cset.
		bRecommendDagWalk = SG_TRUE;
	}

	if (pRevSpec_single_revisions)
	{
		// We DO NOT pass a psaMissingHids here because we want
		// it to throw if the user names a missing cset.
		SG_ERR_CHECK( SG_rev_spec__get_all__repo__dedup(pCtx, pRepo, pRevSpec_single_revisions, SG_TRUE, &pStringArrayChangesets_single_revisions, NULL) );
	}

	// TODO 2012/07/03 The deviates from the model.  This call directly returns the
	// TODO            allocated data into the caller's pointers.  If anything fails
	// TODO            (such as the call to get the branches below), we'll probably
	// TODO            leak the result and token.
	SG_ERR_CHECK( SG_history__run(pCtx, pRepo, pStringArrayGIDs, pStringArrayChangesets, pStringArrayChangesets_single_revisions, pszUser, pszStamp, nResultLimit, bLeaves, bHideObjectMerges, nFromDate, nToDate, bRecommendDagWalk, SG_FALSE, pbHasResult, ppResult, ppHistoryToken) );

	/* This is kind of a hack.  History callers often need branch data to format ouput.
	 * But we open the repo down here.  I didn't want to open/close it again.  And there's logic
	 * in here about which repo to open.  So instead, we do this.
	 */
	if (ppvhBranchPile)
	{
		if (pvhBranchPile)
		{
			// We already fetched the pile above for branch detection; hand it over.
			*ppvhBranchPile = pvhBranchPile;
			pvhBranchPile = NULL;
		}
		else
			SG_ERR_CHECK( SG_vc_branches__cleanup(pCtx, pRepo, ppvhBranchPile) );
	}

	// NOTE: no return before fail: — cleanup runs on success and error alike.
fail:
	SG_ERR_IGNORE( SG_wc_tx__cancel(pCtx, pWcTx) );		// read-only TX: cancel, never apply
	SG_WC_TX__NULLFREE(pCtx, pWcTx);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec_Allocated);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesetsMissing);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets_single_revisions);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayGIDs);
	SG_VHASH_NULLFREE(pCtx, pvhBranchPile);
	SG_VHASH_NULLFREE(pCtx, pvhInfo);
	SG_REPO_NULLFREE(pCtx, pRepo);
}
/**
 * Recursively collect blob HIDs reachable from the changeset psz_hid_cs
 * into prb_blobs: its tree-user-file blobs, its treenode blobs, the
 * blobs found by walking the tree from its root treenode, and then the
 * same for each parent changeset.
 */
void sg_pack__do_changeset(SG_context* pCtx, SG_repo* pRepo, const char* psz_hid_cs, SG_rbtree* prb_blobs)
{
	SG_changeset* pcs = NULL;
	SG_int32 gen = 0;
	SG_uint32 count_blobs = 0;
	SG_uint32 count_parents = 0;
	SG_varray* pva_parents = NULL;
	SG_uint32 i;
	SG_rbtree* prb_new = NULL;
	const char* psz_hid_root_treenode = NULL;
	const char* psz_key = NULL;
	SG_vhash* pvh_lbl = NULL;
	SG_vhash* pvh_blobs = NULL;

	SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs, &pcs) );
	SG_ERR_CHECK( SG_changeset__get_root(pCtx, pcs, &psz_hid_root_treenode) );
	SG_ERR_CHECK( SG_changeset__get_generation(pCtx, pcs, &gen) );

	// NOTE: count_blobs is still 0 here, so this is just a default-sized
	// rbtree; the real counts are read below.
	SG_ERR_CHECK( SG_RBTREE__ALLOC__PARAMS(pCtx, &prb_new, count_blobs, NULL) );

	SG_ERR_CHECK( SG_changeset__get_list_of_bloblists(pCtx, pcs, &pvh_lbl) );

	/* add all the tree user file blobs */
	SG_ERR_CHECK( SG_changeset__get_bloblist_name(pCtx, SG_BLOB_REFTYPE__TREEUSERFILE, &psz_key) );
	SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_lbl, psz_key, &pvh_blobs) );
	SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_blobs, &count_blobs) );
	for (i=0; i<count_blobs; i++)
	{
		const char* psz_hid = NULL;

		SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_blobs, i, &psz_hid, NULL) );
		SG_ERR_CHECK( SG_rbtree__add(pCtx, prb_new, psz_hid) );
	}

	/* and the treenode blobs */
	SG_ERR_CHECK( SG_changeset__get_bloblist_name(pCtx, SG_BLOB_REFTYPE__TREENODE, &psz_key) );
	SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_lbl, psz_key, &pvh_blobs) );
	SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_blobs, &count_blobs) );
	for (i=0; i<count_blobs; i++)
	{
		const char* psz_hid = NULL;

		// BUG FIX: this loop never fetched the nth key, so psz_hid stayed
		// NULL and every treenode blob was added as NULL.  Fetch the key
		// exactly as the tree-user-file loop above does.
		SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_blobs, i, &psz_hid, NULL) );
		SG_ERR_CHECK( SG_rbtree__add(pCtx, prb_new, psz_hid) );
	}

	SG_ERR_CHECK( sg_pack__do_get_dir__top(pCtx, pRepo, gen, psz_hid_root_treenode, prb_blobs, prb_new) );
	SG_RBTREE_NULLFREE(pCtx, prb_new);

	// Recurse into each parent changeset (pva_parents may be NULL for root).
	SG_ERR_CHECK( SG_changeset__get_parents(pCtx, pcs, &pva_parents) );
	if (pva_parents)
	{
		SG_ERR_CHECK( SG_varray__count(pCtx, pva_parents, &count_parents) );
		for (i=0; i<count_parents; i++)
		{
			const char* psz_hid = NULL;

			SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva_parents, i, &psz_hid) );
			SG_ERR_CHECK( sg_pack__do_changeset(pCtx, pRepo, psz_hid, prb_blobs) );
		}
	}

	SG_CHANGESET_NULLFREE(pCtx, pcs);
	return;

fail:
	SG_RBTREE_NULLFREE(pCtx, prb_new);
	// BUG FIX: the changeset was leaked on the error path.
	SG_CHANGESET_NULLFREE(pCtx, pcs);
}