/**
 * Print the log/history details for a single changeset HID to the console.
 *
 * If the HID cannot be resolved (a branch head can reference a changeset
 * that is not present in this repo), we print what we can rather than
 * failing outright.
 *
 * pvhCleanPileOfBranches is the optional branch pile (with "closed" and
 * "values" sub-hashes) used to annotate the output with branch names.
 */
void SG_cmd_util__dump_log(
	SG_context * pCtx,
	SG_console_stream cs,
	SG_repo* pRepo,
	const char* psz_hid_cs,
	SG_vhash* pvhCleanPileOfBranches,
	SG_bool bShowOnlyOpenBranchNames,
	SG_bool bShowFullComments)
{
	SG_history_result* pHistResult = NULL;
	SG_stringarray * psaHids = NULL;

	// BUGFIX: the allocation was not error-checked; on a failed alloc
	// the add below would have been handed a NULL stringarray.
	SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psaHids, 1)  );
	SG_ERR_CHECK(  SG_stringarray__add(pCtx, psaHids, psz_hid_cs)  );

	// Deliberately NOT wrapped in SG_ERR_CHECK: a NOT_FOUND error is
	// handled specially just below.
	SG_history__get_revision_details(pCtx, pRepo, psaHids, NULL, &pHistResult);

	if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
	{
		/* There's a branch that references a changeset that doesn't exist. Show what we can. */
		SG_vhash* pvhRefClosedBranches = NULL;
		SG_vhash* pvhRefBranchValues = NULL;

		SG_context__err_reset(pCtx);

		if (pvhCleanPileOfBranches)
		{
			SG_bool bHas = SG_FALSE;
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhCleanPileOfBranches, "closed", &bHas)  );
			if (bHas)
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhCleanPileOfBranches, "closed", &pvhRefClosedBranches)  );
			SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhCleanPileOfBranches, "values", &pvhRefBranchValues)  );
		}

		SG_ERR_CHECK(  SG_console(pCtx, cs, "\n\t%8s: %s\n", "revision", psz_hid_cs)  );
		SG_ERR_CHECK(  _dump_branch_name(pCtx, cs, psz_hid_cs, bShowOnlyOpenBranchNames, pvhRefBranchValues, pvhRefClosedBranches)  );
		SG_ERR_CHECK(  SG_console(pCtx, cs, "\t%8s %s\n", "", "(not present in repository)")  );
	}
	else
	{
		// Propagate any error other than NOT_FOUND.
		SG_ERR_CHECK_CURRENT;
		SG_ERR_CHECK(  SG_cmd_util__dump_history_results(pCtx, cs, pHistResult, pvhCleanPileOfBranches, bShowOnlyOpenBranchNames, bShowFullComments, SG_FALSE)  );
	}

fail:
	SG_HISTORY_RESULT_NULLFREE(pCtx, pHistResult);
	SG_STRINGARRAY_NULLFREE(pCtx, psaHids);
}
/**
 * Look up history details (as a varray) for the version-control hids
 * listed in the request vhash.
 *
 * The request maps the decimal version-control dagnum to a vhash whose
 * keys are the changeset hids of interest.
 */
void SG_server__get_dagnode_info(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_vhash* pvhRequest,
	SG_varray** ppvaInfo)
{
	char szDagnum[SG_DAGNUM__BUF_MAX__DEC];
	SG_vhash* pvhRefVcHids = NULL;          // we do not own this
	SG_varray* pvaKeys = NULL;
	SG_stringarray* psaKeys = NULL;
	const char* const* ppszKeys = NULL;
	SG_uint32 cKeys = 0;

	// Find the version-control section of the request by its decimal dagnum key.
	SG_ERR_CHECK(  SG_dagnum__to_sz__decimal(pCtx, SG_DAGNUM__VERSION_CONTROL, szDagnum, sizeof(szDagnum))  );
	SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, szDagnum, &pvhRefVcHids)  );

	// Convert the hid keys: vhash -> varray -> stringarray -> char**.
	// Clunky, but it is the shape the history query wants.
	SG_ERR_CHECK(  SG_vhash__get_keys(pCtx, pvhRefVcHids, &pvaKeys)  );
	SG_ERR_CHECK(  SG_varray__to_stringarray(pCtx, pvaKeys, &psaKeys)  );
	SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, psaKeys, &ppszKeys, &cKeys)  );

	SG_ERR_CHECK(  SG_history__query(pCtx, NULL, pRepo, 0, NULL, ppszKeys, cKeys,
									 NULL, NULL, 0, 0, 0, SG_FALSE, SG_FALSE, ppvaInfo)  );

	/* fall through */
fail:
	SG_VARRAY_NULLFREE(pCtx, pvaKeys);
	SG_STRINGARRAY_NULLFREE(pCtx, psaKeys);
}
/**
 * Fetch revision details for the version-control hids listed in the
 * request vhash, returning a history result.
 *
 * The request maps the HEX version-control dagnum (note: decimal in the
 * SG_server_* counterpart) to a vhash whose keys are changeset hids.
 */
void SG_sync_remote__get_dagnode_info(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_vhash* pvhRequest,
	SG_history_result** ppInfo)
{
	char szDagnum[SG_DAGNUM__BUF_MAX__HEX];
	SG_vhash* pvhRefVcHids = NULL;      // we do not own this
	SG_varray* pvaKeys = NULL;
	SG_stringarray* psaKeys = NULL;

	// Locate the version-control section of the request by its hex dagnum key.
	SG_ERR_CHECK(  SG_dagnum__to_sz__hex(pCtx, SG_DAGNUM__VERSION_CONTROL, szDagnum, sizeof(szDagnum))  );
	SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, szDagnum, &pvhRefVcHids)  );

	// Convert the hid keys: vhash -> varray -> stringarray.
	SG_ERR_CHECK(  SG_vhash__get_keys(pCtx, pvhRefVcHids, &pvaKeys)  );
	SG_ERR_CHECK(  SG_varray__to_stringarray(pCtx, pvaKeys, &psaKeys)  );

	SG_ERR_CHECK(  SG_history__get_revision_details(pCtx, pRepo, psaKeys, NULL, ppInfo)  );

	/* fall through */
fail:
	SG_VARRAY_NULLFREE(pCtx, pvaKeys);
	SG_STRINGARRAY_NULLFREE(pCtx, psaKeys);
}
/**
 * For MODIFIED or DELETED items we need to populate the left side
 * of the diff.  This should be called after _pick_tool() (we read the
 * "tool" field that _pick_tool() stored in pvhDiffItem).
 *
 * Put the various fields that we need to use in the call to
 * SG_difftool__run() into the pvhDiffItem for later:
 *   "left_label"    -- always set
 *   "left_abs_path" -- set unless the chosen tool is the internal SKIP tool
 *
 * On error, partial fields may already have been added to pvhDiffItem;
 * temp-file and label allocations are released on all paths.
 */
static void _get_left_side_details(SG_context * pCtx,
								   sg_wc6diff__setup_data * pData,
								   const SG_vhash * pvhItem,
								   SG_vhash * pvhDiffItem)
{
	SG_string * pStringLabel_left = NULL;
	SG_pathname * pPathAbsolute_left = NULL;
	SG_vhash * pvhSubsection_left = NULL;	// we do not own this
	const char * pszRepoPath_left;
	const char * pszGid;
	const char * pszHid_left;
	const char * pszToolName = NULL;
	const char * pszName_left;

	// get the repo-path for the left side *AS IT EXISTED IN THE LEFT CSET*.
	// (the item may have been renamed/moved on the right side.)
	SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhItem, pData->pszSubsectionLeft, &pvhSubsection_left)  );
	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhSubsection_left, "hid", &pszHid_left)  );
	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhSubsection_left, "path", &pszRepoPath_left)  );
	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhSubsection_left, "name", &pszName_left)  );

	// left label is "<left_repo-path> <hid>" (we have an HID and since no date makes sense)
	SG_ERR_CHECK(  sg_wc_diff_utils__make_label(pCtx, pszRepoPath_left, pszHid_left, NULL, &pStringLabel_left)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhDiffItem, "left_label", SG_string__sz(pStringLabel_left))  );

	// "tool" was stored in pvhDiffItem by _pick_tool() before we were called.
	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhDiffItem, "tool", &pszToolName)  );
	if (strcmp(pszToolName, SG_DIFFTOOL__INTERNAL__SKIP) == 0)
	{
		// There's no point in exporting a binary file into TEMP so
		// that we can invoke a no-op difftool.
		// See W5937.
	}
	else
	{
		// fetch the baseline version of the file into a temp file.
		// the left side should be read-only because it refers to a
		// historical version (regardless of whether or not we are
		// interactive).
		SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid)  );
		SG_ERR_CHECK(  sg_wc_diff_utils__export_to_temp_file(pCtx, pData->pWcTx, pData->pszSubsectionLeft, pszGid, pszHid_left, pszName_left, &pPathAbsolute_left)  );
		// mark the exported temp file read-only (owner read, 0400).
		SG_ERR_CHECK(  SG_fsobj__chmod__pathname(pCtx, pPathAbsolute_left, 0400)  );
		SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhDiffItem, "left_abs_path", SG_pathname__sz(pPathAbsolute_left))  );
	}

fail:
	SG_PATHNAME_NULLFREE(pCtx, pPathAbsolute_left);
	SG_STRING_NULLFREE(pCtx, pStringLabel_left);
}
/**
 * Pull leaves of every dag into staging (without committing them) and
 * report, via ppvaInfo, details of the nodes that would be new to the
 * local repo ("incoming").
 *
 * ppvaInfo is written only when the staging status reports new nodes;
 * otherwise it is left untouched.
 */
void SG_pull__all__list_incoming(
	SG_context* pCtx,
	const char* pszPullIntoRepoDescriptorName,
	SG_client* pClient,
	SG_varray** ppvaInfo)
{
	sg_pull_instance_data* pMe = NULL;
	char* pszFragballName = NULL;
	SG_vhash* pvhStatus = NULL;	// Used for both fragball request status returned by SG_client
								// and staging status returned by SG_staging.
	const SG_pathname* pStagingPathname;

	SG_NULLARGCHECK_RETURN(pszPullIntoRepoDescriptorName);
	SG_NULLARGCHECK_RETURN(pClient);

	SG_ERR_CHECK(  _pull_init(pCtx, pClient, pszPullIntoRepoDescriptorName, &pMe)  );
	SG_ERR_CHECK(  SG_staging__get_pathname(pCtx, pMe->pStaging, &pStagingPathname)  );

	/* Request a fragball containing leaves of every dag */
	// BUGFIX: this was SG_ERR_CHECK_RETURN, which on failure returned
	// immediately and skipped the cleanup below, leaking pMe.
	SG_ERR_CHECK(  SG_context__msg__emit(pCtx, "Retrieving dagnodes...")  );
	SG_ERR_CHECK(  SG_client__pull_request_fragball(pCtx, pClient, NULL, pStagingPathname, &pszFragballName, &pvhStatus)  );
	/* Ian TODO: inspect pvhStatus */
	// Plain free: the NULLFREE macro does not report errors, so wrapping
	// it in SG_ERR_CHECK was misleading.
	SG_VHASH_NULLFREE(pCtx, pvhStatus);

	SG_ERR_CHECK(  SG_staging__slurp_fragball(pCtx, pMe->pStaging, pszFragballName)  );
	SG_ERR_CHECK(  SG_staging__check_status(pCtx, pMe->pStaging, SG_TRUE, SG_FALSE, SG_FALSE, SG_FALSE, SG_FALSE, &pvhStatus)  );

	/* Check the status and use it to request more dagnodes until the dags connect */
	SG_ERR_CHECK(  _add_dagnodes_until_connected(pCtx, &pvhStatus, pMe, pClient)  );
	SG_VHASH_NULLFREE(pCtx, pvhStatus);

	SG_ERR_CHECK(  SG_staging__check_status(pCtx, pMe->pStaging, SG_FALSE, SG_FALSE, SG_TRUE, SG_FALSE, SG_FALSE, &pvhStatus)  );

	{
		SG_bool b = SG_FALSE;
		SG_vhash* pvhRequest = NULL;

		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhStatus, SG_SYNC_STATUS_KEY__NEW_NODES, &b)  );
		if (b)
		{
			/* There are incoming nodes. Get their info. */
			SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhStatus, SG_SYNC_STATUS_KEY__NEW_NODES, &pvhRequest)  );
			SG_ERR_CHECK(  SG_client__get_dagnode_info(pCtx, pClient, pvhRequest, ppvaInfo)  );
		}
	}

	/* fall through */
fail:
	_NULLFREE_INSTANCE_DATA(pCtx, pMe);
	SG_VHASH_NULLFREE(pCtx, pvhStatus);
	SG_NULLFREE(pCtx, pszFragballName);
}
/**
 * varray-foreach callback for deserializing one version-1 dagfrag entry.
 *
 * Each entry is a vhash carrying a DFS state; end-fringe entries hold
 * only a dagnode id, everything else carries a full serialized dagnode
 * which we reconstruct and hand over to the frag's cache (which takes
 * ownership of the allocated dagnode).
 */
static void _deserialize_data_ver_1_cb(SG_context * pCtx,
									   void * pVoidDeserializeData,
									   SG_UNUSED_PARAM(const SG_varray * pva),
									   SG_UNUSED_PARAM(SG_uint32 ndx),
									   const SG_variant * pVariant)
{
	struct _deserialize_data * pData = (struct _deserialize_data *)pVoidDeserializeData;
	SG_vhash * pvhEntry;
	SG_vhash * pvhNode;
	SG_int64 i64Gen, i64State;
	_my_data * pCacheData;
	SG_dagnode * pDagnode = NULL;
	const char* pszId = NULL;

	SG_UNUSED(pva);
	SG_UNUSED(ndx);

	SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pVariant, &pvhEntry)  );

#if DEBUG && TRACE_DAGFRAG && 0
	SG_ERR_CHECK(  SG_vhash_debug__dump_to_console(pCtx, pvhEntry)  );
#endif

	SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvhEntry, KEY_DFS_STATE, &i64State)  );

	if (SG_DFS_END_FRINGE == i64State)
	{
		// fringe entries carry only the dagnode id
		SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhEntry, KEY_DAGNODE_ID, &pszId)  );
		SG_ERR_CHECK(  _cache__add__fringe(pCtx, pData->pFrag, pszId)  );
	}
	else
	{
		// interior entries carry a full serialized dagnode
		SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhEntry, KEY_ACTUAL_DAGNODE, &pvhNode)  );
		SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvhNode, KEY_GEN, &i64Gen)  );
		SG_ERR_CHECK(  SG_dagnode__alloc__from_vhash(pCtx, &pDagnode, pvhNode)  );
		SG_ERR_CHECK(  _cache__add__dagnode(pCtx, pData->pFrag,
											(SG_int32)i64Gen,
											pDagnode,
											(SG_uint32)i64State,
											&pCacheData)  );
		pDagnode = NULL;	// cache owns it now.
	}

	return;

fail:
	SG_DAGNODE_NULLFREE(pCtx, pDagnode);
}
/**
 * Write a "branch" line to the console for each branch name (if any)
 * pointing at the given changeset hid.  Closed branches are tagged
 * "(closed)"; when bShowOnlyOpenBranchNames is set they are suppressed
 * entirely.  Does nothing if the hid has no recorded branch names.
 */
static void _dump_branch_name(
	SG_context* pCtx,
	SG_console_stream cs,
	const char* pszRefHid,
	SG_bool bShowOnlyOpenBranchNames,
	const SG_vhash* pvhRefBranchValues,
	const SG_vhash* pvhRefClosedBranches)
{
	SG_vhash* pvhNamesForHid = NULL;	// we do not own this

	// Look up the set of branch names recorded for this hid, if any.
	if (pvhRefBranchValues)
	{
		SG_bool bKnown = SG_FALSE;
		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRefBranchValues, pszRefHid, &bKnown)  );
		if (bKnown)
			SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRefBranchValues, pszRefHid, &pvhNamesForHid)  );
	}

	if (pvhNamesForHid)
	{
		SG_uint32 nrNames = 0;
		SG_uint32 k;

		SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhNamesForHid, &nrNames)  );
		for (k = 0; k < nrNames; k++)
		{
			const char* pszName = NULL;
			SG_bool bClosed = SG_FALSE;

			SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhNamesForHid, k, &pszName, NULL)  );
			// membership in the "closed" hash marks the branch closed
			if (pvhRefClosedBranches)
				SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRefClosedBranches, pszName, &bClosed)  );

			// equivalent to the original "!b || (b && !c)"
			if (!bShowOnlyOpenBranchNames || !bClosed)
				SG_ERR_CHECK(  SG_console(pCtx, cs, "\t%8s: %s%s\n", "branch",
										  pszName, bClosed ? " (closed)" : "")  );
		}
	}

fail:
	;
}
/**
 * Pick thru the computed VARRAY and build a subset VARRAY containing
 * just the dirty FILE items.  (For an interactive diff we only show
 * dirty files; in batch/patch mode everything is shown.)
 *
 * We assume each element of the varray looks like:
 *
 *     { "status" : { "flags" : <int>, ... },
 *       "path"   : <repo-path>,
 *       ... }
 *
 * Both a Canonical STATUS (pvaStatus) and a "DiffStep" (pvaDiffStep)
 * match this pattern.
 *
 * Returns NULL (in *ppvaDirtyFiles) when there are no dirty files.
 */
static void _get_dirty_files(SG_context * pCtx,
							 const SG_varray * pva,
							 SG_varray ** ppvaDirtyFiles)
{
	SG_varray * pvaSubset = NULL;
	SG_uint32 ndx, total;

	*ppvaDirtyFiles = NULL;

	if (!pva)
		return;
	SG_ERR_CHECK(  SG_varray__count(pCtx, pva, &total)  );
	if (total == 0)
		return;

	for (ndx = 0; ndx < total; ndx++)
	{
		SG_vhash * pvhRow;			// we do not own this
		SG_vhash * pvhRowStatus;	// we do not own this
		SG_int64 i64Flags;
		SG_wc_status_flags flags;

		SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva, ndx, &pvhRow)  );
		SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRow, "status", &pvhRowStatus)  );
		SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvhRowStatus, "flags", &i64Flags)  );
		flags = (SG_wc_status_flags)i64Flags;

		// only files are interesting
		if ((flags & SG_WC_STATUS_FLAGS__T__FILE) == 0)
			continue;

		// ...and only the dirty ones
		if ((flags & (SG_WC_STATUS_FLAGS__C__NON_DIR_MODIFIED
					  |SG_WC_STATUS_FLAGS__S__ADDED
					  |SG_WC_STATUS_FLAGS__S__DELETED
					  |SG_WC_STATUS_FLAGS__S__MERGE_CREATED
					  |SG_WC_STATUS_FLAGS__S__UPDATE_CREATED)) == 0)
			continue;

		// allocate the result lazily so we can hand back NULL
		// when nothing qualifies.
		if (!pvaSubset)
			SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pvaSubset)  );
		SG_ERR_CHECK(  SG_varray__appendcopy__vhash(pCtx, pvaSubset, pvhRow, NULL)  );
	}

	SG_RETURN_AND_NULL( pvaSubset, ppvaDirtyFiles );

fail:
	SG_VARRAY_NULLFREE(pCtx, pvaSubset);
}
/**
 * For MODIFIED or ADDED items we need to populate the right side
 * of the diff.
 *
 * See footnote 1 above for bIsTmp_right.
 *
 * Adds "right_label", "right_abs_path", and "right_is_tmp" to
 * pvhDiffItem for the later call to SG_difftool__run().
 */
static void _get_right_side_details(SG_context * pCtx,
									sg_wc6diff__setup_data * pData,
									const SG_vhash * pvhItem,
									sg_wc_liveview_item * pLVI,
									SG_vhash * pvhDiffItem)
{
	SG_string * pStringLabel_right = NULL;
	SG_pathname * pPathAbsolute_right = NULL;
	SG_vhash * pvhSubsection_right = NULL;	// we do not own this
	const char * pszRepoPath_right;
	char bufDate[SG_TIME_FORMAT_LENGTH+1];
	SG_bool bIsTmp_right;
	SG_fsobj_stat st_right;

	// get the repo-path for the right side *AS IT EXISTED IN THE RIGHT CSET*.
	SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhItem, pData->pszSubsectionRight, &pvhSubsection_right)  );
	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhSubsection_right, "path", &pszRepoPath_right)  );

	SG_ERR_CHECK(  sg_wc_liveview_item__get_proxy_file_path(pCtx, pLVI, pData->pWcTx, &pPathAbsolute_right, &bIsTmp_right)  );

	// the right label is "<right_repo_path> <datestamp>"
	// (no committed HID is available for the right side, so we
	// label it with the file's mtime instead).
	// BUGFIX: the file was previously stat'd twice with identical
	// arguments; a single stat is sufficient.
	SG_ERR_CHECK(  SG_fsobj__stat__pathname(pCtx, pPathAbsolute_right, &st_right)  );
	SG_ERR_CHECK(  SG_time__format_utc__i64(pCtx, st_right.mtime_ms, bufDate, SG_NrElements(bufDate))  );
	SG_ERR_CHECK(  sg_wc_diff_utils__make_label(pCtx, pszRepoPath_right, NULL, bufDate, &pStringLabel_right)  );

	// put the various fields that we need to use in the call to SG_difftool__run()
	// into the pvhDiffItem for later.
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhDiffItem, "right_label", SG_string__sz(pStringLabel_right))  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhDiffItem, "right_abs_path", SG_pathname__sz(pPathAbsolute_right))  );
	SG_ERR_CHECK(  SG_vhash__add__bool(pCtx, pvhDiffItem, "right_is_tmp", bIsTmp_right)  );

fail:
	SG_PATHNAME_NULLFREE(pCtx, pPathAbsolute_right);
	SG_STRING_NULLFREE(pCtx, pStringLabel_right);
}
void SG_cmd_util__dump_history_results( SG_context * pCtx, SG_console_stream cs, SG_history_result* pHistResult, SG_vhash* pvh_pile, SG_bool bShowOnlyOpenBranchNames, SG_bool bShowFullComments, SG_bool bHideRevnums) { //print the information for each SG_bool bFound = (pHistResult != NULL); const char* currentInfoItem = NULL; SG_uint32 revno; SG_uint32 nCount = 0; SG_uint32 nIndex = 0; const char * pszTag = NULL; const char * pszComment = NULL; const char * pszStamp = NULL; const char * pszParent = NULL; SG_uint32 nResultCount = 0; SG_vhash* pvhRefBranchValues = NULL; SG_vhash* pvhRefClosedBranches = NULL; char* pszMyComment = NULL; if (pvh_pile) { SG_bool bHas = SG_FALSE; SG_ERR_CHECK( SG_vhash__has(pCtx, pvh_pile, "closed", &bHas) ); if (bHas) SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_pile, "closed", &pvhRefClosedBranches) ); SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_pile, "values", &pvhRefBranchValues) ); } SG_ERR_CHECK( SG_history_result__count(pCtx, pHistResult, &nResultCount) ); while (nResultCount != 0 && bFound) { SG_ERR_CHECK( SG_history_result__get_revno(pCtx, pHistResult, &revno) ); SG_ERR_CHECK( SG_history_result__get_cshid(pCtx, pHistResult, ¤tInfoItem) ); if(!bHideRevnums) SG_ERR_CHECK( SG_console(pCtx, cs, "\n\t%8s: %d:%s\n", "revision", revno, currentInfoItem) ); else SG_ERR_CHECK( SG_console(pCtx, cs, "\n\t%8s: %s\n", "revision", currentInfoItem) ); SG_ERR_CHECK( _dump_branch_name(pCtx, cs, currentInfoItem, bShowOnlyOpenBranchNames, pvhRefBranchValues, pvhRefClosedBranches) ); SG_ERR_CHECK( SG_history_result__get_audit__count(pCtx, pHistResult, &nCount) ); for (nIndex = 0; nIndex < nCount; nIndex++) { SG_int64 itime = -1; char buf_time_formatted[256]; const char * pszUser = NULL; SG_ERR_CHECK( SG_history_result__get_audit__who(pCtx, pHistResult, nIndex, &pszUser) ); SG_ERR_CHECK( SG_history_result__get_audit__when(pCtx, pHistResult, nIndex, &itime) ); SG_ERR_CHECK( SG_time__format_local__i64(pCtx, itime, buf_time_formatted, 
sizeof(buf_time_formatted)) ); SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %s\n", "who", pszUser) ); SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %s\n", "when", buf_time_formatted) ); } SG_ERR_CHECK( SG_history_result__get_tag__count(pCtx, pHistResult, &nCount) ); for (nIndex = 0; nIndex < nCount; nIndex++) { SG_ERR_CHECK( SG_history_result__get_tag__text(pCtx, pHistResult, nIndex, &pszTag) ); SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %s\n", "tag", pszTag) ); } SG_ERR_CHECK( SG_history_result__get_comment__count(pCtx, pHistResult, &nCount) ); for (nIndex = 0; nIndex < nCount; nIndex++) { SG_ERR_CHECK( SG_history_result__get_comment__text(pCtx, pHistResult, nIndex, &pszComment) ); if (pszComment) { SG_ERR_CHECK( _format_comment(pCtx, !bShowFullComments, "\t ", pszComment, &pszMyComment) ); if (pszMyComment) pszComment = pszMyComment; } SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %s\n", "comment", pszComment) ); SG_NULLFREE(pCtx, pszMyComment); } SG_ERR_CHECK( SG_history_result__get_stamp__count(pCtx, pHistResult, &nCount) ); for (nIndex = 0; nIndex < nCount; nIndex++) { SG_ERR_CHECK( SG_history_result__get_stamp__text(pCtx, pHistResult, nIndex, &pszStamp) ); SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %s\n", "stamp", pszStamp) ); } SG_ERR_CHECK( SG_history_result__get_parent__count(pCtx, pHistResult, &nCount) ); for (nIndex = 0; nIndex < nCount; nIndex++) { SG_ERR_CHECK( SG_history_result__get_parent(pCtx, pHistResult, nIndex, &pszParent, &revno) ); SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %d:%s\n", "parent", revno, pszParent) ); } SG_ERR_CHECK( SG_history_result__next(pCtx, pHistResult, &bFound) ); } fail: SG_NULLFREE(pCtx, pszMyComment); }
/**
 * Build a fragball in pFragballDirPathname satisfying a pull request
 * (see SG_server_prototypes.h for the request vhash format) and return
 * the fragball's filename in ppszFragballName (caller frees).
 *
 * Request shapes handled:
 *   - NULL request          : leaves of every dag
 *   - "clone" key present   : full repo fragball
 *   - "dags" key present    : specific dagnodes (by full hid, hid prefix,
 *                             or tag), optionally plus N generations of
 *                             ancestors; a dag entry with no nodes means
 *                             "all leaves of that dag"
 *   - "blobs" key present   : the listed blobs are appended as well
 *
 * On error, the partially-written fragball file is deleted.
 *
 * NOTE(review): ppvhStatus is NULLARGCHECK'd but never written here —
 * confirm callers expect it to be left untouched.
 */
void SG_server__pull_request_fragball(SG_context* pCtx,
									  SG_repo* pRepo,
									  SG_vhash* pvhRequest,
									  const SG_pathname* pFragballDirPathname,
									  char** ppszFragballName,
									  SG_vhash** ppvhStatus)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint32* paDagNums = NULL;
	SG_rbtree* prbDagnodes = NULL;
	SG_string* pstrFragballName = NULL;
	char* pszRevFullHid = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_uint32* repoDagnums = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);
	SG_NULLARGCHECK_RETURN(ppvhStatus);

#if TRACE_SERVER
	SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "pull fragball request")  );
#endif

	SG_ERR_CHECK(  SG_fragball__create(pCtx, pFragballDirPathname, &pFragballPathname)  );

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;
		SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums)  );
		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes)  );
			SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, paDagNums[i], prbDagnodes)  );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}
		SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
	}
	else
	{
		// Build the requested fragball.
		SG_bool found;
		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found)  );
		if (found)
		{
			// Full clone requested.
			SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, pFragballDirPathname, ppszFragballName)  );
		}
		else
		{
			// Not a full clone.
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
			if (found)
			{
				// Dagnodes were requested.
				SG_uint32 generations = 0;
				SG_vhash* pvhDags;					// we do not own this
				SG_uint32 count_requested_dagnums;
				SG_uint32 count_repo_dagnums = 0;
				SG_uint32 i;
				const char* pszDagNum = NULL;
				const SG_variant* pvRequestedNodes = NULL;
				SG_vhash* pvhRequestedNodes = NULL;	// we do not own this
				const char* pszHidRequestedDagnode = NULL;

				// Were additional generations requested?
				SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &found)  );
				if (found)
					SG_ERR_CHECK(  SG_vhash__get__uint32(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &generations)  );

				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
				SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums)  );
				if (count_requested_dagnums)
					SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_repo_dagnums, &repoDagnums)  );

				// For each requested dag, get the requested nodes.
				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_uint32 iMissingNodeCount;
					SG_uint32 iDagnum;
					SG_uint32 j;
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;

					// Get the dag's missing node vhash.
					SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszDagNum, &pvRequestedNodes)  );
					SG_ERR_CHECK(  SG_dagnum__from_sz__decimal(pCtx, pszDagNum, &iDagnum)  );

					// Verify that requested dagnum exists
					for (j = 0; j < count_repo_dagnums; j++)
					{
						if (repoDagnums[j] == iDagnum)
						{
							isValidDagnum = SG_TRUE;
							break;
						}
					}
					if (!isValidDagnum)
					{
						char buf[SG_DAGNUM__BUF_MAX__NAME];
						SG_ERR_CHECK(  SG_dagnum__to_name(pCtx, iDagnum, buf, sizeof(buf))  );
						SG_ERR_THROW2(SG_ERR_NO_SUCH_DAG, (pCtx, "%s", buf));
					}

					if (pvRequestedNodes)
					{
						SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pvRequestedNodes, &pvhRequestedNodes)  );

						// Get each node listed for the dag
						SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhRequestedNodes, &iMissingNodeCount)  );
						if (iMissingNodeCount > 0)
						{
							SG_uint32 j;	// note: shadows the outer j above
							const SG_variant* pvVal;

							bSpecificNodesRequested = SG_TRUE;

							SG_ERR_CHECK(  SG_RBTREE__ALLOC__PARAMS(pCtx, &prbDagnodes, iMissingNodeCount, NULL)  );
							for (j=0; j<iMissingNodeCount; j++)
							{
								// Each pair is hid-or-prefix-or-tag -> value;
								// a NULL value means the key is already a full hid.
								SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhRequestedNodes, j, &pszHidRequestedDagnode, &pvVal)  );
								if (pvVal)
								{
									const char* pszVal;
									SG_ERR_CHECK(  SG_variant__get__sz(pCtx, pvVal, &pszVal)  );
									if (pszVal)
									{
										if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_HID_PREFIX))
										{
											// key is a hid prefix: resolve to the full hid
											SG_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, iDagnum, pszHidRequestedDagnode, &pszRevFullHid)  );
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_TAG))
										{
											// key is a tag: resolve to the tagged changeset's hid
											SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszHidRequestedDagnode, &pszRevFullHid)  );
											if (!pszRevFullHid)
												SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else
											SG_ERR_THROW(SG_ERR_PULL_INVALID_FRAGBALL_REQUEST);
									}
								}

								SG_ERR_CHECK(  SG_rbtree__update(pCtx, prbDagnodes, pszHidRequestedDagnode)  );

								// Get additional dagnode generations, if requested.
								SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, pszHidRequestedDagnode, prbDagnodes, generations)  );
								SG_NULLFREE(pCtx, pszRevFullHid);
							}
						}
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes)  );

						// Get additional dagnode generations, if requested.
						if (generations)
						{
							SG_bool found;
							const char* hid;

							SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbDagnodes, &found, &hid, NULL)  );
							while (found)
							{
								SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, hid, prbDagnodes, generations)  );
								SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &hid, NULL)  );
							}
						}
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, iDagnum, prbDagnodes)  );
						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
					}

				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found)  );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs)  );
				SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pRepo, pFragballPathname, pvhBlobs)  );
			}

			SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
		}
	}

	/* fallthru */

fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
		SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pFragballPathname)  );

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_NULLFREE(pCtx, pszRevFullHid);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_NULLFREE(pCtx, repoDagnums);
}
/**
 * Our caller is trying to create a new repo and a WD mapped to it.
 * If we are building upon a directory that already existed on disk,
 * verify that it doesn't already contain another working copy
 * (a nested .sgdrawer) because submodules are not yet supported.
 *
 * Throws SG_ERR_ENTRY_ALREADY_UNDER_VERSION_CONTROL if a nested
 * drawer (other than the one we just created at the root) is found.
 */
static void _check_for_nested_drawer(SG_context * pCtx,
									 SG_wc_tx * pWcTx)
{
	SG_varray * pvaStatus = NULL;
	SG_string * pStrDrawerRepoPath = NULL;
	SG_string * pStrOffendingPath = NULL;
	const char * pszDrawerName = NULL;
	const char * pszDrawerRepoPath = NULL;
	SG_uint32 idx, count;

	// A directory (or contents) we created ourselves cannot
	// already contain a foreign drawer -- nothing to check.
	if (pWcTx->bWeCreated_WD || pWcTx->bWeCreated_WD_Contents)
		return;

	SG_ERR_CHECK(  SG_wc_tx__status(pCtx, pWcTx, NULL, SG_UINT32_MAX,
									SG_FALSE, // bListUnchanged
									SG_TRUE,  // bNoIgnores
									SG_TRUE,  // bNoTSC,
									SG_FALSE, // bListSparse
									SG_TRUE,  // bListReserved
									SG_TRUE,  // bNoSort,
									&pvaStatus, NULL)  );
	if (!pvaStatus)
		return;

	// Build "@/<drawer-name>/" so we can recognize -- and excuse --
	// the drawer we just created at the root.
	SG_ERR_CHECK(  SG_workingdir__get_drawer_directory_name(pCtx, &pszDrawerName)  );
	SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pStrDrawerRepoPath)  );
	SG_ERR_CHECK(  SG_string__sprintf(pCtx, pStrDrawerRepoPath, "@/%s", pszDrawerName)  );
	SG_ERR_CHECK(  SG_repopath__ensure_final_slash(pCtx, pStrDrawerRepoPath)  );
	pszDrawerRepoPath = SG_string__sz(pStrDrawerRepoPath);

	// Scan the status for any *other* reserved (drawer) entry and
	// complain about the first one we find.
	SG_ERR_CHECK(  SG_varray__count(pCtx, pvaStatus, &count)  );
	for (idx = 0; idx < count; idx++)
	{
		SG_vhash * pvhRow;			// we do not own this
		SG_vhash * pvhRowStatus;	// we do not own this
		SG_bool bReserved;
		const char * pszRowRepoPath;

		SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaStatus, idx, &pvhRow)  );
		SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRow, "status", &pvhRowStatus)  );
		// presence of the "isReserved" key marks a reserved item
		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRowStatus, "isReserved", &bReserved)  );
		if (!bReserved)
			continue;

		SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhRow, "path", &pszRowRepoPath)  );
		// Don't freak out over the .sgdrawer that we just created in the root.
		if (strcmp(pszRowRepoPath, pszDrawerRepoPath) == 0)
			continue;

		// Report the containing directory of the offending drawer.
		SG_ERR_CHECK(  SG_STRING__ALLOC__SZ(pCtx, &pStrOffendingPath, pszRowRepoPath)  );
		SG_ERR_CHECK(  SG_repopath__remove_last(pCtx, pStrOffendingPath)  );
		SG_ERR_THROW2(  SG_ERR_ENTRY_ALREADY_UNDER_VERSION_CONTROL,
						(pCtx, "The directory '%s' contains a working copy and submodules are not yet supported.",
						 SG_string__sz(pStrOffendingPath))  );
	}

fail:
	SG_STRING_NULLFREE(pCtx, pStrOffendingPath);
	SG_STRING_NULLFREE(pCtx, pStrDrawerRepoPath);
	SG_VARRAY_NULLFREE(pCtx, pvaStatus);
}
/**
 * Add to the fragball request vhash (see SG_server_prototypes.h for format).
 *
 * Ensures the request has a "dags" vhash and, beneath it, a vhash keyed
 * by the decimal dagnum; then records each requested node under that
 * dagnum vhash:
 *   - dagnodes (full hids)  -> key with NULL value
 *   - tags                  -> key with value SG_SYNC_REQUEST_VALUE_TAG
 *   - hid prefixes          -> key with value SG_SYNC_REQUEST_VALUE_HID_PREFIX
 *
 * Ownership note: on a successful SG_vhash__add__vhash the parent takes
 * ownership and NULLs our pointer (pvhDags/pvhDagnum), so the fail-path
 * frees only fire if the child was never attached.
 */
void SG_pull__add(
	SG_context* pCtx,
	SG_pull* pPull,
	SG_uint32 iDagnum,
	SG_rbtree* prbDagnodes,
	SG_rbtree* prbTags,
	SG_rbtree* prbDagnodePrefixes)
{
	_sg_pull* pMyPull = NULL;
	char bufDagnum[SG_DAGNUM__BUF_MAX__DEC];
	SG_bool found = SG_FALSE;
	SG_vhash* pvhDags = NULL;		// Needs to be freed
	SG_vhash* pvhDagsRef = NULL;	// Does not need to be freed, owned by parent vhash
	SG_vhash* pvhDagnum = NULL;		// Needs to be freed
	SG_vhash* pvhDagnumRef = NULL;	// Does not need to be freed, owned by parent vhash
	SG_rbtree_iterator* pit = NULL;

	SG_NULLARGCHECK_RETURN(pPull);
	SG_ARGCHECK_RETURN(iDagnum, iDagnum);	// dagnum must be non-zero
	pMyPull = (_sg_pull*)pPull;

	// Lazily create the top-level request vhash.
	if (!pMyPull->pvhFragballRequest)
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pMyPull->pvhFragballRequest)  );

	SG_ERR_CHECK(  SG_dagnum__to_sz__decimal(pCtx, iDagnum, bufDagnum, sizeof(bufDagnum))  );

	/* Get dagnum vhash, adding it if necessary. */
	SG_ERR_CHECK(  SG_vhash__has(pCtx, pMyPull->pvhFragballRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
	if (found)
		SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pMyPull->pvhFragballRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDagsRef)  );
	else
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhDags)  );
		pvhDagsRef = pvhDags;
		// ownership transfers to the request vhash; pvhDags is NULLed.
		SG_ERR_CHECK(  SG_vhash__add__vhash(pCtx, pMyPull->pvhFragballRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
	}
	SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhDagsRef, bufDagnum, &found)  );
	if (found)
		SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhDagsRef, bufDagnum, &pvhDagnumRef)  );
	if (!pvhDagnumRef)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhDagnum)  );
		pvhDagnumRef = pvhDagnum;
		// ownership transfers to the dags vhash; pvhDagnum is NULLed.
		SG_ERR_CHECK(  SG_vhash__add__vhash(pCtx, pvhDagsRef, bufDagnum, &pvhDagnum)  );
	}

	/* If dagnodes were provided, add them to the dagnum vhash */
	if (prbDagnodes)
	{
		const char* pszHid;

		SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbDagnodes, &found, &pszHid, NULL)  );
		while (found)
		{
			// NULL value == "this key is a full hid"
			SG_ERR_CHECK(  SG_vhash__update__null(pCtx, pvhDagnumRef, pszHid)  );
			SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &pszHid, NULL)  );
		}
		SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	}

	/* If tags were provided, add them to the dagnum vhash */
	if (prbTags)
	{
		const char* pszTag;

		SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbTags, &found, &pszTag, NULL)  );
		while (found)
		{
			SG_ERR_CHECK(  SG_vhash__update__string__sz(pCtx, pvhDagnumRef, pszTag, SG_SYNC_REQUEST_VALUE_TAG)  );
			SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &pszTag, NULL)  );
		}
		SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	}

	/* If dagnode hid prefixes were provided, add them to the dagnum vhash */
	if (prbDagnodePrefixes)
	{
		const char* pszHidPrefix;

		SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbDagnodePrefixes, &found, &pszHidPrefix, NULL)  );
		while (found)
		{
			SG_ERR_CHECK(  SG_vhash__update__string__sz(pCtx, pvhDagnumRef, pszHidPrefix, SG_SYNC_REQUEST_VALUE_HID_PREFIX)  );
			SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &pszHidPrefix, NULL)  );
		}
		SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	}

	return;

fail:
	SG_VHASH_NULLFREE(pCtx, pvhDagnum);
	SG_VHASH_NULLFREE(pCtx, pvhDags);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
}
/**
 * Run a history query using the working folder's repo to supply defaults:
 * when no revisions are named and bDetectCurrentBranch is set, the query is
 * scoped to the attached branch (plus any WD parents not yet in it).
 *
 * On success *ppResult/*ppHistoryToken receive the history results (caller
 * owns them) and, if ppvhBranchPile is non-NULL, *ppvhBranchPile receives
 * branch data for output formatting (caller owns it).
 */
void sg_vv2__history__working_folder(
	SG_context * pCtx,
	const SG_stringarray * psaInputs,
	const SG_rev_spec* pRevSpec,
	const SG_rev_spec* pRevSpec_single_revisions,
	const char* pszUser,
	const char* pszStamp,
	SG_bool bDetectCurrentBranch,
	SG_uint32 nResultLimit,
	SG_bool bHideObjectMerges,
	SG_int64 nFromDate,
	SG_int64 nToDate,
	SG_bool bListAll,
	SG_bool* pbHasResult,
	SG_vhash** ppvhBranchPile,
	SG_history_result ** ppResult,
	SG_history_token ** ppHistoryToken)
{
	SG_repo * pRepo = NULL;
	SG_stringarray * pStringArrayGIDs = NULL;
	SG_stringarray * pStringArrayChangesets = NULL;
	SG_stringarray * pStringArrayChangesetsMissing = NULL;
	SG_stringarray * pStringArrayChangesets_single_revisions = NULL;
	SG_bool bRecommendDagWalk = SG_FALSE;
	SG_bool bLeaves = SG_FALSE;
	const char * pszBranchName = NULL;	// we do not own this
	SG_vhash* pvhBranchPile = NULL;
	SG_varray* pvaParents = NULL;		// we do not own this
	SG_bool bMyBranchWalkRecommendation = SG_FALSE;
	SG_rev_spec* pRevSpec_Allocated = NULL;
	SG_wc_tx * pWcTx = NULL;
	SG_vhash * pvhInfo = NULL;
	SG_uint32 count_args = 0;
	SG_uint32 countRevsSpecified = 0;

	if (psaInputs)
		SG_ERR_CHECK( SG_stringarray__count(pCtx, psaInputs, &count_args) );

	// Use the WD to try to get the initial info.
	// I'm going to deviate from the model and use
	// a read-only TX here so that I can get a bunch
	// of fields that we need later.
	SG_ERR_CHECK( SG_WC_TX__ALLOC__BEGIN(pCtx, &pWcTx, NULL, SG_TRUE) );

	// Map the user's input paths to GIDs so history can follow renames/moves.
	if (count_args > 0)
		SG_ERR_CHECK( SG_wc_tx__get_item_gid__stringarray(pCtx, pWcTx, psaInputs, &pStringArrayGIDs) );

	SG_ERR_CHECK( SG_wc_tx__get_wc_info(pCtx, pWcTx, &pvhInfo) );
	SG_ERR_CHECK( SG_wc_tx__get_repo_and_wd_top(pCtx, pWcTx, &pRepo, NULL) );

	/* If no revisions were specified, and the caller wants us to use the current branch,
	 * create a revision spec with the current branch.
	 */
	if (pRevSpec)
	{
		// Work on a copy so the caller's rev spec is never mutated.
		SG_ERR_CHECK( SG_REV_SPEC__ALLOC__COPY(pCtx, pRevSpec, &pRevSpec_Allocated) );
		SG_ERR_CHECK( SG_rev_spec__count(pCtx, pRevSpec_Allocated, &countRevsSpecified) );
	}
	else
	{
		SG_ERR_CHECK( SG_REV_SPEC__ALLOC(pCtx, &pRevSpec_Allocated) );
	}

	// Single revisions count toward "did the user name any revisions?".
	if (pRevSpec_single_revisions != NULL)
	{
		SG_uint32 countRevsSpecified_singles = 0;
		SG_ERR_CHECK( SG_rev_spec__count(pCtx, pRevSpec_single_revisions, &countRevsSpecified_singles) );
		countRevsSpecified += countRevsSpecified_singles;
	}

	if (bDetectCurrentBranch && countRevsSpecified == 0)
	{
		SG_ERR_CHECK( SG_vhash__check__sz(pCtx, pvhInfo, "branch", &pszBranchName) );
		if (pszBranchName)
		{
			/* The working folder is attached to a branch. Does it exist? */
			SG_bool bHasBranches = SG_FALSE;
			SG_bool bBranchExists = SG_FALSE;

			SG_ERR_CHECK( SG_vc_branches__cleanup(pCtx, pRepo, &pvhBranchPile) );
			SG_ERR_CHECK( SG_vhash__has(pCtx, pvhBranchPile, "branches", &bHasBranches) );
			if (bHasBranches)
			{
				SG_vhash* pvhRefBranches;
				SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhBranchPile, "branches", &pvhRefBranches) );
				SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRefBranches, pszBranchName, &bBranchExists) );
			}

			if (bBranchExists)
			{
				SG_uint32 numParents, i;
				const char* pszRefParent;

				/* If that branch exists, just add to our rev spec. */
				SG_ERR_CHECK( SG_rev_spec__add_branch(pCtx, pRevSpec_Allocated, pszBranchName) );

				/* Plus, if the working folder's parents are not in the branch (yet), add them as well
				 * (they'll be in it after the user commits something...).
				 */
				SG_ERR_CHECK( SG_vhash__get__varray(pCtx, pvhInfo, "parents", &pvaParents) );
				SG_ERR_CHECK( SG_varray__count(pCtx, pvaParents, &numParents) );
				for (i = 0; i < numParents; i++)
				{
					SG_bool already_in_rev_spec = SG_FALSE;
					SG_ERR_CHECK( SG_varray__get__sz(pCtx, pvaParents, i, &pszRefParent) );
					SG_ERR_CHECK( SG_rev_spec__contains(pCtx, pRepo, pRevSpec_Allocated, pszRefParent, &already_in_rev_spec) );
					if(!already_in_rev_spec)
						SG_ERR_CHECK( SG_rev_spec__add_rev(pCtx, pRevSpec_Allocated, pszRefParent) );
				}
			}
			else
			{
				/* If the branch doesn't exist, add the working folder's baseline(s) to the rev spec
				 * and force a dag walk. */
				SG_uint32 numParents, i;
				const char* pszRefParent;

				SG_ERR_CHECK( SG_vhash__get__varray(pCtx, pvhInfo, "parents", &pvaParents) );
				SG_ERR_CHECK( SG_varray__count(pCtx, pvaParents, &numParents) );
				for (i = 0; i < numParents; i++)
				{
					SG_ERR_CHECK( SG_varray__get__sz(pCtx, pvaParents, i, &pszRefParent) );
					SG_ERR_CHECK( SG_rev_spec__add_rev(pCtx, pRevSpec_Allocated, pszRefParent) );
				}
				bMyBranchWalkRecommendation = SG_TRUE;
			}
		}
	}

	// Determine the starting changeset IDs. strBranch and bLeaves control this.
	// We do this step here, so that repo paths can be looked up before we call into history__core.
	SG_ERR_CHECK( sg_vv2__history__get_starting_changesets(pCtx, pRepo, pRevSpec_Allocated,
														   &pStringArrayChangesets,
														   &pStringArrayChangesetsMissing,
														   &bRecommendDagWalk,
														   &bLeaves) );
	if (pStringArrayChangesetsMissing)
	{
		// See K2177, K1322, W0836, W8132.  We requested specific starting
		// points and ran into some csets that were referenced (by --tag
		// or --branch) that are not present in the local repo.  Try to
		// silently ignore them.
		SG_uint32 nrFound = 0;
		SG_ERR_CHECK( SG_stringarray__count(pCtx, pStringArrayChangesets, &nrFound) );
		if (nrFound > 0)
		{
			// Yes there were missing csets, but we still found some
			// of the referenced ones.  Just ignore the missing ones.
			// This should behave just like we had the older tag/branch
			// dag prior to the push -r on the vc dag.
		}
		else
		{
			const char * psz_0;
			// TODO 2012/10/19 Do we want a different message if the number of missing is > 1 ?
			SG_ERR_CHECK( SG_stringarray__get_nth(pCtx, pStringArrayChangesetsMissing, 0, &psz_0) );
			SG_ERR_THROW2( SG_ERR_CHANGESET_BLOB_NOT_FOUND, (pCtx, "%s", psz_0) );
		}
	}

	bRecommendDagWalk = bRecommendDagWalk || bMyBranchWalkRecommendation;

	//This hack is here to detect when we're being asked for the parent of a certain
	//object from the sg_parents code.  parents always wants the dag walk.
	//The better solution would be to allow users to pass in a flag about their dagwalk
	//preferences
	if (count_args == 1 && nResultLimit == 1)
		bRecommendDagWalk = SG_TRUE;

	if (bListAll)
	{
		// See W8493.  If they gave us a --list-all along with a --rev or --tag, they
		// want to force us to show the full history rather than just the info for the
		// named cset.
		bRecommendDagWalk = SG_TRUE;
	}

	if (pRevSpec_single_revisions)
	{
		// We DO NOT pass a psaMissingHids here because we want
		// it to throw if the user names a missing cset.
		SG_ERR_CHECK( SG_rev_spec__get_all__repo__dedup(pCtx, pRepo, pRevSpec_single_revisions, SG_TRUE,
														&pStringArrayChangesets_single_revisions, NULL) );
	}

	// TODO 2012/07/03 The deviates from the model.  This call directly returns the
	// TODO             allocated data into the caller's pointers.  If anything fails
	// TODO             (such as the call to get the branches below), we'll probably
	// TODO             leak the result and token.
	SG_ERR_CHECK( SG_history__run(pCtx, pRepo, pStringArrayGIDs,
								  pStringArrayChangesets, pStringArrayChangesets_single_revisions,
								  pszUser, pszStamp, nResultLimit, bLeaves, bHideObjectMerges,
								  nFromDate, nToDate, bRecommendDagWalk, SG_FALSE,
								  pbHasResult, ppResult, ppHistoryToken) );

	/* This is kind of a hack. History callers often need branch data to format ouput.
	 * But we open the repo down here. I didn't want to open/close it again. And there's logic
	 * in here about which repo to open. So instead, we do this.
	 */
	if (ppvhBranchPile)
	{
		// Reuse the pile fetched during branch detection when we have one.
		if (pvhBranchPile)
		{
			*ppvhBranchPile = pvhBranchPile;
			pvhBranchPile = NULL;
		}
		else
			SG_ERR_CHECK( SG_vc_branches__cleanup(pCtx, pRepo, ppvhBranchPile) );
	}

fail:
	// The TX was opened read-only; cancel (rather than apply) is always correct here.
	SG_ERR_IGNORE( SG_wc_tx__cancel(pCtx, pWcTx) );
	SG_WC_TX__NULLFREE(pCtx, pWcTx);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec_Allocated);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesetsMissing);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets_single_revisions);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayGIDs);
	SG_VHASH_NULLFREE(pCtx, pvhBranchPile);
	SG_VHASH_NULLFREE(pCtx, pvhInfo);
	SG_REPO_NULLFREE(pCtx, pRepo);
}
static void loop_innards_make_delta( SG_context* pCtx, SG_repo* pRepo, SG_varray* pva_path, SG_uint32 i_path_step, SG_vhash* pvh_add, SG_vhash* pvh_remove ) { SG_changeset* pcs = NULL; const char* psz_csid_cur = NULL; const char* psz_csid_parent = NULL; SG_vhash* pvh_changes = NULL; SG_vhash* pvh_one_parent_changes = NULL; SG_vhash* pvh_cs_add = NULL; SG_vhash* pvh_cs_remove = NULL; SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva_path, i_path_step, &psz_csid_cur) ); SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva_path, i_path_step + 1, &psz_csid_parent) ); SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepo, psz_csid_cur, &pcs) ); SG_ERR_CHECK( SG_changeset__db__get_changes(pCtx, pcs, &pvh_changes) ); SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_changes, psz_csid_parent, &pvh_one_parent_changes) ); SG_ERR_CHECK( SG_vhash__check__vhash(pCtx, pvh_one_parent_changes, "add", &pvh_cs_remove) ); SG_ERR_CHECK( SG_vhash__check__vhash(pCtx, pvh_one_parent_changes, "remove", &pvh_cs_add) ); if (pvh_cs_add) { SG_uint32 count = 0; SG_uint32 i = 0; SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_cs_add, &count) ); for (i=0; i<count; i++) { const char* psz_hid_rec = NULL; SG_bool b = SG_FALSE; SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_cs_add, i, &psz_hid_rec, NULL) ); SG_ERR_CHECK( SG_vhash__remove_if_present(pCtx, pvh_remove, psz_hid_rec, &b) ); if (!b) { SG_ERR_CHECK( SG_vhash__update__null(pCtx, pvh_add, psz_hid_rec) ); } } } if (pvh_cs_remove) { SG_uint32 count = 0; SG_uint32 i = 0; SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_cs_remove, &count) ); for (i=0; i<count; i++) { const char* psz_hid_rec = NULL; SG_bool b = SG_FALSE; SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_cs_remove, i, &psz_hid_rec, NULL) ); SG_ERR_CHECK( SG_vhash__remove_if_present(pCtx, pvh_add, psz_hid_rec, &b) ); if (!b) { SG_ERR_CHECK( SG_vhash__update__null(pCtx, pvh_remove, psz_hid_rec) ); } } } fail: SG_CHANGESET_NULLFREE(pCtx, pcs); }
/**
 * Build a fragball file in pFragballDirPathname satisfying pvhRequest
 * (see SG_server_prototypes.h for the request format) and return its
 * filename in *ppszFragballName (caller frees).
 *
 * A NULL pvhRequest means "leaves of every dag".  On error the partially
 * written fragball file is deleted.
 */
void SG_sync_remote__request_fragball(
	SG_context* pCtx,
	SG_repo* pRepo,
	const SG_pathname* pFragballDirPathname,
	SG_vhash* pvhRequest,
	char** ppszFragballName)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint64* paDagNums = NULL;
	SG_string* pstrFragballName = NULL;
	SG_rbtree* prbDagnodes = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_rev_spec* pRevSpec = NULL;
	SG_stringarray* psaFullHids = NULL;
	SG_rbtree* prbDagnums = NULL;
	SG_dagfrag* pFrag = NULL;
	char* pszRepoId = NULL;
	char* pszAdminId = NULL;
	SG_fragball_writer* pfb = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);

	/* Generate a unique filename for the new fragball. */
	{
		char buf_filename[SG_TID_MAX_BUFFER_LENGTH];
		SG_ERR_CHECK( SG_tid__generate(pCtx, buf_filename, sizeof(buf_filename)) );
		SG_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pFragballPathname, pFragballDirPathname, buf_filename) );
	}

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;

		SG_ERR_CHECK( SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb) );
		SG_ERR_CHECK( SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums) );
		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes) );
			SG_ERR_CHECK( SG_fragball__write__dagnodes(pCtx, pfb, paDagNums[i], prbDagnodes) );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}

		SG_ERR_CHECK( SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName) );
		SG_ERR_CHECK( SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName) );
		SG_ERR_CHECK( SG_fragball_writer__close(pCtx, pfb) );
	}
	else
	{
		// Specific dags/nodes were requested.  Build that fragball.
		SG_bool found;

#if TRACE_SYNC_REMOTE && 0
		SG_ERR_CHECK( SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "fragball request") );
#endif

		SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found) );
		if (found)
		{
			// SG_SYNC_STATUS_KEY__CLONE_REQUEST is currently ignored
			SG_ERR_CHECK( SG_repo__fetch_repo__fragball(pCtx, pRepo, 3, pFragballDirPathname, ppszFragballName) );
		}
		else
		{
			// Not a full clone.
			SG_ERR_CHECK( SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb) );

			SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &found) );
			if (found)
			{
				SG_vhash* pvh_since = NULL;
				SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &pvh_since) );
				SG_ERR_CHECK( _do_since(pCtx, pRepo, pvh_since, pfb) );
			}

			SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found) );
			if (found)
			{
				// Specific Dagnodes were requested.  Add just those nodes to our "start from" rbtree.
				SG_vhash* pvhDags;
				SG_uint32 count_requested_dagnums;
				SG_uint32 i;
				const SG_variant* pvRevSpecs = NULL;
				SG_vhash* pvhRevSpec = NULL;

				// For each requested dag, get rev spec request.
				SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags) );
				SG_ERR_CHECK( SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums) );
				if (count_requested_dagnums)
					SG_ERR_CHECK( SG_repo__list_dags__rbtree(pCtx, pRepo, &prbDagnums) );

				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;
					const char* pszRefDagNum = NULL;
					SG_uint64 iDagnum;

					// Get the dag's missing node vhash.
					SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszRefDagNum, &pvRevSpecs) );

					// Verify that requested dagnum exists; silently skip unknown dags.
					SG_ERR_CHECK( SG_rbtree__find(pCtx, prbDagnums, pszRefDagNum, &isValidDagnum, NULL) );
					if (!isValidDagnum)
						continue;

					SG_ERR_CHECK( SG_dagnum__from_sz__hex(pCtx, pszRefDagNum, &iDagnum) );

					if (pvRevSpecs && pvRevSpecs->type != SG_VARIANT_TYPE_NULL)
					{
						SG_uint32 countRevSpecs = 0;

						SG_ERR_CHECK( SG_variant__get__vhash(pCtx, pvRevSpecs, &pvhRevSpec) );
						SG_ERR_CHECK( SG_rev_spec__from_vash(pCtx, pvhRevSpec, &pRevSpec) );

						// Process the rev spec for each dag
						SG_ERR_CHECK( SG_rev_spec__count(pCtx, pRevSpec, &countRevSpecs) );
						if (countRevSpecs > 0)
						{
							bSpecificNodesRequested = SG_TRUE;
							SG_ERR_CHECK( SG_rev_spec__get_all__repo(pCtx, pRepo, pRevSpec, SG_TRUE, &psaFullHids, NULL) );
							SG_ERR_CHECK( SG_stringarray__to_rbtree_keys(pCtx, psaFullHids, &prbDagnodes) );
							SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
						}
						SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes) );
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						// Get the leaves of the other repo, which we need to connect to.
						SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &found) );
						if (found)
						{
							SG_vhash* pvhRefAllLeaves;
							SG_vhash* pvhRefDagLeaves;

							SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &pvhRefAllLeaves) );

							// BUG FIX: this lookup previously probed pvhRequest (the whole
							// request) for the dagnum key and ignored the result, so the
							// get below ran unconditionally and threw when this dag had
							// no leaves entry.  Probe the leaves vhash and honor `found`.
							SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRefAllLeaves, pszRefDagNum, &found) );
							if (found)
							{
								SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRefAllLeaves, pszRefDagNum, &pvhRefDagLeaves) );
								SG_ERR_CHECK( SG_sync__build_best_guess_dagfrag(pCtx, pRepo, iDagnum,
									prbDagnodes, pvhRefDagLeaves, &pFrag) );
							}
						}
						else
						{
							// The other repo's leaves weren't provided: add just the requested nodes, make no attempt to connect.
							SG_ERR_CHECK( SG_repo__get_repo_id(pCtx, pRepo, &pszRepoId) );
							SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepo, &pszAdminId) );
							SG_ERR_CHECK( SG_dagfrag__alloc(pCtx, &pFrag, pszRepoId, pszAdminId, iDagnum) );
							SG_ERR_CHECK( SG_dagfrag__load_from_repo__simple(pCtx, pFrag, pRepo, prbDagnodes) );
							SG_NULLFREE(pCtx, pszRepoId);
							SG_NULLFREE(pCtx, pszAdminId);
						}

						SG_ERR_CHECK( SG_fragball__write__frag(pCtx, pfb, pFrag) );

						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
						SG_DAGFRAG_NULLFREE(pCtx, pFrag);
					}
				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found) );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs) );
				SG_ERR_CHECK( SG_sync__add_blobs_to_fragball(pCtx, pfb, pvhBlobs) );
			}

			SG_ERR_CHECK( SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName) );
			SG_ERR_CHECK( SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName) );
		}

		SG_ERR_CHECK( SG_fragball_writer__close(pCtx, pfb) );
	}

	/* fallthru */
fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
		SG_ERR_IGNORE( SG_fsobj__remove__pathname(pCtx, pFragballPathname) );

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_RBTREE_NULLFREE(pCtx, prbDagnums);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
	SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
	SG_NULLFREE(pCtx, pszRepoId);
	SG_NULLFREE(pCtx, pszAdminId);
	SG_FRAGBALL_WRITER_NULLFREE(pCtx, pfb);
}
/**
 * Walk upward from pPathLocalDirectory looking for the working-folder
 * "drawer" and read its repo.json mapping.
 *
 * Each out-parameter is optional; ownership of anything returned passes to
 * the caller.  Throws SG_ERR_NOT_FOUND if no enclosing working folder exists.
 */
void SG_workingdir__find_mapping(
	SG_context* pCtx,
	const SG_pathname* pPathLocalDirectory,
	SG_pathname** ppPathMappedLocalDirectory,	/**< Return the actual local directory that contains the mapping */
	SG_string** ppstrNameRepoInstanceDescriptor,	/**< Return the name of the repo instance descriptor */
	char** ppszidGidAnchorDirectory			/**< Return the GID of the repo directory */
	)
{
	SG_pathname* curpath = NULL;
	SG_string* result_pstrDescriptorName = NULL;
	char* result_pszidGid = NULL;
	SG_pathname* result_mappedLocalDirectory = NULL;
	SG_vhash* pvhMapping = NULL;
	SG_pathname* pDrawerPath = NULL;
	SG_pathname* pMappingFilePath = NULL;
	SG_vhash* pvh = NULL;

	SG_NULLARGCHECK_RETURN(pPathLocalDirectory);

	SG_ERR_CHECK( SG_PATHNAME__ALLOC__COPY(pCtx, &curpath, pPathLocalDirectory) );

	/* it's a directory, so it should have a final slash */
	SG_ERR_CHECK( SG_pathname__add_final_slash(pCtx, curpath) );

	/* Walk up the directory tree until we find a drawer or hit the root. */
	while (SG_TRUE)
	{
		SG_ERR_CHECK( SG_workingdir__get_drawer_path(pCtx, curpath, &pDrawerPath) );

		SG_fsobj__verify_directory_exists_on_disk__pathname(pCtx, pDrawerPath);
		if (!SG_context__has_err(pCtx))
		{
			// Found the drawer: read repo.json and extract the mapping.
			const char* pszDescriptorName = NULL;
			const char* pszGid = NULL;

			SG_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pMappingFilePath, pDrawerPath, "repo.json") );
			SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);

			SG_ERR_CHECK( SG_vfile__slurp(pCtx, pMappingFilePath, &pvh) );
			SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh, "mapping", &pvhMapping) );
			SG_PATHNAME_NULLFREE(pCtx, pMappingFilePath);

			SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhMapping, "descriptor", &pszDescriptorName) );
			SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhMapping, "anchor", &pszGid) );

			SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &result_pstrDescriptorName) );
			SG_ERR_CHECK( SG_string__set__sz(pCtx, result_pstrDescriptorName, pszDescriptorName) );

			if (pszGid)
			{
				SG_ERR_CHECK( SG_gid__alloc_clone(pCtx, pszGid, &result_pszidGid) );
			}
			else
			{
				result_pszidGid = NULL;
			}

			SG_VHASH_NULLFREE(pCtx, pvh);

			result_mappedLocalDirectory = curpath;
			curpath = NULL;
			break;
		}
		else
			SG_context__err_reset(pCtx);

		SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);

		// No drawer here; pop up one directory level and try again.
		SG_pathname__remove_last(pCtx, curpath);
		if (SG_context__err_equals(pCtx, SG_ERR_CANNOTTRIMROOTDIRECTORY))
		{
			// Reached the filesystem root without finding a drawer.
			SG_context__err_reset(pCtx);
			break;
		}
		else
		{
			SG_ERR_CHECK_CURRENT;
		}
	}

	if (result_mappedLocalDirectory)
	{
		// Hand each result to the caller if requested, otherwise free it.
		if (ppPathMappedLocalDirectory)
		{
			*ppPathMappedLocalDirectory = result_mappedLocalDirectory;
		}
		else
		{
			SG_PATHNAME_NULLFREE(pCtx, result_mappedLocalDirectory);
		}
		if (ppstrNameRepoInstanceDescriptor)
		{
			*ppstrNameRepoInstanceDescriptor = result_pstrDescriptorName;
		}
		else
		{
			SG_STRING_NULLFREE(pCtx, result_pstrDescriptorName);
		}
		if (ppszidGidAnchorDirectory)
		{
			*ppszidGidAnchorDirectory = result_pszidGid;
		}
		else
		{
			SG_NULLFREE(pCtx, result_pszidGid);
		}
		return;
	}
	else
	{
		SG_PATHNAME_NULLFREE(pCtx, curpath);
		SG_ERR_THROW_RETURN(SG_ERR_NOT_FOUND);
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvh);
	SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);
	SG_PATHNAME_NULLFREE(pCtx, pMappingFilePath);
	SG_PATHNAME_NULLFREE(pCtx, result_mappedLocalDirectory);
	SG_PATHNAME_NULLFREE(pCtx, curpath);
	// BUG FIX: these two were not freed on the error path, leaking the
	// descriptor string and gid when a later call failed (e.g. if
	// SG_gid__alloc_clone or SG_string__set__sz threw).
	SG_STRING_NULLFREE(pCtx, result_pstrDescriptorName);
	SG_NULLFREE(pCtx, result_pszidGid);
}
/**
 * Append one diff step for a file item to pData->pvaDiffSteps: copy the
 * item's gid/path/status into the new step, then attach a header string,
 * optionally with content diffs or an "omitting details" note depending
 * on the item's status flags.
 */
void sg_wc_tx__diff__setup__file(SG_context * pCtx,
								 sg_wc6diff__setup_data * pData,
								 const SG_vhash * pvhItem,
								 SG_wc_status_flags statusFlags)
{
	SG_string * pStringHeader = NULL;
	SG_vhash * pvhDiffItem = NULL;		// we do not own this
	const char * pszGid;				// we do not own this
	const char * pszLiveRepoPath;		// we do not own this
	SG_vhash * pvhItemStatus;			// we do not own this
	const char * pszOmitNote = NULL;
	SG_bool bWantContentDiff = SG_FALSE;

	if (statusFlags & SG_WC_STATUS_FLAGS__U__IGNORED)
	{
		// wc4status won't create header info for ignored items unless
		// verbose is set, and diff never sets it; skip the item entirely
		// so we don't emit a divider row of equal signs with nothing below.
		return;
	}

	SG_ERR_CHECK( SG_varray__appendnew__vhash(pCtx, pData->pvaDiffSteps, &pvhDiffItem) );

	SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid) );
	SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "gid", pszGid) );

	SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "path", &pszLiveRepoPath) );
	SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "path", pszLiveRepoPath) );

	SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhItem, "status", &pvhItemStatus) );
	SG_ERR_CHECK( SG_vhash__addcopy__vhash(pCtx, pvhDiffItem, "status", pvhItemStatus) );

	// Decide what goes under the header.  These statuses are NOT mutually
	// exclusive (e.g. one could have ADDED+LOST); the precedence below only
	// selects when we actually print content diffs.
	if (statusFlags & SG_WC_STATUS_FLAGS__A__SPARSE)
		pszOmitNote = "Omitting details for sparse item.";
	else if (statusFlags & SG_WC_STATUS_FLAGS__U__LOST)
		pszOmitNote = "Omitting details for lost item.";
	else if (statusFlags & SG_WC_STATUS_FLAGS__U__FOUND)
	{
		// header only -- never include content details for an uncontrolled item.
	}
	else if ((statusFlags & SG_WC_STATUS_FLAGS__S__MERGE_CREATED)
			 && (statusFlags & SG_WC_STATUS_FLAGS__S__DELETED))
	{
		// header only -- no content: not present now and not present in baseline.
	}
	else if ((statusFlags & SG_WC_STATUS_FLAGS__S__UPDATE_CREATED)
			 && (statusFlags & SG_WC_STATUS_FLAGS__S__DELETED))
	{
		// header only -- no content: not present now and not present in baseline.
	}
	else if (statusFlags & (SG_WC_STATUS_FLAGS__S__ADDED
							|SG_WC_STATUS_FLAGS__S__MERGE_CREATED
							|SG_WC_STATUS_FLAGS__S__UPDATE_CREATED
							|SG_WC_STATUS_FLAGS__S__DELETED
							|SG_WC_STATUS_FLAGS__C__NON_DIR_MODIFIED))
	{
		// content added/deleted/changed.
		bWantContentDiff = SG_TRUE;
	}
	// else: content unchanged -- must be a simple structural change; header only.

	SG_ERR_CHECK( sg_wc_tx__diff__setup__header(pCtx, pvhItem, &pStringHeader) );
	if (pszOmitNote)
		SG_ERR_CHECK( SG_string__append__format(pCtx, pStringHeader, "=== %s\n", pszOmitNote) );
	else if (bWantContentDiff)
		SG_ERR_CHECK( _diff__file__contents(pCtx, pData, pvhItem, statusFlags, pvhDiffItem, pStringHeader) );
	SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "header", SG_string__sz(pStringHeader)) );

fail:
	SG_STRING_NULLFREE(pCtx, pStringHeader);
}
/**
 * Recursively collect the blob hids reachable from changeset psz_hid_cs
 * (its tree-user-file and treenode blobs, its tree contents, and all of
 * its parents' blobs) into prb_blobs.
 */
void sg_pack__do_changeset(SG_context* pCtx, SG_repo* pRepo, const char* psz_hid_cs, SG_rbtree* prb_blobs)
{
	SG_changeset* pcs = NULL;
	SG_int32 gen = 0;
	SG_uint32 count_blobs = 0;
	SG_uint32 count_parents = 0;
	SG_varray* pva_parents = NULL;
	SG_uint32 i;
	SG_rbtree* prb_new = NULL;
	const char* psz_hid_root_treenode = NULL;
	const char* psz_key = NULL;
	SG_vhash* pvh_lbl = NULL;
	SG_vhash* pvh_blobs = NULL;

	SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs, &pcs) );
	SG_ERR_CHECK( SG_changeset__get_root(pCtx, pcs, &psz_hid_root_treenode) );
	SG_ERR_CHECK( SG_changeset__get_generation(pCtx, pcs, &gen) );

	// count_blobs is still 0 here; it is only the initial sizing hint.
	SG_ERR_CHECK( SG_RBTREE__ALLOC__PARAMS(pCtx, &prb_new, count_blobs, NULL) );

	SG_ERR_CHECK( SG_changeset__get_list_of_bloblists(pCtx, pcs, &pvh_lbl) );

	/* add all the tree user file blobs */
	SG_ERR_CHECK( SG_changeset__get_bloblist_name(pCtx, SG_BLOB_REFTYPE__TREEUSERFILE, &psz_key) );
	SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_lbl, psz_key, &pvh_blobs) );
	SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_blobs, &count_blobs) );
	for (i=0; i<count_blobs; i++)
	{
		const char* psz_hid = NULL;
		SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_blobs, i, &psz_hid, NULL) );
		SG_ERR_CHECK( SG_rbtree__add(pCtx, prb_new, psz_hid) );
	}

	/* and the treenode blobs */
	SG_ERR_CHECK( SG_changeset__get_bloblist_name(pCtx, SG_BLOB_REFTYPE__TREENODE, &psz_key) );
	SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_lbl, psz_key, &pvh_blobs) );
	SG_ERR_CHECK( SG_vhash__count(pCtx, pvh_blobs, &count_blobs) );
	for (i=0; i<count_blobs; i++)
	{
		const char* psz_hid = NULL;
		// BUG FIX: this loop was missing the get_nth_pair call, so every
		// iteration passed psz_hid == NULL to SG_rbtree__add and no
		// treenode blob hid was ever collected (compare the identical
		// tree-user-file loop above).
		SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh_blobs, i, &psz_hid, NULL) );
		SG_ERR_CHECK( SG_rbtree__add(pCtx, prb_new, psz_hid) );
	}

	SG_ERR_CHECK( sg_pack__do_get_dir__top(pCtx, pRepo, gen, psz_hid_root_treenode, prb_blobs, prb_new) );
	SG_RBTREE_NULLFREE(pCtx, prb_new);

	/* recurse into each parent changeset */
	SG_ERR_CHECK( SG_changeset__get_parents(pCtx, pcs, &pva_parents) );
	if (pva_parents)
	{
		SG_ERR_CHECK( SG_varray__count(pCtx, pva_parents, &count_parents) );
		for (i=0; i<count_parents; i++)
		{
			const char* psz_hid = NULL;
			SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva_parents, i, &psz_hid) );
			SG_ERR_CHECK( sg_pack__do_changeset(pCtx, pRepo, psz_hid, prb_blobs) );
		}
	}

	SG_CHANGESET_NULLFREE(pCtx, pcs);

	return;

fail:
	SG_RBTREE_NULLFREE(pCtx, prb_new);
	// BUG FIX: pcs was leaked on the error path; NULLFREE is a no-op when
	// the success path already freed it.
	SG_CHANGESET_NULLFREE(pCtx, pcs);
}