static int _compare_path(SG_context * pCtx,
                         const void * pVoid_ppv1,   // const SG_variant ** ppv1
                         const void * pVoid_ppv2,   // const SG_variant ** ppv2
                         void * pVoidData)
{
    const SG_variant** ppv1 = (const SG_variant **)pVoid_ppv1;
    const SG_variant** ppv2 = (const SG_variant **)pVoid_ppv2;
    SG_vhash * pvh1;
    SG_vhash * pvh2;
    const char * psz1;
    const char * psz2;
    int result = 0;

    SG_UNUSED( pVoidData );

    if (*ppv1 == NULL && *ppv2 == NULL)
        return 0;
    if (*ppv1 == NULL)
        return -1;
    if (*ppv2 == NULL)
        return 1;

    SG_ERR_CHECK( SG_variant__get__vhash(pCtx, *ppv1, &pvh1) );
    SG_ERR_CHECK( SG_variant__get__vhash(pCtx, *ppv2, &pvh2) );

    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh1, "path", &psz1) );
    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh2, "path", &psz2) );

    SG_ERR_CHECK( SG_repopath__compare(pCtx, psz1, psz2, &result) );

fail:
    return result;
}
/**
 * For MODIFIED or DELETED items we need to populate the left side
 * of the diff.  This should be called after _pick_tool().
 *
 * Put the various fields that we need to use in the call to
 * SG_difftool__run() into the pvhDiffItem for later.
 *
 */
static void _get_left_side_details(SG_context * pCtx,
                                   sg_wc6diff__setup_data * pData,
                                   const SG_vhash * pvhItem,
                                   SG_vhash * pvhDiffItem)
{
    SG_string * pStringLabel_left = NULL;
    SG_pathname * pPathAbsolute_left = NULL;
    SG_vhash * pvhSubsection_left = NULL;       // we do not own this
    const char * pszRepoPath_left;
    const char * pszGid;
    const char * pszHid_left;
    const char * pszToolName = NULL;
    const char * pszName_left;

    // get the repo-path for the left side *AS IT EXISTED IN THE LEFT CSET*.

    SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhItem, pData->pszSubsectionLeft, &pvhSubsection_left) );
    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhSubsection_left, "hid",  &pszHid_left) );
    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhSubsection_left, "path", &pszRepoPath_left) );
    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhSubsection_left, "name", &pszName_left) );

    // left label is "<left_repo-path> <hid>" (we have an HID and no date makes sense here).
    SG_ERR_CHECK( sg_wc_diff_utils__make_label(pCtx, pszRepoPath_left, pszHid_left, NULL, &pStringLabel_left) );
    SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "left_label", SG_string__sz(pStringLabel_left)) );

    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhDiffItem, "tool", &pszToolName) );
    if (strcmp(pszToolName, SG_DIFFTOOL__INTERNAL__SKIP) == 0)
    {
        // There's no point in exporting a binary file into TEMP so
        // that we can invoke a no-op difftool.  See W5937.
    }
    else
    {
        // fetch the baseline version of the file into a temp file.
        // the left side should be read-only because it refers to a
        // historical version (regardless of whether or not we are
        // interactive).
        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid) );
        SG_ERR_CHECK( sg_wc_diff_utils__export_to_temp_file(pCtx, pData->pWcTx, pData->pszSubsectionLeft,
                                                            pszGid, pszHid_left, pszName_left,
                                                            &pPathAbsolute_left) );
        SG_ERR_CHECK( SG_fsobj__chmod__pathname(pCtx, pPathAbsolute_left, 0400) );
        SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "left_abs_path", SG_pathname__sz(pPathAbsolute_left)) );
    }

fail:
    SG_PATHNAME_NULLFREE(pCtx, pPathAbsolute_left);
    SG_STRING_NULLFREE(pCtx, pStringLabel_left);
}
void sg_wc_tx__apply__move_rename(SG_context * pCtx,
                                  SG_wc_tx * pWcTx,
                                  const SG_vhash * pvh)
{
    SG_bool bAfterTheFact;
    SG_bool bUseIntermediate;
    SG_bool bSrcIsSparse;
    const char * pszRepoPath_Src;
    const char * pszRepoPath_Dest;
    SG_pathname * pPath_Src = NULL;
    SG_pathname * pPath_Dest = NULL;
    SG_pathname * pPath_Temp = NULL;

    SG_ERR_CHECK( SG_vhash__get__bool(pCtx, pvh, "after_the_fact",   &bAfterTheFact) );
    SG_ERR_CHECK( SG_vhash__get__bool(pCtx, pvh, "use_intermediate", &bUseIntermediate) );
    SG_ERR_CHECK( SG_vhash__get__bool(pCtx, pvh, "src_sparse",       &bSrcIsSparse) );
    SG_ERR_CHECK( SG_vhash__get__sz(  pCtx, pvh, "src",              &pszRepoPath_Src) );
    SG_ERR_CHECK( SG_vhash__get__sz(  pCtx, pvh, "dest",             &pszRepoPath_Dest) );

#if TRACE_WC_TX_APPLY
    SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR,
                              ("sg_wc_tx__apply__move_rename: [after-the-fact %d][use-intermediate %d][src-sparse %d] '%s' --> '%s'\n"),
                              bAfterTheFact, bUseIntermediate, bSrcIsSparse,
                              pszRepoPath_Src, pszRepoPath_Dest) );
#endif

    if (bAfterTheFact || bSrcIsSparse)      // user already did the move/rename.
        return;

    SG_ERR_CHECK( sg_wc_db__path__sz_repopath_to_absolute(pCtx, pWcTx->pDb, pszRepoPath_Src,  &pPath_Src ) );
    SG_ERR_CHECK( sg_wc_db__path__sz_repopath_to_absolute(pCtx, pWcTx->pDb, pszRepoPath_Dest, &pPath_Dest) );

    if (bUseIntermediate)
    {
        // need to use a temp file because of transient
        // collisions ('vv rename foo FOO' on Windows).
        SG_ERR_CHECK( sg_wc_db__path__get_unique_temp_path(pCtx, pWcTx->pDb, &pPath_Temp) );
        SG_ERR_CHECK( SG_fsobj__move__pathname_pathname(pCtx, pPath_Src,  pPath_Temp) );
        SG_ERR_CHECK( SG_fsobj__move__pathname_pathname(pCtx, pPath_Temp, pPath_Dest) );
    }
    else
    {
        SG_ERR_CHECK( SG_fsobj__move__pathname_pathname(pCtx, pPath_Src, pPath_Dest) );
    }

fail:
    SG_PATHNAME_NULLFREE(pCtx, pPath_Src);
    SG_PATHNAME_NULLFREE(pCtx, pPath_Dest);
    SG_PATHNAME_NULLFREE(pCtx, pPath_Temp);
}
/**
 * Insert/Replace a TNE ROW from the tne_L0 table in the wc.db.
 *
 * The existing row (if it exists) is a copy of the TNE
 * as it existed in the current baseline.  This item
 * will be present in the future baseline, but it has
 * one or more changed fields.  So we want the TNE ROW
 * to be updated as we transition the tne_L0 table.
 *
 */
void sg_wc_tx__apply__insert_tne(SG_context * pCtx,
                                 SG_wc_tx * pWcTx,
                                 const SG_vhash * pvh)
{
#if TRACE_WC_TX_APPLY
    const char * pszRepoPath;

    SG_ERR_CHECK_RETURN( SG_vhash__get__sz(pCtx, pvh, "src", &pszRepoPath) );
    SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR,
                              ("sg_wc_tx__apply__insert_tne: '%s'\n"),
                              pszRepoPath) );
#else
    SG_UNUSED( pCtx );
    SG_UNUSED( pvh );
#endif

    SG_UNUSED( pWcTx );

    // we don't actually have anything here.
    // the journal record was more for the verbose log.
    // the actual work of updating the SQL will be done
    // in the parallel journal-stmt.
}
void SG_dbrecord__get_value(SG_context* pCtx, const SG_dbrecord* prec, const char* putf8Name, const char** pputf8Value)
{
    SG_NULLARGCHECK_RETURN(prec);
    SG_NULLARGCHECK_RETURN(putf8Name);
    SG_NULLARGCHECK_RETURN(pputf8Value);

    SG_ERR_CHECK_RETURN( SG_vhash__get__sz(pCtx, prec->pvh, putf8Name, pputf8Value) );
}
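/*
 * Illustrative usage sketch (not part of the original file): shows how a
 * caller might read a field back out of an already-populated SG_dbrecord
 * using SG_dbrecord__get_value() above.  The "name" key and this helper
 * function are hypothetical; the returned pointer aliases storage owned
 * by the record's internal vhash and must not be freed by the caller.
 */
static void example__print_record_field(SG_context * pCtx, const SG_dbrecord * prec)
{
    const char * pszValue = NULL;   // borrowed from prec; do not free

    SG_ERR_CHECK_RETURN( SG_dbrecord__get_value(pCtx, prec, "name", &pszValue) );
    SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "name = '%s'\n", pszValue) );
}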
static void _sg_jscontextpool__force_config_bool(SG_context * pCtx, SG_vhash * pConfig, const char * szSettingName, SG_bool * pValue)
{
    SG_bool has = SG_FALSE;
    SG_bool value = SG_FALSE;

    SG_ASSERT(pCtx!=NULL);
    SG_ASSERT(pConfig!=NULL);
    SG_ASSERT(szSettingName!=NULL);

    SG_ERR_CHECK( SG_vhash__has(pCtx, pConfig, szSettingName, &has) );
    if(has)
    {
        SG_uint16 type_of_value = 0;
        SG_ERR_CHECK( SG_vhash__typeof(pCtx, pConfig, szSettingName, &type_of_value) );
        if(type_of_value==SG_VARIANT_TYPE_SZ)
        {
            const char *szValue = NULL;
            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pConfig, szSettingName, &szValue) );
            value = strcmp(szValue, "true")==0;
        }
        else if(type_of_value==SG_VARIANT_TYPE_BOOL)
        {
            SG_ERR_CHECK( SG_vhash__get__bool(pCtx, pConfig, szSettingName, &value) );
        }
    }

    SG_ERR_CHECK( SG_vhash__update__bool(pCtx, pConfig, szSettingName, value) );

    if(pValue!=NULL)
    {
        *pValue = value;
    }

    return;
fail:
    ;
}
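/*
 * Hedged sketch (not in the original source): demonstrates the coercion
 * performed by _sg_jscontextpool__force_config_bool() above -- a setting
 * stored as the string "true" is normalized to a real bool in the config
 * vhash and reported back to the caller.  The "use_ssl" key and this
 * example function are illustrative only.
 */
static void example__force_config_bool(SG_context * pCtx)
{
    SG_vhash * pConfig = NULL;
    SG_bool bValue = SG_FALSE;

    SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pConfig) );
    SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pConfig, "use_ssl", "true") );

    // after this call the "use_ssl" entry holds a bool and bValue is SG_TRUE.
    SG_ERR_CHECK( _sg_jscontextpool__force_config_bool(pCtx, pConfig, "use_ssl", &bValue) );

fail:
    SG_VHASH_NULLFREE(pCtx, pConfig);
}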
void sg_wc__status__find_status__single_item(SG_context * pCtx,
                                             const SG_varray * pva_statuses,
                                             const char * psz_repo_path,    // This must be the repo path, exactly as it appears in the status results.
                                             SG_vhash ** ppvh_status_result)
{
    SG_vhash * pvh_status = NULL;
    SG_uint32 nArraySize = 0;
    SG_uint32 index = 0;
    const char * psz_repo_path_to_check = NULL;
    SG_bool bFound = SG_FALSE;

    SG_NULLARGCHECK(pva_statuses);

    SG_ERR_CHECK( SG_varray__count(pCtx, pva_statuses, &nArraySize) );
    for (index = 0; index < nArraySize; index++)
    {
        SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pva_statuses, index, &pvh_status) );
        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh_status, "path", &psz_repo_path_to_check) );
        if (SG_strcmp__null(psz_repo_path, psz_repo_path_to_check) == 0)
        {
            bFound = SG_TRUE;
            break;
        }
    }

    if (bFound)
        SG_RETURN_AND_NULL(pvh_status, ppvh_status_result);

fail:
    return;
}
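/*
 * Hedged usage sketch (not in the original source): given a status varray
 * obtained elsewhere, look up the entry for one repo-path with
 * sg_wc__status__find_status__single_item() above.  The repo-path is
 * illustrative; note that the returned vhash aliases an element of the
 * status varray, so it is not freed here.
 */
static void example__find_one_status(SG_context * pCtx, const SG_varray * pvaStatuses)
{
    SG_vhash * pvhFound = NULL;     // aliases an element of pvaStatuses

    SG_ERR_CHECK( sg_wc__status__find_status__single_item(pCtx, pvaStatuses, "@/src/foo.c", &pvhFound) );
    if (pvhFound)
    {
        const char * pszPath = NULL;

        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhFound, "path", &pszPath) );
        SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "found status for: %s\n", pszPath) );
    }

fail:
    return;
}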
void SG_user__get_email_for_repo(SG_context * pCtx, SG_repo* pRepo, const char ** ppsz_email)
{
    char * psz_admin_id = NULL;
    char * psz_userid = NULL;
    const char * psz_email_temp = NULL;
    SG_string * pstr_path = NULL;
    SG_vhash * pvh_userhash = NULL;

    if (pRepo)
    {
        SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id) );

        // we store this userid under the admin scope of the repo we were given
        SG_ERR_CHECK( SG_string__alloc__format(pCtx, &pstr_path, "%s/%s/%s",
                                               SG_LOCALSETTING__SCOPE__ADMIN, psz_admin_id, SG_LOCALSETTING__USERID) );
        SG_ERR_CHECK( SG_localsettings__get__sz(pCtx, SG_string__sz(pstr_path), pRepo, &psz_userid, NULL) );

        if (psz_userid == NULL || *psz_userid == 0)
            SG_ERR_THROW(SG_ERR_USER_NOT_FOUND);

        SG_ERR_CHECK( SG_user__lookup_by_userid(pCtx, pRepo, psz_userid, &pvh_userhash) );
        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh_userhash, "email", &psz_email_temp) );
        SG_ERR_CHECK( SG_STRDUP(pCtx, psz_email_temp, (char**)ppsz_email) );
    }

fail:
    SG_VHASH_NULLFREE(pCtx, pvh_userhash);
    SG_NULLFREE(pCtx, psz_admin_id);
    SG_NULLFREE(pCtx, psz_userid);
    SG_STRING_NULLFREE(pCtx, pstr_path);
}
void SG_dagfrag__alloc__from_vhash(SG_context * pCtx,
                                   SG_dagfrag ** ppNew,
                                   const SG_vhash * pvhFrag)
{
    const char * szVersion;
    SG_dagfrag * pFrag = NULL;
    struct _deserialize_data deserialize_data;
    SG_int64 iDagNum = 0;

    SG_NULLARGCHECK_RETURN(ppNew);
    SG_NULLARGCHECK_RETURN(pvhFrag);

    SG_ERR_CHECK( SG_vhash__get__sz(pCtx,pvhFrag,KEY_VERSION,&szVersion) );

    if (strcmp(szVersion,"1") == 0)
    {
        // handle dagfrags that were serialized by software compiled with
        // VALUE_VERSION == 1.
        SG_varray * pvaMyData;
        const char* psz_repo_id = NULL;
        const char* psz_admin_id = NULL;

        SG_ERR_CHECK( SG_vhash__get__sz(pCtx,pvhFrag,KEY_REPO_ID,&psz_repo_id) );
        SG_ERR_CHECK( SG_vhash__get__sz(pCtx,pvhFrag,KEY_ADMIN_ID,&psz_admin_id) );
        SG_ERR_CHECK( SG_vhash__get__int64(pCtx,pvhFrag,KEY_DAGNUM,&iDagNum) );

        SG_ERR_CHECK( SG_dagfrag__alloc(pCtx,&pFrag,psz_repo_id,psz_admin_id,(SG_uint32) iDagNum) );

        SG_ERR_CHECK( SG_vhash__get__varray(pCtx,pvhFrag,KEY_DATA,&pvaMyData) );

        deserialize_data.pFrag = pFrag;
        SG_ERR_CHECK( SG_varray__foreach(pCtx, pvaMyData, _deserialize_data_ver_1_cb, &deserialize_data) );

        *ppNew = pFrag;
        return;
    }
    else
    {
        SG_ERR_THROW( SG_ERR_DAGFRAG_DESERIALIZATION_VERSION );
    }

fail:
    SG_DAGFRAG_NULLFREE(pCtx, pFrag);
}
static void _user_match_found(SG_context * pCtx, SG_repo * pRepo, _node_t *pNode1, _node_t * pNode2, SG_bool *matchFound)
{
    SG_uint32 countAudits1 = 0;
    SG_uint32 countAudits2 = 0;
    SG_uint32 iAudit1;

    if(pNode1->pAudits==NULL)
    {
        SG_ERR_CHECK( SG_repo__lookup_audits(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, pNode1->pszHidRef, &pNode1->pAudits) );
    }
    if(pNode2->pAudits==NULL)
    {
        SG_ERR_CHECK( SG_repo__lookup_audits(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, pNode2->pszHidRef, &pNode2->pAudits) );
    }

    SG_ERR_CHECK( SG_varray__count(pCtx, pNode1->pAudits, &countAudits1) );
    SG_ERR_CHECK( SG_varray__count(pCtx, pNode2->pAudits, &countAudits2) );

    for(iAudit1=0; iAudit1<countAudits1; ++iAudit1)
    {
        SG_vhash * pAuditRef = NULL;
        SG_uint32 iAudit2;
        const char * szAudit1 = NULL;

        SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pNode1->pAudits, iAudit1, &pAuditRef) );
        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pAuditRef, "userid", &szAudit1) );

        for(iAudit2=0; iAudit2<countAudits2; ++iAudit2)
        {
            const char * szAudit2 = NULL;

            SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pNode2->pAudits, iAudit2, &pAuditRef) );
            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pAuditRef, "userid", &szAudit2) );

            if(strcmp(szAudit1, szAudit2)==0)
            {
                *matchFound = SG_TRUE;
                return;
            }
        }
    }

    *matchFound = SG_FALSE;

    return;
fail:
    ;
}
void SG_vc_hooks__ASK__WIT__LIST_ITEMS(
    SG_context* pCtx,
    SG_repo* pRepo,
    const char * psz_search_term,
    SG_varray *pBugs
    )
{
    SG_vhash* pvh_params = NULL;
    SG_vhash* pvh_result = NULL;
    SG_vhash* pvh_hook = NULL;
    const char* psz_js = NULL;
    const char* psz_descriptor_name = NULL;
    SG_bool hasBugs = SG_FALSE;

    SG_ERR_CHECK( sg_vc_hooks__lookup_by_interface__single_result(
                      pCtx,
                      pRepo,
                      SG_VC_HOOK__INTERFACE__ASK__WIT__LIST_ITEMS,
                      &pvh_hook
                      ) );
    if (!pvh_hook)
        return;

    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh_hook, "js", &psz_js) );

    SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh_params) );

    SG_ERR_CHECK( SG_repo__get_descriptor_name(pCtx, pRepo, &psz_descriptor_name) );
    SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh_params, "descriptor_name", psz_descriptor_name) );
    SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh_params, "text", psz_search_term) );

    SG_ERR_CHECK( SG_vc_hooks__execute(pCtx, psz_js, pvh_params, &pvh_result) );

    SG_ERR_CHECK( SG_vhash__has(pCtx, pvh_result, "items", &hasBugs) );
    if (hasBugs && pBugs)
    {
        SG_varray *bugs = NULL;

        SG_ERR_CHECK( SG_vhash__get__varray(pCtx, pvh_result, "items", &bugs) );
        SG_ERR_CHECK( SG_varray__copy_items(pCtx, bugs, pBugs) );
    }

fail:
    SG_VHASH_NULLFREE(pCtx, pvh_params);
    SG_VHASH_NULLFREE(pCtx, pvh_result);
    SG_VHASH_NULLFREE(pCtx, pvh_hook);
}
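/*
 * Hedged sketch (not in the original source): querying the work-item-tracking
 * hook via SG_vc_hooks__ASK__WIT__LIST_ITEMS() above.  The search term is
 * illustrative, and SG_VARRAY__ALLOC is assumed to follow the same allocation
 * pattern as SG_VHASH__ALLOC used elsewhere in this file.
 */
static void example__list_matching_work_items(SG_context * pCtx, SG_repo * pRepo)
{
    SG_varray * pvaItems = NULL;
    SG_uint32 nrItems = 0;

    SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &pvaItems) );      // assumed allocator
    SG_ERR_CHECK( SG_vc_hooks__ASK__WIT__LIST_ITEMS(pCtx, pRepo, "crash", pvaItems) );
    SG_ERR_CHECK( SG_varray__count(pCtx, pvaItems, &nrItems) );
    SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "%d matching work items\n", (int)nrItems) );

fail:
    SG_VARRAY_NULLFREE(pCtx, pvaItems);
}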
/**
 * Build an ordered stringarray of the GIDs of all of the issues.
 * I need to have the list of GIDs be independent of pPendingTree
 * and pvaIssues so that FIX can do incremental SAVES of the
 * pendingtree (which trashes the ptnodes and/or requires a reload).
 */
static void _resolve__get_all_issue_gids(SG_context * pCtx,
                                         struct _resolve_data * pData,
                                         SG_bool bWantResolved,
                                         SG_bool bWantUnresolved)
{
    const SG_varray * pvaIssues;            // varray[pvhIssue *] of all issues -- we do not own this
    SG_bool bHaveIssues;
    SG_uint32 k;
    SG_uint32 nrIssues = 0;
    SG_bool bWantBoth = (bWantResolved && bWantUnresolved);

    SG_ERR_CHECK( SG_pendingtree__get_wd_issues__ref(pCtx, pData->pPendingTree, &bHaveIssues, &pvaIssues) );
    if (bHaveIssues)
        SG_ERR_CHECK( SG_varray__count(pCtx, pvaIssues, &nrIssues) );

    SG_ERR_CHECK( SG_STRINGARRAY__ALLOC(pCtx, &pData->psaGids, nrIssues) );

    for (k=0; k<nrIssues; k++)
    {
        const SG_vhash * pvhIssue_k;
        const char * pszGid_k;
        SG_bool bWantThisOne;

        SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pvaIssues, k, (SG_vhash **)&pvhIssue_k) );

        if (bWantBoth)
            bWantThisOne = SG_TRUE;
        else
        {
            SG_int64 s;
            SG_pendingtree_wd_issue_status status;
            SG_bool bResolved;

            SG_ERR_CHECK_RETURN( SG_vhash__get__int64(pCtx, pvhIssue_k, "status", &s) );
            status = (SG_pendingtree_wd_issue_status)s;
            bResolved = ((status & SG_ISSUE_STATUS__MARKED_RESOLVED) == SG_ISSUE_STATUS__MARKED_RESOLVED);

            bWantThisOne = ((bWantResolved && bResolved) || (bWantUnresolved && !bResolved));
        }

        if (bWantThisOne)
        {
            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhIssue_k, "gid", &pszGid_k) );
            SG_ERR_CHECK( SG_stringarray__add(pCtx, pData->psaGids, pszGid_k) );
        }
    }

fail:
    return;
}
static void _deserialize_data_ver_1_cb(SG_context * pCtx,
                                       void * pVoidDeserializeData,
                                       SG_UNUSED_PARAM(const SG_varray * pva),
                                       SG_UNUSED_PARAM(SG_uint32 ndx),
                                       const SG_variant * pVariant)
{
    struct _deserialize_data * pDeserializeData = (struct _deserialize_data *)pVoidDeserializeData;
    SG_vhash * pvhMyData;
    SG_vhash * pvhDagnode;
    SG_int64 gen64, state64;
    _my_data * pMyData;
    SG_dagnode * pDagnode = NULL;
    const char* psz_id = NULL;

    SG_UNUSED(pva);
    SG_UNUSED(ndx);

    SG_ERR_CHECK( SG_variant__get__vhash(pCtx,pVariant,&pvhMyData) );

#if DEBUG && TRACE_DAGFRAG && 0
    SG_ERR_CHECK( SG_vhash_debug__dump_to_console(pCtx, pvhMyData) );
#endif

    SG_ERR_CHECK( SG_vhash__get__int64(pCtx,pvhMyData,KEY_DFS_STATE,&state64) );

    if (SG_DFS_END_FRINGE == state64)
    {
        SG_ERR_CHECK( SG_vhash__get__sz(pCtx,pvhMyData,KEY_DAGNODE_ID,&psz_id) );
        SG_ERR_CHECK( _cache__add__fringe(pCtx,pDeserializeData->pFrag, psz_id) );
    }
    else
    {
        SG_ERR_CHECK( SG_vhash__get__vhash(pCtx,pvhMyData,KEY_ACTUAL_DAGNODE,&pvhDagnode) );
        SG_ERR_CHECK( SG_vhash__get__int64(pCtx,pvhDagnode,KEY_GEN,&gen64) );
        SG_ERR_CHECK( SG_dagnode__alloc__from_vhash(pCtx, &pDagnode, pvhDagnode) );
        SG_ERR_CHECK( _cache__add__dagnode(pCtx,
                                           pDeserializeData->pFrag,
                                           (SG_int32)gen64,
                                           pDagnode,
                                           (SG_uint32)state64,
                                           &pMyData) );
        pDagnode = NULL;        // cache owns it now.
    }

    return;

fail:
    SG_DAGNODE_NULLFREE(pCtx, pDagnode);
}
void SG_curl__throw_on_non200(SG_context* pCtx, SG_curl* pCurl)
{
    SG_int32 httpResponseCode = 0;
    SG_vhash* pvhErr = NULL;
    _sg_curl* p = (_sg_curl*)pCurl;

    SG_NULLARGCHECK_RETURN(pCurl);

    SG_ERR_CHECK( SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &httpResponseCode) );

    if (httpResponseCode != 200)
    {
        if ((httpResponseCode == 500 || httpResponseCode == 410) && p->pstrErr)
        {
            SG_bool bHas = SG_FALSE;
            const char* szMsg = NULL;

            SG_VHASH__ALLOC__FROM_JSON__SZ(pCtx, &pvhErr, SG_string__sz(p->pstrErr));
            if (SG_context__err_equals(pCtx, SG_ERR_JSONPARSER_SYNTAX))
            {
                // The server didn't return a JSON-formatted response.
                if (httpResponseCode == 500)
                    SG_ERR_RESET_THROW2(SG_ERR_EXTENDED_HTTP_500, (pCtx, "%s", SG_string__sz(p->pstrErr)));
                else
                    SG_ERR_THROW2(SG_ERR_SERVER_HTTP_ERROR, (pCtx, "%d", httpResponseCode));
            }
            SG_ERR_CHECK_CURRENT;

            SG_ERR_CHECK( SG_vhash__has(pCtx, pvhErr, "msg", &bHas) );
            if (bHas)
                SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhErr, "msg", &szMsg) );

            if (szMsg)
                SG_ERR_THROW2(SG_ERR_EXTENDED_HTTP_500, (pCtx, "%s", szMsg));
            else
                SG_ERR_THROW2(SG_ERR_EXTENDED_HTTP_500, (pCtx, "%s", SG_string__sz(p->pstrErr)));
        }
        else if (httpResponseCode == 401)
        {
            SG_ERR_THROW(SG_ERR_AUTHORIZATION_REQUIRED);
        }
        else
            SG_ERR_THROW2(SG_ERR_SERVER_HTTP_ERROR, (pCtx, "%d", httpResponseCode));
    }

    /* common cleanup */
fail:
    SG_VHASH_NULLFREE(pCtx, pvhErr);
}
static void _diff__file__contents(SG_context * pCtx,
                                  sg_wc6diff__setup_data * pData,
                                  const SG_vhash * pvhItem,
                                  SG_wc_status_flags statusFlags,
                                  SG_vhash * pvhDiffItem,
                                  SG_string * pStringHeader)
{
    const char * pszGid;
    SG_uint64 uiAliasGid;
    sg_wc_liveview_item * pLVI = NULL;      // we do not own this
    SG_bool bKnown = SG_FALSE;

    if (statusFlags & SG_WC_STATUS_FLAGS__S__DELETED)
    {
        // right side does not exist anymore (as of this point in the TX),
        // so dump trivial '<' or '-' diff of entire left side.
        SG_ERR_CHECK( _diff__file__deleted(pCtx, pData, pvhItem, pvhDiffItem, pStringHeader) );
    }
    else
    {
        // the right side exists.  the left may or may not exist.
        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid) );
        SG_ERR_CHECK( sg_wc_db__gid__get_alias_from_gid(pCtx, pData->pWcTx->pDb, pszGid, &uiAliasGid) );
        SG_ERR_CHECK( sg_wc_tx__liveview__fetch_random_item(pCtx, pData->pWcTx, uiAliasGid, &bKnown, &pLVI) );

        if (statusFlags & SG_WC_STATUS_FLAGS__S__ADDED)
        {
            SG_ERR_CHECK( _diff__file__added(pCtx, pData, pvhItem, pLVI, pvhDiffItem, pStringHeader) );
        }
        else if (statusFlags & SG_WC_STATUS_FLAGS__S__MERGE_CREATED)
        {
            SG_ERR_CHECK( _diff__file__merge_created(pCtx, pData, pvhItem, pLVI, pvhDiffItem, pStringHeader) );
        }
        else if (statusFlags & SG_WC_STATUS_FLAGS__S__UPDATE_CREATED)
        {
            SG_ERR_CHECK( _diff__file__update_created(pCtx, pData, pvhItem, pLVI, pvhDiffItem, pStringHeader) );
        }
        else if (statusFlags & SG_WC_STATUS_FLAGS__C__NON_DIR_MODIFIED)
        {
            SG_ERR_CHECK( _diff__file__modified(pCtx, pData, pvhItem, pLVI, pvhDiffItem, pStringHeader) );
        }
    }

fail:
    return;
}
void SG_user__set_user__repo(
    SG_context* pCtx,
    SG_repo* pRepo,
    const char* psz_email
    )
{
    SG_vhash* pvh_user = NULL;
    char* psz_admin_id = NULL;
    const char* psz_userid = NULL;
    SG_string* pstr_path = NULL;

    SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pstr_path) );

    if (pRepo)
    {
        SG_ERR_CHECK( SG_user__lookup_by_email(pCtx, pRepo, psz_email, &pvh_user) );
        if (!pvh_user)
        {
            SG_ERR_THROW( SG_ERR_USER_NOT_FOUND );
        }
        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh_user, SG_ZING_FIELD__RECID, &psz_userid) );

        SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id) );

        // we store this userid under the admin scope of the repo we were given
        SG_ERR_CHECK( SG_string__sprintf(pCtx, pstr_path, "%s/%s/%s",
                                         SG_LOCALSETTING__SCOPE__ADMIN, psz_admin_id, SG_LOCALSETTING__USERID) );
        SG_ERR_CHECK( SG_localsettings__update__sz(pCtx, SG_string__sz(pstr_path), psz_userid) );
    }

    // AND we store this email address in machine scope for fallback lookups
    SG_ERR_CHECK( SG_string__sprintf(pCtx, pstr_path, "%s/%s",
                                     SG_LOCALSETTING__SCOPE__MACHINE, SG_LOCALSETTING__USEREMAIL) );
    SG_ERR_CHECK( SG_localsettings__update__sz(pCtx, SG_string__sz(pstr_path), psz_email) );

fail:
    SG_STRING_NULLFREE(pCtx, pstr_path);
    SG_NULLFREE(pCtx, psz_admin_id);
    SG_VHASH_NULLFREE(pCtx, pvh_user);
}
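/*
 * Hedged sketch (not in the original source) tying together
 * SG_user__set_user__repo() and SG_user__get_email_for_repo() from this
 * file: record the 'whoami' for a repo and read the stored email back.
 * The email address is illustrative; pRepo is assumed to be an open repo.
 */
static void example__set_and_get_whoami(SG_context * pCtx, SG_repo * pRepo)
{
    char * pszEmail = NULL;     // we own this; the getter dups the string

    SG_ERR_CHECK( SG_user__set_user__repo(pCtx, pRepo, "debbie@example.com") );
    SG_ERR_CHECK( SG_user__get_email_for_repo(pCtx, pRepo, (const char **)&pszEmail) );
    SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "whoami email: %s\n", pszEmail) );

fail:
    SG_NULLFREE(pCtx, pszEmail);
}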
static void _vv_verbs__init_new_repo__get_admin_id(SG_context * pCtx,
                                                   const char * psz_shared_users,
                                                   char* buf_admin_id)
{
    SG_sync_client* pSyncClient = NULL;
    SG_vhash* pvhRepoInfo = NULL;
    const char* pszRefAdminId = NULL;

    SG_ERR_CHECK( SG_sync_client__open(pCtx, psz_shared_users, NULL, NULL, &pSyncClient) );
    SG_ERR_CHECK( SG_sync_client__get_repo_info(pCtx, pSyncClient, SG_FALSE, SG_FALSE, &pvhRepoInfo) );

    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhRepoInfo, SG_SYNC_REPO_INFO_KEY__ADMIN_ID, &pszRefAdminId) );
    SG_ERR_CHECK( SG_strcpy(pCtx, buf_admin_id, SG_GID_BUFFER_LENGTH, pszRefAdminId) );

fail:
    SG_SYNC_CLIENT_NULLFREE(pCtx, pSyncClient);
    SG_VHASH_NULLFREE(pCtx, pvhRepoInfo);
}
static void _getUserEmail(SG_context *pCtx, SG_repo* pRepo, SG_string *replacement)
{
    SG_vhash* pvh_user = NULL;
    SG_string* pstrUser = NULL;
    const char* psz_email = NULL;

    SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pstrUser) );
    SG_ERR_CHECK( _getUserId(pCtx, pRepo, pstrUser) );

    SG_ERR_CHECK( SG_user__lookup_by_userid(pCtx, pRepo, SG_string__sz(pstrUser), &pvh_user) );
    if (pvh_user)
    {
        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh_user, "email", &psz_email) );
    }

    SG_ERR_CHECK( SG_string__set__sz(pCtx, replacement, psz_email) );

fail:
    SG_VHASH_NULLFREE(pCtx, pvh_user);
    SG_STRING_NULLFREE(pCtx, pstrUser);
}
void sg_wc_tx__apply__add(SG_context * pCtx,
                          SG_wc_tx * pWcTx,
                          const SG_vhash * pvh)
{
#if TRACE_WC_TX_APPLY
    const char * pszRepoPath;

    SG_ERR_CHECK_RETURN( SG_vhash__get__sz(pCtx, pvh, "src", &pszRepoPath) );
    SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR,
                              ("sg_wc_tx__apply__add: '%s'\n"),
                              pszRepoPath) );
#else
    SG_UNUSED( pCtx );
    SG_UNUSED( pvh );
#endif

    SG_UNUSED( pWcTx );

    // we don't actually have anything to do for an ADD.
    // the journal record was more for the verbose log.
}
/**
 * For MODIFIED or ADDED items we need to populate the right side
 * of the diff.
 *
 * See footnote 1 above for bIsTmp_right.
 *
 */
static void _get_right_side_details(SG_context * pCtx,
                                    sg_wc6diff__setup_data * pData,
                                    const SG_vhash * pvhItem,
                                    sg_wc_liveview_item * pLVI,
                                    SG_vhash * pvhDiffItem)
{
    SG_string * pStringLabel_right = NULL;
    SG_pathname * pPathAbsolute_right = NULL;
    SG_vhash * pvhSubsection_right = NULL;      // we do not own this
    const char * pszRepoPath_right;
    char bufDate[SG_TIME_FORMAT_LENGTH+1];
    SG_bool bIsTmp_right;
    SG_fsobj_stat st_right;

    // get the repo-path for the right side *AS IT EXISTED IN THE RIGHT CSET*.

    SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhItem, pData->pszSubsectionRight, &pvhSubsection_right) );
    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhSubsection_right, "path", &pszRepoPath_right) );

    SG_ERR_CHECK( sg_wc_liveview_item__get_proxy_file_path(pCtx, pLVI, pData->pWcTx,
                                                           &pPathAbsolute_right, &bIsTmp_right) );
    SG_ERR_CHECK( SG_fsobj__stat__pathname(pCtx, pPathAbsolute_right, &st_right) );
    SG_ERR_CHECK( SG_time__format_utc__i64(pCtx, st_right.mtime_ms, bufDate, SG_NrElements(bufDate)) );

    // the right label is "<right_repo_path> <datestamp>"
    SG_ERR_CHECK( sg_wc_diff_utils__make_label(pCtx, pszRepoPath_right, NULL, bufDate, &pStringLabel_right) );

    // put the various fields that we need to use in the call to SG_difftool__run()
    // into the pvhDiffItem for later.
    SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "right_label",    SG_string__sz(pStringLabel_right)) );
    SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "right_abs_path", SG_pathname__sz(pPathAbsolute_right)) );
    SG_ERR_CHECK( SG_vhash__add__bool(      pCtx, pvhDiffItem, "right_is_tmp",   bIsTmp_right) );

fail:
    SG_PATHNAME_NULLFREE(pCtx, pPathAbsolute_right);
    SG_STRING_NULLFREE(pCtx, pStringLabel_right);
}
/**
 * Our caller is trying to create a new repo and create a WD
 * mapped to it.  The destination directory may or may not
 * have already existed on disk before we started.  If we are
 * building upon an existing directory, verify that it doesn't
 * contain any submodules because we don't yet support them.
 *
 */
static void _check_for_nested_drawer(SG_context * pCtx,
                                     SG_wc_tx * pWcTx)
{
    SG_varray * pvaStatus = NULL;
    SG_string * pString_MyDrawerRepoPath = NULL;
    SG_string * pString_MatchedRepoPath = NULL;
    const char * psz_MyDrawerName = NULL;
    const char * psz_MyDrawerRepoPath = NULL;
    SG_uint32 k, nrItems;

    if (pWcTx->bWeCreated_WD || pWcTx->bWeCreated_WD_Contents)
        return;

    SG_ERR_CHECK( SG_wc_tx__status(pCtx, pWcTx, NULL, SG_UINT32_MAX,
                                   SG_FALSE,    // bListUnchanged
                                   SG_TRUE,     // bNoIgnores
                                   SG_TRUE,     // bNoTSC
                                   SG_FALSE,    // bListSparse
                                   SG_TRUE,     // bListReserved
                                   SG_TRUE,     // bNoSort
                                   &pvaStatus, NULL) );
    if (!pvaStatus)
        return;

    // TODO 2012/11/13 For now I'm just going to see if there is a
    // TODO            .sgdrawer somewhere within the directory tree.
    // TODO            In theory, we could have ADD/ADDREMOVE just
    // TODO            look for them and refuse to add its parent
    // TODO            directory, but I don't want to even support that
    // TODO            until we've properly dealt with submodules.
    // TODO
    // TODO            So for now, if there is a WD deeply nested within
    // TODO            this directory, we just complain.  This is mainly
    // TODO            to prevent accidents.  (Because they can still
    // TODO            manually move a sub-WD somewhere deep into this
    // TODO            directory at some point in the future.)

    SG_ERR_CHECK( SG_workingdir__get_drawer_directory_name(pCtx, &psz_MyDrawerName) );
    SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pString_MyDrawerRepoPath) );
    SG_ERR_CHECK( SG_string__sprintf(pCtx, pString_MyDrawerRepoPath, "@/%s", psz_MyDrawerName) );
    SG_ERR_CHECK( SG_repopath__ensure_final_slash(pCtx, pString_MyDrawerRepoPath) );
    psz_MyDrawerRepoPath = SG_string__sz(pString_MyDrawerRepoPath);

    SG_ERR_CHECK( SG_varray__count(pCtx, pvaStatus, &nrItems) );
    for (k=0; k<nrItems; k++)
    {
        SG_vhash * pvhItem;
        SG_vhash * pvhItemStatus;
        SG_bool bIsReserved;

        SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pvaStatus, k, &pvhItem) );
        SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhItem, "status", &pvhItemStatus) );
        SG_ERR_CHECK( SG_vhash__has(pCtx, pvhItemStatus, "isReserved", &bIsReserved) );
        if (bIsReserved)
        {
            // Don't freak out over the .sgdrawer that we just created in the root.
            const char * pszRepoPath;

            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "path", &pszRepoPath) );
            if (strcmp(pszRepoPath, psz_MyDrawerRepoPath) != 0)
            {
                SG_ERR_CHECK( SG_STRING__ALLOC__SZ(pCtx, &pString_MatchedRepoPath, pszRepoPath) );
                SG_ERR_CHECK( SG_repopath__remove_last(pCtx, pString_MatchedRepoPath) );
                SG_ERR_THROW2( SG_ERR_ENTRY_ALREADY_UNDER_VERSION_CONTROL,
                               (pCtx, "The directory '%s' contains a working copy and submodules are not yet supported.",
                                SG_string__sz(pString_MatchedRepoPath)) );
            }
        }
    }

fail:
    SG_STRING_NULLFREE(pCtx, pString_MatchedRepoPath);
    SG_STRING_NULLFREE(pCtx, pString_MyDrawerRepoPath);
    SG_VARRAY_NULLFREE(pCtx, pvaStatus);
}
static void _s2__do_cset_vs_cset(SG_context * pCtx,
                                 const SG_option_state * pOptSt,
                                 const SG_stringarray * psaArgs,
                                 SG_uint32 * pNrErrors)
{
    SG_varray * pvaStatus = NULL;
    SG_varray * pvaStatusDirtyFiles = NULL;
    SG_stringarray * psa1 = NULL;
    SG_string * pStringGidRepoPath = NULL;
    SG_string * pStringErr = NULL;
    SG_uint32 nrErrors = 0;

    SG_ERR_CHECK( SG_vv2__status(pCtx,
                                 pOptSt->psz_repo,
                                 pOptSt->pRevSpec,
                                 psaArgs,
                                 WC__GET_DEPTH(pOptSt),
                                 SG_FALSE,      // bNoSort
                                 &pvaStatus,
                                 NULL) );
    if (pvaStatus)
    {
        if (pOptSt->bInteractive)
        {
            // Filter list down to just modified files and show them one-by-one.
            SG_ERR_CHECK( _get_dirty_files(pCtx, pvaStatus, &pvaStatusDirtyFiles) );
            if (pvaStatusDirtyFiles)
                SG_ERR_CHECK( _do_gui_diffs(pCtx, SG_FALSE, pOptSt, pvaStatusDirtyFiles, &nrErrors) );
        }
        else
        {
            SG_uint32 k, nrItems;

            // Print the changes with PATCH-like headers.
            // Accumulate any tool errors.
            SG_ERR_CHECK( SG_varray__count(pCtx, pvaStatus, &nrItems) );
            for (k=0; k<nrItems; k++)
            {
                SG_vhash * pvhItem;
                const char * pszGid = NULL;

                SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pvaStatus, k, &pvhItem) );

                // TODO 2013/02/22 Our pvhItem has all of the details for the diff,
                // TODO            but we don't yet have a public API to let it be
                // TODO            used as is.  So we build a @gid repo-path and
                // TODO            run the old historical diff code on a 1-item array
                // TODO            containing this @gid.
                // TODO
                // TODO            We should fix this to just pass down the pvhItem
                // TODO            so that it doesn't have to repeat the status lookup.
                SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid) );
                SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pStringGidRepoPath) );
                SG_ERR_CHECK( SG_string__sprintf(pCtx, pStringGidRepoPath, "@%s", pszGid) );

                SG_ERR_CHECK( SG_STRINGARRAY__ALLOC(pCtx, &psa1, 1) );
                SG_ERR_CHECK( SG_stringarray__add(pCtx, psa1, SG_string__sz(pStringGidRepoPath)) );

                SG_vv2__diff_to_stream__throw(pCtx, pOptSt->psz_repo, pOptSt->pRevSpec,
                                              psa1, 0,
                                              SG_TRUE,      // bNoSort -- doesn't matter, only 1 item in list
                                              SG_FALSE,     // bInteractive
                                              pOptSt->psz_tool);
                // Don't throw the error from the tool.  Just print it on STDERR
                // and remember that we had an error so that we don't stop showing
                // the diffs just because we stumble over a changed binary file
                // or mis-configured tool, for example.
                if (SG_context__has_err(pCtx))
                {
                    SG_context__err_to_string(pCtx, SG_FALSE, &pStringErr);
                    SG_context__err_reset(pCtx);
                    SG_ERR_CHECK( SG_console__raw(pCtx, SG_CS_STDERR, SG_string__sz(pStringErr)) );
                    SG_STRING_NULLFREE(pCtx, pStringErr);
                    nrErrors++;
                }

                SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
                SG_STRINGARRAY_NULLFREE(pCtx, psa1);
            }
        }
    }

    *pNrErrors = nrErrors;

fail:
    SG_VARRAY_NULLFREE(pCtx, pvaStatus);
    SG_VARRAY_NULLFREE(pCtx, pvaStatusDirtyFiles);
    SG_STRINGARRAY_NULLFREE(pCtx, psa1);
    SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
    SG_STRING_NULLFREE(pCtx, pStringErr);
}
void sg_sync_client__http__push_begin(
    SG_context* pCtx,
    SG_sync_client * pSyncClient,
    SG_pathname** pFragballDirPathname,
    SG_sync_client_push_handle** ppPush)
{
    char* pszUrl = NULL;
    sg_sync_client_http_push_handle* pPush = NULL;
    SG_vhash* pvhResponse = NULL;
    SG_pathname* pPathUserTemp = NULL;
    SG_pathname* pPathFragballDir = NULL;
    char bufTid[SG_TID_MAX_BUFFER_LENGTH];
    SG_string* pstr = NULL;

    SG_NULLARGCHECK_RETURN(pSyncClient);
    SG_NULLARGCHECK_RETURN(pFragballDirPathname);
    SG_NULLARGCHECK_RETURN(ppPush);

    // Get the URL we're going to post to
    SG_ERR_CHECK( _get_sync_url(pCtx, pSyncClient->psz_remote_repo_spec, SYNC_URL_SUFFIX JSON_URL_SUFFIX, NULL, NULL, &pszUrl) );

    SG_ERR_CHECK( do_url(pCtx, pszUrl, "POST", NULL, pSyncClient->psz_username, pSyncClient->psz_password, &pstr, NULL, SG_TRUE) );
    SG_ERR_CHECK( SG_vhash__alloc__from_json__sz(pCtx, &pvhResponse, SG_string__sz(pstr)) );
    SG_STRING_NULLFREE(pCtx, pstr);

    // Alloc a push handle.  Stuff the push ID we received into it.
    {
        const char* pszRef = NULL;
        SG_ERR_CHECK( SG_alloc(pCtx, 1, sizeof(sg_sync_client_http_push_handle), &pPush) );
        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhResponse, PUSH_ID_KEY, &pszRef) );
        SG_ERR_CHECK( SG_strdup(pCtx, pszRef, &pPush->pszPushId) );
    }

    // Create a temporary local directory for stashing fragballs before shipping them over the network.
    SG_ERR_CHECK( SG_PATHNAME__ALLOC__USER_TEMP_DIRECTORY(pCtx, &pPathUserTemp) );
    SG_ERR_CHECK( SG_tid__generate(pCtx, bufTid, SG_TID_MAX_BUFFER_LENGTH) );
    SG_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathFragballDir, pPathUserTemp, bufTid) );
    SG_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathFragballDir) );

    // Tell caller where to stash fragballs for this push.
    SG_RETURN_AND_NULL(pPathFragballDir, pFragballDirPathname);

    // Return the new push handle.
    *ppPush = (SG_sync_client_push_handle*)pPush;
    pPush = NULL;

    /* fall through */
fail:
    SG_STRING_NULLFREE(pCtx, pstr);
    if(SG_context__err_equals(pCtx, SG_ERR_SERVER_HTTP_ERROR))
    {
        const char * szInfo = NULL;
        if(SG_IS_OK(SG_context__err_get_description(pCtx, &szInfo)) && strcmp(szInfo, "405")==0)
            SG_ERR_RESET_THROW(SG_ERR_SERVER_DOESNT_ACCEPT_PUSHES);
    }
    _NULLFREE_PUSH_HANDLE(pCtx, pPush);
    SG_NULLFREE(pCtx, pszUrl);
    SG_PATHNAME_NULLFREE(pCtx, pPathUserTemp);
    SG_PATHNAME_NULLFREE(pCtx, pPathFragballDir);
    SG_VHASH_NULLFREE(pCtx, pvhResponse);
}
/**
 * Compute the pathnames to the various input/output files for 1 step
 * in the file content merge plan.
 *
 * When we computed the merge and modified the WD, we put the various
 * 'foo~mine' etc. files in the same directory where we put the
 * (candidate) merge result.  If there are multiple steps in the plan,
 * the intermediate (sub-results) need to be placed in this directory
 * too.
 *
 * The final result can go in this directory.  *BUT* if there was also
 * a MOVE/RENAME conflict (so the ultimate final location is yet to be
 * determined), the final result may get moved/renamed when we deal
 * with the structural issue in [2].
 *
 * Since it is possible that the user could have done a "vv rename foo ..."
 * or "vv move foo ..." to manually deal with the structural conflict, we
 * respect that and dynamically compute the final destination (and ignore
 * the "result" field in the last step).
 *
 * pStrRepoPath_FinalResult should be NON-NULL when we are the final
 * step in the plan.
 */
static void _resolve__step_pathnames__compute(SG_context * pCtx,
                                              struct _resolve_data * pData,
                                              const SG_vhash * pvhIssue,
                                              const SG_vhash * pvhStep,
                                              SG_string * pStrRepoPath_Result,
                                              _resolve__step_pathnames ** ppStepPathnames)
{
    _resolve__step_pathnames * pStepPathnames = NULL;
    SG_string * pStrRepoPath_Parent = NULL;
    SG_pathname * pPath_Parent = NULL;
    const SG_pathname * pPath_WorkingDirectoryTop;
    const char * pszGidParent;
    const char * pszEntryname_Mine;
    const char * pszEntryname_Other;
    const char * pszEntryname_Ancestor;

    SG_ERR_CHECK_RETURN( SG_alloc1(pCtx, pStepPathnames) );

    // lookup the parent directory where we initially placed all
    // of the files, find where it is currently in the WD, and
    // build absolute paths for each of the mine/other/ancestor
    // files.

    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhIssue, "gid_parent", &pszGidParent) );
    SG_ERR_CHECK( SG_pendingtree__find_repo_path_by_gid(pCtx, pData->pPendingTree, pszGidParent, &pStrRepoPath_Parent) );

    SG_ERR_CHECK( SG_pendingtree__get_working_directory_top__ref(pCtx, pData->pPendingTree, &pPath_WorkingDirectoryTop) );
    SG_ERR_CHECK( SG_workingdir__construct_absolute_path_from_repo_path(pCtx,
                                                                        pPath_WorkingDirectoryTop,
                                                                        SG_string__sz(pStrRepoPath_Parent),
                                                                        &pPath_Parent) );

    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhStep, "mine",     &pszEntryname_Mine) );
    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhStep, "other",    &pszEntryname_Other) );
    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhStep, "ancestor", &pszEntryname_Ancestor) );

    SG_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pStepPathnames->pPath_Mine,     pPath_Parent, pszEntryname_Mine) );
    SG_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pStepPathnames->pPath_Other,    pPath_Parent, pszEntryname_Other) );
    SG_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pStepPathnames->pPath_Ancestor, pPath_Parent, pszEntryname_Ancestor) );

    if (pStrRepoPath_Result)
    {
        SG_ERR_CHECK( SG_workingdir__construct_absolute_path_from_repo_path(pCtx,
                                                                            pPath_WorkingDirectoryTop,
                                                                            SG_string__sz(pStrRepoPath_Result),
                                                                            &pStepPathnames->pPath_Result) );
    }
    else
    {
        const char * pszEntryname_InternalResult;

        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhStep, "result", &pszEntryname_InternalResult) );
        SG_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pStepPathnames->pPath_Result, pPath_Parent, pszEntryname_InternalResult) );
    }

    *ppStepPathnames = pStepPathnames;
    pStepPathnames = NULL;

fail:
    SG_PATHNAME_NULLFREE(pCtx, pPath_Parent);
    SG_STRING_NULLFREE(pCtx, pStrRepoPath_Parent);
    _RESOLVE__STEP_PATHNAMES__NULLFREE(pCtx, pStepPathnames);
}
void SG_cmd_util__get_username_for_repo(
    SG_context *pCtx,
    const char *szRepoName,
    char **ppUsername
    )
{
    SG_string * pUsername = NULL;
    SG_repo * pRepo = NULL;
    char * psz_username = NULL;
    SG_curl * pCurl = NULL;
    SG_string * pUri = NULL;
    SG_string * pResponse = NULL;
    SG_int32 responseStatusCode = 0;
    SG_vhash * pRepoInfo = NULL;
    char * psz_userid = NULL;
    SG_varray * pUsers = NULL;

    SG_NULLARGCHECK_RETURN(ppUsername);

    if(!szRepoName)
    {
        // Look up username based on 'whoami' of repo associated with cwd.
        SG_ERR_IGNORE( SG_cmd_util__get_repo_from_cwd(pCtx, &pRepo, NULL) );
        if(pRepo)
            SG_ERR_IGNORE( SG_user__get_username_for_repo(pCtx, pRepo, &psz_username) );
        SG_REPO_NULLFREE(pCtx, pRepo);
    }
    else if(SG_sz__starts_with(szRepoName, "http://") || SG_sz__starts_with(szRepoName, "https://"))
    {
        // Look up username based on 'whoami' of admin id of remote repo.
        SG_ERR_CHECK( SG_curl__alloc(pCtx, &pCurl) );

        SG_ERR_CHECK( SG_STRING__ALLOC__SZ(pCtx, &pUri, szRepoName) );
        SG_ERR_CHECK( SG_string__append__sz(pCtx, pUri, ".json") );

        SG_ERR_CHECK( SG_curl__reset(pCtx, pCurl) );
        SG_ERR_CHECK( SG_curl__setopt__sz(pCtx, pCurl, CURLOPT_URL, SG_string__sz(pUri)) );
        SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pResponse) );
        SG_ERR_CHECK( SG_curl__set__write_string(pCtx, pCurl, pResponse) );

        SG_ERR_CHECK( SG_curl__perform(pCtx, pCurl) );
        SG_ERR_CHECK( SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &responseStatusCode) );

        if(responseStatusCode==200)
        {
            const char * szAdminId = NULL;
            SG_ERR_CHECK( SG_VHASH__ALLOC__FROM_JSON__STRING(pCtx, &pRepoInfo, pResponse) );
            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pRepoInfo, SG_SYNC_REPO_INFO_KEY__ADMIN_ID, &szAdminId) );

            SG_ERR_CHECK( SG_string__clear(pCtx, pUri) );
            SG_ERR_CHECK( SG_string__append__format(pCtx, pUri, "/admin/%s/whoami/userid", szAdminId) );
            SG_ERR_IGNORE( SG_localsettings__get__sz(pCtx, SG_string__sz(pUri), NULL, &psz_userid, NULL) );

            if(psz_userid)
            {
                // We now have the userid.  Look up the username.
                SG_ERR_CHECK( SG_string__clear(pCtx, pUri) );
                SG_ERR_CHECK( SG_string__append__format(pCtx, pUri, "%s/users.json", szRepoName) );

                SG_ERR_CHECK( SG_curl__reset(pCtx, pCurl) );
                SG_ERR_CHECK( SG_curl__setopt__sz(pCtx, pCurl, CURLOPT_URL, SG_string__sz(pUri)) );
                SG_ERR_CHECK( SG_string__clear(pCtx, pResponse) );
                SG_ERR_CHECK( SG_curl__set__write_string(pCtx, pCurl, pResponse) );

                SG_ERR_CHECK( SG_curl__perform(pCtx, pCurl) );
                SG_ERR_CHECK( SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &responseStatusCode) );

                if(responseStatusCode==200)
                {
                    SG_uint32 i, nUsers;
                    SG_ERR_CHECK( SG_VARRAY__ALLOC__FROM_JSON__STRING(pCtx, &pUsers, pResponse) );
                    SG_ERR_CHECK( SG_varray__count(pCtx, pUsers, &nUsers) );
                    for(i=0; i<nUsers; ++i)
                    {
                        SG_vhash * pUser = NULL;
                        const char * psz_recid = NULL;
                        SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pUsers, i, &pUser) );
                        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pUser, "recid", &psz_recid) );
                        if(!strcmp(psz_recid, psz_userid))
                        {
                            const char * psz_name = NULL;
                            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pUser, "name", &psz_name) );
                            SG_ERR_CHECK( SG_STRDUP(pCtx, psz_name, &psz_username) );
                            break;
                        }
                    }
                    SG_VARRAY_NULLFREE(pCtx, pUsers);
                }
                SG_NULLFREE(pCtx, psz_userid);
            }
            SG_VHASH_NULLFREE(pCtx, pRepoInfo);
        }
        SG_STRING_NULLFREE(pCtx, pResponse);
        SG_STRING_NULLFREE(pCtx, pUri);
        SG_CURL_NULLFREE(pCtx, pCurl);
    }
    else
    {
        // Look up username based on 'whoami' of repo provided.
        SG_ERR_CHECK( SG_REPO__OPEN_REPO_INSTANCE(pCtx, szRepoName, &pRepo) );
        SG_ERR_IGNORE( SG_user__get_username_for_repo(pCtx, pRepo, &psz_username) );
        SG_REPO_NULLFREE(pCtx, pRepo);
    }

    *ppUsername = psz_username;

    return;
fail:
    SG_STRING_NULLFREE(pCtx, pUsername);
    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_NULLFREE(pCtx, psz_username);
    SG_CURL_NULLFREE(pCtx, pCurl);
    SG_STRING_NULLFREE(pCtx, pUri);
    SG_STRING_NULLFREE(pCtx, pResponse);
    SG_VHASH_NULLFREE(pCtx, pRepoInfo);
    SG_NULLFREE(pCtx, psz_userid);
    SG_VARRAY_NULLFREE(pCtx, pUsers);
}
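/*
 * Hedged usage sketch (not in the original source) for
 * SG_cmd_util__get_username_for_repo() above: fetch and print the
 * configured username for a named local repo.  "my_repo" is illustrative.
 */
static void example__print_username(SG_context * pCtx)
{
    char * pszUsername = NULL;

    SG_ERR_CHECK( SG_cmd_util__get_username_for_repo(pCtx, "my_repo", &pszUsername) );
    if (pszUsername)
        SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "whoami: %s\n", pszUsername) );

fail:
    SG_NULLFREE(pCtx, pszUsername);
}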
void SG_sync_remote__push_clone__begin(
    SG_context* pCtx,
    const char* psz_repo_descriptor_name,
    const SG_vhash* pvhRepoInfo,
    const char** ppszCloneId)
{
    const char* psz_repo_id = NULL;
    const char* psz_admin_id = NULL;
    const char* psz_hash_method = NULL;
    const char* pszRefValidatedName = NULL;
    SG_vhash* pvhFullDescriptor = NULL;     // pNewRepo owns this.  Don't free.
    SG_vhash* pvhDescriptorPartial = NULL;
    SG_closet_descriptor_handle* ph = NULL;
    SG_repo* pNewRepo = NULL;
    const char* pszRefCloneId = NULL;

    SG_NULLARGCHECK_RETURN(ppszCloneId);

    SG_ERR_CHECK_RETURN( _remote_clone_allowed(pCtx) );

    /* We'll create a descriptor and empty repo immediately, so that we validate and claim the name
     * before a potentially long upload. */
    SG_ERR_CHECK_RETURN( SG_vhash__get__sz(pCtx, pvhRepoInfo, SG_SYNC_REPO_INFO_KEY__REPO_ID, &psz_repo_id) );
    SG_ERR_CHECK_RETURN( SG_vhash__get__sz(pCtx, pvhRepoInfo, SG_SYNC_REPO_INFO_KEY__ADMIN_ID, &psz_admin_id) );
    SG_ERR_CHECK_RETURN( SG_vhash__get__sz(pCtx, pvhRepoInfo, SG_SYNC_REPO_INFO_KEY__HASH_METHOD, &psz_hash_method) );

    SG_ERR_CHECK( SG_closet__descriptors__add_begin(pCtx, psz_repo_descriptor_name,
                                                    NULL, psz_repo_id, psz_admin_id,
                                                    &pszRefValidatedName, &pvhDescriptorPartial, &ph) );

    SG_ERR_CHECK( SG_repo__create_repo_instance(pCtx, pszRefValidatedName, pvhDescriptorPartial,
                                                SG_TRUE, psz_hash_method, psz_repo_id, psz_admin_id,
                                                &pNewRepo) );
    SG_VHASH_NULLFREE(pCtx, pvhDescriptorPartial);

    SG_ERR_CHECK_RETURN( SG_staging__clone__create(pCtx, psz_repo_descriptor_name, pvhRepoInfo, &pszRefCloneId) );

    /* We temporarily add the clone ID to the repo descriptor so that we can verify
     * we're committing to the correct repo later, when the fragball's been uploaded. */
    SG_ERR_CHECK( SG_repo__get_descriptor__ref(pCtx, pNewRepo, &pvhFullDescriptor) );
    SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhFullDescriptor, SG_SYNC_DESCRIPTOR_KEY__CLONE_ID, pszRefCloneId) );
    SG_ERR_CHECK( SG_closet__descriptors__add_commit(pCtx, &ph, pvhFullDescriptor, SG_REPO_STATUS__CLONING) );

    SG_REPO_NULLFREE(pCtx, pNewRepo);

    *ppszCloneId = pszRefCloneId;

    return;

fail:
    SG_VHASH_NULLFREE(pCtx, pvhDescriptorPartial);
    SG_REPO_NULLFREE(pCtx, pNewRepo);
    SG_ERR_IGNORE( SG_closet__descriptors__add_abort(pCtx, &ph) );
}
void sg_wc_tx__diff__setup__file(SG_context * pCtx,
                                 sg_wc6diff__setup_data * pData,
                                 const SG_vhash * pvhItem,
                                 SG_wc_status_flags statusFlags)
{
    SG_string * pStringHeader = NULL;
    SG_vhash * pvhDiffItem = NULL;          // we do not own this
    const char * pszGid;                    // we do not own this
    const char * pszLiveRepoPath;           // we do not own this
    SG_vhash * pvhItemStatus;               // we do not own this

    if (statusFlags & SG_WC_STATUS_FLAGS__U__IGNORED)
    {
        // The code in wc4status won't create header info for ignored
        // items unless verbose is set and diff never sets it, so we
        // skip this item to avoid getting the divider row of equal signs.
        return;
    }

    SG_ERR_CHECK( SG_varray__appendnew__vhash(pCtx, pData->pvaDiffSteps, &pvhDiffItem) );

    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid) );
    SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "gid", pszGid) );

    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "path", &pszLiveRepoPath) );
    SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "path", pszLiveRepoPath) );

    SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhItem, "status", &pvhItemStatus) );
    SG_ERR_CHECK( SG_vhash__addcopy__vhash(pCtx, pvhDiffItem, "status", pvhItemStatus) );

    // the following if-else-if-else... DOES NOT imply that these statuses
    // are mutually exclusive (for example, one could have ADDED+LOST), but
    // rather just the cases when we actually want to print the content diffs.

    if (statusFlags & SG_WC_STATUS_FLAGS__A__SPARSE)
    {
        SG_ERR_CHECK( sg_wc_tx__diff__setup__header(pCtx, pvhItem, &pStringHeader) );
        SG_ERR_CHECK( SG_string__append__format(pCtx, pStringHeader, "=== %s\n",
                                                "Omitting details for sparse item.") );
        SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "header", SG_string__sz(pStringHeader)) );
    }
    else if (statusFlags & SG_WC_STATUS_FLAGS__U__LOST)
    {
        SG_ERR_CHECK( sg_wc_tx__diff__setup__header(pCtx, pvhItem, &pStringHeader) );
        SG_ERR_CHECK( SG_string__append__format(pCtx, pStringHeader, "=== %s\n",
                                                "Omitting details for lost item.") );
        SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "header", SG_string__sz(pStringHeader)) );
    }
    else if (statusFlags & SG_WC_STATUS_FLAGS__U__FOUND)
    {
        // just build the header -- we never include content details for an uncontrolled item.
        SG_ERR_CHECK( sg_wc_tx__diff__setup__header(pCtx, pvhItem, &pStringHeader) );
        SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "header", SG_string__sz(pStringHeader)) );
    }
    else if ((statusFlags & SG_WC_STATUS_FLAGS__S__MERGE_CREATED)
             && (statusFlags & SG_WC_STATUS_FLAGS__S__DELETED))
    {
        // just build the header -- no content to diff -- not present now and not present in baseline
        SG_ERR_CHECK( sg_wc_tx__diff__setup__header(pCtx, pvhItem, &pStringHeader) );
        SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "header", SG_string__sz(pStringHeader)) );
    }
    else if ((statusFlags & SG_WC_STATUS_FLAGS__S__UPDATE_CREATED)
             && (statusFlags & SG_WC_STATUS_FLAGS__S__DELETED))
    {
        // just build the header -- no content to diff -- not present now and not present in baseline
        SG_ERR_CHECK( sg_wc_tx__diff__setup__header(pCtx, pvhItem, &pStringHeader) );
        SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "header", SG_string__sz(pStringHeader)) );
    }
    else if (statusFlags & (SG_WC_STATUS_FLAGS__S__ADDED
                            |SG_WC_STATUS_FLAGS__S__MERGE_CREATED
                            |SG_WC_STATUS_FLAGS__S__UPDATE_CREATED
                            |SG_WC_STATUS_FLAGS__S__DELETED
                            |SG_WC_STATUS_FLAGS__C__NON_DIR_MODIFIED))
    {
        // content added/deleted/changed.
        SG_ERR_CHECK( sg_wc_tx__diff__setup__header(pCtx, pvhItem, &pStringHeader) );
        SG_ERR_CHECK( _diff__file__contents(pCtx, pData, pvhItem, statusFlags, pvhDiffItem, pStringHeader) );
        SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "header", SG_string__sz(pStringHeader)) );
    }
    else
    {
        // just build the header -- content did not change -- must be a simple structural change.
        SG_ERR_CHECK( sg_wc_tx__diff__setup__header(pCtx, pvhItem, &pStringHeader) );
        SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhDiffItem, "header", SG_string__sz(pStringHeader)) );
    }

fail:
    SG_STRING_NULLFREE(pCtx, pStringHeader);
}
/**
 * Do diff of an individual item.
 * When WC-based, we have a "DiffStep" vhash.
 * When historical, we have an item from a pvaStatus.
 *
 */
static void _do_diff1(SG_context * pCtx,
                      SG_bool bWC,
                      const SG_option_state * pOptSt,
                      const SG_vhash * pvhItem,
                      SG_uint32 * piResult)
{
    SG_string * pStringGidRepoPath = NULL;
    SG_vhash * pvhResultCodes = NULL;
    SG_stringarray * psa1 = NULL;
    const char * pszGid;
    SG_int64 i64Result = 0;
    SG_string * pStringErr = NULL;

    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid) );

    if (bWC)
    {
        SG_pathname * pPathWc = NULL;
        SG_bool bHasTool = SG_FALSE;

        // With the __diff__setup() and __diff__run() changes, we have already
        // examined the items during the __setup() step and recorded a tool for
        // the *FILES* that have changed content.  So if "tool" isn't set in the
        // DiffStep/Item, we don't need to diff it -- it could be a structural
        // change, a non-file, a found item, etc.
        //
        // we do not use SG_wc__diff__throw() because we already have the diff info
        // and we want to control the result-code processing below.

        SG_ERR_CHECK( SG_vhash__has(pCtx, pvhItem, "tool", &bHasTool) );
        if (bHasTool)
            SG_ERR_CHECK( SG_wc__diff__run(pCtx, pPathWc, pvhItem, &pvhResultCodes) );
    }
    else
    {
        SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pStringGidRepoPath) );
        SG_ERR_CHECK( SG_string__sprintf(pCtx, pStringGidRepoPath, "@%s", pszGid) );

        SG_ERR_CHECK( SG_STRINGARRAY__ALLOC(pCtx, &psa1, 1) );
        SG_ERR_CHECK( SG_stringarray__add(pCtx, psa1, SG_string__sz(pStringGidRepoPath)) );

        // we do not use the __throw() version of this routine so we can control
        // result-code processing below.
        SG_ERR_CHECK( SG_vv2__diff_to_stream(pCtx, pOptSt->psz_repo, pOptSt->pRevSpec,
                                             psa1, 0,
                                             SG_FALSE,      // bNoSort
                                             SG_TRUE,       // bInteractive
                                             pOptSt->psz_tool,
                                             &pvhResultCodes) );
    }

    if (pvhResultCodes)
    {
        SG_vhash * pvhResult;       // we do not own this

        SG_ERR_CHECK( SG_vhash__check__vhash(pCtx, pvhResultCodes, pszGid, &pvhResult) );
        if (pvhResult)
        {
            const char * pszTool;

            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhResult, "tool", &pszTool) );
            SG_ERR_CHECK( SG_vhash__get__int64(pCtx, pvhResult, "result", &i64Result) );

            SG_difftool__check_result_code__throw(pCtx, i64Result, pszTool);
            if (SG_context__has_err(pCtx))
            {
                SG_context__err_to_string(pCtx, SG_FALSE, &pStringErr);
                SG_context__err_reset(pCtx);
                SG_ERR_CHECK( SG_console__raw(pCtx, SG_CS_STDERR, SG_string__sz(pStringErr)) );
                // eat the tool error.  the result code is set.
            }
        }
    }

    if (piResult)
        *piResult = (SG_uint32)i64Result;

fail:
    SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
    SG_VHASH_NULLFREE(pCtx, pvhResultCodes);
    SG_STRINGARRAY_NULLFREE(pCtx, psa1);
    SG_STRING_NULLFREE(pCtx, pStringErr);
}
int u0050_logstuff_test__1(SG_context * pCtx, SG_pathname* pPathTopDir)
{
    char bufName[SG_TID_MAX_BUFFER_LENGTH];
    SG_pathname* pPathWorkingDir = NULL;
    SG_pathname* pPathFile = NULL;
    SG_vhash* pvh = NULL;
    SG_dagnode* pdn = NULL;
    const char* psz_hid_cs = NULL;
    SG_repo* pRepo = NULL;
    SG_uint32 count;
    SG_rbtree* prb = NULL;
    SG_varray* pva = NULL;
    SG_rbtree* prb_reversed = NULL;
    const char* psz_val = NULL;
    SG_audit q;

    VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, bufName, sizeof(bufName), 32) );

    /* create the working dir */
    VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, bufName) );
    VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir) );

    /* add stuff */
    VERIFY_ERR_CHECK( u0050_logstuff__create_file__numbers(pCtx, pPathWorkingDir, "aaa", 20) );

    /* create the repo */
    VERIFY_ERR_CHECK( _ut_pt__new_repo(pCtx, bufName, pPathWorkingDir) );
    VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
    VERIFY_ERR_CHECK( u0050_logstuff__commit_all(pCtx, pPathWorkingDir, &pdn) );
    VERIFY_ERR_CHECK( SG_dagnode__get_id_ref(pCtx, pdn, &psz_hid_cs) );

    SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, bufName, &pRepo) );

#define MY_COMMENT "The name of this new file sucks! What kind of a name is 'aaa'?"

    VERIFY_ERR_CHECK( SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS) );

    VERIFY_ERR_CHECK( SG_vc_comments__add(pCtx, pRepo, psz_hid_cs, MY_COMMENT, &q) );
    VERIFY_ERR_CHECK( SG_vc_stamps__add(pCtx, pRepo, psz_hid_cs, "crap", &q) );
    VERIFY_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "tcrap", &q) );

    VERIFY_ERR_CHECK( SG_vc_comments__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
    VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
    VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "text", &psz_val) );
    VERIFY_COND("match", (0 == strcmp(psz_val, MY_COMMENT)) );
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK( SG_vc_stamps__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
    VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
    VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "stamp", &psz_val) );
    VERIFY_COND("match", (0 == strcmp(psz_val, "crap")) );
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
    VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
    VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "tag", &psz_val) );
    VERIFY_COND("match", (0 == strcmp(psz_val, "tcrap")) );
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
    VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
    VERIFY_COND("count", (1 == count));

    VERIFY_ERR_CHECK( SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed) );
    VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb_reversed, &count) );
    VERIFY_COND("count", (1 == count));

    {
        const char* psz_my_key = NULL;
        const char* psz_my_val = NULL;
        SG_bool b;

        VERIFY_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val) );
        VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap")) );
        VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs)) );
    }

    SG_RBTREE_NULLFREE(pCtx, prb_reversed);
    SG_RBTREE_NULLFREE(pCtx, prb);

    VERIFY_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "whatever", &q) );

    VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
    VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
    VERIFY_COND("count", (2 == count));
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
    VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
    VERIFY_COND("count", (2 == count));

    VERIFY_ERR_CHECK( SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed) );
    VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb_reversed, &count) );
    VERIFY_COND("count", (1 == count));

    {
        const char* psz_my_key = NULL;
        const char* psz_my_val = NULL;
        SG_bool b;

        VERIFY_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val) );
        VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs)) );
        /* we don't know whether psz_my_val is tcrap or whatever. */
        // VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap")) );
    }

    SG_RBTREE_NULLFREE(pCtx, prb_reversed);
    SG_RBTREE_NULLFREE(pCtx, prb);

    {
        const char* psz_remove = "whatever";
        VERIFY_ERR_CHECK( SG_vc_tags__remove(pCtx, pRepo, &q, 1, &psz_remove) );

        /* Note that by removing whatever, we are bringing the tags list back
         * to a state where it has been before (just tcrap).  This changeset in
         * the tags table will have its own csid, because the parentage is
         * different, but its root idtrie HID will be the same as a previous
         * node. */
    }

    VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
    VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
    VERIFY_COND("count", (1 == count));
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
    VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
    VERIFY_COND("count", (1 == count));
    SG_RBTREE_NULLFREE(pCtx, prb);

    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathFile);

    return 1;

fail:
    SG_VHASH_NULLFREE(pCtx, pvh);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathFile);

    return 0;
}
/**
 * Iterate over all of the dirty files and prompt before diffing.
 * pvaDirtyFiles can either be a STATUS or a "DiffStep" varray.
 *
 */
static void _do_gui_diffs(SG_context * pCtx,
                          SG_bool bWC,
                          const SG_option_state * pOptSt,
                          const SG_varray * pvaDirtyFiles,
                          SG_uint32 * pNrErrors)
{
    SG_uint32 k, nrItems;
    SG_uint32 nrErrors = 0;

    SG_ERR_CHECK( SG_varray__count(pCtx, pvaDirtyFiles, &nrItems) );

    if (nrItems == 1)       // if only 1 item, no fancy prompt required.
    {
        SG_vhash * pvhItem_0;       // we do not own this
        SG_uint32 iResult = 0;

        SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pvaDirtyFiles, 0, &pvhItem_0) );
        SG_ERR_CHECK( _do_diff1(pCtx, bWC, pOptSt, pvhItem_0, &iResult) );
        switch (iResult)
        {
        default:
        case SG_FILETOOL__RESULT__SUCCESS:
        case SG_DIFFTOOL__RESULT__SAME:
        case SG_DIFFTOOL__RESULT__DIFFERENT:
            break;

        case SG_DIFFTOOL__RESULT__CANCEL:
        case SG_FILETOOL__RESULT__FAILURE:
        case SG_FILETOOL__RESULT__ERROR:
            nrErrors++;
            break;
        }
    }
    else
    {
        k = 0;
        while (1)
        {
            SG_vhash * pvhItem;         // we do not own this
            const char * pszRepoPath;
            char chChoice = 'd';
            SG_uint32 iResult;

            SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pvaDirtyFiles, k, &pvhItem) );
            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "path", &pszRepoPath) );

            SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDOUT, "\n[%d/%d] %s:\n", k+1, nrItems, pszRepoPath) );

            if (k == 0)
                SG_ERR_CHECK( _do_prompt(pCtx, "(d)iff (n)ext (q)uit", "dnq", 'd', &chChoice) );
            else if (k+1 == nrItems)
                SG_ERR_CHECK( _do_prompt(pCtx, "(d)iff (p)rev (q)uit", "dpq", 'd', &chChoice) );
            else
                SG_ERR_CHECK( _do_prompt(pCtx, "(d)iff (n)ext (p)rev (q)uit", "dnpq", 'd', &chChoice) );

            switch (chChoice)
            {
            case 'd':
                SG_ERR_CHECK( _do_diff1(pCtx, bWC, pOptSt, pvhItem, &iResult) );
                switch (iResult)
                {
                default:
                case SG_FILETOOL__RESULT__SUCCESS:
                case SG_DIFFTOOL__RESULT__SAME:
                case SG_DIFFTOOL__RESULT__DIFFERENT:
                    // advance to next pair of files or finish.
                    if (k+1 == nrItems)
                        goto done;
                    k++;
                    break;

                case SG_DIFFTOOL__RESULT__CANCEL:
                case SG_FILETOOL__RESULT__FAILURE:
                case SG_FILETOOL__RESULT__ERROR:
                    nrErrors++;
                    // stay on this pair of files (so that they see the
                    // error message and this filename again).
                    break;
                }
                break;

            case 'n':
                k++;
                break;

            case 'p':
                k--;
                break;

            default:
            case 'q':
                goto done;
            }
        }
    }

done:
    *pNrErrors = nrErrors;

fail:
    return;
}