/**
 * Server-side: look up history details for the version-control
 * changeset HIDs named in the request vhash.
 *
 * The request contains a sub-vhash keyed by the decimal form of
 * SG_DAGNUM__VERSION_CONTROL; that sub-vhash's keys are the HIDs
 * of interest.  The caller owns the returned varray.
 */
void SG_server__get_dagnode_info(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_vhash* pvhRequest,
	SG_varray** ppvaInfo)
{
	char szDagnum[SG_DAGNUM__BUF_MAX__DEC];
	SG_vhash* pvhRefHids = NULL;			// we do not own this
	SG_varray* pvaKeys = NULL;
	SG_stringarray* psaKeys = NULL;
	const char* const* ppszKeys = NULL;
	SG_uint32 nrKeys = 0;

	SG_ERR_CHECK(  SG_dagnum__to_sz__decimal(pCtx, SG_DAGNUM__VERSION_CONTROL, szDagnum, sizeof(szDagnum))  );
	SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, szDagnum, &pvhRefHids)  );

	// Ugh. Is vhash->varray->stringarray->char** the best option?
	SG_ERR_CHECK(  SG_vhash__get_keys(pCtx, pvhRefHids, &pvaKeys)  );
	SG_ERR_CHECK(  SG_varray__to_stringarray(pCtx, pvaKeys, &psaKeys)  );
	SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, psaKeys, &ppszKeys, &nrKeys)  );

	SG_ERR_CHECK(  SG_history__query(pCtx, NULL, pRepo, 0, NULL, ppszKeys, nrKeys,
									 NULL, NULL, 0, 0, 0, SG_FALSE, SG_FALSE, ppvaInfo)  );

	/* fall through */
fail:
	SG_VARRAY_NULLFREE(pCtx, pvaKeys);
	SG_STRINGARRAY_NULLFREE(pCtx, psaKeys);
}
/**
 * Fetch a local setting as a varray.  *ppva receives the varray
 * (or NULL when the setting is absent or is not a varray); the
 * caller owns it.  When ppstr_where_found is non-NULL it receives
 * the full path at which the value was found (caller owns it).
 */
void SG_localsettings__get__varray(SG_context * pCtx, const char * psz_path, SG_repo* pRepo, SG_varray** ppva, SG_string** ppstr_where_found)
{
	SG_variant* pvValue = NULL;
	SG_varray* pvaResult = NULL;
	SG_string* pstrWhere = NULL;

	SG_ERR_CHECK(  SG_localsettings__get__variant(pCtx, psz_path, pRepo, &pvValue, &pstrWhere)  );

	// steal the varray out of the variant (when it is one) so the
	// variant free below doesn't take it with it.
	if (pvValue && (SG_VARIANT_TYPE_VARRAY == pvValue->type))
	{
		pvaResult = pvValue->v.val_varray;
		pvValue->v.val_varray = NULL;
	}

	*ppva = pvaResult;
	pvaResult = NULL;

	if (ppstr_where_found)
	{
		*ppstr_where_found = pstrWhere;
		pstrWhere = NULL;
	}

fail:
	SG_VARIANT_NULLFREE(pCtx, pvValue);
	SG_STRING_NULLFREE(pCtx, pstrWhere);
	SG_VARRAY_NULLFREE(pCtx, pvaResult);
}
// Release the per-node allocations that cannot be recycled when the
// node itself is later placed on the reuse list.
static void _node__free_nonreusable_memory(SG_context * pCtx, _node_t * pNode)
{
	SG_ASSERT(NULL != pNode);

	SG_DAGNODE_NULLFREE(pCtx, pNode->pDagnode);
	SG_VARRAY_NULLFREE(pCtx, pNode->pAudits);
	SG_VHASH_NULLFREE(pCtx, pNode->pVcParents);
}
/**
 * Record pszSyncTarget in the per-repo-instance list of previously
 * used sync destinations (no-op if it is already in the list).
 */
void SG_sync__remember_sync_target(SG_context* pCtx, const char * pszLocalRepoDescriptor, const char * pszSyncTarget)
{
	SG_string * pString = NULL;
	SG_varray * pva_targets = NULL;
	SG_bool bFound = SG_FALSE;
	SG_uint32 nEntry = 0;

	// Save this destination to the local setting of previously used destinations.
	SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pString)  );
	SG_ERR_CHECK(  SG_string__sprintf(pCtx, pString, "%s/%s/%s",
									  SG_LOCALSETTING__SCOPE__INSTANCE,
									  pszLocalRepoDescriptor,
									  SG_LOCALSETTING__SYNC_TARGETS)  );

	SG_ERR_CHECK(  SG_localsettings__get__varray(pCtx, SG_string__sz(pString), NULL, &pva_targets, NULL)  );
	if (pva_targets)
		SG_ERR_CHECK(  SG_varray__find__sz(pCtx, pva_targets, pszSyncTarget, &bFound, &nEntry)  );
	else
	{
		// BUGFIX: this alloc was previously unchecked; a failure here was
		// silently ignored and we would have appended into a NULL varray.
		SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pva_targets)  );
	}

	if (!bFound)
	{
		SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_targets, pszSyncTarget)  );
		SG_ERR_CHECK(  SG_localsettings__update__varray(pCtx, SG_string__sz(pString), pva_targets)  );
	}

fail:
	SG_STRING_NULLFREE(pCtx, pString);
	SG_VARRAY_NULLFREE(pCtx, pva_targets);
}
/**
 * List all groups in the USERS dag, returning a varray of records
 * with fields "recid" and "name", sorted by name ascending.
 * The caller owns the returned varray.
 *
 * (Cleanup: removed unused locals pstr_where, psa_results and
 * pvh_group and their corresponding NULLFREEs.)
 */
void SG_group__list(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_varray** ppva
	)
{
	char* psz_hid_cs_leaf = NULL;
	SG_varray* pva_result = NULL;
	SG_stringarray* psa_fields = NULL;

	SG_ERR_CHECK(  SG_zing__get_leaf__fail_if_needs_merge(pCtx, pRepo, SG_DAGNUM__USERS, &psz_hid_cs_leaf)  );

	SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa_fields, 2)  );
	SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa_fields, "recid")  );
	SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa_fields, "name")  );
	SG_ERR_CHECK(  SG_zing__query(pCtx, pRepo, SG_DAGNUM__USERS, psz_hid_cs_leaf,
								  "group", NULL, "name #ASC", 0, 0,
								  psa_fields, &pva_result)  );

	*ppva = pva_result;
	pva_result = NULL;

fail:
	SG_NULLFREE(pCtx, psz_hid_cs_leaf);
	SG_VARRAY_NULLFREE(pCtx, pva_result);
	SG_STRINGARRAY_NULLFREE(pCtx, psa_fields);
}
/**
 * Append pValue to the varray setting at psz_path.  When the current
 * value came from factory defaults (no explicit path found), the
 * defaults are first materialized under machine scope so the append
 * lands in a writable location.
 */
void SG_localsettings__varray__append(SG_context * pCtx, const char* psz_path, const char* pValue)
{
	SG_jsondb* pJsondb = NULL;
	SG_varray* pvaCurrent = NULL;
	SG_string* pstrFoundPath = NULL;
	SG_string* pstrElementPath = NULL;

	SG_ASSERT(pCtx);
	SG_NONEMPTYCHECK_RETURN(psz_path);

	SG_ERR_CHECK(  SG_closet__get_localsettings(pCtx, &pJsondb)  );
	SG_ERR_CHECK(  SG_localsettings__get__varray(pCtx, psz_path, NULL, &pvaCurrent, &pstrFoundPath)  );

	if (!pstrFoundPath)
	{
		// this came from factory defaults.
		SG_ERR_CHECK(  SG_string__alloc(pCtx, &pstrFoundPath)  );
		SG_ERR_CHECK(  SG_string__sprintf(pCtx, pstrFoundPath, "%s/%s", SG_LOCALSETTING__SCOPE__MACHINE, psz_path)  );
		SG_ERR_CHECK(  SG_localsettings__update__varray(pCtx, SG_string__sz(pstrFoundPath), pvaCurrent)  );
	}

	// "<path>/#" is the jsondb syntax for append-at-end.
	SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pstrElementPath)  );
	SG_ERR_CHECK(  SG_string__sprintf(pCtx, pstrElementPath, "%s/#", SG_string__sz(pstrFoundPath))  );
	SG_ERR_CHECK(  SG_jsondb__update__string__sz(pCtx, pJsondb, SG_string__sz(pstrElementPath), SG_TRUE, pValue)  );

fail:
	SG_STRING_NULLFREE(pCtx, pstrElementPath);
	SG_JSONDB_NULLFREE(pCtx, pJsondb);
	SG_VARRAY_NULLFREE(pCtx, pvaCurrent);
	SG_STRING_NULLFREE(pCtx, pstrFoundPath);
}
/**
 * Remote-sync variant: look up revision details for the version-control
 * HIDs named in the request vhash (keyed by the hex form of the VC
 * dagnum).  The caller owns the returned history result.
 */
void SG_sync_remote__get_dagnode_info(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_vhash* pvhRequest,
	SG_history_result** ppInfo)
{
	char szDagnum[SG_DAGNUM__BUF_MAX__HEX];
	SG_vhash* pvhRefHids = NULL;		// we do not own this
	SG_varray* pvaKeys = NULL;
	SG_stringarray* psaKeys = NULL;

	SG_ERR_CHECK(  SG_dagnum__to_sz__hex(pCtx, SG_DAGNUM__VERSION_CONTROL, szDagnum, sizeof(szDagnum))  );
	SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, szDagnum, &pvhRefHids)  );

	// Ugh. Is vhash->varray->stringarray the best option?
	SG_ERR_CHECK(  SG_vhash__get_keys(pCtx, pvhRefHids, &pvaKeys)  );
	SG_ERR_CHECK(  SG_varray__to_stringarray(pCtx, pvaKeys, &psaKeys)  );

	SG_ERR_CHECK(  SG_history__get_revision_details(pCtx, pRepo, psaKeys, NULL, ppInfo)  );

	/* fall through */
fail:
	SG_VARRAY_NULLFREE(pCtx, pvaKeys);
	SG_STRINGARRAY_NULLFREE(pCtx, psaKeys);
}
// Replace the varray setting at psz_path with an empty list.
void SG_localsettings__varray__empty(SG_context * pCtx, const char* psz_path)
{
	SG_varray* pvaEmpty = NULL;

	SG_ERR_CHECK(  SG_varray__alloc(pCtx, &pvaEmpty)  );
	SG_ERR_CHECK(  SG_localsettings__update__varray(pCtx, psz_path, pvaEmpty)  );

fail:
	SG_VARRAY_NULLFREE(pCtx, pvaEmpty);
}
/** * Take a domain-specific/relative repo-path and get * the GID ALIAS of the item. * * THIS IS STRICLTY BASED UPON THE FIXED CHANGESET * THAT WE ALREADY HAVE IN THE tne_* TABLE. It does * not know about or account for any pending changes * in the WD; that is, it DOES NOT know about tbl_PC. * * We DO NOT know if the given domain is appropriate * for the given pCSetRow. That is upto the caller. * For example, we expect them to map: * 'b' ==> "L0" * 'c' ==> "L1" * but we don't enforce that here. * */ void sg_wc_db__tne__get_alias_from_extended_repo_path(SG_context * pCtx, sg_wc_db * pDb, const sg_wc_db__cset_row * pCSetRow, const char * pszBaselineRepoPath, SG_bool * pbFound, SG_uint64 * puiAliasGid) { SG_varray * pva = NULL; sg_wc_db__tne_row * pTneRow_k = NULL; SG_uint64 uiAlias; SG_uint32 k, count; *pbFound = SG_FALSE; *puiAliasGid = 0; // TODO 2012/01/04 For now, require that an extended-prefix be // TODO present in the repo-path. // TODO // TODO We may relax this to allow a '/' current/live // TODO domain repo-path eventually. SG_ASSERT_RELEASE_FAIL( ((pszBaselineRepoPath[0]=='@') && (pszBaselineRepoPath[1]!='/')) ); SG_ERR_CHECK( SG_repopath__split_into_varray(pCtx, pszBaselineRepoPath, &pva) ); // the root directory should be "@b" and be contained in pva[0]. // we have a direct line to its alias. SG_ERR_CHECK( sg_wc_db__tne__get_alias_of_root(pCtx, pDb, pCSetRow, &uiAlias) ); SG_ERR_CHECK( SG_varray__count(pCtx, pva, &count) ); for (k=1; k<count; k++) { const char * pszEntryname_k; SG_bool bFound_k; SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva, k, &pszEntryname_k) ); SG_ERR_CHECK( sg_wc_db__tne__get_row_by_parent_alias_and_entryname(pCtx, pDb, pCSetRow, uiAlias, pszEntryname_k, &bFound_k, &pTneRow_k) ); if (!bFound_k) goto fail; uiAlias = pTneRow_k->p_s->uiAliasGid; SG_WC_DB__TNE_ROW__NULLFREE(pCtx, pTneRow_k); } *pbFound = SG_TRUE; *puiAliasGid = uiAlias; fail: SG_VARRAY_NULLFREE(pCtx, pva); SG_WC_DB__TNE_ROW__NULLFREE(pCtx, pTneRow_k); }
/** * Setup for a DIFF on a single item. * * We return a varray of "DiffSteps". * */ void SG_wc_tx__diff__setup(SG_context * pCtx, SG_wc_tx * pWcTx, const SG_rev_spec * pRevSpec, const char * pszInput, SG_uint32 depth, SG_bool bNoIgnores, SG_bool bNoTSC, SG_bool bNoSort, SG_bool bInteractive, const char * pszTool, SG_varray ** ppvaDiffSteps) { SG_varray * pvaStatus = NULL; SG_varray * pvaDiffSteps = NULL; SG_uint32 nrRevSpecs = 0; SG_NULLARGCHECK_RETURN( pWcTx ); // pszInput is optional // pszTool is optional SG_NULLARGCHECK_RETURN( ppvaDiffSteps ); if (pRevSpec) SG_ERR_CHECK( SG_rev_spec__count(pCtx, pRevSpec, &nrRevSpecs) ); if (nrRevSpecs > 0) SG_ERR_CHECK( SG_wc_tx__status1(pCtx, pWcTx, pRevSpec, pszInput, depth, SG_FALSE, // bListUnchanged bNoIgnores, bNoTSC, SG_FALSE, // bListSparse SG_FALSE, // bListReserved bNoSort, &pvaStatus) ); else SG_ERR_CHECK( SG_wc_tx__status(pCtx, pWcTx, pszInput, depth, SG_FALSE, // bListUnchanged bNoIgnores, bNoTSC, SG_FALSE, // bListSparse SG_FALSE, // bListReserved bNoSort, &pvaStatus, NULL) ); if (pvaStatus) SG_ERR_CHECK( sg_wc_tx__diff__setup(pCtx, pWcTx, pvaStatus, bInteractive, pszTool, &pvaDiffSteps) ); SG_RETURN_AND_NULL(pvaDiffSteps, ppvaDiffSteps); fail: SG_VARRAY_NULLFREE(pCtx, pvaStatus); }
// Command-driver for "vv commit": build commit args from the parsed
// options, run the commit, and (unless --test) dump the new changeset.
void wc__do_cmd_commit(SG_context * pCtx, const SG_option_state * pOptSt, const SG_stringarray * psaArgs)
{
	SG_wc_commit_args commitArgs;
	char * pszNewCSetHid = NULL;
	SG_repo * pRepo = NULL;
	SG_vhash * pvhCleanBranches = NULL;
	SG_varray * pvaJournal = NULL;

	memset(&commitArgs, 0, sizeof(commitArgs));
	commitArgs.bDetached = pOptSt->bAllowDetached;
	commitArgs.pszUser = pOptSt->psz_username;
	commitArgs.pszWhen = pOptSt->psz_when;
	commitArgs.pszMessage = pOptSt->psz_message;
	commitArgs.pfnPrompt = ((pOptSt->bPromptForDescription) ? SG_cmd_util__get_comment_from_editor : NULL);
	commitArgs.psaInputs = psaArgs;		// null for a complete commit; non-null for a partial commit.
	commitArgs.depth = WC__GET_DEPTH(pOptSt);
	commitArgs.psaAssocs = pOptSt->psa_assocs;
	commitArgs.bAllowLost = pOptSt->bAllowLost;
	commitArgs.psaStamps = pOptSt->psa_stamps;

	SG_ERR_CHECK(  SG_wc__commit(pCtx, NULL, &commitArgs, pOptSt->bTest,
								 ((pOptSt->bVerbose) ? &pvaJournal : NULL),
								 &pszNewCSetHid)  );

	if (pvaJournal)
		SG_ERR_IGNORE(  sg_report_journal(pCtx, pvaJournal)  );

	if (!pOptSt->bTest)
	{
		// after the commit is finished, display the details
		// of the new changeset.
		SG_ERR_CHECK(  SG_cmd_util__get_repo_from_cwd(pCtx, &pRepo, NULL)  );
		SG_ERR_CHECK(  SG_vc_branches__cleanup(pCtx, pRepo, &pvhCleanBranches)  );
		SG_ERR_CHECK(  SG_cmd_util__dump_log(pCtx, SG_CS_STDOUT, pRepo, pszNewCSetHid,
											 pvhCleanBranches, SG_FALSE, SG_TRUE)  );
	}

fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_VHASH_NULLFREE(pCtx, pvhCleanBranches);
	SG_NULLFREE(pCtx, pszNewCSetHid);
	SG_VARRAY_NULLFREE(pCtx, pvaJournal);
}
/**
 * Fetch all hook records registered for the given interface name.
 * Each record carries "js", "module", "version" and the hidrec field.
 * The caller owns the returned varray.
 */
void SG_vc_hooks__lookup_by_interface(
	SG_context* pCtx,
	SG_repo* pRepo,
	const char* psz_interface,
	SG_varray** ppva
	)
{
	char* psz_hid_cs_leaf = NULL;
	SG_varray* pva_fields = NULL;
	SG_varray* pva_hooks = NULL;
	char buf_where[SG_HID_MAX_BUFFER_LENGTH + 64];
	char* psz_escaped = NULL;
	const char* psz_to_use = NULL;

	SG_ERR_CHECK(  SG_zing__get_leaf(pCtx, pRepo, NULL, SG_DAGNUM__VC_HOOKS, &psz_hid_cs_leaf)  );
	SG_ASSERT(psz_hid_cs_leaf);

	// escape the interface name before splicing it into the where-clause
	SG_ERR_CHECK(  SG_sqlite__escape(pCtx, psz_interface, &psz_escaped)  );
	psz_to_use = (psz_escaped ? psz_escaped : psz_interface);

	SG_ERR_CHECK(  SG_sprintf(pCtx, buf_where, sizeof(buf_where), "interface == '%s'", psz_to_use)  );

	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pva_fields)  );
	SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_fields, "js")  );
	SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_fields, "module")  );
	SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_fields, "version")  );
	SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_fields, SG_ZING_FIELD__HIDREC)  );
	SG_ERR_CHECK(  SG_zing__query(pCtx, pRepo, SG_DAGNUM__VC_HOOKS, psz_hid_cs_leaf,
								  "hook", buf_where, NULL, 0, 0,
								  pva_fields, &pva_hooks)  );

	*ppva = pva_hooks;
	pva_hooks = NULL;

fail:
	SG_VARRAY_NULLFREE(pCtx, pva_hooks);
	SG_VARRAY_NULLFREE(pCtx, pva_fields);
	SG_NULLFREE(pCtx, psz_hid_cs_leaf);
	SG_NULLFREE(pCtx, psz_escaped);
}
/**
 * Remove the first element equal to psz_val from the varray setting
 * at psz_path.  When the current value came from factory defaults,
 * it is first materialized under machine scope so the removal lands
 * in a writable location.  No-op when the setting doesn't exist or
 * contains no matching element.
 */
void SG_localsettings__varray__remove_first_match(SG_context * pCtx, const char* psz_path, const char* psz_val)
{
	SG_jsondb* p = NULL;
	SG_string* pstr_path_element = NULL;
	SG_varray* pva = NULL;
	SG_uint32 ndx = 0;
	SG_uint32 count = 0;
	SG_uint32 i = 0;
	SG_bool b_found = SG_FALSE;
	SG_string* pstr_path_found = NULL;

	SG_ASSERT(pCtx);
	SG_NONEMPTYCHECK_RETURN(psz_path);

	SG_ERR_CHECK(  SG_closet__get_localsettings(pCtx, &p)  );
	SG_ERR_CHECK(  SG_localsettings__get__varray(pCtx, psz_path, NULL, &pva, &pstr_path_found)  );
	if (pva)
	{
		if (!pstr_path_found)
		{
			// this came from factory defaults.
			SG_ERR_CHECK(  SG_string__alloc(pCtx, &pstr_path_found)  );
			SG_ERR_CHECK(  SG_string__sprintf(pCtx, pstr_path_found, "%s/%s", SG_LOCALSETTING__SCOPE__MACHINE, psz_path)  );
			SG_ERR_CHECK(  SG_localsettings__update__varray(pCtx, SG_string__sz(pstr_path_found), pva)  );
		}

		// linear scan for the first element equal to psz_val
		SG_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
		for (i=0; i<count; i++)
		{
			const char* psz = NULL;

			SG_ERR_CHECK(  SG_varray__get__sz(pCtx, pva, i, &psz)  );
			if (0 == strcmp(psz, psz_val))
			{
				b_found = SG_TRUE;
				ndx = i;
				break;
			}
		}

		if (b_found)
		{
			// BUGFIX: ndx is SG_uint32 -- use %u, not %d (passing an
			// unsigned value to %d is a format/type mismatch).
			SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pstr_path_element)  );
			SG_ERR_CHECK(  SG_string__sprintf(pCtx, pstr_path_element, "%s/%u", SG_string__sz(pstr_path_found), ndx)  );
			SG_ERR_CHECK(  SG_jsondb__remove(pCtx, p, SG_string__sz(pstr_path_element))  );
		}
	}

fail:
	SG_VARRAY_NULLFREE(pCtx, pva);
	SG_STRING_NULLFREE(pCtx, pstr_path_found);
	SG_STRING_NULLFREE(pCtx, pstr_path_element);
	SG_JSONDB_NULLFREE(pCtx, p);
}
// Command-driver for "vv addremove": run the scan and report the journal.
void wc__do_cmd_addremove(SG_context * pCtx, const SG_option_state * pOptSt, const SG_stringarray * psaArgs)
{
	SG_varray * pvaLog = NULL;

	SG_ERR_CHECK(  SG_wc__addremove__stringarray(pCtx, NULL, psaArgs,
												 WC__GET_DEPTH(pOptSt),
												 WC__GET_NO_IGNORES(pOptSt),
												 pOptSt->bTest,
												 &pvaLog)  );
	SG_ERR_IGNORE(  sg_report_addremove_journal(pCtx, pvaLog, SG_TRUE, SG_TRUE, pOptSt->bVerbose)  );

fail:
	SG_VARRAY_NULLFREE(pCtx, pvaLog);
}
/**
 * Pick thru the computed VARRAY and build a subset VARRAY containing
 * just the dirty files (for an interactive diff we only show dirty
 * files; batch/patch mode shows everything).
 *
 * We assume the rows look like:
 *
 *     [ { "status" : { "flags" : <int>, ... },
 *         "path"   : <repo-path>, ... },
 *       ... ]
 *
 * Both a Canonical STATUS (pvaStatus) and a "DiffStep" (pvaDiffStep)
 * match this pattern.
 *
 * Returns NULL via ppvaDirtyFiles when there aren't any.
 */
static void _get_dirty_files(SG_context * pCtx,
							 const SG_varray * pva,
							 SG_varray ** ppvaDirtyFiles)
{
	SG_varray * pvaSubset = NULL;
	SG_uint32 i, nrRows;

	*ppvaDirtyFiles = NULL;

	if (!pva)
		return;
	SG_ERR_CHECK(  SG_varray__count(pCtx, pva, &nrRows)  );
	if (nrRows == 0)
		return;

	for (i=0; i<nrRows; i++)
	{
		SG_vhash * pvhRow;			// we do not own this
		SG_vhash * pvhRowStatus;	// we do not own this
		SG_int64 i64;
		SG_wc_status_flags statusFlags;

		SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva, i, &pvhRow)  );
		SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRow, "status", &pvhRowStatus)  );
		SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvhRowStatus, "flags", &i64)  );
		statusFlags = (SG_wc_status_flags)i64;

		// only FILES...
		if ((statusFlags & SG_WC_STATUS_FLAGS__T__FILE) == 0)
			continue;
		// ...that are dirty in one of these ways.
		if ((statusFlags & (SG_WC_STATUS_FLAGS__C__NON_DIR_MODIFIED
							|SG_WC_STATUS_FLAGS__S__ADDED
							|SG_WC_STATUS_FLAGS__S__DELETED
							|SG_WC_STATUS_FLAGS__S__MERGE_CREATED
							|SG_WC_STATUS_FLAGS__S__UPDATE_CREATED)) == 0)
			continue;

		// lazily allocate the subset so that "no dirty files" yields NULL.
		if (!pvaSubset)
			SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pvaSubset)  );
		SG_ERR_CHECK(  SG_varray__appendcopy__vhash(pCtx, pvaSubset, pvhRow, NULL)  );
	}

	SG_RETURN_AND_NULL( pvaSubset, ppvaDirtyFiles );

fail:
	SG_VARRAY_NULLFREE(pCtx, pvaSubset);
}
// Command-driver for "vv remove": run the removal and report the journal.
void wc__do_cmd_remove(SG_context * pCtx, const SG_option_state * pOptSt, const SG_stringarray * psaArgs)
{
	SG_varray * pvaLog = NULL;

	SG_ERR_CHECK(  SG_wc__remove__stringarray(pCtx, NULL, psaArgs,
											  (!pOptSt->bRecursive),
											  pOptSt->bForce,
											  pOptSt->bNoBackups,
											  pOptSt->bKeep,
											  pOptSt->bTest,
											  &pvaLog)  );
	SG_ERR_IGNORE(  sg_report_addremove_journal(pCtx, pvaLog, SG_FALSE, SG_TRUE, pOptSt->bVerbose)  );

fail:
	SG_VARRAY_NULLFREE(pCtx, pvaLog);
}
/**
 * Serialize a dagfrag into a vhash (allocated shared with pvhShared).
 * On success the caller owns *ppvhNew.
 */
void SG_dagfrag__to_vhash__shared(SG_context * pCtx,
								  SG_dagfrag * pFrag,
								  SG_vhash * pvhShared,
								  SG_vhash ** ppvhNew)
{
	SG_vhash * pvhResult = NULL;
	SG_varray * pvaData = NULL;
	struct _serialize_data serialize_data;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NULLARGCHECK_RETURN(ppvhNew);

	SG_ASSERT( (! IS_TRANSIENT(pFrag)) );

	SG_ERR_CHECK(  SG_VHASH__ALLOC__SHARED(pCtx, &pvhResult, 5, pvhShared)  );

	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhResult, KEY_REPO_ID, pFrag->m_sz_repo_id)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhResult, KEY_ADMIN_ID, pFrag->m_sz_admin_id)  );
	SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pvhResult, KEY_DAGNUM, (SG_int64)pFrag->m_iDagNum)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhResult, KEY_VERSION, VALUE_VERSION)  );

	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pvaData)  );

	serialize_data.pvhFrag = pvhResult;
	serialize_data.pvaMyData = pvaData;

	// walk the complete RB_Cache and add complete info for each my_data item
	// (regardless of whether the dagnode is a START or INTERIOR member or in
	// the END-FRINGE.  These will be in an essentially random order (HID).
	SG_ERR_CHECK(  SG_rbtree__foreach(pCtx, pFrag->m_pRB_Cache, _serialize_data_cb, &serialize_data)  );

	SG_ERR_CHECK(  SG_vhash__add__varray(pCtx, pvhResult, KEY_DATA, &pvaData)  );

	*ppvhNew = pvhResult;
	return;

fail:
	SG_VHASH_NULLFREE(pCtx, pvhResult);
	SG_VARRAY_NULLFREE(pCtx, pvaData);
}
/** * Begin a "canonical" status. This is a VARRAY with one * row per reported item. Each item is completely self-contained * and contains all of the status info for that item (unlike the * "classic" view). * * pszInput, if given, lists a single file or directory where we * should begin the tree-walk. If null, we assume the entire tree. * * We accumulate all of the results into a single pvaStatus. * * This status routine uses a pWcTx so that the results * reflect the in-progress transaction. * */ void SG_wc_tx__status(SG_context * pCtx, SG_wc_tx * pWcTx, const char * pszInput, SG_uint32 depth, SG_bool bListUnchanged, SG_bool bNoIgnores, SG_bool bNoTSC, SG_bool bListSparse, SG_bool bListReserved, SG_bool bNoSort, SG_varray ** ppvaStatus, SG_vhash ** ppvhLegend) { SG_varray * pvaStatus = NULL; SG_NULLARGCHECK_RETURN( pWcTx ); // pszInput is optional SG_NULLARGCHECK_RETURN( ppvaStatus ); // ppvhLegend is optional SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &pvaStatus) ); SG_ERR_CHECK( _sg_wc_tx__status(pCtx, pWcTx, pvaStatus, pszInput, depth, bListUnchanged, bNoIgnores, bNoTSC, bListSparse, bListReserved, ppvhLegend) ); // since we only have 1 input, we don't need to dedup the varray. if (!bNoSort) SG_ERR_CHECK( SG_wc__status__sort_by_repopath(pCtx, pvaStatus) ); *ppvaStatus = pvaStatus; pvaStatus = NULL; fail: SG_VARRAY_NULLFREE(pCtx, pvaStatus); }
/**
 * Compute an MSTATUS.  Throws if not currently in a merge.
 * You own the returned pvaStatus.
 */
void _sg_wc_tx__mstatus(SG_context * pCtx,
						SG_wc_tx * pWcTx,
						SG_bool bNoIgnores,
						SG_bool bNoSort,
						SG_varray ** ppvaStatus,
						SG_vhash ** ppvhLegend)
{
	SG_varray * pvaResult = NULL;

	SG_NULLARGCHECK_RETURN( pWcTx );
	SG_NULLARGCHECK_RETURN( ppvaStatus );

	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pvaResult)  );
	SG_ERR_CHECK(  sg_wc_tx__mstatus__main(pCtx, pWcTx, bNoIgnores, pvaResult, ppvhLegend)  );

	if (!bNoSort)
		SG_ERR_CHECK(  SG_wc__status__sort_by_repopath(pCtx, pvaResult)  );

	*ppvaStatus = pvaResult;
	pvaResult = NULL;

fail:
	SG_VARRAY_NULLFREE(pCtx, pvaResult);
}
static void _sg_mergereview(SG_context * pCtx, _tree_t * pTree, SG_int32 singleMergeReview, SG_bool firstChunk, SG_vhash * pMergeBaselines, SG_uint32 resultLimit, SG_varray ** ppResults, SG_uint32 * pCountResults, SG_varray ** ppContinuationToken) { SG_varray * pResults = NULL; SG_uint32 countResults = 0; SG_bool lastResultWasIndented = SG_FALSE; SG_varray * pContinuationToken = NULL; SG_ASSERT(pCtx!=NULL); SG_ASSERT(pTree!=NULL); SG_ASSERT(ppResults!=NULL); SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &pResults) ); while( // Stop if we have have reached the result limit. ! (countResults >= resultLimit) // Stop if we have completed a "single merge review" that was asked for. && ! (pTree->indentLevel == 0 && singleMergeReview && (countResults>1 || !firstChunk)) // Stop if we've walked all the way back to root of the vc dag. && ! (pTree->pNextResult==NULL) ) { // Process the next item in the pending list. // Note that this may cause any number of results to become available. SG_ERR_CHECK( _tree__process_next_pending_item(pCtx, pTree, pMergeBaselines) ); // Fetch results until we don't need anymore or there's none available. while( // Stop if we have have reached the result limit. ! (countResults >= resultLimit) // Stop if we have completed a "single merge review" that was asked for. && ! (pTree->indentLevel == 0 && singleMergeReview && (countResults>1 || !firstChunk)) // Stop if we've walked all the way back to root of the vc dag. && ! (pTree->pNextResult==NULL) // Stop if the next node is in a pending state. // (Can only happen when we were starting from a "continuation token".) && ! (pTree->pNextResult->isPending) // Stop if the next node has any display children in a pending state. && ! 
(_node__has_pending_children(pTree->pNextResult)) ) { SG_ERR_CHECK( _tree__add_next_node_to_results(pCtx, pTree, pResults, &countResults, &lastResultWasIndented) ); } } if(countResults<resultLimit && lastResultWasIndented) { SG_ASSERT(pTree->pNextResult!=NULL); // VC root will never be indented. SG_ERR_CHECK( _tree__add_next_node_to_results(pCtx, pTree, pResults, &countResults, &lastResultWasIndented) ); } if(ppContinuationToken!=NULL) { if(pTree->pNextResult==NULL) { *ppContinuationToken = NULL; } else { SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &pContinuationToken) ); if(pTree->pNextResult==pTree->pRoot) { SG_ERR_CHECK( SG_varray__append__string__sz(pCtx, pContinuationToken, pTree->pNextResult->pszHidRef) ); } else { SG_ERR_CHECK( _tree__generate_continuation_token(pCtx, pTree, pContinuationToken) ); } *ppContinuationToken = pContinuationToken; } } *ppResults = pResults; if(pCountResults != NULL) *pCountResults = countResults; return; fail: SG_VARRAY_NULLFREE(pCtx, pResults); SG_VARRAY_NULLFREE(pCtx, pContinuationToken); }
static void _tree__add_next_node_to_results(SG_context * pCtx, _tree_t * pTree, SG_varray * pResults, SG_uint32 * pCountResults, SG_bool * pbLastResultWasIndented) { SG_vhash * pResult = NULL; SG_varray * pChildren = NULL; SG_uint32 i; SG_varray * pTmp = NULL; // Add pTree->pNextResult to results list. SG_ERR_CHECK( SG_varray__appendnew__vhash(pCtx, pResults, &pResult) ); SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pResult, "revno", pTree->pNextResult->revno) ); SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pResult, "changeset_id", pTree->pNextResult->pszHidRef) ); SG_ERR_CHECK( SG_vhash__add__vhash(pCtx, pResult, "parents", &pTree->pNextResult->pVcParents) ); SG_ERR_CHECK( SG_vhash__addnew__varray(pCtx, pResult, "displayChildren", &pChildren) ); for(i=0; i<pTree->pNextResult->displayChildren.count; ++i) SG_ERR_CHECK( SG_varray__append__int64(pCtx, pChildren, pTree->pNextResult->displayChildren.p[i]->revno) ); if(pTree->pNextResult->displayChildren.count>1 && pTree->indentLevel>0) { SG_varray * pContinuationToken = NULL; SG_ERR_CHECK( SG_vhash__addnew__varray(pCtx, pResult, "continuationToken", &pContinuationToken) ); SG_ERR_CHECK( _tree__generate_continuation_token(pCtx, pTree, pContinuationToken) ); } SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pResult, "indent", pTree->indentLevel) ); *pbLastResultWasIndented = (pTree->indentLevel > 0); if(pTree->pNextResult->pDisplayParent!=NULL) { SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pResult, "displayParent", pTree->pNextResult->pDisplayParent->revno) ); SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pResult, "indexInParent", pTree->pNextResult->pDisplayParent->displayChildren.count) ); SG_ASSERT(*pbLastResultWasIndented); } else { SG_ASSERT(!*pbLastResultWasIndented); } ++(*pCountResults); // Advance pTree->pNextResult pointer to next result. while(pTree->pNextResult->displayChildren.count==0 && pTree->pNextResult!=pTree->pRoot) { // We have already added this node and all children to the results. 
// Free the memory that will not be reused, and then put the node // on the "free list". _node__free_nonreusable_memory(pCtx, pTree->pNextResult); pTree->pNextResult->displayChildren.p[0] = pTree->pFreeList; pTree->pNextResult->displayChildren.count = 1; pTree->pFreeList = pTree->pNextResult; // Move back up up in the tree. pTree->pNextResult = pTree->pNextResult->pDisplayParent; if(pTree->pNextResult!=NULL) { // The node we just freed... Remove it from its display parent too. --pTree->pNextResult->displayChildren.count; // All children but the leftmost one are indented by 1 from the parent. if(pTree->pNextResult->displayChildren.count>0) --pTree->indentLevel; } } if(pTree->pNextResult->displayChildren.count>0) { SG_uint32 i = pTree->pNextResult->displayChildren.count-1; pTree->pNextResult = pTree->pNextResult->displayChildren.p[i]; if(i>=1) ++pTree->indentLevel; } else { pTree->pNextResult = NULL; } // If we advanced past root... if(pTree->pNextResult==NULL || pTree->pNextResult==pTree->pRoot->displayChildren.p[0]) { // Out with the old root, in with the new... _node__free_nonreusable_memory(pCtx, pTree->pRoot); pTree->pRoot->displayChildren.p[0] = pTree->pFreeList; pTree->pRoot->displayChildren.count = 1; pTree->pFreeList = pTree->pRoot; pTree->pRoot = pTree->pNextResult; if(pTree->pRoot!=NULL) pTree->pRoot->pDisplayParent = NULL; } return; fail: SG_VARRAY_NULLFREE(pCtx, pTmp); }
// Build a test JSON document into pStr exercising every writer
// primitive: scalars, nested arrays/objects, nulls and a large array.
void u0026_jsonparser__create_2(SG_context* pCtx, SG_string* pStr)
{
	SG_jsonwriter* pWriter = NULL;
	SG_vhash* pvhSub = NULL;
	SG_varray* pvaBig = NULL;
	SG_uint32 idx;
	char* pszGid = NULL;

	SG_ERR_CHECK(  SG_jsonwriter__alloc(pCtx, &pWriter, pStr)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_start_object(pCtx, pWriter)  );

	// scalar pairs of each flavor
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__string__sz(pCtx, pWriter, "hello", "world")  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__int64(pCtx, pWriter, "x", 5)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__double(pCtx, pWriter, "pi", 3.14159)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__bool(pCtx, pWriter, "b1", SG_TRUE)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__bool(pCtx, pWriter, "b2", SG_FALSE)  );

	// a mixed-type array
	SG_ERR_CHECK(  SG_jsonwriter__write_begin_pair(pCtx, pWriter, "furball")  );
	SG_ERR_CHECK(  SG_jsonwriter__write_start_array(pCtx, pWriter)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__string__sz(pCtx, pWriter, "plok")  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__double(pCtx, pWriter, 47.567)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__int64(pCtx, pWriter, 22222)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__null(pCtx, pWriter)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__bool(pCtx, pWriter, SG_TRUE)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__bool(pCtx, pWriter, SG_FALSE)  );

	SG_ERR_CHECK(  SG_gid__alloc(pCtx, &pszGid)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__string__sz(pCtx, pWriter, pszGid)  );
	SG_NULLFREE(pCtx, pszGid);

	SG_ERR_CHECK(  SG_jsonwriter__write_end_array(pCtx, pWriter)  );

	SG_ERR_CHECK(  SG_jsonwriter__write_pair__null(pCtx, pWriter, "nope")  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__string__sz(pCtx, pWriter, "messy", U0026_MESSY)  );

	// a nested object written via a vhash
	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhSub)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhSub, "fried", "tomatoes")  );
	SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pvhSub, "q", 333)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__vhash(pCtx, pWriter, "sub", pvhSub)  );
	SG_VHASH_NULLFREE(pCtx, pvhSub);

	// a large array written via a varray
	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pvaBig)  );
	for (idx=0; idx<1000; idx++)
	{
		SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pvaBig, "plok")  );
		SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaBig, 22)  );
		SG_ERR_CHECK(  SG_varray__append__double(pCtx, pvaBig, 1.414)  );
		SG_ERR_CHECK(  SG_varray__append__bool(pCtx, pvaBig, SG_TRUE)  );
		SG_ERR_CHECK(  SG_varray__append__bool(pCtx, pvaBig, SG_FALSE)  );
		SG_ERR_CHECK(  SG_varray__append__null(pCtx, pvaBig)  );
	}
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__varray(pCtx, pWriter, "a", pvaBig)  );
	SG_VARRAY_NULLFREE(pCtx, pvaBig);

	SG_ERR_CHECK(  SG_jsonwriter__write_end_object(pCtx, pWriter)  );
	SG_JSONWRITER_NULLFREE(pCtx, pWriter);
	return;

fail:
	SG_VHASH_NULLFREE(pCtx, pvhSub);
	SG_VARRAY_NULLFREE(pCtx, pvaBig);
	SG_JSONWRITER_NULLFREE(pCtx, pWriter);
}
/**
 * Our caller is trying to create a new repo and map a WD onto it.
 * The destination directory may or may not have existed before we
 * started.  When we are building upon a pre-existing directory,
 * verify it doesn't already contain another working copy (we don't
 * yet support submodules).
 */
static void _check_for_nested_drawer(SG_context * pCtx, SG_wc_tx * pWcTx)
{
	SG_varray * pvaSt = NULL;
	SG_string * pstrDrawerRepoPath = NULL;
	SG_string * pstrOffender = NULL;
	const char * pszDrawerName = NULL;
	const char * pszDrawerRepoPath = NULL;
	SG_uint32 i, nrRows;

	// nothing to check when we created the directory (or its contents) ourselves.
	if (pWcTx->bWeCreated_WD || pWcTx->bWeCreated_WD_Contents)
		return;

	SG_ERR_CHECK(  SG_wc_tx__status(pCtx, pWcTx, NULL, SG_UINT32_MAX,
									SG_FALSE,	// bListUnchanged
									SG_TRUE,	// bNoIgnores
									SG_TRUE,	// bNoTSC,
									SG_FALSE,	// bListSparse
									SG_TRUE,	// bListReserved
									SG_TRUE,	// bNoSort,
									&pvaSt, NULL)  );
	if (!pvaSt)
		return;

	// TODO 2012/11/13 For now I'm just going to see if there is a
	// TODO .sgdrawer somewhere within the directory tree.
	// TODO In theory, we could have ADD/ADDREMOVE just
	// TODO look for them and refuse to add its parent
	// TODO directory, but I don't to even support that
	// TODO until we've properly dealt with submodules.
	// TODO
	// TODO So for now, if there is a WD deeply nested within
	// TODO this directory, we just complain.  This is mainly
	// TODO to prevent accidents.  (Because they can still
	// TODO manually move a sub-WD somehere deep into this
	// TODO directory at some point in the future.)

	// build "@/<drawer>/" so we can recognize our own drawer below.
	SG_ERR_CHECK(  SG_workingdir__get_drawer_directory_name(pCtx, &pszDrawerName)  );
	SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pstrDrawerRepoPath)  );
	SG_ERR_CHECK(  SG_string__sprintf(pCtx, pstrDrawerRepoPath, "@/%s", pszDrawerName)  );
	SG_ERR_CHECK(  SG_repopath__ensure_final_slash(pCtx, pstrDrawerRepoPath)  );
	pszDrawerRepoPath = SG_string__sz(pstrDrawerRepoPath);

	SG_ERR_CHECK(  SG_varray__count(pCtx, pvaSt, &nrRows)  );
	for (i=0; i<nrRows; i++)
	{
		SG_vhash * pvhRow;
		SG_vhash * pvhRowStatus;
		SG_bool bIsReserved;

		SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaSt, i, &pvhRow)  );
		SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRow, "status", &pvhRowStatus)  );
		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRowStatus, "isReserved", &bIsReserved)  );
		if (bIsReserved)
		{
			// Don't freak out over the .sgdrawer that we just created in the root.
			const char * pszRepoPath;

			SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhRow, "path", &pszRepoPath)  );
			if (strcmp(pszRepoPath, pszDrawerRepoPath) != 0)
			{
				SG_ERR_CHECK(  SG_STRING__ALLOC__SZ(pCtx, &pstrOffender, pszRepoPath)  );
				SG_ERR_CHECK(  SG_repopath__remove_last(pCtx, pstrOffender)  );
				SG_ERR_THROW2(  SG_ERR_ENTRY_ALREADY_UNDER_VERSION_CONTROL,
								(pCtx, "The directory '%s' contains a working copy and submodules are not yet supported.",
								 SG_string__sz(pstrOffender))  );
			}
		}
	}

fail:
	SG_STRING_NULLFREE(pCtx, pstrOffender);
	SG_STRING_NULLFREE(pCtx, pstrDrawerRepoPath);
	SG_VARRAY_NULLFREE(pCtx, pvaSt);
}
/** * Begin a canonical STATUS when given one or more items using a SG_stringarray. * If psaInputs is NULL, assume the entire tree. * * We accumulate all of the results into a single pvaStatus. * * This status routine uses a pWcTx so that the results * reflect the in-progress transaction. * */ void SG_wc_tx__status__stringarray(SG_context * pCtx, SG_wc_tx * pWcTx, const SG_stringarray * psaInputs, SG_uint32 depth, SG_bool bListUnchanged, SG_bool bNoIgnores, SG_bool bNoTSC, SG_bool bListSparse, SG_bool bListReserved, SG_bool bNoSort, SG_varray ** ppvaStatus, SG_vhash ** ppvhLegend) { SG_varray * pvaStatus = NULL; SG_uint32 k, count; SG_vhash * pvhLegend = NULL; SG_NULLARGCHECK_RETURN( pWcTx ); SG_NULLARGCHECK_RETURN( ppvaStatus ); // ppvhLegend is optional SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &pvaStatus) ); if (psaInputs) { SG_ERR_CHECK( SG_stringarray__count(pCtx, psaInputs, &count) ); for (k=0; k<count; k++) { const char * pszInput_k; SG_ERR_CHECK( SG_stringarray__get_nth(pCtx, psaInputs, k, &pszInput_k) ); SG_ERR_CHECK( _sg_wc_tx__status(pCtx, pWcTx, pvaStatus, pszInput_k, depth, bListUnchanged, bNoIgnores, bNoTSC, bListSparse, bListReserved, (((k==0) && ppvhLegend) ? &pvhLegend : NULL)) ); } if (ppvhLegend) { *ppvhLegend = pvhLegend; pvhLegend = NULL; } // In case they said, "vv status foo foo foo" or something // like "vv status dirA/foo dirA" we need to de-dup the results // since we called the internal status with each input[k] and // just accumulated the results. SG_ERR_CHECK( SG_vaofvh__dedup(pCtx, pvaStatus, "gid") ); } else { // if no args, assume "@/" for the STATUS. SG_ERR_CHECK( _sg_wc_tx__status(pCtx, pWcTx, pvaStatus, NULL, depth, bListUnchanged, bNoIgnores, bNoTSC, bListSparse, bListReserved, ppvhLegend) ); // we don't need to de-dup since we simulated 1 argument. 
} if (!bNoSort) SG_ERR_CHECK( SG_wc__status__sort_by_repopath(pCtx, pvaStatus) ); *ppvaStatus = pvaStatus; pvaStatus = NULL; fail: SG_VARRAY_NULLFREE(pCtx, pvaStatus); SG_VHASH_NULLFREE(pCtx, pvhLegend); }
/**
 * Compute DIFF on (baseline or arbitrary cset vs WC) and either
 * splat to console or launch a GUI tool for each.
 */
static void _s01__do_cset_vs_wc(SG_context * pCtx,
								const SG_option_state * pOptSt,
								const SG_stringarray * psaArgs,
								SG_uint32 * pNrErrors)
{
	SG_wc_tx * pWcTx = NULL;
	SG_varray * pvaSteps = NULL;
	SG_varray * pvaStepsDirtyFiles = NULL;
	SG_pathname * pPathWc = NULL;
	SG_uint32 nrErrors = 0;

	SG_ERR_CHECK(  SG_WC_TX__ALLOC__BEGIN(pCtx, &pWcTx, pPathWc, SG_TRUE)  );
	SG_ERR_CHECK(  SG_wc_tx__diff__setup__stringarray(pCtx, pWcTx, pOptSt->pRevSpec, psaArgs,
													  WC__GET_DEPTH(pOptSt),
													  SG_FALSE,	// bNoIgnores
													  SG_FALSE,	// bNoTSC
													  SG_FALSE,	// bNoSort,
													  pOptSt->bInteractive,
													  pOptSt->psz_tool,
													  &pvaSteps)  );

	// rollback/cancel the TX to release the SQL locks,
	// but don't free it yet (because that will auto-delete
	// the session temp files that we are using for the left
	// sides).
	//
	// This is like SG_wc__diff__throw() but we want to control
	// the diff-loop so we can optionally do the interactive prompt.
	SG_ERR_CHECK(  SG_wc_tx__cancel(pCtx, pWcTx)  );

	if (pvaSteps)
	{
		if (pOptSt->bInteractive)
		{
			// interactive: show only the dirty files, one at a time.
			SG_ERR_CHECK(  _get_dirty_files(pCtx, pvaSteps, &pvaStepsDirtyFiles)  );
			if (pvaStepsDirtyFiles)
				SG_ERR_CHECK(  _do_gui_diffs(pCtx, SG_TRUE, pOptSt, pvaStepsDirtyFiles, &nrErrors)  );
		}
		else
		{
			SG_uint32 i, nrSteps;

			SG_ERR_CHECK(  SG_varray__count(pCtx, pvaSteps, &nrSteps)  );
			for (i=0; i<nrSteps; i++)
			{
				SG_vhash * pvhStep;
				const char * pszHeader = NULL;
				SG_uint32 iResult;

				SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaSteps, i, &pvhStep)  );
				SG_ERR_CHECK(  SG_vhash__check__sz(pCtx, pvhStep, "header", &pszHeader)  );
				if (pszHeader)
					SG_ERR_IGNORE(  SG_console__raw(pCtx, SG_CS_STDOUT, pszHeader)  );
				SG_ERR_CHECK(  _do_diff1(pCtx, SG_TRUE, pOptSt, pvhStep, &iResult)  );
				switch (iResult)
				{
				default:
				case SG_FILETOOL__RESULT__SUCCESS:
				case SG_DIFFTOOL__RESULT__SAME:
				case SG_DIFFTOOL__RESULT__DIFFERENT:
					break;

				case SG_DIFFTOOL__RESULT__CANCEL:
				case SG_FILETOOL__RESULT__FAILURE:
				case SG_FILETOOL__RESULT__ERROR:
					nrErrors++;
					break;
				}
			}
		}
	}

	*pNrErrors = nrErrors;

fail:
	SG_VARRAY_NULLFREE(pCtx, pvaSteps);
	SG_VARRAY_NULLFREE(pCtx, pvaStepsDirtyFiles);
	SG_WC_TX__NULLFREE(pCtx, pWcTx);
}
/**
 * Compute DIFF between two changesets (cset-vs-cset) and either splat
 * the results to the console or launch a GUI diff tool for each file.
 *
 * Tool failures are printed to STDERR and counted in *pNrErrors rather
 * than thrown, so one changed binary file or mis-configured tool does
 * not stop the remaining diffs.
 */
static void _s2__do_cset_vs_cset(SG_context * pCtx,
								 const SG_option_state * pOptSt,
								 const SG_stringarray * psaArgs,
								 SG_uint32 * pNrErrors)
{
	SG_varray * pvaStatus = NULL;
	SG_varray * pvaStatusDirtyFiles = NULL;
	SG_stringarray * psa1 = NULL;
	SG_string * pStringGidRepoPath = NULL;
	SG_string * pStringErr = NULL;
	SG_uint32 nrErrors = 0;

	SG_ERR_CHECK( SG_vv2__status(pCtx,
								 pOptSt->psz_repo, pOptSt->pRevSpec,
								 psaArgs,
								 WC__GET_DEPTH(pOptSt),
								 SG_FALSE, // bNoSort
								 &pvaStatus, NULL) );
	if (pvaStatus)
	{
		if (pOptSt->bInteractive)
		{
			// Filter list down to just modified files and show them one-by-one.
			SG_ERR_CHECK( _get_dirty_files(pCtx, pvaStatus, &pvaStatusDirtyFiles) );
			if (pvaStatusDirtyFiles)
				SG_ERR_CHECK( _do_gui_diffs(pCtx, SG_FALSE, pOptSt, pvaStatusDirtyFiles, &nrErrors) );
		}
		else
		{
			SG_uint32 k, nrItems;

			// Print the changes with PATCH-like headers.
			// Accumulate any tool errors.
			SG_ERR_CHECK( SG_varray__count(pCtx, pvaStatus, &nrItems) );
			for (k=0; k<nrItems; k++)
			{
				SG_vhash * pvhItem;			// reference borrowed from pvaStatus
				const char * pszGid = NULL;

				SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pvaStatus, k, &pvhItem) );

				// TODO 2013/02/22 Our pvhItem has all of the details for the diff,
				// TODO            but we don't yet have a public API to let it be
				// TODO            used as is.  So we build a @gid repo-path and
				// TODO            run the old historical diff code on a 1-item array
				// TODO            containing this @gid.
				// TODO
				// TODO            We should fix this to just pass down the pvhItem
				// TOOD            so that it doesn't have to repeat the status lookup.
				SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid) );
				SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pStringGidRepoPath) );
				SG_ERR_CHECK( SG_string__sprintf(pCtx, pStringGidRepoPath, "@%s", pszGid) );
				SG_ERR_CHECK( SG_STRINGARRAY__ALLOC(pCtx, &psa1, 1) );
				SG_ERR_CHECK( SG_stringarray__add(pCtx, psa1, SG_string__sz(pStringGidRepoPath)) );

				// Deliberately NOT wrapped in SG_ERR_CHECK -- see below.
				SG_vv2__diff_to_stream__throw(pCtx, pOptSt->psz_repo, pOptSt->pRevSpec,
											  psa1, 0,
											  SG_TRUE, // bNoSort -- doesn't matter only 1 item in list
											  SG_FALSE, // bInteractive,
											  pOptSt->psz_tool);
				// Don't throw the error from the tool.  Just print it on STDERR
				// and remember that we had an error so that don't stop showing
				// the diffs just because we stumble over a changed binary file
				// or mis-configured tool, for example.
				if (SG_context__has_err(pCtx))
				{
					SG_context__err_to_string(pCtx, SG_FALSE, &pStringErr);
					SG_context__err_reset(pCtx);
					SG_ERR_CHECK( SG_console__raw(pCtx, SG_CS_STDERR, SG_string__sz(pStringErr)) );
					SG_STRING_NULLFREE(pCtx, pStringErr);
					nrErrors++;
				}

				// Per-iteration temporaries; also freed at 'fail' if we jump out.
				SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
				SG_STRINGARRAY_NULLFREE(pCtx, psa1);
			}
		}
	}

	*pNrErrors = nrErrors;

fail:
	SG_VARRAY_NULLFREE(pCtx, pvaStatus);
	SG_VARRAY_NULLFREE(pCtx, pvaStatusDirtyFiles);
	SG_STRINGARRAY_NULLFREE(pCtx, psa1);
	SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
	SG_STRING_NULLFREE(pCtx, pStringErr);
}
/**
 * Determine the username ('whoami') associated with a repo.
 *
 * Three cases:
 *  - szRepoName == NULL: use the repo associated with the cwd (best-effort).
 *  - szRepoName is an http(s) URL: fetch the remote repo's admin-id, look up
 *    the locally-configured userid under "/admin/<adminid>/whoami/userid",
 *    then fetch the remote users list and translate that userid to a name.
 *  - otherwise: treat szRepoName as a local repo descriptor name.
 *
 * On success *ppUsername receives a newly-allocated string (caller frees)
 * or NULL if no username could be determined.
 *
 * (Fix: removed the unused local 'pUsername' SG_string, which was declared
 * and freed but never assigned.)
 */
void SG_cmd_util__get_username_for_repo(
	SG_context *pCtx,
	const char *szRepoName,
	char **ppUsername
	)
{
	SG_repo * pRepo = NULL;
	char * psz_username = NULL;
	SG_curl * pCurl = NULL;
	SG_string * pUri = NULL;
	SG_string * pResponse = NULL;
	SG_int32 responseStatusCode = 0;
	SG_vhash * pRepoInfo = NULL;
	char * psz_userid = NULL;
	SG_varray * pUsers = NULL;

	SG_NULLARGCHECK_RETURN(ppUsername);

	if(!szRepoName)
	{
		// Look up username based on 'whoami' of repo associated with cwd.
		// Best-effort: any failure just leaves psz_username NULL.
		SG_ERR_IGNORE( SG_cmd_util__get_repo_from_cwd(pCtx, &pRepo, NULL) );
		if(pRepo)
			SG_ERR_IGNORE( SG_user__get_username_for_repo(pCtx, pRepo, &psz_username) );
		SG_REPO_NULLFREE(pCtx, pRepo);
	}
	else if(SG_sz__starts_with(szRepoName, "http://") || SG_sz__starts_with(szRepoName, "https://"))
	{
		// Look up username based on 'whoami' of admin id of remote repo.
		SG_ERR_CHECK( SG_curl__alloc(pCtx, &pCurl) );

		// GET <url>.json to learn the remote repo's admin-id.
		SG_ERR_CHECK( SG_STRING__ALLOC__SZ(pCtx, &pUri, szRepoName) );
		SG_ERR_CHECK( SG_string__append__sz(pCtx, pUri, ".json") );
		SG_ERR_CHECK( SG_curl__reset(pCtx, pCurl) );
		SG_ERR_CHECK( SG_curl__setopt__sz(pCtx, pCurl, CURLOPT_URL, SG_string__sz(pUri)) );
		SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pResponse) );
		SG_ERR_CHECK( SG_curl__set__write_string(pCtx, pCurl, pResponse) );
		SG_ERR_CHECK( SG_curl__perform(pCtx, pCurl) );
		SG_ERR_CHECK( SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &responseStatusCode) );
		if(responseStatusCode==200)
		{
			const char * szAdminId = NULL;

			SG_ERR_CHECK( SG_VHASH__ALLOC__FROM_JSON__STRING(pCtx, &pRepoInfo, pResponse) );
			SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pRepoInfo, SG_SYNC_REPO_INFO_KEY__ADMIN_ID, &szAdminId) );

			// Map the remote admin-id to a locally-configured userid, if any.
			// (pUri is reused here as a localsettings path, not a URL.)
			SG_ERR_CHECK( SG_string__clear(pCtx, pUri) );
			SG_ERR_CHECK( SG_string__append__format(pCtx, pUri, "/admin/%s/whoami/userid", szAdminId) );
			SG_ERR_IGNORE( SG_localsettings__get__sz(pCtx, SG_string__sz(pUri), NULL, &psz_userid, NULL) );

			if(psz_userid)
			{
				// We now have the userid. Look up the username.
				SG_ERR_CHECK( SG_string__clear(pCtx, pUri) );
				SG_ERR_CHECK( SG_string__append__format(pCtx, pUri, "%s/users.json", szRepoName) );
				SG_ERR_CHECK( SG_curl__reset(pCtx, pCurl) );
				SG_ERR_CHECK( SG_curl__setopt__sz(pCtx, pCurl, CURLOPT_URL, SG_string__sz(pUri)) );
				SG_ERR_CHECK( SG_string__clear(pCtx, pResponse) );
				SG_ERR_CHECK( SG_curl__set__write_string(pCtx, pCurl, pResponse) );
				SG_ERR_CHECK( SG_curl__perform(pCtx, pCurl) );
				SG_ERR_CHECK( SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &responseStatusCode) );
				if(responseStatusCode==200)
				{
					SG_uint32 i, nUsers;

					SG_ERR_CHECK( SG_VARRAY__ALLOC__FROM_JSON__STRING(pCtx, &pUsers, pResponse) );
					SG_ERR_CHECK( SG_varray__count(pCtx, pUsers, &nUsers) );
					for(i=0; i<nUsers; ++i)
					{
						SG_vhash * pUser = NULL;		// reference borrowed from pUsers
						const char * psz_recid = NULL;

						SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pUsers, i, &pUser) );
						SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pUser, "recid", &psz_recid) );
						if(!strcmp(psz_recid, psz_userid))
						{
							const char * psz_name = NULL;
							SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pUser, "name", &psz_name) );
							SG_ERR_CHECK( SG_STRDUP(pCtx, psz_name, &psz_username) );
							break;
						}
					}
					SG_VARRAY_NULLFREE(pCtx, pUsers);
				}
				SG_NULLFREE(pCtx, psz_userid);
			}
			SG_VHASH_NULLFREE(pCtx, pRepoInfo);
		}
		SG_STRING_NULLFREE(pCtx, pResponse);
		SG_STRING_NULLFREE(pCtx, pUri);
		SG_CURL_NULLFREE(pCtx, pCurl);
	}
	else
	{
		// Look up username based on 'whoami' of repo provided.
		SG_ERR_CHECK( SG_REPO__OPEN_REPO_INSTANCE(pCtx, szRepoName, &pRepo) );
		SG_ERR_IGNORE( SG_user__get_username_for_repo(pCtx, pRepo, &psz_username) );
		SG_REPO_NULLFREE(pCtx, pRepo);
	}

	// Ownership of psz_username transfers to the caller.
	*ppUsername = psz_username;

	return;
fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_NULLFREE(pCtx, psz_username);
	SG_CURL_NULLFREE(pCtx, pCurl);
	SG_STRING_NULLFREE(pCtx, pUri);
	SG_STRING_NULLFREE(pCtx, pResponse);
	SG_VHASH_NULLFREE(pCtx, pRepoInfo);
	SG_NULLFREE(pCtx, psz_userid);
	SG_VARRAY_NULLFREE(pCtx, pUsers);
}
/**
 * Test comments/stamps/tags on a single changeset: add one of each and
 * verify lookups; exercise the tag reverse-lookup rbtree; add a second
 * tag, remove it, and verify the tag table returns to its prior state.
 *
 * Returns 1 on success, 0 on failure.
 *
 * (Fix: the fail path previously leaked pva, prb, prb_reversed, pRepo and
 * pdn, and freed pvh -- which is a reference borrowed from pva via
 * SG_varray__get__vhash and must not be freed by this function.)
 */
int u0050_logstuff_test__1(SG_context * pCtx, SG_pathname* pPathTopDir)
{
	char bufName[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathWorkingDir = NULL;
	SG_pathname* pPathFile = NULL;
	SG_vhash* pvh = NULL;		// borrowed from pva; never owned here
	SG_dagnode* pdn = NULL;
	const char* psz_hid_cs = NULL;	// borrowed from pdn
	SG_repo* pRepo = NULL;
	SG_uint32 count;
	SG_rbtree* prb = NULL;
	SG_varray* pva = NULL;
	SG_rbtree* prb_reversed = NULL;
	const char* psz_val = NULL;
	SG_audit q;

	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, bufName, sizeof(bufName), 32) );

	/* create the working dir */
	VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, bufName) );
	VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir) );

	/* add stuff */
	VERIFY_ERR_CHECK( u0050_logstuff__create_file__numbers(pCtx, pPathWorkingDir, "aaa", 20) );

	/* create the repo */
	VERIFY_ERR_CHECK( _ut_pt__new_repo(pCtx, bufName, pPathWorkingDir) );
	VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
	VERIFY_ERR_CHECK( u0050_logstuff__commit_all(pCtx, pPathWorkingDir, &pdn) );
	VERIFY_ERR_CHECK( SG_dagnode__get_id_ref(pCtx, pdn, &psz_hid_cs) );

	SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, bufName, &pRepo) );

#define MY_COMMENT "The name of this new file sucks!  What kind of a name is 'aaa'?"

	VERIFY_ERR_CHECK( SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS) );

	/* attach one comment, one stamp and one tag to the changeset */
	VERIFY_ERR_CHECK( SG_vc_comments__add(pCtx, pRepo, psz_hid_cs, MY_COMMENT, &q) );
	VERIFY_ERR_CHECK( SG_vc_stamps__add(pCtx, pRepo, psz_hid_cs, "crap", &q) );
	VERIFY_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "tcrap", &q) );

	/* verify the comment round-trips */
	VERIFY_ERR_CHECK( SG_vc_comments__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (1 == count));
	VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
	VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "text", &psz_val) );
	VERIFY_COND("match", (0 == strcmp(psz_val, MY_COMMENT)) );
	SG_VARRAY_NULLFREE(pCtx, pva);

	/* verify the stamp round-trips */
	VERIFY_ERR_CHECK( SG_vc_stamps__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (1 == count));
	VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
	VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "stamp", &psz_val) );
	VERIFY_COND("match", (0 == strcmp(psz_val, "crap")) );
	SG_VARRAY_NULLFREE(pCtx, pva);

	/* verify the tag round-trips */
	VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (1 == count));
	VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
	VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "tag", &psz_val) );
	VERIFY_COND("match", (0 == strcmp(psz_val, "tcrap")) );
	SG_VARRAY_NULLFREE(pCtx, pva);

	/* one tag total; reverse lookup maps the cset hid back to it */
	VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
	VERIFY_COND("count", (1 == count));
	VERIFY_ERR_CHECK( SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb_reversed, &count) );
	VERIFY_COND("count", (1 == count));

	{
		const char* psz_my_key = NULL;
		const char* psz_my_val = NULL;
		SG_bool b;

		VERIFY_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val) );
		VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap")) );
		VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs)) );
	}

	SG_RBTREE_NULLFREE(pCtx, prb_reversed);
	SG_RBTREE_NULLFREE(pCtx, prb);

	/* add a second tag to the same changeset */
	VERIFY_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "whatever", &q) );

	VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (2 == count));
	SG_VARRAY_NULLFREE(pCtx, pva);

	VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
	VERIFY_COND("count", (2 == count));

	/* both tags point at the same cset, so the reverse map has one entry */
	VERIFY_ERR_CHECK( SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb_reversed, &count) );
	VERIFY_COND("count", (1 == count));

	{
		const char* psz_my_key = NULL;
		const char* psz_my_val = NULL;
		SG_bool b;

		VERIFY_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val) );
		VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs)) );
		/* we don't know whether psz_my_val is tcrap or whatever. */
		// VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap")) );
	}

	SG_RBTREE_NULLFREE(pCtx, prb_reversed);
	SG_RBTREE_NULLFREE(pCtx, prb);

	{
		const char* psz_remove = "whatever";

		VERIFY_ERR_CHECK( SG_vc_tags__remove(pCtx, pRepo, &q, 1, &psz_remove) );

		/* Note that by removing whatever, we are bringing the tags list back
		 * to a state where it has been before (just tcrap).  This changeset in
		 * the tags table will have its own csid, because the parentage is
		 * different, but it's root idtrie HID will be the same as a previous
		 * node.
		 */
	}

	VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (1 == count));
	SG_VARRAY_NULLFREE(pCtx, pva);

	VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
	VERIFY_COND("count", (1 == count));
	SG_RBTREE_NULLFREE(pCtx, prb);

	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathFile);

	return 1;

fail:
	// pvh is borrowed from pva, so it is NOT freed here.
	SG_VARRAY_NULLFREE(pCtx, pva);
	SG_RBTREE_NULLFREE(pCtx, prb_reversed);
	SG_RBTREE_NULLFREE(pCtx, prb);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathFile);

	return 0;
}
/**
 * Build a "wide" DAG (20 changesets, each branching from the first) in a
 * server repo, pull it all into an empty clone, then make a second copy
 * via remote clone, verifying after each transfer that the repos' DAGs
 * and blobs are identical and the DAG is internally consistent.
 *
 * (Fixes: the remote-clone setup call now uses VERIFY_ERR_CHECK like the
 * rest of the test instead of a lone SG_ERR_CHECK, and the duplicate
 * SG_PATHNAME_NULLFREE of pPathWorkingDir in the cleanup was removed --
 * it was harmless (NULLFREE nulls the pointer) but redundant.)
 */
void MyFn(test__wide_dag)(SG_context* pCtx)
{
	char bufTopDir[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathTopDir = NULL;
	char buf_client_repo_name[SG_TID_MAX_BUFFER_LENGTH];
	char buf_server_repo_name[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathWorkingDir = NULL;
	SG_vhash* pvh = NULL;
	SG_repo* pClientRepo = NULL;
	SG_client* pClient = NULL;
	char* pszidFirstChangeset = NULL;
	SG_pathname* pPathCsDir = NULL;
	SG_uint32 lines;
	SG_uint32 i, j;
	SG_repo* pServerRepo = NULL;
	SG_bool bMatch = SG_FALSE;
	char buf_filename[7];		// holds small decimal indices (i < 20, j < 1)
	SG_varray* pvaZingMergeLog = NULL;
	SG_varray* pvaZingMergeErr = NULL;

	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, bufTopDir, sizeof(bufTopDir), 32) );
	VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__SZ(pCtx,&pPathTopDir,bufTopDir) );
	VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx,pPathTopDir) );

	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_client_repo_name, sizeof(buf_client_repo_name), 32) );
	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_server_repo_name, sizeof(buf_server_repo_name), 32) );

	INFOP("test__wide_dag", ("client repo: %s", buf_client_repo_name));
	INFOP("test__wide_dag", ("server repo: %s", buf_server_repo_name));

	/* create the repo */
	VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, buf_server_repo_name) );
	VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir) );
	VERIFY_ERR_CHECK( _ut_pt__new_repo2(pCtx, buf_server_repo_name, pPathWorkingDir, &pszidFirstChangeset) );

	/* open that repo */
	VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_server_repo_name, &pServerRepo) );

	/* create an empty clone to pull into */
	VERIFY_ERR_CHECK( SG_repo__create_empty_clone(pCtx, buf_server_repo_name, buf_client_repo_name) );
	VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_client_repo_name, &pClientRepo) );

	/* add stuff to server repo: every changeset is a child of the first,
	 * producing a wide (bushy) DAG rather than a linear one. */
	for (i = 0; i < 20; i++) // number of changesets
	{
		VERIFY_ERR_CHECK( _ut_pt__set_baseline(pCtx, pPathWorkingDir, pszidFirstChangeset) );

		VERIFY_ERR_CHECK( SG_sprintf(pCtx, buf_filename, sizeof(buf_filename), "%d", i) );
		VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathCsDir, pPathWorkingDir, buf_filename) );
		VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathCsDir) );

		for (j = 0; j < 1; j++) // number of files added per changeset
		{
			VERIFY_ERR_CHECK( SG_sprintf(pCtx, buf_filename, sizeof(buf_filename), "%d", j) );
			// random file size in [0, 2500) lines; rand()/(RAND_MAX+1.0) is in [0,1)
			lines = (int)(2500.0 * (rand() / (RAND_MAX + 1.0)));
			VERIFY_ERR_CHECK( MyFn(create_file__numbers)(pCtx, pPathCsDir, buf_filename, lines) );
		}

		SG_PATHNAME_NULLFREE(pCtx, pPathCsDir);

		VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
		VERIFY_ERR_CHECK( MyFn(commit_all)(pCtx, pPathWorkingDir, NULL) );
	}

	/* verify pre-pull repos are different */
	VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("pre-pull repos differ", !bMatch);

	/* get a client and pull from server repo to empty client repo */
	VERIFY_ERR_CHECK( SG_client__open(pCtx, buf_server_repo_name, NULL_CREDENTIAL, &pClient) ); // TODO Credentials
	VERIFY_ERR_CHECK( SG_pull__all(pCtx, buf_client_repo_name, pClient, &pvaZingMergeErr, &pvaZingMergeLog) );
	VERIFY_COND("", !pvaZingMergeErr);

	/* verify post-pull repos are identical */
	VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("post-pull repo DAGs differ", bMatch);
	VERIFY_ERR_CHECK( SG_sync__compare_repo_blobs(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("post-pull repo blobs differ", bMatch);

	VERIFY_ERR_CHECK( SG_repo__check_integrity(pCtx, pClientRepo, SG_REPO__CHECK_INTEGRITY__DAG_CONSISTENCY, SG_DAGNUM__VERSION_CONTROL, NULL, NULL) );

	SG_REPO_NULLFREE(pCtx, pClientRepo);

	/* Make another copy with clone */
	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_client_repo_name, sizeof(buf_client_repo_name), 32) );
	VERIFY_ERR_CHECK( SG_repo__create_empty_clone_from_remote(pCtx, pClient, buf_client_repo_name) );
	VERIFY_ERR_CHECK( SG_pull__clone(pCtx, buf_client_repo_name, pClient) );

	/* verify post-clone repos are identical */
	VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_client_repo_name, &pClientRepo) );
	VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("post-clone repo DAGs differ", bMatch);
	VERIFY_ERR_CHECK( SG_sync__compare_repo_blobs(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("post-clone repo blobs differ", bMatch);

	VERIFY_ERR_CHECK( SG_repo__check_integrity(pCtx, pClientRepo, SG_REPO__CHECK_INTEGRITY__DAG_CONSISTENCY, SG_DAGNUM__VERSION_CONTROL, NULL, NULL) );

	/* TODO: verify more stuff? */

	/* Fall through to common cleanup */

fail:
	/* close client */
	SG_CLIENT_NULLFREE(pCtx, pClient);

	/* close both repos */
	SG_REPO_NULLFREE(pCtx, pServerRepo);
	SG_REPO_NULLFREE(pCtx, pClientRepo);

	SG_NULLFREE(pCtx, pszidFirstChangeset);
	SG_PATHNAME_NULLFREE(pCtx, pPathTopDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathCsDir);
	SG_VHASH_NULLFREE(pCtx, pvh);
	SG_VARRAY_NULLFREE(pCtx, pvaZingMergeLog);
	SG_VARRAY_NULLFREE(pCtx, pvaZingMergeErr);
}
/**
 * Simple pull test: create a server repo with two committed changesets,
 * pull everything into an empty clone, and verify the two repos end up
 * with identical DAGs and blobs and a consistent version-control DAG.
 *
 * (Fix: removed the duplicate SG_PATHNAME_NULLFREE of pPathWorkingDir in
 * the cleanup -- harmless since NULLFREE nulls the pointer, but redundant.)
 */
void MyFn(test__simple)(SG_context* pCtx)
{
	char bufTopDir[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathTopDir = NULL;
	char buf_client_repo_name[SG_TID_MAX_BUFFER_LENGTH];
	char buf_server_repo_name[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathWorkingDir = NULL;
	SG_vhash* pvh = NULL;
	SG_repo* pClientRepo = NULL;
	SG_client* pClient = NULL;
	SG_repo* pServerRepo = NULL;
	SG_bool bMatch = SG_FALSE;
	SG_varray* pvaZingMergeLog = NULL;
	SG_varray* pvaZingMergeErr = NULL;

	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, bufTopDir, sizeof(bufTopDir), 32) );
	VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__SZ(pCtx,&pPathTopDir,bufTopDir) );
	VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx,pPathTopDir) );

	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_client_repo_name, sizeof(buf_client_repo_name), 32) );
	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_server_repo_name, sizeof(buf_server_repo_name), 32) );

	INFOP("test__simple", ("client repo: %s", buf_client_repo_name));
	INFOP("test__simple", ("server repo: %s", buf_server_repo_name));

	/* create the repo */
	VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, buf_server_repo_name) );
	VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir) );
	VERIFY_ERR_CHECK( _ut_pt__new_repo2(pCtx, buf_server_repo_name, pPathWorkingDir, NULL) );

	/* open that repo */
	VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_server_repo_name, &pServerRepo) );

	/* create an empty clone to pull into */
	VERIFY_ERR_CHECK( SG_repo__create_empty_clone(pCtx, buf_server_repo_name, buf_client_repo_name) );
	VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_client_repo_name, &pClientRepo) );

	/* add stuff to server repo: two changesets of one file each */
	VERIFY_ERR_CHECK( MyFn(create_file__numbers)(pCtx, pPathWorkingDir, "aaa", 10) );
	VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
	VERIFY_ERR_CHECK( MyFn(commit_all)(pCtx, pPathWorkingDir, NULL) );

	VERIFY_ERR_CHECK( MyFn(create_file__numbers)(pCtx, pPathWorkingDir, "bbb", 10) );
	VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
	VERIFY_ERR_CHECK( MyFn(commit_all)(pCtx, pPathWorkingDir, NULL) );

	/* verify pre-pull repos are different */
	VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("pre-pull repos differ", !bMatch);

	/* get a client and pull from server repo to empty client repo */
	VERIFY_ERR_CHECK( SG_client__open(pCtx, buf_server_repo_name, NULL_CREDENTIAL, &pClient) ); // TODO Credentials
	VERIFY_ERR_CHECK( SG_pull__all(pCtx, buf_client_repo_name, pClient, &pvaZingMergeErr, &pvaZingMergeLog) );
	VERIFY_COND("", !pvaZingMergeErr);
	SG_CLIENT_NULLFREE(pCtx, pClient);

	/* verify post-pull repos are identical */
	VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("post-pull repo DAGs differ", bMatch);
	VERIFY_ERR_CHECK( SG_sync__compare_repo_blobs(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("post-pull repo blobs differ", bMatch);

	VERIFY_ERR_CHECK( SG_repo__check_integrity(pCtx, pClientRepo, SG_REPO__CHECK_INTEGRITY__DAG_CONSISTENCY, SG_DAGNUM__VERSION_CONTROL, NULL, NULL) );

	/* TODO: verify more stuff? */

	/* Fall through to common cleanup */

fail:
	/* close client */
	SG_CLIENT_NULLFREE(pCtx, pClient);

	/* close both repos */
	SG_REPO_NULLFREE(pCtx, pServerRepo);
	SG_REPO_NULLFREE(pCtx, pClientRepo);

	SG_PATHNAME_NULLFREE(pCtx, pPathTopDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
	SG_VHASH_NULLFREE(pCtx, pvh);
	SG_VARRAY_NULLFREE(pCtx, pvaZingMergeLog);
	SG_VARRAY_NULLFREE(pCtx, pvaZingMergeErr);
}