/**
 * Exercise two independent DAGs inside one repo: build a root with three
 * children in SG_DAGNUM__TESTING__NOTHING and a root with two children in
 * SG_DAGNUM__TESTING2__NOTHING, then verify each DAG's leaf count separately
 * (3 and 2) to prove the two DAGs don't bleed into each other.
 *
 * Fix: the fail: path previously freed only pRepo, leaking the node-id
 * strings and the leaves rbtree if any step after their allocation failed.
 */
void u0048_multidag_test__1(SG_context * pCtx)
{
	char bufName[SG_TID_MAX_BUFFER_LENGTH + u0048_multidag__MY_LABEL_LENGTH];
	SG_repo* pRepo = NULL;
	SG_rbtree* prb = NULL;
	SG_uint32 count;
	char* pid1 = NULL;
	char* pid1a = NULL;
	char* pid1b = NULL;
	char* pid1c = NULL;
	char* pid2 = NULL;
	char* pid2a = NULL;
	char* pid2b = NULL;

	/* repo name = fixed label prefix + random TID suffix */
	VERIFY_ERR_CHECK( SG_strcpy(pCtx, bufName, sizeof(bufName), u0048_multidag__MY_LABEL) );
	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, &bufName[u0048_multidag__MY_LABEL_LENGTH],
									   (sizeof(bufName) - u0048_multidag__MY_LABEL_LENGTH), 32) );

	/* create the repo */
	VERIFY_ERR_CHECK( u0048_multidag__new_repo(pCtx, bufName, &pRepo) );

	/* DAG 1: one root, three leaves */
	VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid1,  NULL, SG_DAGNUM__TESTING__NOTHING, pRepo) );
	VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid1a, pid1, SG_DAGNUM__TESTING__NOTHING, pRepo) );
	VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid1b, pid1, SG_DAGNUM__TESTING__NOTHING, pRepo) );
	VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid1c, pid1, SG_DAGNUM__TESTING__NOTHING, pRepo) );

	/* DAG 2: one root, two leaves */
	VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid2,  NULL, SG_DAGNUM__TESTING2__NOTHING, pRepo) );
	VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid2a, pid2, SG_DAGNUM__TESTING2__NOTHING, pRepo) );
	VERIFY_ERR_CHECK( u0048_multidag__add_dagnode(pCtx, &pid2b, pid2, SG_DAGNUM__TESTING2__NOTHING, pRepo) );

	SG_NULLFREE(pCtx, pid1);
	SG_NULLFREE(pCtx, pid1a);
	SG_NULLFREE(pCtx, pid1b);
	SG_NULLFREE(pCtx, pid1c);
	SG_NULLFREE(pCtx, pid2);
	SG_NULLFREE(pCtx, pid2a);
	SG_NULLFREE(pCtx, pid2b);

	/* DAG 1 should have exactly the three child leaves */
	VERIFY_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__TESTING__NOTHING, &prb) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
	SG_RBTREE_NULLFREE(pCtx, prb);
	VERIFY_COND("count", (3 == count));

	/* DAG 2 should have exactly the two child leaves */
	VERIFY_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__TESTING2__NOTHING, &prb) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
	SG_RBTREE_NULLFREE(pCtx, prb);
	VERIFY_COND("count", (2 == count));

	SG_REPO_NULLFREE(pCtx, pRepo);
	return;

fail:
	/* free everything that may still be live when an early step fails */
	SG_NULLFREE(pCtx, pid1);
	SG_NULLFREE(pCtx, pid1a);
	SG_NULLFREE(pCtx, pid1b);
	SG_NULLFREE(pCtx, pid1c);
	SG_NULLFREE(pCtx, pid2);
	SG_NULLFREE(pCtx, pid2a);
	SG_NULLFREE(pCtx, pid2b);
	SG_RBTREE_NULLFREE(pCtx, prb);
	SG_REPO_NULLFREE(pCtx, pRepo);
}
/**
 * Enumerate every registered repo implementation and, for each one, print
 * the hash methods it supports (name and hash length).
 *
 * NOTE: there is no oracle for what the list should contain, so this test
 * NOTE: only prints the list; it does not verify completeness.
 */
void MyFn(list_hashes)(SG_context * pCtx)
{
	SG_repo * pRepo = NULL;
	SG_vhash * pvhImpls = NULL;    /* vtable-name -> info */
	SG_vhash * pvhHashes = NULL;   /* hash-method-name -> hash length */
	SG_uint32 i, nImpls;

	/* ask the (static) repo layer for the list of implementations */
	VERIFY_ERR_CHECK( SG_repo__query_implementation(pCtx,NULL,
													SG_REPO__QUESTION__VHASH__LIST_REPO_IMPLEMENTATIONS,
													NULL,NULL,NULL,0,
													&pvhImpls) );
	VERIFY_ERR_CHECK( SG_vhash__count(pCtx,pvhImpls,&nImpls) );

	for (i=0; i<nImpls; i++)
	{
		const char * pszImplName;
		SG_uint32 h, nHashes;

		VERIFY_ERR_CHECK( SG_vhash__get_nth_pair(pCtx,pvhImpls,i,&pszImplName,NULL) );
		INFOP("vtable",("Repo Implementation[%d]: [%s]",i,pszImplName));

		/* instantiate this implementation and ask it for its hash methods */
		VERIFY_ERR_CHECK( SG_repo__alloc(pCtx,&pRepo,pszImplName) );
		VERIFY_ERR_CHECK( SG_repo__query_implementation(pCtx,pRepo,
														SG_REPO__QUESTION__VHASH__LIST_HASH_METHODS,
														NULL,NULL,NULL,0,
														&pvhHashes) );
		VERIFY_ERR_CHECK( SG_vhash__count(pCtx,pvhHashes,&nHashes) );

		for (h=0; h<nHashes; h++)
		{
			const char * pszHashName;
			const SG_variant * pVariant;
			SG_int64 i64;
			SG_uint32 lenHash;

			/* each pair is (hash-method-name, length-as-int64) */
			VERIFY_ERR_CHECK( SG_vhash__get_nth_pair(pCtx,pvhHashes,h,&pszHashName,&pVariant) );
			VERIFY_ERR_CHECK( SG_variant__get__int64(pCtx,pVariant,&i64) );
			lenHash = (SG_uint32)i64;
			INFOP("vtable.hash_method",("Repo [%s] Hash [%s] Length [%d]",pszImplName,pszHashName,lenHash));
		}

		/* release per-implementation resources before the next iteration */
		SG_VHASH_NULLFREE(pCtx, pvhHashes);
		SG_REPO_NULLFREE(pCtx, pRepo);
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvhHashes);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_VHASH_NULLFREE(pCtx, pvhImpls);
}
/**
 * Add a fragball to an in-progress push: open the push's staging area,
 * slurp the named fragball into it, then return the staging area's
 * post-slurp status to the caller.
 *
 * @param pCtx               error/message context
 * @param pServer            server object (currently only NULL-checked)
 * @param pPushId            identifies the staging area for this push
 * @param psz_fragball_name  name of the fragball file to absorb
 * @param ppResult           [out] status vhash; caller owns and must free
 *
 * Fix: removed an unused local SG_repo* that was declared and freed but
 * never assigned.
 */
void SG_server__push_add(
	SG_context* pCtx,
	SG_server * pServer,
	const char* pPushId,
	const char* psz_fragball_name,
	SG_vhash** ppResult
	)
{
	SG_staging* pStaging = NULL;
	SG_vhash* pvh_status = NULL;

	SG_NULLARGCHECK_RETURN(pServer);
	SG_NULLARGCHECK_RETURN(pPushId);
	SG_NULLARGCHECK_RETURN(ppResult);

	SG_ERR_CHECK( SG_staging__open(pCtx, pPushId, &pStaging) );
	SG_ERR_CHECK( SG_staging__slurp_fragball(pCtx, pStaging, psz_fragball_name) );
	SG_ERR_CHECK( SG_staging__check_status(pCtx, pStaging, SG_TRUE, SG_TRUE, SG_FALSE, SG_TRUE, SG_TRUE, &pvh_status) );

	/* hand the status to the caller; NULL our copy so fail: doesn't free it */
	*ppResult = pvh_status;
	pvh_status = NULL;

	/* fallthru */
fail:
	SG_VHASH_NULLFREE(pCtx, pvh_status);
	SG_STAGING_NULLFREE(pCtx, pStaging);
}
/**
 * Test-suite driver (expanded by the MyMain/TEMPLATE macros).
 * Builds the shared fixtures (a scratch repo and a temp source dir),
 * runs the blob-creation test cases against them, then falls through
 * to common cleanup.
 */
MyMain()
{
	SG_repo * pRepo = NULL;                 // shared scratch repo; freed at fail:
	SG_pathname * pPathnameTempDir = NULL;  // shared temp dir; freed at fail:

	TEMPLATE_MAIN_START;   // template macro: sets up pCtx and test bookkeeping

	// fixtures used by all BEGIN_TEST cases below
	VERIFY_ERR_CHECK( MyFn(create_repo)(pCtx,&pRepo) );
	VERIFY_ERR_CHECK( MyFn(create_tmp_src_dir)(pCtx,&pPathnameTempDir) );

	BEGIN_TEST( MyFn(create_some_blobs_from_bytes)(pCtx, pRepo) );
	BEGIN_TEST( MyFn(create_some_blobs_from_files)(pCtx, pRepo,pPathnameTempDir) );
	BEGIN_TEST( MyFn(create_zero_byte_blob)(pCtx, pRepo) );

	//////////////////////////////////////////////////////////////////
	// TODO delete repo directory and everything we created under it.
	// TODO delete temp directory and everything we created under it.

	// fall-thru to common cleanup
fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_PATHNAME_NULLFREE(pCtx, pPathnameTempDir);
	TEMPLATE_MAIN_END;     // template macro: reports results / returns
}
/**
 * Clone-pull over the "c" (local) client vtable: open the remote repo
 * named by the client spec and have it write a full-repo fragball into
 * the given staging directory.
 *
 * @param pCtx              error/message context
 * @param pClient           client whose psz_remote_repo_spec names the repo
 * @param pStagingPathname  directory in which the fragball is created
 * @param ppszFragballName  [out] fragball file name; caller owns and frees
 *
 * Fix: removed two set-but-unused locals (pMe instance-data cast and an
 * SG_vhash* status that was never assigned, only freed).
 */
void sg_client__c__pull_clone(
	SG_context* pCtx,
	SG_client* pClient,
	const SG_pathname* pStagingPathname,
	char** ppszFragballName)
{
	SG_repo* pRepo = NULL;

	SG_NULLARGCHECK_RETURN(pClient);
	SG_NULLARGCHECK_RETURN(ppszFragballName);

	SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, pClient->psz_remote_repo_spec, &pRepo) );

	SG_ERR_CHECK( SG_context__msg__emit(pCtx, "Copying repository...") );
	SG_ERR_CHECK( SG_repo__fetch_repo__fragball(pCtx, pRepo, pStagingPathname, ppszFragballName) );
	SG_ERR_CHECK( SG_context__msg__emit(pCtx, "done") );

	/* fall through */
fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
	/* always terminate the progress line, even on error */
	SG_ERR_IGNORE( SG_context__msg__emit(pCtx, "\n") );
}
/**
 * Verify abbreviated-HID and tag lookups: commit one changeset, tag it,
 * then resolve its full HID three ways -- dagnode lookup by 10-char HID
 * prefix, blob lookup by the same prefix, and tag-name lookup -- and
 * check that all three return the full changeset HID.
 */
void u0051_hidlookup_test__1(SG_context * pCtx, SG_pathname* pPathTopDir)
{
	char bufName[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathWorkingDir = NULL;
	SG_pathname* pPathFile = NULL;   // never assigned here; freed for symmetry with sibling tests
	SG_dagnode* pdn = NULL;
	const char* psz_hid_cs = NULL;   // borrowed from pdn -- do not free
	SG_repo* pRepo = NULL;
	char buf_partial[256];
	char* psz_result = NULL;         // owned; freed after each lookup
	SG_audit q;

	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, bufName, sizeof(bufName), 32) );

	/* create the working dir */
	VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, bufName) );
	VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir) );

	/* add stuff */
	VERIFY_ERR_CHECK( u0051_hidlookup__create_file__numbers(pCtx, pPathWorkingDir, "aaa", 20) );

	/* create the repo */
	VERIFY_ERR_CHECK( _ut_pt__new_repo(pCtx, bufName, pPathWorkingDir) );
	VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
	VERIFY_ERR_CHECK( u0051_hidlookup__commit_all(pCtx, pPathWorkingDir, &pdn) );
	VERIFY_ERR_CHECK( SG_dagnode__get_id_ref(pCtx, pdn, &psz_hid_cs) );

	VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, bufName, &pRepo) );

	// tag the changeset so the tag-lookup below has something to find
	VERIFY_ERR_CHECK( SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS) );
	VERIFY_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "remember", &q) );

	// build a 10-character prefix of the changeset HID for the partial lookups
	VERIFY_ERR_CHECK( SG_strcpy(pCtx, buf_partial, sizeof(buf_partial), psz_hid_cs) );
	buf_partial[10] = 0;

	// 1) partial lookup in the version-control DAG
	VERIFY_ERR_CHECK( SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, buf_partial, &psz_result) );
	VERIFY_COND("found", (0 == strcmp(psz_result, psz_hid_cs)));
	SG_NULLFREE(pCtx, psz_result);

	// 2) partial lookup against blob HIDs (the changeset is itself a blob)
	VERIFY_ERR_CHECK( SG_repo__hidlookup__blob(pCtx, pRepo, buf_partial, &psz_result) );
	VERIFY_COND("found", (0 == strcmp(psz_result, psz_hid_cs)));
	SG_NULLFREE(pCtx, psz_result);

	// 3) lookup by tag name
	VERIFY_ERR_CHECK( SG_vc_tags__lookup__tag(pCtx, pRepo, "remember", &psz_result) );
	VERIFY_COND("found", (0 == strcmp(psz_result, psz_hid_cs)));
	SG_NULLFREE(pCtx, psz_result);

	// success falls through into common cleanup
fail:
	SG_NULLFREE(pCtx, psz_result);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathFile);
}
/**
 * Release a pull-instance-data bundle and everything it owns
 * (pull id string, staging area, destination repo handle).
 * A NULL pMe is a no-op.
 */
static void _free_instance_data(SG_context* pCtx, sg_pull_instance_data* pMe)
{
	if (!pMe)
		return;

	SG_NULLFREE(pCtx, pMe->pszPullId);
	SG_STAGING_NULLFREE(pCtx, pMe->pStaging);
	SG_REPO_NULLFREE(pCtx, pMe->pPullIntoRepo);
	SG_NULLFREE(pCtx, pMe);
}
/**
 * Command-level COMMIT: translate parsed command-line options into an
 * SG_wc_commit_args, run the commit (or a test/dry run), optionally dump
 * the journal, and on a real commit print the new changeset's log entry.
 */
void wc__do_cmd_commit(SG_context * pCtx,
					   const SG_option_state * pOptSt,
					   const SG_stringarray * psaArgs)
{
	SG_wc_commit_args ca;
	char * pszHidNewCSet = NULL;
	SG_repo * pRepo = NULL;
	SG_vhash * pvhPileOfCleanBranches = NULL;
	SG_varray * pvaJournal = NULL;

	// map command-line options onto the commit-args struct
	memset(&ca, 0, sizeof(ca));
	ca.bDetached = pOptSt->bAllowDetached;
	ca.pszUser = pOptSt->psz_username;
	ca.pszWhen = pOptSt->psz_when;
	ca.pszMessage = pOptSt->psz_message;
	// only prompt via editor when no message was given on the command line
	ca.pfnPrompt = ((pOptSt->bPromptForDescription) ? SG_cmd_util__get_comment_from_editor : NULL);
	ca.psaInputs = psaArgs;	// null for a complete commit; non-null for a partial commit.
	ca.depth = WC__GET_DEPTH(pOptSt);
	ca.psaAssocs = pOptSt->psa_assocs;
	ca.bAllowLost = pOptSt->bAllowLost;
	ca.psaStamps = pOptSt->psa_stamps;

	// journal is only requested (and reported) in verbose mode
	SG_ERR_CHECK( SG_wc__commit(pCtx, NULL, &ca, pOptSt->bTest,
								((pOptSt->bVerbose) ? &pvaJournal : NULL),
								&pszHidNewCSet) );

	if (pvaJournal)
		SG_ERR_IGNORE( sg_report_journal(pCtx, pvaJournal) );

	if (!pOptSt->bTest)
	{
		// after the commit is finished, display the details
		// of the new changeset.
		SG_ERR_CHECK( SG_cmd_util__get_repo_from_cwd(pCtx, &pRepo, NULL) );
		SG_ERR_CHECK( SG_vc_branches__cleanup(pCtx, pRepo, &pvhPileOfCleanBranches) );
		SG_ERR_CHECK( SG_cmd_util__dump_log(pCtx, SG_CS_STDOUT, pRepo, pszHidNewCSet,
											pvhPileOfCleanBranches, SG_FALSE, SG_TRUE) );
	}

fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_VHASH_NULLFREE(pCtx, pvhPileOfCleanBranches);
	SG_NULLFREE(pCtx, pszHidNewCSet);
	SG_VARRAY_NULLFREE(pCtx, pvaJournal);
}
/**
 * Create a fresh local repo for the multidag tests and register it in the
 * closet under the given name. On success, ownership of the repo handle
 * transfers to the caller via ppResult.
 *
 * Fix: the fail: path previously leaked pcsFirst and pszidGidActualRoot
 * when a step after their allocation failed; they are now freed there too.
 * (pvhActualDescriptor is a borrowed const reference and is never freed.)
 */
void u0048_multidag__new_repo(
	SG_context * pCtx,
	const char* pszRidescName,
	SG_repo** ppResult
	)
{
	SG_vhash* pvhPartialDescriptor = NULL;
	SG_repo* pRepo = NULL;
	const SG_vhash* pvhActualDescriptor = NULL;  /* borrowed from pRepo */
	SG_changeset* pcsFirst = NULL;
	char* pszidGidActualRoot = NULL;
	const char* pszidFirstChangeset = NULL;      /* borrowed from pcsFirst */
	char buf_repo_id[SG_GID_BUFFER_LENGTH];
	char buf_admin_id[SG_GID_BUFFER_LENGTH];

	VERIFY_ERR_CHECK( SG_gid__generate(pCtx, buf_repo_id, sizeof(buf_repo_id)) );
	VERIFY_ERR_CHECK( SG_gid__generate(pCtx, buf_admin_id, sizeof(buf_admin_id)) );

	VERIFY_ERR_CHECK( SG_closet__get_partial_repo_instance_descriptor_for_new_local_repo(pCtx, &pvhPartialDescriptor) );

	/* This test case writes dag nodes which are not real. They don't have a
	 * changeset associated with them. So, if we use a caching repo, the
	 * caching code will fail because it tries to load a changeset
	 * which doesn't exist. So, we strip down to a raw repo here. */
	VERIFY_ERR_CHECK( SG_repo__create_repo_instance(pCtx,pvhPartialDescriptor,SG_TRUE,NULL,buf_repo_id,buf_admin_id,&pRepo) );

	VERIFY_ERR_CHECK( sg_zing__init_new_repo(pCtx, pRepo) );

	VERIFY_ERR_CHECK( SG_repo__create_user_root_directory(pCtx, pRepo, "@", &pcsFirst, &pszidGidActualRoot) );
	VERIFY_ERR_CHECK( SG_changeset__get_id_ref(pCtx, pcsFirst, &pszidFirstChangeset) );

	VERIFY_ERR_CHECK( SG_repo__get_descriptor(pCtx, pRepo, &pvhActualDescriptor) );

	/* TODO should this give an error if the ridesc name already exists? */
	VERIFY_ERR_CHECK( SG_closet__descriptors__add(pCtx, pszRidescName, pvhActualDescriptor) );

	SG_NULLFREE(pCtx, pszidGidActualRoot);
	SG_CHANGESET_NULLFREE(pCtx, pcsFirst);
	SG_VHASH_NULLFREE(pCtx, pvhPartialDescriptor);

	*ppResult = pRepo;
	return;

fail:
	SG_NULLFREE(pCtx, pszidGidActualRoot);
	SG_CHANGESET_NULLFREE(pCtx, pcsFirst);
	SG_VHASH_NULLFREE(pCtx, pvhPartialDescriptor);
	SG_REPO_NULLFREE(pCtx, pRepo);
	return;
}
/**
 * Template-keyword replacer: fill `replacement` with the value for a
 * recognized keyword (WFREPO, USERNAME, EMAIL, USERSELECTION), resolving
 * the repo descriptor name from the request headers as needed.
 * Unrecognized keywords leave `replacement` untouched.
 */
static void _default_replacer(
	SG_context * pCtx,
	const _request_headers *pRequestHeaders,
	SG_string *keyword,
	SG_string *replacement,
	SG_bool *needEncoding)
{
	SG_string *pstrRepoDescriptorName = NULL;
	SG_repo *repo = NULL;   // opened lazily; only for USERNAME / EMAIL

	SG_UNUSED(pRequestHeaders);

	if (seq(keyword, "WFREPO"))
	{
		// replacement = the repo descriptor name itself
		SG_ERR_CHECK( _getDescriptorName(pCtx, pRequestHeaders, &pstrRepoDescriptorName) );
		SG_ERR_CHECK( SG_string__set__string(pCtx, replacement, pstrRepoDescriptorName) );
	}
	else if (seq(keyword, "USERNAME"))
	{
		SG_ERR_CHECK( _getDescriptorName(pCtx, pRequestHeaders, &pstrRepoDescriptorName) );
		// repo stays NULL when no descriptor name is available;
		// _getUserId is handed whatever we have
		if (pstrRepoDescriptorName != NULL)
		{
			SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, SG_string__sz(pstrRepoDescriptorName), &repo) );
		}
		SG_ERR_CHECK( _getUserId(pCtx, repo, replacement) );
	}
	else if (seq(keyword, "EMAIL"))
	{
		SG_ERR_CHECK( _getDescriptorName(pCtx, pRequestHeaders, &pstrRepoDescriptorName) );
		if (pstrRepoDescriptorName != NULL)
		{
			SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, SG_string__sz(pstrRepoDescriptorName), &repo) );
		}
		SG_ERR_CHECK( _getUserEmail(pCtx, repo, replacement) );
	}
	else if (seq(keyword, "USERSELECTION"))
	{
		SG_ERR_CHECK( _getDescriptorName(pCtx, pRequestHeaders, &pstrRepoDescriptorName) );
		SG_ERR_CHECK( _fillInUserSelection(pCtx, pstrRepoDescriptorName, replacement) );
		// the selection markup is already HTML; don't re-encode it
		*needEncoding = SG_FALSE;
	}

	// success falls through into cleanup
fail:
	SG_STRING_NULLFREE(pCtx, pstrRepoDescriptorName);
	SG_REPO_NULLFREE(pCtx, repo);
}
/**
 * Push-add over the "c" (local) client vtable. With no fragball path,
 * just report the push's current staging status; otherwise hand the
 * fragball's file name to the server to absorb and release the caller's
 * pathname.
 *
 * @param ppPath_fragball  [in/out] optional; freed and NULLed after a
 *                         successful server push_add
 * @param ppResult         [out] status vhash; caller owns and frees
 *
 * Fix: removed two unused locals (an SG_repo* and an SG_vhash* descriptor)
 * that were declared and freed but never assigned.
 */
void sg_client__c__push_add(SG_context* pCtx,
							SG_client * pClient,
							SG_client_push_handle* pPush,
							SG_pathname** ppPath_fragball,
							SG_vhash** ppResult)
{
	sg_client_c_instance_data* pMe = NULL;
	sg_client_c_push_handle* pMyPush = (sg_client_c_push_handle*)pPush;
	SG_string* pFragballName = NULL;
	SG_staging* pStaging = NULL;

	SG_NULLARGCHECK_RETURN(pClient);
	SG_NULLARGCHECK_RETURN(pPush);
	SG_NULLARGCHECK_RETURN(ppResult);

	pMe = (sg_client_c_instance_data*)pClient->p_vtable_instance_data;

	if (!ppPath_fragball || !*ppPath_fragball)
	{
		/* get the push's current status */
		SG_ERR_CHECK( SG_staging__open(pCtx, pMyPush->pszPushId, &pStaging) );
		SG_ERR_CHECK( SG_staging__check_status(pCtx, pStaging, SG_TRUE, SG_TRUE, SG_TRUE, SG_TRUE, SG_TRUE, ppResult) );
	}
	else
	{
		/* add the fragball to the push */
		// TODO: This is doing a lot more than we need here.
		SG_ERR_CHECK( SG_pathname__get_last(pCtx, *ppPath_fragball, &pFragballName) );

		/* Tell the server to add the fragball. */
		SG_ERR_CHECK( SG_server__push_add(pCtx, pMe->pServer, pMyPush->pszPushId, SG_string__sz(pFragballName), ppResult) );
		SG_PATHNAME_NULLFREE(pCtx, *ppPath_fragball);
	}

	/* fall through */
fail:
	SG_STRING_NULLFREE(pCtx, pFragballName);
	SG_STAGING_NULLFREE(pCtx, pStaging);
}
/**
 * Fetch dagnode info over the "c" (local) client vtable: open the repo
 * named by the client's remote spec and forward the request straight to
 * the server-side implementation.
 *
 * @param ppvaInfo  [out] result array; caller owns and frees
 *
 * Fix: removed a set-but-unused pMe instance-data local.
 */
void sg_client__c__get_dagnode_info(
	SG_context* pCtx,
	SG_client* pClient,
	SG_vhash* pvhRequest,
	SG_varray** ppvaInfo)
{
	SG_repo* pRepo = NULL;

	SG_NULLARGCHECK_RETURN(pClient);

	SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, pClient->psz_remote_repo_spec, &pRepo) );
	SG_ERR_CHECK( SG_server__get_dagnode_info(pCtx, pRepo, pvhRequest, ppvaInfo) );

	/* fall through */
fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
}
/**
 * Fetch repo/admin/hash-method ids over the "c" (local) client vtable:
 * open the repo named by the client's remote spec and forward to the
 * server-side implementation.
 *
 * @param ppszRepoId,ppszAdminId,ppszHashMethod  [out] caller owns and frees
 *
 * Fix: removed a set-but-unused pMe instance-data local.
 */
void sg_client__c__get_repo_info(SG_context* pCtx,
								 SG_client* pClient,
								 char** ppszRepoId,
								 char** ppszAdminId,
								 char** ppszHashMethod)
{
	SG_repo* pRepo = NULL;

	SG_NULLARGCHECK_RETURN(pClient);

	SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, pClient->psz_remote_repo_spec, &pRepo) );
	SG_ERR_CHECK( SG_server__get_repo_info(pCtx, pRepo, ppszRepoId, ppszAdminId, ppszHashMethod) );

	/* fall through */
fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
}
void sg_vv2__history__repo( SG_context * pCtx, const char * pszRepoDescriptorName, //The local descriptor name. const SG_stringarray * psaArgs, // If present, these must be full repo-paths. const SG_rev_spec* pRevSpec, const SG_rev_spec* pRevSpec_single_revisions, const char* pszUser, const char* pszStamp, SG_uint32 nResultLimit, SG_bool bHideObjectMerges, SG_int64 nFromDate, SG_int64 nToDate, SG_bool bListAll, SG_bool bReassembleDag, SG_bool* pbHasResult, SG_vhash** ppvhBranchPile, SG_history_result ** ppResult, SG_history_token ** ppHistoryToken) { SG_repo * pRepo = NULL; SG_ERR_CHECK( SG_REPO__OPEN_REPO_INSTANCE(pCtx, pszRepoDescriptorName, &pRepo) ); SG_ERR_CHECK( sg_vv2__history__repo2(pCtx, pRepo, psaArgs, pRevSpec, pRevSpec_single_revisions, pszUser, pszStamp, nResultLimit, bHideObjectMerges, nFromDate, nToDate, bListAll, bReassembleDag, pbHasResult, ppResult, ppHistoryToken)); /* This is kind of a hack. History callers often need branch data to format ouput. * But we open the repo down here. I didn't want to open/close it again. So we do this. */ if (ppvhBranchPile) SG_ERR_CHECK( SG_vc_branches__cleanup(pCtx, pRepo, ppvhBranchPile) ); fail: SG_REPO_NULLFREE(pCtx, pRepo); }
/**
 * Pull-request over the "c" (local) client vtable: open the remote repo
 * and ask the server to build the requested fragball directly in our
 * staging directory (legitimate because the "remote" repo is local).
 *
 * @param ppszFragballName  [out] fragball file name; caller owns and frees
 * @param ppvhStatus        [out] status vhash; caller owns and frees
 *
 * Fix: removed a duplicated SG_VHASH_NULLFREE(pvhStatus) in the cleanup
 * section (harmless but redundant -- NULLFREE already NULLs the pointer).
 */
void sg_client__c__pull_request_fragball(SG_context* pCtx,
										 SG_client* pClient,
										 SG_vhash* pvhRequest,
										 const SG_pathname* pStagingPathname,
										 char** ppszFragballName,
										 SG_vhash** ppvhStatus)
{
	sg_client_c_instance_data* pMe = NULL;
	SG_repo* pRepo = NULL;
	char* pszFragballName = NULL;
	SG_vhash* pvhStatus = NULL;

	SG_NULLARGCHECK_RETURN(pClient);
	SG_NULLARGCHECK_RETURN(ppszFragballName);
	SG_NULLARGCHECK_RETURN(ppvhStatus);

	pMe = (sg_client_c_instance_data*)pClient->p_vtable_instance_data;

	SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, pClient->psz_remote_repo_spec, &pRepo) );

	/* Tell the server to build its fragball in our staging directory.
	   We can do this just calling pServer directly because we know it's a local repo. */
	SG_ERR_CHECK( SG_server__pull_request_fragball(pCtx, pRepo, pvhRequest, pStagingPathname, &pszFragballName, &pvhStatus) );

	/* hand results to the caller; NULL our copies so fail: doesn't free them */
	*ppszFragballName = pszFragballName;
	pszFragballName = NULL;
	*ppvhStatus = pvhStatus;
	pvhStatus = NULL;

	/* fall through */
fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_VHASH_NULLFREE(pCtx, pvhStatus);
	SG_NULLFREE(pCtx, pszFragballName);
}
/**
 * Pull/clone stress test over a "wide" DAG: build 20 sibling changesets
 * (each branching from the first changeset), pull them all into an empty
 * clone, then make a second copy via clone -- verifying after each sync
 * that the DAGs and blobs of source and destination match.
 *
 * Fixes: one call used SG_ERR_CHECK where every other call in this test
 * uses VERIFY_ERR_CHECK (now consistent), and a duplicated
 * SG_PATHNAME_NULLFREE(pPathWorkingDir) was removed from cleanup.
 */
void MyFn(test__wide_dag)(SG_context* pCtx)
{
	char bufTopDir[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathTopDir = NULL;
	char buf_client_repo_name[SG_TID_MAX_BUFFER_LENGTH];
	char buf_server_repo_name[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathWorkingDir = NULL;
	SG_vhash* pvh = NULL;
	SG_repo* pClientRepo = NULL;
	SG_client* pClient = NULL;
	char* pszidFirstChangeset = NULL;
	SG_pathname* pPathCsDir = NULL;
	SG_uint32 lines;
	SG_uint32 i, j;
	SG_repo* pServerRepo = NULL;
	SG_bool bMatch = SG_FALSE;
	char buf_filename[7];   /* holds "%d" for small i/j -- plenty of room */
	SG_varray* pvaZingMergeLog = NULL;
	SG_varray* pvaZingMergeErr = NULL;

	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, bufTopDir, sizeof(bufTopDir), 32) );
	VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__SZ(pCtx,&pPathTopDir,bufTopDir) );
	VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx,pPathTopDir) );

	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_client_repo_name, sizeof(buf_client_repo_name), 32) );
	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_server_repo_name, sizeof(buf_server_repo_name), 32) );

	INFOP("test__wide_dag", ("client repo: %s", buf_client_repo_name));
	INFOP("test__wide_dag", ("server repo: %s", buf_server_repo_name));

	/* create the repo */
	VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, buf_server_repo_name) );
	VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir) );
	VERIFY_ERR_CHECK( _ut_pt__new_repo2(pCtx, buf_server_repo_name, pPathWorkingDir, &pszidFirstChangeset) );

	/* open that repo */
	VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_server_repo_name, &pServerRepo) );

	/* create an empty clone to pull into */
	VERIFY_ERR_CHECK( SG_repo__create_empty_clone(pCtx, buf_server_repo_name, buf_client_repo_name) );
	VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_client_repo_name, &pClientRepo) );

	/* add stuff to server repo: every changeset branches off the first one,
	 * which makes the DAG wide rather than deep */
	for (i = 0; i < 20; i++)	// number of changesets
	{
		VERIFY_ERR_CHECK( _ut_pt__set_baseline(pCtx, pPathWorkingDir, pszidFirstChangeset) );

		VERIFY_ERR_CHECK( SG_sprintf(pCtx, buf_filename, sizeof(buf_filename), "%d", i) );
		VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathCsDir, pPathWorkingDir, buf_filename) );
		VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathCsDir) );

		for (j = 0; j < 1; j++)	// number of files added per changeset
		{
			VERIFY_ERR_CHECK( SG_sprintf(pCtx, buf_filename, sizeof(buf_filename), "%d", j) );
			/* random file size in [0, 2500) lines */
			lines = (int)(2500.0 * (rand() / (RAND_MAX + 1.0)));
			VERIFY_ERR_CHECK( MyFn(create_file__numbers)(pCtx, pPathCsDir, buf_filename, lines) );
		}

		SG_PATHNAME_NULLFREE(pCtx, pPathCsDir);

		VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
		VERIFY_ERR_CHECK( MyFn(commit_all)(pCtx, pPathWorkingDir, NULL) );
	}

	/* verify pre-pull repos are different */
	VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("pre-pull repos differ", !bMatch);

	/* get a client and pull from server repo to empty client repo */
	VERIFY_ERR_CHECK( SG_client__open(pCtx, buf_server_repo_name, NULL_CREDENTIAL, &pClient) ); // TODO Credentials
	VERIFY_ERR_CHECK( SG_pull__all(pCtx, buf_client_repo_name, pClient, &pvaZingMergeErr, &pvaZingMergeLog) );
	VERIFY_COND("", !pvaZingMergeErr);

	/* verify post-pull repos are identical */
	VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("post-pull repo DAGs differ", bMatch);
	VERIFY_ERR_CHECK( SG_sync__compare_repo_blobs(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("post-pull repo blobs differ", bMatch);

	VERIFY_ERR_CHECK( SG_repo__check_integrity(pCtx, pClientRepo, SG_REPO__CHECK_INTEGRITY__DAG_CONSISTENCY, SG_DAGNUM__VERSION_CONTROL, NULL, NULL) );

	SG_REPO_NULLFREE(pCtx, pClientRepo);

	/* Make another copy with clone */
	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, buf_client_repo_name, sizeof(buf_client_repo_name), 32) );
	VERIFY_ERR_CHECK( SG_repo__create_empty_clone_from_remote(pCtx, pClient, buf_client_repo_name) );
	VERIFY_ERR_CHECK( SG_pull__clone(pCtx, buf_client_repo_name, pClient) );

	/* verify post-clone repos are identical */
	VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, buf_client_repo_name, &pClientRepo) );
	VERIFY_ERR_CHECK( SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("post-clone repo DAGs differ", bMatch);
	VERIFY_ERR_CHECK( SG_sync__compare_repo_blobs(pCtx, pClientRepo, pServerRepo, &bMatch) );
	VERIFY_COND_FAIL("post-clone repo blobs differ", bMatch);

	VERIFY_ERR_CHECK( SG_repo__check_integrity(pCtx, pClientRepo, SG_REPO__CHECK_INTEGRITY__DAG_CONSISTENCY, SG_DAGNUM__VERSION_CONTROL, NULL, NULL) );

	/* TODO: verify more stuff? */

	/* Fall through to common cleanup */
fail:
	/* close client */
	SG_CLIENT_NULLFREE(pCtx, pClient);

	/* close both repos */
	SG_REPO_NULLFREE(pCtx, pServerRepo);
	SG_REPO_NULLFREE(pCtx, pClientRepo);

	SG_NULLFREE(pCtx, pszidFirstChangeset);
	SG_PATHNAME_NULLFREE(pCtx, pPathTopDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathCsDir);
	SG_VHASH_NULLFREE(pCtx, pvh);
	SG_VARRAY_NULLFREE(pCtx, pvaZingMergeLog);
	SG_VARRAY_NULLFREE(pCtx, pvaZingMergeErr);
}
/**
 * Resolve the "whoami" username for a repo reference, which may be:
 *   - NULL:           use the repo associated with the cwd (best-effort);
 *   - an http(s) URL: query the remote repo's admin id, look up the local
 *                     whoami userid setting for that admin id, then fetch
 *                     the remote users list and match the userid;
 *   - anything else:  treat it as a local descriptor name.
 *
 * @param ppUsername  [out] the username, or NULL if none could be found;
 *                    caller owns and frees.
 *
 * Fix: removed an SG_string* local (pUsername) that was declared and freed
 * on the fail path but never used.
 */
void SG_cmd_util__get_username_for_repo(
	SG_context *pCtx,
	const char *szRepoName,
	char **ppUsername
	)
{
	SG_repo * pRepo = NULL;
	char * psz_username = NULL;
	SG_curl * pCurl = NULL;
	SG_string * pUri = NULL;
	SG_string * pResponse = NULL;
	SG_int32 responseStatusCode = 0;
	SG_vhash * pRepoInfo = NULL;
	char * psz_userid = NULL;
	SG_varray * pUsers = NULL;

	SG_NULLARGCHECK_RETURN(ppUsername);

	if(!szRepoName)
	{
		// Look up username based on 'whoami' of repo associated with cwd.
		SG_ERR_IGNORE( SG_cmd_util__get_repo_from_cwd(pCtx, &pRepo, NULL) );
		if(pRepo)
			SG_ERR_IGNORE( SG_user__get_username_for_repo(pCtx, pRepo, &psz_username) );
		SG_REPO_NULLFREE(pCtx, pRepo);
	}
	else if(SG_sz__starts_with(szRepoName, "http://") || SG_sz__starts_with(szRepoName, "https://"))
	{
		// Look up username based on 'whoami' of admin id of remote repo.
		SG_ERR_CHECK( SG_curl__alloc(pCtx, &pCurl) );

		// fetch the repo's info blob: <url>.json
		SG_ERR_CHECK( SG_STRING__ALLOC__SZ(pCtx, &pUri, szRepoName) );
		SG_ERR_CHECK( SG_string__append__sz(pCtx, pUri, ".json") );

		SG_ERR_CHECK( SG_curl__reset(pCtx, pCurl) );
		SG_ERR_CHECK( SG_curl__setopt__sz(pCtx, pCurl, CURLOPT_URL, SG_string__sz(pUri)) );
		SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pResponse) );
		SG_ERR_CHECK( SG_curl__set__write_string(pCtx, pCurl, pResponse) );
		SG_ERR_CHECK( SG_curl__perform(pCtx, pCurl) );
		SG_ERR_CHECK( SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &responseStatusCode) );

		if(responseStatusCode==200)
		{
			const char * szAdminId = NULL;
			SG_ERR_CHECK( SG_VHASH__ALLOC__FROM_JSON__STRING(pCtx, &pRepoInfo, pResponse) );
			SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pRepoInfo, SG_SYNC_REPO_INFO_KEY__ADMIN_ID, &szAdminId) );

			// local setting: /admin/<admin-id>/whoami/userid
			SG_ERR_CHECK( SG_string__clear(pCtx, pUri) );
			SG_ERR_CHECK( SG_string__append__format(pCtx, pUri, "/admin/%s/whoami/userid", szAdminId) );
			SG_ERR_IGNORE( SG_localsettings__get__sz(pCtx, SG_string__sz(pUri), NULL, &psz_userid, NULL) );

			if(psz_userid)
			{
				// We now have the userid. Look up the username.
				SG_ERR_CHECK( SG_string__clear(pCtx, pUri) );
				SG_ERR_CHECK( SG_string__append__format(pCtx, pUri, "%s/users.json", szRepoName) );

				SG_ERR_CHECK( SG_curl__reset(pCtx, pCurl) );
				SG_ERR_CHECK( SG_curl__setopt__sz(pCtx, pCurl, CURLOPT_URL, SG_string__sz(pUri)) );
				SG_ERR_CHECK( SG_string__clear(pCtx, pResponse) );
				SG_ERR_CHECK( SG_curl__set__write_string(pCtx, pCurl, pResponse) );
				SG_ERR_CHECK( SG_curl__perform(pCtx, pCurl) );
				SG_ERR_CHECK( SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &responseStatusCode) );

				if(responseStatusCode==200)
				{
					SG_uint32 i, nUsers;
					SG_ERR_CHECK( SG_VARRAY__ALLOC__FROM_JSON__STRING(pCtx, &pUsers, pResponse) );
					SG_ERR_CHECK( SG_varray__count(pCtx, pUsers, &nUsers) );
					for(i=0; i<nUsers; ++i)
					{
						SG_vhash * pUser = NULL;          // borrowed from pUsers
						const char * psz_recid = NULL;    // borrowed
						SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pUsers, i, &pUser) );
						SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pUser, "recid", &psz_recid) );
						if(!strcmp(psz_recid, psz_userid))
						{
							const char * psz_name = NULL;
							SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pUser, "name", &psz_name) );
							SG_ERR_CHECK( SG_STRDUP(pCtx, psz_name, &psz_username) );
							break;
						}
					}
					SG_VARRAY_NULLFREE(pCtx, pUsers);
				}
				SG_NULLFREE(pCtx, psz_userid);
			}
			SG_VHASH_NULLFREE(pCtx, pRepoInfo);
		}
		SG_STRING_NULLFREE(pCtx, pResponse);
		SG_STRING_NULLFREE(pCtx, pUri);
		SG_CURL_NULLFREE(pCtx, pCurl);
	}
	else
	{
		// Look up username based on 'whoami' of repo provided.
		SG_ERR_CHECK( SG_REPO__OPEN_REPO_INSTANCE(pCtx, szRepoName, &pRepo) );
		SG_ERR_IGNORE( SG_user__get_username_for_repo(pCtx, pRepo, &psz_username) );
		SG_REPO_NULLFREE(pCtx, pRepo);
	}

	*ppUsername = psz_username;

	return;
fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_NULLFREE(pCtx, psz_username);
	SG_CURL_NULLFREE(pCtx, pCurl);
	SG_STRING_NULLFREE(pCtx, pUri);
	SG_STRING_NULLFREE(pCtx, pResponse);
	SG_VHASH_NULLFREE(pCtx, pRepoInfo);
	SG_NULLFREE(pCtx, psz_userid);
	SG_VARRAY_NULLFREE(pCtx, pUsers);
}
/**
 * Exercise comments, stamps, and tags on a single changeset: commit one
 * changeset, attach a comment/stamp/tag, verify each via lookup; then add
 * a second tag, verify the forward and reverse tag tables, remove it, and
 * verify the tables return to their one-tag state.
 *
 * Returns 1 on success, 0 on failure.
 *
 * Fixes: the fail: path previously freed `pvh`, which is a reference
 * BORROWED from `pva` (SG_varray__get__vhash does not transfer ownership),
 * risking an invalid free, while leaking pva/prb/prb_reversed/pRepo/pdn;
 * and one call used SG_ERR_CHECK where the rest use VERIFY_ERR_CHECK.
 */
int u0050_logstuff_test__1(SG_context * pCtx, SG_pathname* pPathTopDir)
{
	char bufName[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathWorkingDir = NULL;
	SG_pathname* pPathFile = NULL;
	SG_vhash* pvh = NULL;            /* borrowed from pva -- never freed directly */
	SG_dagnode* pdn = NULL;
	const char* psz_hid_cs = NULL;   /* borrowed from pdn */
	SG_repo* pRepo = NULL;
	SG_uint32 count;
	SG_rbtree* prb = NULL;
	SG_varray* pva = NULL;
	SG_rbtree* prb_reversed = NULL;
	const char* psz_val = NULL;      /* borrowed from pvh */
	SG_audit q;

	VERIFY_ERR_CHECK( SG_tid__generate2(pCtx, bufName, sizeof(bufName), 32) );

	/* create the working dir */
	VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, bufName) );
	VERIFY_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir) );

	/* add stuff */
	VERIFY_ERR_CHECK( u0050_logstuff__create_file__numbers(pCtx, pPathWorkingDir, "aaa", 20) );

	/* create the repo */
	VERIFY_ERR_CHECK( _ut_pt__new_repo(pCtx, bufName, pPathWorkingDir) );
	VERIFY_ERR_CHECK( _ut_pt__addremove(pCtx, pPathWorkingDir) );
	VERIFY_ERR_CHECK( u0050_logstuff__commit_all(pCtx, pPathWorkingDir, &pdn) );
	VERIFY_ERR_CHECK( SG_dagnode__get_id_ref(pCtx, pdn, &psz_hid_cs) );

	VERIFY_ERR_CHECK( SG_repo__open_repo_instance(pCtx, bufName, &pRepo) );

#define MY_COMMENT "The name of this new file sucks! What kind of a name is 'aaa'?"

	VERIFY_ERR_CHECK( SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS) );
	VERIFY_ERR_CHECK( SG_vc_comments__add(pCtx, pRepo, psz_hid_cs, MY_COMMENT, &q) );
	VERIFY_ERR_CHECK( SG_vc_stamps__add(pCtx, pRepo, psz_hid_cs, "crap", &q) );
	VERIFY_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "tcrap", &q) );

	/* comment round-trip */
	VERIFY_ERR_CHECK( SG_vc_comments__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (1 == count));
	VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
	VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "text", &psz_val) );
	VERIFY_COND("match", (0 == strcmp(psz_val, MY_COMMENT)) );
	SG_VARRAY_NULLFREE(pCtx, pva);

	/* stamp round-trip */
	VERIFY_ERR_CHECK( SG_vc_stamps__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (1 == count));
	VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
	VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "stamp", &psz_val) );
	VERIFY_COND("match", (0 == strcmp(psz_val, "crap")) );
	SG_VARRAY_NULLFREE(pCtx, pva);

	/* tag round-trip */
	VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (1 == count));
	VERIFY_ERR_CHECK( SG_varray__get__vhash(pCtx, pva, 0, &pvh) );
	VERIFY_ERR_CHECK( SG_vhash__get__sz(pCtx, pvh, "tag", &psz_val) );
	VERIFY_COND("match", (0 == strcmp(psz_val, "tcrap")) );
	SG_VARRAY_NULLFREE(pCtx, pva);

	/* forward and reverse tag tables with one tag */
	VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
	VERIFY_COND("count", (1 == count));
	VERIFY_ERR_CHECK( SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb_reversed, &count) );
	VERIFY_COND("count", (1 == count));

	{
		const char* psz_my_key = NULL;
		const char* psz_my_val = NULL;
		SG_bool b;

		VERIFY_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val) );
		VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap")) );
		VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs)) );
	}

	SG_RBTREE_NULLFREE(pCtx, prb_reversed);
	SG_RBTREE_NULLFREE(pCtx, prb);

	/* add a second tag; reverse table still has one entry (same csid key) */
	VERIFY_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "whatever", &q) );
	VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (2 == count));
	SG_VARRAY_NULLFREE(pCtx, pva);

	VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
	VERIFY_COND("count", (2 == count));
	VERIFY_ERR_CHECK( SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb_reversed, &count) );
	VERIFY_COND("count", (1 == count));

	{
		const char* psz_my_key = NULL;
		const char* psz_my_val = NULL;
		SG_bool b;

		VERIFY_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val) );
		VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs)) );
		/* we don't know whether psz_my_val is tcrap or whatever. */
		// VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap")) );
	}

	SG_RBTREE_NULLFREE(pCtx, prb_reversed);
	SG_RBTREE_NULLFREE(pCtx, prb);

	{
		const char* psz_remove = "whatever";

		VERIFY_ERR_CHECK( SG_vc_tags__remove(pCtx, pRepo, &q, 1, &psz_remove) );

		/* Note that by removing whatever, we are bringing the tags list back
		 * to a state where it has been before (just tcrap). This changeset in
		 * the tags table will have its own csid, because the parentage is
		 * different, but it's root idtrie HID will be the same as a previous
		 * node. */
	}

	VERIFY_ERR_CHECK( SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva) );
	VERIFY_ERR_CHECK( SG_varray__count(pCtx, pva, &count) );
	VERIFY_COND("count", (1 == count));
	SG_VARRAY_NULLFREE(pCtx, pva);

	VERIFY_ERR_CHECK( SG_vc_tags__list(pCtx, pRepo, &prb) );
	VERIFY_ERR_CHECK( SG_rbtree__count(pCtx, prb, &count) );
	VERIFY_COND("count", (1 == count));
	SG_RBTREE_NULLFREE(pCtx, prb);

	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathFile);

	return 1;

fail:
	/* pvh and psz_val are borrowed (owned by pva / pvh); freeing pva is enough */
	SG_VARRAY_NULLFREE(pCtx, pva);
	SG_RBTREE_NULLFREE(pCtx, prb_reversed);
	SG_RBTREE_NULLFREE(pCtx, prb);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathFile);
	return 0;
}
/**
 * Test: create a server repo, commit two changesets, pull everything into an
 * empty clone, and verify the two repos end up identical (DAGs and blobs).
 *
 * Control falls through to 'fail' on success on purpose: the cleanup below the
 * label is the common cleanup path for both outcomes.
 *
 * Fixes vs. previous version: removed a duplicated
 * SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir) in the cleanup section and the
 * unused local 'pvh' (declared and freed, never otherwise touched).
 */
void MyFn(test__simple)(SG_context* pCtx)
{
	char bufTopDir[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathTopDir = NULL;
	char buf_client_repo_name[SG_TID_MAX_BUFFER_LENGTH];
	char buf_server_repo_name[SG_TID_MAX_BUFFER_LENGTH];
	SG_pathname* pPathWorkingDir = NULL;
	SG_repo* pClientRepo = NULL;
	SG_client* pClient = NULL;
	SG_repo* pServerRepo = NULL;
	SG_bool bMatch = SG_FALSE;
	SG_varray* pvaZingMergeLog = NULL;
	SG_varray* pvaZingMergeErr = NULL;

	// Unique (TID-based) names so concurrent/repeated test runs don't collide.
	VERIFY_ERR_CHECK(  SG_tid__generate2(pCtx, bufTopDir, sizeof(bufTopDir), 32)  );
	VERIFY_ERR_CHECK(  SG_PATHNAME__ALLOC__SZ(pCtx,&pPathTopDir,bufTopDir)  );
	VERIFY_ERR_CHECK(  SG_fsobj__mkdir__pathname(pCtx,pPathTopDir)  );

	VERIFY_ERR_CHECK(  SG_tid__generate2(pCtx, buf_client_repo_name, sizeof(buf_client_repo_name), 32)  );
	VERIFY_ERR_CHECK(  SG_tid__generate2(pCtx, buf_server_repo_name, sizeof(buf_server_repo_name), 32)  );

	INFOP("test__simple", ("client repo: %s", buf_client_repo_name));
	INFOP("test__simple", ("server repo: %s", buf_server_repo_name));

	/* create the repo */
	VERIFY_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, buf_server_repo_name)  );
	VERIFY_ERR_CHECK(  SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir)  );
	VERIFY_ERR_CHECK(  _ut_pt__new_repo2(pCtx, buf_server_repo_name, pPathWorkingDir, NULL)  );

	/* open that repo */
	VERIFY_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, buf_server_repo_name, &pServerRepo)  );

	/* create an empty clone to pull into */
	VERIFY_ERR_CHECK(  SG_repo__create_empty_clone(pCtx, buf_server_repo_name, buf_client_repo_name)  );
	VERIFY_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, buf_client_repo_name, &pClientRepo)  );

	/* add stuff to server repo: two commits, so the pull has real work to do */
	VERIFY_ERR_CHECK(  MyFn(create_file__numbers)(pCtx, pPathWorkingDir, "aaa", 10)  );
	VERIFY_ERR_CHECK(  _ut_pt__addremove(pCtx, pPathWorkingDir)  );
	VERIFY_ERR_CHECK(  MyFn(commit_all)(pCtx, pPathWorkingDir, NULL)  );

	VERIFY_ERR_CHECK(  MyFn(create_file__numbers)(pCtx, pPathWorkingDir, "bbb", 10)  );
	VERIFY_ERR_CHECK(  _ut_pt__addremove(pCtx, pPathWorkingDir)  );
	VERIFY_ERR_CHECK(  MyFn(commit_all)(pCtx, pPathWorkingDir, NULL)  );

	/* verify pre-pull repos are different */
	VERIFY_ERR_CHECK(  SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch)  );
	VERIFY_COND_FAIL("pre-pull repos differ", !bMatch);

	/* get a client and pull from server repo to empty client repo */
	VERIFY_ERR_CHECK(  SG_client__open(pCtx, buf_server_repo_name, NULL_CREDENTIAL, &pClient)  ); // TODO Credentials
	VERIFY_ERR_CHECK(  SG_pull__all(pCtx, buf_client_repo_name, pClient, &pvaZingMergeErr, &pvaZingMergeLog)  );
	VERIFY_COND("", !pvaZingMergeErr);
	SG_CLIENT_NULLFREE(pCtx, pClient);

	/* verify post-pull repos are identical */
	VERIFY_ERR_CHECK(  SG_sync__compare_repo_dags(pCtx, pClientRepo, pServerRepo, &bMatch)  );
	VERIFY_COND_FAIL("post-pull repo DAGs differ", bMatch);
	VERIFY_ERR_CHECK(  SG_sync__compare_repo_blobs(pCtx, pClientRepo, pServerRepo, &bMatch)  );
	VERIFY_COND_FAIL("post-pull repo blobs differ", bMatch);

	VERIFY_ERR_CHECK(  SG_repo__check_integrity(pCtx, pClientRepo, SG_REPO__CHECK_INTEGRITY__DAG_CONSISTENCY, SG_DAGNUM__VERSION_CONTROL, NULL, NULL)  );

	/* TODO: verify more stuff? */

	/* Fall through to common cleanup */

fail:
	/* close client */
	SG_CLIENT_NULLFREE(pCtx, pClient);

	/* close both repos */
	SG_REPO_NULLFREE(pCtx, pServerRepo);
	SG_REPO_NULLFREE(pCtx, pClientRepo);

	SG_PATHNAME_NULLFREE(pCtx, pPathTopDir);
	SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
	SG_VARRAY_NULLFREE(pCtx, pvaZingMergeLog);
	SG_VARRAY_NULLFREE(pCtx, pvaZingMergeErr);
}
/**
 * Run 'history' using the working directory to supply context.
 *
 * Opens a read-only WC TX to translate input paths to GIDs and to learn the
 * WD's attached branch and baseline parents.  When no revisions were named
 * and bDetectCurrentBranch is set, builds a rev spec from the attached branch
 * (plus any WD parents not yet in the branch).  Then resolves starting
 * changesets, decides whether a full dag walk is needed, and delegates to
 * SG_history__run.
 *
 * Output ownership: on success the caller owns *ppResult, *ppHistoryToken,
 * and (if requested) *ppvhBranchPile.  All locals are released at 'fail',
 * which is also the success exit path after the final block.
 */
void sg_vv2__history__working_folder(
	SG_context * pCtx,
	const SG_stringarray * psaInputs,
	const SG_rev_spec* pRevSpec,
	const SG_rev_spec* pRevSpec_single_revisions,
	const char* pszUser,
	const char* pszStamp,
	SG_bool bDetectCurrentBranch,
	SG_uint32 nResultLimit,
	SG_bool bHideObjectMerges,
	SG_int64 nFromDate,
	SG_int64 nToDate,
	SG_bool bListAll,
	SG_bool* pbHasResult,
	SG_vhash** ppvhBranchPile,
	SG_history_result ** ppResult,
	SG_history_token ** ppHistoryToken)
{
	SG_repo * pRepo = NULL;
	SG_stringarray * pStringArrayGIDs = NULL;
	SG_stringarray * pStringArrayChangesets = NULL;
	SG_stringarray * pStringArrayChangesetsMissing = NULL;
	SG_stringarray * pStringArrayChangesets_single_revisions = NULL;
	SG_bool bRecommendDagWalk = SG_FALSE;
	SG_bool bLeaves = SG_FALSE;
	const char * pszBranchName = NULL;	// we do not own this
	SG_vhash* pvhBranchPile = NULL;
	SG_varray* pvaParents = NULL;		// we do not own this
	SG_bool bMyBranchWalkRecommendation = SG_FALSE;
	SG_rev_spec* pRevSpec_Allocated = NULL;
	SG_wc_tx * pWcTx = NULL;
	SG_vhash * pvhInfo = NULL;
	SG_uint32 count_args = 0;
	SG_uint32 countRevsSpecified = 0;

	if (psaInputs)
		SG_ERR_CHECK( SG_stringarray__count(pCtx, psaInputs, &count_args) );

	// Use the WD to try to get the initial info.
	// I'm going to deviate from the model and use
	// a read-only TX here so that I can get a bunch
	// of fields that we need later.
	SG_ERR_CHECK( SG_WC_TX__ALLOC__BEGIN(pCtx, &pWcTx, NULL, SG_TRUE) );

	if (count_args > 0)
		SG_ERR_CHECK( SG_wc_tx__get_item_gid__stringarray(pCtx, pWcTx, psaInputs, &pStringArrayGIDs) );

	SG_ERR_CHECK( SG_wc_tx__get_wc_info(pCtx, pWcTx, &pvhInfo) );
	SG_ERR_CHECK( SG_wc_tx__get_repo_and_wd_top(pCtx, pWcTx, &pRepo, NULL) );

	/* If no revisions were specified, and the caller wants us to use the current branch,
	 * create a revision spec with the current branch. */

	// Always work on a private copy so we can add revs without
	// mutating the caller's (const) rev spec.
	if (pRevSpec)
	{
		SG_ERR_CHECK( SG_REV_SPEC__ALLOC__COPY(pCtx, pRevSpec, &pRevSpec_Allocated) );
		SG_ERR_CHECK( SG_rev_spec__count(pCtx, pRevSpec_Allocated, &countRevsSpecified) );
	}
	else
	{
		SG_ERR_CHECK( SG_REV_SPEC__ALLOC(pCtx, &pRevSpec_Allocated) );
	}

	// Single revisions count toward "did the user name anything",
	// which suppresses the current-branch detection below.
	if (pRevSpec_single_revisions != NULL)
	{
		SG_uint32 countRevsSpecified_singles = 0;
		SG_ERR_CHECK( SG_rev_spec__count(pCtx, pRevSpec_single_revisions, &countRevsSpecified_singles) );
		countRevsSpecified += countRevsSpecified_singles;
	}

	if (bDetectCurrentBranch && countRevsSpecified == 0)
	{
		SG_ERR_CHECK( SG_vhash__check__sz(pCtx, pvhInfo, "branch", &pszBranchName) );
		if (pszBranchName)
		{
			/* The working folder is attached to a branch. Does it exist? */
			SG_bool bHasBranches = SG_FALSE;
			SG_bool bBranchExists = SG_FALSE;

			SG_ERR_CHECK( SG_vc_branches__cleanup(pCtx, pRepo, &pvhBranchPile) );
			SG_ERR_CHECK( SG_vhash__has(pCtx, pvhBranchPile, "branches", &bHasBranches) );
			if (bHasBranches)
			{
				SG_vhash* pvhRefBranches;
				SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhBranchPile, "branches", &pvhRefBranches) );
				SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRefBranches, pszBranchName, &bBranchExists) );
			}

			if (bBranchExists)
			{
				SG_uint32 numParents, i;
				const char* pszRefParent;

				/* If that branch exists, just add to our rev spec. */
				SG_ERR_CHECK( SG_rev_spec__add_branch(pCtx, pRevSpec_Allocated, pszBranchName) );

				/* Plus, if the working folder's parents are not in the branch (yet), add them as well
				 * (they'll be in it after the user commits something...). */
				SG_ERR_CHECK( SG_vhash__get__varray(pCtx, pvhInfo, "parents", &pvaParents) );
				SG_ERR_CHECK( SG_varray__count(pCtx, pvaParents, &numParents) );
				for (i = 0; i < numParents; i++)
				{
					SG_bool already_in_rev_spec = SG_FALSE;
					SG_ERR_CHECK( SG_varray__get__sz(pCtx, pvaParents, i, &pszRefParent) );
					SG_ERR_CHECK( SG_rev_spec__contains(pCtx, pRepo, pRevSpec_Allocated, pszRefParent, &already_in_rev_spec) );
					if(!already_in_rev_spec)
						SG_ERR_CHECK( SG_rev_spec__add_rev(pCtx, pRevSpec_Allocated, pszRefParent) );
				}
			}
			else
			{
				/* If the branch doesn't exist, add the working folder's baseline(s) to the rev spec
				 * and force a dag walk. */
				SG_uint32 numParents, i;
				const char* pszRefParent;

				SG_ERR_CHECK( SG_vhash__get__varray(pCtx, pvhInfo, "parents", &pvaParents) );
				SG_ERR_CHECK( SG_varray__count(pCtx, pvaParents, &numParents) );
				for (i = 0; i < numParents; i++)
				{
					SG_ERR_CHECK( SG_varray__get__sz(pCtx, pvaParents, i, &pszRefParent) );
					SG_ERR_CHECK( SG_rev_spec__add_rev(pCtx, pRevSpec_Allocated, pszRefParent) );
				}

				bMyBranchWalkRecommendation = SG_TRUE;
			}
		}
	}

	// Determine the starting changeset IDs.  strBranch and bLeaves control this.
	// We do this step here, so that repo paths can be looked up before we call into history__core.
	SG_ERR_CHECK( sg_vv2__history__get_starting_changesets(pCtx, pRepo, pRevSpec_Allocated,
														   &pStringArrayChangesets,
														   &pStringArrayChangesetsMissing,
														   &bRecommendDagWalk,
														   &bLeaves) );
	if (pStringArrayChangesetsMissing)
	{
		// See K2177, K1322, W0836, W8132.  We requested specific starting
		// points and ran into some csets that were referenced (by --tag
		// or --branch) that are not present in the local repo.  Try to
		// silently ignore them.
		SG_uint32 nrFound = 0;
		SG_ERR_CHECK( SG_stringarray__count(pCtx, pStringArrayChangesets, &nrFound) );
		if (nrFound > 0)
		{
			// Yes there were missing csets, but we still found some
			// of the referenced ones.  Just ignore the missing ones.
			// This should behave just like we had the older tag/branch
			// dag prior to the push -r on the vc dag.
		}
		else
		{
			const char * psz_0;
			// TODO 2012/10/19 Do we want a different message if the number of missing is > 1 ?
			SG_ERR_CHECK( SG_stringarray__get_nth(pCtx, pStringArrayChangesetsMissing, 0, &psz_0) );
			SG_ERR_THROW2( SG_ERR_CHANGESET_BLOB_NOT_FOUND, (pCtx, "%s", psz_0) );
		}
	}

	bRecommendDagWalk = bRecommendDagWalk || bMyBranchWalkRecommendation;

	// This hack is here to detect when we're being asked for the parent of a certain
	// object from the sg_parents code.  parents always wants the dag walk.
	// The better solution would be to allow users to pass in a flag about their dagwalk
	// preferences
	if (count_args == 1 && nResultLimit == 1)
		bRecommendDagWalk = SG_TRUE;

	if (bListAll)
	{
		// See W8493.  If they gave us a --list-all along with a --rev or --tag, they
		// want to force us to show the full history rather than just the info for the
		// named cset.
		bRecommendDagWalk = SG_TRUE;
	}

	if (pRevSpec_single_revisions)
	{
		// We DO NOT pass a psaMissingHids here because we want
		// it to throw if the user names a missing cset.
		SG_ERR_CHECK( SG_rev_spec__get_all__repo__dedup(pCtx, pRepo, pRevSpec_single_revisions, SG_TRUE,
														&pStringArrayChangesets_single_revisions, NULL) );
	}

	// TODO 2012/07/03 The deviates from the model.  This call directly returns the
	// TODO            allocated data into the caller's pointers.  If anything fails
	// TODO            (such as the call to get the branches below), we'll probably
	// TODO            leak the result and token.
	SG_ERR_CHECK( SG_history__run(pCtx, pRepo, pStringArrayGIDs,
								  pStringArrayChangesets, pStringArrayChangesets_single_revisions,
								  pszUser, pszStamp, nResultLimit, bLeaves, bHideObjectMerges,
								  nFromDate, nToDate, bRecommendDagWalk, SG_FALSE,
								  pbHasResult, ppResult, ppHistoryToken) );

	/* This is kind of a hack.  History callers often need branch data to format ouput.
	 * But we open the repo down here.  I didn't want to open/close it again.  And there's logic
	 * in here about which repo to open.  So instead, we do this. */
	if (ppvhBranchPile)
	{
		// Reuse the pile fetched during branch detection if we have one;
		// otherwise fetch it fresh for the caller.
		if (pvhBranchPile)
		{
			*ppvhBranchPile = pvhBranchPile;
			pvhBranchPile = NULL;	// ownership transferred to caller
		}
		else
			SG_ERR_CHECK( SG_vc_branches__cleanup(pCtx, pRepo, ppvhBranchPile) );
	}

fail:
	// TX was read-only, so cancel (not commit) is the correct unwind on both paths.
	SG_ERR_IGNORE( SG_wc_tx__cancel(pCtx, pWcTx) );
	SG_WC_TX__NULLFREE(pCtx, pWcTx);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec_Allocated);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesetsMissing);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets_single_revisions);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayGIDs);
	SG_VHASH_NULLFREE(pCtx, pvhBranchPile);
	SG_VHASH_NULLFREE(pCtx, pvhInfo);
	SG_REPO_NULLFREE(pCtx, pRepo);
}
/**
 * Begin a remote push-clone: validate and claim the descriptor name, create an
 * empty repo instance with the source repo's identity, create the clone
 * staging area, and stamp the new descriptor with the clone id (status
 * CLONING) so the later commit step can verify it is finishing the same clone.
 *
 * On success, *ppszCloneId points at the staging clone id (owned by the
 * staging layer, not the caller — presumably; confirm against
 * SG_staging__clone__create's contract).
 *
 * BUGFIX: SG_staging__clone__create was previously wrapped in
 * SG_ERR_CHECK_RETURN, which on failure returned immediately — skipping the
 * 'fail' label, leaking pNewRepo, and never aborting the open descriptor
 * handle 'ph'.  It now goes through the normal fail-path cleanup.
 */
void SG_sync_remote__push_clone__begin(
	SG_context* pCtx,
	const char* psz_repo_descriptor_name,
	const SG_vhash* pvhRepoInfo,
	const char** ppszCloneId)
{
	const char* psz_repo_id = NULL;
	const char* psz_admin_id = NULL;
	const char* psz_hash_method = NULL;
	const char* pszRefValidatedName = NULL;
	SG_vhash* pvhFullDescriptor = NULL; // pNewRepo owns this. Don't free.
	SG_vhash* pvhDescriptorPartial = NULL;
	SG_closet_descriptor_handle* ph = NULL;
	SG_repo* pNewRepo = NULL;
	const char* pszRefCloneId = NULL;

	SG_NULLARGCHECK_RETURN(ppszCloneId);

	SG_ERR_CHECK_RETURN( _remote_clone_allowed(pCtx) );

	/* We'll create a descriptor and empty repo immediately, so that we validate and claim the name
	 * before a potentially long upload. */

	// These run before any resource is acquired, so CHECK_RETURN is safe here.
	SG_ERR_CHECK_RETURN( SG_vhash__get__sz(pCtx, pvhRepoInfo, SG_SYNC_REPO_INFO_KEY__REPO_ID, &psz_repo_id) );
	SG_ERR_CHECK_RETURN( SG_vhash__get__sz(pCtx, pvhRepoInfo, SG_SYNC_REPO_INFO_KEY__ADMIN_ID, &psz_admin_id) );
	SG_ERR_CHECK_RETURN( SG_vhash__get__sz(pCtx, pvhRepoInfo, SG_SYNC_REPO_INFO_KEY__HASH_METHOD, &psz_hash_method) );

	SG_ERR_CHECK( SG_closet__descriptors__add_begin(pCtx, psz_repo_descriptor_name, NULL,
		psz_repo_id, psz_admin_id, &pszRefValidatedName, &pvhDescriptorPartial, &ph) );

	SG_ERR_CHECK( SG_repo__create_repo_instance(pCtx, pszRefValidatedName, pvhDescriptorPartial,
		SG_TRUE, psz_hash_method, psz_repo_id, psz_admin_id, &pNewRepo) );
	SG_VHASH_NULLFREE(pCtx, pvhDescriptorPartial);

	// Was SG_ERR_CHECK_RETURN: a failure here must reach 'fail' so that
	// pNewRepo is freed and the descriptor claim is aborted.
	SG_ERR_CHECK( SG_staging__clone__create(pCtx, psz_repo_descriptor_name, pvhRepoInfo, &pszRefCloneId) );

	/* We temporarily add the clone ID to the repo descriptor so that we can verify
	 * we're committing to the correct repo later, when the fragball's been uploaded. */
	SG_ERR_CHECK( SG_repo__get_descriptor__ref(pCtx, pNewRepo, &pvhFullDescriptor) );
	SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhFullDescriptor, SG_SYNC_DESCRIPTOR_KEY__CLONE_ID, pszRefCloneId) );
	SG_ERR_CHECK( SG_closet__descriptors__add_commit(pCtx, &ph, pvhFullDescriptor, SG_REPO_STATUS__CLONING) );

	SG_REPO_NULLFREE(pCtx, pNewRepo);

	*ppszCloneId = pszRefCloneId;

	return;

fail:
	SG_VHASH_NULLFREE(pCtx, pvhDescriptorPartial);
	SG_REPO_NULLFREE(pCtx, pNewRepo);
	// Release the claimed descriptor name; ignore secondary errors during unwind.
	SG_ERR_IGNORE( SG_closet__descriptors__add_abort(pCtx, &ph) );
}
static void _fillInUserSelection( SG_context *pCtx, SG_string *pstrRepoDescriptorName, SG_string *replacement) { SG_repo *repo = NULL; SG_varray *users = NULL; SG_vhash *user = NULL; SG_uint32 i = 0; SG_uint32 count; SG_string *semail = NULL; SG_string *suid = NULL; SG_string *entry = NULL; SG_string *curuid = NULL; SG_ERR_CHECK( SG_string__clear(pCtx, replacement) ); SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, SG_string__sz(pstrRepoDescriptorName), &repo) ); SG_ERR_CHECK( SG_user__list_all(pCtx, repo, &users) ); SG_ERR_CHECK( SG_varray__count(pCtx, users, &count) ); SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &semail) ); SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &suid) ); SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &entry) ); SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &curuid) ); SG_ERR_CHECK( _getUserId(pCtx, repo, curuid) ); for ( i = 0; i < count; ++i ) { const char *uid = NULL; const char *email = NULL; const char *selected = NULL; SG_ERR_CHECK( SG_varray__get__vhash(pCtx, users, i, &user) ); SG_ERR_CHECK( SG_vhash__get__sz(pCtx, user, "recid", &uid) ); SG_ERR_CHECK( SG_vhash__get__sz(pCtx, user, "email", &email) ); SG_ERR_CHECK( _getEncoded(pCtx, uid, suid) ); SG_ERR_CHECK( _getEncoded(pCtx, email, semail) ); if (eq(SG_string__sz(curuid), uid)) { selected = " selected='selected' "; } else { selected = ""; } SG_ERR_CHECK( SG_string__sprintf(pCtx, entry, "<option value=\"%s\" %s>%s</option>", SG_string__sz(suid), selected, SG_string__sz(semail)) ); SG_ERR_CHECK( SG_string__append__string(pCtx, replacement, entry) ); } fail: SG_VARRAY_NULLFREE(pCtx, users); SG_REPO_NULLFREE(pCtx, repo); SG_STRING_NULLFREE(pCtx, semail); SG_STRING_NULLFREE(pCtx, suid); SG_STRING_NULLFREE(pCtx, entry); SG_STRING_NULLFREE(pCtx, curuid); }
/**
 * 'vv merge-preview' command: report which changesets a merge would bring in.
 *
 * Resolves a baseline and merge-target HID from either an explicit --repo plus
 * two rev specs, or from the current working copy (using its parent(s), or
 * computing the preview target when only one parent exists).  Then classifies
 * the relationship between the two csets and prints either "no merge needed",
 * or the list of incoming changesets with a fast-forward/regular summary line.
 *
 * Note the dual cleanup: the success path frees everything it still owns and
 * returns before the 'fail' label; the fail path additionally frees
 * psaRevSpecs, which on the success path has already been freed in each branch.
 */
void do_cmd_merge_preview(SG_context * pCtx, SG_option_state * pOptSt)
{
	SG_repo * pRepo = NULL;
	SG_uint32 countRevSpecs = 0;
	SG_stringarray * psaRevSpecs = NULL;
	const char * const * ppszRevSpecs = NULL;
	SG_stringarray * psaNewChangesets = NULL;
	const char * const * ppszNewChangesets = NULL;
	SG_uint32 countNewChangesets = 0;
	char * pszHidBaseline = NULL;
	char * pszHidMergeTarget = NULL;
	SG_dagquery_relationship relationship;
	SG_vhash * pvhPileOfCleanBranches = NULL;
	SG_uint32 i = 0;

	// At most two rev specs make sense: baseline and merge target.
	countRevSpecs = 0;
	if (pOptSt->pRevSpec)
	{
		SG_ERR_CHECK( SG_rev_spec__count(pCtx, pOptSt->pRevSpec, &countRevSpecs) );
		if(countRevSpecs>2)
			SG_ERR_THROW(SG_ERR_USAGE);
	}

	if(pOptSt->psz_repo!=NULL)
	{
		// --repo given: no working copy to infer from, so BOTH revs are required.
		if(countRevSpecs==2)
		{
			SG_ERR_CHECK( SG_REPO__OPEN_REPO_INSTANCE(pCtx, pOptSt->psz_repo, &pRepo) );
			SG_ERR_CHECK( SG_rev_spec__get_all__repo(pCtx, pRepo, pOptSt->pRevSpec, SG_FALSE, &psaRevSpecs, NULL) );
			SG_ERR_CHECK( SG_stringarray__sz_array(pCtx, psaRevSpecs, &ppszRevSpecs) );
			SG_ERR_CHECK( SG_STRDUP(pCtx, ppszRevSpecs[0], &pszHidBaseline) );
			SG_ERR_CHECK( SG_STRDUP(pCtx, ppszRevSpecs[1], &pszHidMergeTarget) );
			SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
		}
		else
		{
			SG_ERR_THROW2(SG_ERR_USAGE, (pCtx, "When using the --repo option, you must provide both the BASELINE-REVSPEC and the OTHER-REVSPEC."));
		}
	}
	else
	{
		// No --repo: use the repo associated with the current working directory.
		SG_ERR_CHECK( SG_cmd_util__get_repo_from_cwd(pCtx, &pRepo, NULL) );
		if(countRevSpecs==2)
		{
			SG_ERR_CHECK( SG_rev_spec__get_all__repo(pCtx, pRepo, pOptSt->pRevSpec, SG_FALSE, &psaRevSpecs, NULL) );
			SG_ERR_CHECK( SG_stringarray__sz_array(pCtx, psaRevSpecs, &ppszRevSpecs) );
			SG_ERR_CHECK( SG_STRDUP(pCtx, ppszRevSpecs[0], &pszHidBaseline) );
			SG_ERR_CHECK( SG_STRDUP(pCtx, ppszRevSpecs[1], &pszHidMergeTarget) );
			SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
		}
		else
		{
			// Baseline comes from the WD's first parent.  The target is the
			// second parent (if mid-merge) or is computed like 'vv merge' would.
			SG_uint32 countBaselines = 0;
			SG_ERR_CHECK( SG_wc__get_wc_parents__stringarray(pCtx, NULL, &psaRevSpecs) );
			SG_ERR_CHECK( SG_stringarray__sz_array_and_count(pCtx, psaRevSpecs, &ppszRevSpecs, &countBaselines) );
			SG_ERR_CHECK( SG_STRDUP(pCtx, ppszRevSpecs[0], &pszHidBaseline) );
			if(countBaselines==2)
			{
				SG_ERR_CHECK( SG_STRDUP(pCtx, ppszRevSpecs[1], &pszHidMergeTarget) );
			}
			else
			{
				SG_wc_merge_args merge_args;
				merge_args.pRevSpec = pOptSt->pRevSpec;
				merge_args.bNoAutoMergeFiles = SG_TRUE;			// doesn't matter
				merge_args.bComplainIfBaselineNotLeaf = SG_FALSE;	// doesn't matter
				SG_ERR_CHECK( SG_wc__merge__compute_preview_target(pCtx, NULL, &merge_args, &pszHidMergeTarget) );
			}
			SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
		}
	}

	// Classify target-vs-baseline to decide what to report.
	SG_ERR_CHECK( SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL,
		pszHidMergeTarget, pszHidBaseline,
		SG_FALSE, SG_FALSE,
		&relationship) );

	if(relationship==SG_DAGQUERY_RELATIONSHIP__ANCESTOR || relationship==SG_DAGQUERY_RELATIONSHIP__SAME)
	{
		SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDOUT, "The baseline already includes the merge target. No merge is needed.\n") );
	}
	else
	{
		// List each changeset the merge would bring in, then a summary line.
		SG_ERR_CHECK( SG_dagquery__find_new_since_common(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, pszHidBaseline, pszHidMergeTarget, &psaNewChangesets) );
		SG_ERR_CHECK( SG_stringarray__sz_array_and_count(pCtx, psaNewChangesets, &ppszNewChangesets, &countNewChangesets) );
		SG_ERR_CHECK( SG_vc_branches__cleanup(pCtx, pRepo, &pvhPileOfCleanBranches) );
		for(i=0; i<countNewChangesets; ++i)
		{
			SG_ERR_CHECK( SG_cmd_util__dump_log(pCtx, SG_CS_STDOUT, pRepo, ppszNewChangesets[i], pvhPileOfCleanBranches, SG_TRUE, SG_FALSE) );
		}
		if(relationship==SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
		{
			SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDOUT, "\nFast-Forward Merge to '%s' brings in %i changeset%s.\n", pszHidMergeTarget, countNewChangesets, ((countNewChangesets==1)?"":"s")) );
		}
		else
		{
			SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDOUT, "\nMerge with '%s' brings in %i changeset%s.\n", pszHidMergeTarget, countNewChangesets, ((countNewChangesets==1)?"":"s")) );
		}
	}

	SG_VHASH_NULLFREE(pCtx, pvhPileOfCleanBranches);
	SG_STRINGARRAY_NULLFREE(pCtx, psaNewChangesets);
	SG_NULLFREE(pCtx, pszHidBaseline);
	SG_NULLFREE(pCtx, pszHidMergeTarget);
	SG_REPO_NULLFREE(pCtx, pRepo);

	return;
fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_STRINGARRAY_NULLFREE(pCtx, psaNewChangesets);
	SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
	SG_NULLFREE(pCtx, pszHidBaseline);
	SG_NULLFREE(pCtx, pszHidMergeTarget);
	SG_VHASH_NULLFREE(pCtx, pvhPileOfCleanBranches);
}
/**
 * Populate a working directory from a repo changeset.
 *
 * Uses psz_spec_hid_cs_baseline as the changeset to retrieve; when it is NULL,
 * the repo must have exactly one VC leaf (otherwise throws
 * SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE) and that leaf is used.
 *
 * bCreateDrawer selects between a full checkout (drawer, pendingtree parent,
 * timestamp cache) and an export-style retrieval with no control files.
 */
void SG_workingdir__create_and_get(
	SG_context* pCtx,
	const char* pszDescriptorName,
	const SG_pathname* pPathDirPutTopLevelDirInHere,
	SG_bool bCreateDrawer,
	const char* psz_spec_hid_cs_baseline
	)
{
	SG_repo* pRepo = NULL;
	SG_rbtree* pIdsetLeaves = NULL;
	SG_uint32 count_leaves = 0;
	SG_changeset* pcs = NULL;
	const char* pszidUserSuperRoot = NULL;
	SG_bool b = SG_FALSE;
	char* psz_hid_cs_baseline = NULL;
	SG_pendingtree * pPendingTree = NULL;
	SG_vhash * pvhTimestamps = NULL;

	/*
	 * Fetch the descriptor by its given name and use it to connect to
	 * the repo.
	 */
	SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, pszDescriptorName, &pRepo) );

	if (psz_spec_hid_cs_baseline)
	{
		// Caller named the baseline; take a private copy we own.
		SG_ERR_CHECK( SG_strdup(pCtx, psz_spec_hid_cs_baseline, &psz_hid_cs_baseline) );
	}
	else
	{
		const char* psz_hid = NULL;
		/*
		 * If you do not specify a hid to be the baseline, then this routine
		 * currently only works if there is exactly one leaf in the repo.
		 */
		SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo,SG_DAGNUM__VERSION_CONTROL,&pIdsetLeaves) );
		SG_ERR_CHECK( SG_rbtree__count(pCtx, pIdsetLeaves, &count_leaves) );
		if (count_leaves != 1)
			SG_ERR_THROW( SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE );
		SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, pIdsetLeaves, &b, &psz_hid, NULL) );
		SG_ERR_CHECK( SG_STRDUP(pCtx, psz_hid, &psz_hid_cs_baseline) );
	}

	/*
	 * Load the desired changeset from the repo so we can look up the
	 * id of its user root directory
	 */
	SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs_baseline, &pcs) );
	SG_ERR_CHECK( SG_changeset__get_root(pCtx, pcs, &pszidUserSuperRoot) );

	if (bCreateDrawer)
	{
		SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhTimestamps) );

		// Retrieve everything into the WD and capture the timestamps on the files that we create.
		SG_ERR_CHECK( sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, pvhTimestamps) );

		// this creates "repo.json" with the repo-descriptor.
		SG_ERR_CHECK( SG_workingdir__set_mapping(pCtx, pPathDirPutTopLevelDirInHere, pszDescriptorName, NULL) );

		// this creates an empty "wd.json" file (which doesn't know anything).
		SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, pPathDirPutTopLevelDirInHere, SG_TRUE, &pPendingTree) );

		// force set the initial parents to the current changeset.
		SG_ERR_CHECK( SG_pendingtree__set_single_wd_parent(pCtx, pPendingTree, psz_hid_cs_baseline) );

		// force initialize the timestamp cache to the list that we just built; this should
		// be the only timestamps in the cache since we just populated the WD.
		// NOTE: this call steals our vhash (pvhTimestamps becomes NULL), so the
		// NULLFREE at 'fail' is a no-op on the success path.
		SG_ERR_CHECK( SG_pendingtree__set_wd_timestamp_cache(pCtx, pPendingTree, &pvhTimestamps) );

		SG_ERR_CHECK( SG_pendingtree__save(pCtx, pPendingTree) );
	}
	else
	{
		// Retrieve everything into the WD but do not create .sgdrawer or record timestamps.
		// This is more like an EXPORT operation.
		SG_ERR_CHECK( sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, NULL) );
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvhTimestamps);
	SG_NULLFREE(pCtx, psz_hid_cs_baseline);
	SG_CHANGESET_NULLFREE(pCtx, pcs);
	SG_RBTREE_NULLFREE(pCtx, pIdsetLeaves);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
}
/**
 * Create a new repo in the closet.
 *
 * The admin id comes from one of three places: a shared-users peer repo, the
 * local user-master repo, or a freshly generated GID.  psz_shared_users and
 * bFromUserMaster are mutually exclusive (asserted; checked by the caller).
 *
 * On success the caller owns *ppszGidRepoId and *ppszHidCSetFirst.  On
 * failure, if the repo was already created it is deleted again so a failed
 * admin pull does not leave a half-initialized repo behind.
 */
static void _vv_verbs__init_new_repo__do_init(SG_context * pCtx,
											  const char * pszRepoName,
											  const char * pszStorage,
											  const char * pszHashMethod,
											  const char * psz_shared_users,
											  SG_bool bFromUserMaster,
											  char ** ppszGidRepoId,
											  char ** ppszHidCSetFirst)
{
	SG_repo * pRepo = NULL;
	SG_repo * pRepoUserMaster = NULL;
	char * pszUserMasterAdminId = NULL;
	SG_changeset * pCSetFirst = NULL;
	const char * pszHidCSetFirst_ref;
	char * pszHidCSetFirst = NULL;
	char * pszGidRepoId = NULL;
	char bufAdminId[SG_GID_BUFFER_LENGTH];

	// create a completely new repo in the closet.

	SG_NULLARGCHECK_RETURN( pszRepoName );
	// pszStorage is optional
	// pszHashMethod is optional
	SG_ASSERT(SG_FALSE == (psz_shared_users && bFromUserMaster)); // checked in SG_vv_verbs__init_new_repo

	if (psz_shared_users)
	{
		SG_ERR_CHECK(  _vv_verbs__init_new_repo__get_admin_id(pCtx, psz_shared_users, bufAdminId)  );
	}
	else if (bFromUserMaster)
	{
		SG_ERR_CHECK(  SG_REPO__USER_MASTER__OPEN(pCtx, &pRepoUserMaster)  );
		SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepoUserMaster, &pszUserMasterAdminId)  );
		// Bounded, NUL-terminated copy.  The previous memcpy of
		// sizeof(bufAdminId) bytes could read past the end of the
		// heap-allocated admin-id string if it were shorter than the buffer.
		SG_ERR_CHECK(  SG_strcpy(pCtx, bufAdminId, sizeof(bufAdminId), pszUserMasterAdminId)  );
		SG_NULLFREE(pCtx, pszUserMasterAdminId);
	}
	else
	{
		// Stand-alone repo: mint a brand new admin id.
		SG_ERR_CHECK(  SG_gid__generate(pCtx, bufAdminId, sizeof(bufAdminId))  );
	}

	SG_ERR_CHECK(  SG_repo__create__completely_new__empty__closet(pCtx, bufAdminId, pszStorage, pszHashMethod, pszRepoName)  );
	SG_ERR_CHECK(  SG_REPO__OPEN_REPO_INSTANCE(pCtx, pszRepoName, &pRepo)  );

	// When sharing users (either flavor), the real users come from the pull
	// below; otherwise seed the repo with the "nobody" user.
	if (!psz_shared_users && !bFromUserMaster)
		SG_ERR_CHECK(  SG_user__create_nobody(pCtx, pRepo)  );

	SG_ERR_CHECK(  SG_repo__setup_basic_stuff(pCtx, pRepo, &pCSetFirst, NULL)  );

	if (psz_shared_users)
	{
		SG_ERR_CHECK(  SG_pull__admin(pCtx, pRepo, psz_shared_users, NULL, NULL, NULL, NULL)  );
	}
	else if (bFromUserMaster)
	{
		SG_ERR_CHECK(  SG_pull__admin__local(pCtx, pRepo, pRepoUserMaster, NULL)  );
	}

	SG_ERR_CHECK(  SG_changeset__get_id_ref(pCtx, pCSetFirst, &pszHidCSetFirst_ref)  );
	SG_ERR_CHECK(  SG_STRDUP(pCtx, pszHidCSetFirst_ref, &pszHidCSetFirst)  );

	SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &pszGidRepoId)  );

	// Transfer ownership of both strings to the caller.
	*ppszGidRepoId = pszGidRepoId;
	*ppszHidCSetFirst = pszHidCSetFirst;

	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_REPO_NULLFREE(pCtx, pRepoUserMaster);
	SG_CHANGESET_NULLFREE(pCtx, pCSetFirst);

	return;

fail:
	/* If we fail to pull the admin dags after the repo's been created, delete it. */
	if (pRepo)
	{
		SG_REPO_NULLFREE(pCtx, pRepo);
		if (pszRepoName)
			SG_ERR_IGNORE(  _vv_verbs__init_new_repo__delete_new_repo(pCtx, pszRepoName)  );
	}
	SG_REPO_NULLFREE(pCtx, pRepoUserMaster);
	SG_CHANGESET_NULLFREE(pCtx, pCSetFirst);
	SG_NULLFREE(pCtx, pszGidRepoId);
	SG_NULLFREE(pCtx, pszHidCSetFirst);
}
/**
 * Sync the users DAG from a source repo to every repo in the closet (plus the
 * user-master repo, if present).
 *
 * For each destination repo whose users-dag leaf differs from the source's,
 * do a local admin pull.  If that pull advances the destination past the
 * source, the destination becomes the NEW source and the whole scan restarts
 * (i = -1), so the process converges on a single leaf everywhere.  Admin-id
 * mismatches are logged as warnings and skipped rather than treated as fatal.
 *
 * If ppvaSyncedUserList is non-NULL, the final user list (from whichever repo
 * ended up as the source) is returned; caller owns it.
 */
void SG_sync__closet_user_dags(
	SG_context* pCtx,
	SG_repo* pRepoSrcNotMine,
	const char* pszRefHidLeafSrc,
	SG_varray** ppvaSyncedUserList)
{
	char* pszSrcAdminId = NULL;
	char* pszHidLeafSrc = NULL;
	char* pszHidLeafDest = NULL;
	SG_vhash* pvhDescriptors = NULL;
	SG_repo* pRepoDest = NULL;
	SG_repo* pRepoSrcMine = NULL;	// holds ownership when the "source" is swapped mid-loop
	char* pszDestAdminId = NULL;

	/* Using disallowed characters to ensure no collision with an actual repo name.
	 * Not that this isn't actually stored anywhere--we just use it as a key in the
	 * vhash below where the /real/ repos have descriptor names. */
	const char* pszRefUserMasterFakeName = "\\/USER_MASTER\\/";

	/* The repo routines do a null arg check of pRepoSrcNotMine. The other args are optional. */

	// If the caller didn't supply the source leaf, look it up now.
	if (!pszRefHidLeafSrc)
	{
		SG_ERR_CHECK( SG_zing__get_leaf(pCtx, pRepoSrcNotMine, NULL, SG_DAGNUM__USERS, &pszHidLeafSrc) );
		pszRefHidLeafSrc = pszHidLeafSrc;
	}

	/* Add all repositories in "normal" status, to the list we'll iterate over. */
	SG_ERR_CHECK( SG_closet__descriptors__list(pCtx, &pvhDescriptors) );

	/* If it exists, add the user master repo to the list. */
	{
		SG_bool bExists = SG_FALSE;
		SG_ERR_CHECK( SG_repo__user_master__exists(pCtx, &bExists) );
		if (bExists)
			SG_ERR_CHECK( SG_vhash__add__null(pCtx, pvhDescriptors, pszRefUserMasterFakeName) );
	}

	/* Iterate over the repositories, syncing the user database. */
	{
		// i is signed so it can be set to -1 to restart the scan from the top.
		SG_int32 i = 0;
		SG_uint32 numDescriptors = 0;
		SG_ERR_CHECK( SG_vhash__count(pCtx, pvhDescriptors, &numDescriptors) );
		for(i = 0; i < (SG_int32)numDescriptors; i++)
		{
			const char* pszRefNameDest = NULL;
			SG_bool bAdminIdsMatch = SG_TRUE;
			const SG_variant* pvRefDest = NULL;

			/* Note that the source repo will be in this loop, too, but we don't need to check for
			 * it, adding another strcmp, because the leaf hid comparison below will effectively
			 * skip it. So we do one extra leaf fetch and comparison, total, rather than an extra
			 * strcmp for every repo in the closet. */
			SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvhDescriptors, i, &pszRefNameDest, &pvRefDest) );

			// A NULL-valued entry is the user-master placeholder added above.
			if (SG_VARIANT_TYPE_NULL == pvRefDest->type)
				SG_ERR_CHECK( SG_REPO__USER_MASTER__OPEN(pCtx, &pRepoDest) );
			else
				SG_ERR_CHECK( SG_REPO__OPEN_REPO_INSTANCE(pCtx, pszRefNameDest, &pRepoDest) );

			SG_ERR_CHECK( SG_zing__get_leaf(pCtx, pRepoDest, NULL, SG_DAGNUM__USERS, &pszHidLeafDest) );

			if (strcmp(pszRefHidLeafSrc, pszHidLeafDest))
			{
				/* Pull from source to dest.
				 * Pull is generally faster than push, so we're using it on purpose. */
				// Deliberately NOT wrapped in SG_ERR_CHECK: the error is
				// inspected below so admin-id mismatches can be tolerated.
				SG_pull__admin__local(pCtx, pRepoDest, pRepoSrcNotMine, NULL);
				if (SG_context__has_err(pCtx))
				{
					/* If there's an admin id mismatch, don't die. Log a warning and move on. */
					if (SG_context__err_equals(pCtx, SG_ERR_ADMIN_ID_MISMATCH))
					{
						const char* pszRefNameSrc = NULL;

						SG_ERR_DISCARD;

						SG_ERR_CHECK( SG_repo__get_descriptor_name(pCtx, pRepoSrcNotMine, &pszRefNameSrc) );
						if (!pszRefNameSrc)
							pszRefNameSrc = pszRefUserMasterFakeName;
						SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepoSrcNotMine, &pszSrcAdminId) );
						SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepoDest, &pszDestAdminId) );

						SG_ERR_CHECK( SG_log__report_warning(pCtx,
							"admin-id mismatch when syncing users: source repo %s has %s, dest repo %s has %s",
							pszRefNameSrc, pszSrcAdminId, pszRefNameDest, pszDestAdminId) );

						bAdminIdsMatch = SG_FALSE;

						SG_NULLFREE(pCtx, pszDestAdminId);
						SG_NULLFREE(pCtx, pszSrcAdminId);
					}
					else
						SG_ERR_RETHROW;
				}

				if (bAdminIdsMatch)
				{
					// Re-fetch the dest leaf: the pull may have created a new one.
					SG_NULLFREE(pCtx, pszHidLeafDest);
					SG_ERR_CHECK( SG_zing__get_leaf(pCtx, pRepoDest, NULL, SG_DAGNUM__USERS, &pszHidLeafDest) );
					if (strcmp(pszRefHidLeafSrc, pszHidLeafDest))
					{
						/* The pull from source to dest resulted in a new leaf.
						 * Use the new leaf and restart the loop. */
						// Ownership handoff: the dest repo/leaf become the new
						// source; pRepoSrcMine keeps ownership of the swapped-in
						// repo so it can be freed at 'fail'.
						SG_NULLFREE(pCtx, pszHidLeafSrc);
						pszRefHidLeafSrc = pszHidLeafSrc = pszHidLeafDest;
						pszHidLeafDest = NULL;
						SG_REPO_NULLFREE(pCtx, pRepoSrcMine);
						pRepoSrcNotMine = pRepoSrcMine = pRepoDest;
						pRepoDest = NULL;
						i = -1; /* start again at the first descriptor */
					}
				}
			}

			SG_NULLFREE(pCtx, pszHidLeafDest);
			SG_REPO_NULLFREE(pCtx, pRepoDest);
		}
	}

	if (ppvaSyncedUserList)
		SG_ERR_CHECK( SG_user__list_all(pCtx, pRepoSrcNotMine, ppvaSyncedUserList) );

	/* fall through */
fail:
	SG_NULLFREE(pCtx, pszSrcAdminId);
	SG_NULLFREE(pCtx, pszHidLeafSrc);
	SG_NULLFREE(pCtx, pszHidLeafDest);
	SG_VHASH_NULLFREE(pCtx, pvhDescriptors);
	SG_REPO_NULLFREE(pCtx, pRepoDest);
	SG_REPO_NULLFREE(pCtx, pRepoSrcMine);
	SG_NULLFREE(pCtx, pszDestAdminId);
}