/**
 * Prepare (but do not step) a statement that sets or clears the "tmp"
 * flag on the tbl_gid row with the given alias.  On success the caller
 * receives ownership of the prepared statement in *ppStmt and must
 * step/finalize it.  On error the statement is finalized here.
 */
void sg_wc_db__gid__prepare_toggle_tmp_stmt(SG_context * pCtx,
                                            sg_wc_db * pDb,
                                            SG_uint64 uiAliasGid,
                                            SG_bool bIsTmp,
                                            sqlite3_stmt ** ppStmt)
{
    sqlite3_stmt * pStmtToggle = NULL;

#if TRACE_WC_DB
    {
        SG_int_to_string_buffer bufui64;
        SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                                   "sg_wc_db__gid__prepare_toggle_tmp_stmt: %s --> %c\n",
                                   SG_uint64_to_sz(uiAliasGid, bufui64),
                                   ((bIsTmp) ? 'T' : 'F'))  );
    }
#endif

    SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmtToggle,
                                      ("UPDATE tbl_gid SET tmp = ? WHERE alias_gid = ?"))  );
    SG_ERR_CHECK(  sg_sqlite__bind_int( pCtx, pStmtToggle, 1, bIsTmp)  );
    SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmtToggle, 2, uiAliasGid)  );

    *ppStmt = pStmtToggle;
    return;

fail:
    SG_ERR_IGNORE(  sg_sqlite__finalize(pCtx, pStmtToggle)  );
}
/**
 * Print one command-line option to the given console stream in the form
 * "-x [--long] ARG : description".  If overrideDesc is non-NULL it is
 * printed instead of the option's own description.  A NULL opt prints
 * a literal "?".
 */
void SG_getopt__print_option(SG_context* pCtx, SG_console_stream cs, SG_getopt_option* opt, const char * overrideDesc)
{
    // BUGFIX: opts was previously uninitialized; if SG_string__alloc
    // failed, the fail path freed an indeterminate pointer (UB).
    SG_string *opts = NULL;

    if (opt == NULL)
    {
        SG_ERR_IGNORE(  SG_console(pCtx, cs, "?")  );
        return;
    }

    SG_ERR_CHECK(  SG_string__alloc(pCtx, &opts)  );

    /* We have a valid option which may or may not have a "short name"
       (a single-character alias for the long option). */
    if (opt->optch <= 255)
        SG_ERR_CHECK(  SG_string__sprintf(pCtx, opts, "-%c [--%s]", opt->optch, opt->pStringName)  );
    else
        SG_ERR_CHECK(  SG_string__sprintf(pCtx, opts, "--%s", opt->pStringName)  );

    if (opt->has_arg)
        SG_ERR_CHECK(  SG_string__append__sz(pCtx, opts, " ARG")  );

    if (overrideDesc)
        SG_ERR_IGNORE(  SG_console(pCtx, cs, "%-20s : %s\n", SG_string__sz(opts), overrideDesc)  );
    else
        SG_ERR_IGNORE(  SG_console(pCtx, cs, "%-20s : %s\n", SG_string__sz(opts), opt->pStringDescription)  );

fail:
    SG_STRING_NULLFREE(pCtx, opts);
}
/**
 * Unit test: deltify between two moderately large, entirely different
 * text files (repeated song lyrics).
 */
void u0023_vcdiff__test_deltify(SG_context * pCtx)
{
    FILE* fp;
    SG_pathname* pPath_version1 = NULL;
    SG_pathname* pPath_version2 = NULL;
    int i;

    VERIFY_ERR_CHECK_DISCARD(  unittest__get_nonexistent_pathname(pCtx,&pPath_version1)  );
    VERIFY_ERR_CHECK_DISCARD(  unittest__get_nonexistent_pathname(pCtx,&pPath_version2)  );

    // BUGFIX: fopen() was previously unchecked; a NULL stream would be
    // handed to fprintf/fclose (undefined behavior).
    fp = fopen(SG_pathname__sz(pPath_version1), "w");
    VERIFY_COND("test_deltify: fopen version1", (fp != NULL));
    if (fp)
    {
        for (i=0; i<500; i++)
        {
            fprintf(fp, "Ah, I should have known it from the very start This girl will leave me with a broken heart Now listen people what I'm telling you A-keep away from-a Runaround Sue\n");
        }
        fclose(fp);
    }

    fp = fopen(SG_pathname__sz(pPath_version2), "w");
    VERIFY_COND("test_deltify: fopen version2", (fp != NULL));
    if (fp)
    {
        for (i=0; i<100; i++)
        {
            fprintf(fp, "He rocks in the tree-top all a day long Hoppin' and a-boppin' and a-singin' the song All the little birds on J-Bird St. Love to hear the robin goin' tweet tweet tweet\n");
        }
        fclose(fp);
    }

    VERIFY_ERR_CHECK_DISCARD(  u0023_vcdiff__do_test_deltify(pCtx, pPath_version1, pPath_version2)  );

    SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pPath_version1)  );
    SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pPath_version2)  );

    SG_PATHNAME_NULLFREE(pCtx, pPath_version1);
    SG_PATHNAME_NULLFREE(pCtx, pPath_version2);
}
/**
 * Delete all rows from tbl_gid that are flagged "tmp".  Skipped
 * entirely (as an optimization) when the in-memory counter says no
 * tmp gids were created; the counter is reset to zero on success.
 */
void sg_wc_db__gid__delete_all_tmp(SG_context * pCtx, sg_wc_db * pDb)
{
    sqlite3_stmt * pStmt = NULL;

#if TRACE_WC_DB
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               "sg_wc_db__gid__delete_all_tmp: had %d tmp gids.\n",
                               pDb->nrTmpGids)  );
#endif

    if (pDb->nrTmpGids == 0)
        return;

    SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmt,
                                      ("DELETE FROM tbl_gid WHERE tmp != 0"))  );
    SG_ERR_CHECK(  sg_sqlite__step(pCtx, pStmt, SQLITE_DONE)  );

    // BUGFIX: use nullfinalize so that if the finalize itself fails we
    // do not re-finalize the (already freed) statement in "fail"
    // (sqlite3_finalize frees the handle even when it reports the
    // error of the last step).  Matches the pattern used elsewhere.
    SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

    pDb->nrTmpGids = 0;
    return;

fail:
    SG_ERR_IGNORE(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
}
/**
 * Unit test: deltify from a small text file to a large run of a single
 * repeated byte ('e' x 500000).
 */
void u0023_vcdiff__test_deltify_run(SG_context * pCtx)
{
    FILE* fp;
    SG_pathname* pPath_version1 = NULL;
    SG_pathname* pPath_version2 = NULL;
    int i;

    VERIFY_ERR_CHECK_DISCARD(  unittest__get_nonexistent_pathname(pCtx, &pPath_version1)  );
    VERIFY_ERR_CHECK_DISCARD(  unittest__get_nonexistent_pathname(pCtx, &pPath_version2)  );

    // BUGFIX: fopen() was previously unchecked (NULL stream -> UB).
    fp = fopen(SG_pathname__sz(pPath_version1), "w");
    VERIFY_COND("test_deltify_run: fopen version1", (fp != NULL));
    if (fp)
    {
        fprintf(fp, "Ah, I should havq known it from thq vqry start This girl will lqavq mq with a brokqn hqart Now listqn pqoplq what I'm tqlling you A-kqqp away from-a Runaround Suq\n");
        fclose(fp);
    }

    fp = fopen(SG_pathname__sz(pPath_version2), "w");
    VERIFY_COND("test_deltify_run: fopen version2", (fp != NULL));
    if (fp)
    {
        for (i=0; i<500000; i++)
        {
            fputc('e', fp);
        }
        fclose(fp);
    }

    VERIFY_ERR_CHECK_DISCARD(  u0023_vcdiff__do_test_deltify(pCtx, pPath_version1, pPath_version2)  );

    SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pPath_version1)  );
    SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pPath_version2)  );

    SG_PATHNAME_NULLFREE(pCtx, pPath_version1);
    SG_PATHNAME_NULLFREE(pCtx, pPath_version2);
}
/**
 * Unit test: deltify from an empty file to a large binary file
 * containing every byte value repeatedly.
 */
void u0023_vcdiff__test_deltify_from_zerolength(SG_context * pCtx)
{
    FILE* fp;
    SG_pathname* pPath_version1 = NULL;
    SG_pathname* pPath_version2 = NULL;
    int i;

    VERIFY_ERR_CHECK_DISCARD(  unittest__get_nonexistent_pathname(pCtx, &pPath_version1)  );
    VERIFY_ERR_CHECK_DISCARD(  unittest__get_nonexistent_pathname(pCtx, &pPath_version2)  );

    // BUGFIX: fopen() was previously unchecked (NULL stream -> UB).
    fp = fopen(SG_pathname__sz(pPath_version1), "w");
    VERIFY_COND("test_deltify_from_zerolength: fopen version1", (fp != NULL));
    if (fp)
    {
        fclose(fp);     // version1 is deliberately zero-length
    }

    fp = fopen(SG_pathname__sz(pPath_version2), "w");
    VERIFY_COND("test_deltify_from_zerolength: fopen version2", (fp != NULL));
    if (fp)
    {
        for (i=0; i<500000; i++)
        {
            fputc(i % 256, fp);
        }
        fclose(fp);
    }

    VERIFY_ERR_CHECK_DISCARD(  u0023_vcdiff__do_test_deltify(pCtx, pPath_version1, pPath_version2)  );

    SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pPath_version1)  );
    SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pPath_version2)  );

    SG_PATHNAME_NULLFREE(pCtx, pPath_version1);
    SG_PATHNAME_NULLFREE(pCtx, pPath_version2);
}
/**
 * Unit test: deltify between two one-byte files.
 */
void u0023_vcdiff__test_deltify_small_files(SG_context * pCtx)
{
    FILE* fp;
    // BUGFIX: initialize to NULL like the sibling tests; if the
    // pathname allocation fails (the DISCARD macro continues), the
    // pointers were previously used uninitialized.
    SG_pathname* pPath_version1 = NULL;
    SG_pathname* pPath_version2 = NULL;

    VERIFY_ERR_CHECK_DISCARD(  unittest__get_nonexistent_pathname(pCtx, &pPath_version1)  );
    VERIFY_ERR_CHECK_DISCARD(  unittest__get_nonexistent_pathname(pCtx, &pPath_version2)  );

    // BUGFIX: fopen() was previously unchecked (NULL stream -> UB).
    fp = fopen(SG_pathname__sz(pPath_version1), "w");
    VERIFY_COND("test_deltify_small_files: fopen version1", (fp != NULL));
    if (fp)
    {
        fprintf(fp, "e");
        fclose(fp);
    }

    fp = fopen(SG_pathname__sz(pPath_version2), "w");
    VERIFY_COND("test_deltify_small_files: fopen version2", (fp != NULL));
    if (fp)
    {
        fprintf(fp, "a");
        fclose(fp);
    }

    VERIFY_ERR_CHECK_DISCARD(  u0023_vcdiff__do_test_deltify(pCtx, pPath_version1, pPath_version2)  );

    SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pPath_version1)  );
    SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pPath_version2)  );

    SG_PATHNAME_NULLFREE(pCtx, pPath_version1);
    SG_PATHNAME_NULLFREE(pCtx, pPath_version2);
}
/**
 * APPLY-phase handler for deleting a resolved issue.  The only work
 * here is removing the item's private tempdir, if it had one (only
 * items with file-edit conflicts do).
 */
void sg_wc_tx__apply__delete_issue(SG_context * pCtx,
                                   SG_wc_tx * pWcTx,
                                   const SG_vhash * pvh)
{
    SG_pathname * pPathTempDir = NULL;
    const char * pszRepoPathTempDir = NULL;

    SG_ERR_CHECK(  SG_vhash__check__sz(pCtx, pvh, "repopath_tempdir", &pszRepoPathTempDir)  );

#if TRACE_WC_MERGE
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               "sg_wc_tx__apply__delete_issue: tempdir '%s'\n",
                               ((pszRepoPathTempDir) ? pszRepoPathTempDir : "(null)"))  );
#endif

    if (!pszRepoPathTempDir)
        goto fail;          // no tempdir recorded for this item -- nothing to do

    SG_ERR_CHECK(  sg_wc_db__path__sz_repopath_to_absolute(pCtx,
                                                           pWcTx->pDb,
                                                           pszRepoPathTempDir,
                                                           &pPathTempDir)  );

    // Deleting the tempdir can fail for any number of reasons, but
    // that should not abort the COMMIT, so the error is ignored.
    SG_ERR_IGNORE(  SG_fsobj__rmdir_recursive__pathname(pCtx, pPathTempDir)  );

fail:
    SG_PATHNAME_NULLFREE(pCtx, pPathTempDir);
}
/**
 * Run (or dry-run) an update of the baseline to the given changeset.
 * --test only logs the planned actions; --verbose logs while doing.
 * Portability warnings raised by the update are reported on the way
 * out without being cleared.
 */
static void _my_do_cmd_update(SG_context * pCtx,
                              SG_option_state * pOptSt,
                              SG_pendingtree * pPendingTree,
                              const char * pszTargetChangeset)
{
    SG_pendingtree_action_log_enum eLog;

    if (pOptSt->bTest)
        eLog = SG_PT_ACTION__LOG_IT;
    else if (pOptSt->bVerbose)
        eLog = SG_PT_ACTION__DO_IT | SG_PT_ACTION__LOG_IT;
    else
        eLog = SG_PT_ACTION__DO_IT;

    SG_ERR_CHECK(  SG_pendingtree__update_baseline(pCtx, pPendingTree, pszTargetChangeset,
                                                   pOptSt->bForce, eLog)  );

    if (eLog & SG_PT_ACTION__LOG_IT)
        SG_ERR_IGNORE(  sg_report_action_log(pCtx,pPendingTree)  );

fail:
    if (SG_context__err_equals(pCtx,SG_ERR_PORTABILITY_WARNINGS))
        SG_ERR_IGNORE(  sg_report_portability_warnings(pCtx,pPendingTree)  );
}
/**
 * Copy the working-copy version of the given entry's file into the
 * given temp file (as input for a merge tool).  If the temp file
 * already exists, it is assumed to have been exported earlier and
 * the copy is skipped.
 */
void _sg_mrg__copy_wc_to_temp_file(SG_context * pCtx,
                                   SG_mrg * pMrg,
                                   SG_mrg_cset_entry * pMrgCSetEntry,
                                   const SG_pathname * pPathTempFile)
{
    sg_wc_liveview_item * pLVI;
    SG_string * pStringRepoPath = NULL;
    SG_pathname * pPathInWC = NULL;
    SG_bool bExists;
    SG_bool bKnown;

    SG_ERR_CHECK(  SG_fsobj__exists__pathname(pCtx,pPathTempFile,&bExists,NULL,NULL)  );
    if (bExists)
    {
#if TRACE_WC_MERGE
        SG_ERR_IGNORE(  SG_console(pCtx,SG_CS_STDERR, "Skipping copy of WC version to [%s]\n", SG_pathname__sz(pPathTempFile))  );
#endif
        return;
    }

    // Find the absolute path of the WD version of the file.

    SG_ERR_CHECK(  sg_wc_tx__liveview__fetch_random_item(pCtx, pMrg->pWcTx, pMrgCSetEntry->uiAliasGid, &bKnown, &pLVI)  );
    SG_ASSERT_RELEASE_FAIL(  (bKnown)  );   // the item must be known to the tx at this point
    SG_ERR_CHECK(  sg_wc_tx__liveview__compute_live_repo_path(pCtx, pMrg->pWcTx, pLVI, &pStringRepoPath)  );
    SG_ERR_CHECK(  sg_wc_db__path__repopath_to_absolute(pCtx, pMrg->pWcTx->pDb, pStringRepoPath, &pPathInWC)  );

#if TRACE_WC_MERGE
    SG_ERR_IGNORE(  SG_console(pCtx,SG_CS_STDERR, "Copying WC version [%s] to [%s]\n", SG_string__sz(pStringRepoPath), SG_pathname__sz(pPathTempFile))  );
#endif

    // Ideally, when we create this TEMP file it should be read-only.
    // Afterall, it does represent a historical version of the file and
    // it should only be used as INPUT to whatever merge tool the user
    // has configured.  So it should be read-only.  This might allow
    // a GUI merge tool to show locks/whatever and/or prevent accidental
    // editing of these files.
    //
    // However, this can cause an "Access is Denied" error on Windows
    // when we get ready to delete the contents of the TEMP directory.
    // We'll deal with that there rather than here.

    SG_ERR_CHECK(  SG_fsobj__copy_file(pCtx, pPathInWC, pPathTempFile, 0400)  );

fail:
    SG_STRING_NULLFREE(pCtx, pStringRepoPath);
    SG_PATHNAME_NULLFREE(pCtx, pPathInWC);
}
/**
 * Finish a vfile transaction started with SG_vfile__begin.
 *
 * If pvh is non-NULL, the file must be writable; the vhash is
 * serialized to JSON and replaces the file's entire contents.
 * If pvh is NULL and the file is writable, the file is truncated.
 * On success the file is closed, the SG_vfile is freed, and *ppvf
 * is set to NULL.
 *
 * NOTE(review): on the error path the file is left open and *ppvf is
 * not cleared -- presumably callers recover via sg_vfile__dispose()
 * (as other functions in this file do); confirm.
 */
void SG_vfile__end(
    SG_context* pCtx,
    SG_vfile** ppvf,
    const SG_vhash* pvh
    )
{
    SG_string* pstr = NULL;
    SG_vfile* pvf = NULL;

    SG_NULLARGCHECK_RETURN(ppvf);

    pvf = *ppvf;

    if (pvh)
    {
        // Writing a vhash back requires a writable handle.
        SG_ARGCHECK_RETURN( !(pvf->mode & SG_FILE_RDONLY) , pvh );

        SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pstr)  );
        SG_ERR_CHECK(  SG_vhash__to_json(pCtx, pvh,pstr)  );

        // Rewind and truncate before writing the fresh JSON image so
        // a shorter document does not leave trailing stale bytes.
        SG_ERR_CHECK(  SG_file__seek(pCtx, pvf->pFile, 0)  );
        SG_ERR_CHECK(  SG_file__truncate(pCtx, pvf->pFile)  );

#if TRACE_VFILE
        SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "VFileEnd: Writing %d bytes to file.\n", SG_string__length_in_bytes(pstr))  );
#endif

        SG_ERR_CHECK(  SG_file__write(pCtx, pvf->pFile, SG_string__length_in_bytes(pstr), (const SG_byte *)SG_string__sz(pstr), NULL)  );

        SG_STRING_NULLFREE(pCtx, pstr);
    }
    else
    {
        if (!(pvf->mode & SG_FILE_RDONLY))
        {
            SG_ERR_CHECK(  SG_file__seek(pCtx, pvf->pFile, 0)  );
            SG_ERR_CHECK(  SG_file__truncate(pCtx, pvf->pFile)  );

#if TRACE_VFILE
            SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "VFileEnd: Truncating file.\n")  );
#endif
        }
    }

    SG_FILE_NULLCLOSE(pCtx, pvf->pFile);

    SG_NULLFREE(pCtx, pvf);
    *ppvf = NULL;

    return;

fail:
    SG_STRING_NULLFREE(pCtx, pstr);
}
/**
 * Tear down the loggers registered at startup: unregister the text
 * (file) logger, then the console logger, then free the base pathname
 * owned by the daily-path writer data.  Errors are ignored since this
 * runs during shutdown.  (The unregister order mirrors registration
 * in reverse -- keep it.)
 */
static void _clean_up_logging(
    SG_context * pCtx,
    SG_log_console__data * pcLogStdData,
    SG_log_text__data * pcLogFileData,
    SG_log_text__writer__daily_path__data * pcLogFileWriterData)
{
    SG_ERR_IGNORE(  SG_log_text__unregister(pCtx, pcLogFileData)  );
    SG_ERR_IGNORE(  SG_log_console__unregister(pCtx, pcLogStdData)  );
    SG_PATHNAME_NULLFREE(pCtx, pcLogFileWriterData->pBasePath);
}
/**
 * Unit test: create a repo descriptor "r1", map a working directory to
 * it, and verify that the mapping is found both at the root and from a
 * (nonexistent) subdirectory path.
 */
void u0038_test_wdmapping(SG_context * pCtx)
{
    SG_vhash* pvh = NULL;
    SG_closet_descriptor_handle* ph = NULL;
    SG_pathname* pPath = NULL;
    SG_pathname* pMappedPath = NULL;
    SG_string* pstrRepoDescriptorName = NULL;
    char* pszidGid = NULL;
    char buf_tid[SG_TID_MAX_BUFFER_LENGTH];

    VERIFY_ERR_CHECK_DISCARD(  SG_tid__generate2__suffix(pCtx, buf_tid, sizeof(buf_tid), 32, "u0038")  );
    VERIFY_ERR_CHECK_DISCARD(  SG_PATHNAME__ALLOC__SZ(pCtx, &pPath, buf_tid)  );
    VERIFY_ERR_CHECK_DISCARD(  SG_fsobj__mkdir__pathname(pCtx, pPath)  );

    SG_ERR_IGNORE(  SG_closet__descriptors__remove(pCtx, "r1")  );
    VERIFY_ERR_CHECK_DISCARD(  SG_closet__descriptors__add_begin(pCtx, "r1", NULL, NULL, NULL, NULL, &pvh, &ph)  );
    VERIFY_ERR_CHECK_DISCARD(  SG_closet__descriptors__add_commit(pCtx, &ph, pvh, SG_REPO_STATUS__NORMAL)  );
    SG_VHASH_NULLFREE(pCtx, pvh);

    VERIFY_ERR_CHECK_DISCARD(  SG_workingdir__set_mapping(pCtx, pPath, "r1", NULL)  );

    VERIFY_ERR_CHECK_DISCARD(  SG_workingdir__find_mapping(pCtx, pPath, &pMappedPath, &pstrRepoDescriptorName, &pszidGid)  );
    VERIFY_COND("ridesc match", (0 == strcmp("r1", SG_string__sz(pstrRepoDescriptorName))));

    VERIFY_ERR_CHECK_DISCARD(  SG_pathname__append__from_sz(pCtx, pPath, "foo")  );
    VERIFY_ERR_CHECK_DISCARD(  SG_pathname__append__from_sz(pCtx, pPath, "bar")  );
    VERIFY_ERR_CHECK_DISCARD(  SG_pathname__append__from_sz(pCtx, pPath, "plok")  );

    SG_STRING_NULLFREE(pCtx, pstrRepoDescriptorName);
    SG_PATHNAME_NULLFREE(pCtx, pMappedPath);
    SG_NULLFREE(pCtx, pszidGid);    // BUGFIX: was leaked (and overwritten by the next lookup)

    VERIFY_ERR_CHECK_DISCARD(  SG_workingdir__find_mapping(pCtx, pPath, &pMappedPath, &pstrRepoDescriptorName, &pszidGid)  );

    SG_STRING_NULLFREE(pCtx, pstrRepoDescriptorName);
    SG_PATHNAME_NULLFREE(pCtx, pMappedPath);
    SG_PATHNAME_NULLFREE(pCtx, pPath);
    SG_NULLFREE(pCtx, pszidGid);    // BUGFIX: was leaked
}
/**
 * Return every path recorded in the treendx table for the given GID,
 * sorted.  On success the caller owns *ppResults.
 */
void SG_treendx__get_all_paths(SG_context* pCtx, SG_treendx* pTreeNdx, const char* psz_gid, SG_stringarray ** ppResults)
{
    sqlite3_stmt* pStmt = NULL;
    SG_stringarray * pResults = NULL;
    int rc;

    SG_ERR_CHECK_RETURN(  SG_gid__argcheck(pCtx, psz_gid)  );

    SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx,&pResults, 1)  );

    // NOTE: psz_gid has passed SG_gid__argcheck, so splicing it into
    // the SQL here cannot inject; a bind would still be tidier.
    SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pTreeNdx->psql, &pStmt,
                                      "SELECT strpath FROM treendx WHERE gid='%s' ORDER BY strpath;",
                                      psz_gid)  );
    while ((rc = sqlite3_step(pStmt)) == SQLITE_ROW)
    {
        const char* pszPath = (const char*) sqlite3_column_text(pStmt, 0);
        SG_ERR_CHECK(  SG_stringarray__add(pCtx, pResults, pszPath)  );
    }
    if (rc != SQLITE_DONE)
    {
        SG_ERR_THROW(SG_ERR_SQLITE(rc));
    }

    // BUGFIX: nullfinalize so a failing finalize cannot lead to a
    // second finalize of the freed handle in the fail path.
    SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

    *ppResults = pResults;
    return;

fail:
    SG_ERR_IGNORE(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
    SG_STRINGARRAY_NULLFREE(pCtx, pResults);    // BUGFIX: was leaked on every error path
}
/**
 * Insert/Replace a TNE ROW from the tne_L0 table in the wc.db.
 *
 * The existing row (if it exists) is a copy of the TNE
 * as it existed in the current baseline.  This item
 * will be present in the future baseline, but it has
 * one or more changed fields.  So we want the TNE ROW
 * to be updated as we transition the tne_L0 table.
 *
 */
void sg_wc_tx__apply__insert_tne(SG_context * pCtx,
                                 SG_wc_tx * pWcTx,
                                 const SG_vhash * pvh)
{
#if TRACE_WC_TX_APPLY
    const char * pszRepoPath;

    SG_ERR_CHECK_RETURN(  SG_vhash__get__sz(pCtx, pvh, "src", &pszRepoPath)  );
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               ("sg_wc_tx__apply__insert_tne: '%s'\n"),
                               pszRepoPath)  );
#else
    // Without tracing this function touches neither argument.
    SG_UNUSED( pCtx );
    SG_UNUSED( pvh );
#endif
    SG_UNUSED( pWcTx );

    // we don't actually have anything here.
    // the journal record was more for the verbose log.
    // the actual work of updating the SQL will be done
    // in the parallel journal-stmt.
}
/**
 * Add psz_id to the fragment's cache as an END-FRINGE member (a node
 * referenced by the frag whose dagnode body is not carried along:
 * generation 0, no dagnode pointer).  Asserts (release) that the id
 * was not already present in the cache.
 */
static void _cache__add__fringe(SG_context * pCtx,
                                SG_dagfrag * pFrag,
                                const char* psz_id)
{
    _my_data * pDataAllocated = NULL;
    _my_data * pDataCached = NULL;
    _my_data * pOldData = NULL;

    SG_NULLARGCHECK_RETURN(pFrag);          // this is probably not necessary for an internal routine
    SG_NONEMPTYCHECK_RETURN(psz_id);        // this is probably not necessary for an internal routine

    SG_ERR_CHECK(  SG_alloc(pCtx, 1, sizeof(_my_data), &pDataAllocated)  );
    pDataAllocated->m_genDagnode = 0;
    pDataAllocated->m_state = SG_DFS_END_FRINGE;
    pDataAllocated->m_pDagnode = NULL;

    SG_ERR_CHECK(  SG_rbtree__update__with_assoc(pCtx,pFrag->m_pRB_Cache,psz_id,pDataAllocated,(void **)&pOldData)  );
    pDataCached = pDataAllocated;
    pDataAllocated = NULL;                  // cache now owns pData and pDagnode.

    SG_ASSERT_RELEASE_RETURN2(  (!pOldData),
                                (pCtx,"Possible memory leak adding [%s] to dagfrag fringe.",psz_id)  );

    return;

fail:
    // TODO is the following test and assignment really necessary? or did it get carried
    // TODO over from another change? we weren't given a pDagnode in the first place.
    if (pDataCached)                        // caller still owns pDagnode on errors even if we got pData
        pDataCached->m_pDagnode = NULL;     // into the cache.  This may cause problems later if you keep going.

    SG_ERR_IGNORE(  _my_data__free(pCtx, pDataAllocated)  );    // free pData if we did not get it stuck into the cache.
}
/**
 * Look up the integer alias for a GID in tbl_gid.  An empty/NULL GID
 * maps to the null-root sentinel.  Throws SG_ERR_SQLITE if no row is
 * found.
 */
void sg_wc_db__gid__get_alias_from_gid(SG_context * pCtx,
                                       sg_wc_db * pDb,
                                       const char * pszGid,
                                       SG_uint64 * puiAliasGid)
{
    sqlite3_stmt * pStmt = NULL;
    SG_uint64 uiAliasGid = 0;
    int rc;

    SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmt,
                                      ("SELECT alias_gid FROM tbl_gid WHERE gid = ?"))  );
    SG_ERR_CHECK(  sg_sqlite__bind_text(pCtx, pStmt, 1,
                                        ((pszGid && *pszGid) ? pszGid : SG_WC_DB__GID__NULL_ROOT))  );
    if ((rc=sqlite3_step(pStmt)) != SQLITE_ROW)
        SG_ERR_THROW2(  SG_ERR_SQLITE(rc),
                        (pCtx, "sg_wc_db:tbl_gid can't find gid %s.", pszGid)  );
    uiAliasGid = (SG_uint64)sqlite3_column_int64(pStmt, 0);

    // BUGFIX: nullfinalize so a failing finalize cannot lead to a
    // second finalize of the freed handle in the fail path
    // (sqlite3_finalize frees the statement even when it reports an
    // error).  Matches the pattern used by the other db routines.
    SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

    *puiAliasGid = uiAliasGid;
    return;

fail:
    SG_ERR_IGNORE(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
}
/**
 * Unit test (older variant): map the CWD to descriptor "r1" and verify
 * the mapping resolves both at the root and from a subdirectory path.
 */
void u0038_test_wdmapping(SG_context * pCtx)
{
    SG_vhash* pvh = NULL;
    SG_pathname* pPath = NULL;
    SG_pathname* pMappedPath = NULL;
    SG_string* pstrRepoDescriptorName = NULL;
    char* pszidGid = NULL;

    VERIFY_ERR_CHECK_DISCARD(  SG_PATHNAME__ALLOC(pCtx, &pPath)  );
    VERIFY_ERR_CHECK_DISCARD(  SG_pathname__set__from_cwd(pCtx, pPath)  );

    VERIFY_ERR_CHECK_DISCARD(  SG_VHASH__ALLOC(pCtx, &pvh)  );
    VERIFY_ERR_CHECK_DISCARD(  SG_vhash__add__string__sz(pCtx, pvh, "hello", "world")  );
    VERIFY_ERR_CHECK_DISCARD(  SG_vhash__add__string__sz(pCtx, pvh, "hola", "mundo")  );

    SG_ERR_IGNORE(  SG_closet__descriptors__remove(pCtx, "r1")  );
    VERIFY_ERR_CHECK_DISCARD(  SG_closet__descriptors__add(pCtx, "r1", pvh)  );

    VERIFY_ERR_CHECK_DISCARD(  SG_workingdir__set_mapping(pCtx, pPath, "r1", NULL)  );

    VERIFY_ERR_CHECK_DISCARD(  SG_workingdir__find_mapping(pCtx, pPath, &pMappedPath, &pstrRepoDescriptorName, &pszidGid)  );
    VERIFY_COND("ridesc match", (0 == strcmp("r1", SG_string__sz(pstrRepoDescriptorName))));

    VERIFY_ERR_CHECK_DISCARD(  SG_pathname__append__from_sz(pCtx, pPath, "foo")  );
    VERIFY_ERR_CHECK_DISCARD(  SG_pathname__append__from_sz(pCtx, pPath, "bar")  );
    VERIFY_ERR_CHECK_DISCARD(  SG_pathname__append__from_sz(pCtx, pPath, "plok")  );

    SG_STRING_NULLFREE(pCtx, pstrRepoDescriptorName);
    SG_PATHNAME_NULLFREE(pCtx, pMappedPath);
    SG_NULLFREE(pCtx, pszidGid);    // BUGFIX: was leaked (and overwritten by the next lookup)

    VERIFY_ERR_CHECK_DISCARD(  SG_workingdir__find_mapping(pCtx, pPath, &pMappedPath, &pstrRepoDescriptorName, &pszidGid)  );

    SG_STRING_NULLFREE(pCtx, pstrRepoDescriptorName);
    SG_PATHNAME_NULLFREE(pCtx, pMappedPath);
    SG_PATHNAME_NULLFREE(pCtx, pPath);
    SG_VHASH_NULLFREE(pCtx, pvh);
    SG_NULLFREE(pCtx, pszidGid);    // BUGFIX: was leaked
}
/**
 * Record one (collapsable field-value, entry) pair in the given
 * uniqueness rbtree.  The field value is the rbtree KEY; the assoc
 * data is a vector of all entries carrying that value (many-to-one),
 * so callers can report every entry that chose a given value.
 *
 * TODO since we should only process a cset once we should not see
 * TODO duplicate entries in a vector, but if that ever becomes a
 * TODO problem consider a unique-insert or a sub-rbtree.
 */
static void _update_1_rbUnique(SG_context * pCtx,
                               SG_rbtree * prbUnique,
                               const char * pszKey,
                               SG_mrg_cset_entry * pMrgCSetEntry_Leaf_k)
{
    SG_vector * pVecOwned = NULL;
    SG_vector * pVecInTree = NULL;
    SG_bool bHaveKey = SG_FALSE;

    SG_ERR_CHECK(  SG_rbtree__find(pCtx,prbUnique,pszKey,&bHaveKey,(void **)&pVecInTree)  );
    if (!bHaveKey)
    {
        // First sighting of this value: create the entry vector and
        // hand ownership of it to the rbtree before using it.
        SG_ERR_CHECK(  SG_VECTOR__ALLOC(pCtx,&pVecOwned,3)  );
        SG_ERR_CHECK(  SG_rbtree__add__with_assoc(pCtx,prbUnique,pszKey,pVecOwned)  );
        pVecInTree = pVecOwned;
        pVecOwned = NULL;       // rbtree owns this now
    }

    SG_ERR_CHECK(  SG_vector__append(pCtx,pVecInTree,pMrgCSetEntry_Leaf_k,NULL)  );

#if TRACE_WC_MERGE
    SG_ERR_IGNORE(  SG_console(pCtx,SG_CS_STDERR,"_update_1_rbUnique: [%s][%s]\n",
                               pszKey,
                               SG_string__sz(pMrgCSetEntry_Leaf_k->pMrgCSet->pStringCSetLabel))  );
#endif

    return;

fail:
    SG_VECTOR_NULLFREE(pCtx, pVecOwned);
}
/**
 * "C" (local) client vtable entry: produce a fragball containing the
 * entire remote repo in the staging directory, for a clone-style pull.
 * On success the caller owns *ppszFragballName.
 */
void sg_client__c__pull_clone(
    SG_context* pCtx,
    SG_client* pClient,
    const SG_pathname* pStagingPathname,
    char** ppszFragballName)
{
    // CLEANUP: removed unused locals pMe (assigned, never read) and
    // pvhStatus (never set, only freed while NULL).
    SG_repo* pRepo = NULL;

    SG_NULLARGCHECK_RETURN(pClient);
    SG_NULLARGCHECK_RETURN(ppszFragballName);

    SG_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, pClient->psz_remote_repo_spec, &pRepo)  );

    SG_ERR_CHECK(  SG_context__msg__emit(pCtx, "Copying repository...")  );
    SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, pStagingPathname, ppszFragballName)  );
    SG_ERR_CHECK(  SG_context__msg__emit(pCtx, "done")  );

    /* fall through */
fail:
    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_ERR_IGNORE(  SG_context__msg__emit(pCtx, "\n")  );
}
/**
 * Map a GID to its integer alias, inserting a new tbl_gid row first
 * if the GID is not yet known.
 *
 * NOTE(review): ANY error from the initial lookup is treated as
 * "not already present" (the error state is reset and an insert is
 * attempted) -- a genuine sqlite failure would be masked and retried
 * as an insert; confirm this is intended.
 */
void sg_wc_db__gid__get_or_insert_alias_from_gid(SG_context * pCtx,
                                                 sg_wc_db * pDb,
                                                 const char * pszGid,
                                                 SG_uint64 * puiAliasGid)
{
    SG_bool bNotAlreadyPresent;

    sg_wc_db__gid__get_alias_from_gid(pCtx, pDb, pszGid, puiAliasGid);
    bNotAlreadyPresent = SG_CONTEXT__HAS_ERR(pCtx);
    if (bNotAlreadyPresent)
    {
        SG_context__err_reset(pCtx);

        // Insert, then re-fetch to learn the alias sqlite assigned.
        SG_ERR_CHECK(  sg_wc_db__gid__insert(pCtx, pDb, pszGid)  );
        SG_ERR_CHECK(  sg_wc_db__gid__get_alias_from_gid(pCtx, pDb, pszGid, puiAliasGid)  );
    }

#if TRACE_WC_GID
    {
        SG_int_to_string_buffer bufui64;
        SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                                   "GID: get_or_insert [%s] ==> %s [new %d]\n",
                                   pszGid,
                                   SG_uint64_to_sz(*puiAliasGid,bufui64),
                                   bNotAlreadyPresent)  );
    }
#endif

fail:
    return;
}
/**
 * Find the alias-gid for the root "@/" (aka "@b/").  Unlike the null-root,
 * this is not a compile-time constant.  (perhaps it should be.)
 *
 * Since neither the GID nor repo-path of the root directory
 * can ever change, the TX-layer caller can just ask us for
 * the alias of the root directory and not have to bother with
 * TNE/PC and/or the prescan/liveview stuff.
 *
 * We could/should use sg_wc_db__tne__foreach_in_dir_by_parent_alias()
 * and pass __NULL_ROOT and set up a callback to get the first result
 * rather than duplicating parts of that routine, but that felt like
 * too much trouble.  (But we could also verify that there is exactly
 * one row and it has the correct entryname.)
 *
 */
void sg_wc_db__tne__get_alias_of_root(SG_context * pCtx,
                                      sg_wc_db * pDb,
                                      const sg_wc_db__cset_row * pCSetRow,
                                      SG_uint64 * puiAliasGid_Root)
{
    sqlite3_stmt * pStmt = NULL;
    int rc;

    // The root is the sole row whose parent is the null-root alias.
    SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmt,
                                      ("SELECT"
                                       "    alias_gid"         // 0
                                       "  FROM %s"
                                       "  WHERE (alias_gid_parent = ?)"),
                                      pCSetRow->psz_tne_table_name)  );
    SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, SG_WC_DB__ALIAS_GID__NULL_ROOT)  );

    if ((rc=sqlite3_step(pStmt)) != SQLITE_ROW)
    {
        SG_ERR_THROW2(  SG_ERR_SQLITE(rc),
                        (pCtx, "sg_wc_db:%s can't find tne row for '@/' (aka '@b/').",
                         pCSetRow->psz_tne_table_name)  );
    }

    *puiAliasGid_Root = (SG_uint64)sqlite3_column_int64(pStmt, 0);

fail:
    // nullfinalize is safe on both the success and error paths.
    SG_ERR_IGNORE(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
}
/**
 * During revert-all setup certain deleted items are placed on a
 * kill-list (so their tbl_pc rows get deleted, effectively marking
 * them clean) and left for merge to rediscover.  When the merge
 * engine finds such an item and has other plans for it, this cancels
 * the predicted kill by overwriting the slot with the UNDEFINED
 * alias (the vector is not compacted).
 */
void sg_wc_tx__merge__remove_from_kill_list(SG_context * pCtx,
                                            SG_mrg * pMrg,
                                            const SG_uint64 uiAliasGid)
{
    SG_uint32 i;
    SG_uint32 nrSlots = 0;

    if (!pMrg->pVecRevertAllKillList)
        return;

    SG_ERR_CHECK(  SG_vector_i64__length(pCtx, pMrg->pVecRevertAllKillList, &nrSlots)  );
    for (i=0; i<nrSlots; i++)
    {
        SG_uint64 uiAlias_i;

        SG_ERR_CHECK(  SG_vector_i64__get(pCtx, pMrg->pVecRevertAllKillList, i, (SG_int64 *)&uiAlias_i)  );
        if (uiAlias_i != uiAliasGid)
            continue;

#if TRACE_WC_MERGE
        {
            SG_int_to_string_buffer buf;
            SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                                       "Cancel kill-list for [0x%s]\n",
                                       SG_uint64_to_sz__hex(uiAlias_i, buf))  );
        }
#endif
        SG_ERR_CHECK(  SG_vector_i64__set(pCtx, pMrg->pVecRevertAllKillList, i,
                                          SG_WC_DB__ALIAS_GID__UNDEFINED)  );
    }

fail:
    return;
}
/**
 * Pull an entire repository (clone-style) from the given client into
 * the named local repo: fetch a full fragball into staging, commit it,
 * then clean up the staging area.
 */
void SG_pull__clone(
    SG_context* pCtx,
    const char* pszPullIntoRepoDescriptorName,
    SG_client* pClient)
{
    sg_pull_instance_data* pMe = NULL;
    char* pszFragballName = NULL;
    const SG_pathname* pStagingPathname;

    SG_NULLARGCHECK_RETURN(pszPullIntoRepoDescriptorName);
    SG_NULLARGCHECK_RETURN(pClient);

    SG_ERR_CHECK(  _pull_init(pCtx, pClient, pszPullIntoRepoDescriptorName, &pMe)  );
    SG_ERR_CHECK(  SG_staging__get_pathname(pCtx, pMe->pStaging, &pStagingPathname)  );

    /* Request a fragball containing the entire repo */
    SG_ERR_CHECK(  SG_client__pull_clone(pCtx, pClient, pStagingPathname, &pszFragballName)  );

    /* commit and cleanup */
    // BUGFIX: these were SG_ERR_CHECK_RETURN, which returned without
    // running the "fail" cleanup and leaked pMe and pszFragballName
    // on error.
    SG_ERR_CHECK(  SG_staging__commit_fragball(pCtx, pMe->pStaging, pszFragballName)  );

    SG_ERR_CHECK(  SG_context__msg__emit(pCtx, "Cleaning up...")  );
    SG_ERR_CHECK(  SG_staging__cleanup(pCtx, &pMe->pStaging)  );
    SG_ERR_CHECK(  SG_context__msg__emit(pCtx, "done")  );

    /* fall through */
fail:
    _NULLFREE_INSTANCE_DATA(pCtx, pMe);
    SG_NULLFREE(pCtx, pszFragballName);
    SG_ERR_IGNORE(  SG_context__msg__emit(pCtx, "\n")  );
}
/**
 * Create a per-tx temp-dir if we don't already have one.
 * (Idempotent: returns immediately when the session temp dir is
 * already set on the tx.)
 */
void sg_wc_tx__create_session_temp_dir(SG_context * pCtx, SG_wc_tx * pWcTx)
{
    char bufTidSession[SG_TID_MAX_BUFFER_LENGTH];
    SG_uint32 nrDigits = 10;    // length of the random TID component used for the dir name

    if (pWcTx->pPathSessionTempDir)
        return;

    // pick a space in /tmp for exporting temporary copies of the files
    // so that internal and/or external tools can compare them.
    //
    // TODO 2012/05/02 Investigate the use of SG_workingdir__get_temp_path() (which
    // TODO creates things in .sgdrawer rather than /tmp).
    // TODO (see also sg_mrg__private_file_mrg.h)
    // TODO See also sg_vv2__diff__create_session_temp_dir().

    SG_ERR_CHECK(  SG_PATHNAME__ALLOC__USER_TEMP_DIRECTORY(pCtx, &pWcTx->pPathSessionTempDir)  );
    SG_ERR_CHECK(  SG_tid__generate2(pCtx, bufTidSession, sizeof(bufTidSession), nrDigits)  );
    SG_ERR_CHECK(  SG_pathname__append__from_sz(pCtx, pWcTx->pPathSessionTempDir, bufTidSession)  );

    // Tolerate the (unlikely) case that the directory already exists.
    SG_ERR_TRY(  SG_fsobj__mkdir_recursive__pathname(pCtx, pWcTx->pPathSessionTempDir)  );
    SG_ERR_CATCH_IGNORE(  SG_ERR_DIR_ALREADY_EXISTS  );
    SG_ERR_CATCH_END;

#if 0 && defined(DEBUG)
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               "CreateSessionTempDir: %s\n",
                               SG_pathname__sz(pWcTx->pPathSessionTempDir))  );
#endif

fail:
    return;
}
/**
 * Set (add or replace) a single string key in the JSON vhash stored in
 * the given file, creating the file/vhash if needed, and write the
 * result back.
 */
void SG_vfile__update__string__sz(SG_context* pCtx,
                                  const SG_pathname* pPath, /**< The path of the file containing the JSON text */
                                  const char* putf8Key,
                                  const char* pszVal)
{
    SG_vfile* pvfHandle = NULL;
    SG_vhash* pvhFile = NULL;

    // Load (or create) the vfile's vhash, update one key, write back.
    SG_ERR_CHECK(  SG_vfile__begin(pCtx, pPath, SG_FILE_RDWR | SG_FILE_OPEN_OR_CREATE, &pvhFile, &pvfHandle)  );

    if (!pvhFile)
        SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhFile)  );

    SG_ERR_CHECK(  SG_vhash__update__string__sz(pCtx, pvhFile, putf8Key, pszVal)  );

    SG_ERR_CHECK(  SG_vfile__end(pCtx, &pvfHandle, pvhFile)  );

    SG_VHASH_NULLFREE(pCtx, pvhFile);
    return;

fail:
    SG_ERR_IGNORE(  sg_vfile__dispose(pCtx, pvfHandle)  );
    SG_VHASH_NULLFREE(pCtx, pvhFile);
}
/**
 * Remove a single key from the JSON vhash stored in the given file and
 * write the result back.  Throws SG_ERR_VHASH_KEYNOTFOUND when the
 * file holds no vhash at all.
 */
void SG_vfile__remove(SG_context* pCtx,
                      const SG_pathname* pPath, /**< The path of the file containing the JSON text */
                      const char* putf8Key)
{
    SG_vfile* pvfHandle = NULL;
    SG_vhash* pvhFile = NULL;

    SG_ERR_CHECK(  SG_vfile__begin(pCtx, pPath, SG_FILE_RDWR | SG_FILE_OPEN_OR_CREATE, &pvhFile, &pvfHandle)  );

    if (!pvhFile)
        SG_ERR_THROW(SG_ERR_VHASH_KEYNOTFOUND);     // empty/new file: nothing to remove

    SG_ERR_CHECK(  SG_vhash__remove(pCtx, pvhFile, putf8Key)  );

    SG_ERR_CHECK(  SG_vfile__end(pCtx, &pvfHandle, pvhFile)  );

    SG_VHASH_NULLFREE(pCtx, pvhFile);
    return;

fail:
    SG_ERR_IGNORE(  sg_vfile__dispose(pCtx, pvfHandle)  );
    SG_VHASH_NULLFREE(pCtx, pvhFile);
}
/**
 * Launch the configured external merge tool synchronously on the
 * mine/ancestor/other versions to produce the result file.
 * *pbMergedText is set to SG_TRUE iff the tool exits with status 0.
 *
 * NOTE(review): the argument layout (-r result, -t1/-t2/-t3 titles,
 * then mine, ancestor, other) presumably matches SourceGear
 * DiffMerge's command line -- confirm before reusing for other tools.
 */
static void _resolve__fix__run_external_file_merge_1(SG_context * pCtx,
                                                     struct _resolve_data * pData,
                                                     _resolve__external_tool * pET,
                                                     _resolve__step_pathnames * pStepPathnames,
                                                     SG_string * pStrRepoPath,
                                                     SG_bool * pbMergedText)
{
    SG_exec_argvec * pArgVec = NULL;
    SG_exit_status exitStatus;

    SG_UNUSED( pData );

#if 0 && defined(DEBUG)
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               ("RESOLVE File Merge: %s\n"
                                "     Mine: %s\n"
                                "    Other: %s\n"
                                " Ancestor: %s\n"
                                "   Result: %s\n"),
                               SG_string__sz(pStrRepoPath),
                               SG_pathname__sz(pStepPathnames->pPath_Mine),
                               SG_pathname__sz(pStepPathnames->pPath_Other),
                               SG_pathname__sz(pStepPathnames->pPath_Ancestor),
                               SG_pathname__sz(pStepPathnames->pPath_Result))  );
#endif

    SG_ERR_CHECK(  SG_exec_argvec__alloc(pCtx, &pArgVec)  );

    // -r <result> : where the merged output is written
    SG_ERR_CHECK(  SG_exec_argvec__append__sz(pCtx, pArgVec, "-r")  );
    SG_ERR_CHECK(  SG_exec_argvec__append__sz(pCtx, pArgVec, SG_pathname__sz(pStepPathnames->pPath_Result))  );

    // -t1/-t2/-t3 : pane titles (mine / repo-path / other)
    SG_ERR_CHECK(  SG_exec_argvec__append__sz(pCtx, pArgVec, "-t1")  );
    SG_ERR_CHECK(  SG_exec_argvec__append__sz(pCtx, pArgVec, "Mine")  );
    SG_ERR_CHECK(  SG_exec_argvec__append__sz(pCtx, pArgVec, "-t2")  );
    SG_ERR_CHECK(  SG_exec_argvec__append__sz(pCtx, pArgVec, SG_string__sz(pStrRepoPath))  );
    SG_ERR_CHECK(  SG_exec_argvec__append__sz(pCtx, pArgVec, "-t3")  );
    SG_ERR_CHECK(  SG_exec_argvec__append__sz(pCtx, pArgVec, "Other")  );

    // positional inputs: mine, ancestor, other (order matters)
    SG_ERR_CHECK(  SG_exec_argvec__append__sz(pCtx, pArgVec, SG_pathname__sz(pStepPathnames->pPath_Mine))  );
    SG_ERR_CHECK(  SG_exec_argvec__append__sz(pCtx, pArgVec, SG_pathname__sz(pStepPathnames->pPath_Ancestor))  );
    SG_ERR_CHECK(  SG_exec_argvec__append__sz(pCtx, pArgVec, SG_pathname__sz(pStepPathnames->pPath_Other))  );

    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: Preparing to launch external merge tool: %s\n", pET->pszName)  );

    SG_ERR_CHECK(  SG_exec__exec_sync__files(pCtx, pET->pszExe, pArgVec, NULL, NULL, NULL, &exitStatus)  );

    *pbMergedText = (exitStatus == 0);

fail:
    SG_EXEC_ARGVEC_NULLFREE(pCtx, pArgVec);
}
/**
 * Export the contents of the given file entry (which reflects a specific version)
 * into a temp file so that an external tool can use it.
 *
 * You own the returned pathname and the file on disk.
 *
 * If the temp file already exists it is assumed to have been exported
 * earlier in this session and the export is skipped.
 */
void _sg_mrg__export_to_temp_file(SG_context * pCtx,
                                  SG_mrg * pMrg,
                                  const char * pszHidBlob,
                                  const SG_pathname * pPathTempFile)
{
    SG_file * pFile = NULL;
    SG_bool bExists;

    SG_ERR_CHECK(  SG_fsobj__exists__pathname(pCtx,pPathTempFile,&bExists,NULL,NULL)  );
    if (bExists)
    {
#if TRACE_WC_MERGE
        SG_ERR_IGNORE(  SG_console(pCtx,SG_CS_STDERR, "Skipping export of [blob %s] to [%s]\n",pszHidBlob,SG_pathname__sz(pPathTempFile))  );
#endif
    }
    else
    {
#if TRACE_WC_MERGE
        SG_ERR_IGNORE(  SG_console(pCtx,SG_CS_STDERR, "Exporting [blob %s] to [%s]\n",pszHidBlob,SG_pathname__sz(pPathTempFile))  );
#endif

        // Ideally, when we create this TEMP file it should be read-only.
        // Afterall, it does represent a historical version of the file and
        // it should only be used as INPUT to whatever merge tool the user
        // has configured.  So it should be read-only.  This might allow
        // a GUI merge tool to show locks/whatever and/or prevent accidental
        // editing of these files.
        //
        // However, this can cause an "Access is Denied" error on Windows
        // when we get ready to delete the contents of the TEMP directory.
        // We'll deal with that there rather than here.

        SG_ERR_CHECK(  SG_file__open__pathname(pCtx,pPathTempFile,SG_FILE_WRONLY|SG_FILE_CREATE_NEW,0400,&pFile)  );
        SG_ERR_CHECK(  SG_repo__fetch_blob_into_file(pCtx,
                                                     pMrg->pWcTx->pDb->pRepo,
                                                     pszHidBlob,pFile,NULL)  );
        SG_FILE_NULLCLOSE(pCtx,pFile);
    }

    return;

fail:
    if (pFile)          // only if **WE** created the file, do we try to delete it on error.
    {
        SG_FILE_NULLCLOSE(pCtx,pFile);
        SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx,pPathTempFile)  );
    }
}
/**
 * Fetch the current branch if attached.
 * Return NULL if detached.
 *
 * On success the caller owns *ppszBranchName (may be NULL).
 * "Detached" is represented either by a NULL value in the row or by
 * the row being absent entirely; both yield NULL here.
 */
void sg_wc_db__branch__get_branch(SG_context * pCtx,
                                  sg_wc_db * pDb,
                                  char ** ppszBranchName)
{
    sqlite3_stmt * pStmt = NULL;
    int rc;

    SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmt,
                                      ("SELECT"
                                       "    name"              // 0
                                       "  FROM tbl_branch"
                                       "  WHERE id = ?"))  );
    SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, ID_KEY)  );

    rc = sqlite3_step(pStmt);
    switch (rc)
    {
    case SQLITE_ROW:
        // Row present: a SQL NULL name means detached.
        if (sqlite3_column_type(pStmt, 0) == SQLITE_NULL)
            *ppszBranchName = NULL;
        else
            SG_ERR_CHECK(  SG_STRDUP(pCtx, (const char *)sqlite3_column_text(pStmt, 0), ppszBranchName)  );
        break;

    case SQLITE_DONE:
        // No row at all: also detached.
        *ppszBranchName = NULL;
        break;

    default:
        SG_ERR_THROW2(  SG_ERR_SQLITE(rc),
                        (pCtx, "sg_wc_db:tbl_branch can't get branch name.")  );
    }

    SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

#if TRACE_WC_DB
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               "sg_wc_db__branch__get_branch: %s\n",
                               ((*ppszBranchName) ? (*ppszBranchName) : "<detached>"))  );
#endif

    return;

fail:
    SG_ERR_IGNORE(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
}