/**
 * Open a zip archive for reading.
 *
 * Locates and parses the End-of-Central-Directory record, validates that the
 * archive is not spanned across multiple disks (unsupported), and returns a
 * newly-allocated SG_unzip in *ppResult.  The caller owns the result.
 *
 * BUGFIX: us.pFile is now initialized to NULL before the open call.  If
 * SG_file__open__pathname failed, the fail path previously passed an
 * indeterminate stack value to SG_FILE_NULLCLOSE (undefined behavior).
 */
void SG_unzip__open(SG_context* pCtx, const SG_pathname* pPath, SG_unzip** ppResult)
{
    SG_unzip us;
    SG_unzip *s = NULL;
    SG_uint64 central_pos = 0;
    SG_uint32 uL;
    SG_uint16 number_disk = 0;          /* number of the current disk, used for spanning ZIP, unsupported, always 0 */
    SG_uint16 number_disk_with_CD = 0;  /* number of the disk with central dir, used for spanning ZIP, unsupported, always 0 */
    SG_uint16 number_entry_CD = 0;      /* total number of entries in the central dir (same as number_entry on nospan) */

    us.pFile = NULL;    // so the fail path never closes an indeterminate handle

    SG_ERR_CHECK(  SG_file__open__pathname(pCtx, pPath, SG_FILE_RDONLY | SG_FILE_OPEN_EXISTING, SG_FSOBJ_PERMS__UNUSED, &us.pFile)  );

    SG_ERR_CHECK(  sg_unzip__locate_central_dir(pCtx, us.pFile, &central_pos)  );
    SG_ERR_CHECK(  SG_file__seek(pCtx, us.pFile, central_pos)  );

    /* the signature, already checked */
    SG_ERR_CHECK(  sg_unzip__get_uint32(pCtx, us.pFile, &uL)  );

    /* number of this disk */
    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, us.pFile, &number_disk)  );

    /* number of the disk with the start of the central directory */
    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, us.pFile, &number_disk_with_CD)  );

    /* total number of entries in the central dir on this disk */
    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, us.pFile, &us.gi.number_entry)  );

    /* total number of entries in the central dir */
    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, us.pFile, &number_entry_CD)  );

    /* spanned (multi-disk) archives are not supported */
    if ((number_entry_CD != us.gi.number_entry) || (number_disk_with_CD != 0) || (number_disk != 0))
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }

    /* size of the central directory */
    SG_ERR_CHECK(  sg_unzip__get_uint32(pCtx, us.pFile, &us.size_central_dir)  );

    /* offset of start of central directory with respect to the starting disk number */
    SG_ERR_CHECK(  sg_unzip__get_uint32(pCtx, us.pFile, &us.offset_central_dir)  );

    /* zipfile comment length */
    SG_ERR_CHECK(  sg_unzip__get_uint16(pCtx, us.pFile, &us.gi.size_comment)  );

    /* the central dir cannot end after the EOCD record that points at it */
    if (central_pos < us.offset_central_dir + us.size_central_dir)
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }

    // Bytes (e.g. a self-extractor stub) preceding the actual zip data.
    us.byte_before_the_zipfile = (SG_uint32)(central_pos - (us.offset_central_dir + us.size_central_dir));
    us.central_pos = central_pos;
    us.pfile_in_zip_read = NULL;
    us.current_file_ok = SG_FALSE;

    SG_ERR_CHECK(  SG_malloc(pCtx, sizeof(SG_unzip), &s)  );
    *s = us;
    *ppResult = s;

    return;

fail:
    SG_FILE_NULLCLOSE(pCtx, us.pFile);
}
/**
 * Do diff of an individual item.
 *
 * When WC-based, we have a "DiffStep" vhash.
 * When historical, we have an item from a pvaStatus.
 *
 * pvhItem must carry a "gid" key.  If a difftool reports an error, the error
 * is printed to STDERR and eaten; the tool's numeric result code is returned
 * in *piResult (narrowed to SG_uint32) instead of being thrown.
 */
static void _do_diff1(SG_context * pCtx, SG_bool bWC, const SG_option_state * pOptSt, const SG_vhash * pvhItem, SG_uint32 * piResult)
{
    SG_string * pStringGidRepoPath = NULL;
    SG_vhash * pvhResultCodes = NULL;
    SG_stringarray * psa1 = NULL;
    const char * pszGid;
    SG_int64 i64Result = 0;
    SG_string * pStringErr = NULL;

    SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid)  );

    if (bWC)
    {
        // NOTE(review): pPathWc is always NULL here -- presumably NULL means
        // "use the current working copy"; confirm against SG_wc__diff__run.
        SG_pathname * pPathWc = NULL;
        SG_bool bHasTool = SG_FALSE;

        // With the __diff__setup() and __diff__run() changes, we have already
        // examined the items during the __setup() step and recorded a tool for
        // the *FILE* that have changed content.  So if "tool" isn't set in the
        // DiffStep/Item, we don't need to diff it -- it could be a structural
        // change, a non-file, a found item, etc.
        //
        // we do not use SG_wc__diff__throw() because we already have the diff info
        // and we want to control the result-code processing below.

        SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhItem, "tool", &bHasTool)  );
        if (bHasTool)
            SG_ERR_CHECK(  SG_wc__diff__run(pCtx, pPathWc, pvhItem, &pvhResultCodes)  );
    }
    else
    {
        // Historical diff: build a 1-element repo-path list "@<gid>" and
        // re-run the lookup through the old diff-to-stream code path.
        SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pStringGidRepoPath)  );
        SG_ERR_CHECK(  SG_string__sprintf(pCtx, pStringGidRepoPath, "@%s", pszGid)  );
        SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa1, 1)  );
        SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa1, SG_string__sz(pStringGidRepoPath))  );

        // we do not use the __throw() version of this routine so we can control
        // result-code processing below.
        SG_ERR_CHECK(  SG_vv2__diff_to_stream(pCtx, pOptSt->psz_repo, pOptSt->pRevSpec, psa1, 0,
                                              SG_FALSE,     // bNoSort
                                              SG_TRUE,      // bInteractive,
                                              pOptSt->psz_tool,
                                              &pvhResultCodes)  );
    }

    if (pvhResultCodes)
    {
        SG_vhash * pvhResult;       // we do not own this

        // look up the per-item result vhash keyed by this item's gid
        SG_ERR_CHECK(  SG_vhash__check__vhash(pCtx, pvhResultCodes, pszGid, &pvhResult)  );
        if (pvhResult)
        {
            const char * pszTool;

            SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhResult, "tool", &pszTool)  );
            SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvhResult, "result", &i64Result)  );

            // Deliberately NOT wrapped in SG_ERR_CHECK: on a tool error we
            // convert the error into text on STDERR and keep going.
            SG_difftool__check_result_code__throw(pCtx, i64Result, pszTool);
            if (SG_context__has_err(pCtx))
            {
                SG_context__err_to_string(pCtx, SG_FALSE, &pStringErr);
                SG_context__err_reset(pCtx);
                SG_ERR_CHECK(  SG_console__raw(pCtx, SG_CS_STDERR, SG_string__sz(pStringErr))  );
                // eat the tool error.  the result code is set.
            }
        }
    }

    if (piResult)
        *piResult = (SG_uint32)i64Result;

fail:
    SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
    SG_VHASH_NULLFREE(pCtx, pvhResultCodes);
    SG_STRINGARRAY_NULLFREE(pCtx, psa1);
    SG_STRING_NULLFREE(pCtx, pStringErr);
}
/**
 * Diff two changesets (historical, repo-based status).
 *
 * Interactive mode filters the status down to dirty files and shows them
 * one-by-one in a GUI tool; non-interactive mode streams PATCH-like output
 * for each item.  Tool errors are printed to STDERR and counted rather than
 * thrown, so one bad item (binary file, mis-configured tool) does not stop
 * the remaining diffs.  *pNrErrors receives the number of such errors.
 */
static void _s2__do_cset_vs_cset(SG_context * pCtx, const SG_option_state * pOptSt, const SG_stringarray * psaArgs, SG_uint32 * pNrErrors)
{
    SG_varray * pvaStatus = NULL;
    SG_varray * pvaStatusDirtyFiles = NULL;
    SG_stringarray * psa1 = NULL;
    SG_string * pStringGidRepoPath = NULL;
    SG_string * pStringErr = NULL;
    SG_uint32 nrErrors = 0;

    SG_ERR_CHECK(  SG_vv2__status(pCtx, pOptSt->psz_repo, pOptSt->pRevSpec, psaArgs,
                                  WC__GET_DEPTH(pOptSt),
                                  SG_FALSE,     // bNoSort
                                  &pvaStatus, NULL)  );
    if (pvaStatus)
    {
        if (pOptSt->bInteractive)
        {
            // Filter list down to just modified files and show them one-by-one.
            SG_ERR_CHECK(  _get_dirty_files(pCtx, pvaStatus, &pvaStatusDirtyFiles)  );
            if (pvaStatusDirtyFiles)
                SG_ERR_CHECK(  _do_gui_diffs(pCtx, SG_FALSE, pOptSt, pvaStatusDirtyFiles, &nrErrors)  );
        }
        else
        {
            SG_uint32 k, nrItems;

            // Print the changes with PATCH-like headers.
            // Accumulate any tool errors.
            SG_ERR_CHECK(  SG_varray__count(pCtx, pvaStatus, &nrItems)  );
            for (k=0; k<nrItems; k++)
            {
                SG_vhash * pvhItem;
                const char * pszGid = NULL;

                SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaStatus, k, &pvhItem)  );

                // TODO 2013/02/22 Our pvhItem has all of the details for the diff,
                // TODO            but we don't yet have a public API to let it be
                // TODO            used as is.  So we build a @gid repo-path and
                // TODO            run the old historical diff code on a 1-item array
                // TODO            containing this @gid.
                // TODO
                // TODO            We should fix this to just pass down the pvhItem
                // TOOD            so that it doesn't have to repeat the status lookup.
                SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid)  );
                SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pStringGidRepoPath)  );
                SG_ERR_CHECK(  SG_string__sprintf(pCtx, pStringGidRepoPath, "@%s", pszGid)  );
                SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa1, 1)  );
                SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa1, SG_string__sz(pStringGidRepoPath))  );

                // Deliberately NOT wrapped in SG_ERR_CHECK -- see below.
                SG_vv2__diff_to_stream__throw(pCtx, pOptSt->psz_repo, pOptSt->pRevSpec, psa1, 0,
                                              SG_TRUE,      // bNoSort -- doesn't matter only 1 item in list
                                              SG_FALSE,     // bInteractive,
                                              pOptSt->psz_tool);
                // Don't throw the error from the tool.  Just print it on STDERR
                // and remember that we had an error so that don't stop showing
                // the diffs just because we stumble over a changed binary file
                // or mis-configured tool, for example.
                if (SG_context__has_err(pCtx))
                {
                    SG_context__err_to_string(pCtx, SG_FALSE, &pStringErr);
                    SG_context__err_reset(pCtx);
                    SG_ERR_CHECK(  SG_console__raw(pCtx, SG_CS_STDERR, SG_string__sz(pStringErr))  );
                    SG_STRING_NULLFREE(pCtx, pStringErr);
                    nrErrors++;
                }

                // these are re-allocated each iteration; free them now so the
                // fail path only has to free the last pass's copies
                SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
                SG_STRINGARRAY_NULLFREE(pCtx, psa1);
            }
        }
    }

    *pNrErrors = nrErrors;

fail:
    SG_VARRAY_NULLFREE(pCtx, pvaStatus);
    SG_VARRAY_NULLFREE(pCtx, pvaStatusDirtyFiles);
    SG_STRINGARRAY_NULLFREE(pCtx, psa1);
    SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
    SG_STRING_NULLFREE(pCtx, pStringErr);
}
/**
 * Remove one or more tags from a repo.
 *
 * If pszRev is given, it is resolved (as a revision id prefix when bRev,
 * otherwise as a tag name) and only tags attached to that changeset are
 * removed.  Tags that do not exist, or that are attached to a different
 * changeset, produce a warning on STDOUT and are skipped rather than
 * causing the whole operation to fail.
 */
void SG_tag__remove(SG_context * pCtx, SG_repo * pRepo, const char* pszRev, SG_bool bRev, SG_uint32 count_args, const char** paszArgs)
{
    SG_audit q;
    char* psz_hid_given = NULL;
    char* psz_hid_assoc_with_tag = NULL;
    SG_uint32 count_valid_tags = 0;
    const char** paszValidArgs = NULL;
    SG_uint32 i = 0;

    if (0 == count_args)
        return;     // nothing to remove

    SG_ERR_CHECK(  SG_audit__init(pCtx,&q,pRepo,SG_AUDIT__WHEN__NOW,SG_AUDIT__WHO__FROM_SETTINGS)  );

    if (pszRev)
    {
        // resolve the rev-spec to a changeset hid; deliberately not wrapped
        // in SG_ERR_CHECK so we can special-case ambiguous prefixes below
        if (bRev)
            SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, pszRev, &psz_hid_given);
        else
            SG_vc_tags__lookup__tag(pCtx, pRepo, pszRev, &psz_hid_given);

        if (SG_context__has_err(pCtx))
        {
            if (SG_context__err_equals(pCtx, SG_ERR_AMBIGUOUS_ID_PREFIX))
            {
                // print a friendlier message, then rethrow the original error
                SG_context__push_level(pCtx);
                SG_console(pCtx, SG_CS_STDERR, "The revision or tag could not be found:  %s\n", pszRev);
                SG_context__pop_level(pCtx);
                SG_ERR_RETHROW;
            }
            else
                SG_ERR_RETHROW;
        }
    }

    // SG_vc_tags__remove will throw on the first non-existant tag and stop
    // we don't want to issue errors, just warnings and keep going
    // weed out all the invalid tags here before calling SG_vc_tags__remove
    SG_ERR_CHECK(  SG_alloc(pCtx, count_args, sizeof(const char*), &paszValidArgs)  );
    for (i =0; i < count_args; i++)
    {
        SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, paszArgs[i], &psz_hid_assoc_with_tag)  );
        if (psz_hid_assoc_with_tag) // tag exists
        {
            if (psz_hid_given)
            {
                if (strcmp(psz_hid_given, psz_hid_assoc_with_tag) == 0) // tag is assoc with given changeset
                    paszValidArgs[count_valid_tags++] = paszArgs[i];
                else // tag not assoc with given changeset, no error, but warn
                    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "Tag not associated with given revision:  %s\n", paszArgs[i])  );
            }
            else
                paszValidArgs[count_valid_tags++] = paszArgs[i];
        }
        else
        {
            SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "Tag not found:  %s\n", paszArgs[i])  );
        }
        SG_NULLFREE(pCtx, psz_hid_assoc_with_tag);
        psz_hid_assoc_with_tag = NULL;
    }

    SG_ERR_CHECK(  SG_vc_tags__remove(pCtx, pRepo, &q, count_valid_tags, paszValidArgs)  );

fail:
    SG_NULLFREE(pCtx, paszValidArgs);
    SG_NULLFREE(pCtx, psz_hid_given);
    SG_NULLFREE(pCtx, psz_hid_assoc_with_tag);
}
/**
 * Read an entire regular file into a newly-allocated, NUL-terminated buffer.
 *
 * On success *ppbuf owns the buffer (caller frees) and *plen is its length
 * (not counting the terminating NUL).  An empty file yields *ppbuf==NULL,
 * *plen==0.  Throws SG_ERR_NOTAFILE when the path is missing or not a
 * regular file.
 *
 * BUGFIX: the former TODO noted the unchecked 64->32-bit narrowing of the
 * file length; we now reject files too large for a 32-bit buffer instead
 * of silently truncating (and avoid the len32+1 wraparound).
 */
static void sg_read_entire_file(
    SG_context* pCtx,
    const SG_pathname* pPath,
    char** ppbuf,
    SG_uint32* plen
    )
{
    SG_uint32 len32 = 0;
    SG_fsobj_type t;
    SG_byte* p = NULL;
    SG_bool bExists;
    SG_fsobj_type FsObjType;
    SG_fsobj_perms FsObjPerms;
    SG_file* pFile = NULL;

    SG_ERR_CHECK(  SG_fsobj__exists__pathname(pCtx, pPath, &bExists, &FsObjType, &FsObjPerms)  );

    if ( bExists && (SG_FSOBJ_TYPE__REGULAR != FsObjType) )
    {
        SG_ERR_IGNORE(  SG_log__report_error(pCtx, "Unable to open file: %s.", SG_pathname__sz(pPath))  );
        SG_ERR_THROW_RETURN(SG_ERR_NOTAFILE);
    }

    if (bExists)
    {
        SG_uint64 len64;

        SG_ERR_CHECK(  SG_fsobj__length__pathname(pCtx, pPath, &len64, &t)  );

        // "len" is uint64 because we can have huge files, but our buffer is
        // limited to uint32 (on 32bit systems).  Refuse anything that will
        // not fit (>= leaves room for the +1 NUL below).
        if (len64 >= (SG_uint64)SG_UINT32_MAX)
            SG_ERR_THROW(SG_ERR_LIMIT_EXCEEDED);
        len32 = (SG_uint32)len64;

        SG_ERR_CHECK(  SG_file__open__pathname(pCtx, pPath, SG_FILE_RDONLY | SG_FILE_OPEN_EXISTING, SG_FSOBJ_PERMS__UNUSED, &pFile)  );
    }
    else
    {
        SG_ERR_THROW_RETURN(SG_ERR_NOTAFILE);
        //len32 = 0;
    }

    if (len32 > 0)
    {
        // allocate len+1 so the buffer is always NUL-terminated
        SG_ERR_CHECK(  SG_alloc(pCtx, 1,len32+1,&p)  );
        SG_ERR_CHECK(  SG_file__read(pCtx, pFile, len32, p, NULL)  );
        p[len32] = 0;
        *ppbuf = (char*) p;
        p = NULL;       // ownership transferred to caller
        *plen = len32;
    }
    else
    {
        *ppbuf = NULL;
        *plen = 0;
    }

fail:
    SG_FILE_NULLCLOSE(pCtx, pFile);
    SG_NULLFREE(pCtx, p);
}
/** * Finds any character from a given set within a string and replaces them with a * specified replacement string. */ static void _replace_chars_with_string( SG_context* pCtx, //< [in] [out] Error and context info. SG_string* sValue, //< [in] [out] String to perform replacements in. const char* szChars, //< [in] Set of characters to replace, as a string. //< NULL is treated as an empty string. const char* szReplacement //< [in] String to use as a replacement for the characters. //< This whole string is a replacement for each found character. //< NULL is treated as an empty string. ) { SG_int32* pValue32 = NULL; SG_uint32 uValue32 = 0u; SG_int32* pChars32 = NULL; SG_uint32 uChars32 = 0u; SG_int32* pReplacement32 = NULL; SG_uint32 uReplacement32 = 0u; SG_int32* pResult32 = NULL; SG_uint32 uResult32 = 0u; char* szResult = NULL; SG_uint32 uResult = 0u; SG_uint32 uValueIndex = 0u; SG_NULLARGCHECK(sValue); // treat NULLs as empty strings if (szChars == NULL) { szChars = ""; } if (szReplacement == NULL) { szReplacement = ""; } // convert everything to UTF32 // I couldn't come up with a way to do this directly in UTF8 using the APIs // available in sg_utf8. 
SG_ERR_CHECK( _utf8_to_utf32(pCtx, SG_string__sz(sValue), &pValue32, &uValue32) ); SG_ERR_CHECK( _utf8_to_utf32(pCtx, szChars, &pChars32, &uChars32) ); SG_ERR_CHECK( _utf8_to_utf32(pCtx, szReplacement, &pReplacement32, &uReplacement32) ); // allocate a result buffer if (uReplacement32 > 1u) { // largest possible size we could end up with is if we replace every single // character in the value with the replacement string SG_ERR_CHECK( SG_allocN(pCtx, (uReplacement32 * uValue32) + 1u, pResult32) ); } else { // largest possible size we could end up with is if we do no replacements // at all and are left with exactly the input value SG_ERR_CHECK( SG_allocN(pCtx, uValue32 + 1u, pResult32) ); } // run through each character in the value for (uValueIndex = 0u; uValueIndex < uValue32; ++uValueIndex) { SG_int32 iValueChar = pValue32[uValueIndex]; SG_bool bReplace = SG_FALSE; SG_uint32 uCharsIndex = 0u; // check if this character should be replaced for (uCharsIndex = 0u; uCharsIndex < uChars32; ++uCharsIndex) { if (iValueChar == pChars32[uCharsIndex]) { bReplace = SG_TRUE; break; } } if (bReplace == SG_FALSE) { // append the character to the output pResult32[uResult32] = iValueChar; ++uResult32; } else { // append the replacement string to the output memcpy((void*)(pResult32 + uResult32), (void*)pReplacement32, uReplacement32 * sizeof(SG_int32)); uResult32 += uReplacement32; } } // NULL-terminate the result and convert it back to UTF8 pResult32[uResult32] = 0; SG_ERR_CHECK( _utf32_to_utf8(pCtx, pResult32, &szResult, &uResult) ); // return the result by replacing the original value's contents SG_ERR_CHECK( SG_string__adopt_buffer(pCtx, sValue, szResult, uResult) ); szResult = NULL; fail: SG_NULLFREE(pCtx, pValue32); SG_NULLFREE(pCtx, pChars32); SG_NULLFREE(pCtx, pReplacement32); SG_NULLFREE(pCtx, pResult32); SG_NULLFREE(pCtx, szResult); return; }
/**
 * Sanitize a string so that it satisfies the given validation constraints.
 *
 * uFixFlags selects which problems to fix: invalid characters and control
 * characters are each replaced with szReplace; strings shorter than uMin are
 * padded by appending szAdd repeatedly; strings longer than uMax are
 * truncated.  The sanitized copy is returned in *ppSanitized (caller owns);
 * szValue itself is never modified.
 */
void SG_validate__sanitize(
    SG_context* pCtx,
    const char* szValue,
    SG_uint32 uMin,
    SG_uint32 uMax,
    const char* szInvalids,
    SG_uint32 uFixFlags,
    const char* szReplace,
    const char* szAdd,
    SG_string** ppSanitized
    )
{
    SG_string* sSanitized = NULL;

    SG_NULLARGCHECK(ppSanitized);

    // treat NULL replacement string as empty
    if (szReplace == NULL)
    {
        szReplace = "";
    }

    // allocate our result string as a copy of the input
    SG_ERR_CHECK(  SG_STRING__ALLOC__SZ(pCtx, &sSanitized, szValue)  );

    // if we need to sanitize bad characters, do that
    // Note: We do this first because sanitizing characters might change the
    //       length of the string and affect the min/max length check.
    if (uFixFlags & SG_VALIDATE__RESULT__INVALID_CHARACTER)
    {
        SG_ERR_CHECK(  _replace_chars_with_string(pCtx, sSanitized, szInvalids, szReplace)  );
    }
    if (uFixFlags & SG_VALIDATE__RESULT__CONTROL_CHARACTER)
    {
        SG_ERR_CHECK(  _replace_chars_with_string(pCtx, sSanitized, SG_VALIDATE__CHARS__CONTROL, szReplace)  );
    }

    // if we need to lengthen the string, do that
    // Note: We do this prior to checking the max length because we have more fine
    //       grained control over reducing length than we do over expanding it.  We
    //       can remove individual characters, but only add characters in blocks of
    //       strlen(szAdd).  If uMin and uMax are close to each other, then adding
    //       a single szAdd might take us over uMax.  If that happens, we want to
    //       be able to trim that back down to uMax afterward.
    if (uFixFlags & SG_VALIDATE__RESULT__TOO_SHORT)
    {
        SG_uint32 uSanitized = 0u;
        SG_uint32 uAdd       = 0u;

        // padding only makes sense with a non-empty szAdd
        SG_ARGCHECK(szAdd != NULL && SG_STRLEN(szAdd) > 0u, szAdd);

        // get the length of both strings (in characters, not bytes)
        SG_ERR_CHECK(  SG_utf8__length_in_characters__sz(pCtx, SG_string__sz(sSanitized), &uSanitized)  );
        SG_ERR_CHECK(  SG_utf8__length_in_characters__sz(pCtx, szAdd, &uAdd)  );

        // keep adding until the sanitized string is long enough
        while (uSanitized < uMin)
        {
            SG_ERR_CHECK(  SG_string__append__sz(pCtx, sSanitized, szAdd)  );
            uSanitized += uAdd;
        }
    }

    // if we need to shorten the string, do that
    if (uFixFlags & SG_VALIDATE__RESULT__TOO_LONG)
    {
        SG_ERR_CHECK(  _truncate_string(pCtx, sSanitized, uMax)  );
    }

    // return the sanitized result (ownership transfers to the caller)
    *ppSanitized = sSanitized;
    sSanitized = NULL;

fail:
    SG_STRING_NULLFREE(pCtx, sSanitized);
    return;
}
/**
 * Unit test: exercises comments, stamps, and tags on a freshly-created repo.
 *
 * Creates a working dir + repo with one committed file, then adds a comment,
 * a stamp, and tags; verifies each via lookup, list, and reverse-lookup; and
 * finally removes a tag and re-verifies the counts.
 *
 * Returns 1 on success, 0 on failure (VERIFY_ERR_CHECK jumps to fail).
 */
int u0050_logstuff_test__1(SG_context * pCtx, SG_pathname* pPathTopDir)
{
    char bufName[SG_TID_MAX_BUFFER_LENGTH];
    SG_pathname* pPathWorkingDir = NULL;
    SG_pathname* pPathFile = NULL;
    SG_vhash* pvh = NULL;
    SG_dagnode* pdn = NULL;
    const char* psz_hid_cs = NULL;
    SG_repo* pRepo = NULL;
    SG_uint32 count;
    SG_rbtree* prb = NULL;
    SG_varray* pva = NULL;
    SG_rbtree* prb_reversed = NULL;
    const char* psz_val = NULL;
    SG_audit q;

    // unique name used for both the working dir and the repo
    VERIFY_ERR_CHECK(  SG_tid__generate2(pCtx, bufName, sizeof(bufName), 32)  );

    /* create the working dir */
    VERIFY_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathWorkingDir, pPathTopDir, bufName)  );
    VERIFY_ERR_CHECK(  SG_fsobj__mkdir__pathname(pCtx, pPathWorkingDir)  );

    /* add stuff */
    VERIFY_ERR_CHECK(  u0050_logstuff__create_file__numbers(pCtx, pPathWorkingDir, "aaa", 20)  );

    /* create the repo */
    VERIFY_ERR_CHECK(  _ut_pt__new_repo(pCtx, bufName, pPathWorkingDir)  );
    VERIFY_ERR_CHECK(  _ut_pt__addremove(pCtx, pPathWorkingDir)  );
    VERIFY_ERR_CHECK(  u0050_logstuff__commit_all(pCtx, pPathWorkingDir, &pdn)  );
    VERIFY_ERR_CHECK(  SG_dagnode__get_id_ref(pCtx, pdn, &psz_hid_cs)  );

    SG_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, bufName, &pRepo)  );

#define MY_COMMENT "The name of this new file sucks!  What kind of a name is 'aaa'?"

    // attach one comment, one stamp, and one tag to the new changeset
    VERIFY_ERR_CHECK(  SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS)  );
    VERIFY_ERR_CHECK(  SG_vc_comments__add(pCtx, pRepo, psz_hid_cs, MY_COMMENT, &q)  );
    VERIFY_ERR_CHECK(  SG_vc_stamps__add(pCtx, pRepo, psz_hid_cs, "crap", &q)  );
    VERIFY_ERR_CHECK(  SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "tcrap", &q)  );

    // verify the comment round-trips
    VERIFY_ERR_CHECK(  SG_vc_comments__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva, 0, &pvh)  );
    VERIFY_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh, "text", &psz_val)  );
    VERIFY_COND("match", (0 == strcmp(psz_val, MY_COMMENT))  );
    SG_VARRAY_NULLFREE(pCtx, pva);

    // verify the stamp round-trips
    VERIFY_ERR_CHECK(  SG_vc_stamps__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva, 0, &pvh)  );
    VERIFY_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh, "stamp", &psz_val)  );
    VERIFY_COND("match", (0 == strcmp(psz_val, "crap"))  );
    SG_VARRAY_NULLFREE(pCtx, pva);

    // verify the tag round-trips
    VERIFY_ERR_CHECK(  SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (1 == count));
    VERIFY_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva, 0, &pvh)  );
    VERIFY_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh, "tag", &psz_val)  );
    VERIFY_COND("match", (0 == strcmp(psz_val, "tcrap"))  );
    SG_VARRAY_NULLFREE(pCtx, pva);

    // tag list and reverse lookup (tag -> cset) should each have one entry
    VERIFY_ERR_CHECK(  SG_vc_tags__list(pCtx, pRepo, &prb)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count)  );
    VERIFY_COND("count", (1 == count));

    VERIFY_ERR_CHECK(  SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb_reversed, &count)  );
    VERIFY_COND("count", (1 == count));

    {
        const char* psz_my_key = NULL;
        const char* psz_my_val = NULL;
        SG_bool b;

        VERIFY_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val)  );
        VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap"))  );
        VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs))  );
    }

    SG_RBTREE_NULLFREE(pCtx, prb_reversed);
    SG_RBTREE_NULLFREE(pCtx, prb);

    // add a second tag to the same changeset
    VERIFY_ERR_CHECK(  SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, "whatever", &q)  );

    VERIFY_ERR_CHECK(  SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (2 == count));
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK(  SG_vc_tags__list(pCtx, pRepo, &prb)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count)  );
    VERIFY_COND("count", (2 == count));

    // two tags, one changeset: reverse lookup collapses to a single key
    VERIFY_ERR_CHECK(  SG_vc_tags__build_reverse_lookup(pCtx, prb, &prb_reversed)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb_reversed, &count)  );
    VERIFY_COND("count", (1 == count));

    {
        const char* psz_my_key = NULL;
        const char* psz_my_val = NULL;
        SG_bool b;

        VERIFY_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, prb_reversed, &b, &psz_my_key, (void**) &psz_my_val)  );
        VERIFY_COND("ok", (0 == strcmp(psz_my_key, psz_hid_cs))  );
        /* we don't know whether psz_my_val is tcrap or whatever. */
        // VERIFY_COND("ok", (0 == strcmp(psz_my_val, "tcrap"))  );
    }

    SG_RBTREE_NULLFREE(pCtx, prb_reversed);
    SG_RBTREE_NULLFREE(pCtx, prb);

    {
        const char* psz_remove = "whatever";

        VERIFY_ERR_CHECK(  SG_vc_tags__remove(pCtx, pRepo, &q, 1, &psz_remove)  );

        /* Note that by removing whatever, we are bringing the tags list back
         * to a state where it has been before (just tcrap).  This changeset in
         * the tags table will have its own csid, because the parentage is
         * different, but it's root idtrie HID will be the same as a previous
         * node. */
    }

    // back down to one tag after the removal
    VERIFY_ERR_CHECK(  SG_vc_tags__lookup(pCtx, pRepo, psz_hid_cs, &pva)  );
    VERIFY_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
    VERIFY_COND("count", (1 == count));
    SG_VARRAY_NULLFREE(pCtx, pva);

    VERIFY_ERR_CHECK(  SG_vc_tags__list(pCtx, pRepo, &prb)  );
    VERIFY_ERR_CHECK(  SG_rbtree__count(pCtx, prb, &count)  );
    VERIFY_COND("count", (1 == count));
    SG_RBTREE_NULLFREE(pCtx, prb);

    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathFile);

    return 1;

fail:
    SG_VHASH_NULLFREE(pCtx, pvh);
    SG_PATHNAME_NULLFREE(pCtx, pPathWorkingDir);
    SG_PATHNAME_NULLFREE(pCtx, pPathFile);

    return 0;
}
void SG_cmd_util__dump_history_results( SG_context * pCtx, SG_console_stream cs, SG_history_result* pHistResult, SG_vhash* pvh_pile, SG_bool bShowOnlyOpenBranchNames, SG_bool bShowFullComments, SG_bool bHideRevnums) { //print the information for each SG_bool bFound = (pHistResult != NULL); const char* currentInfoItem = NULL; SG_uint32 revno; SG_uint32 nCount = 0; SG_uint32 nIndex = 0; const char * pszTag = NULL; const char * pszComment = NULL; const char * pszStamp = NULL; const char * pszParent = NULL; SG_uint32 nResultCount = 0; SG_vhash* pvhRefBranchValues = NULL; SG_vhash* pvhRefClosedBranches = NULL; char* pszMyComment = NULL; if (pvh_pile) { SG_bool bHas = SG_FALSE; SG_ERR_CHECK( SG_vhash__has(pCtx, pvh_pile, "closed", &bHas) ); if (bHas) SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_pile, "closed", &pvhRefClosedBranches) ); SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh_pile, "values", &pvhRefBranchValues) ); } SG_ERR_CHECK( SG_history_result__count(pCtx, pHistResult, &nResultCount) ); while (nResultCount != 0 && bFound) { SG_ERR_CHECK( SG_history_result__get_revno(pCtx, pHistResult, &revno) ); SG_ERR_CHECK( SG_history_result__get_cshid(pCtx, pHistResult, ¤tInfoItem) ); if(!bHideRevnums) SG_ERR_CHECK( SG_console(pCtx, cs, "\n\t%8s: %d:%s\n", "revision", revno, currentInfoItem) ); else SG_ERR_CHECK( SG_console(pCtx, cs, "\n\t%8s: %s\n", "revision", currentInfoItem) ); SG_ERR_CHECK( _dump_branch_name(pCtx, cs, currentInfoItem, bShowOnlyOpenBranchNames, pvhRefBranchValues, pvhRefClosedBranches) ); SG_ERR_CHECK( SG_history_result__get_audit__count(pCtx, pHistResult, &nCount) ); for (nIndex = 0; nIndex < nCount; nIndex++) { SG_int64 itime = -1; char buf_time_formatted[256]; const char * pszUser = NULL; SG_ERR_CHECK( SG_history_result__get_audit__who(pCtx, pHistResult, nIndex, &pszUser) ); SG_ERR_CHECK( SG_history_result__get_audit__when(pCtx, pHistResult, nIndex, &itime) ); SG_ERR_CHECK( SG_time__format_local__i64(pCtx, itime, buf_time_formatted, 
sizeof(buf_time_formatted)) ); SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %s\n", "who", pszUser) ); SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %s\n", "when", buf_time_formatted) ); } SG_ERR_CHECK( SG_history_result__get_tag__count(pCtx, pHistResult, &nCount) ); for (nIndex = 0; nIndex < nCount; nIndex++) { SG_ERR_CHECK( SG_history_result__get_tag__text(pCtx, pHistResult, nIndex, &pszTag) ); SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %s\n", "tag", pszTag) ); } SG_ERR_CHECK( SG_history_result__get_comment__count(pCtx, pHistResult, &nCount) ); for (nIndex = 0; nIndex < nCount; nIndex++) { SG_ERR_CHECK( SG_history_result__get_comment__text(pCtx, pHistResult, nIndex, &pszComment) ); if (pszComment) { SG_ERR_CHECK( _format_comment(pCtx, !bShowFullComments, "\t ", pszComment, &pszMyComment) ); if (pszMyComment) pszComment = pszMyComment; } SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %s\n", "comment", pszComment) ); SG_NULLFREE(pCtx, pszMyComment); } SG_ERR_CHECK( SG_history_result__get_stamp__count(pCtx, pHistResult, &nCount) ); for (nIndex = 0; nIndex < nCount; nIndex++) { SG_ERR_CHECK( SG_history_result__get_stamp__text(pCtx, pHistResult, nIndex, &pszStamp) ); SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %s\n", "stamp", pszStamp) ); } SG_ERR_CHECK( SG_history_result__get_parent__count(pCtx, pHistResult, &nCount) ); for (nIndex = 0; nIndex < nCount; nIndex++) { SG_ERR_CHECK( SG_history_result__get_parent(pCtx, pHistResult, nIndex, &pszParent, &revno) ); SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s: %d:%s\n", "parent", revno, pszParent) ); } SG_ERR_CHECK( SG_history_result__next(pCtx, pHistResult, &bFound) ); } fail: SG_NULLFREE(pCtx, pszMyComment); }
/**
 * Open the "current" file inside the zip archive for reading.
 *
 * Allocates the per-file read state (including the inflate buffer),
 * initializes zlib with a raw-deflate stream (negative windowBits) when the
 * entry is compressed, and attaches the state to s->pfile_in_zip_read.
 * Only stored (0) and Z_DEFLATED entries are supported.
 *
 * BUGFIX: on the error path the read_buffer allocation was leaked (only the
 * struct was freed), and read_buffer was an indeterminate pointer if its own
 * allocation failed.  The field is now NULL-initialized and freed in fail:.
 */
void SG_unzip__currentfile__open(SG_context* pCtx, SG_unzip* s)
{
    int zerr = Z_OK;
    SG_uint32 iSizeVar;
    file_in_zip_read_info_s* pfile_in_zip_read_info = NULL;
    SG_uint32 offset_local_extrafield;  /* offset of the local extra field */
    SG_uint32 size_local_extrafield;    /* size of the local extra field */

    SG_NULLARGCHECK_RETURN( s );
    // NOTE(review): arg-checks a bool; presumably rejects the case where no
    // current file has been located yet -- confirm macro semantics.
    SG_NULLARGCHECK_RETURN(s->current_file_ok);

    // close any previously-opened entry first
    if (s->pfile_in_zip_read)
    {
        SG_unzip__currentfile__close(pCtx, s);
    }

    SG_ERR_CHECK(  sg_unzip__check_coherency(pCtx, s, &iSizeVar, &offset_local_extrafield, &size_local_extrafield)  );

    SG_ERR_CHECK(  SG_malloc(pCtx, sizeof(file_in_zip_read_info_s), &pfile_in_zip_read_info)  );
    pfile_in_zip_read_info->read_buffer = NULL;     // so fail: can free it safely
    SG_ERR_CHECK(  SG_malloc(pCtx, UNZ_BUFSIZE, &pfile_in_zip_read_info->read_buffer)  );

    pfile_in_zip_read_info->offset_local_extrafield = offset_local_extrafield;
    pfile_in_zip_read_info->size_local_extrafield = size_local_extrafield;
    pfile_in_zip_read_info->pos_local_extrafield = 0;
    pfile_in_zip_read_info->stream_initialised = 0;

    // only stored (0) and deflated entries are supported
    if ((s->cur_file_info.compression_method != 0) && (s->cur_file_info.compression_method != Z_DEFLATED))
    {
        SG_ERR_THROW(  SG_ERR_ZIP_BAD_FILE  );
    }

    pfile_in_zip_read_info->crc32_wait = s->cur_file_info.crc;
    pfile_in_zip_read_info->crc32 = 0;
    pfile_in_zip_read_info->compression_method = s->cur_file_info.compression_method;
    pfile_in_zip_read_info->pFile = s->pFile;
    pfile_in_zip_read_info->byte_before_the_zipfile = s->byte_before_the_zipfile;

    pfile_in_zip_read_info->stream.total_out = 0;

    if (s->cur_file_info.compression_method == Z_DEFLATED)
    {
        pfile_in_zip_read_info->stream.zalloc = (alloc_func)0;
        pfile_in_zip_read_info->stream.zfree = (free_func)0;
        pfile_in_zip_read_info->stream.opaque = (voidpf)0;
        pfile_in_zip_read_info->stream.next_in = (voidpf)0;
        pfile_in_zip_read_info->stream.avail_in = 0;

        zerr = inflateInit2(&pfile_in_zip_read_info->stream, -MAX_WBITS);
        if (zerr == Z_OK)
        {
            pfile_in_zip_read_info->stream_initialised = 1;
        }
        else
        {
            SG_ERR_THROW(  SG_ERR_ZLIB(zerr)  );
        }
        /* windowBits is passed < 0 to tell that there is no zlib header.
         * Note that in this case inflate *requires* an extra "dummy" byte
         * after the compressed stream in order to complete decompression and
         * return Z_STREAM_END.
         * In unzip, i don't wait absolutely Z_STREAM_END because I known the
         * size of both compressed and uncompressed data
         */
    }

    pfile_in_zip_read_info->rest_read_compressed = s->cur_file_info.compressed_size;
    pfile_in_zip_read_info->rest_read_uncompressed = s->cur_file_info.uncompressed_size;

    // first byte of entry data = local header offset + fixed header + variable part
    pfile_in_zip_read_info->pos_in_zipfile = s->cur_file_info_internal.offset_curfile + SIZEZIPLOCALHEADER + iSizeVar;

    pfile_in_zip_read_info->stream.avail_in = (SG_uint32)0;

    s->pfile_in_zip_read = pfile_in_zip_read_info;

    return;

fail:
    if (pfile_in_zip_read_info)
    {
        SG_NULLFREE(pCtx, pfile_in_zip_read_info->read_buffer);
        SG_NULLFREE(pCtx, pfile_in_zip_read_info);
    }
}
/**
 * Read up to iLenBuf bytes of the *uncompressed* content of the currently
 * open entry into pBuf, returning the byte count in *piBytesRead.
 * Throws SG_ERR_EOF when nothing is left to read.
 *
 * NOTE(review): on the early 'if (!iLenBuf) return;' and on error paths,
 * *piBytesRead is never written -- callers are presumed to pre-initialise
 * it; confirm at call sites.
 */
void SG_unzip__currentfile__read(SG_context* pCtx, SG_unzip* s, SG_byte* pBuf, SG_uint32 iLenBuf, SG_uint32* piBytesRead)
{
    int zerr=Z_OK;
    SG_uint32 iRead = 0;
    file_in_zip_read_info_s* pfile_in_zip_read_info;

    SG_NULLARGCHECK_RETURN( s );
    pfile_in_zip_read_info = s->pfile_in_zip_read;
    SG_NULLARGCHECK_RETURN( pfile_in_zip_read_info );

    if (!pfile_in_zip_read_info->read_buffer)
    {
        SG_ERR_THROW_RETURN( SG_ERR_UNSPECIFIED );
    }

    if (!iLenBuf)
    {
        return;
    }

    /* Point the zlib output descriptor at the caller's buffer, clamped to
     * the number of uncompressed bytes actually remaining in this entry. */
    pfile_in_zip_read_info->stream.next_out = pBuf;
    pfile_in_zip_read_info->stream.avail_out = iLenBuf;
    if (iLenBuf > pfile_in_zip_read_info->rest_read_uncompressed)
    {
        pfile_in_zip_read_info->stream.avail_out = (SG_uint32)pfile_in_zip_read_info->rest_read_uncompressed;
    }

    while (pfile_in_zip_read_info->stream.avail_out>0)
    {
        /* refill the compressed-input buffer from the archive when it runs dry */
        if ((pfile_in_zip_read_info->stream.avail_in==0) && (pfile_in_zip_read_info->rest_read_compressed>0))
        {
            SG_uint32 uReadThis = UNZ_BUFSIZE;
            if (pfile_in_zip_read_info->rest_read_compressed<uReadThis)
            {
                uReadThis = (SG_uint32)pfile_in_zip_read_info->rest_read_compressed;
            }
            if (uReadThis == 0)
            {
                //TODO - maybe we should change this
                SG_ERR_THROW( SG_ERR_EOF );
            }
            SG_ERR_CHECK( SG_file__seek(pCtx, s->pFile, pfile_in_zip_read_info->pos_in_zipfile + pfile_in_zip_read_info->byte_before_the_zipfile) );
            SG_ERR_CHECK( SG_file__read(pCtx, s->pFile, uReadThis, (SG_byte*) pfile_in_zip_read_info->read_buffer, NULL) );
            pfile_in_zip_read_info->pos_in_zipfile += uReadThis;
            pfile_in_zip_read_info->rest_read_compressed-=uReadThis;
            pfile_in_zip_read_info->stream.next_in = (Bytef*)pfile_in_zip_read_info->read_buffer;
            pfile_in_zip_read_info->stream.avail_in = (SG_uint32)uReadThis;
        }

        if (pfile_in_zip_read_info->compression_method==0)
        {
            /* STORED entry: plain byte copy from the input buffer */
            SG_uint32 uDoCopy,i ;

            if ((pfile_in_zip_read_info->stream.avail_in == 0) && (pfile_in_zip_read_info->rest_read_compressed == 0))
            {
                if (iRead == 0)
                {
                    SG_ERR_THROW( SG_ERR_EOF );
                }
                goto done;
            }

            if (pfile_in_zip_read_info->stream.avail_out < pfile_in_zip_read_info->stream.avail_in)
            {
                uDoCopy = pfile_in_zip_read_info->stream.avail_out ;
            }
            else
            {
                uDoCopy = pfile_in_zip_read_info->stream.avail_in ;
            }

            for (i=0;i<uDoCopy;i++)
            {
                *(pfile_in_zip_read_info->stream.next_out+i) = *(pfile_in_zip_read_info->stream.next_in+i);
            }

            /* keep a running crc over every byte handed to the caller */
            pfile_in_zip_read_info->crc32 = crc32(pfile_in_zip_read_info->crc32, pfile_in_zip_read_info->stream.next_out, uDoCopy);
            pfile_in_zip_read_info->rest_read_uncompressed-=uDoCopy;
            pfile_in_zip_read_info->stream.avail_in -= uDoCopy;
            pfile_in_zip_read_info->stream.avail_out -= uDoCopy;
            pfile_in_zip_read_info->stream.next_out += uDoCopy;
            pfile_in_zip_read_info->stream.next_in += uDoCopy;
            pfile_in_zip_read_info->stream.total_out += uDoCopy;
            iRead += uDoCopy;
        }
        else
        {
            /* DEFLATED entry: inflate directly into the caller's buffer */
            SG_uint32 uTotalOutBefore,uTotalOutAfter;
            const Bytef *bufBefore;
            SG_uint32 uOutThis;
            int flush=Z_SYNC_FLUSH;

            uTotalOutBefore = pfile_in_zip_read_info->stream.total_out;
            bufBefore = pfile_in_zip_read_info->stream.next_out;

            /*
            if ((pfile_in_zip_read_info->rest_read_uncompressed == pfile_in_zip_read_info->stream.avail_out) &&
                (pfile_in_zip_read_info->rest_read_compressed == 0))
                flush = Z_FINISH;
            */

            zerr = inflate(&pfile_in_zip_read_info->stream,flush);

            if ((zerr>=0) && (pfile_in_zip_read_info->stream.msg))
            {
                SG_ERR_THROW( SG_ERR_ZLIB(zerr) );
            }

            uTotalOutAfter = pfile_in_zip_read_info->stream.total_out;
            uOutThis = uTotalOutAfter-uTotalOutBefore;

            pfile_in_zip_read_info->crc32 = crc32(pfile_in_zip_read_info->crc32,bufBefore, (SG_uint32)(uOutThis));
            pfile_in_zip_read_info->rest_read_uncompressed -= uOutThis;
            iRead += (SG_uint32)(uTotalOutAfter - uTotalOutBefore);

            if (zerr == Z_STREAM_END)
            {
                // return (iRead==0) ? UNZ_EOF : iRead;
                // break;
            }
        }
    }

done:
    *piBytesRead = iRead;

fail:
    return;
}
/**
 * Read the local header of the current zipfile entry and check its
 * coherency against the info from the end-of-central-directory record.
 *
 * Stores in *piSizeVar the size of the variable-length info in the local
 * header (filename length plus extra-field length), and returns the offset
 * and size of the local extra field.
 *
 * Fix: the general-purpose bit flags were previously read into u16 and
 * discarded, leaving uFlags stuck at 0, so the crc/size checks below were
 * enforced even for entries written with a data descriptor (flag bit 3),
 * whose local header legitimately contains zeros in those fields.
 */
static void sg_unzip__check_coherency(
    SG_context* pCtx,
    SG_unzip* s,
    SG_uint32* piSizeVar,
    SG_uint32* poffset_local_extrafield,
    SG_uint32* psize_local_extrafield
    )
{
    SG_uint32 uMagic,uData,uFlags = 0;
    SG_uint16 u16 = 0;
    SG_uint16 size_filename = 0;
    SG_uint16 size_extra_field = 0;

    *piSizeVar = 0;
    *poffset_local_extrafield = 0;
    *psize_local_extrafield = 0;

    SG_ERR_CHECK( SG_file__seek(pCtx, s->pFile, s->cur_file_info_internal.offset_curfile + s->byte_before_the_zipfile) );

    /* local file header signature */
    SG_ERR_CHECK( sg_unzip__get_uint32(pCtx, s->pFile,&uMagic) );
    if (uMagic!=0x04034b50)
    {
        SG_ERR_THROW( SG_ERR_ZIP_BAD_FILE );
    }

    /* version needed to extract (unused) */
    SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&u16) );

    /* general purpose bit flags -- bit 3 means a data descriptor follows
     * the file data and the crc/size fields here may be zero */
    SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&u16) );
    uFlags = u16;

    /* compression method -- must agree with the central directory */
    SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&u16) );
    if (u16 != s->cur_file_info.compression_method)
    {
        SG_ERR_THROW( SG_ERR_ZIP_BAD_FILE );
    }
    if ((s->cur_file_info.compression_method!=0) && (s->cur_file_info.compression_method != Z_DEFLATED))
    {
        SG_ERR_THROW( SG_ERR_ZIP_BAD_FILE );
    }

    SG_ERR_CHECK( sg_unzip__get_uint32(pCtx, s->pFile,&uData) ); /* date/time */

    SG_ERR_CHECK( sg_unzip__get_uint32(pCtx, s->pFile,&uData) ); /* crc */
    if ((uData!=s->cur_file_info.crc) && ((uFlags & 8)==0))
    {
        SG_ERR_THROW( SG_ERR_ZIP_BAD_FILE );
    }

    SG_ERR_CHECK( sg_unzip__get_uint32(pCtx, s->pFile,&uData) ); /* size compr */
    if ((uData!=s->cur_file_info.compressed_size) && ((uFlags & 8)==0))
    {
        SG_ERR_THROW( SG_ERR_ZIP_BAD_FILE );
    }

    SG_ERR_CHECK( sg_unzip__get_uint32(pCtx, s->pFile,&uData) ); /* size uncompr */
    if ((uData!=s->cur_file_info.uncompressed_size) && ((uFlags & 8)==0))
    {
        SG_ERR_THROW( SG_ERR_ZIP_BAD_FILE );
    }

    /* filename length -- must agree with the central directory */
    SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&size_filename) );
    if (size_filename!=s->cur_file_info.size_filename)
    {
        SG_ERR_THROW( SG_ERR_ZIP_BAD_FILE );
    }
    *piSizeVar += (SG_uint32)size_filename;

    /* extra field length */
    SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&size_extra_field) );
    *poffset_local_extrafield = s->cur_file_info_internal.offset_curfile + SIZEZIPLOCALHEADER + size_filename;
    *psize_local_extrafield = (SG_uint32)size_extra_field;
    *piSizeVar += (SG_uint32)size_extra_field;

fail:
    return;
}
void SG_unzip__locate_file(SG_context* pCtx, SG_unzip* s, const char* psz_filename, SG_bool* pb, SG_uint64* piLength) { SG_bool b = SG_FALSE; /* We remember the 'current' position in the file so that we can jump * back there if we fail. */ unz_file_info cur_file_infoSaved; unz_file_info_internal cur_file_info_internalSaved; SG_uint32 num_fileSaved; SG_uint32 pos_in_central_dirSaved; SG_NULLARGCHECK_RETURN( s ); SG_ARGCHECK_RETURN((strlen(psz_filename) < UNZ_MAXFILENAMEINZIP), psz_filename); /* TODO hmmm. why do we require the current file state to be "ok" here ? */ if (!s->current_file_ok) { *pb = SG_FALSE; return; } /* Save the current state */ num_fileSaved = s->num_file; pos_in_central_dirSaved = s->pos_in_central_dir; cur_file_infoSaved = s->cur_file_info; cur_file_info_internalSaved = s->cur_file_info_internal; SG_ERR_CHECK( SG_unzip__goto_first_file(pCtx, s, &b, NULL, NULL) ); while (b) { if (strcmp(s->cur_file_name, psz_filename) == 0) { break; } SG_ERR_CHECK( SG_unzip__goto_next_file(pCtx, s, &b, NULL, NULL) ); } if (b) { if (pb) { *pb = SG_TRUE; } if (piLength) { *piLength = s->cur_file_info.uncompressed_size; } } else { if (pb) { *pb = SG_FALSE; goto fail; } else { SG_ERR_THROW( SG_ERR_NOT_FOUND ); } } return; fail: /* We failed, so restore the state of the 'current file' to where we * were. */ s->num_file = num_fileSaved ; s->pos_in_central_dir = pos_in_central_dirSaved ; s->cur_file_info = cur_file_infoSaved; s->cur_file_info_internal = cur_file_info_internalSaved; }
static void sg_unzip__currentfile__get_info(SG_context* pCtx, SG_unzip* s) { SG_uint32 uMagic; SG_NULLARGCHECK_RETURN( s ); SG_ERR_CHECK( SG_file__seek(pCtx, s->pFile, s->pos_in_central_dir + s->byte_before_the_zipfile) ); /* we check the magic */ SG_ERR_CHECK( sg_unzip__get_uint32(pCtx, s->pFile,&uMagic) ); if (uMagic != 0x02014b50) { SG_ERR_THROW( SG_ERR_ZIP_BAD_FILE ); } SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&s->cur_file_info.version) ); SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&s->cur_file_info.version_needed) ); SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&s->cur_file_info.flag) ); SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&s->cur_file_info.compression_method) ); SG_ERR_CHECK( sg_unzip__get_uint32(pCtx, s->pFile,&s->cur_file_info.dosDate) ); SG_ERR_CHECK( sg_unzip__get_uint32(pCtx, s->pFile,&s->cur_file_info.crc) ); SG_ERR_CHECK( sg_unzip__get_uint32(pCtx, s->pFile,&s->cur_file_info.compressed_size) ); SG_ERR_CHECK( sg_unzip__get_uint32(pCtx, s->pFile,&s->cur_file_info.uncompressed_size) ); SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&s->cur_file_info.size_filename) ); SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&s->cur_file_info.size_file_extra) ); SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&s->cur_file_info.size_file_comment) ); SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&s->cur_file_info.disk_num_start) ); SG_ERR_CHECK( sg_unzip__get_uint16(pCtx, s->pFile,&s->cur_file_info.internal_fa) ); SG_ERR_CHECK( sg_unzip__get_uint32(pCtx, s->pFile,&s->cur_file_info.external_fa) ); SG_ERR_CHECK( sg_unzip__get_uint32(pCtx, s->pFile,&s->cur_file_info_internal.offset_curfile) ); SG_ASSERT (s->cur_file_info.size_filename < UNZ_MAXFILENAMEINZIP); SG_ERR_CHECK( SG_file__read(pCtx, s->pFile, s->cur_file_info.size_filename, (SG_byte*) s->cur_file_name, NULL) ); s->cur_file_name[s->cur_file_info.size_filename] = 0; fail: return; }
/**
 * Try to FIX the ISSUE.
 *
 * Alter something in the pendingtree/issue/WD and then SAVE the pendingtree.
 * We allow this to be an incremental save after just this issue.  We also
 * allow the VFILE lock to be released while the external merge (DiffMerge)
 * tool is running.  (Not because DiffMerge needs it, but rather so that they
 * could do other STATUS/DIFF commands in another shell while doing the text
 * merge.)
 *
 * On return *pFixStatus reports what happened (e.g. FIX_USER_MERGED,
 * FIX_LOST_RACE).
 */
static void _resolve__fix(SG_context * pCtx,
                          struct _resolve_data * pData,
                          const char * pszGid,
                          enum _fix_status * pFixStatus)
{
    const SG_vhash * pvhIssue;
    SG_string * pStrRepoPath = NULL;
    SG_int64 i64;
    SG_mrg_cset_entry_conflict_flags conflict_flags;
    SG_portability_flags portability_flags;
    SG_bool bIsResolved = SG_FALSE;
    SG_bool bCollisions;

    // Fetch the ISSUE using the current pendingtree (allocating one if
    // necessary) and print detailed info about the ISSUE on the console.

    SG_ERR_CHECK( _resolve__lookup_issue(pCtx, pData, pszGid, &pvhIssue) );
    SG_ERR_CHECK( _resolve__list(pCtx, pData, pvhIssue, &pStrRepoPath) );

    // TODO 2010/07/12 We should have a --prompt option to allow them to
    // TODO            skip an issue.  Like "/bin/rm -i *".

    // Skip the issue if it is already resolved.  In theory, we should not
    // get this (because we filtered the pData->psaGids by status as we
    // parsed the command line arguments), but if they did another resolve
    // in another shell while we didn't have the lock, it could happen.

    SG_ERR_CHECK( _resolve__is_resolved(pCtx, pvhIssue, &bIsResolved) );
    if (bIsResolved)
    {
        SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "Issue already resolved; nothing to be done for '%s'.\n", SG_string__sz(pStrRepoPath)) );
        goto done;
    }

    // There are 2 main types of problems:
    // [1] Conflicts within the text of a file (where the builtin auto-merge
    //     failed or was not used) and for which we need to ask them to manually
    //     merge the content (using an external tool like DiffMerge).
    // [2] Structural changes, including: MOVEs, RENAMEs, CHMODs, XATTRs,
    //     entryname collisions, potential entryname collisions, and etc.
    //
    // We could also have both -- both edit conflicts and rename conflicts,
    // for example.
    //
    // Do these in 2 steps so that we can release the VFILE lock while they
    // are editing the file.

    //////////////////////////////////////////////////////////////////
    // [1]
    //////////////////////////////////////////////////////////////////

    SG_ERR_CHECK( SG_vhash__get__int64(pCtx, pvhIssue, "conflict_flags", &i64) );
    conflict_flags = (SG_mrg_cset_entry_conflict_flags)i64;

    if (conflict_flags & SG_MRG_CSET_ENTRY_CONFLICT_FLAGS__DIVERGENT_FILE_EDIT__MASK__NOT_OK)
    {
        SG_ERR_CHECK( _resolve__fix__run_external_file_merge(pCtx, pData, pszGid, pvhIssue, pStrRepoPath, pFixStatus) );
        // pvhIssue may now point into a freed pendingtree -- poison it.
        pvhIssue = NULL;

        if (*pFixStatus != FIX_USER_MERGED)
            goto done;

        // the above MAY have freed and reloaded the pendingtree (and
        // invalidated pvhIssue), so re-fetch it and/or re-set our variables.

        SG_ERR_CHECK( _resolve__lookup_issue(pCtx, pData, pszGid, &pvhIssue) );
        SG_ERR_CHECK( _resolve__is_resolved(pCtx, pvhIssue, &bIsResolved) );
        if (bIsResolved)
        {
            // Someone else marked it resolved while were waiting for
            // the user to edit the file and while we didn't have the
            // file lock.  We should stop here.
            *pFixStatus = FIX_LOST_RACE;
            goto done;
        }

        // re-read the flags; the merge may have changed them
        SG_ERR_CHECK( SG_vhash__get__int64(pCtx, pvhIssue, "conflict_flags", &i64) );
        conflict_flags = (SG_mrg_cset_entry_conflict_flags)i64;
    }

#if 0 && defined(DEBUG)
    SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR, "RESOLVE: Issue between [1] and [2]: '%s'\n", SG_string__sz(pStrRepoPath)) );
    SG_ERR_IGNORE( SG_vhash_debug__dump_to_console(pCtx, pvhIssue) );
#endif

    //////////////////////////////////////////////////////////////////
    // [2]
    //////////////////////////////////////////////////////////////////

    SG_ERR_CHECK( SG_vhash__get__bool(pCtx, pvhIssue, "collision_flags", &bCollisions) );
    SG_ERR_CHECK( SG_vhash__get__int64(pCtx, pvhIssue, "portability_flags", &i64) );
    portability_flags = (SG_portability_flags)i64;

    if (conflict_flags & SG_MRG_CSET_ENTRY_CONFLICT_FLAGS__UNDELETE__MASK)
    {
        SG_ERR_CHECK( _resolve__fix__structural__delete(pCtx, pData, pszGid, pvhIssue, pStrRepoPath, conflict_flags, bCollisions, portability_flags, pFixStatus) );
        if (*pFixStatus != FIX_USER_MERGED)
            goto done;
    }
    else if (bCollisions || (portability_flags) || (conflict_flags & ~SG_MRG_CSET_ENTRY_CONFLICT_FLAGS__DIVERGENT_FILE_EDIT__MASK))
    {
        SG_ERR_CHECK( _resolve__fix__structural__non_delete(pCtx, pData, pszGid, pvhIssue, pStrRepoPath, conflict_flags, bCollisions, portability_flags, pFixStatus) );
        if (*pFixStatus != FIX_USER_MERGED)
            goto done;
    }

    // mark the issue as RESOLVED and save the pendingtree.

    SG_ERR_CHECK( _resolve__mark(pCtx, pData, pvhIssue, SG_TRUE) );
    *pFixStatus = FIX_USER_MERGED;

    //////////////////////////////////////////////////////////////////
    // We've completely resolved the issue, if there were ~mine files,
    // we can delete them.

    if (conflict_flags & SG_MRG_CSET_ENTRY_CONFLICT_FLAGS__DIVERGENT_FILE_EDIT__MASK__NOT_OK)
    {
        SG_ERR_CHECK( _resolve__lookup_issue(pCtx, pData, pszGid, &pvhIssue) );
        SG_ERR_CHECK( _resolve__delete_temp_files(pCtx, pData, pszGid, pvhIssue) );
    }

done:
    ;
fail:
    SG_STRING_NULLFREE(pCtx, pStrRepoPath);
}
/**
 * Determine the 'whoami' username to use for a repo.
 *
 * szRepoName may be:
 *   - NULL: use the repo associated with the cwd;
 *   - an http(s) URL: query the remote repo's admin id over HTTP and map
 *     the locally-saved userid for that admin id to a username via the
 *     remote users list;
 *   - otherwise: treated as a local repo descriptor name.
 *
 * *ppUsername receives a newly-allocated username string, or NULL when no
 * username could be determined (lookups are best-effort; several failures
 * are deliberately ignored with SG_ERR_IGNORE).
 *
 * NOTE(review): the local pUsername SG_string is declared and freed in the
 * fail path but never assigned anywhere -- appears to be dead.
 */
void SG_cmd_util__get_username_for_repo(
    SG_context *pCtx,
    const char *szRepoName,
    char **ppUsername
    )
{
    SG_string * pUsername = NULL;
    SG_repo * pRepo = NULL;
    char * psz_username = NULL;

    SG_curl * pCurl = NULL;
    SG_string * pUri = NULL;
    SG_string * pResponse = NULL;
    SG_int32 responseStatusCode = 0;
    SG_vhash * pRepoInfo = NULL;
    char * psz_userid = NULL;
    SG_varray * pUsers = NULL;

    SG_NULLARGCHECK_RETURN(ppUsername);

    if(!szRepoName)
    {
        // Look up username based on 'whoami' of repo associated with cwd.
        SG_ERR_IGNORE( SG_cmd_util__get_repo_from_cwd(pCtx, &pRepo, NULL) );
        if(pRepo)
            SG_ERR_IGNORE( SG_user__get_username_for_repo(pCtx, pRepo, &psz_username) );
        SG_REPO_NULLFREE(pCtx, pRepo);
    }
    else if(SG_sz__starts_with(szRepoName, "http://") || SG_sz__starts_with(szRepoName, "https://"))
    {
        // Look up username based on 'whoami' of admin id of remote repo.

        // fetch "<url>.json" to learn the remote repo's admin id
        SG_ERR_CHECK( SG_curl__alloc(pCtx, &pCurl) );

        SG_ERR_CHECK( SG_STRING__ALLOC__SZ(pCtx, &pUri, szRepoName) );
        SG_ERR_CHECK( SG_string__append__sz(pCtx, pUri, ".json") );

        SG_ERR_CHECK( SG_curl__reset(pCtx, pCurl) );
        SG_ERR_CHECK( SG_curl__setopt__sz(pCtx, pCurl, CURLOPT_URL, SG_string__sz(pUri)) );

        SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pResponse) );
        SG_ERR_CHECK( SG_curl__set__write_string(pCtx, pCurl, pResponse) );

        SG_ERR_CHECK( SG_curl__perform(pCtx, pCurl) );
        SG_ERR_CHECK( SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &responseStatusCode) );

        if(responseStatusCode==200)
        {
            const char * szAdminId = NULL;
            SG_ERR_CHECK( SG_VHASH__ALLOC__FROM_JSON__STRING(pCtx, &pRepoInfo, pResponse) );
            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pRepoInfo, SG_SYNC_REPO_INFO_KEY__ADMIN_ID, &szAdminId) );

            // look up the locally-saved whoami userid for that admin id
            SG_ERR_CHECK( SG_string__clear(pCtx, pUri) );
            SG_ERR_CHECK( SG_string__append__format(pCtx, pUri, "/admin/%s/whoami/userid", szAdminId) );

            SG_ERR_IGNORE( SG_localsettings__get__sz(pCtx, SG_string__sz(pUri), NULL, &psz_userid, NULL) );

            if(psz_userid)
            {
                // We now have the userid. Look up the username.
                SG_ERR_CHECK( SG_string__clear(pCtx, pUri) );
                SG_ERR_CHECK( SG_string__append__format(pCtx, pUri, "%s/users.json", szRepoName) );

                SG_ERR_CHECK( SG_curl__reset(pCtx, pCurl) );
                SG_ERR_CHECK( SG_curl__setopt__sz(pCtx, pCurl, CURLOPT_URL, SG_string__sz(pUri)) );

                SG_ERR_CHECK( SG_string__clear(pCtx, pResponse) );
                SG_ERR_CHECK( SG_curl__set__write_string(pCtx, pCurl, pResponse) );

                SG_ERR_CHECK( SG_curl__perform(pCtx, pCurl) );
                SG_ERR_CHECK( SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &responseStatusCode) );

                if(responseStatusCode==200)
                {
                    // scan the remote users list for the record matching our userid
                    SG_uint32 i, nUsers;
                    SG_ERR_CHECK( SG_VARRAY__ALLOC__FROM_JSON__STRING(pCtx, &pUsers, pResponse) );
                    SG_ERR_CHECK( SG_varray__count(pCtx, pUsers, &nUsers) );
                    for(i=0; i<nUsers; ++i)
                    {
                        SG_vhash * pUser = NULL;
                        const char * psz_recid = NULL;
                        SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pUsers, i, &pUser) );
                        SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pUser, "recid", &psz_recid) );
                        if(!strcmp(psz_recid, psz_userid))
                        {
                            const char * psz_name = NULL;
                            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pUser, "name", &psz_name) );
                            SG_ERR_CHECK( SG_STRDUP(pCtx, psz_name, &psz_username) );
                            break;
                        }
                    }
                    SG_VARRAY_NULLFREE(pCtx, pUsers);
                }

                SG_NULLFREE(pCtx, psz_userid);
            }

            SG_VHASH_NULLFREE(pCtx, pRepoInfo);
        }

        SG_STRING_NULLFREE(pCtx, pResponse);
        SG_STRING_NULLFREE(pCtx, pUri);
        SG_CURL_NULLFREE(pCtx, pCurl);
    }
    else
    {
        // Look up username based on 'whoami' of repo provided.
        SG_ERR_CHECK( SG_REPO__OPEN_REPO_INSTANCE(pCtx, szRepoName, &pRepo) );
        SG_ERR_IGNORE( SG_user__get_username_for_repo(pCtx, pRepo, &psz_username) );
        SG_REPO_NULLFREE(pCtx, pRepo);
    }

    *ppUsername = psz_username;

    return;
fail:
    SG_STRING_NULLFREE(pCtx, pUsername);
    SG_REPO_NULLFREE(pCtx, pRepo);
    SG_NULLFREE(pCtx, psz_username);
    SG_CURL_NULLFREE(pCtx, pCurl);
    SG_STRING_NULLFREE(pCtx, pUri);
    SG_STRING_NULLFREE(pCtx, pResponse);
    SG_VHASH_NULLFREE(pCtx, pRepoInfo);
    SG_NULLFREE(pCtx, psz_userid);
    SG_VARRAY_NULLFREE(pCtx, pUsers);
}
/** * Assuming that we have something of the form: * * vv resolve [--foo] <arg_0> [<arg_1> [<arg_2> ...]] * * where each <arg_x> is an absolute or relative path in the WD * (probably not a repo-path). * * Use the PENDINGTREE to lookup each path and get the entry's GID. * Use the GID to search for an ISSUE in the list of issues. If we * find it, add the GID to the stringarray we are building. If not, * throw an error. */ static void _resolve__map_args_to_gids(SG_context * pCtx, struct _resolve_data * pData, SG_uint32 count_args, const char ** paszArgs, SG_bool bWantResolved, SG_bool bWantUnresolved) { SG_pathname * pPath_k = NULL; char * pszGid_k = NULL; SG_uint32 kArg; SG_bool bWantBoth = (bWantResolved && bWantUnresolved); SG_ERR_CHECK( SG_STRINGARRAY__ALLOC(pCtx, &pData->psaGids, count_args) ); for (kArg=0; kArg<count_args; kArg++) { const SG_vhash * pvhIssue_k; SG_bool bFound; SG_bool bDuplicate; SG_bool bWantThisOne; // take each <arg_k> and get a full pathname for it and // search for it in the pendingtree and get its GID. // in theory, if an entry has an issue, it is dirty and // should have a ptnode. if (paszArgs[kArg][0] == '@') SG_ERR_CHECK( SG_workingdir__construct_absolute_path_from_repo_path2(pCtx, pData->pPendingTree, paszArgs[kArg], &pPath_k) ); else SG_ERR_CHECK( SG_PATHNAME__ALLOC__SZ(pCtx, &pPath_k, paszArgs[kArg]) ); SG_ERR_CHECK( SG_pendingtree__get_gid_from_local_path(pCtx, pData->pPendingTree, pPath_k, &pszGid_k) ); #if 0 && defined(DEBUG) SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR, ("Mapped arg[%d] '%s' to:\n" "\t%s\n" "\t[gid %s]\n"), kArg, paszArgs[kArg], SG_pathname__sz(pPath_k), pszGid_k) ); #endif // see if there is an ISSUE for this GID. 
SG_ERR_CHECK( SG_pendingtree__find_wd_issue_by_gid(pCtx, pData->pPendingTree, pszGid_k, &bFound, &pvhIssue_k) ); if (!bFound) SG_ERR_THROW2( SG_ERR_ISSUE_NOT_FOUND, (pCtx, "No issue found for '%s': %s", paszArgs[kArg], SG_pathname__sz(pPath_k)) ); if (bWantBoth) bWantThisOne = SG_TRUE; else { SG_int64 s; SG_pendingtree_wd_issue_status status; SG_bool bResolved; SG_ERR_CHECK_RETURN( SG_vhash__get__int64(pCtx, pvhIssue_k, "status", &s) ); status = (SG_pendingtree_wd_issue_status)s; bResolved = ((status & SG_ISSUE_STATUS__MARKED_RESOLVED) == SG_ISSUE_STATUS__MARKED_RESOLVED); bWantThisOne = ((bWantResolved && bResolved) || (bWantUnresolved && !bResolved)); } if (bWantThisOne) { // check for duplicate args on command line. (or rather, args that // map to the same GID.) SG_ERR_CHECK( SG_stringarray__find(pCtx, pData->psaGids, pszGid_k, 0, &bDuplicate, NULL) ); if (bDuplicate) SG_ERR_THROW2( SG_ERR_DUPLICATE_ISSUE, (pCtx, "Argument '%s' maps to an issue already named.", paszArgs[kArg]) ); SG_ERR_CHECK( SG_stringarray__add(pCtx, pData->psaGids, pszGid_k) ); } SG_NULLFREE(pCtx, pszGid_k); SG_PATHNAME_NULLFREE(pCtx, pPath_k); } return; fail: SG_NULLFREE(pCtx, pszGid_k); SG_PATHNAME_NULLFREE(pCtx, pPath_k); }
void SG_cmd_util__get_username_and_password( SG_context *pCtx, const char *szWhoami, SG_bool force_whoami, SG_bool bHadSavedCredentials, SG_uint32 kAttempt, SG_string **ppUsername, SG_string **ppPassword ) { SG_string * pUsername = NULL; SG_string * pPassword = NULL; SG_NULLARGCHECK_RETURN(ppPassword); SG_NULLARGCHECK_RETURN(ppUsername); if (kAttempt == 0) { if (bHadSavedCredentials) { SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "\nAuthorization required. Saved username/password not valid.\n") ); } else { SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "\nAuthorization required.") ); if (SG_password__supported()) SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, " Use --remember to save this password.") ); SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "\n") ); } } else if (kAttempt >= 3) { SG_ERR_THROW( SG_ERR_AUTHORIZATION_TOO_MANY_ATTEMPTS ); } else { SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "\nInvalid username or password. Please try again.\n") ); } if(szWhoami!=NULL && force_whoami) { SG_ERR_CHECK( SG_STRING__ALLOC__SZ(pCtx, &pUsername, szWhoami) ); SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Enter password for %s: ", szWhoami) ); SG_ERR_CHECK( SG_console__get_password(pCtx, &pPassword) ); } else { if(szWhoami) { SG_bool bAllWhitespace = SG_FALSE; SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Enter username [%s]: ", szWhoami) ); SG_ERR_CHECK( SG_console__readline_stdin(pCtx, &pUsername) ); SG_ERR_CHECK( SG_sz__is_all_whitespace(pCtx, SG_string__sz(pUsername), &bAllWhitespace) ); if(bAllWhitespace) SG_ERR_CHECK( SG_string__set__sz(pCtx, pUsername, szWhoami) ); } else { SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR, "Enter username: "******"Enter password: ") ); SG_ERR_CHECK( SG_console__get_password(pCtx, &pPassword) ); } *ppUsername = pUsername; *ppPassword = pPassword; return; fail: SG_STRING_NULLFREE(pCtx, pUsername); SG_STRING_NULLFREE(pCtx, pPassword); }
void SG_validate__check( SG_context* pCtx, const char* szValue, SG_uint32 uMin, SG_uint32 uMax, const char* szInvalids, SG_bool bControls, SG_uint32* pResult ) { SG_uint32 uResult = 0u; SG_uint32 uLength = 0u; SG_ARGCHECK(uMin <= uMax, uMin|uMax); SG_NULLARGCHECK(pResult); // treat NULL as an empty string if (szValue == NULL) { szValue = ""; } // validate minimum length uLength = SG_STRLEN(szValue); if (uLength < uMin) { uResult |= SG_VALIDATE__RESULT__TOO_SHORT; } // validate maximum length if (uLength > uMax) { uResult |= SG_VALIDATE__RESULT__TOO_LONG; } // validate specified characters if (szInvalids != NULL) { SG_bool bShares = SG_FALSE; SG_ERR_CHECK( SG_utf8__shares_characters(pCtx, szValue, szInvalids, &bShares) ); if (bShares != SG_FALSE) { uResult |= SG_VALIDATE__RESULT__INVALID_CHARACTER; } } // validate control characters if (bControls != SG_FALSE) { SG_bool bShares = SG_FALSE; SG_ERR_CHECK( SG_utf8__shares_characters(pCtx, szValue, SG_VALIDATE__CHARS__CONTROL, &bShares) ); if (bShares != SG_FALSE) { uResult |= SG_VALIDATE__RESULT__CONTROL_CHARACTER; } } *pResult = uResult; fail: return; }
static void _format_comment(SG_context* pCtx, SG_bool onlyIncludeFirstLine, const char* szLinePrefix, const char* szComment, char** ppszReturn) { SG_bool bFoundLineBreak = SG_FALSE; SG_string* pstr = NULL; SG_uint32 lenPrefix = SG_STRLEN(szLinePrefix); SG_uint32 offset; { const char* pos; for (pos = szComment; *pos; pos++) { if (*pos == SG_CR || *pos == SG_LF) { bFoundLineBreak = SG_TRUE; break; } } if (!bFoundLineBreak) return; if(onlyIncludeFirstLine) { SG_ERR_CHECK( SG_STRING__ALLOC__BUF_LEN(pCtx, &pstr, (const SG_byte*)szComment, (SG_uint32)(pos-szComment)) ); SG_ERR_CHECK( SG_string__sizzle(pCtx, &pstr, (SG_byte**)ppszReturn, NULL) ); return; } offset = (SG_uint32)(pos-szComment); } SG_ERR_CHECK( SG_STRING__ALLOC__SZ(pCtx, &pstr, szComment) ); while (offset < SG_string__length_in_bytes(pstr)) { SG_byte current; SG_ERR_CHECK( SG_string__get_byte_l(pCtx, pstr, offset, ¤t) ); if (current == SG_CR) { SG_byte next; bFoundLineBreak = SG_TRUE; SG_ERR_CHECK( SG_string__get_byte_l(pCtx, pstr, offset+1, &next) ); if (next != SG_LF) { // Mac format, lines end with \r only. Consoles will not advance a line. SG_ERR_CHECK( SG_string__insert__sz(pCtx, pstr, offset+1, "\n") ); } offset++; } else if (current == SG_LF) { bFoundLineBreak = SG_TRUE; } offset++; if (bFoundLineBreak) { SG_ERR_CHECK( SG_string__insert__sz(pCtx, pstr, offset, szLinePrefix) ); offset += lenPrefix; bFoundLineBreak = SG_FALSE; } } SG_ERR_CHECK( SG_string__sizzle(pCtx, &pstr, (SG_byte**)ppszReturn, NULL) ); return; fail: SG_STRING_NULLFREE(pCtx, pstr); }
/**
 * Register the console and file log handlers, honoring the localsettings
 * log path and log level ("quiet" / "normal" / "verbose"; anything else
 * falls through to the defaults).
 *
 * Setup failures are deliberately swallowed (see the err_reset at the
 * bottom): inability to set up logging should not abort the program.
 */
static void _set_up_logging(
    SG_context * pCtx,
    SG_log_console__data * pcLogStdData,
    SG_log_text__data * pcLogFileData,
    SG_log_text__writer__daily_path__data * pcLogFileWriterData)
{
    // Code copied from _my_main() in sg.c
    char * szLogLevel = NULL;
    char * szLogPath = NULL;
    SG_uint32 logFileFlags = SG_LOG__FLAG__HANDLER_TYPE__ALL;

    // find the appropriate log path
    SG_ERR_CHECK( SG_localsettings__get__sz(pCtx, SG_LOCALSETTING__LOG_PATH, NULL, &szLogPath, NULL) );

    // get the configured log level
    SG_ERR_CHECK( SG_localsettings__get__sz(pCtx, SG_LOCALSETTING__LOG_LEVEL, NULL, &szLogLevel, NULL) );

    // register the stdout logger
    SG_ERR_CHECK( SG_log_console__set_defaults(pCtx, pcLogStdData) );
    SG_ERR_CHECK( SG_log_console__register(pCtx, pcLogStdData, NULL, SG_LOG__FLAG__HANDLER_TYPE__NORMAL) );

    // register the file logger
    SG_ERR_CHECK( SG_log_text__set_defaults(pCtx, pcLogFileData) );
    pcLogFileData->fWriter = SG_log_text__writer__daily_path;
    pcLogFileData->pWriterData = pcLogFileWriterData;
    pcLogFileData->szRegisterMessage = NULL;
    pcLogFileData->szUnregisterMessage = NULL;

    // translate the log-level string into handler flags and verbosity knobs
    if (szLogLevel != NULL)
    {
        if (SG_stricmp(szLogLevel, "quiet") == 0)
        {
            logFileFlags = SG_LOG__FLAG__HANDLER_TYPE__QUIET;
            pcLogFileData->bLogVerboseOperations = SG_FALSE;
            pcLogFileData->bLogVerboseValues = SG_FALSE;
            pcLogFileData->szVerboseFormat = NULL;
            pcLogFileData->szInfoFormat = NULL;
        }
        else if (SG_stricmp(szLogLevel, "normal") == 0)
        {
            logFileFlags = SG_LOG__FLAG__HANDLER_TYPE__NORMAL;
            pcLogFileData->bLogVerboseOperations = SG_FALSE;
            pcLogFileData->bLogVerboseValues = SG_FALSE;
            pcLogFileData->szVerboseFormat = NULL;
        }
        else if (SG_stricmp(szLogLevel, "verbose") == 0)
        {
            logFileFlags = SG_LOG__FLAG__HANDLER_TYPE__ALL;
            pcLogFileData->szRegisterMessage = "---- vscript started logging ----";
            pcLogFileData->szUnregisterMessage = "---- vscript stopped logging ----";
        }
    }
    logFileFlags |= SG_LOG__FLAG__DETAILED_MESSAGES;

    SG_ERR_CHECK( SG_log_text__writer__daily_path__set_defaults(pCtx, pcLogFileWriterData) );
    pcLogFileWriterData->bReopen = SG_FALSE;
    pcLogFileWriterData->ePermissions = 0666;
    if (szLogPath != NULL)
        SG_ERR_CHECK( SG_PATHNAME__ALLOC__SZ(pCtx, &pcLogFileWriterData->pBasePath, szLogPath) );
    else
        SG_ERR_CHECK( SG_PATHNAME__ALLOC__LOG_DIRECTORY(pCtx, &pcLogFileWriterData->pBasePath) );
    pcLogFileWriterData->szFilenameFormat = "vscript-%d-%02d-%02d.log";

    SG_ERR_CHECK( SG_log_text__register(pCtx, pcLogFileData, NULL, logFileFlags) );

fail:
    // best-effort: drop any error raised during logger setup
    SG_context__err_reset(pCtx);
    SG_NULLFREE(pCtx, szLogPath);
    SG_NULLFREE(pCtx, szLogLevel);
}
/**
 * Handle the RESOLVE command.
 *
 * Maps the command options onto (bAll, bWantResolved, bWantUnresolved,
 * bReqArg), validates the option/arg combination, scans and loads the
 * pendingtree, collects the target issue GIDs, and dispatches to
 * list / mark / fix.
 */
void do_cmd_resolve(SG_context * pCtx, SG_option_state * pOptSt, SG_uint32 count_args, const char** paszArgs)
{
	struct _resolve_data data;
	SG_uint32 sum = 0;
	SG_bool bAll = SG_FALSE;
	SG_bool bWantResolved = SG_FALSE;
	SG_bool bWantUnresolved = SG_FALSE;
	SG_bool bReqArg = SG_FALSE;

	memset(&data, 0, sizeof(data));
	data.pPathCwd = NULL;
	data.pPendingTree = NULL;
	data.psaGids = NULL;
	data.bIgnoreWarnings = SG_TRUE;            // TODO what should this be?

	// allow at most ONE of the command options.
	//
	// the --{List,Mark,Unmark}All options do not allow ARGs.
	//
	// the --{Mark,Unmark} require at least one ARG.
	// the --List allows 0 or more ARGs.
	//
	// if no command option, allow 0 or more ARGs.
	//
	// most commands do not require there to be issues; rather
	// they just don't do anything.
	//
	// WARNING: We set sg_cl_options[].has_arg to 0 for all of
	//          our commands options so that we get all of the
	//          pathnames in ARGs rather than bound to the option.
	//          That is, I want to be able to say:
	//               vv resolve --mark foo bar
	//          rather than:
	//               vv resolve --mark foo --mark bar
	//
	//          It also allows me to have:
	//               vv resolve --list
	//          and
	//               vv resolve --list foo

	if (pOptSt->bListAll)
	{
		sum++; bAll = SG_TRUE; bWantResolved = SG_TRUE; bWantUnresolved = SG_TRUE; bReqArg = SG_FALSE;
	}
	if (pOptSt->bMarkAll)
	{
		sum++; bAll = SG_TRUE; bWantResolved = SG_TRUE; bWantUnresolved = SG_TRUE; bReqArg = SG_FALSE;
	}
//	if (pOptSt->bUnmarkAll)
//	{
//		sum++; bAll = SG_TRUE; bWantResolved = SG_TRUE; bWantUnresolved = SG_TRUE; bReqArg = SG_FALSE;
//	}
	if (pOptSt->bList)
	{
		if (count_args == 0)
		{
			sum++; bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE; bReqArg = SG_FALSE;
		}
		else
		{
			sum++; bAll = SG_FALSE; bWantResolved = SG_TRUE; bWantUnresolved = SG_TRUE; bReqArg = SG_FALSE;
		}
	}
	if (pOptSt->bMark)
	{
		sum++; bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE; bReqArg = SG_TRUE;
	}
//	if (pOptSt->bUnmark)
//	{
//		sum++; bAll = SG_FALSE; bWantResolved = SG_TRUE; bWantUnresolved = SG_FALSE; bReqArg = SG_TRUE;
//	}
	if (sum == 0)
	{
		// no command option: default to fixing unresolved issues
		bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE; bReqArg = SG_FALSE;
	}

	if (sum > 1)
		SG_ERR_THROW( SG_ERR_USAGE );
	if (bReqArg && (count_args == 0))
		SG_ERR_THROW( SG_ERR_USAGE );
	if (bAll && (count_args > 0))
		SG_ERR_THROW( SG_ERR_USAGE );

	SG_ERR_CHECK( SG_PATHNAME__ALLOC(pCtx, &data.pPathCwd) );
	SG_ERR_CHECK( SG_pathname__set__from_cwd(pCtx, data.pPathCwd) );

	// Do a complete scan first.  This ensures that the pendingtree knows
	// about everything that is dirty in the WD and helps ensure that every
	// issue in the issues list has a ptnode in the pendingtree.
	//
	// TODO 2010/07/16 Technically, this should NOT be required.  But it
	// TODO            helps.  The problem is that when a file is edited
	// TODO            we don't automatically get the notification, rather
	// TODO            we do a status aka scan (and/or use the timestamp
	// TODO            cache) when various commands start which detect
	// TODO            file content changes.  So the fact that the MERGE
	// TODO            may have written a bunch of merged/edited files
	// TODO            doesn't necessarily mean that they are listed in
	// TODO            the pendingtree -- because the user may have edited
	// TODO            them again (or edited other files) since the merge
	// TODO            completed.  So we scan.
	// TODO
	// TODO            See also the comment in sg.c:do_cmd_commit() for sprawl-809.
	// TODO
	// TODO            What this scan is helping to hide is a problem where
	// TODO            we're hitting the issues list for GIDs and then
	// TODO            using SG_pendingtree__find_repo_path_by_gid() to
	// TODO            dynamically convert it into a "live/current" repo-path.
	// TODO            and it assumes that it is only called for dirty entries
	// TODO            (or rather, for entries that have a ptnode).  We need
	// TODO            to fix that.

	SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, data.pPathCwd, data.bIgnoreWarnings, &data.pPendingTree) );
	SG_ERR_CHECK( SG_pendingtree__scan(pCtx, data.pPendingTree, SG_TRUE, NULL, 0, NULL, 0) );
	SG_PENDINGTREE_NULLFREE(pCtx, data.pPendingTree);

	// Now load the pendingtree for real.

	SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, data.pPathCwd, data.bIgnoreWarnings, &data.pPendingTree) );

	// build the list of target GIDs, either from the ARGs or from every
	// issue matching the wanted resolved/unresolved status

	if (count_args > 0)
		SG_ERR_CHECK( _resolve__map_args_to_gids(pCtx, &data, count_args, paszArgs, bWantResolved, bWantUnresolved) );
	else
		SG_ERR_CHECK( _resolve__get_all_issue_gids(pCtx, &data, bWantResolved, bWantUnresolved) );

	//////////////////////////////////////////////////////////////////

	if (pOptSt->bListAll || pOptSt->bList)
	{
		SG_ERR_CHECK( _resolve__do_list(pCtx, &data) );
	}
	else if (pOptSt->bMarkAll || pOptSt->bMark)
	{
		SG_ERR_CHECK( _resolve__do_mark(pCtx, &data, SG_TRUE) );
	}
//	else if (pOptSt->bUnmarkAll || pOptSt->bUnmark)
//	{
//		SG_ERR_CHECK( _resolve__do_mark(pCtx, &data, SG_FALSE) );
//	}
	else // no command option given -- assume we want to FIX the issues
	{
		SG_ERR_CHECK( _resolve__do_fix(pCtx, &data) );
	}

fail:
	SG_PATHNAME_NULLFREE(pCtx, data.pPathCwd);
	SG_PENDINGTREE_NULLFREE(pCtx, data.pPendingTree);
	SG_STRINGARRAY_NULLFREE(pCtx, data.psaGids);
}
/**
 * Apply one or more tags to a single changeset.
 *
 * The target changeset is chosen as follows:
 *   - if psz_spec_cs is given and bRev is set, it is resolved as a
 *     (possibly partial) changeset hid via the VC dag;
 *   - if psz_spec_cs is given and bRev is not set, it is treated as an
 *     existing tag and looked up (throws SG_ERR_TAG_NOT_FOUND if absent);
 *   - otherwise the current working-directory baseline is used (throws
 *     SG_ERR_CANNOT_DO_WHILE_UNCOMMITTED_MERGE if the WD has >1 parent).
 *
 * If bForce is not set, we first verify that none of the requested tags
 * is already attached to a *different* changeset (SG_ERR_TAG_ALREADY_EXISTS).
 * If bForce is set, a tag attached elsewhere is moved to the target.
 *
 * pPendingTree may be NULL; in that case one is allocated from the cwd
 * on demand (baseline case only) and freed before returning.  A caller-
 * supplied pendingtree is NOT freed here.
 */
void SG_tag__add_tags(SG_context * pCtx,
					  SG_repo * pRepo,
					  SG_pendingtree * pPendingTree,
					  const char* psz_spec_cs,
					  SG_bool bRev,
					  SG_bool bForce,
					  const char** ppszTags,
					  SG_uint32 count_args)
{
	SG_pathname* pPathCwd = NULL;
	char* psz_hid_cs = NULL;				// hid of the changeset being tagged (we own this)
	SG_audit q;
	SG_uint32 i = 0;
	char * psz_current_hid_with_that_tag = NULL;	// hid a tag currently points at, if any (we own this)
	SG_bool bFreePendingTree = SG_FALSE;	// true only when we allocated pPendingTree ourselves

	SG_ERR_CHECK(  SG_audit__init(pCtx,&q,pRepo,SG_AUDIT__WHEN__NOW,SG_AUDIT__WHO__FROM_SETTINGS)  );

	// TODO 4/21/10 pendingtree contains a pRepo inside it.  we should
	// TODO 4/21/10 refactor this to alloc the pendingtree first and then
	// TODO 4/21/10 just borrow the pRepo from it.

	if (psz_spec_cs)
	{
		if (bRev)
		{
			// psz_spec_cs is a (prefix of a) changeset hid.
			SG_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, psz_spec_cs, &psz_hid_cs)  );
		}
		else
		{
			// psz_spec_cs names an existing tag; lookup yields NULL
			// (not an error) when the tag does not exist.
			SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, psz_spec_cs, &psz_hid_cs)  );
			if (psz_hid_cs == NULL)
				SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
		}
	}
	else
	{
		// tag the current baseline.
		//
		// when we have an uncomitted merge, we will have more than one parent.
		// what does this command mean then?  It feels like we we should throw
		// an error and say that you have to commit first.

		const SG_varray * pva_wd_parents;		// we do not own this
		const char * psz_hid_parent_0;			// we do not own this
		SG_uint32 nrParents;

		if (pPendingTree == NULL)
		{
			SG_ERR_CHECK(  SG_pendingtree__alloc_from_cwd(pCtx, SG_TRUE, &pPendingTree)  );
			bFreePendingTree = SG_TRUE;
		}
		SG_ERR_CHECK(  SG_pendingtree__get_wd_parents__ref(pCtx, pPendingTree, &pva_wd_parents)  );
		SG_ERR_CHECK(  SG_varray__count(pCtx, pva_wd_parents, &nrParents)  );
		if (nrParents > 1)
			SG_ERR_THROW(  SG_ERR_CANNOT_DO_WHILE_UNCOMMITTED_MERGE  );
		SG_ERR_CHECK(  SG_varray__get__sz(pCtx, pva_wd_parents, 0, &psz_hid_parent_0)  );
		SG_ERR_CHECK(  SG_strdup(pCtx, psz_hid_parent_0, &psz_hid_cs)  );
	}

	if (!bForce)
	{
		//Go through and check all tags to make sure that they are not already applied.
		// NOTE(review): lookup errors are deliberately IGNOREd here (best
		// effort pre-check); the add loop below re-checks with SG_ERR_CHECK.
		for (i = 0; i < count_args; i++)
		{
			const char * pszTag = ppszTags[i];
			SG_ERR_IGNORE(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszTag, &psz_current_hid_with_that_tag)  );
			if (psz_current_hid_with_that_tag != NULL && 0 != strcmp(psz_current_hid_with_that_tag, psz_hid_cs))
				//The tag has been applied, but not to the given changeset.
				SG_ERR_THROW(SG_ERR_TAG_ALREADY_EXISTS);
			SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
		}
	}

	for (i = 0; i < count_args; i++)
	{
		const char * pszTag = ppszTags[i];
		SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszTag, &psz_current_hid_with_that_tag)  );
		if (psz_current_hid_with_that_tag == NULL || 0 != strcmp(psz_current_hid_with_that_tag, psz_hid_cs))
		{
			//The tag has not been applied, or it's been applied to a different dagnode.
			if ( psz_current_hid_with_that_tag != NULL && bForce)  //Remove it, if it's already there
				SG_ERR_CHECK(  SG_vc_tags__remove(pCtx, pRepo, &q, 1, &pszTag)  );
			SG_ERR_CHECK(  SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, pszTag, &q)  );
		}
		// already pointing at our changeset: nothing to do, just free the lookup result.
		SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
	}

fail:
	SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
	if (bFreePendingTree == SG_TRUE)
		SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
	SG_NULLFREE(pCtx, psz_hid_cs);
	SG_PATHNAME_NULLFREE(pCtx, pPathCwd);
}
/**
 * Compute the pathnames to the various input/output files for 1 step
 * in the file content merge plan.
 *
 * When we computed the merge and modified the WD, we put the various
 * 'foo~mine' and etc files in the same directory where we put the
 * (candidate) merge result.  If there are multiple steps in the plan,
 * the intermediate (sub-results) need to be placed in this directory
 * too.
 *
 * The final result can go in this directory.  *BUT* if there was also
 * a MOVE/RENAME conflict (so the ultimate final location is yet to be
 * determined), the final result may get moved/renamed when we deal
 * with the structural issue in [2].
 *
 * Since it is possible that the user could have done a "vv rename foo ..."
 * or "vv move foo ..." to manually deal with the structural conflict, we
 * respect that and dynamically compute the final destination (and ignore
 * the "result" field in the last step).
 *
 * pStrRepoPath_Result should be NON-NULL when we are the final
 * step in the plan.
 *
 * On success, *ppStepPathnames receives a newly-allocated structure
 * (caller frees with _RESOLVE__STEP_PATHNAMES__NULLFREE).
 */
static void _resolve__step_pathnames__compute(SG_context * pCtx,
											  struct _resolve_data * pData,
											  const SG_vhash * pvhIssue,
											  const SG_vhash * pvhStep,
											  SG_string * pStrRepoPath_Result,
											  _resolve__step_pathnames ** ppStepPathnames)
{
	_resolve__step_pathnames * pStepPathnames = NULL;
	SG_string * pStrRepoPath_Parent = NULL;
	SG_pathname * pPath_Parent = NULL;
	const SG_pathname * pPath_WorkingDirectoryTop;	// we do not own this
	const char * pszGidParent;						// we do not own this
	const char * pszEntryname_Mine;					// we do not own this
	const char * pszEntryname_Other;				// we do not own this
	const char * pszEntryname_Ancestor;				// we do not own this

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, pStepPathnames)  );

	// lookup the parent directory where we initially placed all
	// of the files, find where it is currently in the WD, and
	// build absolute paths for each of the mine/other/ancestor
	// files.

	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhIssue, "gid_parent", &pszGidParent)  );
	// the parent dir may have been moved/renamed since the merge, so
	// resolve its *current* repo-path by gid rather than trusting a
	// stored path.
	SG_ERR_CHECK(  SG_pendingtree__find_repo_path_by_gid(pCtx, pData->pPendingTree, pszGidParent, &pStrRepoPath_Parent)  );
	SG_ERR_CHECK(  SG_pendingtree__get_working_directory_top__ref(pCtx, pData->pPendingTree, &pPath_WorkingDirectoryTop)  );
	SG_ERR_CHECK(  SG_workingdir__construct_absolute_path_from_repo_path(pCtx,
																		 pPath_WorkingDirectoryTop,
																		 SG_string__sz(pStrRepoPath_Parent),
																		 &pPath_Parent)  );

	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhStep, "mine",     &pszEntryname_Mine)      );
	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhStep, "other",    &pszEntryname_Other)     );
	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhStep, "ancestor", &pszEntryname_Ancestor)  );

	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pStepPathnames->pPath_Mine,     pPath_Parent, pszEntryname_Mine    )  );
	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pStepPathnames->pPath_Other,    pPath_Parent, pszEntryname_Other   )  );
	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pStepPathnames->pPath_Ancestor, pPath_Parent, pszEntryname_Ancestor)  );

	if (pStrRepoPath_Result)
	{
		// final step: the result goes wherever the entry currently lives
		// in the WD (the caller computed this repo-path dynamically).
		SG_ERR_CHECK(  SG_workingdir__construct_absolute_path_from_repo_path(pCtx,
																			 pPath_WorkingDirectoryTop,
																			 SG_string__sz(pStrRepoPath_Result),
																			 &pStepPathnames->pPath_Result)  );
	}
	else
	{
		// intermediate step: use the "result" entryname recorded in the plan,
		// placed in the same parent directory as the other temp files.
		const char * pszEntryname_InternalResult;

		SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhStep, "result", &pszEntryname_InternalResult)  );
		SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pStepPathnames->pPath_Result, pPath_Parent, pszEntryname_InternalResult)  );
	}

	// transfer ownership to the caller; NULLing our local pointer keeps
	// the shared cleanup below from freeing the returned structure.
	*ppStepPathnames = pStepPathnames;
	pStepPathnames = NULL;

fail:
	SG_PATHNAME_NULLFREE(pCtx, pPath_Parent);
	SG_STRING_NULLFREE(pCtx, pStrRepoPath_Parent);
	_RESOLVE__STEP_PATHNAMES__NULLFREE(pCtx, pStepPathnames);
}
/**
 * Install the server-side JavaScript framework into the given global
 * object: evaluate dispatch.js, optionally call its init() with the
 * server config, load the "core" module directory, load each
 * subdirectory of the modules directory, and finally call
 * initModules() if dispatch.js defined one.
 *
 * Idempotent per-global: a marker checked via _modulesInstalled()
 * makes repeat calls a no-op.  Also returns early (without error) if
 * module loading is globally disabled or no dispatch.js path is
 * configured.
 */
static void _sg_jscore__install_modules(SG_context * pCtx, JSContext *cx, JSObject *glob, const SG_vhash *pServerConfig)
{
	SG_pathname *pModuleDirPath = NULL;
	SG_rbtree_iterator *pModuleDir = NULL;
	SG_rbtree *pModules = NULL;
	const char *szModuleDir = NULL;		// we do not own this (iterator key)
	SG_bool ok = SG_FALSE;
	SG_uint32 len_js = 0;
	jsval rval;
	char *psz_js = NULL;				// contents of dispatch.js (we own this)
	jsval fo = JSVAL_VOID;
	JSObject * jsoServerConfig = NULL;

	if (gpJSCoreGlobalState->bSkipModules)
		return;

	// already done for this global?  bail out quietly.
	SG_ERR_CHECK(  _modulesInstalled(pCtx, cx, glob, &ok)  );
	if (ok)
		return;

	if (! gpJSCoreGlobalState->pPathToDispatchDotJS)
		return;

	// mark installed *before* loading so a failing load is not retried forever.
	SG_ERR_CHECK(  _setModulesInstalled(pCtx, cx, glob)  );

	SG_ERR_CHECK(  sg_read_entire_file(pCtx, gpJSCoreGlobalState->pPathToDispatchDotJS, &psz_js, &len_js)  );

	if(!JS_EvaluateScript(cx, glob, psz_js, len_js, "dispatch.js", 1, &rval))
	{
		// SG_ERR_CHECK_CURRENT propagates an error the JS glue already put
		// in pCtx; otherwise we throw a generic JS error ourselves.
		SG_ERR_CHECK_CURRENT;
		SG_ERR_THROW2(SG_ERR_JS, (pCtx, "An error occurred evaluating dispatch.js!"));
	}

	SG_NULLFREE(pCtx, psz_js);

	// Call init function in dispatch.js
	if(pServerConfig)
	{
		jsval arg;
		JSBool js_ok;
		jsval rval2;

		// hand the server config to JS as a plain object argument.
		SG_JS_NULL_CHECK(  jsoServerConfig = JS_NewObject(cx, NULL, NULL, NULL)  );
		SG_ERR_CHECK(  sg_jsglue__copy_vhash_into_jsobject(pCtx, cx, pServerConfig, jsoServerConfig)  );

		arg = OBJECT_TO_JSVAL(jsoServerConfig);

		js_ok = JS_CallFunctionName(cx, glob, "init", 1, &arg, &rval2);
		SG_ERR_CHECK_CURRENT;
		if(!js_ok)
			SG_ERR_THROW2(SG_ERR_JS, (pCtx, "An error occurred initializing JavaScript framework: call to JavaScript init() failed"));

		jsoServerConfig = NULL;		// GC-owned; we never free it ourselves
	}

	// Load core.
	SG_ERR_CHECK(  _loadModuleDir(pCtx, gpJSCoreGlobalState->pPathToCore, "core", cx, glob)  );

	// Load modules: each non-hidden subdirectory of the modules dir.
	SG_ERR_CHECK(  SG_dir__list(pCtx, gpJSCoreGlobalState->pPathToModules, NULL, NULL, NULL, &pModules)  );
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pModuleDir, pModules, &ok, &szModuleDir, NULL)  );

	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__COPY(pCtx, &pModuleDirPath, gpJSCoreGlobalState->pPathToModules)  );

	while(ok)
	{
		if (szModuleDir[0] != '.')		// skip ".", "..", and dot-files
		{
			SG_fsobj_stat fss;

			SG_ERR_CHECK(  SG_pathname__append__from_sz(pCtx, pModuleDirPath, szModuleDir)  );

			SG_ERR_CHECK(  SG_fsobj__stat__pathname(pCtx, pModuleDirPath, &fss)  );

			if (fss.type & SG_FSOBJ_TYPE__DIRECTORY) // dot paths?
			{
				SG_ERR_CHECK(  _loadModuleDir(pCtx, pModuleDirPath, szModuleDir, cx, glob)  );
			}

			// restore pModuleDirPath for the next iteration.
			SG_ERR_CHECK(  SG_pathname__remove_last(pCtx, pModuleDirPath)  );
		}

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pModuleDir, &ok, &szModuleDir, NULL)  );
	}

	if (! JS_LookupProperty(cx, glob, "initModules", &fo))
	{
		SG_ERR_CHECK_CURRENT;
		SG_ERR_THROW2(SG_ERR_JS, (pCtx, "lookup of initModules() failed"));
	}

	// initModules() is optional: only call it if the property exists.
	if (!JSVAL_IS_VOID(fo))
		if (! JS_CallFunctionName(cx, glob, "initModules", 0, NULL, &rval))
		{
			SG_ERR_CHECK_CURRENT;
			SG_ERR_THROW2(SG_ERR_JS, (pCtx, "Call to initModules() failed"));
		}

fail:
	SG_NULLFREE(pCtx, psz_js);
	SG_PATHNAME_NULLFREE(pCtx, pModuleDirPath);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pModuleDir);
	SG_RBTREE_NULLFREE(pCtx, pModules);
}
static void _resolve__fix__run_external_file_merge_1(SG_context * pCtx, struct _resolve_data * pData, _resolve__external_tool * pET, _resolve__step_pathnames * pStepPathnames, SG_string * pStrRepoPath, SG_bool * pbMergedText) { SG_exec_argvec * pArgVec = NULL; SG_exit_status exitStatus; SG_UNUSED( pData ); #if 0 && defined(DEBUG) SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR, ("RESOLVE File Merge: %s\n" " Mine: %s\n" " Other: %s\n" " Ancestor: %s\n" " Result: %s\n"), SG_string__sz(pStrRepoPath), SG_pathname__sz(pStepPathnames->pPath_Mine), SG_pathname__sz(pStepPathnames->pPath_Other), SG_pathname__sz(pStepPathnames->pPath_Ancestor), SG_pathname__sz(pStepPathnames->pPath_Result)) ); #endif SG_ERR_CHECK( SG_exec_argvec__alloc(pCtx, &pArgVec) ); SG_ERR_CHECK( SG_exec_argvec__append__sz(pCtx, pArgVec, "-r") ); SG_ERR_CHECK( SG_exec_argvec__append__sz(pCtx, pArgVec, SG_pathname__sz(pStepPathnames->pPath_Result)) ); SG_ERR_CHECK( SG_exec_argvec__append__sz(pCtx, pArgVec, "-t1") ); SG_ERR_CHECK( SG_exec_argvec__append__sz(pCtx, pArgVec, "Mine") ); SG_ERR_CHECK( SG_exec_argvec__append__sz(pCtx, pArgVec, "-t2") ); SG_ERR_CHECK( SG_exec_argvec__append__sz(pCtx, pArgVec, SG_string__sz(pStrRepoPath)) ); SG_ERR_CHECK( SG_exec_argvec__append__sz(pCtx, pArgVec, "-t3") ); SG_ERR_CHECK( SG_exec_argvec__append__sz(pCtx, pArgVec, "Other") ); SG_ERR_CHECK( SG_exec_argvec__append__sz(pCtx, pArgVec, SG_pathname__sz(pStepPathnames->pPath_Mine)) ); SG_ERR_CHECK( SG_exec_argvec__append__sz(pCtx, pArgVec, SG_pathname__sz(pStepPathnames->pPath_Ancestor)) ); SG_ERR_CHECK( SG_exec_argvec__append__sz(pCtx, pArgVec, SG_pathname__sz(pStepPathnames->pPath_Other)) ); SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: Preparing to launch external merge tool: %s\n", pET->pszName) ); SG_ERR_CHECK( SG_exec__exec_sync__files(pCtx, pET->pszExe, pArgVec, NULL, NULL, NULL, &exitStatus) ); *pbMergedText = (exitStatus == 0); fail: SG_EXEC_ARGVEC_NULLFREE(pCtx, pArgVec); }
/** * Iterate over all of the dirty files and prompt before diffing. * pvaDirtyFiles can either be a STATUS or a "DiffStep" varray. * */ static void _do_gui_diffs(SG_context * pCtx, SG_bool bWC, const SG_option_state * pOptSt, const SG_varray * pvaDirtyFiles, SG_uint32 * pNrErrors) { SG_uint32 k, nrItems; SG_uint32 nrErrors = 0; SG_ERR_CHECK( SG_varray__count(pCtx, pvaDirtyFiles, &nrItems) ); if (nrItems == 1) // if only 1 item, no fancy prompt required. { SG_vhash * pvhItem_0; // we do not own this SG_uint32 iResult = 0; SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pvaDirtyFiles, 0, &pvhItem_0) ); SG_ERR_CHECK( _do_diff1(pCtx, bWC, pOptSt, pvhItem_0, &iResult) ); switch (iResult) { default: case SG_FILETOOL__RESULT__SUCCESS: case SG_DIFFTOOL__RESULT__SAME: case SG_DIFFTOOL__RESULT__DIFFERENT: break; case SG_DIFFTOOL__RESULT__CANCEL: case SG_FILETOOL__RESULT__FAILURE: case SG_FILETOOL__RESULT__ERROR: nrErrors++; break; } } else { k = 0; while (1) { SG_vhash * pvhItem; // we do not own this const char * pszRepoPath; char chChoice = 'd'; SG_uint32 iResult; SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pvaDirtyFiles, k, &pvhItem) ); SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "path", &pszRepoPath) ); SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDOUT, "\n[%d/%d] %s:\n", k+1, nrItems, pszRepoPath) ); if (k == 0) SG_ERR_CHECK( _do_prompt(pCtx, "(d)iff (n)ext (q)uit", "dnq", 'd', &chChoice) ); else if (k+1 == nrItems) SG_ERR_CHECK( _do_prompt(pCtx, "(d)iff (p)rev (q)uit", "dpq", 'd', &chChoice) ); else SG_ERR_CHECK( _do_prompt(pCtx, "(d)iff (n)ext (p)rev (q)uit", "dnpq", 'd', &chChoice) ); switch (chChoice) { case 'd': SG_ERR_CHECK( _do_diff1(pCtx, bWC, pOptSt, pvhItem, &iResult) ); switch (iResult) { default: case SG_FILETOOL__RESULT__SUCCESS: case SG_DIFFTOOL__RESULT__SAME: case SG_DIFFTOOL__RESULT__DIFFERENT: // advance to next pair of files or finish. 
if (k+1 == nrItems) goto done; k++; break; case SG_DIFFTOOL__RESULT__CANCEL: case SG_FILETOOL__RESULT__FAILURE: case SG_FILETOOL__RESULT__ERROR: nrErrors++; // stay on this pair of files (so that they see the // error message and this filename again). break; } break; case 'n': k++; break; case 'p': k--; break; default: case 'q': goto done; } } } done: *pNrErrors = nrErrors; fail: return; }
/**
 * Release VFILE lock and invoke external merge tool for this file.
 *
 * Flow: read step[0] of the issue's merge plan; if it was already merged
 * successfully, return FIX_USER_MERGED.  Otherwise compute the step's
 * pathnames and tool while we still hold the pendingtree, FREE the
 * pendingtree (releasing the VFILE lock) so the tool can run, then
 * reload the pendingtree, re-check that nobody resolved the issue in
 * the meantime, and record the step as successful.
 *
 * *pFixStatus is set to FIX_USER_MERGED, FIX_USER_ABORTED, or
 * FIX_LOST_RACE; on error it is left unset.
 *
 * TODO 2010/07/12 The MERGE-PLAN is an array and allows for
 * TODO            multiple steps (for an n-way sub-merge cascade).
 * TODO            But we don't have that part turned on yet in
 * TODO            sg_mrg__private_biuld_wd_issues.h:_make_file_merge_plan(),
 * TODO            so for now, we only expect 1 step.
 * TODO
 * TODO            Also, when we do have multiple steps, we might want to
 * TODO            be able to use the 'status' field to see which steps
 * TODO            were already performed in an earlier RESOLVE.
 * TODO
 * TODO            Also, when we want to support more than 1 step we need
 * TODO            to copy pvaPlan because when we release the pendingtree
 * TODO            the pvhIssue becomes invalidated too.
 */
static void _resolve__fix__run_external_file_merge(SG_context * pCtx,
												   struct _resolve_data * pData,
												   const char * pszGid,
												   const SG_vhash * pvhIssue,
												   SG_string * pStrRepoPath,
												   enum _fix_status * pFixStatus)
{
	_resolve__step_pathnames * pStepPathnames = NULL;
	_resolve__external_tool * pET = NULL;
	const SG_varray * pvaPlan;		// we do not own this (borrowed from pvhIssue)
	const SG_vhash * pvhStep_0;		// we do not own this (borrowed from pvaPlan)
	SG_int64 r64;
	SG_uint32 nrSteps;
	SG_mrg_automerge_result result;
	SG_bool bMerged = SG_FALSE;
	SG_bool bIsResolved = SG_FALSE;

	SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhIssue, "conflict_file_merge_plan", (SG_varray **)&pvaPlan)  );
	SG_ERR_CHECK(  SG_varray__count(pCtx, pvaPlan, &nrSteps)  );
	if (nrSteps > 1)
		SG_ERR_THROW2(  SG_ERR_ASSERT,
						(pCtx, "TODO RESOLVE more than 1 step in auto-merge plan for '%s'.", SG_string__sz(pStrRepoPath))  );

	//////////////////////////////////////////////////////////////////
	// Get Step[0]

	SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaPlan, 0, (SG_vhash **)&pvhStep_0)  );

	// see if the user has already performed the merge and maybe got interrupted.

	SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvhStep_0, "status", &r64)  );
	result = (SG_mrg_automerge_result)r64;
	if (result == SG_MRG_AUTOMERGE_RESULT__SUCCESSFUL)
	{
		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "TODO Print message about previous successful manual merge of the file content and ask if they want to redo it for '%s'.\n", SG_string__sz(pStrRepoPath))  );
		*pFixStatus = FIX_USER_MERGED;
		goto done;
	}

	SG_ERR_CHECK(  _resolve__step_pathnames__compute(pCtx, pData, pvhIssue, pvhStep_0, pStrRepoPath, &pStepPathnames)  );

	// While we still have a handle to the pendingtree, lookup the
	// specifics on the external tool that we should invoke.  these
	// details come from localsettings.

	SG_ERR_CHECK(  _resolve__external_tool__lookup(pCtx, pData, pszGid, pvhIssue, pStrRepoPath, &pET)  );

	// Free the PENDINGTREE so that we release the VFILE lock.
	// NOTE: pvhIssue/pvaPlan/pvhStep_0 are owned by the pendingtree,
	// so they become dangling here; NULL them to prevent accidental use.

	pvhIssue = NULL;
	pvaPlan = NULL;
	pvhStep_0 = NULL;
	SG_PENDINGTREE_NULLFREE(pCtx, pData->pPendingTree);

	//////////////////////////////////////////////////////////////////
	// Invoke the external tool.

	SG_ERR_CHECK(  _resolve__fix__run_external_file_merge_1(pCtx, pData, pET, pStepPathnames, pStrRepoPath, &bMerged)  );
	if (!bMerged)
	{
		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: Aborting the merge of this file.\n")  );
		*pFixStatus = FIX_USER_ABORTED;
		goto done;
	}

	//////////////////////////////////////////////////////////////////
	// Reload the PENDINGTREE and re-fetch the ISSUE and updated the STATUS on
	// this step in the PLAN.
	//
	// We duplicate some of the "see if someone else resolved this issue while
	// we were without the lock" stuff.

	SG_ERR_CHECK(  _resolve__lookup_issue(pCtx, pData, pszGid, &pvhIssue)  );
	SG_ERR_CHECK(  _resolve__is_resolved(pCtx, pvhIssue, &bIsResolved)  );
	if (bIsResolved)
	{
		// Someone else marked it resolved while were waiting for
		// the user to edit the file and while we didn't have the
		// file lock.  We should stop here.

		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: Aborting the merge of this file (due to race condition).\n")  );
		*pFixStatus = FIX_LOST_RACE;
		goto done;
	}

	// re-fetch the current step and update the "result" status for it
	// and flush the pendingtree back disk.
	//
	// we only update the step status -- we DO NOT alter the __DIVERGENT_FILE_EDIT__
	// conflict_flags.

	SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhIssue, "conflict_file_merge_plan", (SG_varray **)&pvaPlan)  );
	SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaPlan, 0, (SG_vhash **)&pvhStep_0)  );
	SG_ERR_CHECK(  SG_pendingtree__set_wd_issue_plan_step_status__dont_save_pendingtree(pCtx, pData->pPendingTree,
																						pvhStep_0, SG_MRG_AUTOMERGE_RESULT__SUCCESSFUL)  );
	SG_ERR_CHECK(  SG_pendingtree__save(pCtx, pData->pPendingTree)  );
	SG_PENDINGTREE_NULLFREE(pCtx, pData->pPendingTree);

	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: The file content portion of the merge was successful.\n")  );
	*pFixStatus = FIX_USER_MERGED;

	// we defer the delete of the temp input files until we completely
	// resolve the issue.  (This gives us more options if we allow the
	// resolve to be restarted after interruptions.)

done:
	;
fail:
	_RESOLVE__EXTERNAL_TOOL__NULLFREE(pCtx, pET);
	_RESOLVE__STEP_PATHNAMES__NULLFREE(pCtx, pStepPathnames);
}
/**
 * Compute DIFF on (baseline or arbitrary cset vs WC) and either
 * splat to console or launch a GUI tool for each.
 *
 * In interactive mode only the dirty files are offered, one at a time,
 * via _do_gui_diffs(); otherwise each diff step's header is printed and
 * the diff run immediately.  *pNrErrors receives the number of diffs
 * that failed or were cancelled (set only on the success path).
 */
static void _s01__do_cset_vs_wc(SG_context * pCtx,
								const SG_option_state * pOptSt,
								const SG_stringarray * psaArgs,
								SG_uint32 * pNrErrors)
{
	SG_wc_tx * pWcTx = NULL;
	SG_varray * pvaDiffSteps = NULL;
	SG_varray * pvaDiffSteps_DirtyFiles = NULL;
	SG_pathname * pPathWc = NULL;	// NULL ==> let the TX discover the WD from cwd
	SG_uint32 nrErrors = 0;

	SG_ERR_CHECK(  SG_WC_TX__ALLOC__BEGIN(pCtx, &pWcTx, pPathWc, SG_TRUE)  );
	SG_ERR_CHECK(  SG_wc_tx__diff__setup__stringarray(pCtx, pWcTx, pOptSt->pRevSpec,
													  psaArgs,
													  WC__GET_DEPTH(pOptSt),
													  SG_FALSE,	// bNoIgnores
													  SG_FALSE,	// bNoTSC
													  SG_FALSE,	// bNoSort,
													  pOptSt->bInteractive,
													  pOptSt->psz_tool,
													  &pvaDiffSteps)  );
	// rollback/cancel the TX to release the SQL locks,
	// but don't free it yet (because that will auto-delete
	// the session temp files that we are using for the left
	// sides).
	//
	// This is like SG_wc__diff__throw() but we want to control
	// the diff-loop so we can optionally do the interactive prompt.

	SG_ERR_CHECK(  SG_wc_tx__cancel(pCtx, pWcTx)  );

	if (pvaDiffSteps)
	{
		if (pOptSt->bInteractive)
		{
			// prompt per dirty file rather than dumping everything.
			SG_ERR_CHECK(  _get_dirty_files(pCtx, pvaDiffSteps, &pvaDiffSteps_DirtyFiles)  );
			if (pvaDiffSteps_DirtyFiles)
				SG_ERR_CHECK(  _do_gui_diffs(pCtx, SG_TRUE, pOptSt, pvaDiffSteps_DirtyFiles, &nrErrors)  );
		}
		else
		{
			SG_uint32 k, nrItems;

			SG_ERR_CHECK(  SG_varray__count(pCtx, pvaDiffSteps, &nrItems)  );
			for (k=0; k<nrItems; k++)
			{
				SG_vhash * pvhItem;			// we do not own this
				const char * pszHeader = NULL;	// we do not own this
				SG_uint32 iResult;

				SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaDiffSteps, k, &pvhItem)  );
				SG_ERR_CHECK(  SG_vhash__check__sz(pCtx, pvhItem, "header", &pszHeader)  );
				if (pszHeader)
					SG_ERR_IGNORE(  SG_console__raw(pCtx, SG_CS_STDOUT, pszHeader)  );
				SG_ERR_CHECK(  _do_diff1(pCtx, SG_TRUE, pOptSt, pvhItem, &iResult)  );
				switch (iResult)
				{
				default:
				case SG_FILETOOL__RESULT__SUCCESS:
				case SG_DIFFTOOL__RESULT__SAME:
				case SG_DIFFTOOL__RESULT__DIFFERENT:
					break;

				case SG_DIFFTOOL__RESULT__CANCEL:
				case SG_FILETOOL__RESULT__FAILURE:
				case SG_FILETOOL__RESULT__ERROR:
					nrErrors++;
					break;
				}
			}
		}
	}

	*pNrErrors = nrErrors;

fail:
	SG_VARRAY_NULLFREE(pCtx, pvaDiffSteps);
	SG_VARRAY_NULLFREE(pCtx, pvaDiffSteps_DirtyFiles);
	// freeing the TX (as opposed to cancelling it) deletes the session
	// temp files used as the diff left sides, so it must happen last.
	SG_WC_TX__NULLFREE(pCtx, pWcTx);
}
static void sg_unzip__locate_central_dir(SG_context* pCtx, SG_file* pFile, SG_uint64* piPosition) { unsigned char* buf = NULL; SG_uint64 uSizeFile; SG_uint32 uBackRead; SG_uint32 uMaxBack=0xffff; /* maximum size of global comment */ SG_uint64 uPosFound=0; SG_ERR_CHECK( SG_file__seek_end(pCtx, pFile, &uSizeFile) ); if (uMaxBack > uSizeFile) { uMaxBack = (SG_uint32) uSizeFile; } SG_ERR_CHECK( SG_malloc(pCtx, BUFREADCOMMENT+4, &buf) ); uBackRead = 4; while (uBackRead<uMaxBack) { SG_uint32 uReadSize; SG_uint64 uReadPos; int i; if (uBackRead+BUFREADCOMMENT>uMaxBack) { uBackRead = uMaxBack; } else { uBackRead += BUFREADCOMMENT; } uReadPos = uSizeFile-uBackRead ; uReadSize = (SG_uint32) (((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ? (BUFREADCOMMENT+4) : (uSizeFile-uReadPos)); SG_ERR_CHECK( SG_file__seek(pCtx, pFile, uReadPos) ); SG_ERR_CHECK( SG_file__read(pCtx, pFile, uReadSize, buf, NULL) ); for (i=(int)uReadSize-3; (i--)>0;) { if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) && ((*(buf+i+2))==0x05) && ((*(buf+i+3))==0x06)) { uPosFound = uReadPos+i; break; } } if (uPosFound!=0) { break; } } *piPosition = uPosFound; fail: SG_NULLFREE(pCtx, buf); }