/**
 * REVERT-ALL is implemented as a fake one-legged merge whose
 * target changeset is simply the starting baseline.  Record an
 * owned copy of that HID in pMrg->pszHidTarget.
 */
void sg_wc_tx__fake_merge__revert_all__determine_target_cset(SG_context * pCtx,
															 SG_mrg * pMrg)
{
	SG_ERR_CHECK(  SG_strdup(pCtx, pMrg->pszHid_StartingBaseline, &pMrg->pszHidTarget)  );

	// pMrg->pvhPile is only a cache and gets loaded on demand,
	// so there is nothing to set up for it here.

#if TRACE_WC_MERGE
	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "FakeMergeRevertAll: target (%s).\n",
							   pMrg->pszHidTarget)  );
#endif

fail:
	return;
}
/**
 * UPDATE is implemented as a fake one-legged merge.  This routine
 * records the caller-supplied target changeset HID in pMrg, setting
 * the same fields that the revert-all variant sets.
 *
 * The effects are isolated here because UPDATE has already done
 * most of the homework/validation, so we don't repeat it.
 */
void sg_wc_tx__fake_merge__update__determine_target_cset(SG_context * pCtx,
														 SG_mrg * pMrg,
														 const char * pszHidTarget)
{
	SG_ERR_CHECK(  SG_strdup(pCtx, pszHidTarget, &pMrg->pszHidTarget)  );

	// pMrg->pvhPile is only a cache and gets loaded on demand,
	// so there is nothing to set up for it here.

#if TRACE_WC_MERGE
	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "FakeMergeUpdate: target (%s).\n",
							   pMrg->pszHidTarget)  );
#endif

fail:
	return;
}
/**
 * Find the appropriate external tool to let the user perform a TEXT MERGE
 * on a file.
 *
 * On success, *ppET receives a newly allocated tool descriptor that the
 * caller owns (free with _RESOLVE__EXTERNAL_TOOL__NULLFREE).
 *
 * Throws SG_ERR_NO_MERGE_TOOL_CONFIGURED when no usable program path is
 * configured in localsettings.
 *
 * TODO 2010/07/13 For now, this is hard-coded to use DiffMerge.
 * TODO            Later we want to allow them to have multiple
 * TODO            tools configured and/or to use the file suffix
 * TODO            and so on.
 */
static void _resolve__external_tool__lookup(SG_context * pCtx,
											struct _resolve_data * pData,
											const char * pszGid,
											const SG_vhash * pvhIssue,
											SG_string * pStrRepoPath,
											_resolve__external_tool ** ppET)
{
	_resolve__external_tool * pET = NULL;
	SG_repo * pRepo;		// borrowed from the pendingtree; we do not own this

	SG_UNUSED( pszGid );
	SG_UNUSED( pvhIssue );

	SG_ERR_CHECK( SG_pendingtree__get_repo(pCtx, pData->pPendingTree, &pRepo) );

	SG_ERR_CHECK( SG_alloc1(pCtx, pET) );

	// TODO 2010/07/13 Use localsettings to determine WHICH external tool we should use.
	// TODO            (This could be based upon suffixes and/or whatever.)
	// TODO            Then -- for THAT tool -- lookup the program executable path and
	// TODO            the argument list.
	// TODO            Substitute the given pathnames into the argument list.
	// TODO
	// TODO            For now, we just hard-code DiffMerge.

	SG_ERR_CHECK( SG_strdup(pCtx, "DiffMerge", &pET->pszName) );

	// NOTE: this call is deliberately NOT wrapped in SG_ERR_CHECK --
	// a lookup failure is folded into the "no tool configured" case below
	// so we can raise one actionable error instead of a raw settings error.
	SG_localsettings__get__sz(pCtx, "merge/diffmerge/program", pRepo, &pET->pszExe, NULL);
	if (SG_context__has_err(pCtx) || (!pET->pszExe) || (!*pET->pszExe))
	{
		// discard any lookup error before throwing the consolidated one.
		SG_context__err_reset(pCtx);
		SG_ERR_THROW2( SG_ERR_NO_MERGE_TOOL_CONFIGURED,
					   (pCtx, "'%s' Use 'vv localsetting set merge/diffmerge/program' and retry -or- manually merge content and then use 'vv resolve --mark'.",
						SG_string__sz(pStrRepoPath)) );
	}

	// TODO 2010/07/13 Get argvec.

	*ppET = pET;
	return;

fail:
	_RESOLVE__EXTERNAL_TOOL__NULLFREE(pCtx, pET);
}
/**
 * Return (in *ppszBaseline) an allocated copy of the HID of the
 * BASELINE changeset -- PARENT[0] of the working directory.
 * The caller owns the returned string.
 */
static void _get_baseline(SG_context * pCtx, SG_pendingtree * pPendingTree, char ** ppszBaseline)
{
	const SG_varray * pvaParents;	// we do not own this
	const char * pszHidParent0;		// we do not own this
	char * pszCopy = NULL;

	SG_ERR_CHECK(  SG_pendingtree__get_wd_parents__ref(pCtx, pPendingTree, &pvaParents)  );
	SG_ERR_CHECK(  SG_varray__get__sz(pCtx, pvaParents, 0, &pszHidParent0)  );
	SG_ERR_CHECK(  SG_strdup(pCtx, pszHidParent0, &pszCopy)  );

	*ppszBaseline = pszCopy;
	return;

fail:
	SG_NULLFREE(pCtx, pszCopy);
}
/**
 * Build a freshly allocated SG_variant holding a copy of the i-th
 * factory-default setting.  Only string and varray defaults are
 * supported; anything else throws SG_ERR_NOTIMPLEMENTED.
 * The caller owns *ppv.
 */
static void SG_localsettings__factory__get_nth__variant(
	SG_context* pCtx,
	SG_uint32 i,
	SG_variant** ppv
	)
{
	SG_variant* pvResult = NULL;

	if (SG_VARIANT_TYPE_SZ == g_factory_defaults[i].type)
	{
		// simple string default: duplicate it into the variant.
		SG_ERR_CHECK(  SG_alloc1(pCtx, pvResult)  );
		pvResult->type = g_factory_defaults[i].type;
		SG_ERR_CHECK(  SG_strdup(pCtx, g_factory_defaults[i].psz_val, (char**) &pvResult->v.val_sz)  );
	}
	else if (SG_VARIANT_TYPE_VARRAY == g_factory_defaults[i].type)
	{
		// array default: copy each element of the NULL-terminated list.
		const char** ppszElement = g_factory_defaults[i].pasz_array;

		SG_ERR_CHECK(  SG_alloc1(pCtx, pvResult)  );
		pvResult->type = g_factory_defaults[i].type;
		SG_ERR_CHECK(  SG_varray__alloc(pCtx, &pvResult->v.val_varray)  );
		for (; *ppszElement; ppszElement++)
			SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pvResult->v.val_varray, *ppszElement)  );
	}
	else
	{
		SG_ERR_THROW(  SG_ERR_NOTIMPLEMENTED  );
	}

	*ppv = pvResult;
	pvResult = NULL;

fail:
	SG_VARIANT_NULLFREE(pCtx, pvResult);
}
/**
 * Fetch the contents of a dbrecord-type blob from the repo and
 * convert it into a frozen, allocated SG_dbrecord.
 * The caller owns *ppResult (free with SG_DBRECORD_NULLFREE).
 */
void SG_dbrecord__load_from_repo(SG_context* pCtx, SG_repo * pRepo, const char* pszidHidBlob, SG_dbrecord ** ppResult)
{
	SG_byte * pBufBlob = NULL;
	SG_dbrecord * pNewRecord = NULL;
	char* pszHidForFreeze = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pszidHidBlob);
	SG_NULLARGCHECK_RETURN(ppResult);

	*ppResult = NULL;

	// pull the blob for the given HID into memory and parse the
	// JSON stream into an allocated dbrecord object.
	SG_ERR_CHECK(  SG_repo__fetch_blob_into_memory(pCtx, pRepo, pszidHidBlob, &pBufBlob, NULL)  );
	SG_ERR_CHECK(  _sg_dbrecord__alloc_e__from_json(pCtx, &pNewRecord, (const char *)pBufBlob)  );
	SG_NULLFREE(pCtx, pBufBlob);

	// stuff a copy of the HID into the record and freeze it, so the
	// caller cannot accidentally modify what is really an image of
	// something already on disk.  (freeze takes ownership of the copy.)
	SG_ERR_CHECK(  SG_strdup(pCtx, pszidHidBlob, &pszHidForFreeze)  );
	(void)_sg_dbrecord__freeze(pCtx, pNewRecord, pszHidForFreeze);

	*ppResult = pNewRecord;
	return;

fail:
	SG_NULLFREE(pCtx, pBufBlob);
	SG_DBRECORD_NULLFREE(pCtx, pNewRecord);
	SG_NULLFREE(pCtx, pszHidForFreeze);
}
/**
 * Apply one or more TAGS to a changeset.
 *
 * The target changeset is chosen from psz_spec_cs (interpreted as a
 * revision HID-prefix when bRev, otherwise as an existing tag name);
 * when psz_spec_cs is NULL we tag the current baseline (PARENT[0]),
 * which requires that there is no uncommitted merge.
 *
 * When !bForce, throws SG_ERR_TAG_ALREADY_EXISTS if any requested tag
 * is already attached to a DIFFERENT changeset.  When bForce, such
 * tags are moved to the target changeset.
 *
 * pPendingTree may be NULL; one is allocated from the CWD (and freed
 * again) only if needed to find the baseline.
 */
void SG_tag__add_tags(SG_context * pCtx, SG_repo * pRepo, SG_pendingtree * pPendingTree, const char* psz_spec_cs, SG_bool bRev, SG_bool bForce, const char** ppszTags, SG_uint32 count_args)
{
	SG_pathname* pPathCwd = NULL;
	char* psz_hid_cs = NULL;			// HID of the changeset to tag; we own this
	SG_audit q;
	SG_uint32 i = 0;
	char * psz_current_hid_with_that_tag = NULL;	// reused per tag; we own this
	SG_bool bFreePendingTree = SG_FALSE;	// did we alloc pPendingTree ourselves?

	SG_ERR_CHECK( SG_audit__init(pCtx,&q,pRepo,SG_AUDIT__WHEN__NOW,SG_AUDIT__WHO__FROM_SETTINGS) );

	// TODO 4/21/10 pendingtree contains a pRepo inside it.  we should
	// TODO 4/21/10 refactor this to alloc the pendingtree first and then
	// TODO 4/21/10 just borrow the pRepo from it.

	if (psz_spec_cs)
	{
		if (bRev)
		{
			// caller gave a revision spec: resolve it to a full HID.
			SG_ERR_CHECK( SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, psz_spec_cs, &psz_hid_cs) );
		}
		else
		{
			// caller gave an existing tag: resolve it to the tagged HID.
			SG_ERR_CHECK( SG_vc_tags__lookup__tag(pCtx, pRepo, psz_spec_cs, &psz_hid_cs) );
			if (psz_hid_cs == NULL)
				SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
		}
	}
	else
	{
		// tag the current baseline.
		//
		// when we have an uncomitted merge, we will have more than one parent.
		// what does this command mean then?  It feels like we we should throw
		// an error and say that you have to commit first.

		const SG_varray * pva_wd_parents;		// we do not own this
		const char * psz_hid_parent_0;			// we do not own this
		SG_uint32 nrParents;

		if (pPendingTree == NULL)
		{
			SG_ERR_CHECK( SG_pendingtree__alloc_from_cwd(pCtx, SG_TRUE, &pPendingTree) );
			bFreePendingTree = SG_TRUE;
		}
		SG_ERR_CHECK( SG_pendingtree__get_wd_parents__ref(pCtx, pPendingTree, &pva_wd_parents) );
		SG_ERR_CHECK( SG_varray__count(pCtx, pva_wd_parents, &nrParents) );
		if (nrParents > 1)
			SG_ERR_THROW( SG_ERR_CANNOT_DO_WHILE_UNCOMMITTED_MERGE );
		SG_ERR_CHECK( SG_varray__get__sz(pCtx, pva_wd_parents, 0, &psz_hid_parent_0) );
		SG_ERR_CHECK( SG_strdup(pCtx, psz_hid_parent_0, &psz_hid_cs) );
	}

	if (!bForce)
	{
		//Go through and check all tags to make sure that they are not already applied.
		// NOTE: lookup errors are deliberately ignored here (SG_ERR_IGNORE);
		// an unresolvable tag simply reads as "not applied".
		for (i = 0; i < count_args; i++)
		{
			const char * pszTag = ppszTags[i];
			SG_ERR_IGNORE( SG_vc_tags__lookup__tag(pCtx, pRepo, pszTag, &psz_current_hid_with_that_tag) );
			if (psz_current_hid_with_that_tag != NULL && 0 != strcmp(psz_current_hid_with_that_tag, psz_hid_cs)) //The tag has been applied, but not to the given changeset.
				SG_ERR_THROW(SG_ERR_TAG_ALREADY_EXISTS);
			SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
		}
	}

	// Apply (or move) each tag that is not already on the target changeset.
	for (i = 0; i < count_args; i++)
	{
		const char * pszTag = ppszTags[i];
		SG_ERR_CHECK( SG_vc_tags__lookup__tag(pCtx, pRepo, pszTag, &psz_current_hid_with_that_tag) );
		if (psz_current_hid_with_that_tag == NULL || 0 != strcmp(psz_current_hid_with_that_tag, psz_hid_cs))
		{
			//The tag has not been applied, or it's been applied to a different dagnode.
			if ( psz_current_hid_with_that_tag != NULL && bForce) //Remove it, if it's already there
				SG_ERR_CHECK( SG_vc_tags__remove(pCtx, pRepo, &q, 1, &pszTag) );
			SG_ERR_CHECK( SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, pszTag, &q) );
		}
		SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
	}

	// success falls through to the common cleanup.
fail:
	SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
	if (bFreePendingTree == SG_TRUE)
		SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
	SG_NULLFREE(pCtx, psz_hid_cs);
	SG_PATHNAME_NULLFREE(pCtx, pPathCwd);
}
/**
 * Begin a PUSH over HTTP: POST to the remote sync URL to open a push,
 * allocate a push handle holding the server-issued push ID, and create
 * a temporary local directory in which the caller should stash fragballs
 * before shipping them over the network.
 *
 * On success the caller owns *pFragballDirPathname and *ppPush.
 *
 * If the server answers the POST with HTTP 405, the error is rewritten
 * to SG_ERR_SERVER_DOESNT_ACCEPT_PUSHES.
 */
void sg_sync_client__http__push_begin( SG_context* pCtx, SG_sync_client * pSyncClient, SG_pathname** pFragballDirPathname, SG_sync_client_push_handle** ppPush)
{
	char* pszUrl = NULL;
	sg_sync_client_http_push_handle* pPush = NULL;
	SG_vhash* pvhResponse = NULL;
	SG_pathname* pPathUserTemp = NULL;
	SG_pathname* pPathFragballDir = NULL;
	char bufTid[SG_TID_MAX_BUFFER_LENGTH];
	SG_string* pstr = NULL;

	SG_NULLARGCHECK_RETURN(pSyncClient);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);
	SG_NULLARGCHECK_RETURN(ppPush);

	// Get the URL we're going to post to
	SG_ERR_CHECK( _get_sync_url(pCtx, pSyncClient->psz_remote_repo_spec, SYNC_URL_SUFFIX JSON_URL_SUFFIX, NULL, NULL, &pszUrl) );

	// POST to open the push; the JSON response carries the new push ID.
	SG_ERR_CHECK( do_url(pCtx, pszUrl, "POST", NULL, pSyncClient->psz_username, pSyncClient->psz_password, &pstr, NULL, SG_TRUE) );
	SG_ERR_CHECK( SG_vhash__alloc__from_json__sz(pCtx, &pvhResponse, SG_string__sz(pstr)) );
	SG_STRING_NULLFREE(pCtx, pstr);

	// Alloc a push handle.  Stuff the push ID we received into it.
	{
		const char* pszRef = NULL;		// borrowed from pvhResponse
		SG_ERR_CHECK( SG_alloc(pCtx, 1, sizeof(sg_sync_client_http_push_handle), &pPush) );
		SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhResponse, PUSH_ID_KEY, &pszRef) );
		SG_ERR_CHECK( SG_strdup(pCtx, pszRef, &pPush->pszPushId) );
	}

	// Create a temporary local directory for stashing fragballs before shipping them over the network.
	SG_ERR_CHECK( SG_PATHNAME__ALLOC__USER_TEMP_DIRECTORY(pCtx, &pPathUserTemp) );
	SG_ERR_CHECK( SG_tid__generate(pCtx, bufTid, SG_TID_MAX_BUFFER_LENGTH) );
	SG_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathFragballDir, pPathUserTemp, bufTid) );
	SG_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathFragballDir) );

	// Tell caller where to stash fragballs for this push.
	SG_RETURN_AND_NULL(pPathFragballDir, pFragballDirPathname);

	// Return the new push handle.
	*ppPush = (SG_sync_client_push_handle*)pPush;
	pPush = NULL;		// ownership transferred to caller; fail-cleanup must not free it

	/* fall through */
fail:
	SG_STRING_NULLFREE(pCtx, pstr);
	// Translate a raw HTTP 405 into the friendlier "server doesn't accept pushes".
	if(SG_context__err_equals(pCtx, SG_ERR_SERVER_HTTP_ERROR))
	{
		const char * szInfo = NULL;
		if(SG_IS_OK(SG_context__err_get_description(pCtx, &szInfo)) && strcmp(szInfo, "405")==0)
			SG_ERR_RESET_THROW(SG_ERR_SERVER_DOESNT_ACCEPT_PUSHES);
	}
	_NULLFREE_PUSH_HANDLE(pCtx, pPush);
	SG_NULLFREE(pCtx, pszUrl);
	SG_PATHNAME_NULLFREE(pCtx, pPathUserTemp);
	SG_PATHNAME_NULLFREE(pCtx, pPathFragballDir);
	SG_VHASH_NULLFREE(pCtx, pvhResponse);
}
/**
 * Allocate and populate an SG_getopt from the OS-supplied command line.
 * Slot 0 is the app name, slot 1 the command name (a synthesized "help"
 * when none was given), and the remaining slots are UTF-8 copies of the
 * rest of the arguments.  The caller owns *ppGetopt.
 */
void SG_getopt__alloc(SG_context * pCtx, int argc, ARGV_CHAR_T ** argv, SG_getopt ** ppGetopt)
{
	SG_getopt * pGetopt = NULL;
	SG_string * pStrScratch = NULL;
	int i;

	SG_ERR_CHECK( SG_alloc1(pCtx,pGetopt) );

	pGetopt->count_args = 0;

	// always reserve at least 2 slots (app + command), because we fake
	// a "help" command when the user left the command off.
	if (argc > 2)
		SG_ERR_CHECK( SG_alloc(pCtx,argc,sizeof(char *),&pGetopt->paszArgs) );
	else
		SG_ERR_CHECK( SG_alloc(pCtx, 2, sizeof(char*), &pGetopt->paszArgs) );

	// slot 0: intern our app name.
	SG_ERR_CHECK( SG_getopt__set_app_name(pCtx, pGetopt,argv[0]) );
	SG_ERR_CHECK( SG_strdup(pCtx, SG_string__sz(pGetopt->pStringAppName), &pGetopt->paszArgs[pGetopt->count_args++]) );

	// slot 1: intern the command name; fake "help" when absent.
	SG_ERR_CHECK( SG_getopt__set_command_name(pCtx, pGetopt, ((argc < 2) ? ARGV_LITERAL("help") : argv[1])) );
	SG_ERR_CHECK( SG_strdup(pCtx, SG_string__sz(pGetopt->pStringCommandName), &pGetopt->paszArgs[pGetopt->count_args++]) );

	// remaining slots: intern each OS argument as UTF-8.
	if (argc > 2)
	{
		SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pStrScratch) );
		for (i=2; i<argc; i++)
		{
			char * pszCopy;

			SG_ERR_CHECK( SG_UTF8__INTERN_FROM_OS_BUFFER(pCtx, pStrScratch,argv[i]) );
			SG_ERR_CHECK( SG_strdup(pCtx, SG_string__sz(pStrScratch),&pszCopy) );
			pGetopt->paszArgs[pGetopt->count_args++] = pszCopy;
		}
	}
	SG_STRING_NULLFREE(pCtx, pStrScratch);

	// initialize the option-scanner state.
	pGetopt->place = "";
	pGetopt->interleave = 0;
	pGetopt->ind = 1;
	pGetopt->skip_start = 1;
	pGetopt->skip_end = 1;

	*ppGetopt = pGetopt;
	return;

fail:
	SG_STRING_NULLFREE(pCtx, pStrScratch);
	SG_GETOPT_NULLFREE(pCtx, pGetopt);
}
/**
 * Populate a working directory from a repo.
 *
 * The baseline changeset is psz_spec_hid_cs_baseline when given;
 * otherwise the repo must have exactly one leaf (else
 * SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE is thrown) and that leaf is used.
 *
 * When bCreateDrawer, the drawer files are created, the pendingtree
 * parent is set to the baseline, and the timestamp cache is seeded with
 * the timestamps of the files just written.  Otherwise this behaves
 * more like an EXPORT: files only, no drawer, no timestamps.
 */
void SG_workingdir__create_and_get(
	SG_context* pCtx,
	const char* pszDescriptorName,
	const SG_pathname* pPathDirPutTopLevelDirInHere,
	SG_bool bCreateDrawer,
	const char* psz_spec_hid_cs_baseline
	)
{
	SG_repo* pRepo = NULL;
	SG_rbtree* pIdsetLeaves = NULL;
	SG_uint32 count_leaves = 0;
	SG_changeset* pcs = NULL;
	const char* pszidUserSuperRoot = NULL;	// borrowed from pcs
	SG_bool b = SG_FALSE;
	char* psz_hid_cs_baseline = NULL;		// we own this
	SG_pendingtree * pPendingTree = NULL;
	SG_vhash * pvhTimestamps = NULL;

	/*
	 * Fetch the descriptor by its given name and use it to connect to
	 * the repo.
	 */
	SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, pszDescriptorName, &pRepo) );

	if (psz_spec_hid_cs_baseline)
	{
		SG_ERR_CHECK( SG_strdup(pCtx, psz_spec_hid_cs_baseline, &psz_hid_cs_baseline) );
	}
	else
	{
		const char* psz_hid = NULL;		// borrowed from pIdsetLeaves
		/*
		 * If you do not specify a hid to be the baseline, then this routine
		 * currently only works if there is exactly one leaf in the repo.
		 */
		SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo,SG_DAGNUM__VERSION_CONTROL,&pIdsetLeaves) );
		SG_ERR_CHECK( SG_rbtree__count(pCtx, pIdsetLeaves, &count_leaves) );
		if (count_leaves != 1)
			SG_ERR_THROW( SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE );
		SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, pIdsetLeaves, &b, &psz_hid, NULL) );
		SG_ERR_CHECK( SG_STRDUP(pCtx, psz_hid, &psz_hid_cs_baseline) );
	}

	/*
	 * Load the desired changeset from the repo so we can look up the
	 * id of its user root directory
	 */
	SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs_baseline, &pcs) );
	SG_ERR_CHECK( SG_changeset__get_root(pCtx, pcs, &pszidUserSuperRoot) );

	if (bCreateDrawer)
	{
		SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhTimestamps) );

		// Retrieve everything into the WD and capture the timestamps on the files that we create.
		SG_ERR_CHECK( sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, pvhTimestamps) );

		// this creates "repo.json" with the repo-descriptor.
		SG_ERR_CHECK( SG_workingdir__set_mapping(pCtx, pPathDirPutTopLevelDirInHere, pszDescriptorName, NULL) );

		// this creates an empty "wd.json" file (which doesn't know anything).
		SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, pPathDirPutTopLevelDirInHere, SG_TRUE, &pPendingTree) );

		// force set the initial parents to the current changeset.
		SG_ERR_CHECK( SG_pendingtree__set_single_wd_parent(pCtx, pPendingTree, psz_hid_cs_baseline) );

		// force initialize the timestamp cache to the list that we just built; this should
		// be the only timestamps in the cache since we just populated the WD.
		// NOTE: this call steals our vhash (pvhTimestamps is nulled for us).
		SG_ERR_CHECK( SG_pendingtree__set_wd_timestamp_cache(pCtx, pPendingTree, &pvhTimestamps) );

		SG_ERR_CHECK( SG_pendingtree__save(pCtx, pPendingTree) );
	}
	else
	{
		// Retrieve everything into the WD but do not create .sgdrawer or record timestamps.
		// This is more like an EXPORT operation.
		SG_ERR_CHECK( sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, NULL) );
	}

	// success falls through to the common cleanup.
fail:
	SG_VHASH_NULLFREE(pCtx, pvhTimestamps);
	SG_NULLFREE(pCtx, psz_hid_cs_baseline);
	SG_CHANGESET_NULLFREE(pCtx, pcs);
	SG_RBTREE_NULLFREE(pCtx, pIdsetLeaves);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
}
/**
 * Get the current content HID and optionally the size.
 * (Note that the current HID is not usually defined for a directory.
 * And therefore the content size of a directory is not usually
 * defined either.)
 *
 * The caller owns the returned *ppszHidContent string.
 *
 * Precedence of sources:
 *   1. a QUEUED overwrite recorded in the journal ("hid", "file",
 *      or "target" keys in pvhContent);
 *   2. sparse-item info from the pc rows;
 *   3. the readdir row (live working-directory file);
 *   4. the tne (baseline) row.
 *
 * bNoTSC bypasses the timestamp cache when hashing a live file.
 */
void sg_wc_liveview_item__get_current_content_hid(SG_context * pCtx,
												  sg_wc_liveview_item * pLVI,
												  SG_wc_tx * pWcTx,
												  SG_bool bNoTSC,
												  char ** ppszHidContent,
												  SG_uint64 * pSize)
{
	if (pLVI->queuedOverwrites.pvhContent)
	{
		// We have a QUEUED operation on this item that changed the
		// contents.  Get the 'current' value from the journal.

		const char * psz = NULL;	// borrowed from the journal vhash

		SG_ERR_CHECK_RETURN( SG_vhash__check__sz(pCtx, pLVI->queuedOverwrites.pvhContent, "hid", &psz) );
		if (psz)
		{
			// last overwrite-type operation used an HID.
			#if TRACE_WC_LIE
			SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR,
									  "GetCurrentContentHid: using journal %s for: %s\n",
									  psz, SG_string__sz(pLVI->pStringEntryname)) );
			#endif
			if (pSize)
				SG_ERR_CHECK_RETURN( _fetch_size_of_blob(pCtx, pWcTx, psz, pSize) );
			SG_ERR_CHECK_RETURN( SG_strdup(pCtx, psz, ppszHidContent) );
			return;
		}

		SG_ERR_CHECK_RETURN( SG_vhash__check__sz(pCtx, pLVI->queuedOverwrites.pvhContent, "file", &psz) );
		if (psz)
		{
			// last overwrite-type operation used a TEMP file;
			// hash that file to get the current content HID (and size).
			SG_ERR_CHECK_RETURN( sg_wc_compute_file_hid__sz(pCtx, pWcTx, psz, ppszHidContent, pSize) );
			return;
		}

		SG_ERR_CHECK_RETURN( SG_vhash__check__sz(pCtx, pLVI->queuedOverwrites.pvhContent, "target", &psz) );
		if (psz)
		{
			// last overwrite-type operation gave us a SYMLINK-TARGET.
			// it is no problem to compute this, i'm just being
			// lazy since i'm not sure we need this.
			SG_ERR_THROW2_RETURN( SG_ERR_NOTIMPLEMENTED,
								  (pCtx, "GetCurrentContentHid: using journal: TODO compute HID of symlink target '%s' for: %s",
								   psz, SG_string__sz(pLVI->pStringEntryname)) );
			// TODO also return size
		}

		// the journal vhash had none of the keys we understand.
		SG_ERR_THROW2_RETURN( SG_ERR_NOTIMPLEMENTED,
							  (pCtx, "GetCurrentContentHid: required field missing from vhash for: %s",
							   SG_string__sz(pLVI->pStringEntryname)) );
	}

	SG_ASSERT_RELEASE_RETURN( (pLVI->pPrescanRow) );

	if (SG_WC_PRESCAN_FLAGS__IS_CONTROLLED_SPARSE(pLVI->scan_flags_Live))
	{
		// item is sparse (not populated in the WD): take the HID from
		// the recorded sparse data; prefer the PC row over the Ref row.
		if (pLVI->pPcRow_PC)
		{
			SG_ASSERT_RELEASE_RETURN( (pLVI->pPcRow_PC->p_d_sparse) );
			SG_ERR_CHECK_RETURN( SG_STRDUP(pCtx, pLVI->pPcRow_PC->p_d_sparse->pszHid, ppszHidContent) );
			if (pSize)
				SG_ERR_CHECK_RETURN( _fetch_size_of_blob(pCtx, pWcTx, pLVI->pPcRow_PC->p_d_sparse->pszHid, pSize) );
		}
		else if (pLVI->pPrescanRow->pPcRow_Ref)
		{
			SG_ASSERT_RELEASE_RETURN( (pLVI->pPrescanRow->pPcRow_Ref->p_d_sparse) );
			SG_ERR_CHECK_RETURN( SG_STRDUP(pCtx, pLVI->pPrescanRow->pPcRow_Ref->p_d_sparse->pszHid, ppszHidContent) );
			if (pSize)
				SG_ERR_CHECK_RETURN( _fetch_size_of_blob(pCtx, pWcTx, pLVI->pPrescanRow->pPcRow_Ref->p_d_sparse->pszHid, pSize) );
		}
		else
		{
			// With the addition of {sparse_hid,sparse_attrbits} to tbl_PC,
			// we should not get here.
			SG_ERR_THROW2_RETURN( SG_ERR_NOTIMPLEMENTED,
								  (pCtx, "GetCurrentHid: unhandled case when sparse for '%s'.",
								   SG_string__sz(pLVI->pStringEntryname)) );
		}
	}
	else if (pLVI->pPrescanRow->pRD)
	{
		// item exists in the WD: hash (or look up via the timestamp
		// cache) the live file's content.
#if TRACE_WC_TSC
		if (!bNoTSC)
			SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR,
									  "GetCurrentContentHid: looking up '%s'\n",
									  SG_string__sz(pLVI->pStringEntryname)) );
#endif
		SG_ERR_CHECK_RETURN( sg_wc_readdir__row__get_content_hid__owned(pCtx, pWcTx,
																		pLVI->pPrescanRow->pRD,
																		bNoTSC,
																		ppszHidContent,
																		pSize) );
	}
	else if (pLVI->pPrescanRow->pTneRow)
	{
		// fall back to the baseline (tne) row's recorded HID.
		SG_ERR_CHECK_RETURN( SG_STRDUP(pCtx, pLVI->pPrescanRow->pTneRow->p_d->pszHid, ppszHidContent) );
		if (pSize)
			SG_ERR_CHECK_RETURN( _fetch_size_of_blob(pCtx, pWcTx, pLVI->pPrescanRow->pTneRow->p_d->pszHid, pSize) );
	}
	else
	{
		// perhaps an ADD-SPECIAL + DELETE
		// or an ADDED+LOST in an UPDATE ?
		SG_ERR_THROW2_RETURN( SG_ERR_NOTIMPLEMENTED,
							  (pCtx, "GetCurrentHid: unhandled case for '%s'.",
							   SG_string__sz(pLVI->pStringEntryname)) );
	}
}
/**
 * Find a common ancestor of all the input nodes by walking the DAG
 * backwards from the inputs, popping nodes in (presumably
 * highest-revno-first) order from a work queue.  Each queued node
 * carries a bitvector recording which of the input nodes it is an
 * ancestor of; the first popped node whose bitvector has all bits set
 * is the answer.
 *
 * On success *ppOutputNodeHid receives an allocated copy of that
 * node's HID (the caller owns it).
 */
void SG_dagquery__highest_revno_common_ancestor(
	SG_context * pCtx,
	SG_repo * pRepo,
	SG_uint64 dagnum,
	const SG_stringarray * pInputNodeHids,
	char ** ppOutputNodeHid
	)
{
	const char * const * paszInputNodeHids = NULL;	// borrowed from pInputNodeHids
	SG_uint32 countInputNodes = 0;
	SG_repo_fetch_dagnodes_handle * pDagnodeFetcher = NULL;
	_hrca_work_queue_t workQueue = {NULL, 0, 0, NULL};
	SG_uint32 i;
	SG_dagnode * pDagnode = NULL;		// most recently popped node; we own it between pops
	const char * pszHidRef = NULL;		// borrowed reference to pDagnode's HID
	SG_bitvector * pIsAncestorOf = NULL;	// bit i set => ancestor of input node i
	SG_uint32 countIsAncestorOf = 0;

	SG_ASSERT(pCtx!=NULL);
	SG_NULLARGCHECK(pRepo);
	SG_NULLARGCHECK(pInputNodeHids);
	SG_ERR_CHECK( SG_stringarray__sz_array_and_count(pCtx, pInputNodeHids, &paszInputNodeHids, &countInputNodes) );
	SG_ARGCHECK(countInputNodes>0, pInputNodeHids);
	SG_NULLARGCHECK(ppOutputNodeHid);

	SG_ERR_CHECK( SG_repo__fetch_dagnodes__begin(pCtx, pRepo, dagnum, &pDagnodeFetcher) );

	SG_ERR_CHECK( SG_allocN(pCtx, _HRCA_WORK_QUEUE_INIT_LENGTH, workQueue.p) );
	workQueue.allocatedLength = _HRCA_WORK_QUEUE_INIT_LENGTH;
	SG_ERR_CHECK( SG_RBTREE__ALLOC(pCtx, &workQueue.pRevnoCache) );

	// seed the queue: each input node is (trivially) an ancestor of itself,
	// so its bitvector has exactly its own bit set.
	SG_ERR_CHECK( SG_BITVECTOR__ALLOC(pCtx, &pIsAncestorOf, countInputNodes) );
	for(i=0; i<countInputNodes; ++i)
	{
		SG_ERR_CHECK( SG_bitvector__zero(pCtx, pIsAncestorOf) );
		SG_ERR_CHECK( SG_bitvector__set_bit(pCtx, pIsAncestorOf, i, SG_TRUE) );
		SG_ERR_CHECK( _hrca_work_queue__insert(pCtx, &workQueue, paszInputNodeHids[i], pRepo, pDagnodeFetcher, pIsAncestorOf) );
	}
	SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

	// pop nodes until one is an ancestor of ALL inputs (all bits set).
	SG_ERR_CHECK( _hrca_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &pIsAncestorOf) );
	SG_ERR_CHECK( SG_bitvector__count_set_bits(pCtx, pIsAncestorOf, &countIsAncestorOf) );
	while(countIsAncestorOf < countInputNodes)
	{
		SG_uint32 count_parents = 0;
		const char** parents = NULL;	// borrowed from pDagnode

		// propagate this node's ancestry bits to each of its parents.
		SG_ERR_CHECK( SG_dagnode__get_parents__ref(pCtx, pDagnode, &count_parents, &parents) );
		for(i=0; i<count_parents; ++i)
			SG_ERR_CHECK( _hrca_work_queue__insert(pCtx, &workQueue, parents[i], pRepo, pDagnodeFetcher, pIsAncestorOf) );

		SG_DAGNODE_NULLFREE(pCtx, pDagnode);
		SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

		SG_ERR_CHECK( _hrca_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &pIsAncestorOf) );
		SG_ERR_CHECK( SG_bitvector__count_set_bits(pCtx, pIsAncestorOf, &countIsAncestorOf) );
	}

	SG_ERR_CHECK( SG_strdup(pCtx, pszHidRef, ppOutputNodeHid) );

	SG_DAGNODE_NULLFREE(pCtx, pDagnode);
	SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);

	// drain anything still queued.
	for(i=0; i<workQueue.length; ++i)
	{
		SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
		SG_BITVECTOR_NULLFREE(pCtx, workQueue.p[i].pIsAncestorOf);
	}
	SG_NULLFREE(pCtx, workQueue.p);
	SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);

	SG_ERR_CHECK( SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pDagnodeFetcher) );

	return;
fail:
	for(i=0; i<workQueue.length; ++i)
	{
		SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
		SG_BITVECTOR_NULLFREE(pCtx, workQueue.p[i].pIsAncestorOf);
	}
	SG_NULLFREE(pCtx, workQueue.p);
	SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);
	SG_DAGNODE_NULLFREE(pCtx, pDagnode);
	SG_BITVECTOR_NULLFREE(pCtx, pIsAncestorOf);
	if(pDagnodeFetcher!=NULL)
	{
		SG_ERR_IGNORE( SG_repo__fetch_dagnodes__end(pCtx, pRepo, &pDagnodeFetcher) );
	}
}