/**
 * Build a test vhash for the u0025 unit test:
 *
 *   { "hello" : "world",
 *     "a"     : [ 31, 51, { "not" : "now" }, 71, [ 81, 82, 83, 84 ], 91 ],
 *     "b"     : "fiddle" }
 *
 * On success the caller owns the returned *pph.
 * On failure nothing is returned and all partial allocations are freed.
 */
void u0025_vhash__create(SG_context * pCtx, SG_vhash** pph)
{
	SG_vhash* ph = NULL;
	SG_varray* pa = NULL;
	SG_vhash* pvhSub = NULL;
	SG_varray* pvaSub = NULL;

	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &ph)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, ph, "hello", "world")  );

	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pa)  );
	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pa, 31)  );
	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pa, 51)  );

	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhSub)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhSub, "not", "now")  );
	// ownership of pvhSub transfers to pa on success (pvhSub is nulled)
	SG_ERR_CHECK(  SG_varray__append__vhash(pCtx, pa, &pvhSub)  );

	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pa, 71)  );

	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pvaSub)  );
	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaSub, 81)  );
	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaSub, 82)  );
	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaSub, 83)  );
	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pvaSub, 84)  );
	// ownership of pvaSub transfers to pa on success (pvaSub is nulled)
	SG_ERR_CHECK(  SG_varray__append__varray(pCtx, pa, &pvaSub)  );

	SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pa, 91)  );
	// ownership of pa transfers to ph on success (pa is nulled)
	SG_ERR_CHECK(  SG_vhash__add__varray(pCtx, ph, "a", &pa)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, ph, "b", "fiddle")  );

	*pph = ph;
	return;

fail:
	// Fix: the original fail path was "// TODO free" and leaked everything
	// allocated so far.  The transfer routines null their source pointer on
	// success, so these NULLFREEs only release what we still own.
	SG_VHASH_NULLFREE(pCtx, ph);
	SG_VARRAY_NULLFREE(pCtx, pa);
	SG_VHASH_NULLFREE(pCtx, pvhSub);
	SG_VARRAY_NULLFREE(pCtx, pvaSub);
}
/**
 * Record pszSyncTarget in the local settings list of previously-used sync
 * targets for the given repo descriptor (scope:
 * SG_LOCALSETTING__SCOPE__INSTANCE / <descriptor> / SG_LOCALSETTING__SYNC_TARGETS).
 * The target is appended only if it is not already present in the list.
 */
void SG_sync__remember_sync_target(SG_context* pCtx, const char * pszLocalRepoDescriptor, const char * pszSyncTarget)
{
	SG_string * pString = NULL;
	SG_varray * pva_targets = NULL;
	SG_bool bFound = SG_FALSE;
	SG_uint32 nEntry = 0;

	// Save this destination to the local setting of previously used destinations.
	SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pString)  );
	SG_ERR_CHECK(  SG_string__sprintf(pCtx, pString, "%s/%s/%s",
									  SG_LOCALSETTING__SCOPE__INSTANCE,
									  pszLocalRepoDescriptor,
									  SG_LOCALSETTING__SYNC_TARGETS)  );
	SG_ERR_CHECK(  SG_localsettings__get__varray(pCtx, SG_string__sz(pString), NULL, &pva_targets, NULL)  );

	if (pva_targets)
		SG_ERR_CHECK(  SG_varray__find__sz(pCtx, pva_targets, pszSyncTarget, &bFound, &nEntry)  );
	else
	{
		// Fix: this allocation was previously unchecked; on failure we would
		// have continued with a NULL varray and an error pending on pCtx.
		SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pva_targets)  );
	}

	if (!bFound)
	{
		SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_targets, pszSyncTarget)  );
		SG_ERR_CHECK(  SG_localsettings__update__varray(pCtx, SG_string__sz(pString), pva_targets)  );
	}

fail:
	SG_STRING_NULLFREE(pCtx, pString);
	SG_VARRAY_NULLFREE(pCtx, pva_targets);
}
/**
 * Walk the given VARRAY and build a subset VARRAY containing only the
 * dirty files.  (For an interactive diff we only show dirty files; in
 * batch/patch mode everything is shown.)
 *
 * Each row of the input varray is expected to look like:
 *
 *   { "status" : { "flags" : <int>, ... },
 *     "path"   : <repo-path>,
 *     ... }
 *
 * Both a Canonical STATUS (pvaStatus) and a "DiffStep" (pvaDiffStep)
 * match this pattern.
 *
 * Returns NULL in *ppvaDirtyFiles if there are no dirty files.
 */
static void _get_dirty_files(SG_context * pCtx,
							 const SG_varray * pva,
							 SG_varray ** ppvaDirtyFiles)
{
	SG_varray * pvaResult = NULL;
	SG_uint32 i, count;

	*ppvaDirtyFiles = NULL;

	if (!pva)
		return;

	SG_ERR_CHECK(  SG_varray__count(pCtx, pva, &count)  );
	if (count == 0)
		return;

	for (i=0; i<count; i++)
	{
		SG_vhash * pvhRow;			// we do not own this
		SG_vhash * pvhRowStatus;	// we do not own this
		SG_int64 i64;
		SG_wc_status_flags statusFlags;

		SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pva, i, &pvhRow)  );
		SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRow, "status", &pvhRowStatus)  );
		SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvhRowStatus, "flags", &i64)  );
		statusFlags = (SG_wc_status_flags)i64;

		// skip anything that is not a file
		if ((statusFlags & SG_WC_STATUS_FLAGS__T__FILE) == 0)
			continue;

		// skip files that are not dirty in one of these ways
		if ((statusFlags & (SG_WC_STATUS_FLAGS__C__NON_DIR_MODIFIED
							|SG_WC_STATUS_FLAGS__S__ADDED
							|SG_WC_STATUS_FLAGS__S__DELETED
							|SG_WC_STATUS_FLAGS__S__MERGE_CREATED
							|SG_WC_STATUS_FLAGS__S__UPDATE_CREATED)) == 0)
			continue;

		// lazily allocate the result so we can return NULL when empty
		if (!pvaResult)
			SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pvaResult)  );

		SG_ERR_CHECK(  SG_varray__appendcopy__vhash(pCtx, pvaResult, pvhRow, NULL)  );
	}

	SG_RETURN_AND_NULL( pvaResult, ppvaDirtyFiles );

fail:
	SG_VARRAY_NULLFREE(pCtx, pvaResult);
}
/**
 * Serialize a SG_dagfrag into a vhash, optionally sharing the allocation
 * pool of pvhShared.  The caller owns the returned *ppvhNew.
 *
 * The result contains the repo id, admin id, dagnum, a version marker,
 * and a varray (KEY_DATA) with one entry per item in the fragment's
 * RB-tree cache.
 */
void SG_dagfrag__to_vhash__shared(SG_context * pCtx,
								  SG_dagfrag * pFrag,
								  SG_vhash * pvhShared,
								  SG_vhash ** ppvhNew)
{
	SG_vhash * pvhFrag = NULL;
	SG_varray * pvaMyData = NULL;
	struct _serialize_data serialize_data;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NULLARGCHECK_RETURN(ppvhNew);

	// transient frags are not serializable
	SG_ASSERT( (! IS_TRANSIENT(pFrag)) );

	SG_ERR_CHECK( SG_VHASH__ALLOC__SHARED(pCtx,&pvhFrag,5,pvhShared) );

	SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx,pvhFrag,KEY_REPO_ID,pFrag->m_sz_repo_id) );
	SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx,pvhFrag,KEY_ADMIN_ID,pFrag->m_sz_admin_id) );
	SG_ERR_CHECK( SG_vhash__add__int64(pCtx,pvhFrag,KEY_DAGNUM,(SG_int64)pFrag->m_iDagNum) );
	SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx,pvhFrag,KEY_VERSION,VALUE_VERSION) );

	SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx,&pvaMyData) );

	serialize_data.pvhFrag = pvhFrag;
	serialize_data.pvaMyData = pvaMyData;

	// walk the complete RB_Cache and add complete info for each my_data item
	// (regardless of whether the dagnode is a START or INTERIOR member or in
	// the END-FRINGE.  These will be in an essentially random order (HID).
	SG_ERR_CHECK( SG_rbtree__foreach(pCtx, pFrag->m_pRB_Cache,
									 _serialize_data_cb, &serialize_data) );

	// ownership of pvaMyData transfers into pvhFrag here (pvaMyData is nulled)
	SG_ERR_CHECK( SG_vhash__add__varray(pCtx,pvhFrag,KEY_DATA,&pvaMyData) );

	*ppvhNew = pvhFrag;
	return;

fail:
	SG_VHASH_NULLFREE(pCtx, pvhFrag);
	SG_VARRAY_NULLFREE(pCtx, pvaMyData);
}
/**
 * Query the VC_HOOKS dag for all hook records implementing the given
 * interface.  Returns a varray of records (fields: "js", "module",
 * "version", and the hidrec) in *ppva; the caller owns it.
 */
void SG_vc_hooks__lookup_by_interface(
	SG_context* pCtx,
	SG_repo* pRepo,
	const char* psz_interface,
	SG_varray** ppva
	)
{
	char* psz_hid_cs_leaf = NULL;
	SG_varray* pva_fields = NULL;
	SG_varray* pva = NULL;
	char buf_where[SG_HID_MAX_BUFFER_LENGTH + 64];
	char *pszEscaped = NULL;
	const char *pszForQuery = NULL;

	SG_ERR_CHECK(  SG_zing__get_leaf(pCtx, pRepo, NULL, SG_DAGNUM__VC_HOOKS, &psz_hid_cs_leaf)  );
	SG_ASSERT(psz_hid_cs_leaf);

	// escape the interface name before embedding it in the where-clause;
	// if no escaping was needed, use the original string as-is
	SG_ERR_CHECK(  SG_sqlite__escape(pCtx, psz_interface, &pszEscaped)  );
	pszForQuery = pszEscaped ? pszEscaped : psz_interface;

	SG_ERR_CHECK(  SG_sprintf(pCtx, buf_where, sizeof(buf_where), "interface == '%s'", pszForQuery)  );

	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pva_fields)  );
	SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_fields, "js")  );
	SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_fields, "module")  );
	SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_fields, "version")  );
	SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_fields, SG_ZING_FIELD__HIDREC)  );

	SG_ERR_CHECK(  SG_zing__query(pCtx, pRepo, SG_DAGNUM__VC_HOOKS,
								  psz_hid_cs_leaf, "hook", buf_where,
								  NULL, 0, 0, pva_fields, &pva)  );

	*ppva = pva;
	pva = NULL;

fail:
	SG_VARRAY_NULLFREE(pCtx, pva);
	SG_VARRAY_NULLFREE(pCtx, pva_fields);
	SG_NULLFREE(pCtx, psz_hid_cs_leaf);
	SG_NULLFREE(pCtx, pszEscaped);
}
/** * Begin a "canonical" status. This is a VARRAY with one * row per reported item. Each item is completely self-contained * and contains all of the status info for that item (unlike the * "classic" view). * * pszInput, if given, lists a single file or directory where we * should begin the tree-walk. If null, we assume the entire tree. * * We accumulate all of the results into a single pvaStatus. * * This status routine uses a pWcTx so that the results * reflect the in-progress transaction. * */ void SG_wc_tx__status(SG_context * pCtx, SG_wc_tx * pWcTx, const char * pszInput, SG_uint32 depth, SG_bool bListUnchanged, SG_bool bNoIgnores, SG_bool bNoTSC, SG_bool bListSparse, SG_bool bListReserved, SG_bool bNoSort, SG_varray ** ppvaStatus, SG_vhash ** ppvhLegend) { SG_varray * pvaStatus = NULL; SG_NULLARGCHECK_RETURN( pWcTx ); // pszInput is optional SG_NULLARGCHECK_RETURN( ppvaStatus ); // ppvhLegend is optional SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &pvaStatus) ); SG_ERR_CHECK( _sg_wc_tx__status(pCtx, pWcTx, pvaStatus, pszInput, depth, bListUnchanged, bNoIgnores, bNoTSC, bListSparse, bListReserved, ppvhLegend) ); // since we only have 1 input, we don't need to dedup the varray. if (!bNoSort) SG_ERR_CHECK( SG_wc__status__sort_by_repopath(pCtx, pvaStatus) ); *ppvaStatus = pvaStatus; pvaStatus = NULL; fail: SG_VARRAY_NULLFREE(pCtx, pvaStatus); }
/**
 * Compute an MSTATUS.  Throws if not currently in a merge.
 * The caller owns the returned pvaStatus.
 */
void _sg_wc_tx__mstatus(SG_context * pCtx,
						SG_wc_tx * pWcTx,
						SG_bool bNoIgnores,
						SG_bool bNoSort,
						SG_varray ** ppvaStatus,
						SG_vhash ** ppvhLegend)
{
	SG_varray * pva = NULL;

	SG_NULLARGCHECK_RETURN( pWcTx );
	SG_NULLARGCHECK_RETURN( ppvaStatus );

	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pva)  );
	SG_ERR_CHECK(  sg_wc_tx__mstatus__main(pCtx, pWcTx, bNoIgnores, pva, ppvhLegend)  );

	if (!bNoSort)
		SG_ERR_CHECK(  SG_wc__status__sort_by_repopath(pCtx, pva)  );

	*ppvaStatus = pva;
	pva = NULL;

fail:
	SG_VARRAY_NULLFREE(pCtx, pva);
}
/**
 * Walk the merge-review tree and collect up to resultLimit result rows
 * into a new varray (*ppResults, owned by the caller).
 *
 * singleMergeReview : stop after completing one merge review (once back
 *                     at indent level 0 with results emitted).
 * firstChunk        : whether this is the first chunk of a paged walk
 *                     (affects the single-merge-review stop condition).
 * pMergeBaselines   : optional overrides for merge baselines.
 * ppContinuationToken : if non-NULL, receives NULL when the walk reached
 *                     the root of the vc dag, else a token (varray) from
 *                     which a later call can resume.
 * pCountResults     : optional; receives the number of rows emitted.
 */
static void _sg_mergereview(SG_context * pCtx, _tree_t * pTree, SG_int32 singleMergeReview, SG_bool firstChunk, SG_vhash * pMergeBaselines, SG_uint32 resultLimit, SG_varray ** ppResults, SG_uint32 * pCountResults, SG_varray ** ppContinuationToken)
{
	SG_varray * pResults = NULL;
	SG_uint32 countResults = 0;
	SG_bool lastResultWasIndented = SG_FALSE;
	SG_varray * pContinuationToken = NULL;

	SG_ASSERT(pCtx!=NULL);
	SG_ASSERT(pTree!=NULL);
	SG_ASSERT(ppResults!=NULL);

	SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &pResults) );

	while(
		// Stop if we have have reached the result limit.
		! (countResults >= resultLimit)

		// Stop if we have completed a "single merge review" that was asked for.
		&& ! (pTree->indentLevel == 0 && singleMergeReview && (countResults>1 || !firstChunk))

		// Stop if we've walked all the way back to root of the vc dag.
		&& ! (pTree->pNextResult==NULL)
	)
	{
		// Process the next item in the pending list.
		// Note that this may cause any number of results to become available.
		SG_ERR_CHECK( _tree__process_next_pending_item(pCtx, pTree, pMergeBaselines) );

		// Fetch results until we don't need anymore or there's none available.
		while(
			// Stop if we have have reached the result limit.
			! (countResults >= resultLimit)

			// Stop if we have completed a "single merge review" that was asked for.
			&& ! (pTree->indentLevel == 0 && singleMergeReview && (countResults>1 || !firstChunk))

			// Stop if we've walked all the way back to root of the vc dag.
			&& ! (pTree->pNextResult==NULL)

			// Stop if the next node is in a pending state.
			// (Can only happen when we were starting from a "continuation token".)
			&& ! (pTree->pNextResult->isPending)

			// Stop if the next node has any display children in a pending state.
			&& ! (_node__has_pending_children(pTree->pNextResult))
		)
		{
			SG_ERR_CHECK( _tree__add_next_node_to_results(pCtx, pTree, pResults, &countResults, &lastResultWasIndented) );
		}
	}

	// Never end a chunk on an indented row: emit one more row so the
	// client sees the outdent that follows it.
	if(countResults<resultLimit && lastResultWasIndented)
	{
		SG_ASSERT(pTree->pNextResult!=NULL); // VC root will never be indented.
		SG_ERR_CHECK( _tree__add_next_node_to_results(pCtx, pTree, pResults, &countResults, &lastResultWasIndented) );
	}

	if(ppContinuationToken!=NULL)
	{
		if(pTree->pNextResult==NULL)
		{
			// walk is complete -- no token needed
			*ppContinuationToken = NULL;
		}
		else
		{
			SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &pContinuationToken) );
			if(pTree->pNextResult==pTree->pRoot)
			{
				// trivial token: just the hid of the root-of-walk node
				SG_ERR_CHECK( SG_varray__append__string__sz(pCtx, pContinuationToken, pTree->pNextResult->pszHidRef) );
			}
			else
			{
				SG_ERR_CHECK( _tree__generate_continuation_token(pCtx, pTree, pContinuationToken) );
			}
			*ppContinuationToken = pContinuationToken;
		}
	}

	*ppResults = pResults;

	if(pCountResults != NULL)
		*pCountResults = countResults;

	return;
fail:
	SG_VARRAY_NULLFREE(pCtx, pResults);
	SG_VARRAY_NULLFREE(pCtx, pContinuationToken);
}
/** * Begin a canonical STATUS when given one or more items using a SG_stringarray. * If psaInputs is NULL, assume the entire tree. * * We accumulate all of the results into a single pvaStatus. * * This status routine uses a pWcTx so that the results * reflect the in-progress transaction. * */ void SG_wc_tx__status__stringarray(SG_context * pCtx, SG_wc_tx * pWcTx, const SG_stringarray * psaInputs, SG_uint32 depth, SG_bool bListUnchanged, SG_bool bNoIgnores, SG_bool bNoTSC, SG_bool bListSparse, SG_bool bListReserved, SG_bool bNoSort, SG_varray ** ppvaStatus, SG_vhash ** ppvhLegend) { SG_varray * pvaStatus = NULL; SG_uint32 k, count; SG_vhash * pvhLegend = NULL; SG_NULLARGCHECK_RETURN( pWcTx ); SG_NULLARGCHECK_RETURN( ppvaStatus ); // ppvhLegend is optional SG_ERR_CHECK( SG_VARRAY__ALLOC(pCtx, &pvaStatus) ); if (psaInputs) { SG_ERR_CHECK( SG_stringarray__count(pCtx, psaInputs, &count) ); for (k=0; k<count; k++) { const char * pszInput_k; SG_ERR_CHECK( SG_stringarray__get_nth(pCtx, psaInputs, k, &pszInput_k) ); SG_ERR_CHECK( _sg_wc_tx__status(pCtx, pWcTx, pvaStatus, pszInput_k, depth, bListUnchanged, bNoIgnores, bNoTSC, bListSparse, bListReserved, (((k==0) && ppvhLegend) ? &pvhLegend : NULL)) ); } if (ppvhLegend) { *ppvhLegend = pvhLegend; pvhLegend = NULL; } // In case they said, "vv status foo foo foo" or something // like "vv status dirA/foo dirA" we need to de-dup the results // since we called the internal status with each input[k] and // just accumulated the results. SG_ERR_CHECK( SG_vaofvh__dedup(pCtx, pvaStatus, "gid") ); } else { // if no args, assume "@/" for the STATUS. SG_ERR_CHECK( _sg_wc_tx__status(pCtx, pWcTx, pvaStatus, NULL, depth, bListUnchanged, bNoIgnores, bNoTSC, bListSparse, bListReserved, ppvhLegend) ); // we don't need to de-dup since we simulated 1 argument. 
} if (!bNoSort) SG_ERR_CHECK( SG_wc__status__sort_by_repopath(pCtx, pvaStatus) ); *ppvaStatus = pvaStatus; pvaStatus = NULL; fail: SG_VARRAY_NULLFREE(pCtx, pvaStatus); SG_VHASH_NULLFREE(pCtx, pvhLegend); }
/**
 * Write a large test JSON document into pStr using the jsonwriter,
 * exercising every element/pair type: strings, ints, doubles, bools,
 * nulls, a nested array, a nested object, and a 1000-iteration array
 * of mixed values.  Used by the u0026 json parser unit test.
 */
void u0026_jsonparser__create_2(SG_context* pCtx, SG_string* pStr)
{
	SG_jsonwriter* pjson = NULL;
	SG_vhash* pvh = NULL;
	SG_varray* pva = NULL;
	SG_uint32 i;
	char* pid = NULL;

	SG_ERR_CHECK(  SG_jsonwriter__alloc(pCtx, &pjson, pStr)  );

	SG_ERR_CHECK(  SG_jsonwriter__write_start_object(pCtx, pjson)  );

	// scalar pairs of each type
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__string__sz(pCtx, pjson, "hello", "world")  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__int64(pCtx, pjson, "x", 5)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__double(pCtx, pjson, "pi", 3.14159)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__bool(pCtx, pjson, "b1", SG_TRUE)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__bool(pCtx, pjson, "b2", SG_FALSE)  );

	// hand-written array pair with one element of each type
	SG_ERR_CHECK(  SG_jsonwriter__write_begin_pair(pCtx, pjson, "furball")  );
	SG_ERR_CHECK(  SG_jsonwriter__write_start_array(pCtx, pjson)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__string__sz(pCtx, pjson, "plok")  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__double(pCtx, pjson, 47.567)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__int64(pCtx, pjson, 22222)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__null(pCtx, pjson)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__bool(pCtx, pjson, SG_TRUE)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__bool(pCtx, pjson, SG_FALSE)  );

	// a freshly-generated gid string as an array element
	SG_ERR_CHECK(  SG_gid__alloc(pCtx, &pid)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__string__sz(pCtx, pjson, pid)  );
	SG_NULLFREE(pCtx, pid);

	SG_ERR_CHECK(  SG_jsonwriter__write_end_array(pCtx, pjson)  );

	SG_ERR_CHECK(  SG_jsonwriter__write_pair__null(pCtx, pjson, "nope")  );

	// a string full of characters that need json escaping
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__string__sz(pCtx, pjson, "messy", U0026_MESSY)  );

	// nested object written via a vhash
	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh, "fried", "tomatoes")  );
	SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pvh, "q", 333)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__vhash(pCtx, pjson, "sub", pvh)  );
	SG_VHASH_NULLFREE(pCtx, pvh);

	// large nested array written via a varray (6000 elements total)
	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pva)  );
	for (i=0; i<1000; i++)
	{
		SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva, "plok")  );
		SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pva, 22)  );
		SG_ERR_CHECK(  SG_varray__append__double(pCtx, pva, 1.414)  );
		SG_ERR_CHECK(  SG_varray__append__bool(pCtx, pva, SG_TRUE)  );
		SG_ERR_CHECK(  SG_varray__append__bool(pCtx, pva, SG_FALSE)  );
		SG_ERR_CHECK(  SG_varray__append__null(pCtx, pva)  );
	}
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__varray(pCtx, pjson, "a", pva)  );
	SG_VARRAY_NULLFREE(pCtx, pva);

	SG_ERR_CHECK(  SG_jsonwriter__write_end_object(pCtx, pjson)  );

	SG_JSONWRITER_NULLFREE(pCtx, pjson);

	return;

fail:
	SG_VHASH_NULLFREE(pCtx, pvh);
	SG_VARRAY_NULLFREE(pCtx, pva);
	SG_JSONWRITER_NULLFREE(pCtx, pjson);
}
/**
 * Find a direct dag path from the given changeset back to root,
 * returned as a varray of csids in *ppva (caller owns it).
 *
 * When SG_DOUBLE_CHECK__PATH_TO_ROOT is enabled, the result of
 * SG_repo__find_dag_path is cross-checked against a simple
 * follow-first-parent walk; a mismatch throws (note that a mismatch
 * is not ALWAYS wrong -- there can be more than one path to root).
 */
void SG_repo__dag__find_direct_path_from_root(
	SG_context * pCtx,
	SG_repo* pRepo,
	SG_uint64 dagnum,
	const char* psz_csid,
	SG_varray** ppva
	)
{
	SG_varray* new_pva = NULL;
#if SG_DOUBLE_CHECK__PATH_TO_ROOT
	SG_varray* old_pva = NULL;
	SG_dagnode* pdn = NULL;
	char* psz_cur = NULL;
	SG_string* pstr1 = NULL;
	SG_string* pstr2 = NULL;
#endif

	SG_ERR_CHECK(  SG_repo__find_dag_path(pCtx, pRepo, dagnum, NULL, psz_csid, &new_pva)  );

#if SG_DOUBLE_CHECK__PATH_TO_ROOT
	// Re-derive the path the old way: walk first-parents from psz_csid
	// until we hit a node with no parents.
	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &old_pva)  );
	SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_csid, &psz_cur)  );
	while (1)
	{
		SG_uint32 count_parents = 0;
		const char** a_parents = NULL;

		SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_cur, &pdn)  );
		SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, old_pva, psz_cur)  );
		SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pdn, &count_parents, &a_parents)  );
		if (0 == count_parents)
		{
			break;
		}

		// follow the first parent only
		SG_NULLFREE(pCtx, psz_cur);
		SG_ERR_CHECK(  SG_STRDUP(pCtx, a_parents[0], &psz_cur)  );
		SG_DAGNODE_NULLFREE(pCtx, pdn);
	}
	SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, old_pva, "")  );

	// compare the two paths via their json serializations
	SG_ERR_CHECK(  SG_string__alloc(pCtx, &pstr1)  );
	SG_ERR_CHECK(  SG_string__alloc(pCtx, &pstr2)  );
	SG_ERR_CHECK(  SG_varray__to_json(pCtx, old_pva, pstr1)  );
	SG_ERR_CHECK(  SG_varray__to_json(pCtx, new_pva, pstr2)  );
	if (0 != strcmp(SG_string__sz(pstr1), SG_string__sz(pstr2)))
	{
		// a failure here isn't actually ALWAYS bad.  there can be more than one path
		// to root.
		fprintf(stderr, "old way:\n");
		SG_VARRAY_STDERR(old_pva);
		fprintf(stderr, "new way:\n");
		SG_VARRAY_STDERR(new_pva);

		SG_ERR_THROW(  SG_ERR_UNSPECIFIED  );
	}
#endif

	*ppva = new_pva;
	new_pva = NULL;

fail:
	SG_VARRAY_NULLFREE(pCtx, new_pva);
#if SG_DOUBLE_CHECK__PATH_TO_ROOT
	SG_STRING_NULLFREE(pCtx, pstr1);
	SG_STRING_NULLFREE(pCtx, pstr2);
	SG_VARRAY_NULLFREE(pCtx, old_pva);
	SG_DAGNODE_NULLFREE(pCtx, pdn);
	SG_NULLFREE(pCtx, psz_cur);
#endif
}