/**
 * Repeatedly request and slurp fragballs from the other side until the
 * staging area's DAGs are fully connected (i.e. the staging status vhash
 * no longer contains a SG_SYNC_STATUS_KEY__DAGS entry).
 *
 * @param pCtx              error/message context.
 * @param ppvhStagingStatus (in/out) current staging status; consumed by this
 *                          routine and always freed/nulled before return.
 * @param pMe               pull instance data holding the staging area.
 * @param pClient           client connection used to request fragballs.
 */
static void _add_dagnodes_until_connected(SG_context* pCtx, SG_vhash** ppvhStagingStatus, sg_pull_instance_data* pMe, SG_client* pClient)
{
	SG_bool disconnected = SG_FALSE;
	SG_vhash* pvhFragballRequest = NULL;
	char* pszFragballName = NULL;
	SG_vhash* pvhRequestStatus = NULL;
	const SG_pathname* pStagingPathname;

	SG_ERR_CHECK(  SG_staging__get_pathname(pCtx, pMe->pStaging, &pStagingPathname)  );

	SG_ERR_CHECK(  SG_vhash__has(pCtx, *ppvhStagingStatus, SG_SYNC_STATUS_KEY__DAGS, &disconnected)  );

	while (disconnected)
	{
#if TRACE_PULL
		SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, *ppvhStagingStatus, "pull staging status")  );
#endif
		// There's at least one dag with connection problems.
		// Convert the staging status vhash into a fragball request vhash
		// (ownership of the status vhash transfers to the request).
		pvhFragballRequest = *ppvhStagingStatus;
		*ppvhStagingStatus = NULL;
		SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pvhFragballRequest, SG_SYNC_STATUS_KEY__GENERATIONS, GENERATIONS_PER_ROUNDTRIP)  );

		SG_ERR_CHECK(  SG_client__pull_request_fragball(pCtx, pClient, pvhFragballRequest, pStagingPathname, &pszFragballName, &pvhRequestStatus)  );

		/* Ian TODO: inspect pvhRequestStatus */
		SG_VHASH_NULLFREE(pCtx, pvhRequestStatus);
		SG_VHASH_NULLFREE(pCtx, pvhFragballRequest);

		SG_ERR_CHECK(  SG_staging__slurp_fragball(pCtx, pMe->pStaging, (const char*)pszFragballName)  );
		SG_NULLFREE(pCtx, pszFragballName);

		// Re-check connectedness after absorbing the new fragball.
		SG_ERR_CHECK(  SG_staging__check_status(pCtx, pMe->pStaging, SG_TRUE, SG_FALSE, SG_FALSE, SG_FALSE, SG_FALSE, ppvhStagingStatus)  );
		SG_ERR_CHECK(  SG_vhash__has(pCtx, *ppvhStagingStatus, SG_SYNC_STATUS_KEY__DAGS, &disconnected)  );

#if TRACE_PULL
		SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, *ppvhStagingStatus, "pull staging status")  );
#endif
	}

	// BUGFIX: this was SG_ERR_CHECK_RETURN, which on failure returned
	// immediately and leaked *ppvhStagingStatus (and skipped the trailing
	// newline emit).  SG_ERR_CHECK routes errors through the cleanup below.
	SG_ERR_CHECK(  SG_context__msg__emit(pCtx, "done")  );

	/* fall through */
fail:
	SG_VHASH_NULLFREE(pCtx, *ppvhStagingStatus);
	SG_VHASH_NULLFREE(pCtx, pvhFragballRequest);
	SG_NULLFREE(pCtx, pszFragballName);
	SG_VHASH_NULLFREE(pCtx, pvhRequestStatus);
	SG_ERR_IGNORE(  SG_context__msg__emit(pCtx, "\n")  );
}
/**
 * Serialize a dagfrag into a freshly-allocated vhash, optionally sharing
 * a string pool with pvhShared.  On success the caller owns *ppvhNew; on
 * failure all partial allocations are released and *ppvhNew is untouched.
 */
void SG_dagfrag__to_vhash__shared(SG_context * pCtx, SG_dagfrag * pFrag, SG_vhash * pvhShared, SG_vhash ** ppvhNew)
{
	SG_vhash * pvhResult = NULL;
	SG_varray * pvaData = NULL;
	struct _serialize_data sd;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NULLARGCHECK_RETURN(ppvhNew);
	SG_ASSERT( (! IS_TRANSIENT(pFrag)) );

	// Top-level container: id/version header fields plus the data array.
	SG_ERR_CHECK(  SG_VHASH__ALLOC__SHARED(pCtx, &pvhResult, 5, pvhShared)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhResult, KEY_REPO_ID, pFrag->m_sz_repo_id)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhResult, KEY_ADMIN_ID, pFrag->m_sz_admin_id)  );
	SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pvhResult, KEY_DAGNUM, (SG_int64)pFrag->m_iDagNum)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhResult, KEY_VERSION, VALUE_VERSION)  );

	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pvaData)  );

	sd.pvhFrag = pvhResult;
	sd.pvaMyData = pvaData;

	// walk the complete RB_Cache and add complete info for each my_data item
	// (regardless of whether the dagnode is a START or INTERIOR member or in
	// the END-FRINGE.  These will be in an essentially random order (HID).
	SG_ERR_CHECK(  SG_rbtree__foreach(pCtx, pFrag->m_pRB_Cache, _serialize_data_cb, &sd)  );

	// The vhash takes ownership of (and nulls) the data array.
	SG_ERR_CHECK(  SG_vhash__add__varray(pCtx, pvhResult, KEY_DATA, &pvaData)  );

	*ppvhNew = pvhResult;
	return;

fail:
	SG_VHASH_NULLFREE(pCtx, pvhResult);
	SG_VARRAY_NULLFREE(pCtx, pvaData);
}
/**
 * rbtree-foreach callback: serialize one _my_data cache entry into a vhash
 * and append it to the caller's data array.  END-FRINGE members are stored
 * as a bare HID; everything else embeds the full dagnode.
 */
static void _serialize_data_cb(SG_context * pCtx, const char * szHid, void * pAssocData, void * pVoidSerializeData)
{
	struct _serialize_data * pSD = (struct _serialize_data *)pVoidSerializeData;
	_my_data * pData = (_my_data *)pAssocData;
	SG_vhash * pvhEntry = NULL;
	SG_vhash * pvhNode = NULL;

	SG_ERR_CHECK(  SG_VHASH__ALLOC__SHARED(pCtx, &pvhEntry, 5, pSD->pvhFrag)  );
	SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pvhEntry, KEY_DFS_STATE, (SG_int64)pData->m_state)  );

	if (SG_DFS_END_FRINGE == pData->m_state)
	{
		// Fringe nodes carry only their id.
		SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvhEntry, KEY_DAGNODE_ID, szHid)  );
	}
	else
	{
		// Interior/start nodes carry the serialized dagnode itself
		// (ownership of pvhNode transfers into pvhEntry).
		SG_ERR_CHECK(  SG_dagnode__to_vhash__shared(pCtx, pData->m_pDagnode, pvhEntry, &pvhNode)  );
		SG_ERR_CHECK(  SG_vhash__add__vhash(pCtx, pvhEntry, KEY_ACTUAL_DAGNODE, &pvhNode)  );
	}

#if DEBUG && TRACE_DAGFRAG && 0
	SG_ERR_CHECK(  SG_vhash_debug__dump_to_console(pCtx, pvhEntry)  );
#endif

	// The array takes ownership of (and nulls) pvhEntry.
	SG_ERR_CHECK(  SG_varray__append__vhash(pCtx, pSD->pvaMyData, &pvhEntry)  );
	return;

fail:
	SG_VHASH_NULLFREE(pCtx, pvhEntry);
	SG_VHASH_NULLFREE(pCtx, pvhNode);
}
/**
 * Emit pTree->pNextResult into pResults as a vhash (revno, changeset_id,
 * parents, displayChildren, indent, optional continuationToken / display
 * parent info), then advance pTree->pNextResult to the next node in display
 * order, recycling fully-visited nodes onto the tree's free list.
 *
 * @param pCtx                    error context.
 * @param pTree                   tree being walked; pNextResult, pRoot,
 *                                pFreeList and indentLevel are all mutated.
 * @param pResults                varray receiving the new result vhash.
 * @param pCountResults           (in/out) incremented once per emitted node.
 * @param pbLastResultWasIndented (out) whether the emitted node was indented.
 *
 * NOTE(review): the statement order in the advance/free-list section below is
 * load-bearing (freed nodes' child-pointer slots are reused as free-list
 * links) — do not reorder.
 */
static void _tree__add_next_node_to_results(SG_context * pCtx, _tree_t * pTree, SG_varray * pResults, SG_uint32 * pCountResults, SG_bool * pbLastResultWasIndented)
{
	SG_vhash * pResult = NULL;
	SG_varray * pChildren = NULL;
	SG_uint32 i;
	SG_varray * pTmp = NULL;  // never assigned; freed at fail: as a safety net

	// Add pTree->pNextResult to results list.
	SG_ERR_CHECK(  SG_varray__appendnew__vhash(pCtx, pResults, &pResult)  );
	SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pResult, "revno", pTree->pNextResult->revno)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pResult, "changeset_id", pTree->pNextResult->pszHidRef)  );
	// Ownership of the node's pVcParents vhash transfers into pResult here.
	SG_ERR_CHECK(  SG_vhash__add__vhash(pCtx, pResult, "parents", &pTree->pNextResult->pVcParents)  );
	SG_ERR_CHECK(  SG_vhash__addnew__varray(pCtx, pResult, "displayChildren", &pChildren)  );
	for(i=0; i<pTree->pNextResult->displayChildren.count; ++i)
		SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pChildren, pTree->pNextResult->displayChildren.p[i]->revno)  );
	// A node with multiple display children at a nonzero indent marks a point
	// the caller may need to resume from: emit a continuation token.
	if(pTree->pNextResult->displayChildren.count>1 && pTree->indentLevel>0)
	{
		SG_varray * pContinuationToken = NULL;
		SG_ERR_CHECK(  SG_vhash__addnew__varray(pCtx, pResult, "continuationToken", &pContinuationToken)  );
		SG_ERR_CHECK(  _tree__generate_continuation_token(pCtx, pTree, pContinuationToken)  );
	}
	SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pResult, "indent", pTree->indentLevel)  );
	*pbLastResultWasIndented = (pTree->indentLevel > 0);
	if(pTree->pNextResult->pDisplayParent!=NULL)
	{
		SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pResult, "displayParent", pTree->pNextResult->pDisplayParent->revno)  );
		SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pResult, "indexInParent", pTree->pNextResult->pDisplayParent->displayChildren.count)  );
		SG_ASSERT(*pbLastResultWasIndented);
	}
	else
	{
		SG_ASSERT(!*pbLastResultWasIndented);
	}
	++(*pCountResults);

	// Advance pTree->pNextResult pointer to next result.
	while(pTree->pNextResult->displayChildren.count==0 && pTree->pNextResult!=pTree->pRoot)
	{
		// We have already added this node and all children to the results.
		// Free the memory that will not be reused, and then put the node
		// on the "free list".  (The first child-pointer slot is reused as
		// the free-list "next" link.)
		_node__free_nonreusable_memory(pCtx, pTree->pNextResult);
		pTree->pNextResult->displayChildren.p[0] = pTree->pFreeList;
		pTree->pNextResult->displayChildren.count = 1;
		pTree->pFreeList = pTree->pNextResult;

		// Move back up up in the tree.
		pTree->pNextResult = pTree->pNextResult->pDisplayParent;
		if(pTree->pNextResult!=NULL)
		{
			// The node we just freed... Remove it from its display parent too.
			--pTree->pNextResult->displayChildren.count;

			// All children but the leftmost one are indented by 1 from the parent.
			if(pTree->pNextResult->displayChildren.count>0)
				--pTree->indentLevel;
		}
	}
	// Descend to the rightmost remaining display child, if any.
	if(pTree->pNextResult->displayChildren.count>0)
	{
		SG_uint32 i = pTree->pNextResult->displayChildren.count-1;
		pTree->pNextResult = pTree->pNextResult->displayChildren.p[i];
		if(i>=1)
			++pTree->indentLevel;
	}
	else
	{
		pTree->pNextResult = NULL;
	}

	// If we advanced past root...
	if(pTree->pNextResult==NULL || pTree->pNextResult==pTree->pRoot->displayChildren.p[0])
	{
		// Out with the old root, in with the new...
		// (Old root goes onto the free list using the same link-in-slot trick.)
		_node__free_nonreusable_memory(pCtx, pTree->pRoot);
		pTree->pRoot->displayChildren.p[0] = pTree->pFreeList;
		pTree->pRoot->displayChildren.count = 1;
		pTree->pFreeList = pTree->pRoot;
		pTree->pRoot = pTree->pNextResult;
		if(pTree->pRoot!=NULL)
			pTree->pRoot->pDisplayParent = NULL;
	}

	return;
fail:
	SG_VARRAY_NULLFREE(pCtx, pTmp);
}
/**
 * Process the highest-revno node in pTree->pending: load its vc parents as
 * candidate display children (fetching new nodes or "stealing" ones already
 * pending further right), sort the children with a chosen baseline on the
 * far left, and replace the node in the pending list with its children.
 *
 * @param pCtx            error context.
 * @param pTree           tree whose 'pending' frontier is being expanded.
 * @param pMergeBaselines optional map of revno -> hand-picked baseline child
 *                        revno; may be NULL.
 */
static void _tree__process_next_pending_item(SG_context * pCtx, _tree_t * pTree, SG_vhash * pMergeBaselines)
{
	SG_uint32 i;
	_node_t * pNode = NULL; // The node we are processing.
	SG_uint32 iNode = 0; // Index of pNode in the 'pending' list.

	SG_uint32 countVcParents = 0;
	const char ** paszVcParentHids = NULL;
	SG_uint32 iVcParent;

	// The first pending node that needs to be processed is always the one with
	// the highest revno. Find it in the list.
	for(i=1; i < pTree->pending.count; ++i)
	{
		if(pTree->pending.p[i]->revno > pTree->pending.p[iNode]->revno)
			iNode = i;
	}
	pNode = pTree->pending.p[iNode];

	// Load in the node's display children/vc parents.
	SG_ASSERT(pNode->displayChildren.count==0);
	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pNode->pVcParents)  );
	SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pNode->pDagnode, &countVcParents, &paszVcParentHids)  );
	for(iVcParent=0; iVcParent<countVcParents; ++iVcParent)
	{
		// Each vc parent is a candidate display child.
		const char * pszHidCandidate = paszVcParentHids[iVcParent];
		_node_t * pNodeRef = NULL;

		// Scan through the list of 'pending' nodes to see if we have already
		// fetched this one...
		SG_uint32 iCandidate = pTree->pending.count;
		for(i=0; i < pTree->pending.count && iCandidate==pTree->pending.count; ++i)
		{
			if(strcmp(pTree->pending.p[i]->pszHidRef, pszHidCandidate)==0)
			{
				iCandidate = i;
				pNodeRef = pTree->pending.p[i];
			}
		}

		if(iCandidate == pTree->pending.count)
		{
			// Node was not found. Add it new.
			SG_ERR_CHECK(  _tree__add_new_node(pCtx, pTree, pNode, pszHidCandidate, &pNodeRef)  );
		}
		else if(iCandidate > iNode)
		{
			// Node was found further to the right in the tree. Steal it.
			SG_ERR_CHECK(  _tree__move_node(pCtx, pTree->pending.p[iCandidate], pNode)  );

			// Also, remove it from the pending list. (It gets re-added later.)
			_node_list__remove_at(&pTree->pending, iCandidate);
		}
		else
		{
			// Node was found further to the left. Do nothing.
		}

		// Record the parent's revno keyed by its hid (pNodeRef was set by
		// whichever branch above located or created the node).
		SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pNode->pVcParents, pszHidCandidate, pNodeRef->revno)  );
	}

	// We have all this node's display children (still pending--they could get
	// stolen later). Now we need to sort them.
	if(pNode->displayChildren.count>1)
	{
		// First we pick one to go on the far left, if one stands out as most likely
		// to be the "old"/baseline node into which the others were "brought in".
		SG_uint32 iBaseline = pNode->displayChildren.count;

		// Allow the caller to have hand-picked the baseline node:
		if(pMergeBaselines!=NULL)
		{
			SG_int_to_string_buffer sz;
			SG_int64 baseline = 0;
			SG_ERR_CHECK(  SG_vhash__check__int64(pCtx, pMergeBaselines, SG_int64_to_sz(pNode->revno, sz), &baseline)  );
			if(baseline!=0)
			{
				for(i=0; i<pNode->displayChildren.count; ++i)
				{
					if(pNode->displayChildren.p[i]->revno==(SG_uint32)baseline)
					{
						iBaseline = i;
						break;
					}
				}
			}
		}

		if(iBaseline == pNode->displayChildren.count)
		{
			// No baseline node from the user. See if there's one unique node whose
			// user *doesn't* match.
			for(i=0; i<pNode->displayChildren.count; ++i)
			{
				SG_bool match = SG_FALSE;
				SG_ERR_CHECK(  _user_match_found(pCtx, pTree->pRepoRef, pNode->displayChildren.p[i], pNode, &match)  );
				if(!match)
				{
					if(iBaseline == pNode->displayChildren.count)
					{
						iBaseline = i;
					}
					else
					{
						// Whoops. "Nevermind."
						// (Two non-matching users: no unique baseline after all.)
						iBaseline = pNode->displayChildren.count;
						break;
					}
				}
			}
		}

		// Finally, sort
		_node_list__sort(&pNode->displayChildren, iBaseline);
	}

	// In the 'pending' list, replace this node with its children.
	if(pNode->displayChildren.count == 0)
		_node_list__remove_at(&pTree->pending, iNode);
	else
	{
		pTree->pending.p[iNode] = pNode->displayChildren.p[0];
		if(pNode->displayChildren.count > 1)
		{
			SG_ERR_CHECK(  _node_list__insert_at(pCtx, &pTree->pending, iNode+1, &pNode->displayChildren.p[1], pNode->displayChildren.count-1)  );
		}
	}

	// This node is no longer pending.
	pNode->isPending = SG_FALSE;

	return;
fail:
	;
}
void SG_sync_remote__get_repo_info( SG_context* pCtx, SG_repo* pRepo, SG_bool bIncludeBranches, SG_bool b_include_areas, SG_vhash** ppvh) { SG_vhash* pvh = NULL; char* pszRepoId = NULL; char* pszAdminId = NULL; char* pszHashMethod = NULL; SG_uint32 count_dagnums = 0; SG_uint64* paDagNums = NULL; SG_uint32 i = 0; SG_vhash* pvh_dags = NULL; SG_vhash* pvh_areas = NULL; SG_vhash* pvhBranchPile = NULL; SG_bool bHasBranchDag = SG_FALSE; SG_NULLARGCHECK_RETURN(pRepo); SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvh) ); /* Protocol version */ SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pvh, SG_SYNC_REPO_INFO_KEY__PROTOCOL_VERSION, 1) ); /* Basic repository info */ SG_ERR_CHECK( SG_repo__get_repo_id(pCtx, pRepo, &pszRepoId) ); SG_ERR_CHECK( SG_repo__get_admin_id(pCtx, pRepo, &pszAdminId) ); SG_ERR_CHECK( SG_repo__get_hash_method(pCtx, pRepo, &pszHashMethod) ); SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh, SG_SYNC_REPO_INFO_KEY__REPO_ID, pszRepoId) ); SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh, SG_SYNC_REPO_INFO_KEY__ADMIN_ID, pszAdminId) ); SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvh, SG_SYNC_REPO_INFO_KEY__HASH_METHOD, pszHashMethod) ); /* All DAGs in the repository */ SG_ERR_CHECK( SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums) ); SG_ERR_CHECK( SG_vhash__addnew__vhash(pCtx, pvh, "dags", &pvh_dags) ); for (i=0; i<count_dagnums; i++) { char buf_dagnum[SG_DAGNUM__BUF_MAX__HEX]; SG_ERR_CHECK_RETURN( SG_dagnum__to_sz__hex(pCtx, paDagNums[i], buf_dagnum, sizeof(buf_dagnum)) ); SG_ERR_CHECK( SG_vhash__add__null(pCtx, pvh_dags, buf_dagnum) ); /* Asking for a DAG for the first time in a repo will create that DAG. * When pushing into an empty repo, we don't want this initial query to create * empty new DAGs, so we make sure they exist before we query them. 
*/ if (paDagNums[i] == SG_DAGNUM__VC_BRANCHES) bHasBranchDag = SG_TRUE; } // TODO the following code is a problem, because it requires that this repo // instance have indexes, and we would prefer to preserve the ability of // an index-free instance to support push, pull, and clone. /* All areas in the repository */ if (b_include_areas) { SG_ERR_CHECK( SG_area__list(pCtx, pRepo, &pvh_areas) ); if (pvh_areas) { SG_ERR_CHECK( SG_vhash__add__vhash(pCtx, pvh, "areas", &pvh_areas) ); } } /* Branches */ if (bIncludeBranches && bHasBranchDag) { SG_ERR_CHECK( SG_vc_branches__cleanup(pCtx, pRepo, &pvhBranchPile) ); if (pvhBranchPile) { SG_bool bHasBranches; SG_ERR_CHECK( SG_vhash__has(pCtx, pvhBranchPile, "branches", &bHasBranches) ); if (bHasBranches) SG_ERR_CHECK( SG_vhash__add__vhash(pCtx, pvh, "branches", &pvhBranchPile) ); } } *ppvh = pvh; pvh = NULL; /* fall through */ fail: SG_NULLFREE(pCtx, paDagNums); SG_VHASH_NULLFREE(pCtx, pvh); SG_NULLFREE(pCtx, pszRepoId); SG_NULLFREE(pCtx, pszAdminId); SG_NULLFREE(pCtx, pszHashMethod); SG_VHASH_NULLFREE(pCtx, pvh_areas); SG_VHASH_NULLFREE(pCtx, pvhBranchPile); }
static void _sg_workingdir__get_entry2(SG_context * pCtx, SG_repo * pRepo, const SG_pathname * pPathSub, const char * pszGid, SG_treenode_entry_type type, const char * pszidHidContent, const char * pszidHidXattrs, SG_int64 iAttributeBits, SG_vhash * pvhTimestamps) { SG_file* pFile = NULL; SG_string* pstrLink = NULL; SG_byte* pBytes = NULL; SG_vhash * pvhGid = NULL; if (SG_TREENODEENTRY_TYPE_DIRECTORY == type) { /* create the directory and then recurse into it */ SG_ERR_CHECK( SG_fsobj__mkdir__pathname(pCtx, pPathSub) ); SG_ERR_CHECK( _sg_workingdir__get_dir(pCtx, pRepo, pPathSub, pszidHidContent, pvhTimestamps) ); } else if (SG_TREENODEENTRY_TYPE_REGULAR_FILE == type) { SG_ERR_CHECK( SG_file__open__pathname(pCtx, pPathSub, SG_FILE_RDWR | SG_FILE_CREATE_NEW, SG_FSOBJ_PERMS__MASK, &pFile) ); SG_ERR_CHECK( SG_repo__fetch_blob_into_file(pCtx, pRepo, pszidHidContent, pFile, NULL) ); SG_ERR_CHECK( SG_file__close(pCtx, &pFile) ); } else if (SG_TREENODEENTRY_TYPE_SYMLINK == type) { SG_uint64 iLenBytes = 0; SG_ERR_CHECK( SG_repo__fetch_blob_into_memory(pCtx, pRepo, pszidHidContent, &pBytes, &iLenBytes) ); SG_ERR_CHECK( SG_STRING__ALLOC__BUF_LEN(pCtx, &pstrLink, pBytes, (SG_uint32) iLenBytes) ); SG_ERR_CHECK( SG_fsobj__symlink(pCtx, pstrLink, pPathSub) ); SG_NULLFREE(pCtx, pBytes); SG_STRING_NULLFREE(pCtx, pstrLink); } else { SG_ERR_THROW(SG_ERR_NOTIMPLEMENTED); } if (pszidHidXattrs) { #ifdef SG_BUILD_FLAG_FEATURE_XATTR SG_ERR_CHECK( _sg_workingdir__set_xattrs(pCtx, pRepo, pPathSub, pszidHidXattrs) ); #else // TODO do we need to stuff something into the pendingtree to remind us // TODO that the entry originally had an XAttr and we just didn't restore // TODO it when we populated the WD on this Windows system? 
#endif } SG_ERR_CHECK( SG_attributes__bits__apply(pCtx, pPathSub, iAttributeBits) ); if (pvhTimestamps && (SG_TREENODEENTRY_TYPE_REGULAR_FILE == type)) { SG_fsobj_stat stat; SG_int64 iTimeNow; SG_ERR_CHECK( SG_fsobj__stat__pathname(pCtx, pPathSub, &stat) ); SG_ERR_CHECK( SG_time__get_milliseconds_since_1970_utc(pCtx, &iTimeNow) ); SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhGid) ); SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pvhGid, "mtime_ms", stat.mtime_ms) ); SG_ERR_CHECK( SG_vhash__add__int64(pCtx, pvhGid, "clock_ms", iTimeNow) ); SG_ERR_CHECK( SG_vhash__add__vhash(pCtx, pvhTimestamps, pszGid, &pvhGid) ); // this steals our vhash } fail: SG_VHASH_NULLFREE(pCtx, pvhGid); }
/**
 * Test helper: stream a large, varied JSON document into pStr via the
 * jsonwriter — scalars of every type, a nested array (including a generated
 * gid and a messy string), a sub-object, and a 1000-iteration array of mixed
 * elements — to exercise the JSON parser.
 *
 * @param pCtx error context.
 * @param pStr string receiving the generated JSON text.
 */
void u0026_jsonparser__create_2(SG_context* pCtx, SG_string* pStr)
{
	SG_jsonwriter* pjson = NULL;
	SG_vhash* pvh = NULL;
	SG_varray* pva = NULL;
	SG_uint32 i;
	char* pid = NULL;

	SG_ERR_CHECK(  SG_jsonwriter__alloc(pCtx, &pjson, pStr)  );

	SG_ERR_CHECK(  SG_jsonwriter__write_start_object(pCtx, pjson)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__string__sz(pCtx, pjson, "hello", "world")  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__int64(pCtx, pjson, "x", 5)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__double(pCtx, pjson, "pi", 3.14159)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__bool(pCtx, pjson, "b1", SG_TRUE)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__bool(pCtx, pjson, "b2", SG_FALSE)  );

	SG_ERR_CHECK(  SG_jsonwriter__write_begin_pair(pCtx, pjson, "furball")  );
	SG_ERR_CHECK(  SG_jsonwriter__write_start_array(pCtx, pjson)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__string__sz(pCtx, pjson, "plok")  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__double(pCtx, pjson, 47.567)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__int64(pCtx, pjson, 22222)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__null(pCtx, pjson)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__bool(pCtx, pjson, SG_TRUE)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__bool(pCtx, pjson, SG_FALSE)  );
	SG_ERR_CHECK(  SG_gid__alloc(pCtx, &pid)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_element__string__sz(pCtx, pjson, pid)  );
	SG_NULLFREE(pCtx, pid);
	SG_ERR_CHECK(  SG_jsonwriter__write_end_array(pCtx, pjson)  );

	SG_ERR_CHECK(  SG_jsonwriter__write_pair__null(pCtx, pjson, "nope")  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__string__sz(pCtx, pjson, "messy", U0026_MESSY)  );

	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh, "fried", "tomatoes")  );
	SG_ERR_CHECK(  SG_vhash__add__int64(pCtx, pvh, "q", 333)  );
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__vhash(pCtx, pjson, "sub", pvh)  );
	SG_VHASH_NULLFREE(pCtx, pvh);

	SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &pva)  );
	for (i=0; i<1000; i++)
	{
		SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva, "plok")  );
		SG_ERR_CHECK(  SG_varray__append__int64(pCtx, pva, 22)  );
		SG_ERR_CHECK(  SG_varray__append__double(pCtx, pva, 1.414)  );
		SG_ERR_CHECK(  SG_varray__append__bool(pCtx, pva, SG_TRUE)  );
		SG_ERR_CHECK(  SG_varray__append__bool(pCtx, pva, SG_FALSE)  );
		SG_ERR_CHECK(  SG_varray__append__null(pCtx, pva)  );
	}
	SG_ERR_CHECK(  SG_jsonwriter__write_pair__varray(pCtx, pjson, "a", pva)  );
	SG_VARRAY_NULLFREE(pCtx, pva);

	SG_ERR_CHECK(  SG_jsonwriter__write_end_object(pCtx, pjson)  );

	SG_JSONWRITER_NULLFREE(pCtx, pjson);
	return;

fail:
	SG_VHASH_NULLFREE(pCtx, pvh);
	SG_VARRAY_NULLFREE(pCtx, pva);
	SG_JSONWRITER_NULLFREE(pCtx, pjson);
	// BUGFIX: pid was not freed on the error path (leaked if a write failed
	// between SG_gid__alloc and the SG_NULLFREE above).
	SG_NULLFREE(pCtx, pid);
}