QJsonArray SceneSerializer::serializeNodes(NodeRoot* r)
{
    QJsonArray out;
    // Only direct children are collected here; nested nodes are left to
    // serializeNode() itself.
    auto nodes = r->findChildren<Node*>(QString(), Qt::FindDirectChildrenOnly);
    for (auto node : nodes)
        out.append(serializeNode(node));
    return out;
}
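/*
 * A minimal call-site sketch (not from the original source): wrap the array
 * returned by serializeNodes() in a QJsonDocument and write it out.  The
 * saveScene() helper and the "scene.json" path are placeholders for
 * illustration.
 */
#include <QFile>
#include <QJsonDocument>

bool saveScene(SceneSerializer* s, NodeRoot* root)
{
    QJsonDocument doc(s->serializeNodes(root));
    QFile file("scene.json");
    if (!file.open(QIODevice::WriteOnly))
        return false;
    file.write(doc.toJson());
    return true;
}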
/*
 * Dispatch a command - already parsed and in the form of a Node tree
 * - to all primary segdbs.  Does not wait for completion.  Does not
 * start a global transaction.
 *
 * NB: Callers should use PG_TRY()/PG_CATCH() if needed to make
 * certain that the CdbDispatchResults objects are destroyed by
 * cdbdisp_destroyDispatcherState() in case of error.
 * To wait for completion, check for errors, and clean up, it is
 * suggested that the caller use cdbdisp_finishCommand().
 */
void
cdbdisp_dispatchUtilityStatement(struct Node *stmt,
                                 bool cancelOnError,
                                 bool needTwoPhase,
                                 bool withSnapshot,
                                 struct CdbDispatcherState *ds,
                                 char *debugCaller)
{
    char       *serializedQuerytree;
    int         serializedQuerytree_len;
    Query      *q = makeNode(Query);
    StringInfoData buffer;

    elog((Debug_print_full_dtm ? LOG : DEBUG5),
         "cdbdisp_dispatchUtilityStatement debug_query_string = %s (needTwoPhase = %s, debugCaller = %s)",
         debug_query_string,
         (needTwoPhase ? "true" : "false"),
         debugCaller);

    dtmPreCommand("cdbdisp_dispatchUtilityStatement", "(none)", NULL,
                  needTwoPhase, withSnapshot, false /* inCursor */ );

    initStringInfo(&buffer);

    q->commandType = CMD_UTILITY;

    Assert(stmt != NULL);
    Assert(stmt->type < 1000);
    Assert(stmt->type > 0);

    q->utilityStmt = stmt;
    q->querySource = QSRC_ORIGINAL;

    /*
     * We must set q->canSetTag = true.  False would be used to hide a command
     * introduced by rule expansion which is not allowed to return its
     * completion status in the command tag (PQcmdStatus/PQcmdTuples).  For
     * example, if the original unexpanded command was SELECT, the status
     * should come back as "SELECT n" and should not reflect other commands
     * inserted by rewrite rules.  True means we want the status.
     */
    q->canSetTag = true;

    /*
     * Serialize the stmt tree, and create the sql statement: mppexec ....
     */
    serializedQuerytree = serializeNode((Node *) q, &serializedQuerytree_len,
                                        NULL /* uncompressed_size */ );

    Assert(serializedQuerytree != NULL);

    cdbdisp_dispatchCommand(debug_query_string, serializedQuerytree,
                            serializedQuerytree_len, cancelOnError,
                            needTwoPhase, withSnapshot, ds);
}
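/*
 * A caller-side sketch (illustrative, not from the original source) of the
 * PG_TRY()/PG_CATCH() pattern recommended in the comment above.  The exact
 * signatures of cdbdisp_finishCommand() and cdbdisp_destroyDispatcherState()
 * are assumed here; consult cdbdisp.h for the version in use.
 */
static void
dispatchUtilityExample(struct Node *stmt)
{
    struct CdbDispatcherState ds = {NULL, NULL};

    PG_TRY();
    {
        cdbdisp_dispatchUtilityStatement(stmt,
                                         true /* cancelOnError */ ,
                                         true /* needTwoPhase */ ,
                                         true /* withSnapshot */ ,
                                         &ds,
                                         "dispatchUtilityExample");

        /* Wait for completion, check for errors, and clean up. */
        cdbdisp_finishCommand(&ds, NULL /* callback */ , NULL /* ctx */ );
    }
    PG_CATCH();
    {
        /* Make certain the dispatch results are destroyed on error. */
        cdbdisp_destroyDispatcherState(&ds);
        PG_RE_THROW();
    }
    PG_END_TRY();
}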
void SceneSerializer::serializeNodes(QDataStream* out, QObject* p)
{
    auto nodes = p->findChildren<Node*>(QString(), Qt::FindDirectChildrenOnly);
    *out << quint32(nodes.length());
    for (auto node : nodes)
    {
        serializeNode(out, node);
    }
}
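/*
 * A matching read-side sketch (assumed, not from the original source): the
 * quint32 count prefix written above tells the loader how many child nodes
 * to read back.  SceneLoader and deserializeNode() are hypothetical
 * counterparts to the serializer.
 */
void SceneLoader::deserializeNodes(QDataStream* in, QObject* parent)
{
    quint32 count = 0;
    *in >> count;
    for (quint32 i = 0; i < count; ++i)
        deserializeNode(in, parent);  // hypothetical per-node loader
}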
void Normalizer::printEntityRefNodes(DOMElement *ele)
{
    DOMNode *child = ele->getFirstChild();

    while (child != 0)
    {
        if (child->getNodeType() == DOMNode::ENTITY_REFERENCE_NODE)
        {
            XERCES_STD_QUALIFIER cout << "start of entity ref node" << XERCES_STD_QUALIFIER endl;

            DOMNode *entChild = ((DOMEntityReference*)child)->getFirstChild();
            while (entChild != 0)
            {
                serializeNode(entChild);
                entChild = entChild->getNextSibling();
            }

            XERCES_STD_QUALIFIER cout << "\nend of entity ref node\n\n" << XERCES_STD_QUALIFIER endl;
        }

        if (child->getNodeType() == DOMNode::ELEMENT_NODE)
        {
            printEntityRefNodes((DOMElement*)child);
        }

        child = child->getNextSibling();
    }
}
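/*
 * An illustrative driver (assumed, not part of the original Normalizer
 * sample): parse a document so that entity reference nodes are preserved in
 * the DOM, then walk it with printEntityRefNodes().  The "input.xml" path
 * and runNormalizerExample() wrapper are placeholders.
 */
#include <xercesc/parsers/XercesDOMParser.hpp>
#include <xercesc/util/PlatformUtils.hpp>

void runNormalizerExample(Normalizer& norm)
{
    XERCES_CPP_NAMESPACE_QUALIFIER XMLPlatformUtils::Initialize();
    {
        XERCES_CPP_NAMESPACE_QUALIFIER XercesDOMParser parser;
        parser.setCreateEntityReferenceNodes(true);  // keep ENTITY_REFERENCE_NODEs
        parser.parse("input.xml");
        norm.printEntityRefNodes(parser.getDocument()->getDocumentElement());
    }   // parser destroyed before Terminate()
    XERCES_CPP_NAMESPACE_QUALIFIER XMLPlatformUtils::Terminate();
}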
void ModelSerializer::serialize(std::ostream &os) const
{
    os << "<model>" << std::endl;
    name_node_map names_nodes = model->getNamesAndNodes();
    BOOST_FOREACH(snp node, names_nodes)
    {
        serializeNode(os, node.first, node.second);
    }
    os << "</model>" << std::endl;
}
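/*
 * A minimal call-site sketch (assumed): stream the serialized model to a
 * file.  Only serialize(std::ostream&) appears in the original snippet; the
 * writeModel() helper and "model.xml" path are placeholders.
 */
#include <fstream>

void writeModel(const ModelSerializer &serializer)
{
    std::ofstream out("model.xml");
    serializer.serialize(out);
}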
/*
 * Convert RecordCache into a byte-sequence, and store it directly
 * into a chunklist for transmission.
 *
 * This code is based on the printtup_internal_20() function in printtup.c.
 */
void
SerializeRecordCacheIntoChunks(SerTupInfo *pSerInfo,
                               TupleChunkList tcList,
                               MotionConn *conn)
{
    TupleChunkListItem tcItem = NULL;
    MemoryContext oldCtxt;
    TupSerHeader tsh;
    List       *typelist = NULL;
    int         size = -1;
    char       *buf = NULL;

    AssertArg(tcList != NULL);
    AssertArg(pSerInfo != NULL);

    /* get ready to go */
    tcList->p_first = NULL;
    tcList->p_last = NULL;
    tcList->num_chunks = 0;
    tcList->serialized_data_length = 0;
    tcList->max_chunk_length = Gp_max_tuple_chunk_size;

    tcItem = getChunkFromCache(&pSerInfo->chunkCache);
    if (tcItem == NULL)
    {
        ereport(FATAL,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("Could not allocate space for first chunk item in new chunk list.")));
    }

    /* assume that we'll take a single chunk */
    SetChunkType(tcItem->chunk_data, TC_WHOLE);
    tcItem->chunk_length = TUPLE_CHUNK_HEADER_SIZE;
    appendChunkToTCList(tcList, tcItem);

    AssertState(s_tupSerMemCtxt != NULL);

    /*
     * To avoid inconsistency of the record cache between sender and receiver
     * in the same motion, send the serialized record cache to the receiver
     * before the first tuple is sent.  The receiver is responsible for
     * registering the records in its own local cache and remapping the
     * typmod of tuples sent by the sender.
     */
    oldCtxt = MemoryContextSwitchTo(s_tupSerMemCtxt);
    typelist = build_tuple_node_list(conn->sent_record_typmod);
    buf = serializeNode((Node *) typelist, &size, NULL);
    MemoryContextSwitchTo(oldCtxt);

    tsh.tuplen = sizeof(TupSerHeader) + size;

    /*
     * We use natts == 0xffff and infomask == 0xffff to identify this special
     * tuple, which actually carries the serialized record cache table.
     */
    tsh.natts = RECORD_CACHE_MAGIC_NATTS;
    tsh.infomask = RECORD_CACHE_MAGIC_INFOMASK;

    addByteStringToChunkList(tcList, (char *) &tsh, sizeof(TupSerHeader),
                             &pSerInfo->chunkCache);
    addByteStringToChunkList(tcList, buf, size, &pSerInfo->chunkCache);
    addPadding(tcList, &pSerInfo->chunkCache, size);

    /*
     * If we have more than one chunk, we have to set the chunk types on our
     * first chunk and last chunk.
     */
    if (tcList->num_chunks > 1)
    {
        TupleChunkListItem first,
                    last;

        first = tcList->p_first;
        last = tcList->p_last;

        Assert(first != NULL);
        Assert(first != last);
        Assert(last != NULL);

        SetChunkType(first->chunk_data, TC_PARTIAL_START);
        SetChunkType(last->chunk_data, TC_PARTIAL_END);

        /*
         * Any intervening chunks are already set to TC_PARTIAL_MID when
         * allocated.
         */
    }

    return;
}
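/*
 * Receiver-side sketch (illustrative, not from the original source): the
 * magic natts/infomask values written above let the receiving motion node
 * recognize that an incoming "tuple" actually carries the serialized record
 * cache rather than row data.
 */
static bool
isRecordCacheHeader(const TupSerHeader *tsh)
{
    return tsh->natts == RECORD_CACHE_MAGIC_NATTS &&
           tsh->infomask == RECORD_CACHE_MAGIC_INFOMASK;
}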
/*
 * Compose and dispatch the MPPEXEC commands corresponding to a plan tree
 * within a complete parallel plan.  (A plan tree will correspond either
 * to an initPlan or to the main plan.)
 *
 * If cancelOnError is true, then any dispatching error, a cancellation
 * request from the client, or an error from any of the associated QEs,
 * may cause the unfinished portion of the plan to be abandoned or canceled;
 * and in the event this occurs before all gangs have been dispatched, this
 * function does not return, but waits for all QEs to stop and exits to
 * the caller's error catcher via ereport(ERROR,...).  Otherwise this
 * function returns normally and errors are not reported until later.
 *
 * If cancelOnError is false, the plan is to be dispatched as fully as
 * possible and the QEs allowed to proceed regardless of cancellation
 * requests, errors or connection failures from other QEs, etc.
 *
 * The CdbDispatchResults objects allocated for the plan are returned
 * in *pPrimaryResults.  The caller, after calling
 * CdbCheckDispatchResult(), can examine the CdbDispatchResults
 * objects, can keep them as long as needed, and ultimately must free
 * them with cdbdisp_destroyDispatcherState() prior to deallocation of
 * the caller's memory context.  Callers should use PG_TRY/PG_CATCH to
 * ensure proper cleanup.
 *
 * To wait for completion, check for errors, and clean up, it is
 * suggested that the caller use cdbdisp_finishCommand().
 *
 * Note that the slice tree dispatched is the one specified in the EState
 * of the argument QueryDesc as es_cur__slice.
 *
 * Note that the QueryDesc params must include PARAM_EXEC_REMOTE parameters
 * containing the values of any initplans required by the slice to be run.
 * (This is handled by calls to addRemoteExecParamsToParamList() from the
 * functions preprocess_initplans() and ExecutorRun().)
 *
 * Each QE receives its assignment as a message of type 'M' in PostgresMain().
 * The message is deserialized and processed by exec_mpp_query() in postgres.c.
 */
void
cdbdisp_dispatchPlan(struct QueryDesc *queryDesc,
                     bool planRequiresTxn,
                     bool cancelOnError,
                     struct CdbDispatcherState *ds)
{
    char       *splan,
               *sddesc,
               *sparams;
    int         splan_len,
                splan_len_uncompressed,
                sddesc_len,
                sparams_len;
    SliceTable *sliceTbl;
    int         rootIdx;
    int         oldLocalSlice;
    PlannedStmt *stmt;
    bool        is_SRI;
    DispatchCommandQueryParms queryParms;
    CdbComponentDatabaseInfo *qdinfo;

    ds->primaryResults = NULL;
    ds->dispatchThreads = NULL;

    Assert(Gp_role == GP_ROLE_DISPATCH);
    Assert(queryDesc != NULL && queryDesc->estate != NULL);

    /*
     * Later we'll need to operate with the slice table provided via the
     * EState structure in the argument QueryDesc.  Cache this information
     * locally and assert our expectations about it.
     */
    sliceTbl = queryDesc->estate->es_sliceTable;
    rootIdx = RootSliceIndex(queryDesc->estate);

    Assert(sliceTbl != NULL);
    Assert(rootIdx == 0 ||
           (rootIdx > sliceTbl->nMotions &&
            rootIdx <= sliceTbl->nMotions + sliceTbl->nInitPlans));

    /*
     * Keep old value so we can restore it.  We use this field as a parameter.
     */
    oldLocalSlice = sliceTbl->localSlice;

    /*
     * This function is called only for planned statements.
     */
    stmt = queryDesc->plannedstmt;
    Assert(stmt);

    /*
     * Let's evaluate STABLE functions now, so we get consistent values on
     * the QEs.
     *
     * Also, if this is a single-row INSERT statement, let's evaluate
     * nextval() and currval() now, so that we get the QD's values, and a
     * consistent value for everyone.
     */
    is_SRI = false;

    if (queryDesc->operation == CMD_INSERT)
    {
        Assert(stmt->commandType == CMD_INSERT);

        /*
         * We might look for a constant input relation (instead of SRI), but
         * I'm afraid that wouldn't scale.
         */
        is_SRI = IsA(stmt->planTree, Result) &&
            stmt->planTree->lefttree == NULL;
    }

    if (!is_SRI)
        clear_relsize_cache();

    if (queryDesc->operation == CMD_INSERT ||
        queryDesc->operation == CMD_SELECT ||
        queryDesc->operation == CMD_UPDATE ||
        queryDesc->operation == CMD_DELETE)
    {
        MemoryContext oldContext;

        oldContext = CurrentMemoryContext;
        if (stmt->qdContext)
        {
            oldContext = MemoryContextSwitchTo(stmt->qdContext);
        }
        else
        {
            /*
             * The memory context of the plan tree should not change.
             */
            MemoryContext mc = GetMemoryChunkContext(stmt->planTree);

            oldContext = MemoryContextSwitchTo(mc);
        }

        stmt->planTree = (Plan *) exec_make_plan_constant(stmt, is_SRI);

        MemoryContextSwitchTo(oldContext);
    }

    /*
     * Cursor queries and bind/execute path queries don't run on the
     * writer-gang QEs; but they require snapshot-synchronization to
     * get started.
     *
     * initPlans, and other work (see the function pre-evaluation
     * above) may advance the snapshot "segmateSync" value, so we're
     * best off setting the shared-snapshot-ready value here.  This
     * will dispatch to the writer gang and force it to set its
     * snapshot; we'll then be able to serialize the same snapshot
     * version (see qdSerializeDtxContextInfo() below).
     */
    if (queryDesc->extended_query)
    {
        verify_shared_snapshot_ready();
    }

    /*
     * Serialize the plan tree.  Note that we're called for a single slice
     * tree (corresponding to an initPlan or the main plan), so the
     * parameters are fixed and we can include them in the prefix.
     */
    splan = serializeNode((Node *) queryDesc->plannedstmt, &splan_len,
                          &splan_len_uncompressed);

    uint64      plan_size_in_kb = ((uint64) splan_len_uncompressed) / (uint64) 1024;

    if (0 < gp_max_plan_size &&
        plan_size_in_kb > gp_max_plan_size)
    {
        ereport(ERROR,
                (errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
                 (errmsg("Query plan size limit exceeded, current size: "
                         UINT64_FORMAT "KB, max allowed size: %dKB",
                         plan_size_in_kb, gp_max_plan_size),
                  errhint("Size controlled by gp_max_plan_size"))));
    }

    Assert(splan != NULL && splan_len > 0 && splan_len_uncompressed > 0);

    if (queryDesc->params != NULL && queryDesc->params->numParams > 0)
    {
        ParamListInfoData *pli;
        ParamExternData *pxd;
        StringInfoData parambuf;
        Size        length;
        int         plioff;
        int32       iparam;

        /*
         * Allocate a buffer for the params.
         */
        initStringInfo(&parambuf);

        /*
         * Copy the ParamListInfoData header and ParamExternData array.
         */
        pli = queryDesc->params;
        length = (char *) &pli->params[pli->numParams] - (char *) pli;
        plioff = parambuf.len;
        Assert(plioff == MAXALIGN(plioff));
        appendBinaryStringInfo(&parambuf, (char *) pli, length);

        /*
         * Copy pass-by-reference param values.
         */
        for (iparam = 0; iparam < queryDesc->params->numParams; iparam++)
        {
            int16       typlen;
            bool        typbyval;

            /*
             * Recompute pli each time in case parambuf.data is repalloc'ed.
             */
            pli = (ParamListInfoData *) (parambuf.data + plioff);
            pxd = &pli->params[iparam];

            if (pxd->ptype == InvalidOid)
                continue;

            /*
             * Does pxd->value contain the value itself, or a pointer?
             */
            get_typlenbyval(pxd->ptype, &typlen, &typbyval);
            if (!typbyval)
            {
                char       *s = DatumGetPointer(pxd->value);

                if (pxd->isnull || !PointerIsValid(s))
                {
                    pxd->isnull = true;
                    pxd->value = 0;
                }
                else
                {
                    length = datumGetSize(pxd->value, typbyval, typlen);

                    /*
                     * We *must* set this before we append.  Appending may
                     * realloc, which will invalidate our pxd ptr.
                     * (Obviously we could append first if we recalculated
                     * pxd from the new base address.)
                     */
                    pxd->value = Int32GetDatum(length);

                    appendBinaryStringInfo(&parambuf, (char *) &iparam, sizeof(iparam));
                    appendBinaryStringInfo(&parambuf, s, length);
                }
            }
        }
        sparams = parambuf.data;
        sparams_len = parambuf.len;
    }
    else
    {
        sparams = NULL;
        sparams_len = 0;
    }

    sddesc = serializeNode((Node *) queryDesc->ddesc, &sddesc_len,
                           NULL /* uncompressed_size */ );

    MemSet(&queryParms, 0, sizeof(queryParms));
    queryParms.strCommand = queryDesc->sourceText;
    queryParms.serializedQuerytree = NULL;
    queryParms.serializedQuerytreelen = 0;
    queryParms.serializedPlantree = splan;
    queryParms.serializedPlantreelen = splan_len;
    queryParms.serializedParams = sparams;
    queryParms.serializedParamslen = sparams_len;
    queryParms.serializedQueryDispatchDesc = sddesc;
    queryParms.serializedQueryDispatchDesclen = sddesc_len;
    queryParms.rootIdx = rootIdx;

    /*
     * Sequence server info.
     */
    qdinfo = &(getComponentDatabases()->entry_db_info[0]);
    Assert(qdinfo != NULL && qdinfo->hostip != NULL);
    queryParms.seqServerHost = pstrdup(qdinfo->hostip);
    queryParms.seqServerHostlen = strlen(qdinfo->hostip) + 1;
    queryParms.seqServerPort = seqServerCtl->seqServerPort;

    /*
     * Serialize a version of our snapshot, and generate our transaction
     * isolations.  We generally want plan-based dispatch to be in a global
     * transaction.  The executor gets to decide if the special circumstances
     * exist which allow us to dispatch without starting a global xact.
     */
    queryParms.serializedDtxContextInfo =
        qdSerializeDtxContextInfo(&queryParms.serializedDtxContextInfolen,
                                  true /* wantSnapshot */ ,
                                  queryDesc->extended_query,
                                  mppTxnOptions(planRequiresTxn),
                                  "cdbdisp_dispatchPlan");

    cdbdisp_dispatchX(&queryParms, cancelOnError, sliceTbl, ds);

    sliceTbl->localSlice = oldLocalSlice;
}
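/*
 * QE-side sketch (illustrative only): on the segment, the serialized plan
 * arrives in a type-'M' protocol message and is turned back into a node
 * tree with deserializeNode(), the counterpart of serializeNode() in
 * cdbsrlz.c; its exact signature is assumed here.  exec_mpp_query() in
 * postgres.c performs the real work; this fragment shows only the
 * deserialization step.
 */
static PlannedStmt *
deserializePlanExample(const char *serializedPlantree, int serializedPlantreelen)
{
    Node       *node = deserializeNode(serializedPlantree, serializedPlantreelen);

    Assert(IsA(node, PlannedStmt));
    return (PlannedStmt *) node;
}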