Example #1
0
/*
 * InitPlanNodeGpmonPkt -- initialize the gpmon packet for a plan node, and send it off.
 */
void InitPlanNodeGpmonPkt(Plan *plan, gpmon_packet_t *gpmon_pkt, EState *estate,
						  PerfmonNodeType type,
						  int64 rowsout_est,
						  char* relname)
{
	int rowsout_adjustment_factor = 0;

	if (!plan)
		return;

	/*
	 * The estimates are now global, so we need to adjust by the number of
	 * segments in the array.
	 */
	rowsout_adjustment_factor = GetQEGangNum();

	/* Make sure we don't div by zero below */
	if (rowsout_adjustment_factor < 1)
		rowsout_adjustment_factor = 1;

	Assert(rowsout_adjustment_factor >= 1);

	memset(gpmon_pkt, 0, sizeof(gpmon_packet_t));

	gpmon_pkt->magic = GPMON_MAGIC;
	gpmon_pkt->version = GPMON_PACKET_VERSION;
	gpmon_pkt->pkttype = GPMON_PKTTYPE_QEXEC;

	gpmon_gettmid(&gpmon_pkt->u.qexec.key.tmid);
	gpmon_pkt->u.qexec.key.ssid = gp_session_id;
	gpmon_pkt->u.qexec.key.ccnt = gp_command_count;
	gpmon_pkt->u.qexec.key.hash_key.segid = GetQEIndex();
	gpmon_pkt->u.qexec.key.hash_key.pid = MyProcPid;
	gpmon_pkt->u.qexec.key.hash_key.nid = plan->plan_node_id;

	gpmon_pkt->u.qexec.pnid = plan->plan_parent_node_id;

	gpmon_pkt->u.qexec.nodeType = (apr_uint16_t)type;

	gpmon_pkt->u.qexec.rowsout = 0;
	gpmon_pkt->u.qexec.rowsout_est = rowsout_est / rowsout_adjustment_factor;

	if (relname)
	{
		snprintf(gpmon_pkt->u.qexec.relation_name, sizeof(gpmon_pkt->u.qexec.relation_name), "%s", relname);
	}

	gpmon_pkt->u.qexec.status = (uint8)PMNS_Initialize;

	if (gp_enable_gpperfmon && estate)
	{
		gpmon_send(gpmon_pkt);
	}

	gpmon_pkt->u.qexec.status = (uint8)PMNS_Executing;
}
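
Below is a minimal caller sketch, not taken from the source: it assumes the executing node keeps its gpmon_packet_t in a PlanState field named gpmon_pkt and that PMNT_Sort is a valid PerfmonNodeType value; both names are assumptions for illustration only.

/*
 * Hypothetical caller sketch -- assumes PlanState carries a gpmon_pkt field
 * and that PMNT_Sort is a valid PerfmonNodeType value.
 */
static void
initGpmonPktSketch(PlanState *planstate, EState *estate)
{
	Plan *plan = planstate->plan;

	InitPlanNodeGpmonPkt(plan,
						 &planstate->gpmon_pkt,		/* packet stored on the node */
						 estate,
						 PMNT_Sort,					/* assumed node-type value */
						 (int64) plan->plan_rows,	/* planner's global row estimate */
						 NULL);						/* no relation name for this node */
}
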
Example #2
0
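/*
 * EndMotionLayerNode -- clean up the state kept for one motion node: report
 * senders that never delivered end-of-stream, log transfer statistics, and
 * release the node's chunk lists, tuple-stores and serialization info.
 */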
void
EndMotionLayerNode(MotionLayerState *mlStates, int16 motNodeID, bool flushCommLayer)
{
    MotionNodeEntry *pMNEntry;
    ChunkSorterEntry *pCSEntry;
    int i;

    pMNEntry = getMotionNodeEntry(mlStates, motNodeID, "EndMotionLayerNode");

#ifdef AMS_VERBOSE_LOGGING
    elog(DEBUG5, "Cleaning up Motion Layer details for motion node %d.",
         motNodeID);
#endif

    /*
     * Iterate through this motion node's per-sender chunk-sorter entries, to
     * see if we have received end-of-stream from all senders.
     */
    if (pMNEntry->preserve_order && pMNEntry->ready_tuple_lists != NULL)
    {
        for (i = 0; i < GetQEGangNum(); i++)
        {
            pCSEntry = &pMNEntry->ready_tuple_lists[i];

            if (pMNEntry->preserve_order &&
                    gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
            {
                /* Print chunk-sorter entry statistics. */
                elog(DEBUG4, "Chunk-sorter entry [route=%d,node=%d] statistics:\n"
                     "\tAvailable Tuples High-Watermark: " UINT64_FORMAT,
                     i, pMNEntry->motion_node_id,
                     pMNEntry->stat_tuples_available_hwm);
            }

            if (!pMNEntry->stopped && !pCSEntry->end_of_stream)
            {
                if (flushCommLayer)
                {
                    elog(FATAL, "Motion layer node %d cleanup - did not receive"
                         " end-of-stream from sender %d.", motNodeID, i);

                    /*** TODO - get chunks until end-of-stream comes in. ***/
                }
                else
                {
                    elog(LOG, "Motion layer node %d cleanup - did not receive"
                         " end-of-stream from sender %d.", motNodeID, i);
                }
            }
            else
            {
                /* End-of-stream is marked for this entry. */

                /*** TODO - do more than just complain! ***/

                if (pCSEntry->chunk_list.num_chunks > 0)
                {
                    elog(LOG, "Motion layer node %d cleanup - there are still"
                         " %d chunks enqueued from sender %d.", motNodeID,
                         pCSEntry->chunk_list.num_chunks, i );
                }

                /***
                	TODO - Make sure there are no outstanding tuples in the
                	tuple-store.
                ***/
            }

            /*
             * Clean up the chunk-sorter entry: release its chunk list and,
             * if order is preserved, its tuple-store.
             */
            clearTCList(&pMNEntry->ser_tup_info.chunkCache, &pCSEntry->chunk_list);
            if (pMNEntry->preserve_order)	/* Clean up the tuple-store. */
                htfifo_destroy(pCSEntry->ready_tuples);
        }
    }
    pMNEntry->cleanedUp = true;

    /* Log transfer statistics, then clean up the motion-node entry itself. */
    if (gp_log_interconnect >= GPVARS_VERBOSITY_VERBOSE)
    {
        if (pMNEntry->stat_total_bytes_sent > 0 ||
                pMNEntry->sel_wr_wait > 0)
        {
            elog(LOG, "Interconnect seg%d slice%d sent " UINT64_FORMAT " tuples, "
                 UINT64_FORMAT " total bytes, " UINT64_FORMAT " tuple bytes, "
                 UINT64_FORMAT " chunks; waited " UINT64_FORMAT " usec.",
                 GetQEIndex(),
                 currentSliceId,
                 pMNEntry->stat_total_sends,
                 pMNEntry->stat_total_bytes_sent,
                 pMNEntry->stat_tuple_bytes_sent,
                 pMNEntry->stat_total_chunks_sent,
                 pMNEntry->sel_wr_wait
                );
        }
        if (pMNEntry->stat_total_bytes_recvd > 0 ||
                pMNEntry->sel_rd_wait > 0)
        {
            elog(LOG, "Interconnect seg%d slice%d received from slice%d: " UINT64_FORMAT " tuples, "
                 UINT64_FORMAT " total bytes, " UINT64_FORMAT " tuple bytes, "
                 UINT64_FORMAT " chunks; waited " UINT64_FORMAT " usec.",
                 GetQEIndex(),
                 currentSliceId,
                 motNodeID,
                 pMNEntry->stat_total_recvs,
                 pMNEntry->stat_total_bytes_recvd,
                 pMNEntry->stat_tuple_bytes_recvd,
                 pMNEntry->stat_total_chunks_recvd,
                 pMNEntry->sel_rd_wait
                );
        }
    }

    CleanupSerTupInfo(&pMNEntry->ser_tup_info);
    FreeTupleDesc(pMNEntry->tuple_desc);
    if (!pMNEntry->preserve_order)
        htfifo_destroy(pMNEntry->ready_tuples);

    pMNEntry->valid = false;
}
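
A hypothetical teardown sketch, not from the source: it assumes MotionLayerState tracks the number of registered motion nodes in a field named mneCount and that motion node IDs start at 1; under those assumptions, an end-of-query path could close every node without flushing the communication layer.

/*
 * Hypothetical teardown sketch -- assumes mlStates->mneCount holds the
 * number of registered motion nodes and that node IDs start at 1.
 */
static void
endAllMotionNodesSketch(MotionLayerState *mlStates)
{
    int16 motNodeID;

    for (motNodeID = 1; motNodeID <= mlStates->mneCount; motNodeID++)
        EndMotionLayerNode(mlStates, motNodeID, false /* don't flush the comm layer */);
}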