/*
 * CheckSendPlanStateGpmonPkt
 *		Forward this node's gpmon packet to perfmon, at most once per
 *		gpmon tick, for nodes whose driver slice is executing here.
 */
void
CheckSendPlanStateGpmonPkt(PlanState *ps)
{
	if (ps == NULL || ps->state == NULL)
		return;

	if (!gp_enable_gpperfmon)
		return;

	/*
	 * When Sort/Material is called by the SharedNode that is not
	 * a driver slice, do not send gpmon packet.
	 */
	Assert(ps->plan != NULL);
	if (!toSendGpmonExecutingPkt(ps->plan, ps->state))
		return;

	/* Send only if we have not reported yet, or a new tick has begun. */
	if (!ps->fHadSentGpmon || ps->gpmon_plan_tick != gpmon_tick)
	{
		gpmon_send(&ps->gpmon_pkt);
		ps->fHadSentGpmon = true;
	}

	ps->gpmon_plan_tick = gpmon_tick;
}
/*
 * InitPlanNodeGpmonPkt -- initialize the init gpmon package, and send it off.
 */
void
InitPlanNodeGpmonPkt(Plan *plan, gpmon_packet_t *gpmon_pkt, EState *estate,
					 PerfmonNodeType type, int64 rowsout_est, char *relname)
{
	int			nsegments;

	if (!plan)
		return;

	/*
	 * The estimates are now global so we need to adjust by the number of
	 * segments in the array.  Clamp to at least 1 so the division below
	 * is always safe.
	 */
	nsegments = getgpsegmentCount();
	if (nsegments < 1)
		nsegments = 1;
	Assert(nsegments >= 1);

	memset(gpmon_pkt, 0, sizeof(gpmon_packet_t));

	/* Packet header. */
	gpmon_pkt->magic = GPMON_MAGIC;
	gpmon_pkt->version = GPMON_PACKET_VERSION;
	gpmon_pkt->pkttype = GPMON_PKTTYPE_QEXEC;

	/* Key identifying this plan node instance within the query. */
	gpmon_gettmid(&gpmon_pkt->u.qexec.key.tmid);
	gpmon_pkt->u.qexec.key.ssid = gp_session_id;
	gpmon_pkt->u.qexec.key.ccnt = gp_command_count;
	gpmon_pkt->u.qexec.key.hash_key.segid = Gp_segment;
	gpmon_pkt->u.qexec.key.hash_key.pid = MyProcPid;
	gpmon_pkt->u.qexec.key.hash_key.nid = plan->plan_node_id;

	/* Node description and per-segment row estimate. */
	gpmon_pkt->u.qexec.pnid = plan->plan_parent_node_id;
	gpmon_pkt->u.qexec.nodeType = (apr_uint16_t) type;
	gpmon_pkt->u.qexec.rowsout = 0;
	gpmon_pkt->u.qexec.rowsout_est = rowsout_est / nsegments;

	if (relname)
		snprintf(gpmon_pkt->u.qexec.relation_name,
				 sizeof(gpmon_pkt->u.qexec.relation_name), "%s", relname);

	/* Report the Initialize state, then flip to Executing for later sends. */
	gpmon_pkt->u.qexec.status = (uint8) PMNS_Initialize;
	if (gp_enable_gpperfmon && estate)
		gpmon_send(gpmon_pkt);

	gpmon_pkt->u.qexec.status = (uint8) PMNS_Executing;
}
/*
 * EndPlanStateGpmonPkt
 *		Mark this node's gpmon packet Finished and, when perfmon is
 *		enabled and the locally executing slice owns this node, send it.
 */
void
EndPlanStateGpmonPkt(PlanState *ps)
{
	if (!ps)
		return;

	/* Flip the status even if we end up not reporting it. */
	ps->gpmon_pkt.u.qexec.status = (uint8) PMNS_Finished;

	if (gp_enable_gpperfmon &&
		ps->state &&
		LocallyExecutingSliceIndex(ps->state) == currentSliceId)
		gpmon_send(&ps->gpmon_pkt);
}
/*
 * CheckSendPlanStateGpmonPkt
 *		Forward this node's gpmon packet to perfmon at most once per
 *		gpmon tick; only the locally executing slice actually sends.
 */
void
CheckSendPlanStateGpmonPkt(PlanState *ps)
{
	if (!ps)
		return;

	if (!gp_enable_gpperfmon)
		return;

	if (!ps->fHadSentGpmon || ps->gpmon_plan_tick != gpmon_tick)
	{
		/* Only report from the slice that is executing this node here. */
		if (ps->state &&
			LocallyExecutingSliceIndex(ps->state) == currentSliceId)
			gpmon_send(&ps->gpmon_pkt);

		ps->fHadSentGpmon = true;
	}

	ps->gpmon_plan_tick = gpmon_tick;
}
/*
 * EndPlanStateGpmonPkt
 *		Mark this node's gpmon packet Finished and send it, provided the
 *		node is actually running in the executing slice.
 */
void
EndPlanStateGpmonPkt(PlanState *ps)
{
	if (ps == NULL || ps->state == NULL)
		return;

	/*
	 * If this operator is not running in this slice, do not
	 * send the finished packet.
	 */
	if (!NodeInExecutingSlice(ps->plan, ps->state))
		return;

	ps->gpmon_pkt.u.qexec.status = (uint8) PMNS_Finished;

	if (gp_enable_gpperfmon)
		gpmon_send(&ps->gpmon_pkt);
}
/**
 * Sends a UDP packet to perfmon containing current segment statistics.
 *
 * Refreshes the file-scope seginfopkt via UpdateSegmentInfoGpmonPkt()
 * and transmits it with gpmon_send().
 *
 * Note: the definition previously used an empty parameter list "()",
 * which in C (pre-C23) declares a function with unspecified parameters;
 * "(void)" states the zero-argument contract explicitly.
 */
static void
SegmentInfoSender(void)
{
	UpdateSegmentInfoGpmonPkt(&seginfopkt);
	gpmon_send(&seginfopkt);
}