void DbtcProxy::execTAKE_OVERTCCONF(Signal* signal)
{
  jamEntry();

  // Bail out unless we are at the expected point in the node
  // failure handling sequence.
  if (!checkNodeFailSequence(signal))
  {
    jam();
    return;
  }

  // Fan the confirmation out to every local TC worker instance,
  // forwarding the incoming signal payload unchanged.
  for (Uint32 worker = 0; worker < c_workers; worker++)
  {
    jam();
    const Uint32 workerBlockRef =
      numberToRef(number(), workerInstance(worker), getOwnNodeId());
    sendSignal(workerBlockRef, GSN_TAKE_OVERTCCONF, signal,
               signal->getLength(), JBB);
  }
}
void DblqhProxy::completeLCP_1(Signal* signal)
{
  // Transition the LCP record into the first completion phase; no
  // completion replies may be outstanding yet.
  ndbrequire(c_lcpRecord.m_state == LcpRecord::L_RUNNING);
  c_lcpRecord.m_state = LcpRecord::L_COMPLETING_1;
  ndbrequire(c_lcpRecord.m_complete_outstanding == 0);

  /**
   * Phase 1: send LCP_FRAG_ORD with lastFragmentFlag = true to every
   * LQH worker instance.  Each worker replies with LCP_COMPLETE_REP.
   */
  LcpFragOrd* const ord = (LcpFragOrd*)signal->getDataPtrSend();
  ord->lcpId = c_lcpRecord.m_lcpId;
  ord->lastFragmentFlag = true;
  for (Uint32 w = 0; w < c_workers; w++)
  {
    jam();
    c_lcpRecord.m_complete_outstanding++;
    sendSignal(workerRef(w), GSN_LCP_FRAG_ORD, signal,
               LcpFragOrd::SignalLength, JBB);
  }

  /**
   * Phase 2: send END_LCP_REQ to the pgman instance of every worker
   * (the "extra" pgman instance is not included).  Each instance
   * replies with END_LCP_CONF.
   */
  EndLcpReq* const req = (EndLcpReq*)signal->getDataPtrSend();
  req->senderData = 0;
  req->senderRef = reference();
  req->backupPtr = 0;
  req->backupId = c_lcpRecord.m_lcpId;
  for (Uint32 w = 0; w < c_workers; w++)
  {
    jam();
    c_lcpRecord.m_complete_outstanding++;
    sendSignal(numberToRef(PGMAN, workerInstance(w), getOwnNodeId()),
               GSN_END_LCP_REQ, signal,
               EndLcpReq::SignalLength, JBB);
  }
}
void DbtuxProxy::sendINDEX_STAT_REP(Signal* signal, Uint32 ssId,
                                    SectionHandle*)
{
  Ss_INDEX_STAT_REP& ss = ssFind<Ss_INDEX_STAT_REP>(ssId);

  IndexStatRep* rep = (IndexStatRep*)signal->getDataPtrSend();
  *rep = ss.m_rep;
  /*
   * Bug fix: the original code assigned rep->senderData twice (first
   * reference(), then ssId) — a dead store that left rep->senderRef
   * holding whatever stale value was copied from ss.m_rep.  Mirror
   * sendINDEX_STAT_IMPL_REQ: senderRef carries our block reference,
   * senderData carries the ssId.
   */
  rep->senderRef = reference();
  rep->senderData = ssId;

  const Uint32 instance = workerInstance(ss.m_worker);
  NdbLogPartInfo lpinfo(instance);

  // A concrete fragment must be given; only the worker owning that
  // fragment's log part forwards the report, others skip.
  ndbrequire(rep->fragId != ZNIL);
  if (!lpinfo.partNoOwner(rep->indexId, rep->fragId))
  {
    jam();
    skipReq(ss);
    return;
  }

  sendSignal(workerRef(ss.m_worker), GSN_INDEX_STAT_REP, signal,
             IndexStatRep::SignalLength, JBB);
}
void DbtuxProxy::sendINDEX_STAT_IMPL_REQ(Signal* signal, Uint32 ssId,
                                         SectionHandle*)
{
  // Forward an INDEX_STAT_IMPL_REQ to one TUX worker instance.
  // Depending on requestType and fragment ownership, a worker may be
  // skipped entirely (skipReq) or sent the request with fragId
  // cleared to ZNIL.
  Ss_INDEX_STAT_IMPL_REQ& ss = ssFind<Ss_INDEX_STAT_IMPL_REQ>(ssId);

  IndexStatImplReq* req = (IndexStatImplReq*)signal->getDataPtrSend();
  *req = ss.m_req;
  req->senderRef = reference();
  req->senderData = ssId;

  // Log part info for this worker decides fragment ownership below.
  const Uint32 instance = workerInstance(ss.m_worker);
  NdbLogPartInfo lpinfo(instance); //XXX remove unused

  switch (req->requestType) {
  case IndexStatReq::RT_START_MON:
    /*
     * DICT sets fragId if assigned frag is on this node, or else ZNIL
     * to turn off any possible old assignment.  In MT-LQH we also have
     * to check which worker owns the frag.
     */
    if (req->fragId != ZNIL &&
        !lpinfo.partNoOwner(req->indexId, req->fragId)) {
      jam();
      // Not this worker's fragment: deliver the request but with the
      // assignment turned off.
      req->fragId = ZNIL;
    }
    break;
  case IndexStatReq::RT_STOP_MON:
    /*
     * DICT sets fragId to ZNIL always.  There is no (pointless) check
     * to see if the frag was ever assigned.
     */
    ndbrequire(req->fragId == ZNIL);
    break;
  case IndexStatReq::RT_SCAN_FRAG:
    // A concrete fragment is required; only the owning worker scans
    // it, every other worker is skipped.
    ndbrequire(req->fragId != ZNIL);
    if (!lpinfo.partNoOwner(req->indexId, req->fragId)) {
      jam();
      skipReq(ss);
      return;
    }
    break;
  case IndexStatReq::RT_CLEAN_NEW:
  case IndexStatReq::RT_CLEAN_OLD:
  case IndexStatReq::RT_CLEAN_ALL:
    // Clean requests are fragment-agnostic and go to every worker.
    ndbrequire(req->fragId == ZNIL);
    break;
  case IndexStatReq::RT_DROP_HEAD:
    /*
     * Only one client can do the PK-delete of the head record.  We use
     * of course the worker which owns the assigned fragment.
     */
    ndbrequire(req->fragId != ZNIL);
    if (!lpinfo.partNoOwner(req->indexId, req->fragId)) {
      jam();
      skipReq(ss);
      return;
    }
    break;
  default:
    // Unknown requestType: crash the node rather than proceed.
    ndbrequire(false);
    break;
  }

  sendSignal(workerRef(ss.m_worker), GSN_INDEX_STAT_IMPL_REQ, signal,
             IndexStatImplReq::SignalLength, JBB);
}