Example #1
File: Trix.cpp Project: 0x00xw/mysql-2
void Trix::execUTIL_EXECUTE_REF(Signal* signal)
{
  jamEntry();
  UtilExecuteRef * utilExecuteRef = (UtilExecuteRef *)signal->getDataPtr();
  SubscriptionRecPtr subRecPtr;
  SubscriptionRecord* subRec;

  subRecPtr.i = utilExecuteRef->senderData;
  if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
    printf("Trix::execUTIL_EXECUTE_REF: Failed to find subscription data %u\n", subRecPtr.i);
    return;
  }
  subRecPtr.p = subRec;
  ndbrequire(utilExecuteRef->errorCode == UtilExecuteRef::TCError);
  if(utilExecuteRef->TCErrorCode == CONSTRAINT_VIOLATION)
    buildFailed(signal, subRecPtr, BuildIndxRef::IndexNotUnique);
  else
    buildFailed(signal, subRecPtr, BuildIndxRef::InternalError);
}
Example #2
void
Dbtux::dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 senderData)
{
  jam();
  /*
   * Index state should be Defining or Dropping but in 7.0 it can also
   * be NotDefined (due to double call).  The Index record is always
   * consistent regardless of state so there is no state assert here.
   */
  // drop fragments
  while (indexPtr.p->m_numFrags > 0) {
    jam();
    Uint32 i = --indexPtr.p->m_numFrags;
    FragPtr fragPtr;
    c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
    /*
     * Verify that LQH has terminated scans.  (If not, then drop order
     * must change from TUP,TUX to TUX,TUP and we must wait for scans).
     */
    ScanOpPtr scanPtr;
    bool b = fragPtr.p->m_scanList.first(scanPtr);
    ndbrequire(!b);
    c_fragPool.release(fragPtr);
  }
  // drop attributes
  if (indexPtr.p->m_descPage != RNIL) {
    jam();
    freeDescEnt(indexPtr);
    indexPtr.p->m_descPage = RNIL;
  }
  if (senderRef != 0) {
    jam();
    // reply to sender
    DropTabConf* const conf = (DropTabConf*)signal->getDataPtrSend();
    conf->senderRef = reference();
    conf->senderData = senderData;
    conf->tableId = indexPtr.i;
    sendSignal(senderRef, GSN_DROP_TAB_CONF,
        signal, DropTabConf::SignalLength, JBB);
  }
  new (indexPtr.p) Index();
}
Example #3
/* ---------------------------------------------------------------- */
Uint32
Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender)
{
  if (ERROR_INSERTED(4004)) {
    CLEAR_ERROR_INSERT_VALUE;
    return 9999;
  }
  Uint32 triggerId = req->getTriggerId();

  TriggerType::Value ttype = req->getTriggerType();
  TriggerActionTime::Value ttime = req->getTriggerActionTime();
  TriggerEvent::Value tevent = req->getTriggerEvent();

  //  ndbout_c("Drop TupTrigger %u = %u %u %u %u by %u", triggerId, table, ttype, ttime, tevent, sender);

  DLList<TupTriggerData>* tlist = findTriggerList(table, ttype, ttime, tevent);
  ndbrequire(tlist != NULL);

  Ptr<TupTriggerData> ptr;
  for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) {
    jam();
    if (ptr.p->triggerId == triggerId) {
      if (ttype == TriggerType::SUBSCRIPTION && sender != ptr.p->m_receiverBlock)
      {
	/**
	 * You can only drop your own triggers for subscription triggers.
	 * Trigger IDs are private for each block.
	 *
	 * SUMA encodes information in the triggerId
	 *
	 * Backup doesn't really care about the Ids though.
	 */
	jam();
	continue;
      }
      jam();
      tlist->release(ptr.i);
      return 0;
    }
  }
  return DropTrigRef::TriggerNotFound;
}//Dbtup::dropTrigger()
Example #4
void
DblqhProxy::execALTER_TAB_REQ(Signal* signal)
{
  jamEntry();
  if (!assembleFragments(signal))
  {
    jam();
    return;
  }
  const AlterTabReq* req = (const AlterTabReq*)signal->getDataPtr();
  Uint32 ssId = getSsId(req);
  Ss_ALTER_TAB_REQ& ss = ssSeize<Ss_ALTER_TAB_REQ>(ssId);
  ss.m_req = *req;
  ndbrequire(signal->getLength() == AlterTabReq::SignalLength);

  SectionHandle handle(this, signal);
  saveSections(ss, handle);

  sendREQ(signal, ss);
}
Example #5
void
Dbtup::removeTuxEntries(Signal* signal,
                        Tablerec* regTabPtr)
{
  TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
  const DLList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
  TriggerPtr triggerPtr;
  triggerList.first(triggerPtr);
  while (triggerPtr.i != RNIL) {
    jam();
    req->indexId = triggerPtr.p->indexId;
    req->errorCode = RNIL;
    EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
        signal, TuxMaintReq::SignalLength);
    jamEntry();
    // must succeed
    ndbrequire(req->errorCode == 0);
    triggerList.next(triggerPtr);
  }
}
Example #6
void
Dbtup::execBUILD_INDX_IMPL_REF(Signal* signal)
{
  jamEntry();
  BuildIndxImplRef* ref = (BuildIndxImplRef*)signal->getDataPtrSend();
  Uint32 ptr = ref->senderData;
  Uint32 err = ref->errorCode;

  BuildIndexPtr buildPtr;
  c_buildIndexList.getPtr(buildPtr, ptr);
  ndbrequire(buildPtr.p->m_outstanding);
  buildPtr.p->m_outstanding--;

  TablerecPtr tablePtr;
  (void)tablePtr; // hide unused warning
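  // Note: tablePtr.p is never dereferenced below. NDB_ARRAY_SIZE is
  // sizeof-based, so it only inspects the static type of the fragrec
  // array; the uninitialized pointer is safe here.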
  buildPtr.p->m_errorCode = (BuildIndxImplRef::ErrorCode)err;
  // No point in starting any more
  buildPtr.p->m_fragNo = NDB_ARRAY_SIZE(tablePtr.p->fragrec);
  buildIndexOffline_table_readonly(signal, ptr);
}
Example #7
File: DbtupLCP.cpp Project: isleon/Jaxer
void Dbtup::lcpFlushRestartInfoLab(Signal* signal, Uint32 ciIndex) 
{
  CheckpointInfoPtr ciPtr;
  DiskBufferSegmentInfoPtr dbsiPtr;
  LocalLogInfoPtr lliPtr;
  UndoPagePtr undoCopyPagePtr;

  ciPtr.i = ciIndex;
  ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);

  lliPtr.i = ciPtr.p->lcpLocalLogInfoP;
  ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
  dbsiPtr.i = ciPtr.p->lcpDataBufferSegmentP;
  ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
  undoCopyPagePtr.i = dbsiPtr.p->pdxDataPage[0];	/* UNDO INFO STORED AT PAGE 0 */
  ptrCheckGuard(undoCopyPagePtr, cnoOfUndoPage, undoPage);
  ndbrequire(ciPtr.p->lcpNoOfPages > 0);
  undoCopyPagePtr.p->undoPageWord[ZSRI_NO_OF_FRAG_PAGES_POS] = ciPtr.p->lcpNoOfPages;
  undoCopyPagePtr.p->undoPageWord[ZSRI_NO_COPY_PAGES_ALLOC] = ciPtr.p->lcpNoCopyPagesAlloc;
  undoCopyPagePtr.p->undoPageWord[ZSRI_EMPTY_PRIM_PAGE] = ciPtr.p->lcpEmptyPrimPage;
  undoCopyPagePtr.p->undoPageWord[ZSRI_TH_FREE_FIRST] = ciPtr.p->lcpThFreeFirst;
  undoCopyPagePtr.p->undoPageWord[ZSRI_TH_FREE_COPY_FIRST] = ciPtr.p->lcpThFreeCopyFirst;
  undoCopyPagePtr.p->undoPageWord[ZSRI_UNDO_LOG_END_REC_ID] = lliPtr.p->lliPrevRecordId;
  undoCopyPagePtr.p->undoPageWord[ZSRI_UNDO_FILE_VER] = cundoFileVersion;
  if (lliPtr.p->lliUndoWord == ZUNDO_PAGE_HEADER_SIZE) {
    ljam();
    undoCopyPagePtr.p->undoPageWord[ZSRI_UNDO_LOG_END_PAGE_ID] = lliPtr.p->lliLogFilePage - 1;
  } else {
    ljam();
    undoCopyPagePtr.p->undoPageWord[ZSRI_UNDO_LOG_END_PAGE_ID] = lliPtr.p->lliLogFilePage;
  }//if
  dbsiPtr.p->pdxNumDataPages = 1;
  dbsiPtr.p->pdxFilePage = 0;
  if (clblPageCounter > 0) {
    ljam();
    clblPageCounter--;
  }//if
  lcpWriteListDataPageSegment(signal, dbsiPtr, ciPtr, true);
  dbsiPtr.p->pdxOperation = CHECKPOINT_DATA_WRITE_FLUSH;
  return;
}//Dbtup::lcpFlushRestartInfoLab()
Example #8
void
DblqhProxy::execLQH_TRANSREQ(Signal* signal)
{
  jamEntry();
  
  if (!checkNodeFailSequence(signal))
  {
    jam();
    return;
  }
  const LqhTransReq* req = (const LqhTransReq*)signal->getDataPtr();
  Ss_LQH_TRANSREQ& ss = ssSeize<Ss_LQH_TRANSREQ>();
  ss.m_maxInstanceId = 0;
  ss.m_req = *req;
  if (signal->getLength() < LqhTransReq::SignalLength)
  {
    /**
     * TC that performs take over doesn't support taking over one
     * TC instance at a time
     */
     ss.m_req.instanceId = RNIL;
  }
  ndbrequire(signal->getLength() <= LqhTransReq::SignalLength);
  sendREQ(signal, ss);

  /**
   * See if this is a "resend" (i.e. multi-TC failure)
   *   and if so, mark "old" record as invalid
   */
  Uint32 nodeId = ss.m_req.failedNodeId;
  for (Uint32 i = 0; i<NDB_ARRAY_SIZE(c_ss_LQH_TRANSREQ.m_pool); i++)
  {
    if (c_ss_LQH_TRANSREQ.m_pool[i].m_ssId != 0 &&
        c_ss_LQH_TRANSREQ.m_pool[i].m_ssId != ss.m_ssId &&
        c_ss_LQH_TRANSREQ.m_pool[i].m_req.failedNodeId == nodeId)
    {
      jam();
      c_ss_LQH_TRANSREQ.m_pool[i].m_valid = false;
    }
  }
}
Example #9
bool
Dbtux::allocDescEnt(IndexPtr indexPtr)
{
  jam();
  const Uint32 size = getDescSize(*indexPtr.p);
  DescPagePtr pagePtr;
  pagePtr.i = c_descPageList;
  while (pagePtr.i != RNIL) {
    jam();
    c_descPagePool.getPtr(pagePtr);
    if (pagePtr.p->m_numFree >= size) {
      jam();
      break;
    }
    pagePtr.i = pagePtr.p->m_nextPage;
  }
  if (pagePtr.i == RNIL) {
    jam();
    if (! c_descPagePool.seize(pagePtr)) {
      jam();
      return false;
    }
    new (pagePtr.p) DescPage();
    // add in front of list
    pagePtr.p->m_nextPage = c_descPageList;
    c_descPageList = pagePtr.i;
    pagePtr.p->m_numFree = DescPageSize;
  }
  ndbrequire(pagePtr.p->m_numFree >= size);
  indexPtr.p->m_descPage = pagePtr.i;
  indexPtr.p->m_descOff = DescPageSize - pagePtr.p->m_numFree;
  pagePtr.p->m_numFree -= size;
  DescHead& descHead = *(DescHead*)&pagePtr.p->m_data[indexPtr.p->m_descOff];
  descHead.m_indexId = indexPtr.i;
  descHead.m_numAttrs = indexPtr.p->m_numAttrs;
  descHead.m_magic = DescHead::Magic;
  KeySpec& keySpec = indexPtr.p->m_keySpec;
  KeyType* keyTypes = getKeyTypes(descHead);
  keySpec.set_buf(keyTypes, indexPtr.p->m_numAttrs);
  return true;
}
Example #10
void
DblqhProxy::completeLCP_3(Signal* signal)
{
  jamEntry();
  ndbrequire(c_lcpRecord.m_state == LcpRecord::L_COMPLETING_2);
  c_lcpRecord.m_state = LcpRecord::L_COMPLETING_3;

  /**
   * And finally also checkpoint UNDO LOG
   *   and inform TSMAN that checkpoint is "complete"
   */
  EndLcpReq* req = (EndLcpReq*)signal->getDataPtrSend();
  req->senderData= 0;
  req->senderRef= reference();
  req->backupPtr= 0;
  req->backupId= c_lcpRecord.m_lcpId;

  // no reply from this
  sendSignal(TSMAN_REF, GSN_END_LCP_REQ, signal,
             EndLcpReq::SignalLength, JBB);

  if (c_lcpRecord.m_lcp_frag_rep_cnt)
  {
    jam();
    c_lcpRecord.m_complete_outstanding++;
    sendSignal(LGMAN_REF, GSN_END_LCP_REQ, signal,
               EndLcpReq::SignalLength, JBB);
  }
  else
  {
    jam();
    /**
     * LGMAN currently does not like 0 fragments,
     *   because then it does not get an LCP_FRAG_ORD.
     *
     *   This should change so that it gets that signal first (style).
     */
    sendLCP_COMPLETE_REP(signal);
  }
}
Example #11
/*
 * STTOR is sent to one block at a time.  In NDBCNTR it triggers
 * NDB_STTOR to the "old" blocks.  STTOR carries start phase (SP) and
 * NDB_STTOR carries internal start phase (ISP).
 *
 *      SP      ISP     activities
 *      1       none
 *      2       1       
 *      3       2       recover metadata, activate indexes
 *      4       3       recover data
 *      5       4-6     
 *      6       skip    
 *      7       skip    
 *      8       7       build non-logged indexes on SR
 *
 * DBTUX catches type of start (IS, SR, NR, INR) at SP 3 and updates
 * internal start phase at SP 7.  These are used to prevent index
 * maintenance operations caused by redo log at SR.
 */
void
Dbtux::execSTTOR(Signal* signal)
{
  jamEntry();
  Uint32 startPhase = signal->theData[1];
  switch (startPhase) {
  case 1:
    jam();
    CLEAR_ERROR_INSERT_VALUE;
    c_tup = (Dbtup*)globalData.getBlock(DBTUP, instance());
    ndbrequire(c_tup != 0);
    break;
  case 3:
    jam();
    c_typeOfStart = signal->theData[7];
    break;
  case 7:
    c_internalStartPhase = 6;
    /*
     * config cannot yet be changed dynamically but we start the
     * loop always anyway because the cost is minimal
     */
    c_statMon.m_loopIndexId = 0;
    statMonSendContinueB(signal);
    break;
  default:
    jam();
    break;
  }
  signal->theData[0] = 0;       // garbage
  signal->theData[1] = 0;       // garbage
  signal->theData[2] = 0;       // garbage
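  // theData[3..] lists the start phases in which this block wants the
  // next STTOR, terminated by 255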
  signal->theData[3] = 1;
  signal->theData[4] = 3;       // for c_typeOfStart
  signal->theData[5] = 7;       // for c_internalStartPhase
  signal->theData[6] = 255;
  BlockReference cntrRef = !isNdbMtLqh() ? NDBCNTR_REF : DBTUX_REF;
  sendSignal(cntrRef, GSN_STTORRY, signal, 7, JBB);
}
Example #12
void
Trpman::execDISCONNECT_REP(Signal *signal)
{
  const DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0];
  const Uint32 hostId = rep->nodeId;
  jamEntry();

  setNodeInfo(hostId).m_connected = false;
  setNodeInfo(hostId).m_connectCount++;
  const NodeInfo::NodeType type = getNodeInfo(hostId).getType();
  ndbrequire(type != NodeInfo::INVALID);

  sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal,
             DisconnectRep::SignalLength, JBA);

  signal->theData[0] = hostId;
  sendSignal(CMVMI_REF, GSN_CANCEL_SUBSCRIPTION_REQ, signal, 1, JBB);

  signal->theData[0] = NDB_LE_Disconnected;
  signal->theData[1] = hostId;
  sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
}
Example #13
void
Dbtup::tuxFreeNode(Signal* signal,
                   Uint32 fragPtrI,
                   Uint32 pageId,
                   Uint32 pageOffset,
                   Uint32* node)
{
  jamEntry();
  FragrecordPtr fragPtr;
  fragPtr.i= fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  TablerecPtr tablePtr;
  tablePtr.i= fragPtr.p->fragTableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
  PagePtr pagePtr;
  pagePtr.i= pageId;
  ptrCheckGuard(pagePtr, cnoOfPage, cpage);
  Uint32 attrDescIndex= tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
  Uint32 attrDataOffset= AttributeOffset::getOffset(tableDescriptor[attrDescIndex + 1].tabDescr);
  ndbrequire(node == &pagePtr.p->pageWord[pageOffset] + attrDataOffset);
  freeTh(fragPtr.p, tablePtr.p, signal, pagePtr.p, pageOffset);
}
Example #14
void
DblqhProxy::sendLQH_TRANSCONF(Signal* signal, Uint32 ssId)
{
  Ss_LQH_TRANSREQ& ss = ssFind<Ss_LQH_TRANSREQ>(ssId);

  if (ss.m_conf.operationStatus != LqhTransConf::LastTransConf) {
    jam();
    LqhTransConf* conf = (LqhTransConf*)signal->getDataPtrSend();
    *conf = ss.m_conf;
    conf->tcRef = ss.m_req.senderData;
    sendSignal(ss.m_req.senderRef, GSN_LQH_TRANSCONF,
               signal, LqhTransConf::SignalLength, JBB);

    // more replies from this worker
    skipConf(ss);
  }

  if (ss.m_conf.maxInstanceId > ss.m_maxInstanceId)
  {
    ss.m_maxInstanceId = ss.m_conf.maxInstanceId;
  }
  if (!lastReply(ss))
    return;

  if (ss.m_error == 0) {
    jam();
    LqhTransConf* conf = (LqhTransConf*)signal->getDataPtrSend();
    conf->tcRef = ss.m_req.senderData;
    conf->lqhNodeId = getOwnNodeId();
    conf->operationStatus = LqhTransConf::LastTransConf;
    conf->maxInstanceId = ss.m_maxInstanceId;
    sendSignal(ss.m_req.senderRef, GSN_LQH_TRANSCONF,
               signal, LqhTransConf::SignalLength, JBB);
  } else {
    ndbrequire(false);
  }

  ssRelease<Ss_LQH_TRANSREQ>(ssId);
}
Example #15
void Dbtup::execTUP_DEALLOCREQ(Signal* signal)
{
  TablerecPtr regTabPtr;
  FragrecordPtr regFragPtr;
  Uint32 frag_page_id, frag_id;

  jamEntry();

  frag_id= signal->theData[0];
  regTabPtr.i= signal->theData[1];
  frag_page_id= signal->theData[2];
  Uint32 page_index= signal->theData[3];

  ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
  
  getFragmentrec(regFragPtr, frag_id, regTabPtr.p);
  ndbassert(regFragPtr.p != NULL);
  
  if (! Local_key::isInvalid(frag_page_id, page_index))
  {
    Local_key tmp;
    tmp.m_page_no= getRealpid(regFragPtr.p, frag_page_id); 
    tmp.m_page_idx= page_index;
    
    PagePtr pagePtr;
    Tuple_header* ptr= (Tuple_header*)get_ptr(&pagePtr, &tmp, regTabPtr.p);

    ndbrequire(ptr->m_header_bits & Tuple_header::FREED);

    if (regTabPtr.p->m_attributes[MM].m_no_of_varsize +
        regTabPtr.p->m_attributes[MM].m_no_of_dynamic)
    {
      jam();
      free_var_rec(regFragPtr.p, regTabPtr.p, &tmp, pagePtr);
    } else {
      free_fix_rec(regFragPtr.p, regTabPtr.p, &tmp, (Fix_page*)pagePtr.p);
    }
  }
}
Example #16
void Dbtup::bufferTRANSID_AI(Signal* signal, BlockReference aRef,
                             Uint32 Tlen)
{
  if (Tlen == 3)
    return;
  
  Uint32 hostId= refToNode(aRef);
  Uint32 Theader= ((refToBlock(aRef) << 16)+(Tlen-3));
  
  ndbrequire(hostId < MAX_NODES);
  Uint32 TpacketLen= hostBuffer[hostId].packetLenTA;
  Uint32 TnoOfPackets= hostBuffer[hostId].noOfPacketsTA;
  Uint32 sig0= signal->theData[0];
  Uint32 sig1= signal->theData[1];
  Uint32 sig2= signal->theData[2];

  BlockReference TBref= numberToRef(API_PACKED, hostId);

  if ((Tlen + TpacketLen + 1) <= 25) {
// ----------------------------------------------------------------
// There is still space in the buffer. We will copy it into the
// buffer.
// ----------------------------------------------------------------
    jam();
    updatePackedList(signal, hostId);
  } else if (false && TnoOfPackets == 1) {
// ----------------------------------------------------------------
// The buffer is full and there was only one packet buffered. We
// will send this as a normal signal.
// ----------------------------------------------------------------
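// NOTE: the leading "false &&" in the condition above disables this
// branch; it is effectively dead code kept for reference.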
    Uint32 TnewRef= numberToRef((hostBuffer[hostId].packetBufferTA[0] >> 16),
                                 hostId);
    MEMCOPY_NO_WORDS(&signal->theData[0],
                     &hostBuffer[hostId].packetBufferTA[1],
                     TpacketLen - 1);
    sendSignal(TnewRef, GSN_TRANSID_AI, signal, (TpacketLen - 1), JBB);
    TpacketLen= 0;
    TnoOfPackets= 0;
  } else {
    // ... (remainder of the function is not included in this snippet)
Example #17
void Cmvmi::execDISCONNECT_REP(Signal *signal)
{
  const DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0];
  const Uint32 hostId = rep->nodeId;
  const Uint32 errNo  = rep->err;
  
  jamEntry();

  setNodeInfo(hostId).m_connected = false;
  setNodeInfo(hostId).m_connectCount++;
  const NodeInfo::NodeType type = getNodeInfo(hostId).getType();
  ndbrequire(type != NodeInfo::INVALID);

  sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal, 
             DisconnectRep::SignalLength, JBA);
  
  cancelSubscription(hostId);

  signal->theData[0] = NDB_LE_Disconnected;
  signal->theData[1] = hostId;
  sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
}
Example #18
void
DblqhProxy::sendSTART_RECREQ_2(Signal* signal, Uint32 ssId)
{
  Ss_START_RECREQ_2& ss = ssFind<Ss_START_RECREQ_2>(ssId);

  const Ss_START_RECREQ_2::Req* req =
    (const Ss_START_RECREQ_2::Req*)signal->getDataPtr();

  if (firstReply(ss)) {
    ss.m_req = *req;
  } else {
    jam();
    /*
     * Fragments can be started from different lcpId's.  LGMAN must run
     * UNDO until lowest lcpId.  Each DBLQH instance computes the lowest
     * lcpId in START_FRAGREQ.  In MT case the proxy further computes
     * the lowest of the lcpId's from worker instances.
     */
    if (req->lcpId < ss.m_req.lcpId)
    {
      jam();
      ss.m_req.lcpId = req->lcpId;
    }
    ndbrequire(ss.m_req.proxyBlockNo == req->proxyBlockNo);
  }

  if (!lastReply(ss))
    return;

  {
    Ss_START_RECREQ_2::Req* req =
      (Ss_START_RECREQ_2::Req*)signal->getDataPtrSend();
    *req = ss.m_req;
    BlockReference ref = numberToRef(req->proxyBlockNo, getOwnNodeId());
    sendSignal(ref, GSN_START_RECREQ, signal,
               Ss_START_RECREQ_2::Req::SignalLength, JBB);
  }
}
Example #19
Uint16
Ndbfs::newId()
{
  // finds a new key, e.g. a new file pointer
  for (int i = 1; i < SHRT_MAX; i++) 
  {
    if (theLastId == SHRT_MAX) {
      jam();
      theLastId = 1;
    } else {
      jam();
      theLastId++;
    }
      
    if(theOpenFiles.find(theLastId) == NULL) {
      jam();
      return theLastId;
    }
  }  
  ndbrequire(1 == 0);
  // The program will not reach this point
  return 0;
}
Example #20
void
DblqhProxy::completeLCP_2(Signal* signal)
{
  jamEntry();
  ndbrequire(c_lcpRecord.m_state == LcpRecord::L_COMPLETING_1);
  c_lcpRecord.m_state = LcpRecord::L_COMPLETING_2;

  EndLcpReq* req = (EndLcpReq*)signal->getDataPtrSend();
  req->senderData= 0;
  req->senderRef= reference();
  req->backupPtr= 0;
  req->backupId= c_lcpRecord.m_lcpId;
  c_lcpRecord.m_complete_outstanding++;

  /**
   * send to "extra" instance
   *   that will checkpoint extent-pages
   */
  // NOTE: ugly to use MaxLqhWorkers directly
  Uint32 instance = c_workers + 1;
  sendSignal(numberToRef(PGMAN, instance, getOwnNodeId()),
             GSN_END_LCP_REQ, signal, EndLcpReq::SignalLength, JBB);
}
Example #21
void
Restore::execCONTINUEB(Signal* signal){
  jamEntry();

  switch(signal->theData[0]){
  case RestoreContinueB::RESTORE_NEXT:
  {
    FilePtr file_ptr;
    m_file_pool.getPtr(file_ptr, signal->theData[1]);
    restore_next(signal, file_ptr);
    return;
  }
  case RestoreContinueB::READ_FILE:
  {
    FilePtr file_ptr;
    m_file_pool.getPtr(file_ptr, signal->theData[1]);
    read_file(signal, file_ptr);
    return;
  }
  default:
    ndbrequire(false);
  }
}
Example #22
/*
 * PR0: file pointer
 * DR0: user reference
 * DR1: user pointer
 * DR2: flags (bit 0 = 1 means remove the file)
 */
void 
Ndbfs::execFSCLOSEREQ(Signal * signal)
{
  jamEntry();
  const FsCloseReq * const fsCloseReq = (FsCloseReq *)&signal->theData[0];
  const BlockReference userRef = fsCloseReq->userReference;
  const Uint16 filePointer = (Uint16)fsCloseReq->filePointer;
  const UintR userPointer = fsCloseReq->userPointer; 

  AsyncFile* openFile = theOpenFiles.find(filePointer);
  if (openFile == NULL) {
    // The file was not open, send error back to sender
    jam();    
    // Initialise FsRef signal
    FsRef * const fsRef = (FsRef *)&signal->theData[0];
    fsRef->userPointer  = userPointer; 
    fsRef->setErrorCode(fsRef->errorCode, FsRef::fsErrFileDoesNotExist);
    fsRef->osErrorCode  = ~0; // Indicate local error
    sendSignal(userRef, GSN_FSCLOSEREF, signal, 3, JBB);
    return;
  }

  Request *request = theRequestPool->get();
  if( fsCloseReq->getRemoveFileFlag(fsCloseReq->fileFlag) == true ) {
     jam();
     request->action = Request::closeRemove;
  } else {
     jam();
     request->action = Request::close;
  }
  request->set(userRef, fsCloseReq->userPointer, filePointer);
  request->file = openFile;
  request->error = 0;
  request->theTrace = signal->getTrace();

  ndbrequire(forward(openFile, request));
}
Example #23
/* ------------------------------------------------------------------------ */
void Dbtup::update_free_page_list(Fragrecord* fragPtr,
                                  Ptr<Page> pagePtr)
{
  Uint32 free_space, list_index;
  free_space= pagePtr.p->free_space;
  list_index= pagePtr.p->list_index;
  if ((free_space < c_min_list_size[list_index]) ||
      (free_space > c_max_list_size[list_index])) {
    Uint32 new_list_index= calculate_free_list_impl(free_space);
    if (list_index != MAX_FREE_LIST) {
      jam();
      /*
       * Only remove it from its list if it is in a list
       */
      LocalDLList<Page> 
	list(c_page_pool, fragPtr->free_var_page_array[list_index]);
      list.remove(pagePtr);
    }
    if (free_space < c_min_list_size[new_list_index]) {
      /*
	We have not sufficient amount of free space to put it into any
	free list. Thus the page will not be available for new inserts.
	This can only happen for the free list with least guaranteed 
	free space.
      */
      jam();
      ndbrequire(new_list_index == 0);
      pagePtr.p->list_index= MAX_FREE_LIST;
    } else {
      jam();
      LocalDLList<Page> list(c_page_pool, 
			     fragPtr->free_var_page_array[new_list_index]);
      list.add(pagePtr);
      pagePtr.p->list_index = new_list_index;
    }
  }
}
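The helper calculate_free_list_impl and the c_min_list_size/c_max_list_size tables are not part of this snippet. As a rough standalone illustration of the size-class selection they implement, a minimal sketch could look like the following; the class count and threshold values here are made up for the example and are not NDB's actual page-layout constants.

#include <cstdint>

// Illustrative sketch only: pages are bucketed into size classes by the
// amount of free space they can still guarantee to an insert. Class 0 has
// the smallest guarantee; a page too empty even for class 0 goes on no
// list at all (list_index = MAX_FREE_LIST in the code above).
typedef uint32_t Uint32;
static const Uint32 SKETCH_FREE_LISTS = 4;               // hypothetical
static const Uint32 sketch_min_size[SKETCH_FREE_LISTS] = // hypothetical
  { 8, 64, 128, 256 };

static Uint32 sketch_calculate_free_list(Uint32 free_space)
{
  // Pick the largest class whose minimum guaranteed free space the
  // page can still satisfy.
  Uint32 i = SKETCH_FREE_LISTS;
  while (i > 0) {
    i--;
    if (free_space >= sketch_min_size[i])
      return i;
  }
  return 0; // caller handles free_space < sketch_min_size[0]
}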
Example #24
void
Trpman::execCONNECT_REP(Signal *signal)
{
  const Uint32 hostId = signal->theData[0];
  jamEntry();

  const NodeInfo::NodeType type = (NodeInfo::NodeType)getNodeInfo(hostId).m_type;
  ndbrequire(type != NodeInfo::INVALID);

  /**
   * Inform QMGR that client has connected
   */
  signal->theData[0] = hostId;
  if (ERROR_INSERTED(9005))
  {
    sendSignalWithDelay(QMGR_REF, GSN_CONNECT_REP, signal, 50, 1);
  }
  else
  {
    sendSignal(QMGR_REF, GSN_CONNECT_REP, signal, 1, JBA);
  }

  /* Automatically subscribe events for MGM nodes.
   */
  if (type == NodeInfo::MGM)
  {
    jam();
    globalTransporterRegistry.setIOState(hostId, NoHalt);
  }

  //------------------------------------------
  // Also report this event to the Event handler
  //------------------------------------------
  signal->theData[0] = NDB_LE_Connected;
  signal->theData[1] = hostId;
  sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
}
Example #25
void
Dbtux::nodePushUpScans(NodeHandle& node, unsigned pos)
{
  const unsigned occup = node.getOccup();
  ScanOpPtr scanPtr;
  scanPtr.i = node.getNodeScan();
  do {
    jam();
    c_scanOpPool.getPtr(scanPtr);
    TreePos& scanPos = scanPtr.p->m_scanPos;
    ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup);
    if (scanPos.m_pos >= pos) {
      jam();
#ifdef VM_TRACE
      if (debugFlags & DebugScan) {
        debugOut << "Fix scan " << scanPtr.i << " " << *scanPtr.p << endl;
        debugOut << "At pushUp pos=" << pos << " " << node << endl;
      }
#endif
      scanPos.m_pos++;
    }
    scanPtr.i = scanPtr.p->m_nodeScan;
  } while (scanPtr.i != RNIL);
}
Example #26
void
DbtuxProxy::sendINDEX_STAT_REP(Signal* signal, Uint32 ssId,
                               SectionHandle*)
{
  Ss_INDEX_STAT_REP& ss = ssFind<Ss_INDEX_STAT_REP>(ssId);

  IndexStatRep* rep = (IndexStatRep*)signal->getDataPtrSend();
  *rep = ss.m_rep;
  rep->senderRef = reference();
  rep->senderData = ssId;

  const Uint32 instance = workerInstance(ss.m_worker);
  NdbLogPartInfo lpinfo(instance);

  ndbrequire(rep->fragId != ZNIL);
  if (!lpinfo.partNoOwner(rep->indexId, rep->fragId)) {
    jam();
    skipReq(ss);
    return;
  }

  sendSignal(workerRef(ss.m_worker), GSN_INDEX_STAT_REP,
             signal, IndexStatRep::SignalLength, JBB);
}
Example #27
void
DblqhProxy::sendSTART_RECCONF(Signal* signal, Uint32 ssId)
{
  Ss_START_RECREQ& ss = ssFind<Ss_START_RECREQ>(ssId);

  if (!lastReply(ss))
    return;

  if (ss.m_error == 0) {
    jam();

    /**
     * There should be no disk-ops in flight here...check it
     */
    signal->theData[0] = 12003;
    sendSignal(LGMAN_REF, GSN_DUMP_STATE_ORD, signal, 1, JBB);

    StartRecConf* conf = (StartRecConf*)signal->getDataPtrSend();
    conf->startingNodeId = getOwnNodeId();
    conf->senderData = ss.m_req.senderData;
    sendSignal(ss.m_req.senderRef, GSN_START_RECCONF,
               signal, StartRecConf::SignalLength, JBB);
  } else {
    ndbrequire(false);
  }

  {
    Uint32 i;
    for (i = 0; i < ss.m_req2cnt; i++) {
      jam();
      Uint32 ssId2 = ss.m_req2[i].m_ssId;
      ssRelease<Ss_START_RECREQ_2>(ssId2);
    }
  }
  ssRelease<Ss_START_RECREQ>(ssId);
}
Example #28
void
Dbtux::readKeyAttrs(TuxCtx& ctx, const Frag& frag, TreeEnt ent, KeyData& keyData, Uint32 count)
{
  const Index& index = *c_indexPool.getPtr(frag.m_indexId);
  const DescHead& descHead = getDescHead(index);
  const AttributeHeader* keyAttrs = getKeyAttrs(descHead);
  Uint32* const outputBuffer = ctx.c_dataBuffer;

#ifdef VM_TRACE
  ndbrequire(&keyData.get_spec() == &index.m_keySpec);
  ndbrequire(keyData.get_spec().validate() == 0);
  ndbrequire(count <= index.m_numAttrs);
#endif

  const TupLoc tupLoc = ent.m_tupLoc;
  const Uint32 pageId = tupLoc.getPageId();
  const Uint32 pageOffset = tupLoc.getPageOffset();
  const Uint32 tupVersion = ent.m_tupVersion;
  const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI;
  const Uint32* keyAttrs32 = (const Uint32*)&keyAttrs[0];

  int ret;
  ret = c_tup->tuxReadAttrs(ctx.jamBuffer, tableFragPtrI, pageId, pageOffset,
                            tupVersion, keyAttrs32, count, outputBuffer, false);
  jamEntry();
  ndbrequire(ret > 0);
  keyData.reset();
  Uint32 len;
  ret = keyData.add_poai(outputBuffer, count, &len);
  ndbrequire(ret == 0);
  ret = keyData.finalize();
  ndbrequire(ret == 0);

#ifdef VM_TRACE
  if (debugFlags & (DebugMaint | DebugScan)) {
    debugOut << "readKeyAttrs: ";
    debugOut << " ent:" << ent << " count:" << count;
    debugOut << " data:" << keyData.print(ctx.c_debugBuffer, DebugBufferBytes);
    debugOut << endl;
  }
#endif
}
Example #29
void
Backup::execREAD_CONFIG_REQ(Signal* signal)
{
    const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
    Uint32 ref = req->senderRef;
    Uint32 senderData = req->senderData;
    ndbrequire(req->noOfParameters == 0);

    const ndb_mgm_configuration_iterator * p =
        m_ctx.m_config.getOwnConfigIterator();
    ndbrequire(p != 0);

    c_defaults.m_disk_write_speed = 10 * (1024 * 1024);
    c_defaults.m_disk_write_speed_sr = 100 * (1024 * 1024);
    c_defaults.m_disk_synch_size = 4 * (1024 * 1024);
    c_defaults.m_o_direct = true;

    Uint32 noBackups = 0, noTables = 0, noAttribs = 0, noFrags = 0;
    ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS,
                                          &c_defaults.m_diskless));
    ndb_mgm_get_int_parameter(p, CFG_DB_O_DIRECT,
                              &c_defaults.m_o_direct);
    ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED_SR,
                              &c_defaults.m_disk_write_speed_sr);
    ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED,
                              &c_defaults.m_disk_write_speed);
    ndb_mgm_get_int_parameter(p, CFG_DB_DISK_SYNCH_SIZE,
                              &c_defaults.m_disk_synch_size);
    ndb_mgm_get_int_parameter(p, CFG_DB_COMPRESSED_BACKUP,
                              &c_defaults.m_compressed_backup);
    ndb_mgm_get_int_parameter(p, CFG_DB_COMPRESSED_LCP,
                              &c_defaults.m_compressed_lcp);

    m_backup_report_frequency = 0;
    ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_REPORT_FREQUENCY,
                              &m_backup_report_frequency);
    /*
      We convert the disk write speed parameters from bytes per second to
      words per 100 milliseconds (1 word = 4 bytes, 10 intervals per second).
    */
    c_defaults.m_disk_write_speed /= (4 * 10);
    c_defaults.m_disk_write_speed_sr /= (4 * 10);
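    // Worked example with the 10 MB/s default:
    //   10 * 1024 * 1024 = 10485760 bytes/s
    //   / 4  -> 2621440 words/s     (1 word = 4 bytes)
    //   / 10 ->  262144 words per 100 ms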

    /*
      Temporary fix, we divide the speed by number of ldm threads since we
      now can write in all ldm threads in parallel. Since previously we could
      write in 2 threads we also multiply by 2 if number of ldm threads is
      at least 2.

      The real fix will be to make the speed of writing more adaptable and also
      to use the real configured value and also add a new max disk speed value
      that can be used when one needs to write faster.
    */
    Uint32 num_ldm_threads = globalData.ndbMtLqhThreads;
    if (num_ldm_threads == 0)
    {
        /* We are running with ndbd binary */
        jam();
        num_ldm_threads = 1;
    }
    c_defaults.m_disk_write_speed /= num_ldm_threads;
    c_defaults.m_disk_write_speed_sr /= num_ldm_threads;

    if (num_ldm_threads > 1)
    {
        jam();
        c_defaults.m_disk_write_speed *= 2;
        c_defaults.m_disk_write_speed_sr *= 2;
    }
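    // Net effect with e.g. 4 LDM threads: each thread gets (speed / 4) * 2,
    // so the aggregate across all threads is twice the configured value,
    // mirroring the previous behaviour of writing from 2 threads.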

    ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_BACKUPS, &noBackups);
    //  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES, &noTables));
    ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &noTables));
    ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES, &noAttribs));
    ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_FRAG_CONNECT, &noFrags));

    noAttribs++; //RT 527 bug fix

    c_nodePool.setSize(MAX_NDB_NODES);
    c_backupPool.setSize(noBackups + 1);
    c_backupFilePool.setSize(3 * noBackups + 1);
    c_tablePool.setSize(noBackups * noTables + 1);
    c_triggerPool.setSize(noBackups * 3 * noTables);
    c_fragmentPool.setSize(noBackups * noFrags + 1);

    Uint32 szDataBuf = (2 * 1024 * 1024);
    Uint32 szLogBuf = (2 * 1024 * 1024);
    Uint32 szWrite = 32768, maxWriteSize = (256 * 1024);
    ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_DATA_BUFFER_MEM, &szDataBuf);
    ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_LOG_BUFFER_MEM, &szLogBuf);
    ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_WRITE_SIZE, &szWrite);
    ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MAX_WRITE_SIZE, &maxWriteSize);

    if (maxWriteSize < szWrite)
    {
        /**
         * max can't be lower than min
         */
        maxWriteSize = szWrite;
    }
    if ((maxWriteSize % szWrite) != 0)
    {
        /**
         * max needs to be a multiple of min
         */
        maxWriteSize = (maxWriteSize + szWrite - 1) / szWrite;
        maxWriteSize *= szWrite;
    }
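    // Example: with szWrite = 32768 and maxWriteSize = 250000 the rounding
    // gives (250000 + 32767) / 32768 = 8 chunks, so maxWriteSize = 262144.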

    /**
     * Add the minimum write size to each buffer size, plus the alignment
     * that is added here and there.
     */
    Uint32 extra = szWrite + 4 * (/* align * 512b */ 128);

    szDataBuf += extra;
    szLogBuf += extra;

    c_defaults.m_logBufferSize = szLogBuf;
    c_defaults.m_dataBufferSize = szDataBuf;
    c_defaults.m_minWriteSize = szWrite;
    c_defaults.m_maxWriteSize = maxWriteSize;
    c_defaults.m_lcp_buffer_size = szDataBuf;

    Uint32 szMem = 0;
    ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MEM, &szMem);

    szMem += 3 * extra; // (data+log+lcp);
    Uint32 noPages =
        (szMem + sizeof(Page32) - 1) / sizeof(Page32) +
        (c_defaults.m_lcp_buffer_size + sizeof(Page32) - 1) / sizeof(Page32);

    // We need to allocate an additional 2 pages: 1 page because of a bug in
    // ArrayPool and another one for DICTTAINFO.
    c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2, true);

    {   // Init all tables
        SLList<Table> tables(c_tablePool);
        TablePtr ptr;
        while (tables.seizeFirst(ptr)) {
            new (ptr.p) Table(c_fragmentPool);
        }
        while (tables.releaseFirst());
    }

    {
        SLList<BackupFile> ops(c_backupFilePool);
        BackupFilePtr ptr;
        while (ops.seizeFirst(ptr)) {
            new (ptr.p) BackupFile(* this, c_pagePool);
        }
        while (ops.releaseFirst());
    }

    {
        SLList<BackupRecord> recs(c_backupPool);
        BackupRecordPtr ptr;
        while (recs.seizeFirst(ptr)) {
            new (ptr.p) BackupRecord(* this, c_tablePool,
                                     c_backupFilePool, c_triggerPool);
        }
        while (recs.releaseFirst());
    }

    // Initialize BAT for interface to file system
    {
        Page32Ptr p;
        ndbrequire(c_pagePool.seizeId(p, 0));
        c_startOfPages = (Uint32 *)p.p;
        c_pagePool.release(p);

        NewVARIABLE* bat = allocateBat(1);
        bat[0].WA = c_startOfPages;
        bat[0].nrr = c_pagePool.getSize()*sizeof(Page32)/sizeof(Uint32);
    }

    ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
    conf->senderRef = reference();
    conf->senderData = senderData;
    sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
               ReadConfigConf::SignalLength, JBB);
}
Example #30
inline
const NodeInfo &
SimulatedBlock::getNodeInfo(NodeId nodeId) const {
  ndbrequire(nodeId > 0 && nodeId < MAX_NODES);
  return globalData.m_nodeInfo[nodeId];
}
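
All of the examples above guard internal invariants with ndbrequire. As a mental model only (a minimal sketch, not the actual NDB implementation, which reports through the block's error handler and writes a trace file before terminating the data node), the macro behaves like an assert that stays active in release builds:

#include <cstdio>
#include <cstdlib>

// Minimal sketch of the behaviour the examples rely on: if the condition
// is false, stop immediately instead of continuing with corrupt state.
// The real macro goes through NDB's error-reporting machinery rather
// than calling abort() directly.
#define ndbrequire(cond)                                          \
  do {                                                            \
    if (!(cond)) {                                                \
      std::fprintf(stderr, "ndbrequire(%s) failed at %s:%d\n",    \
                   #cond, __FILE__, __LINE__);                    \
      std::abort();                                               \
    }                                                             \
  } while (0)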