void
Dbtup::executeTuxCommitTriggers(Signal* signal,
                                Operationrec* regOperPtr,
                                Fragrecord* regFragPtr,
                                Tablerec* regTabPtr)
{
  TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
  Uint32 tupVersion;
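  // Pick the tuple version whose TUX entries are to be removed at commit:
  // the pre-operation version for insert/update, the current one for delete.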
  if (regOperPtr->op_struct.op_type == ZINSERT) {
    if (! regOperPtr->op_struct.delete_insert_flag)
      return;
    jam();
    tupVersion= decr_tup_version(regOperPtr->tupVersion);
  } else if (regOperPtr->op_struct.op_type == ZUPDATE) {
    jam();
    tupVersion= decr_tup_version(regOperPtr->tupVersion);
  } else if (regOperPtr->op_struct.op_type == ZDELETE) {
    if (regOperPtr->op_struct.delete_insert_flag)
      return;
    jam();
    tupVersion= regOperPtr->tupVersion;
  } else {
    ndbrequire(false);
    tupVersion= 0; // remove warning
  }
  // fill in constant part
  req->tableId = regFragPtr->fragTableId;
  req->fragId = regFragPtr->fragmentId;
  req->pageId = regOperPtr->m_tuple_location.m_page_no;
  req->pageIndex = regOperPtr->m_tuple_location.m_page_idx;
  req->tupVersion = tupVersion;
  req->opInfo = TuxMaintReq::OpRemove;
  removeTuxEntries(signal, regTabPtr);
}
void
Dbtup::checkImmediateTriggersAfterUpdate(KeyReqStruct *req_struct,
                                         Operationrec* regOperPtr, 
                                         Tablerec* regTablePtr,
                                         bool disk)
{
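  // Immediate after-update and constraint triggers are fired only for
  // operations initiated by DBTC and only on the primary replica.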
  if(refToBlock(req_struct->TC_ref) != DBTC) {
    return;
  }

  if ((regOperPtr->op_struct.primary_replica) &&
      (!(regTablePtr->afterUpdateTriggers.isEmpty()))) {
    jam();
    fireImmediateTriggers(req_struct,
                          regTablePtr->afterUpdateTriggers,
                          regOperPtr,
                          disk);
  }
  if ((regOperPtr->op_struct.primary_replica) &&
      (!(regTablePtr->constraintUpdateTriggers.isEmpty()))) {
    jam();
    fireImmediateTriggers(req_struct,
                          regTablePtr->constraintUpdateTriggers,
                          regOperPtr,
                          disk);
  }
}
void 
Dbtup::fireDetachedTriggers(KeyReqStruct *req_struct,
                            DLList<TupTriggerData>& triggerList, 
                            Operationrec* const regOperPtr,
                            bool disk)
{
  
  TriggerPtr trigPtr;  
  
  /**
   * Set disk page
   */
  req_struct->m_disk_page_ptr.i = m_pgman.m_ptr.i;
  
  ndbrequire(regOperPtr->is_first_operation());
  triggerList.first(trigPtr);
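  // Execute every trigger that either monitors all replicas or runs on the
  // primary replica, and that monitors all attributes or at least one of
  // the changed attributes.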
  while (trigPtr.i != RNIL) {
    jam();
    if ((trigPtr.p->monitorReplicas ||
         regOperPtr->op_struct.primary_replica) &&
        (trigPtr.p->monitorAllAttributes ||
         trigPtr.p->attributeMask.overlaps(req_struct->changeMask))) {
      jam();
      executeTrigger(req_struct,
                     trigPtr.p,
                     regOperPtr,
                     disk);
    }
    triggerList.next(trigPtr);
  }
}
bool Dbinfo::find_next(Ndbinfo::ScanCursor* cursor) const
{
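  // Advance the cursor to the next local DBINFO block; start from the first
  // block on the first call and return false when all blocks are done.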
  Uint32 node = refToNode(cursor->currRef);
  Uint32 block = refToBlock(cursor->currRef);
  const Uint32 instance = refToInstance(cursor->currRef);
  ndbrequire(instance == 0);

  if (node == 0)
  {
    jam();
    // First 'find_next'
    ndbrequire(block == 0);
    cursor->currRef = switchRef(dbinfo_blocks[0], getOwnNodeId());
    return true;
  }

  if (block)
  {
    jam();
    // Find next block
    ndbrequire(node == getOwnNodeId());
    block = find_next_block(block);
    if (block)
    {
      jam();
      cursor->currRef = switchRef(block, node);
      return true;
    }
  }

  // Nothing more to scan
  cursor->currRef = 0;
  return false;
}
void Dbinfo::execDUMP_STATE_ORD(Signal* signal)
{
  jamEntry();

  switch(signal->theData[0])
  {
  case DumpStateOrd::DbinfoListTables:
    jam();
    ndbout_c("--- BEGIN NDB$INFO.TABLES ---");
    for(int i = 0; i < Ndbinfo::getNumTables(); i++)
    {
      const Ndbinfo::Table& tab = Ndbinfo::getTable(i);
      ndbout_c("%d,%s", i, tab.m.name);
    }
    ndbout_c("--- END NDB$INFO.TABLES ---");
    break;

  case DumpStateOrd::DbinfoListColumns:
    jam();
    ndbout_c("--- BEGIN NDB$INFO.COLUMNS ---");
    for(int i = 0; i < Ndbinfo::getNumTables(); i++)
    {
      const Ndbinfo::Table& tab = Ndbinfo::getTable(i);

      for(int j = 0; j < tab.m.ncols; j++)
        ndbout_c("%d,%d,%s,%d", i, j,
                 tab.col[j].name, tab.col[j].coltype);
    }
    ndbout_c("--- END NDB$INFO.COLUMNS ---");
    break;

  };
}
void
DblqhProxy::execEND_LCP_CONF(Signal* signal)
{
  jamEntry();
  ndbrequire(c_lcpRecord.m_state == LcpRecord::L_COMPLETING_1 ||
             c_lcpRecord.m_state == LcpRecord::L_COMPLETING_2 ||
             c_lcpRecord.m_state == LcpRecord::L_COMPLETING_3);

  ndbrequire(c_lcpRecord.m_complete_outstanding);
  c_lcpRecord.m_complete_outstanding--;

  if (c_lcpRecord.m_complete_outstanding == 0)
  {
    jam();
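    // Last outstanding reply: move to the next LCP completion phase, or
    // report completion after the final phase.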
    if (c_lcpRecord.m_state == LcpRecord::L_COMPLETING_1)
    {
      jam();
      completeLCP_2(signal);
      return;
    }
    else if (c_lcpRecord.m_state == LcpRecord::L_COMPLETING_2)
    {
      jam();
      completeLCP_3(signal);
      return;
    }
    else
    {
      jam();
      sendLCP_COMPLETE_REP(signal);
      return;
    }
  }
}
void
DblqhProxy::sendPREP_DROP_TAB_CONF(Signal* signal, Uint32 ssId)
{
  Ss_PREP_DROP_TAB_REQ& ss = ssFind<Ss_PREP_DROP_TAB_REQ>(ssId);
  BlockReference dictRef = ss.m_req.senderRef;

  if (!lastReply(ss))
    return;

  if (ss.m_error == 0) {
    jam();
    PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend();
    conf->senderRef = reference();
    conf->senderData = ss.m_req.senderData;
    conf->tableId = ss.m_req.tableId;
    sendSignal(dictRef, GSN_PREP_DROP_TAB_CONF,
               signal, PrepDropTabConf::SignalLength, JBB);
  } else {
    jam();
    PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
    ref->senderRef = reference();
    ref->senderData = ss.m_req.senderData;
    ref->tableId = ss.m_req.tableId;
    ref->errorCode = ss.m_error;
    sendSignal(dictRef, GSN_PREP_DROP_TAB_REF,
               signal, PrepDropTabRef::SignalLength, JBB);
  }

  ssRelease<Ss_PREP_DROP_TAB_REQ>(ssId);
}
void
DblqhProxy::execEXEC_SR_2(Signal* signal, GlobalSignalNumber gsn)
{
  ndbrequire(signal->getLength() == Ss_EXEC_SR_2::Sig::SignalLength);

  const Ss_EXEC_SR_2::Sig* sig =
    (const Ss_EXEC_SR_2::Sig*)signal->getDataPtr();
  Uint32 ssId = getSsId(sig);

  bool found = false;
  Ss_EXEC_SR_2& ss = ssFindSeize<Ss_EXEC_SR_2>(ssId, &found);
  if (!found) {
    jam();
    setMask(ss);
  }

  ndbrequire(sig->nodeId == getOwnNodeId());
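  // Every worker sends an identical signal; keep the first copy and verify
  // that later copies match it.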
  if (ss.m_sigcount == 0) {
    jam();
    ss.m_gsn = gsn;
    ss.m_sig = *sig;
  } else {
    jam();
    ndbrequire(ss.m_gsn == gsn);
    ndbrequire(memcmp(&ss.m_sig, sig, sizeof(*sig)) == 0);
  }
  ss.m_sigcount++;

  // reversed roles
  recvCONF(signal, ss);
}
void
DblqhProxy::sendTAB_COMMITCONF(Signal* signal, Uint32 ssId)
{
  Ss_TAB_COMMITREQ& ss = ssFind<Ss_TAB_COMMITREQ>(ssId);
  BlockReference dictRef = ss.m_req.senderRef;

  if (!lastReply(ss))
    return;

  if (ss.m_error == 0) {
    jam();
    TabCommitConf* conf = (TabCommitConf*)signal->getDataPtrSend();
    conf->senderData = ss.m_req.senderData;
    conf->nodeId = getOwnNodeId();
    conf->tableId = ss.m_req.tableId;
    sendSignal(dictRef, GSN_TAB_COMMITCONF,
               signal, TabCommitConf::SignalLength, JBB);
  } else {
    jam();
    TabCommitRef* ref = (TabCommitRef*)signal->getDataPtrSend();
    ref->senderData = ss.m_req.senderData;
    ref->nodeId = getOwnNodeId();
    ref->tableId = ss.m_req.tableId;
    sendSignal(dictRef, GSN_TAB_COMMITREF,
               signal, TabCommitRef::SignalLength, JBB);
    return;
  }

  ssRelease<Ss_TAB_COMMITREQ>(ssId);
}
void Dbtup::execTUP_WRITELOG_REQ(Signal* signal)
{
  jamEntry();
  OperationrecPtr loopOpPtr;
  loopOpPtr.i= signal->theData[0];
  Uint32 gci_hi = signal->theData[1];
  Uint32 gci_lo = signal->theData[2];
  c_operation_pool.getPtr(loopOpPtr);
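  // Rewind to the first operation on the tuple, then forward the GCI to
  // DBLQH for each operation in the list.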
  while (loopOpPtr.p->prevActiveOp != RNIL) {
    jam();
    loopOpPtr.i= loopOpPtr.p->prevActiveOp;
    c_operation_pool.getPtr(loopOpPtr);
  }
  do {
    ndbrequire(get_trans_state(loopOpPtr.p) == TRANS_STARTED);
    signal->theData[0] = loopOpPtr.p->userpointer;
    signal->theData[1] = gci_hi;
    signal->theData[2] = gci_lo;
    if (loopOpPtr.p->nextActiveOp == RNIL) {
      jam();
      EXECUTE_DIRECT(DBLQH, GSN_LQH_WRITELOG_REQ, signal, 3);
      return;
    }
    jam();
    EXECUTE_DIRECT(DBLQH, GSN_LQH_WRITELOG_REQ, signal, 3);
    jamEntry();
    loopOpPtr.i= loopOpPtr.p->nextActiveOp;
    c_operation_pool.getPtr(loopOpPtr);
  } while (true);
}
int Dbtup::retrieve_log_page(Signal *signal,
                             FragrecordPtr regFragPtr,
                             OperationrecPtr regOperPtr)
{
  jam();
  /**
   * Only last op on tuple needs "real" commit,
   *   hence only this one should have m_wait_log_buffer
   */

  CallbackPtr cb;
  cb.m_callbackData= regOperPtr.i;
  cb.m_callbackIndex = DISK_PAGE_LOG_BUFFER_CALLBACK;
  Uint32 sz= regOperPtr.p->m_undo_buffer_space;

  D("Logfile_client - execTUP_COMMITREQ");
  Logfile_client lgman(this, c_lgman, regFragPtr.p->m_logfile_group_id);
  int res= lgman.get_log_buffer(signal, sz, &cb);
  jamEntry();
  switch(res){
  case 0:
    jam();
    signal->theData[0] = 1;
    return res;
  case -1:
    ndbrequire("NOT YET IMPLEMENTED" == 0);
    break;
  default:
    jam();
  }
  regOperPtr.p->op_struct.bit_field.m_wait_log_buffer= 0;

  return res;
}
void
DblqhProxy::sendLQH_TRANSCONF(Signal* signal, Uint32 ssId)
{
  Ss_LQH_TRANSREQ& ss = ssFind<Ss_LQH_TRANSREQ>(ssId);

  if (ss.m_conf.operationStatus != LqhTransConf::LastTransConf) {
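    // Intermediate reply from a worker: forward it to the requesting TC.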
    jam();
    LqhTransConf* conf = (LqhTransConf*)signal->getDataPtrSend();
    *conf = ss.m_conf;
    conf->tcRef = ss.m_req.senderData;
    sendSignal(ss.m_req.senderRef, GSN_LQH_TRANSCONF,
               signal, LqhTransConf::SignalLength, JBB);

    // more replies from this worker
    skipConf(ss);
  }

  if (!lastReply(ss))
    return;

  if (ss.m_error == 0) {
    jam();
    LqhTransConf* conf = (LqhTransConf*)signal->getDataPtrSend();
    conf->tcRef = ss.m_req.senderData;
    conf->lqhNodeId = getOwnNodeId();
    conf->operationStatus = LqhTransConf::LastTransConf;
    sendSignal(ss.m_req.senderRef, GSN_LQH_TRANSCONF,
               signal, LqhTransConf::SignalLength, JBB);
  } else {
    ndbrequire(false);
  }

  ssRelease<Ss_LQH_TRANSREQ>(ssId);
}
void
DblqhProxy::execLQH_TRANSREQ(Signal* signal)
{
  jamEntry();
  
  if (!checkNodeFailSequence(signal))
  {
    jam();
    return;
  }
  const LqhTransReq* req = (const LqhTransReq*)signal->getDataPtr();
  Ss_LQH_TRANSREQ& ss = ssSeize<Ss_LQH_TRANSREQ>();
  ss.m_req = *req;
  ndbrequire(signal->getLength() == LqhTransReq::SignalLength);
  sendREQ(signal, ss);

  /**
   * See if this is a "resend" (i.e multi TC failure)
   *   and if so, mark "old" record as invalid
   */
  Uint32 nodeId = ss.m_req.failedNodeId;
  for (Uint32 i = 0; i<NDB_ARRAY_SIZE(c_ss_LQH_TRANSREQ.m_pool); i++)
  {
    if (c_ss_LQH_TRANSREQ.m_pool[i].m_ssId != 0 &&
        c_ss_LQH_TRANSREQ.m_pool[i].m_ssId != ss.m_ssId &&
        c_ss_LQH_TRANSREQ.m_pool[i].m_req.failedNodeId == nodeId)
    {
      jam();
      c_ss_LQH_TRANSREQ.m_pool[i].m_valid = false;
    }
  }
}
void
PgmanProxy::sendEND_LCPCONF(Signal* signal, Uint32 ssId)
{
  Ss_END_LCPREQ& ss = ssFind<Ss_END_LCPREQ>(ssId);
  BlockReference senderRef = ss.m_req.senderRef;

  if (!lastReply(ss)) {
    jam();
    return;
  }

  if (!ss.m_extraLast)
  {
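    // All workers have replied; send one extra request to the last PGMAN
    // worker before confirming to the original sender.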
    jam();
    ss.m_extraLast = true;
    ss.m_worker = c_workers - 1; // send to last PGMAN
    ss.m_workerMask.set(ss.m_worker);
    SectionHandle handle(this);
    (this->*ss.m_sendREQ)(signal, ss.m_ssId, &handle);
    return;
  }

  if (ss.m_error == 0) {
    jam();
    EndLcpConf* conf = (EndLcpConf*)signal->getDataPtrSend();
    conf->senderData = ss.m_req.senderData;
    conf->senderRef = reference();
    sendSignal(senderRef, GSN_END_LCPCONF,
               signal, EndLcpConf::SignalLength, JBB);
  } else {
    ndbrequire(false);
  }

  ssRelease<Ss_END_LCPREQ>(ssId);
}
int
Dbtup::addTuxEntries(Signal* signal,
                     Operationrec* regOperPtr,
                     Tablerec* regTabPtr)
{
  if (ERROR_INSERTED(4022)) {
    jam();
    CLEAR_ERROR_INSERT_VALUE;
    terrorCode = 9999;
    return -1;
  }
  TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
  const DLList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
  TriggerPtr triggerPtr;
  Uint32 failPtrI;
  triggerList.first(triggerPtr);
  while (triggerPtr.i != RNIL) {
    jam();
    req->indexId = triggerPtr.p->indexId;
    req->errorCode = RNIL;
    if (ERROR_INSERTED(4023) &&
        ! triggerList.hasNext(triggerPtr)) {
      jam();
      CLEAR_ERROR_INSERT_VALUE;
      terrorCode = 9999;
      failPtrI = triggerPtr.i;
      goto fail;
    }
    EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
        signal, TuxMaintReq::SignalLength);
    jamEntry();
    if (req->errorCode != 0) {
      jam();
      terrorCode = req->errorCode;
      failPtrI = triggerPtr.i;
      goto fail;
    }
    triggerList.next(triggerPtr);
  }
  return 0;
fail:
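  // Back out: remove the index entries added before the failing trigger.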
  req->opInfo = TuxMaintReq::OpRemove;
  triggerList.first(triggerPtr);
  while (triggerPtr.i != failPtrI) {
    jam();
    req->indexId = triggerPtr.p->indexId;
    req->errorCode = RNIL;
    EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
        signal, TuxMaintReq::SignalLength);
    jamEntry();
    ndbrequire(req->errorCode == 0);
    triggerList.next(triggerPtr);
  }
#ifdef VM_TRACE
  ndbout << "aborted partial tux update: op " << hex << regOperPtr << endl;
#endif
  return -1;
}
/* ---------------------------------------------------------------- */
void Dbtup::execSTORED_PROCREQ(Signal* signal) 
{
  OperationrecPtr regOperPtr;
  TablerecPtr regTabPtr;
  jamEntry();
  regOperPtr.i = signal->theData[0];
  c_operation_pool.getPtr(regOperPtr);
  regTabPtr.i = signal->theData[1];
  ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);

  Uint32 requestInfo = signal->theData[3];
  TransState trans_state= get_trans_state(regOperPtr.p);
  ndbrequire(trans_state == TRANS_IDLE ||
             ((trans_state == TRANS_ERROR_WAIT_STORED_PROCREQ) &&
             (requestInfo == ZSTORED_PROCEDURE_DELETE)));
  ndbrequire(regTabPtr.p->tableStatus == DEFINED);
  /*
   * Also store count of procs called from non-API scans.
   * It can be done here since seize/release always succeeds.
   * The count is only used under -DERROR_INSERT via DUMP.
   */
  BlockReference apiBlockref = signal->theData[5];
  switch (requestInfo) {
  case ZSCAN_PROCEDURE:
  {
    jam();
#if defined VM_TRACE || defined ERROR_INSERT
    storedProcCountNonAPI(apiBlockref, +1);
#endif
    SectionHandle handle(this, signal);
    ndbrequire(handle.m_cnt == 1);

    scanProcedure(signal,
                  regOperPtr.p,
                  &handle,
                  false); // Not copy
    break;
  }
  case ZCOPY_PROCEDURE:
    jam();
#if defined VM_TRACE || defined ERROR_INSERT
    storedProcCountNonAPI(apiBlockref, +1);
#endif
    copyProcedure(signal, regTabPtr, regOperPtr.p);
    break;
  case ZSTORED_PROCEDURE_DELETE:
    jam();
#if defined VM_TRACE || defined ERROR_INSERT
    storedProcCountNonAPI(apiBlockref, -1);
#endif
    deleteScanProcedure(signal, regOperPtr.p);
    break;
  default:
    ndbrequire(false);
  }//switch
}//Dbtup::execSTORED_PROCREQ()
// Trigger signals
void
Dbtup::execCREATE_TRIG_REQ(Signal* signal)
{
  jamEntry();
  BlockReference senderRef = signal->getSendersBlockRef();
  const CreateTrigReq reqCopy = *(const CreateTrigReq*)signal->getDataPtr();
  const CreateTrigReq* const req = &reqCopy;
  CreateTrigRef::ErrorCode error= CreateTrigRef::NoError;

  // Find table
  TablerecPtr tabPtr;
  tabPtr.i = req->getTableId();
  ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);

  if (tabPtr.p->tableStatus != DEFINED )
  {
    jam();
    error= CreateTrigRef::InvalidTable;
  }
  // Create trigger and associate it with the table
  else if (createTrigger(tabPtr.p, req))
  {
    jam();
    // Send conf
    CreateTrigConf* const conf = (CreateTrigConf*)signal->getDataPtrSend();
    conf->setUserRef(reference());
    conf->setConnectionPtr(req->getConnectionPtr());
    conf->setRequestType(req->getRequestType());
    conf->setTableId(req->getTableId());
    conf->setIndexId(req->getIndexId());
    conf->setTriggerId(req->getTriggerId());
    conf->setTriggerInfo(req->getTriggerInfo());
    sendSignal(senderRef, GSN_CREATE_TRIG_CONF, 
               signal, CreateTrigConf::SignalLength, JBB);
    return;
  }
  else
  {
    jam();
    error= CreateTrigRef::TooManyTriggers;
  }
  ndbassert(error != CreateTrigRef::NoError);
  // Send ref
  CreateTrigRef* const ref = (CreateTrigRef*)signal->getDataPtrSend();
  ref->setUserRef(reference());
  ref->setConnectionPtr(req->getConnectionPtr());
  ref->setRequestType(req->getRequestType());
  ref->setTableId(req->getTableId());
  ref->setIndexId(req->getIndexId());
  ref->setTriggerId(req->getTriggerId());
  ref->setTriggerInfo(req->getTriggerInfo());
  ref->setErrorCode(error);
  sendSignal(senderRef, GSN_CREATE_TRIG_REF, 
	     signal, CreateTrigRef::SignalLength, JBB);
}//Dbtup::execCREATE_TRIG_REQ()
void Cmvmi::execCONNECT_REP(Signal *signal){
  const Uint32 hostId = signal->theData[0];
  jamEntry();
  
  const NodeInfo::NodeType type = (NodeInfo::NodeType)getNodeInfo(hostId).m_type;
  ndbrequire(type != NodeInfo::INVALID);
  globalData.m_nodeInfo[hostId].m_version = 0;
  globalData.m_nodeInfo[hostId].m_signalVersion = 0;
  
  if(type == NodeInfo::DB || globalData.theStartLevel >= NodeState::SL_STARTED){
    jam();
    
    /**
     * Inform QMGR that client has connected
     */

    signal->theData[0] = hostId;
    sendSignal(QMGR_REF, GSN_CONNECT_REP, signal, 1, JBA);
  } else if(globalData.theStartLevel == NodeState::SL_CMVMI ||
            globalData.theStartLevel == NodeState::SL_STARTING) {
    jam();
    /**
     * Someone connected before start was finished
     */
    if(type == NodeInfo::MGM){
      jam();
      signal->theData[0] = hostId;
      sendSignal(QMGR_REF, GSN_CONNECT_REP, signal, 1, JBA);
    } else {
      /**
       * Dont allow api nodes to connect
       */
      ndbout_c("%d %d %d", hostId, type, globalData.theStartLevel);
      abort();
      globalTransporterRegistry.do_disconnect(hostId);
    }
  }
  
  /* Automatically subscribe events for MGM nodes.
   */
  if(type == NodeInfo::MGM){
    jam();
    globalTransporterRegistry.setIOState(hostId, NoHalt);
  }

  //------------------------------------------
  // Also report this event to the Event handler
  //------------------------------------------
  signal->theData[0] = NDB_LE_Connected;
  signal->theData[1] = hostId;
  signal->header.theLength = 2;
  
  execEVENT_REP(signal);
}
int
Dbtup::realloc_var_part(Fragrecord* fragPtr, Tablerec* tabPtr, PagePtr pagePtr,
			Var_part_ref* refptr, Uint32 oldsz, Uint32 newsz)
{
  Uint32 add = newsz - oldsz;
  Var_page* pageP = (Var_page*)pagePtr.p;
  Local_key oldref;
  refptr->copyout(&oldref);
  
  if (pageP->free_space >= add)
  {
    jam();
    if(!pageP->is_space_behind_entry(oldref.m_page_idx, add))
    {
      if(0) printf("extra reorg");
      jam();
      /**
       * In this case we need to reorganise the page to fit. To ensure we
       * don't complicate matters we make a little trick here where we
       * fool the reorg_page to avoid copying the entry at hand and copy
       * that separately at the end. This means we need to copy it out of
       * the page before reorg_page to save the entry contents.
       */
      Uint32* copyBuffer= cinBuffer;
      memcpy(copyBuffer, pageP->get_ptr(oldref.m_page_idx), 4*oldsz);
      pageP->set_entry_len(oldref.m_page_idx, 0);
      pageP->free_space += oldsz;
      pageP->reorg((Var_page*)ctemp_page);
      memcpy(pageP->get_free_space_ptr(), copyBuffer, 4*oldsz);
      pageP->set_entry_offset(oldref.m_page_idx, pageP->insert_pos);
      add += oldsz;
    }
    pageP->grow_entry(oldref.m_page_idx, add);
    update_free_page_list(fragPtr, pagePtr);
  }
  else
  {
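    // Not enough free space on this page: allocate a new var part, copy the
    // data there and free the old entry.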
    Local_key newref;
    Uint32 *src = pageP->get_ptr(oldref.m_page_idx);
    Uint32 *dst = alloc_var_part(fragPtr, tabPtr, newsz, &newref);
    if (unlikely(dst == 0))
      return -1;

    ndbassert(oldref.m_page_no != newref.m_page_no);
    ndbassert(pageP->get_entry_len(oldref.m_page_idx) == oldsz);
    memcpy(dst, src, 4*oldsz);
    refptr->assign(&newref);
    
    pageP->free_record(oldref.m_page_idx, Var_page::CHAIN);
    update_free_page_list(fragPtr, pagePtr);    
  }
  
  return 0;
}
/**
 * execROUTE_ORD
 * Allows other blocks to route signals as if they
 * came from TRPMAN
 * Useful in ndbmtd for synchronising signals w.r.t
 * external signals received from other nodes which
 * arrive from the same thread that runs TRPMAN
 */
void
Trpman::execROUTE_ORD(Signal* signal)
{
  jamEntry();
  if (!assembleFragments(signal))
  {
    jam();
    return;
  }

  SectionHandle handle(this, signal);

  RouteOrd* ord = (RouteOrd*)signal->getDataPtr();
  Uint32 dstRef = ord->dstRef;
  Uint32 srcRef = ord->srcRef;
  Uint32 gsn = ord->gsn;
  /* ord->cnt ignored */

  Uint32 nodeId = refToNode(dstRef);

  if (likely((nodeId == 0) ||
             getNodeInfo(nodeId).m_connected))
  {
    jam();
    Uint32 secCount = handle.m_cnt;
    ndbrequire(secCount >= 1 && secCount <= 3);

    jamLine(secCount);

    /**
     * Put section 0 in signal->theData
     */
    Uint32 sigLen = handle.m_ptr[0].sz;
    ndbrequire(sigLen <= 25);
    copy(signal->theData, handle.m_ptr[0]);

    SegmentedSectionPtr save = handle.m_ptr[0];
    for (Uint32 i = 0; i < secCount - 1; i++)
      handle.m_ptr[i] = handle.m_ptr[i+1];
    handle.m_cnt--;

    sendSignal(dstRef, gsn, signal, sigLen, JBB, &handle);

    handle.m_cnt = 1;
    handle.m_ptr[0] = save;
    releaseSections(handle);
    return ;
  }

  releaseSections(handle);
  warningEvent("Unable to route GSN: %d from %x to %x",
	       gsn, srcRef, dstRef);
}
bool
Ndbfs::scanIPC(Signal* signal)
{
   Request* request = theFromThreads.tryReadChannel();
   jam();
   if (request) {
      jam();
      report(request, signal);
      theRequestPool->put(request);
      return true;
   }
   return false;
}
/* ------------------------------------------------------------------------ */
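/* Return the index of the first free list whose maximum size can hold the
 * given free space. */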
Uint32 Dbtup::calculate_free_list_impl(Uint32 free_space_size) const
{
  Uint32 i;
  for (i = 0; i < MAX_FREE_LIST; i++) {
    jam();
    if (free_space_size <= c_max_list_size[i]) {
      jam();
      return i;
    }
  }
  ndbrequire(false);
  return 0;
}
void
Trpman::execCLOSE_COMREQ(Signal* signal)
{
  // Close communication with the node and halt input/output from
  // other blocks than QMGR

  CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];

  const BlockReference userRef = closeCom->xxxBlockRef;
  Uint32 requestType = closeCom->requestType;
  Uint32 failNo = closeCom->failNo;
//  Uint32 noOfNodes = closeCom->noOfNodes;

  jamEntry();
  for (unsigned i = 1; i < MAX_NODES; i++)
  {
    if (NodeBitmask::get(closeCom->theNodes, i) &&
        handles_this_node(i))
    {
      jam();

      //-----------------------------------------------------
      // Report that the connection to the node is closed
      //-----------------------------------------------------
      signal->theData[0] = NDB_LE_CommunicationClosed;
      signal->theData[1] = i;
      sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);

      globalTransporterRegistry.setIOState(i, HaltIO);
      globalTransporterRegistry.do_disconnect(i);
    }
  }

  if (requestType != CloseComReqConf::RT_NO_REPLY)
  {
    ndbassert((requestType == CloseComReqConf::RT_API_FAILURE) ||
              ((requestType == CloseComReqConf::RT_NODE_FAILURE) &&
               (failNo != 0)));
    jam();
    CloseComReqConf* closeComConf = (CloseComReqConf *)signal->getDataPtrSend();
    closeComConf->xxxBlockRef = userRef;
    closeComConf->requestType = requestType;
    closeComConf->failNo = failNo;

    /* Note assumption that noOfNodes and the theNodes bitmap
     * have not been trampled above.
     */
    sendSignal(TRPMAN_REF, GSN_CLOSE_COMCONF, signal, 19, JBA);
  }
}
/*
    PR0: File Pointer , theData[0]
    DR0: User reference, theData[1]
    DR1: User Pointer,   etc.
    DR2: Flag
    DR3: Var number
    DR4: amount of pages
    DR5->: Memory Page id and File page id according to Flag
*/
void 
Ndbfs::execFSWRITEREQ(Signal* signal)
{
  jamEntry();
  const FsReadWriteReq * const fsWriteReq = (FsReadWriteReq *)&signal->theData[0];
  
  if (fsWriteReq->getSyncFlag(fsWriteReq->operationFlag) == true){
    jam();
    readWriteRequest( Request::writeSync, signal );
  } else {
    jam();
    readWriteRequest( Request::write, signal );
  }
}
void
Dbtup::execACC_CHECK_SCAN(Signal* signal)
{
  jamEntry();
  const AccCheckScan reqCopy = *(const AccCheckScan*)signal->getDataPtr();
  const AccCheckScan* const req = &reqCopy;
  ScanOpPtr scanPtr;
  c_scanOpPool.getPtr(scanPtr, req->accPtr);
  ScanOp& scan = *scanPtr.p;
  // fragment
  FragrecordPtr fragPtr;
  fragPtr.i = scan.m_fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  Fragrecord& frag = *fragPtr.p;
  if (req->checkLcpStop == AccCheckScan::ZCHECK_LCP_STOP) {
    jam();
    signal->theData[0] = scan.m_userPtr;
    signal->theData[1] = true;
    EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
    jamEntry();
    return;
  }
  if (scan.m_bits & ScanOp::SCAN_LOCK_WAIT) {
    jam();
    // LQH asks if we are waiting for lock and we tell it to ask again
    NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
    conf->scanPtr = scan.m_userPtr;
    conf->accOperationPtr = RNIL;       // no tuple returned
    conf->fragId = frag.fragmentId;
    unsigned signalLength = 3;
    // if TC has ordered scan close, it will be detected here
    sendSignal(scan.m_userRef, GSN_NEXT_SCANCONF,
        signal, signalLength, JBB);
    return;     // stop
  }
  if (scan.m_state == ScanOp::First) {
    jam();
    scanFirst(signal, scanPtr);
  }
  if (scan.m_state == ScanOp::Next) {
    jam();
    bool immediate = scanNext(signal, scanPtr);
    if (! immediate) {
      jam();
      // time-slicing via TUP or PGMAN
      return;
    }
  }
  scanReply(signal, scanPtr);
}
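/* Build an AttributeHeader in inBuffer for every attribute id set in the
 * mask and return the number of headers written. */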
Uint32 Dbtup::setAttrIds(Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask,
                         Uint32 noOfAttributes,
                         Uint32* inBuffer)
{
  Uint32 bufIndx = 0;
  for (Uint32 i = 0; i < noOfAttributes; i++) {
    jam();
    if (attributeMask.get(i)) {
      jam();
      AttributeHeader::init(&inBuffer[bufIndx++], i, 0);
    }
  }
  return bufIndx;
}
void
Dbtux::findFrag(const Index& index, Uint32 fragId, FragPtr& fragPtr)
{
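  // Linear search over the index's fragments; fragPtr.i is set to RNIL if
  // fragId is not found.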
  const Uint32 numFrags = index.m_numFrags;
  for (Uint32 i = 0; i < numFrags; i++) {
    jam();
    if (index.m_fragId[i] == fragId) {
      jam();
      fragPtr.i = index.m_fragPtrI[i];
      c_fragPool.getPtr(fragPtr);
      return;
    }
  }
  fragPtr.i = RNIL;
}
void Trix::execREAD_NODESCONF(Signal* signal)
{
  jamEntry();

  ReadNodesConf * const  readNodes = (ReadNodesConf *)signal->getDataPtr();
  //Uint32 noOfNodes   = readNodes->noOfNodes;
  NodeRecPtr nodeRecPtr;

  c_masterNodeId = readNodes->masterNodeId;
  c_masterTrixRef = RNIL;
  c_noNodesFailed = 0;

  for(unsigned i = 0; i < MAX_NDB_NODES; i++) {
    jam();
    if(NodeBitmask::get(readNodes->allNodes, i)) {
      // Node is defined
      jam();
      ndbrequire(c_theNodes.seizeId(nodeRecPtr, i));
      nodeRecPtr.p->trixRef = calcTrixBlockRef(i);
      if (i == c_masterNodeId) {
        c_masterTrixRef = nodeRecPtr.p->trixRef;
      }
      if(NodeBitmask::get(readNodes->inactiveNodes, i)){
        // Node is not active
        jam();
        /**-----------------------------------------------------------------
         * THIS NODE IS DEFINED IN THE CLUSTER BUT IS NOT ALIVE CURRENTLY.
         * WE ADD THE NODE TO THE SET OF FAILED NODES AND ALSO SET THE
         * BLOCKSTATE TO BUSY TO AVOID ADDING TRIGGERS OR INDEXES WHILE
         * NOT ALL NODES ARE ALIVE.
         *------------------------------------------------------------------*/
        arrGuard(c_noNodesFailed, MAX_NDB_NODES);
        nodeRecPtr.p->alive = false;
        c_noNodesFailed++;
        c_blockState = Trix::NODE_FAILURE;
      }
      else {
        // Node is active
        jam();
        c_noActiveNodes++;
        nodeRecPtr.p->alive = true;
      }
    }
  }
  if (c_noNodesFailed == 0) {
    c_blockState = Trix::STARTED;
  }
}
/*
 * Check if a scan is linked to this node.  Only for ndbrequire.
 */
bool
Dbtux::islinkScan(NodeHandle& node, ScanOpPtr scanPtr)
{
  ScanOpPtr currPtr;
  currPtr.i = node.getNodeScan();
  while (currPtr.i != RNIL) {
    jam();
    c_scanOpPool.getPtr(currPtr);
    if (currPtr.i == scanPtr.i) {
      jam();
      return true;
    }
    currPtr.i = currPtr.p->m_nodeScan;
  }
  return false;
}
/*
 * Allocate index node in TUP.
 */
int
Dbtux::allocNode(TuxCtx& ctx, NodeHandle& node)
{
  if (ERROR_INSERTED(12007)) {
    jam();
    CLEAR_ERROR_INSERT_VALUE;
    return TuxMaintReq::NoMemError;
  }
  Frag& frag = node.m_frag;
  Uint32 pageId = NullTupLoc.getPageId();
  Uint32 pageOffset = NullTupLoc.getPageOffset();
  Uint32* node32 = 0;
  int errorCode = c_tup->tuxAllocNode(ctx.jamBuffer,
                                      frag.m_tupIndexFragPtrI,
                                      pageId, pageOffset, node32);
  thrjamEntry(ctx.jamBuffer);
  if (errorCode == 0) {
    thrjam(ctx.jamBuffer);
    node.m_loc = TupLoc(pageId, pageOffset);
    node.m_node = reinterpret_cast<TreeNode*>(node32);
    ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0);
  } else {
    switch (errorCode) {
    case 827:
      errorCode = TuxMaintReq::NoMemError;
      break;
    }
  }
  return errorCode;
}