Example 1
void
Dbtup::execNEXT_SCANREQ(Signal* signal)
{
  jamEntry();
  const NextScanReq reqCopy = *(const NextScanReq*)signal->getDataPtr();
  const NextScanReq* const req = &reqCopy;
  ScanOpPtr scanPtr;
  c_scanOpPool.getPtr(scanPtr, req->accPtr);
  ScanOp& scan = *scanPtr.p;
  switch (req->scanFlag) {
  case NextScanReq::ZSCAN_NEXT:
    jam();
    break;
  case NextScanReq::ZSCAN_NEXT_COMMIT:
    jam();
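    // Fall through: ZSCAN_NEXT_COMMIT also performs the commit/unlock work below.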
  case NextScanReq::ZSCAN_COMMIT:
    jam();
    if ((scan.m_bits & ScanOp::SCAN_LOCK) != 0) {
      jam();
      AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
      lockReq->returnCode = RNIL;
      lockReq->requestInfo = AccLockReq::Unlock;
      lockReq->accOpPtr = req->accOperationPtr;
      EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ,
          signal, AccLockReq::UndoSignalLength);
      jamEntry();
      ndbrequire(lockReq->returnCode == AccLockReq::Success);
      removeAccLockOp(scan, req->accOperationPtr);
    }
    if (req->scanFlag == NextScanReq::ZSCAN_COMMIT) {
      NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
      conf->scanPtr = scan.m_userPtr;
      unsigned signalLength = 1;
      sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF,
		 signal, signalLength, JBB);
      return;
    }
    break;
  case NextScanReq::ZSCAN_CLOSE:
    jam();
    if (scan.m_bits & ScanOp::SCAN_LOCK_WAIT) {
      jam();
      ndbrequire(scan.m_accLockOp != RNIL);
      // use ACC_ABORTCONF to flush out any reply in job buffer
      AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
      lockReq->returnCode = RNIL;
      lockReq->requestInfo = AccLockReq::AbortWithConf;
      lockReq->accOpPtr = scan.m_accLockOp;
      EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ,
		     signal, AccLockReq::UndoSignalLength);
      jamEntry();
      ndbrequire(lockReq->returnCode == AccLockReq::Success);
      scan.m_state = ScanOp::Aborting;
      return;
    }
    if (scan.m_state == ScanOp::Locked) {
      jam();
      ndbrequire(scan.m_accLockOp != RNIL);
      AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
      lockReq->returnCode = RNIL;
      lockReq->requestInfo = AccLockReq::Abort;
      lockReq->accOpPtr = scan.m_accLockOp;
      EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ,
		     signal, AccLockReq::UndoSignalLength);
      jamEntry();
      ndbrequire(lockReq->returnCode == AccLockReq::Success);
      scan.m_accLockOp = RNIL;
    }
    scan.m_state = ScanOp::Aborting;
    scanClose(signal, scanPtr);
    return;
  case NextScanReq::ZSCAN_NEXT_ABORT:
    jam();
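    // Fall through: ZSCAN_NEXT_ABORT is not expected here and hits the default ndbrequire.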
  default:
    jam();
    ndbrequire(false);
    break;
  }
  // start looking for next scan result
  AccCheckScan* checkReq = (AccCheckScan*)signal->getDataPtrSend();
  checkReq->accPtr = scanPtr.i;
  checkReq->checkLcpStop = AccCheckScan::ZNOT_CHECK_LCP_STOP;
  EXECUTE_DIRECT(DBTUP, GSN_ACC_CHECK_SCAN, signal, AccCheckScan::SignalLength);
  jamEntry();
}
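
Throughout these examples, EXECUTE_DIRECT runs the destination block's signal handler synchronously on the same signal object, which is why the caller can read the reply (e.g. lockReq->returnCode) right after the call, and why jamEntry() re-stamps the jam trace on return. A minimal sketch of that idea, with illustrative stand-in names rather than the real NDB kernel API:

// A minimal sketch of the synchronous dispatch idea behind EXECUTE_DIRECT.
// All names here are illustrative stand-ins, not the NDB kernel API.
#include <cstdint>
#include <cstdio>

struct Signal { uint32_t theData[25]; };

// "DBACC" side: consumes the request and writes the result in place.
void acc_lockreq(Signal* signal) {
  signal->theData[0] = 0;               // returnCode = Success
}

int main() {
  Signal signal;
  signal.theData[0] = 0xffffffff;       // returnCode = RNIL (not yet set)
  acc_lockreq(&signal);                 // like EXECUTE_DIRECT: runs synchronously
  std::printf("returnCode=%u\n", signal.theData[0]);  // readable immediately
  return 0;
}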
Example 2
void
Dbtup::execBUILD_INDX_IMPL_REQ(Signal* signal)
{
  jamEntry();
#ifdef TIME_MEASUREMENT
  time_events= 0;
  tot_time_passed= 0;
  number_events= 1;
#endif
  const BuildIndxImplReq* const req =
    (const BuildIndxImplReq*)signal->getDataPtr();
  // get new operation
  BuildIndexPtr buildPtr;
  if (ERROR_INSERTED(4031) || ! c_buildIndexList.seizeFirst(buildPtr)) {
    jam();
    BuildIndexRec buildRec;
    buildRec.m_request = *req;
    buildRec.m_errorCode = BuildIndxImplRef::Busy;
    if (ERROR_INSERTED(4031))
    {
      CLEAR_ERROR_INSERT_VALUE;
    }
    buildIndexReply(signal, &buildRec);
    return;
  }
  buildPtr.p->m_request = *req;
  const BuildIndxImplReq* buildReq = &buildPtr.p->m_request;
  // check
  buildPtr.p->m_errorCode= BuildIndxImplRef::NoError;
  buildPtr.p->m_outstanding = 0;
  do {
    if (buildReq->tableId >= cnoOfTablerec) {
      jam();
      buildPtr.p->m_errorCode= BuildIndxImplRef::InvalidPrimaryTable;
      break;
    }
    TablerecPtr tablePtr;
    tablePtr.i= buildReq->tableId;
    ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
    if (tablePtr.p->tableStatus != DEFINED) {
      jam();
      buildPtr.p->m_errorCode= BuildIndxImplRef::InvalidPrimaryTable;
      break;
    }
    // memory page format
    buildPtr.p->m_build_vs =
      (tablePtr.p->m_attributes[MM].m_no_of_varsize +
       tablePtr.p->m_attributes[MM].m_no_of_dynamic) > 0;
    if (DictTabInfo::isOrderedIndex(buildReq->indexType)) {
      jam();
      const DLList<TupTriggerData>& triggerList = 
	tablePtr.p->tuxCustomTriggers;

      TriggerPtr triggerPtr;
      triggerList.first(triggerPtr);
      while (triggerPtr.i != RNIL) {
	if (triggerPtr.p->indexId == buildReq->indexId) {
	  jam();
	  break;
	}
	triggerList.next(triggerPtr);
      }
      if (triggerPtr.i == RNIL) {
	jam();
	// trigger was not created
        ndbassert(false);
	buildPtr.p->m_errorCode = BuildIndxImplRef::InternalError;
	break;
      }
      buildPtr.p->m_indexId = buildReq->indexId;
      buildPtr.p->m_buildRef = DBTUX;
      AlterIndxImplReq* req = (AlterIndxImplReq*)signal->getDataPtrSend();
      req->indexId = buildReq->indexId;
      req->senderRef = 0;
      req->requestType = AlterIndxImplReq::AlterIndexBuilding;
      EXECUTE_DIRECT(DBTUX, GSN_ALTER_INDX_IMPL_REQ, signal, 
                     AlterIndxImplReq::SignalLength);
    } else if(buildReq->indexId == RNIL) {
      jam();
      // REBUILD of acc
      buildPtr.p->m_indexId = RNIL;
      buildPtr.p->m_buildRef = DBACC;
    } else {
      jam();
      buildPtr.p->m_errorCode = BuildIndxImplRef::InvalidIndexType;
      break;
    }

    // set to first tuple position
    const Uint32 firstTupleNo = 0;
    buildPtr.p->m_fragNo= 0;
    buildPtr.p->m_pageId= 0;
    buildPtr.p->m_tupleNo= firstTupleNo;
    // start build

    bool offline = !!(buildReq->requestType&BuildIndxImplReq::RF_BUILD_OFFLINE);
    if (offline && m_max_parallel_index_build > 1)
    {
      jam();
      buildIndexOffline(signal, buildPtr.i);
    }
    else
    {
      jam();
      buildIndex(signal, buildPtr.i);
    }
    return;
  } while (0);
  // check failed
  buildIndexReply(signal, buildPtr.p);
  c_buildIndexList.release(buildPtr);
}
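
The body above uses the do { ... } while (0) idiom: each failed check records an error code and breaks out to the shared reply/cleanup path after the block, while the success path returns directly. A small self-contained sketch of the same pattern, with illustrative names only:

// A sketch of the do { ... } while (0) early-exit pattern used above:
// every failed check records an error and breaks to the shared
// reply/cleanup path after the block. Names are illustrative only.
#include <cstdio>

enum BuildError { NoError = 0, InvalidTable, InvalidIndexType };

BuildError start_build(unsigned tableId, unsigned tableCount, bool ordered) {
  BuildError err = NoError;
  do {
    if (tableId >= tableCount) { err = InvalidTable; break; }
    if (!ordered)              { err = InvalidIndexType; break; }
    return NoError;            // success leaves the function directly
  } while (0);
  // shared failure path, like buildIndexReply + release above
  std::printf("build refused: %d\n", err);
  return err;
}

int main() { start_build(5, 3, true); return 0; }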
Example 3
void
Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
{
  // get build record
  BuildIndexPtr buildPtr;
  buildPtr.i= buildPtrI;
  c_buildIndexList.getPtr(buildPtr);
  const BuildIndxImplReq* buildReq= &buildPtr.p->m_request;
  // get table
  TablerecPtr tablePtr;
  tablePtr.i= buildReq->tableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);

  const Uint32 firstTupleNo = 0;
  const Uint32 tupheadsize = tablePtr.p->m_offsets[MM].m_fix_header_size;

#ifdef TIME_MEASUREMENT
  NDB_TICKS start;
  NDB_TICKS stop;
  Uint64 time_passed;
#endif
  do {
    // get fragment
    FragrecordPtr fragPtr;
    if (buildPtr.p->m_fragNo == NDB_ARRAY_SIZE(tablePtr.p->fragrec)) {
      jam();
      // build ready
      buildIndexReply(signal, buildPtr.p);
      c_buildIndexList.release(buildPtr);
      return;
    }
    ndbrequire(buildPtr.p->m_fragNo < NDB_ARRAY_SIZE(tablePtr.p->fragrec));
    fragPtr.i= tablePtr.p->fragrec[buildPtr.p->m_fragNo];
    if (fragPtr.i == RNIL) {
      jam();
      buildPtr.p->m_fragNo++;
      buildPtr.p->m_pageId= 0;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
    // get page
    PagePtr pagePtr;
    if (buildPtr.p->m_pageId >= fragPtr.p->m_max_page_cnt)
    {
      jam();
      buildPtr.p->m_fragNo++;
      buildPtr.p->m_pageId= 0;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    Uint32 realPageId= getRealpidCheck(fragPtr.p, buildPtr.p->m_pageId);
    // skip empty page
    if (realPageId == RNIL) 
    {
      jam();
      goto next_tuple;
    }

    c_page_pool.getPtr(pagePtr, realPageId);

next_tuple:
    // get tuple
    Uint32 pageIndex = ~0;
    const Tuple_header* tuple_ptr = 0;
    pageIndex = buildPtr.p->m_tupleNo * tupheadsize;
    if (pageIndex + tupheadsize > Fix_page::DATA_WORDS) {
      jam();
      buildPtr.p->m_pageId++;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    
    if (realPageId == RNIL)
    {
      jam();
      buildPtr.p->m_tupleNo++;
      break;
    }

    tuple_ptr = (Tuple_header*)&pagePtr.p->m_data[pageIndex];
    // skip over free tuple
    if (tuple_ptr->m_header_bits & Tuple_header::FREE) {
      jam();
      buildPtr.p->m_tupleNo++;
      break;
    }
    Uint32 tupVersion= tuple_ptr->get_tuple_version();
    OperationrecPtr pageOperPtr;
    pageOperPtr.i= tuple_ptr->m_operation_ptr_i;
#ifdef TIME_MEASUREMENT
    start = NdbTick_getCurrentTicks();
#endif
    // add to index
    TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
    req->errorCode = RNIL;
    req->tableId = tablePtr.i;
    req->indexId = buildPtr.p->m_indexId;
    req->fragId = tablePtr.p->fragid[buildPtr.p->m_fragNo];
    req->pageId = realPageId;
    req->tupVersion = tupVersion;
    req->opInfo = TuxMaintReq::OpAdd;
    req->tupFragPtrI = fragPtr.i;
    req->fragPageId = buildPtr.p->m_pageId;
    req->pageIndex = pageIndex;

    if (pageOperPtr.i == RNIL)
    {
      EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
		     signal, TuxMaintReq::SignalLength+2);
    }
    else
    {
      /*
      If there is an ongoing operation on the tuple then it is either a
      copy tuple or an original tuple with an ongoing transaction. In
      both cases realPageId and pageOffset refer to the original tuple.
      The tuple address stored in TUX will always be the original tuple
      but with the tuple version of the tuple we found.

      This is necessary to avoid having to update TUX at abort of
      update. If an update aborts then the copy tuple is copied to
      the original tuple. The build will however have found that
      tuple as a copy tuple. The original tuple is stable and is thus
      preferable to store in TUX.
      */
      jam();

      /**
       * Since copy tuples can no longer be found on real pages,
       * we build all versions of the tuple here.
       *
       * Note: only "real" tupVersions should be added,
       *       i.e. deletes should not be added
       *       (unless it is the first op, in which case the "original"
       *       version should be added).
       */

      /*
       * Start from the first operation.  This is only to make things
       * clearer.  It is not required by the ordered index implementation.
       */
      c_operation_pool.getPtr(pageOperPtr);
      while (pageOperPtr.p->prevActiveOp != RNIL)
      {
        jam();
        pageOperPtr.i = pageOperPtr.p->prevActiveOp;
        c_operation_pool.getPtr(pageOperPtr);
      }
      /*
       * Do not use req->errorCode as global control.
       */
      bool ok = true;
      /*
       * If first operation is an update, add previous version.
       * This version does not appear as the version of any operation.
       * At commit this version is removed by executeTuxCommitTriggers.
       * At abort it is preserved by executeTuxAbortTriggers.
       */
      if (pageOperPtr.p->op_type == ZUPDATE)
      {
        jam();
        req->errorCode = RNIL;
        req->tupVersion =
          decr_tup_version(pageOperPtr.p->op_struct.bit_field.tupVersion);
        EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
                       signal, TuxMaintReq::SignalLength+2);
        ok = (req->errorCode == 0);
      }
      /*
       * Add versions from all operations.
       *
       * Each operation has a tuple version.  For insert and update it
       * is the newly created version.  For delete it is the version
       * deleted.  The existence of operation tuple version implies that
       * a corresponding tuple version exists for TUX to read.
       *
       * We could be in the middle of a commit.  The process here makes
       * no assumptions about operation commit order.  (It should be
       * first to last but this is not the place to assert it).
       *
       * Duplicate versions are possible e.g. a delete in the middle
       * may have the same version as the previous operation.  TUX ignores
       * duplicate version errors during index build.
       */
      while (pageOperPtr.i != RNIL && ok)
      {
        jam();
        c_operation_pool.getPtr(pageOperPtr);
        req->errorCode = RNIL;
        req->tupVersion = pageOperPtr.p->op_struct.bit_field.tupVersion;
        EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
                       signal, TuxMaintReq::SignalLength+2);
        pageOperPtr.i = pageOperPtr.p->nextActiveOp;
        ok = (req->errorCode == 0);
      }
    } 
    
    jamEntry();
    if (req->errorCode != 0) {
      switch (req->errorCode) {
      case TuxMaintReq::NoMemError:
        jam();
        buildPtr.p->m_errorCode= BuildIndxImplRef::AllocationFailure;
        break;
      default:
        ndbrequire(false);
        break;
      }
      buildIndexReply(signal, buildPtr.p);
      c_buildIndexList.release(buildPtr);
      return;
    }
#ifdef TIME_MEASUREMENT
    stop = NdbTick_getCurrentTicks();
    time_passed= NdbTick_Elapsed(start, stop).microSec();
    if (time_passed < 1000) {
      time_events++;
      tot_time_passed += time_passed;
      if (time_events == number_events) {
        Uint64 mean_time_passed= tot_time_passed /
                                     (Uint64)number_events;
        ndbout << "Number of events= " << number_events;
        ndbout << " Mean time passed= " << mean_time_passed << endl;
        number_events <<= 1;
        tot_time_passed= 0;
        time_events= 0;
      }
    }
#endif
    // next tuple
    buildPtr.p->m_tupleNo++;
    break;
  } while (0);
  signal->theData[0]= ZBUILD_INDEX;
  signal->theData[1]= buildPtr.i;
  sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
}
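
Note how buildIndex processes at most one tuple per invocation and then re-schedules itself with a CONTINUEB signal, so the long build never blocks the signal scheduler. A stand-alone sketch of that cooperative-loop shape, using a plain queue as a hypothetical stand-in for the job buffer:

// A sketch of the CONTINUEB-style cooperative loop that ends buildIndex:
// each invocation does one unit of work, then re-schedules itself through
// the job buffer instead of looping. Hypothetical scaffolding, not NDB API.
#include <cstdio>
#include <functional>
#include <queue>

static std::queue<std::function<void()>> jobBuffer;  // stand-in for the scheduler

struct Builder {
  unsigned tupleNo = 0;
  unsigned tupleCount;
  explicit Builder(unsigned n) : tupleCount(n) {}

  void step() {
    do {
      if (tupleNo == tupleCount) {      // "build ready": reply and stop
        std::printf("build done\n");
        return;
      }
      std::printf("indexing tuple %u\n", tupleNo);
      tupleNo++;                        // next tuple
      break;                            // leave the one-shot block
    } while (0);
    // like sendSignal(reference(), GSN_CONTINUEB, ...): run again later
    jobBuffer.push([this] { step(); });
  }
};

int main() {
  Builder b(3);
  b.step();
  while (!jobBuffer.empty()) {          // stand-in for the signal executor
    jobBuffer.front()();
    jobBuffer.pop();
  }
  return 0;
}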
Example 4
void
DblqhProxy::execLCP_FRAG_ORD(Signal* signal)
{
  ndbrequire(signal->getLength() == LcpFragOrd::SignalLength);

  const LcpFragOrd* req = (const LcpFragOrd*)signal->getDataPtr();
  const LcpFragOrd req_copy = *req;

  bool lcp_complete_ord = req->lastFragmentFlag;

  if (c_lcpRecord.m_state == LcpRecord::L_IDLE)
  {
    jam();
    D("LCP: start" << V(req->lcpId));
    c_lcpRecord.m_state = LcpRecord::L_STARTING;
    c_lcpRecord.m_lcpId = req->lcpId;
    c_lcpRecord.m_lcp_frag_rep_cnt = 0;
    c_lcpRecord.m_lcp_frag_ord_cnt = 0;
    c_lcpRecord.m_complete_outstanding = 0;
    c_lcpRecord.m_lastFragmentFlag = false;
    c_lcpRecord.m_empty_lcp_req.clear();

    // handle start of LCP in PGMAN and TSMAN
    LcpFragOrd* req = (LcpFragOrd*)signal->getDataPtrSend();
    *req = req_copy;
    EXECUTE_DIRECT(PGMAN, GSN_LCP_FRAG_ORD,
                   signal, LcpFragOrd::SignalLength);
    *req = req_copy;
    EXECUTE_DIRECT(TSMAN, GSN_LCP_FRAG_ORD,
                   signal, LcpFragOrd::SignalLength);

    c_lcpRecord.m_state = LcpRecord::L_RUNNING;
  }

  jam();
  D("LCP: continue" << V(req->lcpId) << V(c_lcpRecord.m_lcp_frag_ord_cnt));
  ndbrequire(c_lcpRecord.m_state == LcpRecord::L_RUNNING);
  ndbrequire(c_lcpRecord.m_lcpId == req->lcpId);

  if (lcp_complete_ord)
  {
    jam();
    c_lcpRecord.m_lastFragmentFlag = true;
    if (getNoOfOutstanding(c_lcpRecord) == 0)
    {
      jam();
      completeLCP_1(signal);
      return;
    }

    /**
     * Wait for all LCP_FRAG_ORD/REP to complete
     */
    return;
  }
  else
  {
    jam();
    c_lcpRecord.m_last_lcp_frag_ord = req_copy;
  }

  c_lcpRecord.m_lcp_frag_ord_cnt++;

  // Forward
  ndbrequire(req->tableId < c_tableRecSize);
  if (c_tableRec[req->tableId] == 0)
  {
    jam();
    /**
     * Send to lqh-0...that will handle it...
     */
    sendSignal(workerRef(0),
               GSN_LCP_FRAG_ORD, signal, LcpFragOrd::SignalLength, JBB);
  }
  else
  {
    jam();
    Uint32 instance = getInstanceKey(req->tableId, req->fragmentId);
    sendSignal(numberToRef(DBLQH, instance, getOwnNodeId()),
               GSN_LCP_FRAG_ORD, signal, LcpFragOrd::SignalLength, JBB);
  }
}
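
The forwarding step at the end routes each order either to the worker instance that owns the fragment (via getInstanceKey) or, for tables unknown to the proxy, to worker 0. A toy sketch of that routing decision; instance_for is a hypothetical placeholder, not the real getInstanceKey mapping:

// A sketch of the routing decision at the end of execLCP_FRAG_ORD: orders
// for known tables go to the worker instance that owns the fragment,
// orders for unknown tables go to a default worker.
#include <cstdint>
#include <cstdio>

uint32_t instance_for(uint32_t tableId, uint32_t fragId, uint32_t workers) {
  return (tableId + fragId) % workers;  // placeholder hash, not the real mapping
}

void forward_frag_ord(uint32_t tableId, uint32_t fragId, bool tableKnown) {
  const uint32_t workers = 4;
  uint32_t target = tableKnown ? instance_for(tableId, fragId, workers)
                               : 0;     // unknown table: worker 0 handles it
  std::printf("LCP_FRAG_ORD -> worker %u\n", target);
}

int main() {
  forward_frag_ord(7, 2, true);
  forward_frag_ord(9, 0, false);
  return 0;
}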
Example 5
void
Restore::parse_record(Signal* signal, FilePtr file_ptr, 
		      const Uint32 *data, Uint32 len)
{
  List::Iterator it;
  LocalDataBuffer<15> columns(m_databuffer_pool, file_ptr.p->m_columns);  

  Uint32 * const key_start = signal->getDataPtrSend()+24;
  Uint32 * const attr_start = key_start + MAX_KEY_SIZE_IN_WORDS;

  data += 1;
  const Uint32* const dataStart = data;

  bool disk = false;
  bool rowid = false;
  bool gci = false;
  Uint32 keyLen;
  Uint32 attrLen;
  Local_key rowid_val;
  Uint64 gci_val;
  Uint32 tableId = file_ptr.p->m_table_id;
  const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tableId);

  if (likely(file_ptr.p->m_lcp_version >= NDBD_RAW_LCP))
  {
    rowid = true;
    rowid_val.m_page_no = data[0];
    rowid_val.m_page_idx = data[1];
    keyLen = c_tup->read_lcp_keys(tableId, data+2, len - 3, key_start);

    AttributeHeader::init(attr_start, AttributeHeader::READ_LCP, 4*(len - 3));
    memcpy(attr_start + 1, data + 2, 4 * (len - 3));
    attrLen = 1 + len - 3;
  }
  else
  {
    Uint32 *keyData = key_start;
    Uint32 *attrData = attr_start;
    union {
      Column c;
      Uint32 _align[sizeof(Column)/sizeof(Uint32)];
    };
    
    columns.first(it);
    while(!it.isNull())
    {
      _align[0] = *it.data; ndbrequire(columns.next(it));
      _align[1] = *it.data; columns.next(it);

      if (c.m_id == AttributeHeader::ROWID)
      {
        rowid_val.m_page_no = data[0];
        rowid_val.m_page_idx = data[1];
        data += 2;
        rowid = true;
        continue;
      }

      if (c.m_id == AttributeHeader::ROW_GCI)
      {
        memcpy(&gci_val, data, 8);
        data += 2;
        gci = true;
        continue;
      }

      if (! (c.m_flags & (Column::COL_VAR | Column::COL_NULL)))
      {
        ndbrequire(data < dataStart + len);

        if(c.m_flags & Column::COL_KEY)
        {
          memcpy(keyData, data, 4*c.m_size);
          keyData += c.m_size;
        }

        AttributeHeader::init(attrData++, c.m_id, c.m_size << 2);
        memcpy(attrData, data, 4*c.m_size);
        attrData += c.m_size;
        data += c.m_size;
      }

      if(c.m_flags & Column::COL_DISK)
        disk= true;
    }

    // second part is data driven
    while (data + 2 < dataStart + len) {
      Uint32 sz= ntohl(*data); data++;
      Uint32 id= ntohl(*data); data++; // column_no

      ndbrequire(columns.position(it, 2 * id));

      _align[0] = *it.data; ndbrequire(columns.next(it));
      _align[1] = *it.data;

      Uint32 sz32 = (sz + 3) >> 2;
      ndbassert(c.m_flags & (Column::COL_VAR | Column::COL_NULL));
      if (c.m_flags & Column::COL_KEY)
      {
        memcpy(keyData, data, 4 * sz32);
        keyData += sz32;
      }

      AttributeHeader::init(attrData++, c.m_id, sz);
      memcpy(attrData, data, sz);

      attrData += sz32;
      data += sz32;
    }

    ndbrequire(data == dataStart + len - 1);

    ndbrequire(disk == false); // Not supported...
    ndbrequire(rowid == true);
    keyLen = Uint32(keyData - key_start);
    attrLen = Uint32(attrData - attr_start);
    if (desc->noOfKeyAttr != desc->noOfVarKeys)
    {
      reorder_key(desc, key_start, keyLen);
    }
  }
  
  LqhKeyReq * req = (LqhKeyReq *)signal->getDataPtrSend();
  
  Uint32 hashValue;
  if (g_key_descriptor_pool.getPtr(tableId)->hasCharAttr)
    hashValue = calulate_hash(tableId, key_start);
  else
    hashValue = md5_hash((Uint64*)key_start, keyLen);
  
  Uint32 tmp= 0;
  LqhKeyReq::setAttrLen(tmp, attrLen);
  req->attrLen = tmp;

  tmp= 0;
  LqhKeyReq::setKeyLen(tmp, keyLen);
  LqhKeyReq::setLastReplicaNo(tmp, 0);
  /* ---------------------------------------------------------------------- */
  // Indicate Application Reference is present in bit 15
  /* ---------------------------------------------------------------------- */
  LqhKeyReq::setApplicationAddressFlag(tmp, 0);
  LqhKeyReq::setDirtyFlag(tmp, 1);
  LqhKeyReq::setSimpleFlag(tmp, 1);
  LqhKeyReq::setOperation(tmp, ZINSERT);
  LqhKeyReq::setSameClientAndTcFlag(tmp, 0);
  LqhKeyReq::setAIInLqhKeyReq(tmp, 0);
  LqhKeyReq::setNoDiskFlag(tmp, disk ? 0 : 1);
  LqhKeyReq::setRowidFlag(tmp, 1);
  LqhKeyReq::setGCIFlag(tmp, gci);
  req->clientConnectPtr = file_ptr.i;
  req->hashValue = hashValue;
  req->requestInfo = tmp;
  req->tcBlockref = reference();
  req->savePointId = 0;
  req->tableSchemaVersion = file_ptr.p->m_table_id + 
    (file_ptr.p->m_table_version << 16);
  req->fragmentData = file_ptr.p->m_fragment_id;
  req->transId1 = 0;
  req->transId2 = 0;
  req->scanInfo = 0;
  memcpy(req->variableData, key_start, 16);
  Uint32 pos = keyLen > 4 ? 4 : keyLen;
  req->variableData[pos++] = rowid_val.m_page_no;
  req->variableData[pos++] = rowid_val.m_page_idx;
  if (gci)
    req->variableData[pos++] = (Uint32)gci_val;
  file_ptr.p->m_outstanding_operations++;
  EXECUTE_DIRECT(DBLQH, GSN_LQHKEYREQ, signal, 
		 LqhKeyReq::FixedSignalLength+pos);
  
  if(keyLen > 4)
  {
    c_lqh->receive_keyinfo(signal,
			   key_start + 4,
			   keyLen - 4);
  }
  
  c_lqh->receive_attrinfo(signal, attr_start, attrLen);
}
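
The data-driven loop in the pre-NDBD_RAW_LCP branch reads each variable-sized value as a (size, column-id) pair in network byte order, with the payload rounded up to whole 32-bit words and one trailing word left at the end of the record. A stand-alone sketch of that framing, for illustration only; the real layout is defined by the LCP file format:

// A sketch of the framing parsed by the data-driven loop above: each
// variable-sized value is preceded by a (size, column-id) pair in network
// byte order, and the payload is rounded up to whole 32-bit words.
#include <arpa/inet.h>   // ntohl, htonl
#include <cstdint>
#include <cstdio>

void parse_var_part(const uint32_t* data, const uint32_t* end) {
  while (data + 2 < end) {
    uint32_t sz = ntohl(*data); data++;  // value length in bytes
    uint32_t id = ntohl(*data); data++;  // column number
    uint32_t sz32 = (sz + 3) >> 2;       // round up to whole words
    std::printf("column %u: %u bytes (%u words)\n", id, sz, sz32);
    data += sz32;                        // skip the word-aligned payload
  }
}

int main() {
  // One column (id 3) with a 5-byte value padded to 2 words, plus the
  // trailing word the real records carry.
  uint32_t rec[] = { htonl(5), htonl(3), 0, 0, 0 };
  parse_var_part(rec, rec + 5);
  return 0;
}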
Example 6
void Dbtup::executeTrigger(KeyReqStruct *req_struct,
                           TupTriggerData* const trigPtr,
                           Operationrec* const regOperPtr,
                           bool disk)
{
  /**
   * The block below does not work together with GREP.
   * I have 2 db nodes (2 replicas) -> one node group.
   * I want FIRE_TRIG_ORD sent to all SumaParticipants from all nodes
   * in the node group described above. However, only one of the nodes
   * in the node group actually sends the FIRE_TRIG_ORD, and the other
   * node enters this "hack" below.
   * I don't really know what the code snippet below does, but it
   * does not work with GREP the way Lars and I want it.
   * We need to have triggers fired from both the primary and the
   * backup replica, not only the primary as it is now.
   * 
   * Note: In Suma, I have changed triggers to be created with
   * setMonitorReplicas(true).
   * /Johan
   *
   * See RT 709
   */
  // XXX quick fix to NR, should fix in LQHKEYREQ instead
  /*  
      if (refToBlock(req_struct->TC_ref) == DBLQH) {
      jam();
      return;
      }
  */
  Signal* signal= req_struct->signal;
  BlockReference ref = trigPtr->m_receiverBlock;
  Uint32* const keyBuffer = &cinBuffer[0];
  Uint32* const afterBuffer = &coutBuffer[0];
  Uint32* const beforeBuffer = &clogMemBuffer[0];
  
  Uint32 noPrimKey, noAfterWords, noBeforeWords;
  FragrecordPtr regFragPtr;
  regFragPtr.i= regOperPtr->fragmentPtr;
  ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);

  if (ref == BACKUP) {
    jam();
    /*
    For the BACKUP implementation to work even when primaries change in
    the middle of the backup, the trigger must be set on all replicas.
    This checks whether this is the node where the trigger should fire.
    The check would preferably live entirely in the BACKUP block, but it
    was about five times simpler to put it here, and it is also much
    faster for the backup (small overhead for everybody else).
    */
    signal->theData[0] = trigPtr->triggerId;
    signal->theData[1] = regFragPtr.p->fragmentId;
    EXECUTE_DIRECT(BACKUP, GSN_BACKUP_TRIG_REQ, signal, 2);
    jamEntry();
    if (signal->theData[0] == 0) {
      jam();
      return;
    }
  }
  if (!readTriggerInfo(trigPtr,
                       regOperPtr,
                       req_struct,
                       regFragPtr.p,
                       keyBuffer,
                       noPrimKey,
                       afterBuffer,
                       noAfterWords,
                       beforeBuffer,
                       noBeforeWords,
                       disk)) {
    jam();
    return;
  }
//--------------------------------------------------------------------
// Now all data for this trigger has been read. It is now time to send
// the trigger information, consisting of two or three sets of
// TRIG_ATTRINFO signals and one FIRE_TRIG_ORD signal.
// We start by setting common header info for all TRIG_ATTRINFO signals.
//--------------------------------------------------------------------
  bool executeDirect;
  TrigAttrInfo* const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtrSend();
  trigAttrInfo->setConnectionPtr(req_struct->TC_index);
  trigAttrInfo->setTriggerId(trigPtr->triggerId);

  switch(trigPtr->triggerType) {
  case (TriggerType::SECONDARY_INDEX):
    jam();
    ref = req_struct->TC_ref;
    executeDirect = false;
    break;
  case (TriggerType::SUBSCRIPTION):
  case (TriggerType::SUBSCRIPTION_BEFORE):
    jam();
    // Since only backup uses subscription triggers we send to backup directly for now
    ref = trigPtr->m_receiverBlock;
    executeDirect = true;
    break;
  case (TriggerType::READ_ONLY_CONSTRAINT):
    terrorCode = ZREAD_ONLY_CONSTRAINT_VIOLATION;
    // XXX should return status and abort the rest
    return;
  default:
    ndbrequire(false);
    executeDirect= false; // remove warning
  }//switch

  req_struct->no_fired_triggers++;

  trigAttrInfo->setAttrInfoType(TrigAttrInfo::PRIMARY_KEY);
  sendTrigAttrInfo(signal, keyBuffer, noPrimKey, executeDirect, ref);

  switch(regOperPtr->op_struct.op_type) {
  case(ZINSERT):
    jam();
    // Send AttrInfo signals with new attribute values
    trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
    sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref);
    break;
  case(ZDELETE):
    if (trigPtr->sendBeforeValues) {
      jam();
      trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
      sendTrigAttrInfo(signal, beforeBuffer, noBeforeWords, executeDirect,ref);
    }
    break;
  case(ZUPDATE):
    jam();
    if (trigPtr->sendBeforeValues) {
      jam();
      trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
      sendTrigAttrInfo(signal, beforeBuffer, noBeforeWords, executeDirect,ref);
    }
    trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
    sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref);
    break;
  default:
    ndbrequire(false);
  }
  sendFireTrigOrd(signal,
                  req_struct,
                  regOperPtr,
                  trigPtr,
		  regFragPtr.p->fragmentId,
                  noPrimKey,
                  noBeforeWords,
                  noAfterWords);
}
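
So executeTrigger emits up to three TRIG_ATTRINFO sections (primary key, before values, after values, depending on the operation type) and then a single FIRE_TRIG_ORD carrying the word counts, which sendFireTrigOrd below constructs. A toy sketch of that sequence, with hypothetical types rather than the NDB signal classes:

// A toy sketch of the signal sequence executeTrigger produces: up to three
// TRIG_ATTRINFO sections followed by one FIRE_TRIG_ORD with the counts.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Section { const char* type; std::vector<uint32_t> words; };

void fire_trigger(uint32_t triggerId, const std::vector<Section>& sections) {
  for (const Section& s : sections)
    std::printf("TRIG_ATTRINFO %s: %zu words\n", s.type, s.words.size());
  std::printf("FIRE_TRIG_ORD trigger=%u, %zu sections\n",
              triggerId, sections.size());
}

int main() {
  // An update sends key + before values + after values, as in the switch above.
  fire_trigger(17, { {"PRIMARY_KEY",   {1, 2}},
                     {"BEFORE_VALUES", {3}},
                     {"AFTER_VALUES",  {4, 5}} });
  return 0;
}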
Example 7
void Dbtup::sendFireTrigOrd(Signal* signal,
                            KeyReqStruct *req_struct,
                            Operationrec * const regOperPtr, 
                            TupTriggerData* const trigPtr, 
			    Uint32 fragmentId,
                            Uint32 noPrimKeyWords, 
                            Uint32 noBeforeValueWords, 
                            Uint32 noAfterValueWords)
{
  FireTrigOrd* const fireTrigOrd = (FireTrigOrd *)signal->getDataPtrSend();
  
  fireTrigOrd->setConnectionPtr(req_struct->TC_index);
  fireTrigOrd->setTriggerId(trigPtr->triggerId);
  fireTrigOrd->fragId= fragmentId;

  switch(regOperPtr->op_struct.op_type) {
  case(ZINSERT):
    jam();
    fireTrigOrd->setTriggerEvent(TriggerEvent::TE_INSERT);
    break;
  case(ZDELETE):
    jam();
    fireTrigOrd->setTriggerEvent(TriggerEvent::TE_DELETE);
    break;
  case(ZUPDATE):
    jam();
    fireTrigOrd->setTriggerEvent(TriggerEvent::TE_UPDATE);
    break;
  default:
    ndbrequire(false);
    break;
  }

  fireTrigOrd->setNoOfPrimaryKeyWords(noPrimKeyWords);
  fireTrigOrd->setNoOfBeforeValueWords(noBeforeValueWords);
  fireTrigOrd->setNoOfAfterValueWords(noAfterValueWords);

  switch(trigPtr->triggerType) {
  case (TriggerType::SECONDARY_INDEX):
    jam();
    sendSignal(req_struct->TC_ref, GSN_FIRE_TRIG_ORD, 
               signal, FireTrigOrd::SignalLength, JBB);
    break;
  case (TriggerType::SUBSCRIPTION_BEFORE): // Only Suma
    jam();
    // Since only backup uses subscription triggers we 
    // send to backup directly for now
    fireTrigOrd->setGCI(req_struct->gci);
    fireTrigOrd->setHashValue(req_struct->hash_value);
    fireTrigOrd->m_any_value = regOperPtr->m_any_value;
    EXECUTE_DIRECT(trigPtr->m_receiverBlock,
                   GSN_FIRE_TRIG_ORD,
                   signal,
		   FireTrigOrd::SignalLengthSuma);
    break;
  case (TriggerType::SUBSCRIPTION):
    jam();
    // Since only backup uses subscription triggers we 
    // send to backup directly for now
    fireTrigOrd->setGCI(req_struct->gci);
    EXECUTE_DIRECT(trigPtr->m_receiverBlock,
                   GSN_FIRE_TRIG_ORD,
                   signal,
		   FireTrigOrd::SignalWithGCILength);
    break;
  default:
    ndbrequire(false);
    break;
  }
}
Example 8
void
Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
{
  // get build record
  BuildIndexPtr buildPtr;
  buildPtr.i= buildPtrI;
  c_buildIndexList.getPtr(buildPtr);
  const BuildIndxReq* buildReq= (const BuildIndxReq*)buildPtr.p->m_request;
  // get table
  TablerecPtr tablePtr;
  tablePtr.i= buildReq->getTableId();
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);

  const Uint32 firstTupleNo = 0;
  const Uint32 tupheadsize = tablePtr.p->m_offsets[MM].m_fix_header_size;

#ifdef TIME_MEASUREMENT
  MicroSecondTimer start;
  MicroSecondTimer stop;
  NDB_TICKS time_passed;
#endif
  do {
    // get fragment
    FragrecordPtr fragPtr;
    if (buildPtr.p->m_fragNo == MAX_FRAG_PER_NODE) {
      jam();
      // build ready
      buildIndexReply(signal, buildPtr.p);
      c_buildIndexList.release(buildPtr);
      return;
    }
    ndbrequire(buildPtr.p->m_fragNo < MAX_FRAG_PER_NODE);
    fragPtr.i= tablePtr.p->fragrec[buildPtr.p->m_fragNo];
    if (fragPtr.i == RNIL) {
      jam();
      buildPtr.p->m_fragNo++;
      buildPtr.p->m_pageId= 0;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
    // get page
    PagePtr pagePtr;
    if (buildPtr.p->m_pageId >= fragPtr.p->noOfPages) {
      jam();
      buildPtr.p->m_fragNo++;
      buildPtr.p->m_pageId= 0;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    Uint32 realPageId= getRealpid(fragPtr.p, buildPtr.p->m_pageId);
    c_page_pool.getPtr(pagePtr, realPageId);
    Uint32 pageState= pagePtr.p->page_state;
    // skip empty page
    if (pageState == ZEMPTY_MM) {
      jam();
      buildPtr.p->m_pageId++;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    // get tuple
    Uint32 pageIndex = ~0;
    const Tuple_header* tuple_ptr = 0;
    pageIndex = buildPtr.p->m_tupleNo * tupheadsize;
    if (pageIndex + tupheadsize > Fix_page::DATA_WORDS) {
      jam();
      buildPtr.p->m_pageId++;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    tuple_ptr = (Tuple_header*)&pagePtr.p->m_data[pageIndex];
    // skip over free tuple
    if (tuple_ptr->m_header_bits & Tuple_header::FREE) {
      jam();
      buildPtr.p->m_tupleNo++;
      break;
    }
    Uint32 tupVersion= tuple_ptr->get_tuple_version();
    OperationrecPtr pageOperPtr;
    pageOperPtr.i= tuple_ptr->m_operation_ptr_i;
#ifdef TIME_MEASUREMENT
    NdbTick_getMicroTimer(&start);
#endif
    // add to index
    TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
    req->errorCode = RNIL;
    req->tableId = tablePtr.i;
    req->indexId = buildPtr.p->m_indexId;
    req->fragId = tablePtr.p->fragid[buildPtr.p->m_fragNo];
    req->pageId = realPageId;
    req->tupVersion = tupVersion;
    req->opInfo = TuxMaintReq::OpAdd;
    req->tupFragPtrI = fragPtr.i;
    req->fragPageId = buildPtr.p->m_pageId;
    req->pageIndex = pageIndex;

    if (pageOperPtr.i == RNIL)
    {
      EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
		     signal, TuxMaintReq::SignalLength+2);
    }
    else
    {
      /*
      If there is an ongoing operation on the tuple then it is either a
      copy tuple or an original tuple with an ongoing transaction. In
      both cases realPageId and pageOffset refer to the original tuple.
      The tuple address stored in TUX will always be the original tuple
      but with the tuple version of the tuple we found.

      This is necessary to avoid having to update TUX at abort of
      update. If an update aborts then the copy tuple is copied to
      the original tuple. The build will however have found that
      tuple as a copy tuple. The original tuple is stable and is thus
      preferable to store in TUX.
      */
      jam();

      /**
       * Since copy tuples can no longer be found on real pages,
       * we build all versions of the tuple here.
       *
       * Note: only "real" tupVersions should be added,
       *       i.e. deletes should not be added
       *       (unless it is the first op, in which case the "original"
       *       version should be added).
       */
      do 
      {
	c_operation_pool.getPtr(pageOperPtr);
	if(pageOperPtr.p->op_struct.op_type != ZDELETE ||
	   pageOperPtr.p->is_first_operation())
	{
	  req->errorCode = RNIL;
	  req->tupVersion= pageOperPtr.p->tupVersion;
	  EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
			 signal, TuxMaintReq::SignalLength+2);
	}
	else
	{
	  req->errorCode= 0;
	}
	pageOperPtr.i= pageOperPtr.p->prevActiveOp;
      } while(req->errorCode == 0 && pageOperPtr.i != RNIL);
    } 
    
    jamEntry();
    if (req->errorCode != 0) {
      switch (req->errorCode) {
      case TuxMaintReq::NoMemError:
        jam();
        buildPtr.p->m_errorCode= BuildIndxRef::AllocationFailure;
        break;
      default:
        ndbrequire(false);
        break;
      }
      buildIndexReply(signal, buildPtr.p);
      c_buildIndexList.release(buildPtr);
      return;
    }
#ifdef TIME_MEASUREMENT
    NdbTick_getMicroTimer(&stop);
    time_passed= NdbTick_getMicrosPassed(start, stop);
    if (time_passed < 1000) {
      time_events++;
      tot_time_passed += time_passed;
      if (time_events == number_events) {
        NDB_TICKS mean_time_passed= tot_time_passed /
                                     (NDB_TICKS)number_events;
        ndbout << "Number of events= " << number_events;
        ndbout << " Mean time passed= " << mean_time_passed << endl;
        number_events <<= 1;
        tot_time_passed= (NDB_TICKS)0;
        time_events= 0;
      }
    }
#endif
    // next tuple
    buildPtr.p->m_tupleNo++;
    break;
  } while (0);
  signal->theData[0]= ZBUILD_INDEX;
  signal->theData[1]= buildPtr.i;
  sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
}