Example #1
void
Dbtup::execALTER_TAB_CONF(Signal* signal)
{
  jamEntry();
  AlterTabConf* conf = (AlterTabConf*)signal->getDataPtr();

  BuildIndexPtr buildPtr;
  buildPtr.i = conf->senderData;
  c_buildIndexList.getPtr(buildPtr);


  if (buildPtr.p->m_fragNo == 0)
  {
    jam();
    buildIndexOffline_table_readonly(signal, conf->senderData);
    return;
  }
  else
  {
    jam();
    TablerecPtr tablePtr;
    (void)tablePtr; // hide unused warning
    ndbrequire(buildPtr.p->m_fragNo >= NDB_ARRAY_SIZE(tablePtr.p->fragid));
    buildIndexReply(signal, buildPtr.p);
    c_buildIndexList.release(buildPtr);
    return;
  }
}
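
A note on the ndbrequire in Example #1: tablePtr is declared but never assigned before it appears inside NDB_ARRAY_SIZE(tablePtr.p->fragid). This is safe because NDB_ARRAY_SIZE is a sizeof-based array-size macro and sizeof does not evaluate its operand, so tablePtr.p is never dereferenced; the (void)tablePtr cast only silences the unused-variable warning. A minimal standalone sketch of the same idiom (the macro and structs below are illustrative stand-ins, not the NDB definitions):

#include <cstdio>

// Illustrative stand-in for NDB_ARRAY_SIZE (the usual sizeof-based form).
#define ARRAY_SIZE_SKETCH(x) (sizeof(x) / sizeof((x)[0]))

struct TablerecSketch {
  unsigned fragid[16];            // fixed-size array, like Tablerec::fragid
};

struct TablerecPtrSketch {
  unsigned i;
  TablerecSketch* p;
};

int main()
{
  TablerecPtrSketch tablePtr;     // deliberately left uninitialized
  (void)tablePtr;                 // hide unused warning, as in Example #1
  // sizeof never evaluates its operand, so tablePtr.p is not dereferenced:
  std::printf("%zu\n", ARRAY_SIZE_SKETCH(tablePtr.p->fragid));   // prints 16
  return 0;
}
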
Example #2
void
Dbtup::execALTER_TAB_CONF(Signal* signal)
{
    jamEntry();
    AlterTabConf* conf = (AlterTabConf*)signal->getDataPtr();

    BuildIndexPtr buildPtr;
    buildPtr.i = conf->senderData;
    c_buildIndexList.getPtr(buildPtr);


    if (buildPtr.p->m_fragNo == 0)
    {
        jam();
        buildIndexOffline_table_readonly(signal, conf->senderData);
        return;
    }
    else
    {
        jam();
        ndbrequire(buildPtr.p->m_fragNo >= MAX_FRAG_PER_NODE);
        buildIndexReply(signal, buildPtr.p);
        c_buildIndexList.release(buildPtr);
        return;
    }
}
Example #3
void
Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
{
  // get build record
  BuildIndexPtr buildPtr;
  buildPtr.i= buildPtrI;
  c_buildIndexList.getPtr(buildPtr);
  const BuildIndxImplReq* buildReq= &buildPtr.p->m_request;
  // get table
  TablerecPtr tablePtr;
  tablePtr.i= buildReq->tableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);

  const Uint32 firstTupleNo = 0;
  const Uint32 tupheadsize = tablePtr.p->m_offsets[MM].m_fix_header_size;

#ifdef TIME_MEASUREMENT
  NDB_TICKS start;
  NDB_TICKS stop;
  Uint64 time_passed;
#endif
  do {
    // get fragment
    FragrecordPtr fragPtr;
    if (buildPtr.p->m_fragNo == NDB_ARRAY_SIZE(tablePtr.p->fragrec)) {
      jam();
      // build ready
      buildIndexReply(signal, buildPtr.p);
      c_buildIndexList.release(buildPtr);
      return;
    }
    ndbrequire(buildPtr.p->m_fragNo < NDB_ARRAY_SIZE(tablePtr.p->fragrec));
    fragPtr.i= tablePtr.p->fragrec[buildPtr.p->m_fragNo];
    if (fragPtr.i == RNIL) {
      jam();
      buildPtr.p->m_fragNo++;
      buildPtr.p->m_pageId= 0;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
    // get page
    PagePtr pagePtr;
    if (buildPtr.p->m_pageId >= fragPtr.p->m_max_page_cnt)
    {
      jam();
      buildPtr.p->m_fragNo++;
      buildPtr.p->m_pageId= 0;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    Uint32 realPageId= getRealpidCheck(fragPtr.p, buildPtr.p->m_pageId);
    // skip empty page
    if (realPageId == RNIL) 
    {
      jam();
      goto next_tuple;
    }

    c_page_pool.getPtr(pagePtr, realPageId);

next_tuple:
    // get tuple
    Uint32 pageIndex = ~0;
    const Tuple_header* tuple_ptr = 0;
    pageIndex = buildPtr.p->m_tupleNo * tupheadsize;
    if (pageIndex + tupheadsize > Fix_page::DATA_WORDS) {
      jam();
      buildPtr.p->m_pageId++;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    
    if (realPageId == RNIL)
    {
      jam();
      buildPtr.p->m_tupleNo++;
      break;
    }

    tuple_ptr = (Tuple_header*)&pagePtr.p->m_data[pageIndex];
    // skip over free tuple
    if (tuple_ptr->m_header_bits & Tuple_header::FREE) {
      jam();
      buildPtr.p->m_tupleNo++;
      break;
    }
    Uint32 tupVersion= tuple_ptr->get_tuple_version();
    OperationrecPtr pageOperPtr;
    pageOperPtr.i= tuple_ptr->m_operation_ptr_i;
#ifdef TIME_MEASUREMENT
    start = NdbTick_getCurrentTicks();
#endif
    // add to index
    TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
    req->errorCode = RNIL;
    req->tableId = tablePtr.i;
    req->indexId = buildPtr.p->m_indexId;
    req->fragId = tablePtr.p->fragid[buildPtr.p->m_fragNo];
    req->pageId = realPageId;
    req->tupVersion = tupVersion;
    req->opInfo = TuxMaintReq::OpAdd;
    req->tupFragPtrI = fragPtr.i;
    req->fragPageId = buildPtr.p->m_pageId;
    req->pageIndex = pageIndex;

    if (pageOperPtr.i == RNIL)
    {
      EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
		     signal, TuxMaintReq::SignalLength+2);
    }
    else
    {
      /*
      If there is an ongoing operation on the tuple then it is either a
      copy tuple or an original tuple with an ongoing transaction. In
      both cases realPageId and pageOffset refer to the original tuple.
      The tuple address stored in TUX will always be the original tuple
      but with the tuple version of the tuple we found.

      This is necessary to avoid having to update TUX at abort of
      update. If an update aborts then the copy tuple is copied to
      the original tuple. The build will however have found that
      tuple as a copy tuple. The original tuple is stable and is thus
      preferable to store in TUX.
      */
      jam();

      /**
       * Since copy tuples can no longer be found on real pages,
       * we build all versions of the tuple here.
       *
       * Note that only "real" tupVersions should be added,
       * i.e. deletes should not be added
       * (unless it is the first op, in which case the "original" is added).
       */

      /*
       * Start from the first operation.  This is only to make things
       * clearer; it is not required by the ordered index implementation.
       */
      c_operation_pool.getPtr(pageOperPtr);
      while (pageOperPtr.p->prevActiveOp != RNIL)
      {
        jam();
        pageOperPtr.i = pageOperPtr.p->prevActiveOp;
        c_operation_pool.getPtr(pageOperPtr);
      }
      /*
       * Do not use req->errorCode as global control.
       */
      bool ok = true;
      /*
       * If first operation is an update, add previous version.
       * This version does not appear as the version of any operation.
       * At commit this version is removed by executeTuxCommitTriggers.
       * At abort it is preserved by executeTuxAbortTriggers.
       */
      if (pageOperPtr.p->op_type == ZUPDATE)
      {
        jam();
        req->errorCode = RNIL;
        req->tupVersion =
          decr_tup_version(pageOperPtr.p->op_struct.bit_field.tupVersion);
        EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
                       signal, TuxMaintReq::SignalLength+2);
        ok = (req->errorCode == 0);
      }
      /*
       * Add versions from all operations.
       *
       * Each operation has a tuple version.  For insert and update it
       * is the newly created version.  For delete it is the version
       * deleted.  The existence of operation tuple version implies that
       * a corresponding tuple version exists for TUX to read.
       *
       * We could be in the middle of a commit.  The process here makes
       * no assumptions about operation commit order.  (It should be
       * first to last but this is not the place to assert it).
       *
       * Duplicate versions are possible e.g. a delete in the middle
       * may have same version as the previous operation.  TUX ignores
       * duplicate version errors during index build.
       */
      while (pageOperPtr.i != RNIL && ok)
      {
        jam();
        c_operation_pool.getPtr(pageOperPtr);
        req->errorCode = RNIL;
        req->tupVersion = pageOperPtr.p->op_struct.bit_field.tupVersion;
        EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
                       signal, TuxMaintReq::SignalLength+2);
        pageOperPtr.i = pageOperPtr.p->nextActiveOp;
        ok = (req->errorCode == 0);
      }
    } 
    
    jamEntry();
    if (req->errorCode != 0) {
      switch (req->errorCode) {
      case TuxMaintReq::NoMemError:
        jam();
        buildPtr.p->m_errorCode= BuildIndxImplRef::AllocationFailure;
        break;
      default:
        ndbrequire(false);
        break;
      }
      buildIndexReply(signal, buildPtr.p);
      c_buildIndexList.release(buildPtr);
      return;
    }
#ifdef TIME_MEASUREMENT
    stop = NdbTick_getCurrentTicks();
    time_passed= NdbTick_Elapsed(start, stop).microSec();
    if (time_passed < 1000) {
      time_events++;
      tot_time_passed += time_passed;
      if (time_events == number_events) {
        Uint64 mean_time_passed= tot_time_passed /
                                     (Uint64)number_events;
        ndbout << "Number of events= " << number_events;
        ndbout << " Mean time passed= " << mean_time_passed << endl;
        number_events <<= 1;
        tot_time_passed= 0;
        time_events= 0;
      }
    }
#endif
    // next tuple
    buildPtr.p->m_tupleNo++;
    break;
  } while (0);
  signal->theData[0]= ZBUILD_INDEX;
  signal->theData[1]= buildPtr.i;
  sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
}
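
In Example #3, when the first pending operation on a tuple is an update, the build also registers the previous tuple version, computed with decr_tup_version(). A hedged sketch of that wrap-around version arithmetic; the 15-bit width below is an assumption for illustration only (the real constant is defined elsewhere in DBTUP):

#include <cstdint>
#include <cassert>

// Assumed version width, for illustration only.
static const uint32_t TUP_VERSION_BITS = 15;
static const uint32_t TUP_VERSION_MASK = (1u << TUP_VERSION_BITS) - 1;

// Previous version, wrapping at the mask, so version 0 maps back to the mask.
static uint32_t decr_tup_version_sketch(uint32_t tupVersion)
{
  return (tupVersion - 1) & TUP_VERSION_MASK;
}

int main()
{
  assert(decr_tup_version_sketch(1) == 0);
  assert(decr_tup_version_sketch(0) == TUP_VERSION_MASK);   // wrap-around
  // An update's operation record carries the newly created version; the
  // version the build must also add is the one directly before it:
  uint32_t opVersion = 42;
  assert(decr_tup_version_sketch(opVersion) == 41);
  return 0;
}
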
Example #4
void
Dbtup::execBUILD_INDX_IMPL_REQ(Signal* signal)
{
  jamEntry();
#ifdef TIME_MEASUREMENT
  time_events= 0;
  tot_time_passed= 0;
  number_events= 1;
#endif
  const BuildIndxImplReq* const req =
    (const BuildIndxImplReq*)signal->getDataPtr();
  // get new operation
  BuildIndexPtr buildPtr;
  if (ERROR_INSERTED(4031) || ! c_buildIndexList.seizeFirst(buildPtr)) {
    jam();
    BuildIndexRec buildRec;
    buildRec.m_request = *req;
    buildRec.m_errorCode = BuildIndxImplRef::Busy;
    if (ERROR_INSERTED(4031))
    {
      CLEAR_ERROR_INSERT_VALUE;
    }
    buildIndexReply(signal, &buildRec);
    return;
  }
  buildPtr.p->m_request = *req;
  const BuildIndxImplReq* buildReq = &buildPtr.p->m_request;
  // check
  buildPtr.p->m_errorCode= BuildIndxImplRef::NoError;
  buildPtr.p->m_outstanding = 0;
  do {
    if (buildReq->tableId >= cnoOfTablerec) {
      jam();
      buildPtr.p->m_errorCode= BuildIndxImplRef::InvalidPrimaryTable;
      break;
    }
    TablerecPtr tablePtr;
    tablePtr.i= buildReq->tableId;
    ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
    if (tablePtr.p->tableStatus != DEFINED) {
      jam();
      buildPtr.p->m_errorCode= BuildIndxImplRef::InvalidPrimaryTable;
      break;
    }
    // memory page format
    buildPtr.p->m_build_vs =
      (tablePtr.p->m_attributes[MM].m_no_of_varsize +
       tablePtr.p->m_attributes[MM].m_no_of_dynamic) > 0;
    if (DictTabInfo::isOrderedIndex(buildReq->indexType)) {
      jam();
      const DLList<TupTriggerData>& triggerList = 
	tablePtr.p->tuxCustomTriggers;

      TriggerPtr triggerPtr;
      triggerList.first(triggerPtr);
      while (triggerPtr.i != RNIL) {
	if (triggerPtr.p->indexId == buildReq->indexId) {
	  jam();
	  break;
	}
	triggerList.next(triggerPtr);
      }
      if (triggerPtr.i == RNIL) {
	jam();
	// trigger was not created
        ndbassert(false);
	buildPtr.p->m_errorCode = BuildIndxImplRef::InternalError;
	break;
      }
      buildPtr.p->m_indexId = buildReq->indexId;
      buildPtr.p->m_buildRef = DBTUX;
      AlterIndxImplReq* req = (AlterIndxImplReq*)signal->getDataPtrSend();
      req->indexId = buildReq->indexId;
      req->senderRef = 0;
      req->requestType = AlterIndxImplReq::AlterIndexBuilding;
      EXECUTE_DIRECT(DBTUX, GSN_ALTER_INDX_IMPL_REQ, signal, 
                     AlterIndxImplReq::SignalLength);
    } else if(buildReq->indexId == RNIL) {
      jam();
      // REBUILD of acc
      buildPtr.p->m_indexId = RNIL;
      buildPtr.p->m_buildRef = DBACC;
    } else {
      jam();
      buildPtr.p->m_errorCode = BuildIndxImplRef::InvalidIndexType;
      break;
    }

    // set to first tuple position
    const Uint32 firstTupleNo = 0;
    buildPtr.p->m_fragNo= 0;
    buildPtr.p->m_pageId= 0;
    buildPtr.p->m_tupleNo= firstTupleNo;
    // start build

    bool offline = !!(buildReq->requestType&BuildIndxImplReq::RF_BUILD_OFFLINE);
    if (offline && m_max_parallel_index_build > 1)
    {
      jam();
      buildIndexOffline(signal, buildPtr.i);
    }
    else
    {
      jam();
      buildIndex(signal, buildPtr.i);
    }
    return;
  } while (0);
  // check failed
  buildIndexReply(signal, buildPtr.p);
  c_buildIndexList.release(buildPtr);
}
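
Example #4 begins by trying to seize a build record from c_buildIndexList and replies Busy when that fails (or when error insert 4031 forces the failure path). A minimal sketch of the seize/release free-list pattern behind that check, using plain arrays and indices; the real ArrayPool/DLList classes are more involved and these names are illustrative:

#include <cstdint>
#include <cassert>

struct BuildRecSketch {
  uint32_t nextFree;                         // free-list link
  uint32_t errorCode;
};

template <uint32_t N>
struct PoolSketch {
  static constexpr uint32_t RNIL_SKETCH = 0xffffffffu;
  BuildRecSketch recs[N];
  uint32_t firstFree;

  PoolSketch() : firstFree(0) {
    for (uint32_t i = 0; i < N; i++)
      recs[i].nextFree = (i + 1 < N) ? (i + 1) : RNIL_SKETCH;
  }
  // Returns a record index, or RNIL_SKETCH when the pool is exhausted
  // (the path that replies BuildIndxImplRef::Busy above).
  uint32_t seize() {
    uint32_t i = firstFree;
    if (i != RNIL_SKETCH)
      firstFree = recs[i].nextFree;
    return i;
  }
  void release(uint32_t i) {
    recs[i].nextFree = firstFree;
    firstFree = i;
  }
};

int main()
{
  PoolSketch<2> pool;
  uint32_t a = pool.seize();
  uint32_t b = pool.seize();
  assert(a == 0 && b == 1);
  assert(pool.seize() == PoolSketch<2>::RNIL_SKETCH);   // exhausted -> Busy reply
  pool.release(a);
  assert(pool.seize() == a);                            // released record is reused
  return 0;
}
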
Example #5
void
Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
{
  // get build record
  BuildIndexPtr buildPtr;
  buildPtr.i= buildPtrI;
  c_buildIndexList.getPtr(buildPtr);
  const BuildIndxReq* buildReq= (const BuildIndxReq*)buildPtr.p->m_request;
  // get table
  TablerecPtr tablePtr;
  tablePtr.i= buildReq->getTableId();
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);

  const Uint32 firstTupleNo = 0;
  const Uint32 tupheadsize = tablePtr.p->m_offsets[MM].m_fix_header_size;

#ifdef TIME_MEASUREMENT
  MicroSecondTimer start;
  MicroSecondTimer stop;
  NDB_TICKS time_passed;
#endif
  do {
    // get fragment
    FragrecordPtr fragPtr;
    if (buildPtr.p->m_fragNo == MAX_FRAG_PER_NODE) {
      jam();
      // build ready
      buildIndexReply(signal, buildPtr.p);
      c_buildIndexList.release(buildPtr);
      return;
    }
    ndbrequire(buildPtr.p->m_fragNo < MAX_FRAG_PER_NODE);
    fragPtr.i= tablePtr.p->fragrec[buildPtr.p->m_fragNo];
    if (fragPtr.i == RNIL) {
      jam();
      buildPtr.p->m_fragNo++;
      buildPtr.p->m_pageId= 0;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
    // get page
    PagePtr pagePtr;
    if (buildPtr.p->m_pageId >= fragPtr.p->noOfPages) {
      jam();
      buildPtr.p->m_fragNo++;
      buildPtr.p->m_pageId= 0;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    Uint32 realPageId= getRealpid(fragPtr.p, buildPtr.p->m_pageId);
    c_page_pool.getPtr(pagePtr, realPageId);
    Uint32 pageState= pagePtr.p->page_state;
    // skip empty page
    if (pageState == ZEMPTY_MM) {
      jam();
      buildPtr.p->m_pageId++;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    // get tuple
    Uint32 pageIndex = ~0;
    const Tuple_header* tuple_ptr = 0;
    pageIndex = buildPtr.p->m_tupleNo * tupheadsize;
    if (pageIndex + tupheadsize > Fix_page::DATA_WORDS) {
      jam();
      buildPtr.p->m_pageId++;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    tuple_ptr = (Tuple_header*)&pagePtr.p->m_data[pageIndex];
    // skip over free tuple
    if (tuple_ptr->m_header_bits & Tuple_header::FREE) {
      jam();
      buildPtr.p->m_tupleNo++;
      break;
    }
    Uint32 tupVersion= tuple_ptr->get_tuple_version();
    OperationrecPtr pageOperPtr;
    pageOperPtr.i= tuple_ptr->m_operation_ptr_i;
#ifdef TIME_MEASUREMENT
    NdbTick_getMicroTimer(&start);
#endif
    // add to index
    TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
    req->errorCode = RNIL;
    req->tableId = tablePtr.i;
    req->indexId = buildPtr.p->m_indexId;
    req->fragId = tablePtr.p->fragid[buildPtr.p->m_fragNo];
    req->pageId = realPageId;
    req->tupVersion = tupVersion;
    req->opInfo = TuxMaintReq::OpAdd;
    req->tupFragPtrI = fragPtr.i;
    req->fragPageId = buildPtr.p->m_pageId;
    req->pageIndex = pageIndex;

    if (pageOperPtr.i == RNIL)
    {
      EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
		     signal, TuxMaintReq::SignalLength+2);
    }
    else
    {
      /*
      If there is an ongoing operation on the tuple then it is either a
      copy tuple or an original tuple with an ongoing transaction. In
      both cases realPageId and pageOffset refer to the original tuple.
      The tuple address stored in TUX will always be the original tuple
      but with the tuple version of the tuple we found.

      This is necessary to avoid having to update TUX at abort of
      update. If an update aborts then the copy tuple is copied to
      the original tuple. The build will however have found that
      tuple as a copy tuple. The original tuple is stable and is thus
      preferable to store in TUX.
      */
      jam();

      /**
       * Since copy tuples can no longer be found on real pages,
       * we build all versions of the tuple here.
       *
       * Note that only "real" tupVersions should be added,
       * i.e. deletes should not be added
       * (unless it is the first op, in which case the "original" is added).
       */
      do 
      {
	c_operation_pool.getPtr(pageOperPtr);
	if(pageOperPtr.p->op_struct.op_type != ZDELETE ||
	   pageOperPtr.p->is_first_operation())
	{
	  req->errorCode = RNIL;
	  req->tupVersion= pageOperPtr.p->tupVersion;
	  EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
			 signal, TuxMaintReq::SignalLength+2);
	}
	else
	{
	  req->errorCode= 0;
	}
	pageOperPtr.i= pageOperPtr.p->prevActiveOp;
      } while(req->errorCode == 0 && pageOperPtr.i != RNIL);
    } 
    
    jamEntry();
    if (req->errorCode != 0) {
      switch (req->errorCode) {
      case TuxMaintReq::NoMemError:
        jam();
        buildPtr.p->m_errorCode= BuildIndxRef::AllocationFailure;
        break;
      default:
        ndbrequire(false);
        break;
      }
      buildIndexReply(signal, buildPtr.p);
      c_buildIndexList.release(buildPtr);
      return;
    }
#ifdef TIME_MEASUREMENT
    NdbTick_getMicroTimer(&stop);
    time_passed= NdbTick_getMicrosPassed(start, stop);
    if (time_passed < 1000) {
      time_events++;
      tot_time_passed += time_passed;
      if (time_events == number_events) {
        NDB_TICKS mean_time_passed= tot_time_passed /
                                     (NDB_TICKS)number_events;
        ndbout << "Number of events= " << number_events;
        ndbout << " Mean time passed= " << mean_time_passed << endl;
        number_events <<= 1;
        tot_time_passed= (NDB_TICKS)0;
        time_events= 0;
      }
    }
#endif
    // next tuple
    buildPtr.p->m_tupleNo++;
    break;
  } while (0);
  signal->theData[0]= ZBUILD_INDEX;
  signal->theData[1]= buildPtr.i;
  sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
}
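
The TIME_MEASUREMENT blocks in Examples #3 and #5 keep a running sum of per-tuple index-insert times and print a mean whenever the event count reaches a threshold, then double the threshold and reset the counters. A standalone sketch of that exponentially growing reporting window; the fake durations below only exist to drive the loop:

#include <cstdint>
#include <iostream>

int main()
{
  uint64_t tot_time_passed = 0;
  uint32_t time_events = 0;
  uint32_t number_events = 1;

  for (uint32_t i = 0; i < 20; i++) {
    uint64_t time_passed = 100 + i;          // stand-in for a measured duration
    if (time_passed < 1000) {                // ignore long outliers, as above
      time_events++;
      tot_time_passed += time_passed;
      if (time_events == number_events) {
        uint64_t mean_time_passed = tot_time_passed / number_events;
        std::cout << "Number of events= " << number_events
                  << " Mean time passed= " << mean_time_passed << std::endl;
        number_events <<= 1;                 // double the reporting window
        tot_time_passed = 0;
        time_events = 0;
      }
    }
  }
  return 0;
}
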
Example #6
void
Dbtup::execBUILDINDXREQ(Signal* signal)
{
  jamEntry();
#ifdef TIME_MEASUREMENT
  time_events= 0;
  tot_time_passed= 0;
  number_events= 1;
#endif
  // get new operation
  BuildIndexPtr buildPtr;
  if (! c_buildIndexList.seize(buildPtr)) {
    jam();
    BuildIndexRec buildRec;
    memcpy(buildRec.m_request, signal->theData, sizeof(buildRec.m_request));
    buildRec.m_errorCode= BuildIndxRef::Busy;
    buildIndexReply(signal, &buildRec);
    return;
  }
  memcpy(buildPtr.p->m_request,
         signal->theData,
         sizeof(buildPtr.p->m_request));
  // check
  buildPtr.p->m_errorCode= BuildIndxRef::NoError;
  do {
    const BuildIndxReq* buildReq= (const BuildIndxReq*)buildPtr.p->m_request;
    if (buildReq->getTableId() >= cnoOfTablerec) {
      jam();
      buildPtr.p->m_errorCode= BuildIndxRef::InvalidPrimaryTable;
      break;
    }
    TablerecPtr tablePtr;
    tablePtr.i= buildReq->getTableId();
    ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
    if (tablePtr.p->tableStatus != DEFINED) {
      jam();
      buildPtr.p->m_errorCode= BuildIndxRef::InvalidPrimaryTable;
      break;
    }
    // memory page format
    buildPtr.p->m_build_vs =
      tablePtr.p->m_attributes[MM].m_no_of_varsize > 0;
    if (DictTabInfo::isOrderedIndex(buildReq->getIndexType())) {
      jam();
      const DLList<TupTriggerData>& triggerList = 
	tablePtr.p->tuxCustomTriggers;

      TriggerPtr triggerPtr;
      triggerList.first(triggerPtr);
      while (triggerPtr.i != RNIL) {
	if (triggerPtr.p->indexId == buildReq->getIndexId()) {
	  jam();
	  break;
	}
	triggerList.next(triggerPtr);
      }
      if (triggerPtr.i == RNIL) {
	jam();
	// trigger was not created
	buildPtr.p->m_errorCode = BuildIndxRef::InternalError;
	break;
      }
      buildPtr.p->m_indexId = buildReq->getIndexId();
      buildPtr.p->m_buildRef = DBTUX;
    } else if(buildReq->getIndexId() == RNIL) {
      jam();
      // REBUILD of acc
      buildPtr.p->m_indexId = RNIL;
      buildPtr.p->m_buildRef = DBACC;
    } else {
      jam();
      buildPtr.p->m_errorCode = BuildIndxRef::InvalidIndexType;
      break;
    }

    // set to first tuple position
    const Uint32 firstTupleNo = 0;
    buildPtr.p->m_fragNo= 0;
    buildPtr.p->m_pageId= 0;
    buildPtr.p->m_tupleNo= firstTupleNo;
    // start build
    buildIndex(signal, buildPtr.i);
    return;
  } while (0);
  // check failed
  buildIndexReply(signal, buildPtr.p);
  c_buildIndexList.release(buildPtr);
}
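
Both execBUILDINDXREQ and execBUILD_INDX_IMPL_REQ scan the table's tuxCustomTriggers list for the trigger that belongs to the index being built, and fail with InternalError when it is missing. A reduced sketch of that lookup over an intrusive singly-linked list; the types and names are illustrative and not the DLList/TupTriggerData definitions:

#include <cstdint>
#include <cassert>

struct TriggerSketch {
  uint32_t indexId;
  TriggerSketch* next;
};

// Returns the matching trigger, or nullptr ("trigger was not created").
static const TriggerSketch* find_trigger(const TriggerSketch* first,
                                         uint32_t indexId)
{
  for (const TriggerSketch* t = first; t != nullptr; t = t->next) {
    if (t->indexId == indexId)
      return t;
  }
  return nullptr;
}

int main()
{
  TriggerSketch c = {12, nullptr};
  TriggerSketch b = {7, &c};
  TriggerSketch a = {3, &b};
  assert(find_trigger(&a, 7) == &b);
  assert(find_trigger(&a, 99) == nullptr);   // -> InternalError path
  return 0;
}
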