Example #1
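// READ_CONFIG_REQ handler: reads the block's configuration parameters, sizes
// its record pools, allocates work buffers, and acknowledges the sender with
// READ_CONFIG_CONF.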
void
Dbtux::execREAD_CONFIG_REQ(Signal* signal)
{
  jamEntry();
 
  const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
  Uint32 ref = req->senderRef;
  Uint32 senderData = req->senderData;
  ndbrequire(req->noOfParameters == 0);

  Uint32 nIndex;
  Uint32 nFragment;
  Uint32 nAttribute;
  Uint32 nScanOp; 
  Uint32 nScanBatch;
  Uint32 nStatAutoUpdate;
  Uint32 nStatSaveSize;
  Uint32 nStatSaveScale;
  Uint32 nStatTriggerPct;
  Uint32 nStatTriggerScale;
  Uint32 nStatUpdateDelay;

  const ndb_mgm_configuration_iterator * p = 
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

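  // these parameters are mandatory; ndbrequire() fails hard if any is missing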
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_INDEX, &nIndex));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_FRAGMENT, &nFragment));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_ATTRIBUTE, &nAttribute));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_SCAN_OP, &nScanOp));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_BATCH_SIZE, &nScanBatch));

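  // the index statistics parameters are optional; the assignments below act
  // as defaults and are only overwritten if the parameter is configured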
  nStatAutoUpdate = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_AUTO_UPDATE,
                            &nStatAutoUpdate);

  nStatSaveSize = 32768;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_SAVE_SIZE,
                            &nStatSaveSize);

  nStatSaveScale = 100;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_SAVE_SCALE,
                            &nStatSaveScale);

  nStatTriggerPct = 100;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_TRIGGER_PCT,
                            &nStatTriggerPct);

  nStatTriggerScale = 100;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_TRIGGER_SCALE,
                            &nStatTriggerScale);

  nStatUpdateDelay = 60;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_UPDATE_DELAY,
                            &nStatUpdateDelay);

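  // pool sizes derived from the parameters above: descriptor pages for index
  // metadata, scan bound words, and one scan lock per row in a full scan batch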
  const Uint32 nDescPage = (nIndex * DescHeadSize +
                            nAttribute * KeyTypeSize +
                            nAttribute * AttributeHeaderSize +
                            DescPageSize - 1) / DescPageSize;
  const Uint32 nScanBoundWords = nScanOp * ScanBoundSegmentSize * 4;
  const Uint32 nScanLock = nScanOp * nScanBatch;
  const Uint32 nStatOp = 8;
  
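  // size the pools and cache the index statistics settings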
  c_indexPool.setSize(nIndex);
  c_fragPool.setSize(nFragment);
  c_descPagePool.setSize(nDescPage);
  c_fragOpPool.setSize(MaxIndexFragments);
  c_scanOpPool.setSize(nScanOp);
  c_scanBoundPool.setSize(nScanBoundWords);
  c_scanLockPool.setSize(nScanLock);
  c_statOpPool.setSize(nStatOp);
  c_indexStatAutoUpdate = nStatAutoUpdate;
  c_indexStatSaveSize = nStatSaveSize;
  c_indexStatSaveScale = nStatSaveScale;
  c_indexStatTriggerPct = nStatTriggerPct;
  c_indexStatTriggerScale = nStatTriggerScale;
  c_indexStatUpdateDelay = nStatUpdateDelay;

  /*
   * Index id is physical array index.  We seize and initialize all
   * index records now.
   */
  IndexPtr indexPtr;
  while (1) {
    jam();
    refresh_watch_dog();
    c_indexPool.seize(indexPtr);
    if (indexPtr.i == RNIL) {
      jam();
      break;
    }
    new (indexPtr.p) Index();
  }
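  // the loop exits when the pool is exhausted (indexPtr.i == RNIL), i.e. once
  // every index record has been seized and constructed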
  // allocate buffers
  c_ctx.jamBuffer = jamBuffer();
  c_ctx.c_searchKey = (Uint32*)allocRecord("c_searchKey",
                                           sizeof(Uint32), MaxAttrDataSize);
  c_ctx.c_entryKey = (Uint32*)allocRecord("c_entryKey",
                                          sizeof(Uint32), MaxAttrDataSize);

  c_ctx.c_dataBuffer = (Uint32*)allocRecord("c_dataBuffer",
                                            sizeof(Uint64),
                                            (MaxXfrmDataSize + 1) >> 1);

#ifdef VM_TRACE
  c_ctx.c_debugBuffer = (char*)allocRecord("c_debugBuffer",
                                           sizeof(char), DebugBufferBytes);
#endif

  // ack
  ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
  conf->senderRef = reference();
  conf->senderData = senderData;
  sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
             ReadConfigConf::SignalLength, JBB);
}
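
// TUX_MAINT_REQ handler: adds or removes a single entry in the ordered index
// fragment identified by the request.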
void
Dbtux::execTUX_MAINT_REQ(Signal* signal)
{
  jamEntry();
  TuxMaintReq* const sig = (TuxMaintReq*)signal->getDataPtrSend();
  // requests arriving during redo log execution find the index neither
  // Online nor Building and are ignored by the check below
  IndexPtr indexPtr;
  c_indexPool.getPtr(indexPtr, sig->indexId);
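  // indexId is the physical pool index (see execREAD_CONFIG_REQ), so it can
  // be resolved directly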

  if (unlikely(! (indexPtr.p->m_state == Index::Online ||
                  indexPtr.p->m_state == Index::Building)))
  {
    jam();
#ifdef VM_TRACE
    if (debugFlags & DebugMaint) {
      TupLoc tupLoc(sig->pageId, sig->pageIndex);
      debugOut << "opInfo=" << hex << sig->opInfo;
      debugOut << " tableId=" << dec << sig->tableId;
      debugOut << " indexId=" << dec << sig->indexId;
      debugOut << " fragId=" << dec << sig->fragId;
      debugOut << " tupLoc=" << tupLoc;
      debugOut << " tupVersion=" << dec << sig->tupVersion;
      debugOut << " -- ignored at ISP=" << dec << c_internalStartPhase;
      debugOut << " TOS=" << dec << c_typeOfStart;
      debugOut << endl;
    }
#endif
    sig->errorCode = 0;
    return;
  }

  TuxMaintReq reqCopy = *sig;
  TuxMaintReq* const req = &reqCopy;
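  // work on a local copy of the request; the copy (including errorCode) is
  // written back to the signal buffer at the end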
  const Uint32 opCode = req->opInfo & 0xFF;
  // the index was already fetched above; verify it matches the request's table
  ndbrequire(indexPtr.p->m_tableId == req->tableId);
  // get the fragment id
  const Uint32 fragId = req->fragId;
  // get the fragment
  FragPtr fragPtr;
  findFrag(jamBuffer(), *indexPtr.p, fragId, fragPtr);
  ndbrequire(fragPtr.i != RNIL);
  Frag& frag = *fragPtr.p;
  // set up search entry
  TreeEnt ent;
  ent.m_tupLoc = TupLoc(req->pageId, req->pageIndex);
  ent.m_tupVersion = req->tupVersion;
  // set up and read search key
  KeyData searchKey(indexPtr.p->m_keySpec, false, 0);
  searchKey.set_buf(c_ctx.c_searchKey, MaxAttrDataSize << 2);
  readKeyAttrs(c_ctx, frag, ent, searchKey, indexPtr.p->m_numAttrs);
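  // unless the index stores all-NULL keys, an entry whose key attributes are
  // all NULL is never in the tree, so there is nothing to add or remove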
  if (unlikely(! indexPtr.p->m_storeNullKey) &&
      searchKey.get_null_cnt() == indexPtr.p->m_numAttrs) {
    jam();
    return;
  }
#ifdef VM_TRACE
  if (debugFlags & DebugMaint) {
    const Uint32 opFlag = req->opInfo >> 8;
    debugOut << "opCode=" << dec << opCode;
    debugOut << " opFlag=" << dec << opFlag;
    debugOut << " tableId=" << dec << req->tableId;
    debugOut << " indexId=" << dec << req->indexId;
    debugOut << " fragId=" << dec << req->fragId;
    debugOut << " entry=" << ent;
    debugOut << endl;
  }
#endif
  // do the operation
  req->errorCode = 0;
  TreePos treePos;
  bool ok;
  switch (opCode) {
  case TuxMaintReq::OpAdd:
    jam();
    ok = searchToAdd(c_ctx, frag, searchKey, ent, treePos);
#ifdef VM_TRACE
    if (debugFlags & DebugMaint) {
      debugOut << treePos << (! ok ? " - error" : "") << endl;
    }
#endif
    if (! ok) {
      jam();
      // a failed search is an error only for an Online index; during build it is ignored
      if (indexPtr.p->m_state == Index::Online) {
        jam();
        req->errorCode = TuxMaintReq::SearchError;
      }
      break;
    }
    /*
     * At most one new node is inserted in the operation.  Pre-allocate
     * it so that the operation cannot fail.
     */
    if (frag.m_freeLoc == NullTupLoc) {
      jam();
      NodeHandle node(frag);
      req->errorCode = allocNode(c_ctx, node);
      if (req->errorCode != 0) {
        jam();
        break;
      }
      frag.m_freeLoc = node.m_loc;
      ndbrequire(frag.m_freeLoc != NullTupLoc);
    }
    treeAdd(c_ctx, frag, treePos, ent);
    frag.m_entryCount++;
    frag.m_entryBytes += searchKey.get_data_len();
    frag.m_entryOps++;
    break;
  case TuxMaintReq::OpRemove:
    jam();
    ok = searchToRemove(c_ctx, frag, searchKey, ent, treePos);
#ifdef VM_TRACE
    if (debugFlags & DebugMaint) {
      debugOut << treePos << (! ok ? " - error" : "") << endl;
    }
#endif
    if (! ok) {
      jam();
      // a failed search is an error only for an Online index; during build it is ignored
      if (indexPtr.p->m_state == Index::Online) {
        jam();
        req->errorCode = TuxMaintReq::SearchError;
      }
      break;
    }
    treeRemove(frag, treePos);
    ndbrequire(frag.m_entryCount != 0);
    frag.m_entryCount--;
    frag.m_entryBytes -= searchKey.get_data_len();
    frag.m_entryOps++;
    break;
  default:
    ndbrequire(false);
    break;
  }
#ifdef VM_TRACE
  if (debugFlags & DebugTree) {
    printTree(signal, frag, debugOut);
  }
#endif
  // copy back
  *sig = *req;

  //ndbrequire(c_keyAttrs[0] == c_keyAttrs[1]);
  //ndbrequire(c_sqlCmp[0] == c_sqlCmp[1]);
  //ndbrequire(c_searchKey[0] == c_searchKey[1]);
  //ndbrequire(c_entryKey[0] == c_entryKey[1]);
  //ndbrequire(c_dataBuffer[0] == c_dataBuffer[1]);
}