Example #1
0
void Dbdih::initData() 
{
  cpageFileSize = ZPAGEREC;

  // Records with constant sizes
  createReplicaRecord = (CreateReplicaRecord*)
    allocRecord("CreateReplicaRecord", sizeof(CreateReplicaRecord),
                 ZCREATE_REPLICA_FILE_SIZE);

  nodeGroupRecord = (NodeGroupRecord*)
    allocRecord("NodeGroupRecord", sizeof(NodeGroupRecord), MAX_NDB_NODES);

  nodeRecord = (NodeRecord*)
    allocRecord("NodeRecord", sizeof(NodeRecord), MAX_NDB_NODES);

  Uint32 i;
  for(i = 0; i<MAX_NDB_NODES; i++){
    new (&nodeRecord[i]) NodeRecord();
  }
  Uint32 max_takeover_threads = MAX(MAX_NDB_NODES,
                                    ZMAX_TAKE_OVER_THREADS);
  c_takeOverPool.setSize(max_takeover_threads);
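  // Construct every TakeOverRecord in the pool (seize all into the
  // master-active list, placement-new each), then release them back so the
  // pool starts out fully initialized and free.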
  {
    Ptr<TakeOverRecord> ptr;
    while (c_masterActiveTakeOverList.seizeFirst(ptr))
    {
      new (ptr.p) TakeOverRecord;
    }
    while (c_masterActiveTakeOverList.first(ptr))
    {
      releaseTakeOver(ptr, true);
    }
  }
  
  waitGCPProxyPool.setSize(ZPROXY_FILE_SIZE);
  waitGCPMasterPool.setSize(ZPROXY_MASTER_FILE_SIZE);

  c_dictLockSlavePool.setSize(1); // assert single usage
  c_dictLockSlavePtrI_nodeRestart = RNIL;

  cgcpOrderBlocked = 0;
  c_lcpState.ctcCounter = 0;
  c_lcpState.m_lcp_trylock_timeout = 0;
  cwaitLcpSr       = false;
  c_blockCommit    = false;
  c_blockCommitNo  = 1;
  cntrlblockref    = RNIL;
  c_set_initial_start_flag = FALSE;
  c_sr_wait_to = false;
  c_2pass_inr = false;
  c_handled_master_take_over_copy_gci = 0;
  
  c_lcpTabDefWritesControl.init(MAX_CONCURRENT_LCP_TAB_DEF_FLUSHES);
}//Dbdih::initData()
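Example #2
0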
    StatusWith<DiskLoc> RecordStoreV1Base::insertRecord( OperationContext* txn,
                                                         const DocWriter* doc,
                                                         bool enforceQuota ) {
        int docSize = doc->documentSize();
        if ( docSize < 4 ) {
            return StatusWith<DiskLoc>( ErrorCodes::InvalidLength,
                                        "record has to be >= 4 bytes" );
        }
        int lenWHdr = docSize + Record::HeaderSize;
        if ( doc->addPadding() )
            lenWHdr = getRecordAllocationSize( lenWHdr );

        StatusWith<DiskLoc> loc = allocRecord( txn, lenWHdr, enforceQuota );
        if ( !loc.isOK() )
            return loc;

        Record *r = recordFor( loc.getValue() );
        fassert( 17319, r->lengthWithHeaders() >= lenWHdr );

        r = reinterpret_cast<Record*>( txn->recoveryUnit()->writingPtr(r, lenWHdr) );
        doc->writeDocument( r->data() );

        _addRecordToRecListInExtent(txn, r, loc.getValue());

        _details->incrementStats( txn, r->netLength(), 1 );

        _paddingFits( txn );

        return loc;
    }
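Example #3
0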
    StatusWith<DiskLoc> RecordStoreV1Base::_insertRecord( OperationContext* txn,
                                                          const char* data,
                                                          int len,
                                                          bool enforceQuota ) {

        int lenWHdr = getRecordAllocationSize( len + Record::HeaderSize );
        fassert( 17208, lenWHdr >= ( len + Record::HeaderSize ) );

        StatusWith<DiskLoc> loc = allocRecord( txn, lenWHdr, enforceQuota );
        if ( !loc.isOK() )
            return loc;

        Record *r = recordFor( loc.getValue() );
        fassert( 17210, r->lengthWithHeaders() >= lenWHdr );

        // copy the data
        r = reinterpret_cast<Record*>( txn->recoveryUnit()->writingPtr(r, lenWHdr) );
        memcpy( r->data(), data, len );

        _addRecordToRecListInExtent(txn, r, loc.getValue());

        _details->incrementStats( txn, r->netLength(), 1 );

        return loc;
    }
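Example #4
0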
StatusWith<RecordId> RecordStoreV1Base::_insertRecord(OperationContext* txn,
                                                      const char* data,
                                                      int len,
                                                      bool enforceQuota) {
    const int lenWHdr = len + MmapV1RecordHeader::HeaderSize;
    const int lenToAlloc = shouldPadInserts() ? quantizeAllocationSpace(lenWHdr) : lenWHdr;
    fassert(17208, lenToAlloc >= lenWHdr);

    StatusWith<DiskLoc> loc = allocRecord(txn, lenToAlloc, enforceQuota);
    if (!loc.isOK())
        return StatusWith<RecordId>(loc.getStatus());

    MmapV1RecordHeader* r = recordFor(loc.getValue());
    fassert(17210, r->lengthWithHeaders() >= lenWHdr);

    // copy the data
    r = reinterpret_cast<MmapV1RecordHeader*>(txn->recoveryUnit()->writingPtr(r, lenWHdr));
    memcpy(r->data(), data, len);

    _addRecordToRecListInExtent(txn, r, loc.getValue());

    _details->incrementStats(txn, r->netLength(), 1);

    return StatusWith<RecordId>(loc.getValue().toRecordId());
}
Example #5
0
void Dbtc::initData() 
{
  capiConnectFilesize = ZAPI_CONNECT_FILESIZE;
  ccacheFilesize = ZAPI_CONNECT_FILESIZE;
  chostFilesize = MAX_NODES;
  cgcpFilesize = ZGCP_FILESIZE;
  cscanrecFileSize = ZSCANREC_FILE_SIZE;
  cscanFragrecFileSize = ZSCAN_FRAGREC_FILE_SIZE;
  ctabrecFilesize = ZTABREC_FILESIZE;
  ctcConnectFilesize = ZTC_CONNECT_FILESIZE;
  cdihblockref = DBDIH_REF;
  cdictblockref = DBDICT_REF;
  clqhblockref = DBLQH_REF;
  cerrorBlockref = NDBCNTR_REF;

  // Records with constant sizes
  tcFailRecord = (TcFailRecord*)allocRecord("TcFailRecord",
					    sizeof(TcFailRecord), 1);

  // Variables
  ctcTimer = 0;

  // Trigger and index pools
  c_theDefinedTriggerPool.setSize(c_maxNumberOfDefinedTriggers);
  c_theFiredTriggerPool.setSize(c_maxNumberOfFiredTriggers);
  c_theIndexPool.setSize(c_maxNumberOfIndexes);
  c_theIndexOperationPool.setSize(c_maxNumberOfIndexOperations);
  c_theAttributeBufferPool.setSize(c_transactionBufferSpace);
  c_firedTriggerHash.setSize((c_maxNumberOfFiredTriggers+10)/10);
}//Dbtc::initData()
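Example #6
0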
StatusWith<RecordId> RecordStoreV1Base::insertRecord(OperationContext* txn,
                                                     const DocWriter* doc,
                                                     bool enforceQuota) {
    int docSize = doc->documentSize();
    if (docSize < 4) {
        return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be >= 4 bytes");
    }
    const int lenWHdr = docSize + MmapV1RecordHeader::HeaderSize;
    if (lenWHdr > MaxAllowedAllocation) {
        return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be <= 16.5MB");
    }
    const int lenToAlloc =
        (doc->addPadding() && shouldPadInserts()) ? quantizeAllocationSpace(lenWHdr) : lenWHdr;

    StatusWith<DiskLoc> loc = allocRecord(txn, lenToAlloc, enforceQuota);
    if (!loc.isOK())
        return StatusWith<RecordId>(loc.getStatus());

    MmapV1RecordHeader* r = recordFor(loc.getValue());
    fassert(17319, r->lengthWithHeaders() >= lenWHdr);

    r = reinterpret_cast<MmapV1RecordHeader*>(txn->recoveryUnit()->writingPtr(r, lenWHdr));
    doc->writeDocument(r->data());

    _addRecordToRecListInExtent(txn, r, loc.getValue());

    _details->incrementStats(txn, r->netLength(), 1);

    return StatusWith<RecordId>(loc.getValue().toRecordId());
}
Example #7
0
void Dbdih::initData()
{
    cpageFileSize = ZPAGEREC;

    // Records with constant sizes
    createReplicaRecord = (CreateReplicaRecord*)
                          allocRecord("CreateReplicaRecord", sizeof(CreateReplicaRecord),
                                      ZCREATE_REPLICA_FILE_SIZE);

    nodeGroupRecord = (NodeGroupRecord*)
                      allocRecord("NodeGroupRecord", sizeof(NodeGroupRecord), MAX_NDB_NODES);

    nodeRecord = (NodeRecord*)
                 allocRecord("NodeRecord", sizeof(NodeRecord), MAX_NDB_NODES);

    Uint32 i;
    for(i = 0; i<MAX_NDB_NODES; i++) {
        new (&nodeRecord[i]) NodeRecord();
    }

    takeOverRecord = (TakeOverRecord*)allocRecord("TakeOverRecord",
                     sizeof(TakeOverRecord),
                     MAX_NDB_NODES);
    for(i = 0; i<MAX_NDB_NODES; i++)
        new (&takeOverRecord[i]) TakeOverRecord();

    waitGCPProxyPool.setSize(ZPROXY_FILE_SIZE);
    waitGCPMasterPool.setSize(ZPROXY_MASTER_FILE_SIZE);

    c_dictLockSlavePool.setSize(1); // assert single usage
    c_dictLockSlavePtrI_nodeRestart = RNIL;

    cgcpOrderBlocked = 0;
    c_lcpState.ctcCounter = 0;
    cwaitLcpSr       = false;
    c_blockCommit    = false;
    c_blockCommitNo  = 1;
    cntrlblockref    = RNIL;
    c_set_initial_start_flag = FALSE;
}//Dbdih::initData()
Example #8
0
void Dbdih::initRecords()
{
    // Records with dynamic sizes
    apiConnectRecord = (ApiConnectRecord*)
                       allocRecord("ApiConnectRecord",
                                   sizeof(ApiConnectRecord),
                                   capiConnectFileSize);

    connectRecord = (ConnectRecord*)allocRecord("ConnectRecord",
                    sizeof(ConnectRecord),
                    cconnectFileSize);

    fileRecord = (FileRecord*)allocRecord("FileRecord",
                                          sizeof(FileRecord),
                                          cfileFileSize);

    fragmentstore = (Fragmentstore*)allocRecord("Fragmentstore",
                    sizeof(Fragmentstore),
                    cfragstoreFileSize);

    pageRecord = (PageRecord*)allocRecord("PageRecord",
                                          sizeof(PageRecord),
                                          cpageFileSize);

    replicaRecord = (ReplicaRecord*)allocRecord("ReplicaRecord",
                    sizeof(ReplicaRecord),
                    creplicaFileSize);

    tabRecord = (TabRecord*)allocRecord("TabRecord",
                                        sizeof(TabRecord),
                                        ctabFileSize);

    // Initialize BAT for interface to file system
    NewVARIABLE* bat = allocateBat(22);
    bat[1].WA = &pageRecord->word[0];
    bat[1].nrr = cpageFileSize;
    bat[1].ClusterSize = sizeof(PageRecord);
    bat[1].bits.q = 11;
    bat[1].bits.v = 5;
    bat[20].WA = &sysfileData[0];
    bat[20].nrr = 1;
    bat[20].ClusterSize = sizeof(sysfileData);
    bat[20].bits.q = 7;
    bat[20].bits.v = 5;
    bat[21].WA = &sysfileDataToFile[0];
    bat[21].nrr = 1;
    bat[21].ClusterSize = sizeof(sysfileDataToFile);
    bat[21].bits.q = 7;
    bat[21].bits.v = 5;
}//Dbdih::initRecords()
Example #9
0
// GSN_READ_CONFIG_REQ
void
DblqhProxy::callREAD_CONFIG_REQ(Signal* signal)
{
  const ReadConfigReq* req = (const ReadConfigReq*)signal->getDataPtr();
  ndbrequire(req->noOfParameters == 0);

  const ndb_mgm_configuration_iterator * p = 
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);
  
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_TABLE, &c_tableRecSize));
  c_tableRec = (Uint8*)allocRecord("TableRec", sizeof(Uint8), c_tableRecSize);
  D("proxy:" << V(c_tableRecSize));
  Uint32 i;
  for (i = 0; i < c_tableRecSize; i++)
    c_tableRec[i] = 0;
  backREAD_CONFIG_REQ(signal);
}
Example #10
0
void Dbacc::initRecords() 
{
  // Records with dynamic sizes
  page8 = (Page8*)allocRecord("Page8",
			      sizeof(Page8), 
			      cpagesize,
			      false,
            CFG_DB_INDEX_MEM);

  operationrec = (Operationrec*)allocRecord("Operationrec",
					    sizeof(Operationrec),
					    coprecsize);

  dirRange = (DirRange*)allocRecord("DirRange",
				    sizeof(DirRange), 
				    cdirrangesize);

  directoryarray = (Directoryarray*)allocRecord("Directoryarray",
						sizeof(Directoryarray), 
						cdirarraysize);

  fragmentrec = (Fragmentrec*)allocRecord("Fragmentrec",
					  sizeof(Fragmentrec), 
					  cfragmentsize);

  overflowRecord = (OverflowRecord*)allocRecord("OverflowRecord",
						sizeof(OverflowRecord),
						coverflowrecsize);

  scanRec = (ScanRec*)allocRecord("ScanRec",
				  sizeof(ScanRec), 
				  cscanRecSize);

  tabrec = (Tabrec*)allocRecord("Tabrec",
				sizeof(Tabrec),
				ctablesize);
}//Dbacc::initRecords()
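Example #11
0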
    StatusWith<DiskLoc> RecordStoreV1Base::insertRecord( TransactionExperiment* txn,
                                                         const DocWriter* doc,
                                                         int quotaMax ) {
        int lenWHdr = doc->documentSize() + Record::HeaderSize;
        if ( doc->addPadding() )
            lenWHdr = getRecordAllocationSize( lenWHdr );

        StatusWith<DiskLoc> loc = allocRecord( txn, lenWHdr, quotaMax );
        if ( !loc.isOK() )
            return loc;

        Record *r = recordFor( loc.getValue() );
        fassert( 17319, r->lengthWithHeaders() >= lenWHdr );

        r = reinterpret_cast<Record*>( txn->writingPtr(r, lenWHdr) );
        doc->writeDocument( r->data() );

        _addRecordToRecListInExtent(txn, r, loc.getValue());

        _details->incrementStats( txn, r->netLength(), 1 );

        return loc;
    }
Example #12
0
void Dblqh::initRecords() 
{
  // Records with dynamic sizes
  addFragRecord = (AddFragRecord*)allocRecord("AddFragRecord",
					      sizeof(AddFragRecord), 
					      caddfragrecFileSize);
  attrbuf = (Attrbuf*)allocRecord("Attrbuf",
				  sizeof(Attrbuf), 
				  cattrinbufFileSize);

  databuf = (Databuf*)allocRecord("Databuf",
				  sizeof(Databuf), 
				  cdatabufFileSize);

  gcpRecord = (GcpRecord*)allocRecord("GcpRecord",
				      sizeof(GcpRecord), 
				      cgcprecFileSize);

  hostRecord = (HostRecord*)allocRecord("HostRecord",
					sizeof(HostRecord), 
					chostFileSize);

  lcpRecord = (LcpRecord*)allocRecord("LcpRecord",
				      sizeof(LcpRecord), 
				      clcpFileSize);

  for(Uint32 i = 0; i<clcpFileSize; i++){
    new (&lcpRecord[i])LcpRecord();
  }

  logPartRecord = (LogPartRecord*)allocRecord("LogPartRecord",
					      sizeof(LogPartRecord), 
					      clogPartFileSize);

  logFileRecord = (LogFileRecord*)allocRecord("LogFileRecord",
					      sizeof(LogFileRecord),
					      clogFileFileSize);

  logFileOperationRecord = (LogFileOperationRecord*)
    allocRecord("LogFileOperationRecord", 
		sizeof(LogFileOperationRecord), 
		clfoFileSize);

  logPageRecord =
    (LogPageRecord*)allocRecordAligned("LogPageRecord",
                                       sizeof(LogPageRecord),
                                       clogPageFileSize,
                                       &logPageRecordUnaligned,
                                       NDB_O_DIRECT_WRITE_ALIGNMENT,
                                       false);

  pageRefRecord = (PageRefRecord*)allocRecord("PageRefRecord",
					      sizeof(PageRefRecord),
					      cpageRefFileSize);

  cscanNoFreeRec = cscanrecFileSize;
  c_scanRecordPool.setSize(cscanrecFileSize);
  c_scanTakeOverHash.setSize(64);

  tablerec = (Tablerec*)allocRecord("Tablerec",
				    sizeof(Tablerec), 
				    ctabrecFileSize);

  tcConnectionrec = (TcConnectionrec*)allocRecord("TcConnectionrec",
						  sizeof(TcConnectionrec),
						  ctcConnectrecFileSize);
  
  m_commitAckMarkerPool.setSize(ctcConnectrecFileSize);
  m_commitAckMarkerHash.setSize(1024);
  
  tcNodeFailRecord = (TcNodeFailRecord*)allocRecord("TcNodeFailRecord",
						    sizeof(TcNodeFailRecord),
						    ctcNodeFailrecFileSize);
  
  /*
  ndbout << "FRAGREC SIZE = " << sizeof(Fragrecord) << endl;
  ndbout << "TAB SIZE = " << sizeof(Tablerec) << endl;
  ndbout << "GCP SIZE = " << sizeof(GcpRecord) << endl;
  ndbout << "LCP SIZE = " << sizeof(LcpRecord) << endl;
  ndbout << "LCPLOC SIZE = " << sizeof(LcpLocRecord) << endl;
  ndbout << "LOGPART SIZE = " << sizeof(LogPartRecord) << endl;
  ndbout << "LOGFILE SIZE = " << sizeof(LogFileRecord) << endl;
  ndbout << "TC SIZE = " << sizeof(TcConnectionrec) << endl;
  ndbout << "HOST SIZE = " << sizeof(HostRecord) << endl;
  ndbout << "LFO SIZE = " << sizeof(LogFileOperationRecord) << endl;
  ndbout << "PR SIZE = " << sizeof(PageRefRecord) << endl;
  ndbout << "SCAN SIZE = " << sizeof(ScanRecord) << endl;
*/

  // Initialize BAT for interface to file system
  NewVARIABLE* bat = allocateBat(2);
  bat[1].WA = &logPageRecord->logPageWord[0];
  bat[1].nrr = clogPageFileSize;
  bat[1].ClusterSize = sizeof(LogPageRecord);
  bat[1].bits.q = ZTWOLOG_PAGE_SIZE;
  bat[1].bits.v = 5;
}//Dblqh::initRecords()
Example #13
0
void
Dbtux::execREAD_CONFIG_REQ(Signal* signal)
{
  jamEntry();
 
  const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
  Uint32 ref = req->senderRef;
  Uint32 senderData = req->senderData;
  ndbrequire(req->noOfParameters == 0);

  Uint32 nIndex;
  Uint32 nFragment;
  Uint32 nAttribute;
  Uint32 nScanOp; 
  Uint32 nScanBatch;
  Uint32 nStatAutoUpdate;
  Uint32 nStatSaveSize;
  Uint32 nStatSaveScale;
  Uint32 nStatTriggerPct;
  Uint32 nStatTriggerScale;
  Uint32 nStatUpdateDelay;

  const ndb_mgm_configuration_iterator * p = 
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_INDEX, &nIndex));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_FRAGMENT, &nFragment));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_ATTRIBUTE, &nAttribute));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_SCAN_OP, &nScanOp));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_BATCH_SIZE, &nScanBatch));

  nStatAutoUpdate = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_AUTO_UPDATE,
                            &nStatAutoUpdate);

  nStatSaveSize = 32768;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_SAVE_SIZE,
                            &nStatSaveSize);

  nStatSaveScale = 100;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_SAVE_SCALE,
                            &nStatSaveScale);

  nStatTriggerPct = 100;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_TRIGGER_PCT,
                            &nStatTriggerPct);

  nStatTriggerScale = 100;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_TRIGGER_SCALE,
                            &nStatTriggerScale);

  nStatUpdateDelay = 60;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_UPDATE_DELAY,
                            &nStatUpdateDelay);

  const Uint32 nDescPage =
    (nIndex * DescHeadSize +
     nAttribute * KeyTypeSize +
     nAttribute * AttributeHeaderSize + DescPageSize - 1) / DescPageSize;
  const Uint32 nScanBoundWords = nScanOp * ScanBoundSegmentSize * 4;
  const Uint32 nScanLock = nScanOp * nScanBatch;
  const Uint32 nStatOp = 8;
  
  c_indexPool.setSize(nIndex);
  c_fragPool.setSize(nFragment);
  c_descPagePool.setSize(nDescPage);
  c_fragOpPool.setSize(MaxIndexFragments);
  c_scanOpPool.setSize(nScanOp);
  c_scanBoundPool.setSize(nScanBoundWords);
  c_scanLockPool.setSize(nScanLock);
  c_statOpPool.setSize(nStatOp);
  c_indexStatAutoUpdate = nStatAutoUpdate;
  c_indexStatSaveSize = nStatSaveSize;
  c_indexStatSaveScale = nStatSaveScale;
  c_indexStatTriggerPct = nStatTriggerPct;
  c_indexStatTriggerScale = nStatTriggerScale;
  c_indexStatUpdateDelay = nStatUpdateDelay;

  /*
   * Index id is physical array index.  We seize and initialize all
   * index records now.
   */
  IndexPtr indexPtr;
  while (1) {
    jam();
    refresh_watch_dog();
    c_indexPool.seize(indexPtr);
    if (indexPtr.i == RNIL) {
      jam();
      break;
    }
    new (indexPtr.p) Index();
  }
  // allocate buffers
  c_ctx.jamBuffer = jamBuffer();
  c_ctx.c_searchKey = (Uint32*)allocRecord("c_searchKey", sizeof(Uint32), MaxAttrDataSize);
  c_ctx.c_entryKey = (Uint32*)allocRecord("c_entryKey", sizeof(Uint32), MaxAttrDataSize);

  c_ctx.c_dataBuffer = (Uint32*)allocRecord("c_dataBuffer", sizeof(Uint64), (MaxXfrmDataSize + 1) >> 1);

#ifdef VM_TRACE
  c_ctx.c_debugBuffer = (char*)allocRecord("c_debugBuffer", sizeof(char), DebugBufferBytes);
#endif

  // ack
  ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
  conf->senderRef = reference();
  conf->senderData = senderData;
  sendSignal(ref, GSN_READ_CONFIG_CONF, signal, 
	     ReadConfigConf::SignalLength, JBB);
}
Example #14
0
void Dbacc::initRecords() 
{
  {
    AllocChunk chunks[16];
    const Uint32 pages = (cpagesize + 3) / 4;
    const Uint32 chunkcnt = allocChunks(chunks, 16, RT_DBTUP_PAGE, pages,
                                        CFG_DB_INDEX_MEM);

    /**
     * Set base ptr
     */
    Ptr<GlobalPage> pagePtr;
    m_shared_page_pool.getPtr(pagePtr, chunks[0].ptrI);
    page8 = (Page8*)pagePtr.p;

    /**
     * 1) Build free-list per chunk
     * 2) Add chunks to cfirstfreepage-list
     */
    cfirstfreepage = RNIL;
    cpagesize = 0;
    cpageCount = 0;
    for (Int32 i = chunkcnt - 1; i >= 0; i--)
    {
      Ptr<GlobalPage> pagePtr;
      m_shared_page_pool.getPtr(pagePtr, chunks[i].ptrI);
      const Uint32 cnt = 4 * chunks[i].cnt; // 4 8k per 32k
      Page8* base = (Page8*)pagePtr.p;
      ndbrequire(base >= page8);
      const Uint32 ptrI = Uint32(base - page8);
      for (Uint32 j = 0; j < cnt; j++)
      {
        refresh_watch_dog();
        base[j].word32[0] = ptrI + j + 1;
      }

      base[cnt-1].word32[0] = cfirstfreepage;
      cfirstfreepage = ptrI;

      cpageCount += cnt;
      if (ptrI + cnt > cpagesize)
        cpagesize = ptrI + cnt;
    }
  }

  operationrec = (Operationrec*)allocRecord("Operationrec",
					    sizeof(Operationrec),
					    coprecsize);

  dirRange = (DirRange*)allocRecord("DirRange",
				    sizeof(DirRange), 
				    cdirrangesize);

  directoryarray = (Directoryarray*)allocRecord("Directoryarray",
						sizeof(Directoryarray), 
						cdirarraysize);

  fragmentrec = (Fragmentrec*)allocRecord("Fragmentrec",
					  sizeof(Fragmentrec), 
					  cfragmentsize);

  overflowRecord = (OverflowRecord*)allocRecord("OverflowRecord",
						sizeof(OverflowRecord),
						coverflowrecsize);

  scanRec = (ScanRec*)allocRecord("ScanRec",
				  sizeof(ScanRec), 
				  cscanRecSize);

  tabrec = (Tabrec*)allocRecord("Tabrec",
				sizeof(Tabrec),
				ctablesize);
}//Dbacc::initRecords()
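Example #15
0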
void Dblqh::initRecords() 
{
  // Records with dynamic sizes
  addFragRecord = (AddFragRecord*)allocRecord("AddFragRecord",
					      sizeof(AddFragRecord), 
					      caddfragrecFileSize);

  gcpRecord = (GcpRecord*)allocRecord("GcpRecord",
				      sizeof(GcpRecord), 
				      cgcprecFileSize);

  hostRecord = (HostRecord*)allocRecord("HostRecord",
					sizeof(HostRecord), 
					chostFileSize);

  lcpRecord = (LcpRecord*)allocRecord("LcpRecord",
				      sizeof(LcpRecord), 
				      clcpFileSize);

  for(Uint32 i = 0; i<clcpFileSize; i++){
    new (&lcpRecord[i])LcpRecord();
  }

  logPartRecord = (LogPartRecord*)allocRecord("LogPartRecord",
					      sizeof(LogPartRecord), 
					      NDB_MAX_LOG_PARTS);

  logFileRecord = (LogFileRecord*)allocRecord("LogFileRecord",
					      sizeof(LogFileRecord),
					      clogFileFileSize);

  logFileOperationRecord = (LogFileOperationRecord*)
    allocRecord("LogFileOperationRecord", 
		sizeof(LogFileOperationRecord), 
		clfoFileSize);

  {
    AllocChunk chunks[16];
    const Uint32 chunkcnt = allocChunks(chunks, 16, RG_FILE_BUFFERS,
                                        clogPageFileSize, CFG_DB_REDO_BUFFER);

    {
      Ptr<GlobalPage> pagePtr;
      m_shared_page_pool.getPtr(pagePtr, chunks[0].ptrI);
      logPageRecord = (LogPageRecord*)pagePtr.p;
    }

    cfirstfreeLogPage = RNIL;
    clogPageFileSize = 0;
    clogPageCount = 0;
    for (Int32 i = chunkcnt - 1; i >= 0; i--)
    {
      const Uint32 cnt = chunks[i].cnt;
      ndbrequire(cnt != 0);

      Ptr<GlobalPage> pagePtr;
      m_shared_page_pool.getPtr(pagePtr, chunks[i].ptrI);
      LogPageRecord * base = (LogPageRecord*)pagePtr.p;
      ndbrequire(base >= logPageRecord);
      const Uint32 ptrI = Uint32(base - logPageRecord);

      for (Uint32 j = 0; j<cnt; j++)
      {
        refresh_watch_dog();
        base[j].logPageWord[ZNEXT_PAGE] = ptrI + j + 1;
        base[j].logPageWord[ZPOS_IN_FREE_LIST]= 1;
        base[j].logPageWord[ZPOS_IN_WRITING]= 0;
      }

      base[cnt-1].logPageWord[ZNEXT_PAGE] = cfirstfreeLogPage;
      cfirstfreeLogPage = ptrI;

      clogPageCount += cnt;
      if (ptrI + cnt > clogPageFileSize)
        clogPageFileSize = ptrI + cnt;
    }
    cnoOfLogPages = clogPageCount;
  }

#ifndef NO_REDO_PAGE_CACHE
  m_redo_page_cache.m_pool.set((RedoCacheLogPageRecord*)logPageRecord,
                               clogPageFileSize);
  m_redo_page_cache.m_hash.setSize(63);

  const Uint32 * base = (Uint32*)logPageRecord;
  const RedoCacheLogPageRecord* tmp1 = (RedoCacheLogPageRecord*)logPageRecord;
  ndbrequire(&base[ZPOS_PAGE_NO] == &tmp1->m_page_no);
  ndbrequire(&base[ZPOS_PAGE_FILE_NO] == &tmp1->m_file_no);
#endif

#ifndef NO_REDO_OPEN_FILE_CACHE
  m_redo_open_file_cache.m_pool.set(logFileRecord, clogFileFileSize);
#endif

  pageRefRecord = (PageRefRecord*)allocRecord("PageRefRecord",
					      sizeof(PageRefRecord),
					      cpageRefFileSize);

  c_scanRecordPool.setSize(cscanrecFileSize);
  c_scanTakeOverHash.setSize(64);

  tablerec = (Tablerec*)allocRecord("Tablerec",
				    sizeof(Tablerec), 
				    ctabrecFileSize);

  tcConnectionrec = (TcConnectionrec*)allocRecord("TcConnectionrec",
						  sizeof(TcConnectionrec),
						  ctcConnectrecFileSize);
  
  m_commitAckMarkerPool.setSize(ctcConnectrecFileSize);
  m_commitAckMarkerHash.setSize(1024);
  
  tcNodeFailRecord = (TcNodeFailRecord*)allocRecord("TcNodeFailRecord",
						    sizeof(TcNodeFailRecord),
						    ctcNodeFailrecFileSize);
  
  /*
  ndbout << "FRAGREC SIZE = " << sizeof(Fragrecord) << endl;
  ndbout << "TAB SIZE = " << sizeof(Tablerec) << endl;
  ndbout << "GCP SIZE = " << sizeof(GcpRecord) << endl;
  ndbout << "LCP SIZE = " << sizeof(LcpRecord) << endl;
  ndbout << "LCPLOC SIZE = " << sizeof(LcpLocRecord) << endl;
  ndbout << "LOGPART SIZE = " << sizeof(LogPartRecord) << endl;
  ndbout << "LOGFILE SIZE = " << sizeof(LogFileRecord) << endl;
  ndbout << "TC SIZE = " << sizeof(TcConnectionrec) << endl;
  ndbout << "HOST SIZE = " << sizeof(HostRecord) << endl;
  ndbout << "LFO SIZE = " << sizeof(LogFileOperationRecord) << endl;
  ndbout << "PR SIZE = " << sizeof(PageRefRecord) << endl;
  ndbout << "SCAN SIZE = " << sizeof(ScanRecord) << endl;
*/

  // Initialize BAT for interface to file system
  NewVARIABLE* bat = allocateBat(2);
  bat[1].WA = &logPageRecord->logPageWord[0];
  bat[1].nrr = clogPageFileSize;
  bat[1].ClusterSize = sizeof(LogPageRecord);
  bat[1].bits.q = ZTWOLOG_PAGE_SIZE;
  bat[1].bits.v = 5;
}//Dblqh::initRecords()
Example #16
0
void Dbtc::initRecords() 
{
  void *p;
  // Records with dynamic sizes
  cacheRecord = (CacheRecord*)allocRecord("CacheRecord",
					  sizeof(CacheRecord), 
					  ccacheFilesize);

  apiConnectRecord = (ApiConnectRecord*)allocRecord("ApiConnectRecord",
						    sizeof(ApiConnectRecord),
						    capiConnectFilesize);

  for(unsigned i = 0; i<capiConnectFilesize; i++) {
    p = &apiConnectRecord[i];
    new (p) ApiConnectRecord(c_theFiredTriggerPool, 
			     c_theIndexOperationPool);
  }
  // Init all fired triggers
  DLFifoList<TcFiredTriggerData> triggers(c_theFiredTriggerPool);
  FiredTriggerPtr tptr;
  while (triggers.seizeLast(tptr) == true) {
    p= tptr.p;
    new (p) TcFiredTriggerData();
  }
  while (triggers.releaseFirst());

  /*
  // Init all index records
  ArrayList<TcIndexData> indexes(c_theIndexPool);
  TcIndexDataPtr iptr;
  while(indexes.seize(iptr) == true) {
    new (iptr.p) TcIndexData(c_theAttrInfoListPool);
  }
  indexes.release();
  */

  // Init all index operation records
  SLList<TcIndexOperation> indexOps(c_theIndexOperationPool);
  TcIndexOperationPtr ioptr;
  while (indexOps.seizeFirst(ioptr) == true) {
    p= ioptr.p;
    new (p) TcIndexOperation(); // TODO : Modify alloc size of c_theAttributeBufferPool
  }
  while (indexOps.releaseFirst());

  c_apiConTimer = (UintR*)allocRecord("ApiConTimer",
				      sizeof(UintR),
				      capiConnectFilesize);
  
  c_apiConTimer_line = (UintR*)allocRecord("ApiConTimer_line",
					   sizeof(UintR),
					   capiConnectFilesize);

  tcConnectRecord = (TcConnectRecord*)allocRecord("TcConnectRecord",
						  sizeof(TcConnectRecord),
						  ctcConnectFilesize);
  
  m_commitAckMarkerPool.setSize(2 * capiConnectFilesize);
  m_commitAckMarkerHash.setSize(1024);
  c_theCommitAckMarkerBufferPool.setSize(4 * capiConnectFilesize);

  hostRecord = (HostRecord*)allocRecord("HostRecord",
					sizeof(HostRecord),
					chostFilesize);

  tableRecord = (TableRecord*)allocRecord("TableRecord",
					  sizeof(TableRecord),
					  ctabrecFilesize);

  scanRecord = (ScanRecord*)allocRecord("ScanRecord",
					sizeof(ScanRecord),
					cscanrecFileSize);


  c_scan_frag_pool.setSize(cscanFragrecFileSize);
  {
    ScanFragRecPtr ptr;
    SLList<ScanFragRec> tmp(c_scan_frag_pool);
    while (tmp.seizeFirst(ptr)) {
      new (ptr.p) ScanFragRec();
    }
    while (tmp.releaseFirst());
  }

  while (indexOps.releaseFirst());
  
  gcpRecord = (GcpRecord*)allocRecord("GcpRecord",
				      sizeof(GcpRecord), 
				      cgcpFilesize);
  
}//Dbtc::initRecords()