void
Ndbfs::execREAD_CONFIG_REQ(Signal* signal)
{
  const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();

  Uint32 ref = req->senderRef;
  Uint32 senderData = req->senderData;

  const ndb_mgm_configuration_iterator * p =
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

  theFileSystemPath.assfmt("%sndb_%u_fs%s", m_ctx.m_config.fileSystemPath(),
                           getOwnNodeId(), DIR_SEPARATOR);
  theBackupFilePath.assign(m_ctx.m_config.backupFilePath());

  theRequestPool = new Pool<Request>;

  m_maxFiles = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_MAX_OPEN_FILES, &m_maxFiles);
  Uint32 noIdleFiles = 27;
  ndb_mgm_get_int_parameter(p, CFG_DB_INITIAL_OPEN_FILES, &noIdleFiles);
  if (noIdleFiles > m_maxFiles && m_maxFiles != 0)
    m_maxFiles = noIdleFiles;

  // Create idle AsyncFiles
  for (Uint32 i = 0; i < noIdleFiles; i++)
  {
    theIdleFiles.push_back(createAsyncFile());
  }

  ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
  conf->senderRef = reference();
  conf->senderData = senderData;
  sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
             ReadConfigConf::SignalLength, JBB);
}
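/*
 * Minimal sketch (not part of the original source) of the config-reading
 * idiom used above and throughout this file: ndb_mgm_get_int_parameter()
 * returns non-zero when the key is absent and leaves the output untouched,
 * so callers pre-seed a default (e.g. noIdleFiles = 27) before the call.
 * The helper name is hypothetical; it assumes the same NDB headers as the
 * function above.
 */
static Uint32
get_u32_or_default(const ndb_mgm_configuration_iterator* it,
                   Uint32 key, Uint32 defaultValue)
{
  Uint32 value = defaultValue;              // keep the default if key is missing
  (void)ndb_mgm_get_int_parameter(it, key, &value);
  return value;
}
/* Hypothetical rewrite of the code above:
 *   Uint32 noIdleFiles = get_u32_or_default(p, CFG_DB_INITIAL_OPEN_FILES, 27);
 */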
void
SimBlockList::load(EmulatorData& data)
{
  noOfBlocks = NO_OF_BLOCKS;
  theList = new SimulatedBlock * [noOfBlocks];
  Dbdict* dbdict = 0;
  Dbdih* dbdih = 0;
  Pgman* pg = 0;
  Lgman* lg = 0;
  Tsman* ts = 0;

  Block_context ctx(*data.theConfiguration, *data.m_mem_manager);

  SimulatedBlock * fs = 0;
  {
    Uint32 dl;
    const ndb_mgm_configuration_iterator * p =
      ctx.m_config.getOwnConfigIterator();
    if(p && !ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &dl) && dl)
    {
      fs = NEW_BLOCK(VoidFs)(ctx);
    }
    else
    {
      fs = NEW_BLOCK(Ndbfs)(ctx);
    }
  }

  theList[0]  = pg = NEW_BLOCK(Pgman)(ctx);
  theList[1]  = lg = NEW_BLOCK(Lgman)(ctx);
  theList[2]  = ts = NEW_BLOCK(Tsman)(ctx, pg, lg);
  theList[3]  = NEW_BLOCK(Dbacc)(ctx);
  theList[4]  = NEW_BLOCK(Cmvmi)(ctx);
  theList[5]  = fs;
  theList[6]  = dbdict = NEW_BLOCK(Dbdict)(ctx);
  theList[7]  = dbdih = NEW_BLOCK(Dbdih)(ctx);
  theList[8]  = NEW_BLOCK(Dblqh)(ctx);
  theList[9]  = NEW_BLOCK(Dbtc)(ctx);
  theList[10] = NEW_BLOCK(Dbtup)(ctx, pg);
  theList[11] = NEW_BLOCK(Ndbcntr)(ctx);
  theList[12] = NEW_BLOCK(Qmgr)(ctx);
  theList[13] = NEW_BLOCK(Trix)(ctx);
  theList[14] = NEW_BLOCK(Backup)(ctx);
  theList[15] = NEW_BLOCK(DbUtil)(ctx);
  theList[16] = NEW_BLOCK(Suma)(ctx);
  theList[17] = NEW_BLOCK(Dbtux)(ctx);
  theList[18] = NEW_BLOCK(Restore)(ctx);
  assert(NO_OF_BLOCKS == 19);
}
// GSN_READ_CONFIG_REQ
void
DblqhProxy::callREAD_CONFIG_REQ(Signal* signal)
{
  const ReadConfigReq* req = (const ReadConfigReq*)signal->getDataPtr();
  ndbrequire(req->noOfParameters == 0);

  const ndb_mgm_configuration_iterator * p =
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_TABLE, &c_tableRecSize));
  c_tableRec = (Uint8*)allocRecord("TableRec", sizeof(Uint8), c_tableRecSize);
  D("proxy:" << V(c_tableRecSize));
  Uint32 i;
  for (i = 0; i < c_tableRecSize; i++)
    c_tableRec[i] = 0;

  backREAD_CONFIG_REQ(signal);
}
void Qmgr::initData()
{
  creadyDistCom = ZFALSE;

  // Records with constant sizes
  nodeRec = new NodeRec[MAX_NODES];

  cnoCommitFailedNodes = 0;
  c_maxDynamicId = 0;
  c_clusterNodes.clear();
  c_stopReq.senderRef = 0;

  /**
   * Check sanity for NodeVersion
   */
  ndbrequire((Uint32)NodeInfo::DB == 0);
  ndbrequire((Uint32)NodeInfo::API == 1);
  ndbrequire((Uint32)NodeInfo::MGM == 2);

  NodeRecPtr nodePtr;
  nodePtr.i = getOwnNodeId();
  ptrAss(nodePtr, nodeRec);
  nodePtr.p->blockRef = reference();

  c_connectedNodes.set(getOwnNodeId());
  setNodeInfo(getOwnNodeId()).m_version = NDB_VERSION;

  /**
   * Timeouts
   */
  const ndb_mgm_configuration_iterator * p =
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

  Uint32 hbDBAPI = 1500;
  ndb_mgm_get_int_parameter(p, CFG_DB_API_HEARTBEAT_INTERVAL, &hbDBAPI);
  setHbApiDelay(hbDBAPI);
}//Qmgr::initData()
void
Backup::execREAD_CONFIG_REQ(Signal* signal)
{
  const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
  Uint32 ref = req->senderRef;
  Uint32 senderData = req->senderData;
  ndbrequire(req->noOfParameters == 0);

  const ndb_mgm_configuration_iterator * p =
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

  c_defaults.m_disk_write_speed = 10 * (1024 * 1024);
  c_defaults.m_disk_write_speed_sr = 100 * (1024 * 1024);
  c_defaults.m_disk_synch_size = 4 * (1024 * 1024);
  c_defaults.m_o_direct = true;

  Uint32 noBackups = 0, noTables = 0, noAttribs = 0, noFrags = 0;
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS,
                                        &c_defaults.m_diskless));
  ndb_mgm_get_int_parameter(p, CFG_DB_O_DIRECT, &c_defaults.m_o_direct);
  ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED_SR,
                            &c_defaults.m_disk_write_speed_sr);
  ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED,
                            &c_defaults.m_disk_write_speed);
  ndb_mgm_get_int_parameter(p, CFG_DB_DISK_SYNCH_SIZE,
                            &c_defaults.m_disk_synch_size);
  ndb_mgm_get_int_parameter(p, CFG_DB_COMPRESSED_BACKUP,
                            &c_defaults.m_compressed_backup);
  ndb_mgm_get_int_parameter(p, CFG_DB_COMPRESSED_LCP,
                            &c_defaults.m_compressed_lcp);

  m_backup_report_frequency = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_REPORT_FREQUENCY,
                            &m_backup_report_frequency);

  /*
    We adjust the disk speed parameters from bytes per second to rather be
    words per 100 milliseconds. We convert disk synch size from bytes per
    second to words per second.
  */
  c_defaults.m_disk_write_speed /= (4 * 10);
  c_defaults.m_disk_write_speed_sr /= (4 * 10);

  /*
    Temporary fix: we divide the speed by the number of LDM threads, since we
    can now write in all LDM threads in parallel. Since previously we could
    write in 2 threads, we also multiply by 2 if the number of LDM threads is
    at least 2. The real fix will be to make the write speed more adaptive,
    use the real configured value, and add a new max disk speed value that can
    be used when one needs to write faster.
  */
  Uint32 num_ldm_threads = globalData.ndbMtLqhThreads;
  if (num_ldm_threads == 0)
  {
    /* We are running with ndbd binary */
    jam();
    num_ldm_threads = 1;
  }
  c_defaults.m_disk_write_speed /= num_ldm_threads;
  c_defaults.m_disk_write_speed_sr /= num_ldm_threads;
  if (num_ldm_threads > 1)
  {
    jam();
    c_defaults.m_disk_write_speed *= 2;
    c_defaults.m_disk_write_speed_sr *= 2;
  }

  ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_BACKUPS, &noBackups);
  //  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES, &noTables));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &noTables));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES, &noAttribs));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_FRAG_CONNECT, &noFrags));

  noAttribs++; //RT 527 bug fix

  c_nodePool.setSize(MAX_NDB_NODES);
  c_backupPool.setSize(noBackups + 1);
  c_backupFilePool.setSize(3 * noBackups + 1);
  c_tablePool.setSize(noBackups * noTables + 1);
  c_triggerPool.setSize(noBackups * 3 * noTables);
  c_fragmentPool.setSize(noBackups * noFrags + 1);

  Uint32 szDataBuf = (2 * 1024 * 1024);
  Uint32 szLogBuf = (2 * 1024 * 1024);
  Uint32 szWrite = 32768, maxWriteSize = (256 * 1024);
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_DATA_BUFFER_MEM, &szDataBuf);
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_LOG_BUFFER_MEM, &szLogBuf);
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_WRITE_SIZE, &szWrite);
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MAX_WRITE_SIZE, &maxWriteSize);

  if (maxWriteSize < szWrite)
  {
    /**
     * max can't be lower than min
     */
    maxWriteSize = szWrite;
  }
  if ((maxWriteSize % szWrite) != 0)
  {
    /**
     * max needs to be a multiple of min
     */
    maxWriteSize = (maxWriteSize + szWrite - 1) / szWrite;
    maxWriteSize *= szWrite;
  }

  /**
   * add min writesize to buffer size...and the alignment added here and there
   */
  Uint32 extra = szWrite + 4 * (/* align * 512b */ 128);

  szDataBuf += extra;
  szLogBuf += extra;

  c_defaults.m_logBufferSize = szLogBuf;
  c_defaults.m_dataBufferSize = szDataBuf;
  c_defaults.m_minWriteSize = szWrite;
  c_defaults.m_maxWriteSize = maxWriteSize;
  c_defaults.m_lcp_buffer_size = szDataBuf;

  Uint32 szMem = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MEM, &szMem);
  szMem += 3 * extra; // (data+log+lcp);
  Uint32 noPages =
    (szMem + sizeof(Page32) - 1) / sizeof(Page32) +
    (c_defaults.m_lcp_buffer_size + sizeof(Page32) - 1) / sizeof(Page32);

  // We need to allocate an additional 2 pages. 1 page because of a bug in
  // ArrayPool and another one for DICTTABINFO.
  c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2, true);

  { // Init all tables
    SLList<Table> tables(c_tablePool);
    TablePtr ptr;
    while (tables.seizeFirst(ptr))
    {
      new (ptr.p) Table(c_fragmentPool);
    }
    while (tables.releaseFirst());
  }

  {
    SLList<BackupFile> ops(c_backupFilePool);
    BackupFilePtr ptr;
    while (ops.seizeFirst(ptr))
    {
      new (ptr.p) BackupFile(* this, c_pagePool);
    }
    while (ops.releaseFirst());
  }

  {
    SLList<BackupRecord> recs(c_backupPool);
    BackupRecordPtr ptr;
    while (recs.seizeFirst(ptr))
    {
      new (ptr.p) BackupRecord(* this, c_tablePool, c_backupFilePool,
                               c_triggerPool);
    }
    while (recs.releaseFirst());
  }

  // Initialize BAT for interface to file system
  {
    Page32Ptr p;
    ndbrequire(c_pagePool.seizeId(p, 0));
    c_startOfPages = (Uint32 *)p.p;
    c_pagePool.release(p);

    NewVARIABLE* bat = allocateBat(1);
    bat[0].WA = c_startOfPages;
    bat[0].nrr = c_pagePool.getSize()*sizeof(Page32)/sizeof(Uint32);
  }

  ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
  conf->senderRef = reference();
  conf->senderData = senderData;
  sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
             ReadConfigConf::SignalLength, JBB);
}
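/*
 * Standalone sketch (not from the original source) of the two adjustments
 * applied to maxWriteSize above: clamp it to at least szWrite, then round it
 * up to the next multiple of szWrite. The helper name is illustrative.
 */
static Uint32
adjust_max_write_size(Uint32 szWrite, Uint32 maxWriteSize)
{
  if (maxWriteSize < szWrite)
    maxWriteSize = szWrite;                             // max can't be lower than min
  if ((maxWriteSize % szWrite) != 0)                    // round up to a multiple of min
    maxWriteSize = ((maxWriteSize + szWrite - 1) / szWrite) * szWrite;
  return maxWriteSize;
}
/* e.g. adjust_max_write_size(32768, 250000) == 262144 (8 * 32768). */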
void
Dbtux::execREAD_CONFIG_REQ(Signal* signal)
{
  jamEntry();

  const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
  Uint32 ref = req->senderRef;
  Uint32 senderData = req->senderData;
  ndbrequire(req->noOfParameters == 0);

  Uint32 nIndex;
  Uint32 nFragment;
  Uint32 nAttribute;
  Uint32 nScanOp;
  Uint32 nScanBatch;
  Uint32 nStatAutoUpdate;
  Uint32 nStatSaveSize;
  Uint32 nStatSaveScale;
  Uint32 nStatTriggerPct;
  Uint32 nStatTriggerScale;
  Uint32 nStatUpdateDelay;

  const ndb_mgm_configuration_iterator * p =
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_INDEX, &nIndex));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_FRAGMENT, &nFragment));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_ATTRIBUTE, &nAttribute));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_SCAN_OP, &nScanOp));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_BATCH_SIZE, &nScanBatch));

  nStatAutoUpdate = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_AUTO_UPDATE,
                            &nStatAutoUpdate);

  nStatSaveSize = 32768;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_SAVE_SIZE,
                            &nStatSaveSize);

  nStatSaveScale = 100;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_SAVE_SCALE,
                            &nStatSaveScale);

  nStatTriggerPct = 100;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_TRIGGER_PCT,
                            &nStatTriggerPct);

  nStatTriggerScale = 100;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_TRIGGER_SCALE,
                            &nStatTriggerScale);

  nStatUpdateDelay = 60;
  ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_UPDATE_DELAY,
                            &nStatUpdateDelay);

  const Uint32 nDescPage =
    (nIndex * DescHeadSize +
     nAttribute * KeyTypeSize +
     nAttribute * AttributeHeaderSize +
     DescPageSize - 1) / DescPageSize;
  const Uint32 nScanBoundWords = nScanOp * ScanBoundSegmentSize * 4;
  const Uint32 nScanLock = nScanOp * nScanBatch;
  const Uint32 nStatOp = 8;

  c_indexPool.setSize(nIndex);
  c_fragPool.setSize(nFragment);
  c_descPagePool.setSize(nDescPage);
  c_fragOpPool.setSize(MaxIndexFragments);
  c_scanOpPool.setSize(nScanOp);
  c_scanBoundPool.setSize(nScanBoundWords);
  c_scanLockPool.setSize(nScanLock);
  c_statOpPool.setSize(nStatOp);
  c_indexStatAutoUpdate = nStatAutoUpdate;
  c_indexStatSaveSize = nStatSaveSize;
  c_indexStatSaveScale = nStatSaveScale;
  c_indexStatTriggerPct = nStatTriggerPct;
  c_indexStatTriggerScale = nStatTriggerScale;
  c_indexStatUpdateDelay = nStatUpdateDelay;

  /*
   * Index id is physical array index. We seize and initialize all
   * index records now.
   */
  IndexPtr indexPtr;
  while (1)
  {
    jam();
    refresh_watch_dog();
    c_indexPool.seize(indexPtr);
    if (indexPtr.i == RNIL)
    {
      jam();
      break;
    }
    new (indexPtr.p) Index();
  }

  // allocate buffers
  c_ctx.jamBuffer = jamBuffer();
  c_ctx.c_searchKey =
    (Uint32*)allocRecord("c_searchKey", sizeof(Uint32), MaxAttrDataSize);
  c_ctx.c_entryKey =
    (Uint32*)allocRecord("c_entryKey", sizeof(Uint32), MaxAttrDataSize);
  c_ctx.c_dataBuffer =
    (Uint32*)allocRecord("c_dataBuffer", sizeof(Uint64),
                         (MaxXfrmDataSize + 1) >> 1);
#ifdef VM_TRACE
  c_ctx.c_debugBuffer =
    (char*)allocRecord("c_debugBuffer", sizeof(char), DebugBufferBytes);
#endif

  // ack
  ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
  conf->senderRef = reference();
  conf->senderData = senderData;
  sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
             ReadConfigConf::SignalLength, JBB);
}
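/*
 * Illustrative sketch (not part of the original source) of the ceiling
 * division used above to size c_descPagePool: the total number of descriptor
 * words is rounded up to whole DescPageSize units so that a partially filled
 * page still gets its own allocation. Helper name is hypothetical.
 */
static Uint32
pages_needed(Uint32 totalWords, Uint32 pageSizeWords)
{
  return (totalWords + pageSizeWords - 1) / pageSizeWords; // ceiling division
}
/* e.g. pages_needed(10, 8) == 2, pages_needed(16, 8) == 2. */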
static int
get_multithreaded_config(EmulatorData& ed)
{
  // multithreaded is compiled in ndbd/ndbmtd for now
  globalData.isNdbMt = SimulatedBlock::isMultiThreaded();
  if (!globalData.isNdbMt)
  {
    ndbout << "NDBMT: non-mt" << endl;
    return 0;
  }

  THRConfig & conf = ed.theConfiguration->m_thr_config;
  Uint32 threadcount = conf.getThreadCount();
  ndbout << "NDBMT: MaxNoOfExecutionThreads=" << threadcount << endl;

  globalData.isNdbMtLqh = true;

  {
    if (conf.getMtClassic())
    {
      globalData.isNdbMtLqh = false;
    }
  }

  if (!globalData.isNdbMtLqh)
    return 0;

  Uint32 threads = conf.getThreadCount(THRConfig::T_LDM);
  Uint32 workers = threads;
  {
    ndb_mgm_configuration * conf = ed.theConfiguration->getClusterConfig();
    if (conf == 0)
    {
      abort();
    }
    ndb_mgm_configuration_iterator * p =
      ndb_mgm_create_configuration_iterator(conf, CFG_SECTION_NODE);
    if (ndb_mgm_find(p, CFG_NODE_ID, globalData.ownId))
    {
      abort();
    }
    ndb_mgm_get_int_parameter(p, CFG_NDBMT_LQH_WORKERS, &workers);
  }

#ifdef VM_TRACE
  // testing
  {
    const char* p;
    p = NdbEnv_GetEnv("NDBMT_LQH_WORKERS", (char*)0, 0);
    if (p != 0)
      workers = atoi(p);
  }
#endif

  ndbout << "NDBMT: workers=" << workers
         << " threads=" << threads << endl;

  assert(workers != 0 && workers <= MAX_NDBMT_LQH_WORKERS);
  assert(threads != 0 && threads <= MAX_NDBMT_LQH_THREADS);
  assert(workers % threads == 0);

  globalData.ndbMtLqhWorkers = workers;
  globalData.ndbMtLqhThreads = threads;
  return 0;
}
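/*
 * Sketch (hypothetical helper, not in the original source) of the invariant
 * asserted in get_multithreaded_config() above: the LQH worker count must be
 * a non-zero multiple of the LDM thread count, so every thread hosts the same
 * number of workers. Assumes <assert.h> is available.
 */
static Uint32
workers_per_ldm_thread(Uint32 workers, Uint32 threads)
{
  assert(threads != 0 && workers != 0);
  assert(workers % threads == 0);   // same invariant as the asserts above
  return workers / threads;         // e.g. workers=8, threads=4 -> 2 per thread
}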
static int
init_global_memory_manager(EmulatorData &ed, Uint32 *watchCounter)
{
  const ndb_mgm_configuration_iterator * p =
    ed.theConfiguration->getOwnConfigIterator();
  if (p == 0)
  {
    abort();
  }

  Uint32 numa = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_NUMA, &numa);
  if (numa == 1)
  {
    int res = NdbNuma_setInterleaved();
    g_eventLogger->info("numa_set_interleave_mask(numa_all_nodes) : %s",
                        res == 0 ? "OK" : "no numa support");
  }

  Uint64 shared_mem = 8*1024*1024;
  ndb_mgm_get_int64_parameter(p, CFG_DB_SGA, &shared_mem);
  Uint32 shared_pages = Uint32(shared_mem /= GLOBAL_PAGE_SIZE);

  Uint32 tupmem = 0;
  if (ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE, &tupmem))
  {
    g_eventLogger->alert("Failed to get CFG_TUP_PAGE parameter from "
                         "config, exiting.");
    return -1;
  }

  {
    /**
     * IndexMemory
     */
    Uint32 accpages = compute_acc_32kpages(p);
    tupmem += accpages; // Add to RG_DATAMEM
  }

  Uint32 lqhInstances = 1;
  if (globalData.isNdbMtLqh)
  {
    lqhInstances = globalData.ndbMtLqhWorkers;
  }

  if (tupmem)
  {
    Resource_limit rl;
    rl.m_min = tupmem;
    rl.m_max = tupmem;
    rl.m_resource_id = RG_DATAMEM;
    ed.m_mem_manager->set_resource_limit(rl);
  }

  Uint32 maxopen = 4 * 4; // 4 redo parts, max 4 files per part
  Uint32 filebuffer = NDB_FILE_BUFFER_SIZE;
  Uint32 filepages = (filebuffer / GLOBAL_PAGE_SIZE) * maxopen;

  {
    /**
     * RedoBuffer
     */
    Uint32 redomem = 0;
    ndb_mgm_get_int_parameter(p, CFG_DB_REDO_BUFFER, &redomem);

    if (redomem)
    {
      redomem /= GLOBAL_PAGE_SIZE;
      Uint32 tmp = redomem & 15;
      if (tmp != 0)
      {
        redomem += (16 - tmp);
      }

      filepages += lqhInstances * redomem; // Add to RG_FILE_BUFFERS
    }
  }

  if (filepages)
  {
    Resource_limit rl;
    rl.m_min = filepages;
    rl.m_max = filepages;
    rl.m_resource_id = RG_FILE_BUFFERS;
    ed.m_mem_manager->set_resource_limit(rl);
  }

  Uint32 jbpages = compute_jb_pages(&ed);
  if (jbpages)
  {
    Resource_limit rl;
    rl.m_min = jbpages;
    rl.m_max = jbpages;
    rl.m_resource_id = RG_JOBBUFFER;
    ed.m_mem_manager->set_resource_limit(rl);
  }

  Uint32 sbpages = 0;
  if (globalTransporterRegistry.get_using_default_send_buffer() == false)
  {
    Uint64 mem = globalTransporterRegistry.get_total_max_send_buffer();
    sbpages = Uint32((mem + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE);
    Resource_limit rl;
    rl.m_min = sbpages;
    rl.m_max = sbpages;
    rl.m_resource_id = RG_TRANSPORTER_BUFFERS;
    ed.m_mem_manager->set_resource_limit(rl);
  }

  Uint32 pgman_pages = 0;
  {
    /**
     * Disk page buffer memory
     */
    Uint64 page_buffer = 64*1024*1024;
    ndb_mgm_get_int64_parameter(p, CFG_DB_DISK_PAGE_BUFFER_MEMORY,
                                &page_buffer);

    Uint32 pages = 0;
    pages += Uint32(page_buffer / GLOBAL_PAGE_SIZE); // in pages
    pages += LCP_RESTORE_BUFFER * lqhInstances;

    pgman_pages += pages;
    pgman_pages += 64;

    Resource_limit rl;
    rl.m_min = pgman_pages;
    rl.m_max = pgman_pages;
    rl.m_resource_id = RG_DISK_PAGE_BUFFER; // Add to RG_DISK_PAGE_BUFFER
    ed.m_mem_manager->set_resource_limit(rl);
  }

  Uint32 sum = shared_pages + tupmem + filepages + jbpages + sbpages +
    pgman_pages;

  if (sum)
  {
    Resource_limit rl;
    rl.m_min = 0;
    rl.m_max = sum;
    rl.m_resource_id = 0;
    ed.m_mem_manager->set_resource_limit(rl);
  }

  if (!ed.m_mem_manager->init(watchCounter))
  {
    struct ndb_mgm_param_info dm;
    struct ndb_mgm_param_info sga;
    size_t size;

    size = sizeof(ndb_mgm_param_info);
    ndb_mgm_get_db_parameter_info(CFG_DB_DATA_MEM, &dm, &size);
    size = sizeof(ndb_mgm_param_info);
    ndb_mgm_get_db_parameter_info(CFG_DB_SGA, &sga, &size);

    g_eventLogger->alert("Malloc (%lld bytes) for %s and %s failed, exiting",
                         Uint64(shared_mem + tupmem) * GLOBAL_PAGE_SIZE,
                         dm.m_name, sga.m_name);
    return -1;
  }

  Uint32 late_alloc = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_LATE_ALLOC, &late_alloc);

  Uint32 memlock = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_MEMLOCK, &memlock);

  if (late_alloc)
  {
    /**
     * Only map those groups that are required for ndb to even "start"
     */
    Uint32 rg[] = { RG_JOBBUFFER, RG_FILE_BUFFERS, RG_TRANSPORTER_BUFFERS, 0 };
    ed.m_mem_manager->map(watchCounter, memlock, rg);
  }
  else
  {
    ed.m_mem_manager->map(watchCounter, memlock); // Map all
  }

  return 0; // Success
}
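/*
 * Sketch (hypothetical helper, not in the original source) of the pattern
 * repeated in init_global_memory_manager() above: a resource group whose size
 * is known up front gets m_min == m_max, i.e. its pages are reserved
 * exclusively and the group can never grow or shrink. Types come from the
 * same headers as the function above.
 */
static void
set_fixed_resource(EmulatorData& ed, Uint32 pages, Uint32 resource_id)
{
  if (pages == 0)
    return;                        // the original code also skips empty groups
  Resource_limit rl;
  rl.m_min = pages;                // reserved page count
  rl.m_max = pages;                // hard cap equal to the reservation
  rl.m_resource_id = resource_id;  // e.g. RG_DATAMEM, RG_JOBBUFFER, ...
  ed.m_mem_manager->set_resource_limit(rl);
}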
Cmvmi::Cmvmi(Block_context& ctx) :
  SimulatedBlock(CMVMI, ctx),
  subscribers(subscriberPool)
{
  BLOCK_CONSTRUCTOR(Cmvmi);

  Uint32 long_sig_buffer_size;
  const ndb_mgm_configuration_iterator * p =
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

  ndb_mgm_get_int_parameter(p, CFG_DB_LONG_SIGNAL_BUFFER,
                            &long_sig_buffer_size);

  long_sig_buffer_size = long_sig_buffer_size / 256;
  g_sectionSegmentPool.setSize(long_sig_buffer_size,
                               false, true, true, CFG_DB_LONG_SIGNAL_BUFFER);

  // Add received signals
  addRecSignal(GSN_CONNECT_REP, &Cmvmi::execCONNECT_REP);
  addRecSignal(GSN_DISCONNECT_REP, &Cmvmi::execDISCONNECT_REP);
  addRecSignal(GSN_NDB_TAMPER, &Cmvmi::execNDB_TAMPER, true);
  addRecSignal(GSN_SET_LOGLEVELORD, &Cmvmi::execSET_LOGLEVELORD);
  addRecSignal(GSN_EVENT_REP, &Cmvmi::execEVENT_REP);
  addRecSignal(GSN_STTOR, &Cmvmi::execSTTOR);
  addRecSignal(GSN_READ_CONFIG_REQ, &Cmvmi::execREAD_CONFIG_REQ);
  addRecSignal(GSN_CLOSE_COMREQ, &Cmvmi::execCLOSE_COMREQ);
  addRecSignal(GSN_ENABLE_COMORD, &Cmvmi::execENABLE_COMORD);
  addRecSignal(GSN_OPEN_COMREQ, &Cmvmi::execOPEN_COMREQ);
  addRecSignal(GSN_TEST_ORD, &Cmvmi::execTEST_ORD);
  addRecSignal(GSN_TAMPER_ORD, &Cmvmi::execTAMPER_ORD);
  addRecSignal(GSN_STOP_ORD, &Cmvmi::execSTOP_ORD);
  addRecSignal(GSN_START_ORD, &Cmvmi::execSTART_ORD);
  addRecSignal(GSN_EVENT_SUBSCRIBE_REQ, &Cmvmi::execEVENT_SUBSCRIBE_REQ);
  addRecSignal(GSN_DUMP_STATE_ORD, &Cmvmi::execDUMP_STATE_ORD);
  addRecSignal(GSN_TESTSIG, &Cmvmi::execTESTSIG);
  addRecSignal(GSN_NODE_START_REP, &Cmvmi::execNODE_START_REP, true);

  subscriberPool.setSize(5);

  const ndb_mgm_configuration_iterator * db =
    m_ctx.m_config.getOwnConfigIterator();
  for(unsigned j = 0; j < LogLevel::LOGLEVEL_CATEGORIES; j++)
  {
    Uint32 logLevel;
    if(!ndb_mgm_get_int_parameter(db, CFG_MIN_LOGLEVEL + j, &logLevel))
    {
      clogLevel.setLogLevel((LogLevel::EventCategory)j, logLevel);
    }
  }

  ndb_mgm_configuration_iterator * iter =
    m_ctx.m_config.getClusterConfigIterator();
  for(ndb_mgm_first(iter); ndb_mgm_valid(iter); ndb_mgm_next(iter))
  {
    jam();
    Uint32 nodeId;
    Uint32 nodeType;

    ndbrequire(!ndb_mgm_get_int_parameter(iter, CFG_NODE_ID, &nodeId));
    ndbrequire(!ndb_mgm_get_int_parameter(iter, CFG_TYPE_OF_SECTION,
                                          &nodeType));

    switch(nodeType){
    case NodeInfo::DB:
      c_dbNodes.set(nodeId);
      break;
    case NodeInfo::API:
    case NodeInfo::MGM:
      break;
    default:
      ndbrequire(false);
    }
    setNodeInfo(nodeId).m_type = nodeType;
  }

  setNodeInfo(getOwnNodeId()).m_connected = true;
  setNodeInfo(getOwnNodeId()).m_version = ndbGetOwnVersion();
}
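/*
 * Minimal sketch (not part of the original source) of the cluster-config
 * iteration idiom used in the constructor above: getClusterConfigIterator()
 * walks every node section, and CFG_TYPE_OF_SECTION distinguishes DB, API and
 * MGM nodes. The helper name is hypothetical.
 */
static Uint32
count_db_nodes(ndb_mgm_configuration_iterator* iter)
{
  Uint32 cnt = 0;
  for (ndb_mgm_first(iter); ndb_mgm_valid(iter); ndb_mgm_next(iter))
  {
    Uint32 nodeType;
    if (ndb_mgm_get_int_parameter(iter, CFG_TYPE_OF_SECTION, &nodeType) == 0 &&
        nodeType == NodeInfo::DB)
      cnt++;                       // data node section found
  }
  return cnt;
}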
void
Restore::execREAD_CONFIG_REQ(Signal* signal)
{
  jamEntry();
  const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
  Uint32 ref = req->senderRef;
  Uint32 senderData = req->senderData;
  ndbrequire(req->noOfParameters == 0);

  const ndb_mgm_configuration_iterator * p =
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

#if 0
  Uint32 noBackups = 0, noTables = 0, noAttribs = 0;
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &m_diskless));
  ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_BACKUPS, &noBackups);
  //  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES, &noTables));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &noTables));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES, &noAttribs));

  noAttribs++; //RT 527 bug fix

  c_backupPool.setSize(noBackups);
  c_backupFilePool.setSize(3 * noBackups);
  c_tablePool.setSize(noBackups * noTables);
  c_attributePool.setSize(noBackups * noAttribs);
  c_triggerPool.setSize(noBackups * 3 * noTables);

  // 2 = no of replicas
  c_fragmentPool.setSize(noBackups * NO_OF_FRAG_PER_NODE * noTables);

  Uint32 szMem = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MEM, &szMem);
  Uint32 noPages = (szMem + sizeof(Page32) - 1) / sizeof(Page32);
  // We need to allocate an additional 2 pages. 1 page because of a bug in
  // ArrayPool and another one for DICTTABINFO.
  c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2);

  Uint32 szDataBuf = (2 * 1024 * 1024);
  Uint32 szLogBuf = (2 * 1024 * 1024);
  Uint32 szWrite = 32768;
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_DATA_BUFFER_MEM, &szDataBuf);
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_LOG_BUFFER_MEM, &szLogBuf);
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_WRITE_SIZE, &szWrite);

  c_defaults.m_logBufferSize = szLogBuf;
  c_defaults.m_dataBufferSize = szDataBuf;
  c_defaults.m_minWriteSize = szWrite;
  c_defaults.m_maxWriteSize = szWrite;

  { // Init all tables
    ArrayList<Table> tables(c_tablePool);
    TablePtr ptr;
    while(tables.seize(ptr)){
      new (ptr.p) Table(c_attributePool, c_fragmentPool);
    }
    tables.release();
  }

  {
    ArrayList<BackupFile> ops(c_backupFilePool);
    BackupFilePtr ptr;
    while(ops.seize(ptr)){
      new (ptr.p) BackupFile(* this, c_pagePool);
    }
    ops.release();
  }

  {
    ArrayList<BackupRecord> recs(c_backupPool);
    BackupRecordPtr ptr;
    while(recs.seize(ptr)){
      new (ptr.p) BackupRecord(* this, c_pagePool, c_tablePool,
                               c_backupFilePool, c_triggerPool);
    }
    recs.release();
  }

  // Initialize BAT for interface to file system
  {
    Page32Ptr p;
    ndbrequire(c_pagePool.seizeId(p, 0));
    c_startOfPages = (Uint32 *)p.p;
    c_pagePool.release(p);

    NewVARIABLE* bat = allocateBat(1);
    bat[0].WA = c_startOfPages;
    bat[0].nrr = c_pagePool.getSize()*sizeof(Page32)/sizeof(Uint32);
  }
#endif

  m_file_pool.setSize(1);
  Uint32 cnt = 2*MAX_ATTRIBUTES_IN_TABLE;
  cnt += PAGES;
  cnt += List::getSegmentSize()-1;
  cnt /= List::getSegmentSize();
  cnt += 2;
  m_databuffer_pool.setSize(cnt);

  ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
  conf->senderRef = reference();
  conf->senderData = senderData;
  sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
             ReadConfigConf::SignalLength, JBB);
}
void Qmgr::initData()
{
  creadyDistCom = ZFALSE;

  // Records with constant sizes
  nodeRec = new NodeRec[MAX_NODES];
  for (Uint32 i = 0; i < MAX_NODES; i++)
  {
    nodeRec[i].m_secret = 0;
  }

  c_maxDynamicId = 0;
  c_clusterNodes.clear();
  c_stopReq.senderRef = 0;

  /**
   * Check sanity for NodeVersion
   */
  ndbrequire((Uint32)NodeInfo::DB == 0);
  ndbrequire((Uint32)NodeInfo::API == 1);
  ndbrequire((Uint32)NodeInfo::MGM == 2);

  m_micro_gcp_enabled = false;
  m_hb_order_config_used = false;

  NodeRecPtr nodePtr;
  nodePtr.i = getOwnNodeId();
  ptrAss(nodePtr, nodeRec);
  nodePtr.p->blockRef = reference();

  ndbrequire(getNodeInfo(getOwnNodeId()).m_type == NodeInfo::DB);
  c_connectedNodes.set(getOwnNodeId());
  setNodeInfo(getOwnNodeId()).m_version = NDB_VERSION;

  /**
   * Timeouts
   */
  const ndb_mgm_configuration_iterator * p =
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

  Uint32 hbDBAPI = 1500;
  ndb_mgm_get_int_parameter(p, CFG_DB_API_HEARTBEAT_INTERVAL, &hbDBAPI);
  setHbApiDelay(hbDBAPI);

  const NDB_TICKS now = NdbTick_getCurrentTicks(); //OJA bug#17757895
  interface_check_timer.setDelay(1000);
  interface_check_timer.reset(now);

#ifdef ERROR_INSERT
  nodeFailCount = 0;
#endif

  cfailureNr = 1;
  ccommitFailureNr = 1;
  cprepareFailureNr = 1;
  cfailedNodes.clear();
  cprepFailedNodes.clear();
  ccommitFailedNodes.clear();
  creadyDistCom = ZFALSE;
  cpresident = ZNIL;
  c_start.m_president_candidate = ZNIL;
  c_start.m_president_candidate_gci = 0;
  cpdistref = 0;
  cneighbourh = ZNIL;
  cneighbourl = ZNIL;
  cdelayRegreq = ZDELAY_REGREQ;
  c_allow_api_connect = 0;
  ctoStatus = Q_NOT_ACTIVE;

  for (nodePtr.i = 1; nodePtr.i < MAX_NODES; nodePtr.i++)
  {
    ptrAss(nodePtr, nodeRec);
    nodePtr.p->ndynamicId = 0;
    nodePtr.p->hbOrder = 0;
    Uint32 cnt = 0;
    Uint32 type = getNodeInfo(nodePtr.i).m_type;
    switch(type){
    case NodeInfo::DB:
      jam();
      nodePtr.p->phase = ZINIT;
      c_definedNodes.set(nodePtr.i);
      break;
    case NodeInfo::API:
      jam();
      nodePtr.p->phase = ZAPI_INACTIVE;
      break;
    case NodeInfo::MGM:
      jam();
      /**
       * cmvmi allows ndb_mgmd to connect directly
       */
      nodePtr.p->phase = ZAPI_INACTIVE;
      break;
    default:
      jam();
      nodePtr.p->phase = ZAPI_INACTIVE;
    }

    set_hb_count(nodePtr.i) = cnt;
    nodePtr.p->sendPrepFailReqStatus = Q_NOT_ACTIVE;
    nodePtr.p->sendCommitFailReqStatus = Q_NOT_ACTIVE;
    nodePtr.p->sendPresToStatus = Q_NOT_ACTIVE;
    nodePtr.p->failState = NORMAL;
  }//for
}//Qmgr::initData()
bool
ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf,
                              Uint32 nodeid)
{
  char buf[255];
  ndb_mgm_configuration_iterator * it;
  it = ndb_mgm_create_configuration_iterator((struct ndb_mgm_configuration *)conf,
                                             CFG_SECTION_NODE);

  if(it == 0)
  {
    BaseString::snprintf(buf, 255, "Unable to create config iterator");
    setError(CR_ERROR, buf);
    return false;
  }
  NdbAutoPtr<ndb_mgm_configuration_iterator> ptr(it);

  if(ndb_mgm_find(it, CFG_NODE_ID, nodeid) != 0)
  {
    BaseString::snprintf(buf, 255, "Unable to find node with id: %d", nodeid);
    setError(CR_ERROR, buf);
    return false;
  }

  const char * hostname;
  if(ndb_mgm_get_string_parameter(it, CFG_NODE_HOST, &hostname))
  {
    BaseString::snprintf(buf, 255,
                         "Unable to get hostname(%d) from config",
                         CFG_NODE_HOST);
    setError(CR_ERROR, buf);
    return false;
  }

  const char * datadir;
  if(!ndb_mgm_get_string_parameter(it, CFG_NODE_DATADIR, &datadir))
  {
    NdbConfig_SetPath(datadir);
  }

  if (hostname && hostname[0] != 0 &&
      !SocketServer::tryBind(0, hostname))
  {
    BaseString::snprintf(buf, 255,
                         "Config hostname(%s) don't match a local interface,"
                         " tried to bind, error = %d - %s",
                         hostname, errno, strerror(errno));
    setError(CR_ERROR, buf);
    return false;
  }

  unsigned int _type;
  if(ndb_mgm_get_int_parameter(it, CFG_TYPE_OF_SECTION, &_type))
  {
    BaseString::snprintf(buf, 255,
                         "Unable to get type of node(%d) from config",
                         CFG_TYPE_OF_SECTION);
    setError(CR_ERROR, buf);
    return false;
  }

  if(_type != m_node_type)
  {
    const char *type_s, *alias_s, *type_s2, *alias_s2;
    alias_s =
      ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)m_node_type,
                                         &type_s);
    alias_s2 =
      ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)_type,
                                         &type_s2);
    BaseString::snprintf(buf, 255,
                         "This node type %s(%s) and config "
                         "node type %s(%s) don't match for nodeid %d",
                         alias_s, type_s, alias_s2, type_s2, nodeid);
    setError(CR_ERROR, buf);
    return false;
  }

  /**
   * Check hostnames
   */
  ndb_mgm_configuration_iterator iter(* conf, CFG_SECTION_CONNECTION);
  for(iter.first(); iter.valid(); iter.next())
  {
    Uint32 type = CONNECTION_TYPE_TCP + 1;
    if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue;
    if(type != CONNECTION_TYPE_TCP) continue;

    Uint32 nodeId1, nodeId2, remoteNodeId;
    if(iter.get(CFG_CONNECTION_NODE_1, &nodeId1)) continue;
    if(iter.get(CFG_CONNECTION_NODE_2, &nodeId2)) continue;

    if(nodeId1 != nodeid && nodeId2 != nodeid) continue;
    remoteNodeId = (nodeid == nodeId1 ? nodeId2 : nodeId1);

    const char * name;
    struct in_addr addr;
    BaseString tmp;
    if(!iter.get(CFG_CONNECTION_HOSTNAME_1, &name) && strlen(name))
    {
      if(Ndb_getInAddr(&addr, name) != 0)
      {
        tmp.assfmt("Unable to lookup/illegal hostname %s, "
                   "connection from node %d to node %d",
                   name, nodeid, remoteNodeId);
        setError(CR_ERROR, tmp.c_str());
        return false;
      }
    }

    if(!iter.get(CFG_CONNECTION_HOSTNAME_2, &name) && strlen(name))
    {
      if(Ndb_getInAddr(&addr, name) != 0)
      {
        tmp.assfmt("Unable to lookup/illegal hostname %s, "
                   "connection from node %d to node %d",
                   name, nodeid, remoteNodeId);
        setError(CR_ERROR, tmp.c_str());
        return false;
      }
    }
  }
  return true;
}
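/*
 * Minimal sketch (hypothetical helper, not in the original source) of the
 * hostname check performed for each TCP connection in verifyConfig() above:
 * Ndb_getInAddr() returns 0 when the name can be resolved. struct in_addr is
 * assumed to come from the networking headers already used by this file.
 */
static bool
is_resolvable_host(const char* name)
{
  struct in_addr addr;
  return name != 0 && name[0] != 0 && Ndb_getInAddr(&addr, name) == 0;
}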
Dbtc::Dbtc(Block_context& ctx, Uint32 instanceNo):
  SimulatedBlock(DBTC, ctx, instanceNo),
  c_theDefinedTriggers(c_theDefinedTriggerPool),
  c_firedTriggerHash(c_theFiredTriggerPool),
  c_maxNumberOfDefinedTriggers(0),
  c_maxNumberOfFiredTriggers(0),
  c_theIndexes(c_theIndexPool),
  c_maxNumberOfIndexes(0),
  c_maxNumberOfIndexOperations(0),
  c_fk_hash(c_fk_pool),
  m_commitAckMarkerHash(m_commitAckMarkerPool)
{
  BLOCK_CONSTRUCTOR(Dbtc);

  const ndb_mgm_configuration_iterator * p =
    ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

  Uint32 transactionBufferMemory = 0;
  Uint32 maxNoOfIndexes = 0, maxNoOfConcurrentIndexOperations = 0;
  Uint32 maxNoOfTriggers = 0, maxNoOfFiredTriggers = 0;

  ndb_mgm_get_int_parameter(p, CFG_DB_TRANS_BUFFER_MEM,
                            &transactionBufferMemory);
  ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE,
                            &maxNoOfIndexes);
  ndb_mgm_get_int_parameter(p, CFG_DB_NO_INDEX_OPS,
                            &maxNoOfConcurrentIndexOperations);
  ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS,
                            &maxNoOfTriggers);
  ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGER_OPS,
                            &maxNoOfFiredTriggers);

  c_transactionBufferSpace =
    transactionBufferMemory / AttributeBuffer::getSegmentSize();
  c_maxNumberOfIndexes = maxNoOfIndexes;
  c_maxNumberOfIndexOperations = maxNoOfConcurrentIndexOperations;
  c_maxNumberOfDefinedTriggers = maxNoOfTriggers;
  c_maxNumberOfFiredTriggers = maxNoOfFiredTriggers;

  // Transit signals
  addRecSignal(GSN_PACKED_SIGNAL, &Dbtc::execPACKED_SIGNAL);
  addRecSignal(GSN_ABORTED, &Dbtc::execABORTED);
  addRecSignal(GSN_ATTRINFO, &Dbtc::execATTRINFO);
  addRecSignal(GSN_CONTINUEB, &Dbtc::execCONTINUEB);
  addRecSignal(GSN_KEYINFO, &Dbtc::execKEYINFO);
  addRecSignal(GSN_SCAN_NEXTREQ, &Dbtc::execSCAN_NEXTREQ);
  addRecSignal(GSN_TAKE_OVERTCREQ, &Dbtc::execTAKE_OVERTCREQ);
  addRecSignal(GSN_TAKE_OVERTCCONF, &Dbtc::execTAKE_OVERTCCONF);
  addRecSignal(GSN_LQHKEYREF, &Dbtc::execLQHKEYREF);

  // Received signals
  addRecSignal(GSN_DUMP_STATE_ORD, &Dbtc::execDUMP_STATE_ORD);
  addRecSignal(GSN_DBINFO_SCANREQ, &Dbtc::execDBINFO_SCANREQ);
  addRecSignal(GSN_SEND_PACKED, &Dbtc::execSEND_PACKED, true);
  addRecSignal(GSN_SCAN_HBREP, &Dbtc::execSCAN_HBREP);
  addRecSignal(GSN_COMPLETED, &Dbtc::execCOMPLETED);
  addRecSignal(GSN_COMMITTED, &Dbtc::execCOMMITTED);
  addRecSignal(GSN_DIH_SCAN_GET_NODES_CONF, &Dbtc::execDIH_SCAN_GET_NODES_CONF);
  addRecSignal(GSN_DIH_SCAN_GET_NODES_REF, &Dbtc::execDIH_SCAN_GET_NODES_REF);
  addRecSignal(GSN_DIVERIFYCONF, &Dbtc::execDIVERIFYCONF);
  addRecSignal(GSN_DIH_SCAN_TAB_CONF, &Dbtc::execDIH_SCAN_TAB_CONF);
  addRecSignal(GSN_DIH_SCAN_TAB_REF, &Dbtc::execDIH_SCAN_TAB_REF);
  addRecSignal(GSN_GCP_NOMORETRANS, &Dbtc::execGCP_NOMORETRANS);
  addRecSignal(GSN_LQHKEYCONF, &Dbtc::execLQHKEYCONF);
  addRecSignal(GSN_NDB_STTOR, &Dbtc::execNDB_STTOR);
  addRecSignal(GSN_READ_NODESCONF, &Dbtc::execREAD_NODESCONF);
  addRecSignal(GSN_READ_NODESREF, &Dbtc::execREAD_NODESREF);
  addRecSignal(GSN_STTOR, &Dbtc::execSTTOR);
  addRecSignal(GSN_TC_COMMITREQ, &Dbtc::execTC_COMMITREQ);
  addRecSignal(GSN_TC_CLOPSIZEREQ, &Dbtc::execTC_CLOPSIZEREQ);
  addRecSignal(GSN_TCGETOPSIZEREQ, &Dbtc::execTCGETOPSIZEREQ);
  addRecSignal(GSN_TCKEYREQ, &Dbtc::execTCKEYREQ);
  addRecSignal(GSN_TCRELEASEREQ, &Dbtc::execTCRELEASEREQ);
  addRecSignal(GSN_TCSEIZEREQ, &Dbtc::execTCSEIZEREQ);
  addRecSignal(GSN_TCROLLBACKREQ, &Dbtc::execTCROLLBACKREQ);
  addRecSignal(GSN_TC_HBREP, &Dbtc::execTC_HBREP);
  addRecSignal(GSN_TC_SCHVERREQ, &Dbtc::execTC_SCHVERREQ);
  addRecSignal(GSN_TAB_COMMITREQ, &Dbtc::execTAB_COMMITREQ);
  addRecSignal(GSN_SCAN_TABREQ, &Dbtc::execSCAN_TABREQ);
  addRecSignal(GSN_SCAN_FRAGCONF, &Dbtc::execSCAN_FRAGCONF);
  addRecSignal(GSN_SCAN_FRAGREF, &Dbtc::execSCAN_FRAGREF);
  addRecSignal(GSN_READ_CONFIG_REQ, &Dbtc::execREAD_CONFIG_REQ, true);
  addRecSignal(GSN_LQH_TRANSCONF, &Dbtc::execLQH_TRANSCONF);
  addRecSignal(GSN_COMPLETECONF, &Dbtc::execCOMPLETECONF);
  addRecSignal(GSN_COMMITCONF, &Dbtc::execCOMMITCONF);
  addRecSignal(GSN_ABORTCONF, &Dbtc::execABORTCONF);
  addRecSignal(GSN_NODE_FAILREP, &Dbtc::execNODE_FAILREP);
  addRecSignal(GSN_INCL_NODEREQ, &Dbtc::execINCL_NODEREQ);
  addRecSignal(GSN_TIME_SIGNAL, &Dbtc::execTIME_SIGNAL);
  addRecSignal(GSN_API_FAILREQ, &Dbtc::execAPI_FAILREQ);
  addRecSignal(GSN_TC_COMMIT_ACK, &Dbtc::execTC_COMMIT_ACK);
  addRecSignal(GSN_ABORT_ALL_REQ, &Dbtc::execABORT_ALL_REQ);
  addRecSignal(GSN_CREATE_TRIG_IMPL_REQ, &Dbtc::execCREATE_TRIG_IMPL_REQ);
  addRecSignal(GSN_DROP_TRIG_IMPL_REQ, &Dbtc::execDROP_TRIG_IMPL_REQ);
  addRecSignal(GSN_FIRE_TRIG_ORD, &Dbtc::execFIRE_TRIG_ORD);
  addRecSignal(GSN_TRIG_ATTRINFO, &Dbtc::execTRIG_ATTRINFO);
  addRecSignal(GSN_CREATE_INDX_IMPL_REQ, &Dbtc::execCREATE_INDX_IMPL_REQ);
  addRecSignal(GSN_DROP_INDX_IMPL_REQ, &Dbtc::execDROP_INDX_IMPL_REQ);
  addRecSignal(GSN_TCINDXREQ, &Dbtc::execTCINDXREQ);
  addRecSignal(GSN_INDXKEYINFO, &Dbtc::execINDXKEYINFO);
  addRecSignal(GSN_INDXATTRINFO, &Dbtc::execINDXATTRINFO);
  addRecSignal(GSN_ALTER_INDX_IMPL_REQ, &Dbtc::execALTER_INDX_IMPL_REQ);
  addRecSignal(GSN_TRANSID_AI_R, &Dbtc::execTRANSID_AI_R);
  addRecSignal(GSN_KEYINFO20_R, &Dbtc::execKEYINFO20_R);
  addRecSignal(GSN_SIGNAL_DROPPED_REP, &Dbtc::execSIGNAL_DROPPED_REP, true);

  // Index table lookup
  addRecSignal(GSN_TCKEYCONF, &Dbtc::execTCKEYCONF);
  addRecSignal(GSN_TCKEYREF, &Dbtc::execTCKEYREF);
  addRecSignal(GSN_TRANSID_AI, &Dbtc::execTRANSID_AI);
  addRecSignal(GSN_TCROLLBACKREP, &Dbtc::execTCROLLBACKREP);

  //addRecSignal(GSN_CREATE_TAB_REQ, &Dbtc::execCREATE_TAB_REQ);
  addRecSignal(GSN_DROP_TAB_REQ, &Dbtc::execDROP_TAB_REQ);
  addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbtc::execPREP_DROP_TAB_REQ);
  addRecSignal(GSN_ALTER_TAB_REQ, &Dbtc::execALTER_TAB_REQ);
  addRecSignal(GSN_ROUTE_ORD, &Dbtc::execROUTE_ORD);
  addRecSignal(GSN_TCKEY_FAILREFCONF_R, &Dbtc::execTCKEY_FAILREFCONF_R);

  addRecSignal(GSN_FIRE_TRIG_REF, &Dbtc::execFIRE_TRIG_REF);
  addRecSignal(GSN_FIRE_TRIG_CONF, &Dbtc::execFIRE_TRIG_CONF);

  addRecSignal(GSN_CREATE_FK_IMPL_REQ, &Dbtc::execCREATE_FK_IMPL_REQ);
  addRecSignal(GSN_DROP_FK_IMPL_REQ, &Dbtc::execDROP_FK_IMPL_REQ);

  addRecSignal(GSN_SCAN_TABREF, &Dbtc::execSCAN_TABREF);
  addRecSignal(GSN_SCAN_TABCONF, &Dbtc::execSCAN_TABCONF);
  addRecSignal(GSN_KEYINFO20, &Dbtc::execKEYINFO20);

  cacheRecord = 0;
  apiConnectRecord = 0;
  tcConnectRecord = 0;
  hostRecord = 0;
  tableRecord = 0;
  scanRecord = 0;
  gcpRecord = 0;
  tcFailRecord = 0;
  c_apiConTimer = 0;
  c_apiConTimer_line = 0;
  cpackedListIndex = 0;
  c_ongoing_take_over_cnt = 0;

#ifdef VM_TRACE
  {
    void* tmp[] = { &apiConnectptr,
                    &tcConnectptr,
                    &cachePtr,
                    &hostptr,
                    &timeOutptr,
                    &scanFragptr,
                    &tcNodeFailptr };
    init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));
  }
#endif

  cacheRecord = 0;
  apiConnectRecord = 0;
  tcConnectRecord = 0;
  hostRecord = 0;
  tableRecord = 0;
  scanRecord = 0;
  gcpRecord = 0;
  tcFailRecord = 0;
  c_apiConTimer = 0;
  c_apiConTimer_line = 0;
  csystemStart = SSS_FALSE;
  m_deferred_enabled = ~Uint32(0);
  m_max_writes_per_trans = ~Uint32(0);
}//Dbtc::Dbtc()