/**
 * Unlike accessing the peer database, this only returns currently
 * connected peers and includes data not yet saved in the peer db.
 */
fc::vector<db::peer::record> node::active_peers()const
{
  // All connection state lives on the node's worker thread; if we are
  // called from any other thread, re-dispatch there and block on the result.
  if( !my->_thread.is_current() )
  {
    return my->_thread.async( [this](){ return active_peers(); } ).wait();
  }

  // One record per live connection: size the result up front and fill
  // each slot from the connection's in-memory db record.
  fc::vector<db::peer::record> result(my->_dist_to_con.size());
  size_t slot = 0;
  for( auto con = my->_dist_to_con.begin(); con != my->_dist_to_con.end(); ++con )
  {
    result[slot++] = con->second->get_db_record();
  }
  return result;
}
/**
 * READ_CONFIG_REQ handler for the BACKUP kernel block.
 *
 * Reads the node's own configuration, fills in c_defaults (disk write
 * speeds, sync size, compression, O_DIRECT), sizes every record pool and
 * the shared page pool, placement-constructs all pool records once, sets
 * up the BAT used by the file-system interface, and finally replies with
 * READ_CONFIG_CONF to the sender recorded in the request.
 */
void
Backup::execREAD_CONFIG_REQ(Signal* signal)
{
  const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
  // Remember reply coordinates before the signal buffer is reused below.
  Uint32 ref = req->senderRef;
  Uint32 senderData = req->senderData;
  ndbrequire(req->noOfParameters == 0);

  const ndb_mgm_configuration_iterator * p =
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

  // Defaults, set before the parameter reads; presumably
  // ndb_mgm_get_int_parameter leaves the output untouched when the
  // parameter is absent from the config — TODO confirm against its API.
  c_defaults.m_disk_write_speed = 10 * (1024 * 1024);
  c_defaults.m_disk_write_speed_sr = 100 * (1024 * 1024);
  c_defaults.m_disk_synch_size = 4 * (1024 * 1024);
  c_defaults.m_o_direct = true;

  Uint32 noBackups = 0, noTables = 0, noAttribs = 0, noFrags = 0;
  // Mandatory parameter: failure to read it is a fatal configuration error.
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_defaults.m_diskless));
  // Optional parameters: errors ignored, defaults above remain in effect.
  ndb_mgm_get_int_parameter(p, CFG_DB_O_DIRECT, &c_defaults.m_o_direct);
  ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED_SR, &c_defaults.m_disk_write_speed_sr);
  ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED, &c_defaults.m_disk_write_speed);
  ndb_mgm_get_int_parameter(p, CFG_DB_DISK_SYNCH_SIZE, &c_defaults.m_disk_synch_size);
  ndb_mgm_get_int_parameter(p, CFG_DB_COMPRESSED_BACKUP, &c_defaults.m_compressed_backup);
  ndb_mgm_get_int_parameter(p, CFG_DB_COMPRESSED_LCP, &c_defaults.m_compressed_lcp);

  m_backup_report_frequency = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_REPORT_FREQUENCY, &m_backup_report_frequency);

  /*
    We adjust the disk speed parameters from bytes per second to rather be
    words per 100 milliseconds. We convert disk synch size from bytes per
    second to words per second.
  */
  c_defaults.m_disk_write_speed /= (4 * 10);     // /4: bytes->words, /10: per 100ms
  c_defaults.m_disk_write_speed_sr /= (4 * 10);

  /*
    Temporary fix, we divide the speed by number of ldm threads since we now
    can write in all ldm threads in parallel. Since previously we could write
    in 2 threads we also multiply by 2 if number of ldm threads is at least 2.

    The real fix will be to make the speed of writing more adaptable and also
    to use the real configured value and also add a new max disk speed value
    that can be used when one needs to write faster.
  */
  Uint32 num_ldm_threads = globalData.ndbMtLqhThreads;
  if (num_ldm_threads == 0)
  {
    /* We are running with ndbd binary */
    jam();
    num_ldm_threads = 1;
  }
  c_defaults.m_disk_write_speed /= num_ldm_threads;
  c_defaults.m_disk_write_speed_sr /= num_ldm_threads;
  if (num_ldm_threads > 1)
  {
    jam();
    c_defaults.m_disk_write_speed *= 2;
    c_defaults.m_disk_write_speed_sr *= 2;
  }

  ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_BACKUPS, &noBackups);
  //  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES, &noTables));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &noTables));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES, &noAttribs));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_FRAG_CONNECT, &noFrags));

  noAttribs++; //RT 527 bug fix

  // Pool sizes scale with the number of parallel backups; the +1 slots
  // presumably cover an extra record needed per pool — TODO confirm.
  c_nodePool.setSize(MAX_NDB_NODES);
  c_backupPool.setSize(noBackups + 1);
  c_backupFilePool.setSize(3 * noBackups + 1);
  c_tablePool.setSize(noBackups * noTables + 1);
  c_triggerPool.setSize(noBackups * 3 * noTables);
  c_fragmentPool.setSize(noBackups * noFrags + 1);

  Uint32 szDataBuf = (2 * 1024 * 1024);
  Uint32 szLogBuf = (2 * 1024 * 1024);
  Uint32 szWrite = 32768, maxWriteSize = (256 * 1024);
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_DATA_BUFFER_MEM, &szDataBuf);
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_LOG_BUFFER_MEM, &szLogBuf);
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_WRITE_SIZE, &szWrite);
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MAX_WRITE_SIZE, &maxWriteSize);

  if (maxWriteSize < szWrite)
  {
    /**
     * max can't be lower than min
     */
    maxWriteSize = szWrite;
  }
  if ((maxWriteSize % szWrite) != 0)
  {
    /**
     * max needs to be a multiple of min
     */
    // Round up to the next multiple of szWrite.
    maxWriteSize = (maxWriteSize + szWrite - 1) / szWrite;
    maxWriteSize *= szWrite;
  }

  /**
   * add min writesize to buffer size...and the alignment added here and there
   */
  Uint32 extra = szWrite + 4 * (/* align * 512b */ 128);

  szDataBuf += extra;
  szLogBuf += extra;

  c_defaults.m_logBufferSize = szLogBuf;
  c_defaults.m_dataBufferSize = szDataBuf;
  c_defaults.m_minWriteSize = szWrite;
  c_defaults.m_maxWriteSize = maxWriteSize;
  c_defaults.m_lcp_buffer_size = szDataBuf;

  Uint32 szMem = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MEM, &szMem);
  szMem += 3 * extra; // (data+log+lcp);
  // Ceiling-divide both the backup memory and the LCP buffer into 32K pages.
  Uint32 noPages =
    (szMem + sizeof(Page32) - 1) / sizeof(Page32) +
    (c_defaults.m_lcp_buffer_size + sizeof(Page32) - 1) / sizeof(Page32);

  // We need to allocate an additional of 2 pages. 1 page because of a bug in
  // ArrayPool and another one for DICTTAINFO.
  c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2, true);

  // Placement-construct every pool record once: seize all records, run the
  // constructor on each, then release them all back to the free list.
  { // Init all tables
    SLList<Table> tables(c_tablePool);
    TablePtr ptr;
    while (tables.seizeFirst(ptr)) {
      new (ptr.p) Table(c_fragmentPool);
    }
    while (tables.releaseFirst());
  }

  {
    SLList<BackupFile> ops(c_backupFilePool);
    BackupFilePtr ptr;
    while (ops.seizeFirst(ptr)) {
      new (ptr.p) BackupFile(* this, c_pagePool);
    }
    while (ops.releaseFirst());
  }

  {
    SLList<BackupRecord> recs(c_backupPool);
    BackupRecordPtr ptr;
    while (recs.seizeFirst(ptr)) {
      new (ptr.p) BackupRecord(* this, c_tablePool, c_backupFilePool, c_triggerPool);
    }
    while (recs.releaseFirst());
  }

  // Initialize BAT for interface to file system
  {
    // Seize page 0 only to learn the base address of the page array, then
    // give it straight back — the BAT just needs the start-of-pages pointer.
    Page32Ptr p;
    ndbrequire(c_pagePool.seizeId(p, 0));
    c_startOfPages = (Uint32 *)p.p;
    c_pagePool.release(p);

    NewVARIABLE* bat = allocateBat(1);
    bat[0].WA = c_startOfPages;
    bat[0].nrr = c_pagePool.getSize()*sizeof(Page32)/sizeof(Uint32);
  }

  // Acknowledge to the original sender (ref/senderData saved at entry).
  ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
  conf->senderRef = reference();
  conf->senderData = senderData;
  sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
             ReadConfigConf::SignalLength, JBB);
}
/**
 * READ_CONFIG_REQ handler for the RESTORE kernel block.
 *
 * Validates the request, sizes the (small) file and data-buffer pools used
 * during restore, and replies with READ_CONFIG_CONF. The large #if 0 region
 * is retained, disabled legacy code mirroring the BACKUP block's pool setup.
 */
void
Restore::execREAD_CONFIG_REQ(Signal* signal)
{
  jamEntry();
  const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
  // Save reply coordinates before the signal buffer is reused for the CONF.
  Uint32 ref = req->senderRef;
  Uint32 senderData = req->senderData;
  ndbrequire(req->noOfParameters == 0);

  const ndb_mgm_configuration_iterator * p =
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

  // NOTE(review): everything inside this #if 0 is dead code kept for
  // reference; the live configuration of this block is below it.
#if 0
  Uint32 noBackups = 0, noTables = 0, noAttribs = 0;
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &m_diskless));
  ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_BACKUPS, &noBackups);
  //  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES, &noTables));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &noTables));
  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES, &noAttribs));

  noAttribs++; //RT 527 bug fix

  c_backupPool.setSize(noBackups);
  c_backupFilePool.setSize(3 * noBackups);
  c_tablePool.setSize(noBackups * noTables);
  c_attributePool.setSize(noBackups * noAttribs);
  c_triggerPool.setSize(noBackups * 3 * noTables);

  // 2 = no of replicas
  c_fragmentPool.setSize(noBackups * NO_OF_FRAG_PER_NODE * noTables);

  Uint32 szMem = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MEM, &szMem);
  Uint32 noPages = (szMem + sizeof(Page32) - 1) / sizeof(Page32);
  // We need to allocate an additional of 2 pages. 1 page because of a bug in
  // ArrayPool and another one for DICTTAINFO.
  c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2);

  Uint32 szDataBuf = (2 * 1024 * 1024);
  Uint32 szLogBuf = (2 * 1024 * 1024);
  Uint32 szWrite = 32768;
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_DATA_BUFFER_MEM, &szDataBuf);
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_LOG_BUFFER_MEM, &szLogBuf);
  ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_WRITE_SIZE, &szWrite);

  c_defaults.m_logBufferSize = szLogBuf;
  c_defaults.m_dataBufferSize = szDataBuf;
  c_defaults.m_minWriteSize = szWrite;
  c_defaults.m_maxWriteSize = szWrite;

  { // Init all tables
    ArrayList<Table> tables(c_tablePool);
    TablePtr ptr;
    while(tables.seize(ptr)){
      new (ptr.p) Table(c_attributePool, c_fragmentPool);
    }
    tables.release();
  }

  {
    ArrayList<BackupFile> ops(c_backupFilePool);
    BackupFilePtr ptr;
    while(ops.seize(ptr)){
      new (ptr.p) BackupFile(* this, c_pagePool);
    }
    ops.release();
  }

  {
    ArrayList<BackupRecord> recs(c_backupPool);
    BackupRecordPtr ptr;
    while(recs.seize(ptr)){
      new (ptr.p) BackupRecord(* this, c_pagePool, c_tablePool, c_backupFilePool, c_triggerPool);
    }
    recs.release();
  }

  // Initialize BAT for interface to file system
  {
    Page32Ptr p;
    ndbrequire(c_pagePool.seizeId(p, 0));
    c_startOfPages = (Uint32 *)p.p;
    c_pagePool.release(p);

    NewVARIABLE* bat = allocateBat(1);
    bat[0].WA = c_startOfPages;
    bat[0].nrr = c_pagePool.getSize()*sizeof(Page32)/sizeof(Uint32);
  }
#endif

  // Live configuration: a single restore file, plus a data-buffer pool
  // sized as a ceiling-divide of (attribute metadata + page area) into
  // List segments, with 2 spare segments.
  m_file_pool.setSize(1);
  Uint32 cnt = 2*MAX_ATTRIBUTES_IN_TABLE;
  cnt += PAGES;
  cnt += List::getSegmentSize()-1;  // round up in the division below
  cnt /= List::getSegmentSize();
  cnt += 2;                         // spare segments — reason not documented here
  m_databuffer_pool.setSize(cnt);

  // Acknowledge to the original sender.
  ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
  conf->senderRef = reference();
  conf->senderData = senderData;
  sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
             ReadConfigConf::SignalLength, JBB);
}