void GetTableCall::run() { DEBUG_PRINT("GetTableCall::run() [%s.%s]", arg1, arg2); NdbDictionary::Dictionary * dict; return_val = -1; if(strlen(arg1)) { arg0->ndb->setDatabaseName(arg1); } dict = arg0->ndb->getDictionary(); ndb_table = dict->getTable(arg2); if(ndb_table) { return_val = dict->listIndexes(idx_list, arg2); } if(return_val == 0) { /* Fetch the indexes now. These calls may perform network IO, populating the (connection) global and (Ndb) local dictionary caches. Later, in the JavaScript main thread, we will call getIndex() again knowing that the caches are populated. */ for(unsigned int i = 0 ; i < idx_list.count ; i++) { const NdbDictionary::Index * idx = dict->getIndex(idx_list.elements[i].name, arg2); /* It is possible to get an index for a recently dropped table rather than the desired table. This is a known bug likely to be fixed later. */ if(ndb_table->getObjectVersion() != dict->getTable(idx->getTable())->getObjectVersion()) { dict->invalidateIndex(idx); idx = dict->getIndex(idx_list.elements[i].name, arg2); } } } }
/* Create every table registered for this test run in TEST_DB, dropping any
 * stale copy first. The first created table is recorded in the shared test
 * context. Returns NDBT_OK or NDBT_FAILED.
 */
int NDBT_TestSuite::createTables(Ndb_cluster_connection& con) const
{
  Ndb ndb(&con, "TEST_DB");
  ndb.init(1);

  NdbDictionary::Dictionary* dictionary = ndb.getDictionary();
  for (unsigned idx = 0; idx < m_tables_in_test.size(); idx++)
  {
    const char* tab_name = m_tables_in_test[idx].c_str();
    const bool drop_failed =
      dictionary->dropTable(tab_name) != 0 &&
      dictionary->getNdbError().code != 723; // 723: no such table
    if (drop_failed)
    {
      g_err << "runCreateTables: Failed to drop table " << tab_name << endl
            << dictionary->getNdbError() << endl;
      return NDBT_FAILED;
    }
    if (NDBT_Tables::createTable(&ndb, tab_name, !getLogging()) != 0)
    {
      g_err << "runCreateTables: Failed to create table " << tab_name << endl
            << dictionary->getNdbError() << endl;
      return NDBT_FAILED;
    }
    if (idx == 0)
    {
      // Update ctx with a pointer to the first created table
      ctx->setTab(dictionary->getTable(tab_name));
    }
    g_info << "created " << tab_name << endl;
  }
  return NDBT_OK;
}
/* DDL stress step: repeatedly create a randomly chosen, per-step uniquely
 * named table, load it, then drop it, until the test is stopped.
 */
int runDDL(NDBT_Context* ctx, NDBT_Step* step){
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
  const int tables = NDBT_Tables::getNumTables();

  while (!ctx->isTestStopped())
  {
    // Pick a random prototype table and make its name unique per step.
    const int tab_no = rand() % (tables);
    NdbDictionary::Table tab = *NDBT_Tables::getTable(tab_no);
    BaseString name = tab.getName();
    name.appfmt("-%d", step->getStepNo());
    tab.setName(name.c_str());

    if (pDict->createTable(tab) != 0)
      continue; // creation failed (e.g. concurrent DDL) — try another table

    HugoTransactions hugoTrans(* pDict->getTable(name.c_str()));
    if (hugoTrans.loadTable(pNdb, 10000) != 0)
    {
      return NDBT_FAILED;
    }

    // Retry the drop until it succeeds or fails with anything other
    // than 4009 (cluster failure), logging each transient error.
    while (pDict->dropTable(tab.getName()) != 0 &&
           pDict->getNdbError().code != 4009)
      g_err << pDict->getNdbError() << endl;

    sleep(1);
  }
  return NDBT_OK;
}
/* Verify that the table recorded in the test context exists in the
 * dictionary, then point the context at the dictionary's copy of it.
 */
static int runCheckTableExists(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb ndb(&ctx->m_cluster_connection, "TEST_DB");
  ndb.init(1);
  NdbDictionary::Dictionary* dictionary = ndb.getDictionary();

  const NdbDictionary::Table* expected = ctx->getTab();
  const char* tab_name = expected->getName();

  const NdbDictionary::Table* found = dictionary->getTable(tab_name);
  if (found == NULL)
  {
    g_err << "runCheckTableExists : Failed to find table "
          << tab_name << endl;
    g_err << "Required schema : " << *((NDBT_Table*)expected) << endl;
    return NDBT_FAILED;
  }

  /* Todo : better check that table in DB is same as
   * table we expect
   */

  // Update ctx with a pointer to dict table
  ctx->setTab(found);
  ctx->setProperty("$table", tab_name);
  return NDBT_OK;
}
/* Drop (if present) and re-create the context's table, then store the new
 * dictionary object and table-name property back into the context.
 *
 * Fixes: the create-failure message was missing the `endl` between the table
 * name and the NdbError (inconsistent with the drop branch and with
 * NDBT_TestSuite::createTables), and a NULL from the post-create getTable()
 * readback was stored into the context unchecked.
 */
static int runCreateTable(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb ndb(&ctx->m_cluster_connection, "TEST_DB");
  ndb.init(1);

  NdbDictionary::Dictionary* pDict = ndb.getDictionary();
  const NdbDictionary::Table* pTab = ctx->getTab();
  const char *tab_name = pTab->getName();
  if (pDict->dropTable(tab_name) != 0 &&
      pDict->getNdbError().code != 723) // No such table
  {
    g_err << "runCreateTable: Failed to drop table " << tab_name << endl
          << pDict->getNdbError() << endl;
    return NDBT_FAILED;
  }

  if(NDBT_Tables::createTable(&ndb, tab_name,
                              !ctx->getSuite()->getLogging()) != 0)
  {
    g_err << "runCreateTable: Failed to create table " << tab_name << endl
          << pDict->getNdbError() << endl;
    return NDBT_FAILED;
  }

  // Update ctx with a pointer to the created table
  const NdbDictionary::Table* pTab2 = pDict->getTable(tab_name);
  if (pTab2 == NULL)
  {
    g_err << "runCreateTable: Failed to read back table " << tab_name << endl
          << pDict->getNdbError() << endl;
    return NDBT_FAILED;
  }
  ctx->setTab(pTab2);
  ctx->setProperty("$table", tab_name);

  return NDBT_OK;
}
bool BackupRestore::createSystable(const TableS & tables){ if (!m_restore && !m_restore_meta && !m_restore_epoch) return true; const char *tablename = tables.getTableName(); if( strcmp(tablename, NDB_REP_DB "/def/" NDB_APPLY_TABLE) != 0 && strcmp(tablename, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE) != 0 ) { return true; } BaseString tmp(tablename); Vector<BaseString> split; if(tmp.split(split, "/") != 3){ err << "Invalid table name format " << tablename << endl; return false; } m_ndb->setDatabaseName(split[0].c_str()); m_ndb->setSchemaName(split[1].c_str()); NdbDictionary::Dictionary* dict = m_ndb->getDictionary(); if( dict->getTable(split[2].c_str()) != NULL ){ return true; } return table(tables); }
/* Create the non-logged ordered index T1X1(KOL2, KOL3, KOL4) on table T1. */
int runCreateIndexT1(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary* pDict = pNdb->getDictionary();

  const NdbDictionary::Table* pTab = pDict->getTable("T1");
  if (pTab == 0)
  {
    g_err << "getTable(T1) error: " << pDict->getNdbError() << endl;
    return NDBT_FAILED;
  }

  NdbDictionary::Index ind;
  ind.setName("T1X1");
  ind.setTable("T1");
  ind.setType(NdbDictionary::Index::OrderedIndex);
  ind.setLogging(false);

  const char* index_cols[] = { "KOL2", "KOL3", "KOL4" };
  for (unsigned c = 0; c < sizeof(index_cols) / sizeof(index_cols[0]); c++)
    ind.addColumn(index_cols[c]);

  if (pDict->createIndex(ind, *pTab) != 0)
  {
    g_err << "createIndex(T1X1) error: " << pDict->getNdbError() << endl;
    return NDBT_FAILED;
  }
  return NDBT_OK;
}
/* For every table in table_list that exists, create an event and then drop
 * it again. Skipped entirely when the NoDDL property is set.
 *
 * Bug fix: the first condition was written
 *   if ((res = createEvent(pNdb, *tab) != NDBT_OK))
 * which, due to precedence, assigned the *boolean* of the comparison to res
 * instead of createEvent()'s return code (compare the correct dropEvent call
 * below). The assignment now binds before the comparison.
 */
static int createDropEvent(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary *myDict = pNdb->getDictionary();

  if (ctx->getProperty("NoDDL", Uint32(0)) == 0)
  {
    for (unsigned i = 0; i < table_list.size(); i++)
    {
      int res = NDBT_OK;
      const NdbDictionary::Table* tab =
        myDict->getTable(table_list[i].c_str());
      if (tab == 0)
      {
        continue; // table missing: nothing to exercise
      }
      if ((res = createEvent(pNdb, *tab)) != NDBT_OK)
      {
        return res;
      }
      if ((res = dropEvent(pNdb, *tab)) != NDBT_OK)
      {
        return res;
      }
    }
  }
  return NDBT_OK;
}
/* Create (or re-create) the NDB event this tailer listens on, covering the
 * configured watch events and all configured columns of mTable. If an event
 * with the same name already exists, it is dropped and creation is retried
 * once. All failures are reported through the LOG_* macros.
 */
void TableTailer::createListenerEvent() {
  NdbDictionary::Dictionary *myDict = mNdbConnection->getDictionary();
  if (!myDict) LOG_NDB_API_ERROR(mNdbConnection->getNdbError());
  const NdbDictionary::Table *table = myDict->getTable(mTable.mTableName.c_str());
  if (!table) LOG_NDB_API_ERROR(myDict->getNdbError());

  NdbDictionary::Event myEvent(mEventName.c_str(), *table);
  for(int i=0; i< mTable.mNoEvents; i++){
    myEvent.addTableEvent(mTable.mWatchEvents[i]);
  }
  // NOTE(review): variable-length array is a gcc/clang extension, not
  // standard C++ — consider std::vector<const char*> here.
  const char* columns[mTable.mNoColumns];
  for(int i=0; i< mTable.mNoColumns; i++){
    columns[i] = mTable.mColumnNames[i].c_str();
  }
  myEvent.addEventColumns(mTable.mNoColumns, columns);
  //myEvent.mergeEvents(merge_events);

  // Add event to database
  if (myDict->createEvent(myEvent) == 0)
    myEvent.print();
  else if (myDict->getNdbError().classification ==
           NdbError::SchemaObjectExists) {
    // A stale event from an earlier run blocks creation: drop and retry once.
    LOG_ERROR("Event creation failed, event exists, dropping Event...");
    if (myDict->dropEvent(mEventName.c_str())) LOG_NDB_API_ERROR(myDict->getNdbError());
    // try again
    // Add event to database
    if (myDict->createEvent(myEvent)) LOG_NDB_API_ERROR(myDict->getNdbError());
  } else
    LOG_NDB_API_ERROR(myDict->getNdbError());
}
bool BackupRestore::table_equal(const TableS &tableS) { if (!m_restore) return true; const char *tablename = tableS.getTableName(); if(tableS.m_dictTable == NULL){ ndbout<<"Table %s has no m_dictTable " << tablename << endl; return false; } /** * Ignore blob tables */ if(match_blob(tablename) >= 0) return true; const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* tableS.m_dictTable); if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined){ return true; } BaseString tmp(tablename); Vector<BaseString> split; if(tmp.split(split, "/") != 3){ err << "Invalid table name format " << tablename << endl; return false; } m_ndb->setDatabaseName(split[0].c_str()); m_ndb->setSchemaName(split[1].c_str()); NdbDictionary::Dictionary* dict = m_ndb->getDictionary(); const NdbDictionary::Table* tab = dict->getTable(split[2].c_str()); if(tab == 0){ err << "Unable to find table: " << split[2].c_str() << endl; return false; } if(tab->getNoOfColumns() != tableS.m_dictTable->getNoOfColumns()) { ndbout_c("m_columns.size %d != %d",tab->getNoOfColumns(), tableS.m_dictTable->getNoOfColumns()); return false; } for(int i = 0; i<tab->getNoOfColumns(); i++) { if(!tab->getColumn(i)->equal(*(tableS.m_dictTable->getColumn(i)))) { ndbout_c("m_columns %s != %s",tab->getColumn(i)->getName(), tableS.m_dictTable->getColumn(i)->getName()); return false; } } return true; }
int create_table(){ NdbDictionary::Dictionary* dict = g_ndb->getDictionary(); assert(dict); if(g_paramters[P_CREATE].value){ g_ndb->getDictionary()->dropTable(g_tablename); const NdbDictionary::Table * pTab = NDBT_Tables::getTable(g_tablename); assert(pTab); NdbDictionary::Table copy = * pTab; copy.setLogging(false); if(dict->createTable(copy) != 0){ g_err << "Failed to create table: " << g_tablename << endl; return -1; } NdbDictionary::Index x(g_indexname); x.setTable(g_tablename); x.setType(NdbDictionary::Index::OrderedIndex); x.setLogging(false); for (unsigned k = 0; k < copy.getNoOfColumns(); k++){ if(copy.getColumn(k)->getPrimaryKey()){ x.addColumnName(copy.getColumn(k)->getName()); } } if(dict->createIndex(x) != 0){ g_err << "Failed to create index: " << endl; return -1; } } g_table = dict->getTable(g_tablename); g_index = dict->getIndex(g_indexname, g_tablename); assert(g_table); assert(g_index); if(g_paramters[P_CREATE].value) { int rows = g_paramters[P_ROWS].value; HugoTransactions hugoTrans(* g_table); if (hugoTrans.loadTable(g_ndb, rows)){ g_err.println("Failed to load %s with %d rows", g_table->getName(), rows); return -1; } } return 0; }
/* Load and scan-update every table in table_list.
 *
 * Bug fix: the original ignored a NULL from getTable() (crashing in the
 * HugoTransactions constructor) and discarded the return codes of
 * loadTable()/scanUpdateRecords(), so `result` was always NDBT_OK.
 * Failures are now reflected in the return value.
 */
int runLoadAll(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary * pDict = pNdb->getDictionary();
  int records = ctx->getNumRecords();
  int result = NDBT_OK;

  for (unsigned i = 0; i<table_list.size(); i++)
  {
    const NdbDictionary::Table* tab = pDict->getTable(table_list[i].c_str());
    if (tab == 0)
    {
      g_err << "runLoadAll: Failed to find table "
            << table_list[i].c_str() << endl
            << pDict->getNdbError() << endl;
      return NDBT_FAILED;
    }
    HugoTransactions trans(* tab);
    if (trans.loadTable(pNdb, records) != 0)
      result = NDBT_FAILED;
    if (trans.scanUpdateRecords(pNdb, records) != 0)
      result = NDBT_FAILED;
  }

  return result;
}
int create_table() { NdbDictionary::Dictionary* dict = g_ndb->getDictionary(); assert(dict); if(g_paramters[P_CREATE].value) { const NdbDictionary::Table * pTab = NDBT_Tables::getTable(g_table); assert(pTab); NdbDictionary::Table copy = * pTab; copy.setLogging(false); if(dict->createTable(copy) != 0) { g_err << "Failed to create table: " << g_table << endl; return -1; } NdbDictionary::Index x(g_ordered); x.setTable(g_table); x.setType(NdbDictionary::Index::OrderedIndex); x.setLogging(false); for (unsigned k = 0; k < copy.getNoOfColumns(); k++) { if(copy.getColumn(k)->getPrimaryKey()) { x.addColumn(copy.getColumn(k)->getName()); } } if(dict->createIndex(x) != 0) { g_err << "Failed to create index: " << endl; return -1; } x.setName(g_unique); x.setType(NdbDictionary::Index::UniqueHashIndex); if(dict->createIndex(x) != 0) { g_err << "Failed to create index: " << endl; return -1; } } g_tab = dict->getTable(g_table); g_i_unique = dict->getIndex(g_unique, g_table); g_i_ordered = dict->getIndex(g_ordered, g_table); assert(g_tab); assert(g_i_unique); assert(g_i_ordered); return 0; }
/*
 * ForeignKeyMetadata = {
 *   name              : ""   ,  // Constraint name
 *   columnNames       : null ,  // an ordered array of column numbers
 *   targetTable       : ""   ,  // referenced table name
 *   targetDatabase    : ""   ,  // referenced database name
 *   targetColumnNames : null ,  // an ordered array of target column names
 * };
 */
/* Build a JavaScript ForeignKeyMetadata object (shape above) from an NDB
 * foreign key definition. Uses the member ndb/dict/ndb_table/dbName state
 * of the enclosing GetTableCall.
 */
Handle<Object> GetTableCall::buildDBForeignKey(const NdbDictionary::ForeignKey *fk) {
  HandleScope scope;
  DictionaryNameSplitter localSplitter;
  Local<Object> js_fk = Object::New();

  localSplitter.splitName(fk->getName());  // e.g. "12/20/fkname"
  js_fk->Set(String::NewSymbol("name"), String::New(localSplitter.part3));

  // get child column names
  unsigned int childColumnCount = fk->getChildColumnCount();
  Local<Array> fk_child_column_names = Array::New(childColumnCount);
  for (unsigned i = 0; i < childColumnCount; ++i) {
    int columnNumber = fk->getChildColumnNo(i);
    const NdbDictionary::Column * column = ndb_table->getColumn(columnNumber);
    fk_child_column_names->Set(i, String::New(column->getName()));
  }
  js_fk->Set(String::NewSymbol("columnNames"), fk_child_column_names);

  // get parent table (which might be in a different database)
  const char * fk_parent_name = fk->getParentTable();
  localSplitter.splitName(fk_parent_name);
  const char * parent_db_name = localSplitter.part1;
  const char * parent_table_name = localSplitter.part3;
  js_fk->Set(String::NewSymbol("targetTable"), String::New(parent_table_name));
  js_fk->Set(String::NewSymbol("targetDatabase"), String::New(parent_db_name));
  // Temporarily switch the Ndb object to the parent table's database for
  // the dictionary lookup, then restore the original database name.
  ndb->setDatabaseName(parent_db_name);
  const NdbDictionary::Table * parent_table = dict->getTable(parent_table_name);
  ndb->setDatabaseName(dbName);
  // NOTE(review): parent_table is dereferenced below with no NULL check;
  // a concurrently dropped parent table would crash here — confirm upstream
  // guarantees the parent exists.

  // get parent column names
  unsigned int parentColumnCount = fk->getParentColumnCount();
  Local<Array> fk_parent_column_names = Array::New(parentColumnCount);
  for (unsigned i = 0; i < parentColumnCount; ++i) {
    int columnNumber = fk->getParentColumnNo(i);
    const NdbDictionary::Column * column = parent_table->getColumn(columnNumber);
    fk_parent_column_names->Set(i, String::New( column->getName()));
  }
  js_fk->Set(String::NewSymbol("targetColumnNames"), fk_parent_column_names);

  return scope.Close(js_fk);
}
/* Stress the "rep1" table with alternating pk updates and scan updates
 * until the test is stopped.
 *
 * Bug fix: a missing "rep1" table previously caused a NULL dereference in
 * the HugoTransactions constructor; it now fails the step with a message.
 */
int stressNDB_rep1(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* ndb=GETNDB(step);
  NdbDictionary::Dictionary* myDict = ndb->getDictionary();
  const NdbDictionary::Table * table = myDict->getTable("rep1");
  if (table == 0)
  {
    g_err << "Failed to find table rep1: " << myDict->getNdbError() << endl;
    return NDBT_FAILED;
  }
  HugoTransactions hugoTrans(* table);
  while(!ctx->isTestStopped())
  {
    if (hugoTrans.pkUpdateRecords(GETNDB(step), ctx->getNumRecords(),
                                  1, 30) == NDBT_FAILED)
    {
      g_err << "pkUpdate Failed!" << endl;
      return NDBT_FAILED;
    }
    if (hugoTrans.scanUpdateRecords(GETNDB(step), ctx->getNumRecords(),
                                    1, 30) == NDBT_FAILED)
    {
      g_err << "scanUpdate Failed!" << endl;
      return NDBT_FAILED;
    }
  }
  return NDBT_OK;
}
/* Run every registered test case (or only _testname, when given) against
 * table pTab. Depending on suite configuration the table may be dropped and
 * re-created per test. Updates the pass/fail counters and saves a per-test
 * result for the table.
 */
void NDBT_TestSuite::execute(Ndb_cluster_connection& con,
                             Ndb* ndb, const NdbDictionary::Table* pTab,
                             const char* _testname){
  int result;

  for (unsigned t = 0; t < tests.size(); t++){
    // Filter on test name when one was requested.
    if (_testname != NULL &&
        strcasecmp(tests[t]->getName(), _testname) != 0)
      continue;

    // "all tables" tests execute only once, not once per table.
    if (tests[t]->m_all_tables && tests[t]->m_has_run)
    {
      continue;
    }

    if (tests[t]->isVerify(pTab) == false) {
      continue;
    }

    tests[t]->initBeforeTest();

    NdbDictionary::Dictionary* pDict = ndb->getDictionary();
    const NdbDictionary::Table* pTab2 = pDict->getTable(pTab->getName());
    if (createTable == true){
      // Drop any stale copy, then create fresh via the create hook.
      if(pTab2 != 0 && pDict->dropTable(pTab->getName()) != 0){
        numTestsFail++;
        numTestsExecuted++;
        g_err << "ERROR0: Failed to drop table " << pTab->getName() << endl;
        tests[t]->saveTestResult(pTab, FAILED_TO_CREATE);
        continue;
      }

      if (NDBT_Tables::createTable(ndb, pTab->getName(), false, false,
                                   g_create_hook, this) != 0) {
        numTestsFail++;
        numTestsExecuted++;
        g_err << "ERROR1: Failed to create table " << pTab->getName()
              << pDict->getNdbError() << endl;
        tests[t]->saveTestResult(pTab, FAILED_TO_CREATE);
        continue;
      }
      pTab2 = pDict->getTable(pTab->getName());
    } else if(!pTab2) {
      // Not in the dictionary and we may not create it: fall back to the
      // caller-supplied definition.
      pTab2 = pTab;
    }

    // Fresh context per test case; freed after the test completes.
    ctx = new NDBT_Context(con);
    ctx->setTab(pTab2);
    ctx->setNumRecords(records);
    ctx->setNumLoops(loops);
    if(remote_mgm != NULL)
      ctx->setRemoteMgm(remote_mgm);
    ctx->setSuite(this);

    result = tests[t]->execute(ctx);
    tests[t]->saveTestResult(pTab, result);
    if (result != NDBT_OK)
      numTestsFail++;
    else
      numTestsOk++;
    numTestsExecuted++;

    // Drop the table only when this suite created it individually.
    if (result == NDBT_OK && createTable == true && createAllTables == false){
      pDict->dropTable(pTab->getName());
    }
    tests[t]->m_has_run = true;

    delete ctx;
  }
}
/* After all table definitions have been restored, (re)create every index
 * collected in m_indexes on its base table. Only active when metadata is
 * being restored. Returns false on the first failure.
 */
bool BackupRestore::endOfTables(){
  if(!m_restore_meta)
    return true;

  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  for(size_t i = 0; i<m_indexes.size(); i++){
    NdbTableImpl & indtab = NdbTableImpl::getImpl(* m_indexes[i]);

    // Base table name is expected as three '/'-separated parts
    // (database/schema/table).
    Vector<BaseString> split;
    {
      BaseString tmp(indtab.m_primaryTable.c_str());
      if (tmp.split(split, "/") != 3)
      {
        err << "Invalid table name format `" << indtab.m_primaryTable.c_str()
            << "`" << endl;
        return false;
      }
    }

    m_ndb->setDatabaseName(split[0].c_str());
    m_ndb->setSchemaName(split[1].c_str());

    const NdbDictionary::Table * prim = dict->getTable(split[2].c_str());
    if(prim == 0){
      err << "Unable to find base table `" << split[2].c_str()
          << "` for index `" << indtab.getName() << "`" << endl;
      return false;
    }
    NdbTableImpl& base = NdbTableImpl::getImpl(*prim);
    NdbIndexImpl* idx;
    // Index name is expected as four '/'-separated parts; the last one is
    // the actual index name.
    Vector<BaseString> split_idx;
    {
      BaseString tmp(indtab.getName());
      if (tmp.split(split_idx, "/") != 4)
      {
        err << "Invalid index name format `" << indtab.getName() << "`" << endl;
        return false;
      }
    }
    if(NdbDictInterface::create_index_obj_from_table(&idx, &indtab, &base))
    {
      err << "Failed to create index `" << split_idx[3]
          << "` on " << split[2].c_str() << endl;
      return false;
    }
    idx->setName(split_idx[3].c_str());
    // idx is heap-allocated by create_index_obj_from_table; freed on both
    // the success and failure paths below.
    if(dict->createIndex(* idx) != 0)
    {
      delete idx;
      err << "Failed to create index `" << split_idx[3].c_str()
          << "` on `" << split[2].c_str() << "`" << endl
          << dict->getNdbError() << endl;
      return false;
    }
    delete idx;
    info << "Successfully created index `" << split_idx[3].c_str()
         << "` on `" << split[2].c_str() << "`" << endl;
  }
  return true;
}
bool BackupRestore::table(const TableS & table){ if (!m_restore && !m_restore_meta) return true; const char * name = table.getTableName(); /** * Ignore blob tables */ if(match_blob(name) >= 0) return true; const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable); if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined){ m_indexes.push_back(table.m_dictTable); return true; } BaseString tmp(name); Vector<BaseString> split; if(tmp.split(split, "/") != 3){ err << "Invalid table name format `" << name << "`" << endl; return false; } m_ndb->setDatabaseName(split[0].c_str()); m_ndb->setSchemaName(split[1].c_str()); NdbDictionary::Dictionary* dict = m_ndb->getDictionary(); if(m_restore_meta) { NdbDictionary::Table copy(*table.m_dictTable); copy.setName(split[2].c_str()); Uint32 id; if (copy.getTablespace(&id)) { debug << "Connecting " << name << " to tablespace oldid: " << id << flush; NdbDictionary::Tablespace* ts = m_tablespaces[id]; debug << " newid: " << ts->getObjectId() << endl; copy.setTablespace(* ts); } if (copy.getDefaultNoPartitionsFlag()) { /* Table was defined with default number of partitions. We can restore it with whatever is the default in this cluster. We use the max_rows parameter in calculating the default number. */ Uint32 no_nodes = m_cluster_connection->no_db_nodes(); copy.setFragmentCount(get_no_fragments(copy.getMaxRows(), no_nodes)); set_default_nodegroups(©); } else { /* Table was defined with specific number of partitions. It should be restored with the same number of partitions. It will either be restored in the same node groups as when backup was taken or by using a node group map supplied to the ndb_restore program. 
*/ Uint16 *ng_array = (Uint16*)copy.getFragmentData(); Uint16 no_parts = copy.getFragmentCount(); if (map_nodegroups(ng_array, no_parts)) { if (translate_frm(©)) { err << "Create table " << table.getTableName() << " failed: "; err << "Translate frm error" << endl; return false; } } copy.setFragmentData((const void *)ng_array, no_parts << 1); } /** * Force of varpart was introduced in 5.1.18, telco 6.1.7 and 6.2.1 * Since default from mysqld is to add force of varpart (disable with * ROW_FORMAT=FIXED) we force varpart onto tables when they are restored * from backups taken with older versions. This will be wrong if * ROW_FORMAT=FIXED was used on original table, however the likelyhood of * this is low, since ROW_FORMAT= was a NOOP in older versions. */ if (table.getBackupVersion() < MAKE_VERSION(5,1,18)) copy.setForceVarPart(true); else if (getMajor(table.getBackupVersion()) == 6 && (table.getBackupVersion() < MAKE_VERSION(6,1,7) || table.getBackupVersion() == MAKE_VERSION(6,2,0))) copy.setForceVarPart(true); /* update min and max rows to reflect the table, this to ensure that memory is allocated properly in the ndb kernel */ copy.setMinRows(table.getNoOfRecords()); if (table.getNoOfRecords() > copy.getMaxRows()) { copy.setMaxRows(table.getNoOfRecords()); } NdbTableImpl &tableImpl = NdbTableImpl::getImpl(copy); if (table.getBackupVersion() < MAKE_VERSION(5,1,0) && !m_no_upgrade){ for(int i= 0; i < copy.getNoOfColumns(); i++) { NdbDictionary::Column::Type t = copy.getColumn(i)->getType(); if (t == NdbDictionary::Column::Varchar || t == NdbDictionary::Column::Varbinary) tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeShortVar); if (t == NdbDictionary::Column::Longvarchar || t == NdbDictionary::Column::Longvarbinary) tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeMediumVar); } } if (dict->createTable(copy) == -1) { err << "Create table `" << table.getTableName() << "` failed: " << dict->getNdbError() << endl; if 
(dict->getNdbError().code == 771) { /* The user on the cluster where the backup was created had specified specific node groups for partitions. Some of these node groups didn't exist on this cluster. We will warn the user of this and inform him of his option. */ err << "The node groups defined in the table didn't exist in this"; err << " cluster." << endl << "There is an option to use the"; err << " the parameter ndb-nodegroup-map to define a mapping from"; err << endl << "the old nodegroups to new nodegroups" << endl; } return false; } info << "Successfully restored table `" << table.getTableName() << "`" << endl; } const NdbDictionary::Table* tab = dict->getTable(split[2].c_str()); if(tab == 0){ err << "Unable to find table: `" << split[2].c_str() << "`" << endl; return false; } if(m_restore_meta) { if (tab->getFrmData()) { // a MySQL Server table is restored, thus an event should be created BaseString event_name("REPL$"); event_name.append(split[0].c_str()); event_name.append("/"); event_name.append(split[2].c_str()); NdbDictionary::Event my_event(event_name.c_str()); my_event.setTable(*tab); my_event.addTableEvent(NdbDictionary::Event::TE_ALL); // add all columns to the event bool has_blobs = false; for(int a= 0; a < tab->getNoOfColumns(); a++) { my_event.addEventColumn(a); NdbDictionary::Column::Type t = tab->getColumn(a)->getType(); if (t == NdbDictionary::Column::Blob || t == NdbDictionary::Column::Text) has_blobs = true; } if (has_blobs) my_event.mergeEvents(true); while ( dict->createEvent(my_event) ) // Add event to database { if (dict->getNdbError().classification == NdbError::SchemaObjectExists) { info << "Event for table " << table.getTableName() << " already exists, removing.\n"; if (!dict->dropEvent(my_event.getName())) continue; } err << "Create table event for " << table.getTableName() << " failed: " << dict->getNdbError() << endl; dict->dropTable(split[2].c_str()); return false; } info << "Successfully restored table event " << event_name << endl ; 
} } const NdbDictionary::Table* null = 0; m_new_tables.fill(table.m_dictTable->getTableId(), null); m_new_tables[table.m_dictTable->getTableId()] = tab; return true; }
/* Apply a rotating workload to every table in table_list until the test is
 * stopped: (0) load + updates + reads, (1) scan update plus T1-specific
 * interpreted-update and optional range-scan (bug#13834481 regression
 * check) then clear/reload, (2) clear/reload/clear, (3) event create/drop.
 */
int runBasic(NDBT_Context* ctx, NDBT_Step* step)
{
  Uint32 useRangeScanT1 = ctx->getProperty("UseRangeScanT1", (uint32)0);
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary * pDict = pNdb->getDictionary();
  int records = ctx->getNumRecords();
  int result = NDBT_OK;

  int l = 0;
  while (!ctx->isTestStopped())
  {
    for (unsigned i = 0; i<table_list.size(); i++)
    {
      const NdbDictionary::Table* tab = pDict->getTable(table_list[i].c_str());
      HugoTransactions trans(* tab);
      // Workload phase rotates with the loop counter.
      switch(l % 4)
      {
      case 0:
        trans.loadTable(pNdb, records);
        trans.scanUpdateRecords(pNdb, records);
        trans.pkUpdateRecords(pNdb, records);
        trans.pkReadUnlockRecords(pNdb, records);
        break;
      case 1:
        trans.scanUpdateRecords(pNdb, records);
        // TODO make pkInterpretedUpdateRecords work on any table
        // (or check if it does)
        if (strcmp(tab->getName(), "T1") == 0)
          trans.pkInterpretedUpdateRecords(pNdb, records);
        if (strcmp(tab->getName(), "T1") == 0 && useRangeScanT1)
        {
          const NdbDictionary::Index* pInd = pDict->getIndex("T1X1", "T1");
          if (pInd == 0)
          {
            g_err << "getIndex(T1X1) error: " << pDict->getNdbError() << endl;
            return NDBT_FAILED;
          }
          // bug#13834481 - bound values do not matter
          const Uint32 lo = 0x11110000;
          const Uint32 hi = 0xaaaa0000;
          // Up to 3 lower (LE) and 3 upper (GE) bounds on random prefixes.
          HugoTransactions::HugoBound bound_arr[6];
          int bound_cnt = 0;
          for (int j = 0; j <= 1; j++)
          {
            int n = rand() % 4;
            for (int i = 0; i < n; i++)
            {
              HugoTransactions::HugoBound& b = bound_arr[bound_cnt++];
              b.attr = i;
              b.type = (j == 0 ? 0 : 2); // LE/GE
              b.value = (j == 0 ? &lo : &hi);
            }
          }
          g_info << "range scan T1 with " << bound_cnt << " bounds" << endl;
          if (trans.scanReadRecords(pNdb, pInd, records, 0, 0,
                                    NdbOperation::LM_Read, 0,
                                    bound_cnt, bound_arr) != 0)
          {
            const NdbError& err = trans.getNdbError();
            /*
             * bug#13834481 symptoms include timeouts and error 1231.
             * Check for any non-temporary error.
             */
            if (err.status == NdbError::TemporaryError)
            {
              g_info << "range scan T1 temporary error: " << err << endl;
            }
            if (err.status != NdbError::TemporaryError)
            {
              g_err << "range scan T1 permanent error: " << err << endl;
              return NDBT_FAILED;
            }
          }
        }
        trans.clearTable(pNdb, records/2);
        trans.loadTable(pNdb, records/2);
        break;
      case 2:
        trans.clearTable(pNdb, records/2);
        trans.loadTable(pNdb, records/2);
        trans.clearTable(pNdb, records/2);
        break;
      case 3:
        if (createDropEvent(ctx, step))
        {
          return NDBT_FAILED;
        }
        break;
      }
    }
    l++;
  }

  return result;
}
/* Restore one table definition (simpler variant without tablespace or
 * partition handling): defer indexes to endOfTables(), skip blob part
 * tables, optionally create the table when restoring metadata, and record
 * the new dictionary object under the table's original id in m_new_tables.
 */
bool BackupRestore::table(const TableS & table){
  if (!m_restore && !m_restore_meta)
    return true;

  const char * name = table.getTableName();

  /**
   * Ignore blob tables
   */
  if(match_blob(name) >= 0)
    return true;

  const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable);
  if(tmptab.m_indexType != NdbDictionary::Index::Undefined){
    // Indexes are collected now and created after all tables.
    m_indexes.push_back(table.m_dictTable);
    return true;
  }

  BaseString tmp(name);
  Vector<BaseString> split;
  if(tmp.split(split, "/") != 3){
    err << "Invalid table name format `" << name << "`" << endl;
    return false;
  }

  m_ndb->setDatabaseName(split[0].c_str());
  m_ndb->setSchemaName(split[1].c_str());

  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  if(m_restore_meta){
    NdbDictionary::Table copy(*table.m_dictTable);
    copy.setName(split[2].c_str());

    /*
      update min and max rows to reflect the table, this to
      ensure that memory is allocated properly in the ndb kernel
    */
    copy.setMinRows(table.getNoOfRecords());
    if (table.getNoOfRecords() > copy.getMaxRows())
    {
      copy.setMaxRows(table.getNoOfRecords());
    }

    if (dict->createTable(copy) == -1)
    {
      err << "Create table `" << table.getTableName() << "` failed: "
          << dict->getNdbError() << endl;
      return false;
    }
    info << "Successfully restored table `"
         << table.getTableName() << "`" << endl;
  }

  const NdbDictionary::Table* tab = dict->getTable(split[2].c_str());
  if(tab == 0){
    err << "Unable to find table: `" << split[2].c_str() << "`" << endl;
    return false;
  }
  // Record original table id -> new dictionary object (grow as needed).
  const NdbDictionary::Table* null = 0;
  m_new_tables.fill(table.m_dictTable->getTableId(), null);
  m_new_tables[table.m_dictTable->getTableId()] = tab;
  return true;
}
/* Event-listener tool: subscribes to change events ("EV-<table>") for every
 * table named on the command line and prints per-GCI insert/update/delete
 * counts forever. With -C a second cluster connection is opened and each
 * event batch is re-applied there as one transaction (do_begin/do_insert/
 * do_update/do_delete/do_commit).
 */
int main(int argc, const char** argv){
  ndb_init();

  int _help = 0;
  const char* db = 0;
  const char* connectstring1 = 0;
  const char* connectstring2 = 0;

  struct getargs args[] = {
    { "connectstring1", 'c', arg_string, &connectstring1, "connectstring1", "" },
    { "connectstring2", 'C', arg_string, &connectstring2, "connectstring2", "" },
    { "database", 'd', arg_string, &db, "Database", "" },
    { "usage", '?', arg_flag, &_help, "Print help", "" }
  };
  int num_args = sizeof(args) / sizeof(args[0]);
  int optind = 0, i;
  char desc[] = "<tabname>+ \nThis program listen to events on specified tables\n";

  if(getarg(args, num_args, argc, argv, &optind) ||
     argv[optind] == NULL || _help) {
    arg_printusage(args, num_args, argv[0], desc);
    return NDBT_ProgramExit(NDBT_WRONGARGS);
  }

  // Connect to Ndb
  Ndb_cluster_connection con(connectstring1);
  if(con.connect(12, 5, 1) != 0)
  {
    return NDBT_ProgramExit(NDBT_FAILED);
  }
  Ndb MyNdb( &con, db ? db : "TEST_DB" );

  if(MyNdb.init() != 0){
    ERR(MyNdb.getNdbError());
    return NDBT_ProgramExit(NDBT_FAILED);
  }

  // Connect to Ndb and wait for it to become ready
  while(MyNdb.waitUntilReady() != 0)
    ndbout << "Waiting for ndb to become ready..." << endl;

  // Optional second cluster used as the replication target.
  Ndb_cluster_connection *con2 = NULL;
  Ndb *ndb2 = NULL;
  if (connectstring2)
  {
    con2 = new Ndb_cluster_connection(connectstring2);
    if(con2->connect(12, 5, 1) != 0)
    {
      return NDBT_ProgramExit(NDBT_FAILED);
    }
    ndb2 = new Ndb( con2, db ? db : "TEST_DB" );

    if(ndb2->init() != 0){
      ERR(ndb2->getNdbError());
      return NDBT_ProgramExit(NDBT_FAILED);
    }

    // Connect to Ndb and wait for it to become ready
    while(ndb2->waitUntilReady() != 0)
      ndbout << "Waiting for ndb to become ready..." << endl;
  }

  int result = 0;

  NdbDictionary::Dictionary *myDict = MyNdb.getDictionary();
  Vector<NdbDictionary::Event*> events;
  Vector<NdbEventOperation*> event_ops;
  int sz = 0;
  // One event + event operation per table named on the command line.
  for(i= optind; i<argc; i++)
  {
    const NdbDictionary::Table* table= myDict->getTable(argv[i]);
    if(!table)
    {
      ndbout_c("Could not find table: %s, skipping", argv[i]);
      continue;
    }

    BaseString name;
    name.appfmt("EV-%s", argv[i]);
    NdbDictionary::Event *myEvent= new NdbDictionary::Event(name.c_str());
    myEvent->setTable(table->getName());
    myEvent->addTableEvent(NdbDictionary::Event::TE_ALL);
    for(int a = 0; a < table->getNoOfColumns(); a++){
      myEvent->addEventColumn(a);
    }

    if (myDict->createEvent(* myEvent))
    {
      if(myDict->getNdbError().classification == NdbError::SchemaObjectExists)
      {
        // Stale event from an earlier run: drop it and retry once.
        g_info << "Event creation failed event exists. Removing...\n";
        if (myDict->dropEvent(name.c_str()))
        {
          g_err << "Failed to drop event: " << myDict->getNdbError() << endl;
          result = 1;
          goto end;
        }
        // try again
        if (myDict->createEvent(* myEvent))
        {
          g_err << "Failed to create event: " << myDict->getNdbError() << endl;
          result = 1;
          goto end;
        }
      }
      else
      {
        g_err << "Failed to create event: " << myDict->getNdbError() << endl;
        result = 1;
        goto end;
      }
    }

    events.push_back(myEvent);

    NdbEventOperation* pOp = MyNdb.createEventOperation(name.c_str());
    if ( pOp == NULL ) {
      g_err << "Event operation creation failed" << endl;
      result = 1;
      goto end;
    }

    // Register receivers for current and pre-change values of all columns.
    event_values.push_back(Vector<NdbRecAttr *>());
    event_pre_values.push_back(Vector<NdbRecAttr *>());
    for (int a = 0; a < table->getNoOfColumns(); a++)
    {
      event_values[sz].push_back(pOp->getValue(table->getColumn(a)->getName()));
      event_pre_values[sz].push_back(pOp->getPreValue(table->getColumn(a)->getName()));
    }
    event_ops.push_back(pOp);
    {
      struct Table_info ti;
      ti.id = sz;
      table_infos.push_back(ti);
    }
    pOp->setCustomData((void *)&table_infos[sz]);
    sz++;
  }

  for(i= 0; i<(int)event_ops.size(); i++)
  {
    if (event_ops[i]->execute())
    {
      g_err << "operation execution failed: "
            << event_ops[i]->getNdbError() << endl;
      result = 1;
      goto end;
    }
  }

  struct Trans_arg trans_arg;
  // Main loop: drain events per GCI, mirror them to ndb2 when configured,
  // and print per-GCI counters. Runs until the process is killed.
  while(true)
  {
    while(MyNdb.pollEvents(100) == 0);

    NdbEventOperation* pOp= MyNdb.nextEvent();
    while(pOp)
    {
      Uint64 gci= pOp->getGCI();
      Uint64 cnt_i= 0, cnt_u= 0, cnt_d= 0;
      if (ndb2)
        do_begin(ndb2, trans_arg);
      do
      {
        switch(pOp->getEventType())
        {
        case NdbDictionary::Event::TE_INSERT:
          cnt_i++;
          if (ndb2)
            do_insert(trans_arg, pOp);
          break;
        case NdbDictionary::Event::TE_DELETE:
          cnt_d++;
          if (ndb2)
            do_delete(trans_arg, pOp);
          break;
        case NdbDictionary::Event::TE_UPDATE:
          cnt_u++;
          if (ndb2)
            do_update(trans_arg, pOp);
          break;
        case NdbDictionary::Event::TE_CLUSTER_FAILURE:
          break;
        case NdbDictionary::Event::TE_ALTER:
          break;
        case NdbDictionary::Event::TE_DROP:
          break;
        case NdbDictionary::Event::TE_NODE_FAILURE:
          break;
        case NdbDictionary::Event::TE_SUBSCRIBE:
        case NdbDictionary::Event::TE_UNSUBSCRIBE:
          break;
        default:
          /* We should REALLY never get here. */
          ndbout_c("Error: unknown event type: %u",
                   (Uint32)pOp->getEventType());
          abort();
        }
      } while ((pOp= MyNdb.nextEvent()) && gci == pOp->getGCI());
      if (ndb2)
        do_commit(trans_arg);
      ndbout_c("GCI: %lld events: %lld(I) %lld(U) %lld(D)", gci, cnt_i, cnt_u, cnt_d);
    }
  }
end:
  // Error path: release event operations and secondary connection.
  for(i= 0; i<(int)event_ops.size(); i++)
    MyNdb.dropEventOperation(event_ops[i]);

  if (ndb2)
    delete ndb2;
  if (con2)
    delete con2;
  return NDBT_ProgramExit(NDBT_OK);
}
int supersizeme(Ndb * ndb,char * db, char * tbl, bool ftScan, bool ignoreData) { bool varFound; bool varFoundui; int dm_per_rec=0; int im_per_rec=0; int disk_per_rec=0; int noOfOrderedIndexes=0, noOfUniqueHashIndexes=0, noOfBlobs=0; int tmpDm=0,tmpIm=0, tmpDisk=0; ndb->setDatabaseName(db); NdbDictionary::Dictionary * dict = ndb->getDictionary(); const NdbDictionary::Table * table = dict->getTable(tbl); if(table == 0) { printf( "table %s in database %s not found!\n", tbl,db); return -1; } bool isTable=false; printf("\nCalculating storage cost per record for table %s\n", table->getName()); calculate_dm( ndb, table, NULL, tmpDm, tmpDisk, ftScan, noOfBlobs,ignoreData,varFound ); // Gerald there is at least 1 PK (hidden or real) and not return by listIndexes() // So add according OH + increment noOfUniqueHashIndexes tmpIm = OH_PK; noOfUniqueHashIndexes++; dm_per_rec +=tmpDm; disk_per_rec +=tmpDisk; im_per_rec +=tmpIm; NdbDictionary::Dictionary::List list; dict->listIndexes(list, *table); int no_attrs=table->getNoOfColumns(); for (unsigned i = 0; i < list.count; i++) { NdbDictionary::Dictionary::List::Element& elt = list.elements[i]; if (verbose) { printf("Analysing element : %s, Type : %s \n", elt.name, elementTypeStr[elt.type] ); } switch (elt.type) { case NdbDictionary::Object::UniqueHashIndex: { const NdbDictionary::Index * ix = dict->getIndex(elt.name, table->getName()); printf( "---\tWARNING! 
Unique Index found named (\"%s\"): \n",elt.name); int pk_cols=0; calculate_dm_ui(ndb, table, ix, tmpDm, tmpDisk, ftScan, noOfBlobs, pk_cols,varFoundui); printf( "---\t\tUnique Index Cost - DataMemory per record = %d and IndexMemory = %d\n", tmpDm, tmpIm ); //Gerald : OH_PK already include and OH_UNIQUE8HASH_INDEX is included by calculate_dm_ui // tmpIm = OH_PK; dm_per_rec += tmpDm; disk_per_rec += tmpDisk; im_per_rec += tmpIm; isTable = true; noOfUniqueHashIndexes++; //no_attrs+=(ix->getNoOfColumns()+pk_cols); } break; case NdbDictionary::Object::OrderedIndex: tmpDm = OH_ORDERED_INDEX; tmpIm = 0; printf( "---\tOrdered Index found named (%s" "). Additional cost per record is = %d" " bytes of DataMemory.\n", elt.name, tmpDm ); dm_per_rec += tmpDm; isTable = true; noOfOrderedIndexes++; break; default: break; } } int rows = 0; if (select_count(ndb, table, 240, &rows, NdbOperation::LM_CommittedRead) <0){ printf( "counting rows failed\n" ); return 0; } printf("\nRecord size (incl OH):" "\n\t#Rows found=%d records " "\n\t#OrderedIndexes=%d" "\n\t#UniqueHashIndexes=%d " "\n\t#blob/text=%d " "\n\t#attributes=%d " "\n\tDataMemory=%d bytes " "\n\tIndexMemory=%d bytes" "\n\tDiskspace=%d bytes\n\n", rows, noOfOrderedIndexes, noOfUniqueHashIndexes, noOfBlobs, no_attrs, dm_per_rec, im_per_rec, disk_per_rec); printf("\n\nAppending the following to %s.csv \n",db); printf("%s,%s,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n\n", db, table->getName(), rows, 1, noOfOrderedIndexes, noOfUniqueHashIndexes, noOfBlobs, no_attrs, im_per_rec, dm_per_rec, disk_per_rec, varFound ? 1:0, varFoundui ? 
1:0); char filename[255]; if(g_analyze_all) { if(!g_multi_db) sprintf(filename,"%s.csv",db); else strcpy(filename,"all_databases.csv"); } else sprintf(filename,"%s_%s.csv",db,tbl); FILE * fh = fopen(filename,"a+"); char row[128]; sprintf(row, "%s,%s,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n" , db , table->getName() , rows , 1, noOfOrderedIndexes, noOfUniqueHashIndexes, noOfBlobs, no_attrs, im_per_rec , dm_per_rec , disk_per_rec, varFound ? 1:0, varFoundui ? 1:0); fwrite(row, strlen(row),1, fh); fclose(fh); return 1; }
/*
 * Run one buffer-alignment test case against table TB<flag>.
 *
 * The low four bits of `flag` select the variation (a CLEAR bit enables
 * the "plain" behaviour):
 *   bit 0: align buffer addresses       bit 1: align data sizes
 *   bit 2: use our own buffers          bit 3: simple (non-random) sizes
 *
 * The case (re)creates the table, deletes any leftover rows, inserts
 * `opercnt` rows, reads each row back by primary key, then scans for each
 * key with interpreted code — verifying after every read that the bytes
 * before and after each column value in our buffer were not clobbered.
 *
 * Uses file-scope state: tab, ccol, attrcnt, opercnt, ndb, tcon, top,
 * con, op, sop, key, and the option flags set above.
 * Returns 0 on success, otherwise the value of ndberror()/chkerror().
 */
static int testcase(Ndb_cluster_connection&cc, int flag)
{
  ndbout << "--- case " << flag << " ---" << endl;
  sprintf(tab, "TB%02d", flag);

  // Decode the option bits for this case
  alignAddr = ! (flag & 1);
  ndbout << (alignAddr ? "align addresses" : "mis-align addresses") << endl;
  alignSize = ! (flag & 2);
  ndbout << (alignSize ? "align data sizes" : "mis-align data sizes") << endl;
  useBuf = ! (flag & 4);
  ndbout << (useBuf ? "use our buffers" : "use ndbapi buffers") << endl;
  noRandom = ! (flag & 8);
  ndbout << (noRandom ? "simple sizes" : "randomize sizes") << endl;

  int smax = 0, stot = 0, i;
  if (xverbose)
    ndbout << "- define table " << tab << endl;
  // Column 0 is the unsigned primary key; the rest are nullable strings
  // whose array sizes come from makeSize().
  for (i = 0; i < attrcnt; i++) {
    col& c = ccol[i];
    memset(&c, 0, sizeof(c));
    sprintf(c.aAttrName, "C%d", i);
    if (i == 0) {
      c.aAttrType = UnSigned;
      c.aAttrSize = 32;
      c.aArraySize = 1;
      c.aTupleKey = TupleKey;
      c.nullable = false;
    } else {
      c.aAttrType = String;
      c.aAttrSize = 8;
      c.aArraySize = makeSize(i);
      if (smax < c.aArraySize)
        smax = c.aArraySize;
      stot += c.aArraySize;
      c.aTupleKey = NoKey;
      c.nullable = true;
      if (xverbose)
        ndbout << "-- column " << i << " size=" << c.aArraySize << endl;
    }
    // Working buffer starts at an aligned address inside c.data
    c.buf = toAligned(c.data);
    c.bufsiz = (int)(sizeof(c.data) - (c.buf - c.data));
  }
  ndbout << "tab=" << tab << " cols=" << attrcnt
         << " size max=" << smax << " tot=" << stot << endl;

  // Create the table; error 721 (already exists) is tolerated when existok
  if ((tcon = NdbSchemaCon::startSchemaTrans(ndb)) == 0)
    return ndberror("startSchemaTransaction");
  if ((top = tcon->getNdbSchemaOp()) == 0)
    return ndberror("getNdbSchemaOp");
  if (top->createTable(tab) < 0)
    return ndberror("createTable");
  for (i = 0; i < attrcnt; i++) {
    col& c = ccol[i];
    if (top->createAttribute( c.aAttrName, c.aTupleKey, c.aAttrSize,
                              c.aArraySize, c.aAttrType, MMBased,
                              c.nullable ) < 0)
      return ndberror("createAttribute col=%d", i);
  }
  if (tcon->execute() < 0) {
    if (! (tcon->getNdbError().code == 721 && existok))
      return ndberror("execute");
    ndbout << "using " << tab << endl;
  } else {
    ndbout << "created " << tab << endl;
  }
  top = 0;
  tcon = 0;

  // Delete any leftover rows; 626 (tuple did not exist) is not an error
  if (xverbose)
    ndbout << "- delete" << endl;
  int delcnt = 0;
  for (key = 0; key < opercnt; key++) {
    if ((con = ndb->startTransaction()) == 0)
      return ndberror("startTransaction key=%d", key);
    if ((op = con->getNdbOperation(tab)) == 0)
      return ndberror("getNdbOperation key=%d", key);
    if (op->deleteTuple() < 0)
      return ndberror("deleteTuple key=%d", key);
    for (i = 0; i < attrcnt; i++) {
      col& c = ccol[i];
      if (i == 0) {
        if (op->equal(c.aAttrName, (char*)&key, sizeof(key)) < 0)
          return ndberror("equal key=%d", key);
      } else {
      }
    }
    if (con->execute(Commit) < 0) {
      if (con->getNdbError().code != 626)
        return ndberror("execute key=%d", key);
    } else {
      delcnt++;
    }
    ndb->closeTransaction(con);
  }
  con = 0;
  op = 0;
  ndbout << "deleted " << delcnt << endl;

  // Insert opercnt rows; data is written at offset `off` inside our buffer
  // so that mis-alignment variations can be exercised
  if (xverbose)
    ndbout << "- insert" << endl;
  for (key = 0; key < opercnt; key++) {
    int off = makeOff(key);
    if ((con = ndb->startTransaction()) == 0)
      return ndberror("startTransaction key=%d", key);
    if ((op = con->getNdbOperation(tab)) == 0)
      return ndberror("getNdbOperation key=%d", key);
    if (op->insertTuple() < 0)
      return ndberror("insertTuple key=%d", key);
    for (i = 0; i < attrcnt; i++) {
      col& c = ccol[i];
      if (i == 0) {
        if (op->equal(c.aAttrName, (char*)&key, sizeof(key)) < 0)
          return ndberror("equal key=%d", key);
      } else {
        // Fill with sentinel 'A', then place the real value at `off`
        memset(c.buf, 'A', c.bufsiz);
        for (int j = 0; j < c.aArraySize; j++)
          c.buf[j + off] = byteVal(key, i, j);
        if (op->setValue(c.aAttrName, c.buf + off, c.aArraySize) < 0)
          return ndberror("setValue key=%d col=%d", key, i);
      }
    }
    if (con->execute(Commit) < 0)
      return ndberror("execute key=%d", key);
    ndb->closeTransaction(con);
  }
  con = 0;
  op = 0;
  ndbout << "inserted " << key << endl;

  // Read each row back by primary key and verify the buffer contents:
  // bytes before/after the value must still hold the sentinel 'B'
  if (xverbose)
    ndbout << "- select" << endl;
  for (key = 0; key < opercnt; key++) {
    int off = makeOff(key);
    if (xverbose)
      ndbout << "-- key " << key << " off=" << off << endl;
    if ((con = ndb->startTransaction()) == 0)
      return ndberror("startTransaction key=%d", key);
    if ((op = con->getNdbOperation(tab)) == 0)
      return ndberror("getNdbOperation key=%d", key);
    if (op->readTuple() < 0)
      return ndberror("readTuple key=%d", key);
    for (i = 0; i < attrcnt; i++) {
      col& c = ccol[i];
      if (i == 0) {
        if (op->equal(c.aAttrName, (char*)&key, sizeof(key)) < 0)
          return ndberror("equal key=%d", key);
      } else {
        if (xverbose) {
          char tmp[20];
          if (useBuf)
            sprintf(tmp, "0x%p", c.buf + off);
          else
            strcpy(tmp, "ndbapi");
          ndbout << "--- column " << i << " addr=" << tmp << endl;
        }
        memset(c.buf, 'B', c.bufsiz);
        if (useBuf) {
          // Receive directly into our (possibly mis-aligned) buffer
          if (op->getValue(c.aAttrName, c.buf + off) < 0)
            return ndberror("getValue key=%d col=%d", key, i);
        } else {
          // Let the NDB API manage the buffer; keep the NdbRecAttr handle
          if ((c.aRa = op->getValue(c.aAttrName)) == 0)
            return ndberror("getValue key=%d col=%d", key, i);
        }
      }
    }
    if (con->execute(Commit) != 0)
      return ndberror("execute key=%d", key);
    for (i = 0; i < attrcnt; i++) {
      col& c = ccol[i];
      if (i == 0) {
      } else if (useBuf) {
        int j;
        // Guard bytes before the value must be untouched
        for (j = 0; j < off; j++) {
          if (c.buf[j] != 'B') {
            return chkerror("mismatch before key=%d col=%d pos=%d ok=%02x bad=%02x", key, i, j, 'B', c.buf[j]);
          }
        }
        // The value itself must round-trip exactly
        for (j = 0; j < c.aArraySize; j++) {
          if (c.buf[j + off] != byteVal(key, i, j)) {
            return chkerror("mismatch key=%d col=%d pos=%d ok=%02x bad=%02x", key, i, j, byteVal(key, i, j), c.buf[j]);
          }
        }
        // Guard bytes after the value must be untouched
        for (j = c.aArraySize + off; j < c.bufsiz; j++) {
          if (c.buf[j] != 'B') {
            return chkerror("mismatch after key=%d col=%d pos=%d ok=%02x bad=%02x", key, i, j, 'B', c.buf[j]);
          }
        }
      } else {
        char* buf = c.aRa->aRef();
        if (buf == 0)
          return ndberror("null aRef key=%d col%d", key, i);
        for (int j = 0; j < c.aArraySize; j++) {
          if (buf[j] != byteVal(key, i, j)) {
            return chkerror("mismatch key=%d col=%d pos=%d ok=%02x bad=%02x", key, i, j, byteVal(key, i, j), buf[j]);
          }
        }
      }
    }
    ndb->closeTransaction(con);
  }
  con = 0;
  op = 0;
  ndbout << "selected " << key << endl;

  // Scan for each key using an interpreted filter program, verifying the
  // returned data the same way (sentinel here is 'C')
  if (xverbose)
    ndbout << "- scan" << endl;
  char found[MaxOper];
  int k;
  NdbDictionary::Dictionary * dict = ndb->getDictionary();
  const NdbDictionary::Table * table = dict->getTable(tab);
  for (k = 0; k < opercnt; k++)
    found[k] = 0;
  for (key = 0; key < opercnt; key++) {
    int off = makeOff(key);
    NdbInterpretedCode codeObj(table);
    NdbInterpretedCode *code= &codeObj;
    if (xverbose)
      ndbout << "-- key " << key << " off=" << off << endl;
    int newkey = 0;
    if ((con = ndb->startTransaction()) == 0)
      return ndberror("startTransaction key=%d", key);
    if ((op = sop = con->getNdbScanOperation(tab)) == 0)
      return ndberror("getNdbOperation key=%d", key);
    if (sop->readTuples(1))
      return ndberror("openScanRead key=%d", key);
    {
      // Interpreted program: accept only rows whose key column equals `key`
      col& c = ccol[0];
      Uint32 colNum= table->getColumn(c.aAttrName)->getAttrId();
      if (code->load_const_u32(1, key) < 0)
        return ndberror("load_const_u32");
      if (code->read_attr(2, colNum) < 0)
        return ndberror("read_attr");
      if (code->branch_eq(1, 2, 0) < 0)
        return ndberror("branch_eq");
      if (code->interpret_exit_nok() < 0)
        return ndberror("interpret_exit_nok");
      if (code->def_label(0) < 0)
        return ndberror("def_label");
      if (code->interpret_exit_ok() < 0)
        return ndberror("interpret_exit_ok");
      if (code->finalise() != 0)
        return ndberror("finalise");
      if (sop->setInterpretedCode(code) != 0)
        return ndberror("setInterpretedCode");
    }
    for (i = 0; i < attrcnt; i++) {
      col& c = ccol[i];
      if (i == 0) {
        if (op->getValue(c.aAttrName, (char*)&newkey) < 0)
          return ndberror("getValue key=%d col=%d", key, i);
      } else {
        if (xverbose) {
          char tmp[20];
          if (useBuf)
            sprintf(tmp, "0x%p", c.buf + off);
          else
            strcpy(tmp, "ndbapi");
          ndbout << "--- column " << i << " addr=" << tmp << endl;
        }
        memset(c.buf, 'C', c.bufsiz);
        if (useBuf) {
          if (op->getValue(c.aAttrName, c.buf + off) < 0)
            return ndberror("getValue key=%d col=%d", key, i);
        } else {
          if ((c.aRa = op->getValue(c.aAttrName)) == 0)
            return ndberror("getValue key=%d col=%d", key, i);
        }
      }
    }
    if (con->execute(NoCommit) < 0)
      return ndberror("executeScan key=%d", key);
    int ret, cnt = 0;
    while ((ret = sop->nextResult()) == 0) {
      if (key != newkey)
        return ndberror("unexpected key=%d newkey=%d", key, newkey);
      for (i = 1; i < attrcnt; i++) {
        col& c = ccol[i];
        if (useBuf) {
          int j;
          for (j = 0; j < off; j++) {
            if (c.buf[j] != 'C') {
              return chkerror("mismatch before key=%d col=%d pos=%d ok=%02x bad=%02x", key, i, j, 'C', c.buf[j]);
            }
          }
          for (j = 0; j < c.aArraySize; j++) {
            if (c.buf[j + off] != byteVal(key, i, j)) {
              return chkerror("mismatch key=%d col=%d pos=%d ok=%02x bad=%02x", key, i, j, byteVal(key, i, j), c.buf[j]);
            }
          }
          for (j = c.aArraySize + off; j < c.bufsiz; j++) {
            if (c.buf[j] != 'C') {
              return chkerror("mismatch after key=%d col=%d pos=%d ok=%02x bad=%02x", key, i, j, 'C', c.buf[j]);
            }
          }
        } else {
          char* buf = c.aRa->aRef();
          if (buf == 0)
            return ndberror("null aRef key=%d col%d", key, i);
          for (int j = 0; j < c.aArraySize; j++) {
            if (buf[j] != byteVal(key, i, j)) {
              return chkerror("mismatch key=%d col=%d pos=%d ok=%02x bad=%02x", key, i, j, byteVal(key, i, j), buf[j]);
            }
          }
        }
      }
      cnt++;
    }
    if (ret < 0)
      return ndberror("nextScanResult key=%d", key);
    // Each scan with the equality filter must match exactly one row
    if (cnt != 1)
      return ndberror("scan key=%d found %d", key, cnt);
    found[key] = 1;
    ndb->closeTransaction(con);
  }
  con = 0;
  op = 0;
  for (k = 0; k < opercnt; k++)
    if (! found[k])
      return ndberror("key %d not found", k);
  ndbout << "scanned " << key << endl;
  ndbout << "done" << endl;
  return 0;
}
/* Prepare the benchmark table, its ordered index, and NdbRecord instances.
 *
 * When P_CREATE is set, drops and recreates the table (non-logging) plus an
 * ordered index over its primary-key columns, and finally loads P_ROWS rows.
 * Always caches the table/index metadata and NdbRecords into the globals
 * g_table, g_index, g_table_record, g_index_record.
 * Returns 0 on success, -1 on failure.
 */
int create_table(){
  NdbDictionary::Dictionary* dict = g_ndb->getDictionary();
  assert(dict);
  if(g_paramters[P_CREATE].value){
    // Best-effort drop; failure (e.g. table absent) is ignored
    g_ndb->getDictionary()->dropTable(g_tablename);
    const NdbDictionary::Table * pTab = NDBT_Tables::getTable(g_tablename);
    assert(pTab);
    NdbDictionary::Table copy = * pTab;
    copy.setLogging(false);  // benchmark table: no redo logging
    if(dict->createTable(copy) != 0){
      g_err << "Failed to create table: " << g_tablename << endl;
      return -1;
    }

    // Ordered index over all primary-key columns of the table
    NdbDictionary::Index x(g_indexname);
    x.setTable(g_tablename);
    x.setType(NdbDictionary::Index::OrderedIndex);
    x.setLogging(false);
    for (unsigned k = 0; k < (unsigned) copy.getNoOfColumns(); k++){
      if(copy.getColumn(k)->getPrimaryKey()){
        x.addColumnName(copy.getColumn(k)->getName());
      }
    }

    if(dict->createIndex(x) != 0){
      g_err << "Failed to create index: " << endl;
      return -1;
    }
  }
  g_table = dict->getTable(g_tablename);
  g_index = dict->getIndex(g_indexname, g_tablename);
  assert(g_table);
  assert(g_index);

  /* Obtain NdbRecord instances for the table and index */
  {
    NdbDictionary::RecordSpecification spec[ NDB_MAX_ATTRIBUTES_IN_TABLE ];

    Uint32 offset=0;
    Uint32 cols= g_table->getNoOfColumns();
    for (Uint32 colNum=0; colNum<cols; colNum++)
    {
      const NdbDictionary::Column* col= g_table->getColumn(colNum);
      // NOTE(review): the offset advances by Column::getLength(); presumably
      // adequate for this benchmark's column types, but getLength() is an
      // element count rather than a byte size — TODO confirm.
      Uint32 colLength= col->getLength();

      spec[colNum].column= col;
      spec[colNum].offset= offset;
      offset+= colLength;
      // One byte per column reserved for the null indicator, bit 0
      spec[colNum].nullbit_byte_offset= offset++;
      spec[colNum].nullbit_bit_in_byte= 0;
    }

    g_table_record= dict->createRecord(g_table,
                                       &spec[0],
                                       cols,
                                       sizeof(NdbDictionary::RecordSpecification));

    assert(g_table_record);
  }
  {
    NdbDictionary::RecordSpecification spec[ NDB_MAX_ATTRIBUTES_IN_TABLE ];

    Uint32 offset=0;
    Uint32 cols= g_index->getNoOfColumns();
    for (Uint32 colNum=0; colNum<cols; colNum++)
    {
      /* Get column from the underlying table */
      // TODO : Add this mechanism to dict->createRecord
      // TODO : Add NdbRecord queryability methods so that an NdbRecord can
      // be easily built and later used to read out data.
      const NdbDictionary::Column* col=
        g_table->getColumn(g_index->getColumn(colNum)->getName());
      Uint32 colLength= col->getLength();

      spec[colNum].column= col;
      spec[colNum].offset= offset;
      offset+= colLength;
      spec[colNum].nullbit_byte_offset= offset++;
      spec[colNum].nullbit_bit_in_byte= 0;
    }

    g_index_record= dict->createRecord(g_index,
                                       &spec[0],
                                       cols,
                                       sizeof(NdbDictionary::RecordSpecification));

    assert(g_index_record);
  }

  if(g_paramters[P_CREATE].value)
  {
    int rows = g_paramters[P_ROWS].value;
    HugoTransactions hugoTrans(* g_table);
    if (hugoTrans.loadTable(g_ndb, rows)){
      g_err.println("Failed to load %s with %d rows",
                    g_table->getName(), rows);
      return -1;
    }
  }

  return 0;
}
/* Command-line tool: change the number of partitions (fragments) of one or
 * more NDB tables.
 *
 * Usage: <prog> [-d database] [-c connectstring] [-p partitions] tabname...
 * Each table is altered inside its own schema transaction; on any failure
 * the transaction is aborted and the program exits with NDBT_FAILED.
 */
int main(int argc, const char** argv){
  ndb_init();

  int _help = 0;
  int _p = 0;                       // new number of partitions (0 = default)
  const char * db = "TEST_DB";
  const char* _connectstr = NULL;

  struct getargs args[] = {
    { "database", 'd', arg_string, &db, "database", 0 },
    { "connstr", 'c', arg_string, &_connectstr, "Connect string", "cs" },
    { "partitions", 'p', arg_integer, &_p, "New no of partitions", 0},
    { "usage", '?', arg_flag, &_help, "Print help", "" }
  };
  int num_args = sizeof(args) / sizeof(args[0]);
  int optind = 0;
  char desc[] = "tabname\n" \
    "This program will alter no of partitions of table in Ndb.\n";

  if(getarg(args, num_args, argc, argv, &optind) || _help) {
    arg_printusage(args, num_args, argv[0], desc);
    return NDBT_ProgramExit(NDBT_WRONGARGS);
  }

  // At least one table name is required
  if(argv[optind] == NULL)
  {
    arg_printusage(args, num_args, argv[0], desc);
    return NDBT_ProgramExit(NDBT_WRONGARGS);
  }

  // Connect to Ndb
  Ndb_cluster_connection con(_connectstr);
  if(con.connect(12, 5, 1) != 0)
  {
    return NDBT_ProgramExit(NDBT_FAILED);
  }

  Ndb MyNdb(&con, db );
  if(MyNdb.init() != 0){
    ERR(MyNdb.getNdbError());
    return NDBT_ProgramExit(NDBT_FAILED);
  }

  while(MyNdb.waitUntilReady() != 0)
    ndbout << "Waiting for ndb to become ready..." << endl;

  NdbDictionary::Dictionary* MyDic = MyNdb.getDictionary();
  for (int i = optind; i<argc; i++)
  {
    printf("altering %s/%s...", db, argv[i]);
    const NdbDictionary::Table* oldTable = MyDic->getTable(argv[i]);
    if (oldTable == 0)
    {
      ndbout << "Failed to retrieve table " << argv[i]
             << ": " << MyDic->getNdbError() << endl;
      return NDBT_ProgramExit(NDBT_FAILED);
    }

    NdbDictionary::Table newTable = *oldTable;
    newTable.setFragmentCount(_p);

    // All steps for one table run inside one schema transaction;
    // any failure jumps to err: which aborts the open transaction.
    if (MyDic->beginSchemaTrans() != 0)
      goto err;
    if (MyDic->prepareHashMap(*oldTable, newTable) != 0)
      goto err;
    if (MyDic->alterTable(*oldTable, newTable) != 0)
      goto err;
    if (MyDic->endSchemaTrans())
      goto err;
    ndbout_c("done");
  }
  return NDBT_ProgramExit(NDBT_OK);

err:
  // Capture the error before aborting — endSchemaTrans() would reset it
  NdbError err = MyDic->getNdbError();
  if (MyDic->hasSchemaTrans())
    MyDic->endSchemaTrans(NdbDictionary::Dictionary::SchemaTransAbort);
  ndbout << "Failed! " << err << endl;
  return NDBT_ProgramExit(NDBT_FAILED);
}
void GetTableCall::run() { DEBUG_PRINT("GetTableCall::run() [%s.%s]", arg1, arg2); return_val = -1; /* dbName is optional; if not present, set it from ndb database name */ if(strlen(dbName)) { ndb->setDatabaseName(dbName); } else { dbName = ndb->getDatabaseName(); } dict = ndb->getDictionary(); ndb_table = dict->getTable(tableName); if(ndb_table) { /* Ndb object used to create NdbRecords and to cache auto-increment values */ per_table_ndb = new Ndb(& ndb->get_ndb_cluster_connection()); DEBUG_PRINT("per_table_ndb %s.%s %p\n", dbName, tableName, per_table_ndb); per_table_ndb->init(); /* List the indexes */ return_val = dict->listIndexes(idx_list, tableName); } if(return_val == 0) { /* Fetch the indexes now. These calls may perform network IO, populating the (connection) global and (Ndb) local dictionary caches. Later, in the JavaScript main thread, we will call getIndex() again knowing that the caches are populated. */ for(unsigned int i = 0 ; i < idx_list.count ; i++) { const NdbDictionary::Index * idx = dict->getIndex(idx_list.elements[i].name, tableName); /* It is possible to get an index for a recently dropped table rather than the desired table. This is a known bug likely to be fixed later. */ const char * idx_table_name = idx->getTable(); const NdbDictionary::Table * idx_table = dict->getTable(idx_table_name); if(idx_table == 0 || idx_table->getObjectVersion() != ndb_table->getObjectVersion()) { dict->invalidateIndex(idx); idx = dict->getIndex(idx_list.elements[i].name, tableName); } } } else { DEBUG_PRINT("listIndexes() returned %i", return_val); ndbError = & dict->getNdbError(); return; } /* List the foreign keys and keep the list around for doAsyncCallback to create js objects * Currently there is no listForeignKeys so we use the more generic listDependentObjects * specifying the table metadata object. */ return_val = dict->listDependentObjects(fk_list, *ndb_table); if (return_val == 0) { /* Fetch the foreign keys and associated parent tables now. 
* These calls may perform network IO, populating * the (connection) global and (Ndb) local dictionary caches. Later, * in the JavaScript main thread, we will call getForeignKey() again knowing * that the caches are populated. * We only care about foreign keys where this table is the child table, not the parent table. */ for(unsigned int i = 0 ; i < fk_list.count ; i++) { NdbDictionary::ForeignKey fk; if (fk_list.elements[i].type == NdbDictionary::Object::ForeignKey) { const char * fk_name = fk_list.elements[i].name; int fkGetCode = dict->getForeignKey(fk, fk_name); DEBUG_PRINT("getForeignKey for %s returned %i", fk_name, fkGetCode); // see if the foreign key child table is this table if(splitNameMatchesDbAndTable(fk.getChildTable())) { // the foreign key child table is this table; get the parent table ++fk_count; DEBUG_PRINT("Getting ParentTable"); splitter.splitName(fk.getParentTable()); ndb->setDatabaseName(splitter.part1); // temp for next call const NdbDictionary::Table * parent_table = dict->getTable(splitter.part3); ndb->setDatabaseName(dbName); // back to expected value DEBUG_PRINT("Parent table getTable returned %s", parent_table->getName()); } } } } else { DEBUG_PRINT("listDependentObjects() returned %i", return_val); ndbError = & dict->getNdbError(); } }
int runPostUpgradeChecks(NDBT_Context* ctx, NDBT_Step* step) { /** * Table will be dropped/recreated * automatically by NDBT... * so when we enter here, this is already tested */ NdbBackup backup; ndbout << "Starting backup..." << flush; if (backup.start() != 0) { ndbout << "Failed" << endl; return NDBT_FAILED; } ndbout << "done" << endl; if ((ctx->getProperty("NoDDL", Uint32(0)) == 0) && (ctx->getProperty("KeepFS", Uint32(0)) != 0)) { /** * Bug48227 * Upgrade with FS 6.3->7.0, followed by table * create, followed by Sys restart resulted in * table loss. */ Ndb* pNdb = GETNDB(step); NdbDictionary::Dictionary *pDict = pNdb->getDictionary(); { NdbDictionary::Dictionary::List l; pDict->listObjects(l); for (Uint32 i = 0; i<l.count; i++) ndbout_c("found %u : %s", l.elements[i].id, l.elements[i].name); } pDict->dropTable("I3"); if (NDBT_Tables::createTable(pNdb, "I3")) { ndbout_c("Failed to create table!"); ndbout << pDict->getNdbError() << endl; return NDBT_FAILED; } { NdbDictionary::Dictionary::List l; pDict->listObjects(l); for (Uint32 i = 0; i<l.count; i++) ndbout_c("found %u : %s", l.elements[i].id, l.elements[i].name); } NdbRestarter res; if (res.restartAll() != 0) { ndbout_c("restartAll() failed"); return NDBT_FAILED; } if (res.waitClusterStarted() != 0) { ndbout_c("waitClusterStarted() failed"); return NDBT_FAILED; } if (pDict->getTable("I3") == 0) { ndbout_c("Table disappered"); return NDBT_FAILED; } } return NDBT_OK; }
/* Event-API system test driver.
 *
 * Creates the NDBT test tables (or the tables named on the command line),
 * subscribes to events on each, mirrors every change into a per-table
 * <name>_SHADOW copy via copy_events(), and verifies the copies with
 * verify_copy() — including across ten random node restarts.
 * Exits with NDBT_OK only if every step succeeded.
 */
int main(int argc, char** argv)
{
  NDB_INIT(argv[0]);
  const char *load_default_groups[]= { "mysql_cluster",0 };
  load_defaults("my",load_default_groups,&argc,&argv);
  int ho_error;
#ifndef DBUG_OFF
  opt_debug= "d:t:F:L";
#endif
  if ((ho_error=handle_options(&argc, &argv, my_long_options,
                               ndb_std_get_one_option)))
    return NDBT_ProgramExit(NDBT_WRONGARGS);

  DBUG_ENTER("main");
  Ndb_cluster_connection con(opt_connect_str);
  if(con.connect(12, 5, 1))
  {
    DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
  }

  Ndb ndb(&con,_dbname);
  ndb.init();
  while (ndb.waitUntilReady() != 0);

  NdbDictionary::Dictionary * dict = ndb.getDictionary();
  int no_error= 1;    // sticky success flag; cleared by any failure below
  int i;

  // create all tables
  Vector<const NdbDictionary::Table*> pTabs;
  if (argc == 0)
  {
    // No tables named: recreate the full NDBT table set
    NDBT_Tables::dropAllTables(&ndb);
    NDBT_Tables::createAllTables(&ndb);
    for (i= 0; no_error && i < NDBT_Tables::getNumTables(); i++)
    {
      const NdbDictionary::Table *pTab= dict->getTable(NDBT_Tables::getTable(i)->getName());
      if (pTab == 0)
      {
        ndbout << "Failed to create table" << endl;
        ndbout << dict->getNdbError() << endl;
        no_error= 0;
        break;
      }
      pTabs.push_back(pTab);
    }
  }
  else
  {
    // Recreate exactly the tables named on the command line
    for (i= 0; no_error && argc; argc--, i++)
    {
      dict->dropTable(argv[i]);
      NDBT_Tables::createTable(&ndb, argv[i]);
      const NdbDictionary::Table *pTab= dict->getTable(argv[i]);
      if (pTab == 0)
      {
        ndbout << "Failed to create table" << endl;
        ndbout << dict->getNdbError() << endl;
        no_error= 0;
        break;
      }
      pTabs.push_back(pTab);
    }
  }
  pTabs.push_back(NULL);  // NULL sentinel terminates the loops below

  // create an event for each table
  for (i= 0; no_error && pTabs[i]; i++)
  {
    HugoTransactions ht(*pTabs[i]);
    if (ht.createEvent(&ndb)){
      no_error= 0;
      break;
    }
  }

  // create an event operation for each event
  Vector<NdbEventOperation *> pOps;
  for (i= 0; no_error && pTabs[i]; i++)
  {
    char buf[1024];
    sprintf(buf, "%s_EVENT", pTabs[i]->getName());
    NdbEventOperation *pOp= ndb.createEventOperation(buf, 1000);
    if ( pOp == NULL )
    {
      no_error= 0;
      break;
    }
    pOps.push_back(pOp);
  }

  // get storage for each event operation (current and pre-update values)
  for (i= 0; no_error && pTabs[i]; i++)
  {
    int n_columns= pTabs[i]->getNoOfColumns();
    for (int j = 0; j < n_columns; j++)
    {
      pOps[i]->getValue(pTabs[i]->getColumn(j)->getName());
      pOps[i]->getPreValue(pTabs[i]->getColumn(j)->getName());
    }
  }

  // start receiving events
  for (i= 0; no_error && pTabs[i]; i++)
  {
    if ( pOps[i]->execute() )
    {
      no_error= 0;
      break;
    }
  }

  // create a "shadow" table for each table
  Vector<const NdbDictionary::Table*> pShadowTabs;
  for (i= 0; no_error && pTabs[i]; i++)
  {
    char buf[1024];
    sprintf(buf, "%s_SHADOW", pTabs[i]->getName());

    dict->dropTable(buf);
    if (dict->getTable(buf))
    {
      no_error= 0;
      break;
    }

    NdbDictionary::Table table_shadow(*pTabs[i]);
    table_shadow.setName(buf);
    dict->createTable(table_shadow);
    pShadowTabs.push_back(dict->getTable(buf));
    if (!pShadowTabs[i])
    {
      no_error= 0;
      break;
    }
  }

  // create a hugo operation per table
  Vector<HugoOperations *> hugo_ops;
  for (i= 0; no_error && pTabs[i]; i++)
  {
    hugo_ops.push_back(new HugoOperations(*pTabs[i]));
  }

  int n_records= 3;
  // insert n_records records per table
  do {
    if (start_transaction(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
    for (i= 0; no_error && pTabs[i]; i++)
    {
      hugo_ops[i]->pkInsertRecord(&ndb, 0, n_records);
    }
    if (execute_commit(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
    if(close_transaction(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
  } while(0);

  // copy events and verify
  do {
    if (copy_events(&ndb) < 0)
    {
      no_error= 0;
      break;
    }
    if (verify_copy(&ndb, pTabs, pShadowTabs))
    {
      no_error= 0;
      break;
    }
  } while (0);

  // update n_records-1 records in first table
  do {
    if (start_transaction(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }

    hugo_ops[0]->pkUpdateRecord(&ndb, n_records-1);

    if (execute_commit(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
    if(close_transaction(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
  } while(0);

  // copy events and verify
  do {
    if (copy_events(&ndb) < 0)
    {
      no_error= 0;
      break;
    }
    if (verify_copy(&ndb, pTabs, pShadowTabs))
    {
      no_error= 0;
      break;
    }
  } while (0);

  {
    // Repeat the update/copy/verify cycle across random node restarts
    NdbRestarts restarts;
    for (int j= 0; j < 10; j++)
    {
      // restart a node
      if (no_error)
      {
        int timeout = 240;
        if (restarts.executeRestart("RestartRandomNodeAbort", timeout))
        {
          no_error= 0;
          break;
        }
      }

      // update all n_records records on all tables
      if (start_transaction(&ndb, hugo_ops))
      {
        no_error= 0;
        break;
      }

      for (int r= 0; r < n_records; r++)
      {
        for (i= 0; pTabs[i]; i++)
        {
          hugo_ops[i]->pkUpdateRecord(&ndb, r);
        }
      }
      if (execute_commit(&ndb, hugo_ops))
      {
        no_error= 0;
        break;
      }
      if(close_transaction(&ndb, hugo_ops))
      {
        no_error= 0;
        break;
      }

      // copy events and verify
      if (copy_events(&ndb) < 0)
      {
        no_error= 0;
        break;
      }
      if (verify_copy(&ndb, pTabs, pShadowTabs))
      {
        no_error= 0;
        break;
      }
    }
  }

  // drop the event operations
  for (i= 0; i < (int)pOps.size(); i++)
  {
    if (ndb.dropEventOperation(pOps[i]))
    {
      no_error= 0;
    }
  }

  if (no_error)
    DBUG_RETURN(NDBT_ProgramExit(NDBT_OK));
  DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
}
/* Drain all pending events from `ndb` and replay each one (insert, delete
 * or update) against the corresponding <table>_SHADOW table, retrying on
 * temporary errors.
 *
 * @return number of events applied (>= 0) on success, -1 on any error.
 *         Callers test `copy_events(...) < 0`, so every error path must
 *         return a negative value.
 */
static int copy_events(Ndb *ndb)
{
  DBUG_ENTER("copy_events");
  int r= 0;
  NdbDictionary::Dictionary * dict = ndb->getDictionary();
  while (1)
  {
    int res= ndb->pollEvents(1000); // wait for event or 1000 ms
    DBUG_PRINT("info", ("pollEvents res=%d", res));
    if (res <= 0)
    {
      break;
    }
    int error= 0;
    NdbEventOperation *pOp;
    while ((pOp= ndb->nextEvent(&error)))
    {
      char buf[1024];
      sprintf(buf, "%s_SHADOW", pOp->getTable()->getName());
      const NdbDictionary::Table *table= dict->getTable(buf);
      if (table == 0)
      {
        g_err << "unable to find table " << buf << endl;
        DBUG_RETURN(-1);
      }

      if (pOp->isOverrun())
      {
        g_err << "buffer overrun\n";
        DBUG_RETURN(-1);
      }
      r++;

      /* (cleanup: the old unused `Uint32 gci` local, which also truncated
         the Uint64 GCI, has been removed) */
      if (!pOp->isConsistent()) {
        g_err << "A node failure has occured and events might be missing\n";
        DBUG_RETURN(-1);
      }

      // Replay the event against the shadow table, retrying up to 10 times
      // on temporary errors
      int noRetries= 0;
      do
      {
        NdbTransaction *trans= ndb->startTransaction();
        if (trans == 0)
        {
          g_err << "startTransaction failed "
                << ndb->getNdbError().code << " "
                << ndb->getNdbError().message << endl;
          DBUG_RETURN(-1);
        }

        NdbOperation *op= trans->getNdbOperation(table);
        if (op == 0)
        {
          g_err << "getNdbOperation failed "
                << trans->getNdbError().code << " "
                << trans->getNdbError().message << endl;
          DBUG_RETURN(-1);
        }

        switch (pOp->getEventType()) {
        case NdbDictionary::Event::TE_INSERT:
          if (op->insertTuple())
          {
            g_err << "insertTuple "
                  << op->getNdbError().code << " "
                  << op->getNdbError().message << endl;
            DBUG_RETURN(-1);
          }
          break;
        case NdbDictionary::Event::TE_DELETE:
          if (op->deleteTuple())
          {
            g_err << "deleteTuple "
                  << op->getNdbError().code << " "
                  << op->getNdbError().message << endl;
            DBUG_RETURN(-1);
          }
          break;
        case NdbDictionary::Event::TE_UPDATE:
          if (op->updateTuple())
          {
            g_err << "updateTuple "
                  << op->getNdbError().code << " "
                  << op->getNdbError().message << endl;
            DBUG_RETURN(-1);
          }
          break;
        default:
          abort();
        }

        {
          // Bind the primary key columns
          for (const NdbRecAttr *pk= pOp->getFirstPkAttr();
               pk;
               pk= pk->next())
          {
            if (pk->isNULL())
            {
              g_err << "internal error: primary key isNull()="
                    << pk->isNULL() << endl;
              /* BUG FIX: was DBUG_RETURN(NDBT_FAILED) — a positive value,
                 which the `copy_events(...) < 0` caller check missed. */
              DBUG_RETURN(-1);
            }
            if (op->equal(pk->getColumn()->getColumnNo(),pk->aRef()))
            {
              g_err << "equal " << pk->getColumn()->getColumnNo() << " "
                    << op->getNdbError().code << " "
                    << op->getNdbError().message << endl;
              /* BUG FIX: was DBUG_RETURN(NDBT_FAILED) — see above. */
              DBUG_RETURN(-1);
            }
          }
        }

        switch (pOp->getEventType()) {
        case NdbDictionary::Event::TE_INSERT:
        {
          for (const NdbRecAttr *data= pOp->getFirstDataAttr();
               data;
               data= data->next())
          {
            if (data->isNULL() < 0 ||
                op->setValue(data->getColumn()->getColumnNo(),
                             data->isNULL() ? 0:data->aRef()))
            {
              g_err << "setValue(insert) " << data->getColumn()->getColumnNo()
                    << " " << op->getNdbError().code
                    << " " << op->getNdbError().message << endl;
              DBUG_RETURN(-1);
            }
          }
          break;
        }
        case NdbDictionary::Event::TE_DELETE:
          break;
        case NdbDictionary::Event::TE_UPDATE:
        {
          for (const NdbRecAttr *data= pOp->getFirstDataAttr();
               data;
               data= data->next())
          {
            if (data->isNULL() >= 0 &&
                op->setValue(data->getColumn()->getColumnNo(),
                             data->isNULL() ? 0:data->aRef()))
            {
              g_err << "setValue(update) " << data->getColumn()->getColumnNo()
                    << " " << op->getNdbError().code
                    << " " << op->getNdbError().message << endl;
              /* BUG FIX: was DBUG_RETURN(NDBT_FAILED) — see above. */
              DBUG_RETURN(-1);
            }
          }
          break;
        }
        case NdbDictionary::Event::TE_ALL:
          abort();
        default:
          break;  // other event types carry no data to copy
        }

        if (trans->execute(Commit) == 0)
        {
          trans->close();
          // everything ok
          break;
        }
        if (noRetries++ == 10 ||
            trans->getNdbError().status != NdbError::TemporaryError)
        {
          g_err << "execute " << r << " failed "
                << trans->getNdbError().code << " "
                << trans->getNdbError().message << endl;
          trans->close();
          DBUG_RETURN(-1);
        }
        trans->close();
        NdbSleep_MilliSleep(100); // sleep before retying
      } while(1);
    } // for
    if (error)
    {
      g_err << "nextEvent()\n";
      DBUG_RETURN(-1);
    }
  } // while(1)
  DBUG_RETURN(r);
}
/* Entry point of an NDBT test suite binary.
 *
 * Parses the standard NDBT command-line options, optionally (re)creates the
 * tables under test, then runs either all tables (executeAll) or only the
 * tables named on the command line (executeOne each), and finally drops the
 * tables it created.  Returns the NDBT_ProgramExit() code of the result.
 */
int NDBT_TestSuite::execute(int argc, const char** argv){
  int res = NDBT_FAILED;
  /* Arguments:
       Run only a subset of tests
       -n testname      Which test to run

     Recommendations to test functions:
     --records  Number of records to use (default: 10000)
     --loops    Number of loops to execute in the test (default: 100)

     Other parameters should:
     * be calculated from the above two parameters
     * be divided into different test cases, ex. one testcase runs
       with FragmentType = Single and another performs the same test
       with FragmentType = Large
     * let the test case iterate over all/subset of appropriate parameters
       ex. iterate over FragmentType = Single to FragmentType = AllLarge

     Remember that the intention is that it should be _easy_ to run a
     complete test suite without any greater knowledge of what should be
     tested, i.e. keep arguments at a minimum
  */

  char **_argv= (char **)argv;

  if (!my_progname)
    my_progname= _argv[0];

  const char *load_default_groups[]= { "mysql_cluster",0 };

  load_defaults("my",load_default_groups,&argc,&_argv);

  int ho_error;
#ifndef DBUG_OFF
  opt_debug= "d:t:i:F:L";
#endif
  if ((ho_error=handle_options(&argc, &_argv, my_long_options,
                               ndb_std_get_one_option)))
  {
    usage();
    return NDBT_ProgramExit(NDBT_WRONGARGS);
  }

  // The --print* options only report the test structure and exit
  if (opt_print == true){
    printExecutionTree();
    return 0;
  }

  if (opt_print_html == true){
    printExecutionTreeHTML();
    return 0;
  }

  if (opt_print_cases == true){
    printCases();
    return 0;
  }

  if (opt_verbose)
    setOutputLevel(2); // Show g_info
  else
    setOutputLevel(0); // Show only g_err ?

  remote_mgm = opt_remote_mgm;
  records = opt_records;
  loops = opt_loops;
  timer = opt_timer;

  Ndb_cluster_connection con;
  if(con.connect(12, 5, 1))
  {
    return NDBT_ProgramExit(NDBT_FAILED);
  }

  // Seed both PRNGs; report the seed so failing runs can be reproduced
  if (opt_seed == 0)
  {
    opt_seed = NdbTick_CurrentMillisecond();
  }
  ndbout_c("random seed: %u", opt_seed);
  srand(opt_seed);
  srandom(opt_seed);

  global_flag_skip_invalidate_cache = 1;

  {
    // Record the tables under test, creating them first when requested
    Ndb ndb(&con, "TEST_DB");
    ndb.init(1024);
    if (ndb.waitUntilReady(500)){
      g_err << "Ndb was not ready" << endl;
      return NDBT_ProgramExit(NDBT_FAILED);
    }
    NdbDictionary::Dictionary* pDict = ndb.getDictionary();

    int num_tables= argc;
    if (argc == 0)
      num_tables = NDBT_Tables::getNumTables();

    for(int i = 0; i<num_tables; i++)
    {
      if (argc == 0)
        m_tables_in_test.push_back(NDBT_Tables::getTable(i)->getName());
      else
        m_tables_in_test.push_back(_argv[i]);
      if (createAllTables == true)
      {
        const char *tab_name= m_tables_in_test[i].c_str();
        const NdbDictionary::Table* pTab = pDict->getTable(tab_name);
        if (pTab && pDict->dropTable(tab_name) != 0)
        {
          g_err << "ERROR0: Failed to drop table " << tab_name
                << pDict->getNdbError() << endl;
          return NDBT_ProgramExit(NDBT_FAILED);
        }
        if(NDBT_Tables::createTable(&ndb, tab_name) != 0)
        {
          g_err << "ERROR1: Failed to create table " << tab_name
                << pDict->getNdbError() << endl;
          return NDBT_ProgramExit(NDBT_FAILED);
        }
      }
    }
  }

  if(argc == 0){
    // No table specified
    res = executeAll(con, opt_testname);
  } else {
    testSuiteTimer.doStart();
    for(int i = 0; i<argc; i++){
      executeOne(con, _argv[i], opt_testname);
    }
    testSuiteTimer.doStop();
    res = report(opt_testname);
  }

  // Clean up tables we created, but only after a fully successful run
  if (res == NDBT_OK && createAllTables == true)
  {
    Ndb ndb(&con, "TEST_DB");
    ndb.init(1024);
    if (ndb.waitUntilReady(500)){
      g_err << "Ndb was not ready" << endl;
      return NDBT_ProgramExit(NDBT_FAILED);
    }
    NdbDictionary::Dictionary* pDict = ndb.getDictionary();
    for(unsigned i = 0; i<m_tables_in_test.size(); i++)
    {
      pDict->dropTable(m_tables_in_test[i].c_str());
    }
  }

  return NDBT_ProgramExit(res);
}