int
NDBT_TestSuite::createTables(Ndb_cluster_connection& con) const
{
  Ndb ndb(&con, "TEST_DB");
  ndb.init(1);

  NdbDictionary::Dictionary* pDict = ndb.getDictionary();
  for(unsigned i = 0; i < m_tables_in_test.size(); i++)
  {
    const char *tab_name= m_tables_in_test[i].c_str();
    if (pDict->dropTable(tab_name) != 0 &&
        pDict->getNdbError().code != 723) // No such table
    {
      g_err << "runCreateTables: Failed to drop table " << tab_name << endl
            << pDict->getNdbError() << endl;
      return NDBT_FAILED;
    }
    if(NDBT_Tables::createTable(&ndb, tab_name, !getLogging()) != 0)
    {
      g_err << "runCreateTables: Failed to create table " << tab_name << endl
            << pDict->getNdbError() << endl;
      return NDBT_FAILED;
    }

    if (i == 0)
    {
      // Update ctx with a pointer to the first created table
      const NdbDictionary::Table* pTab2 = pDict->getTable(tab_name);
      ctx->setTab(pTab2);
    }
    g_info << "created " << tab_name << endl;
  }
  return NDBT_OK;
}
static int
runCreateTable(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb ndb(&ctx->m_cluster_connection, "TEST_DB");
  ndb.init(1);

  NdbDictionary::Dictionary* pDict = ndb.getDictionary();
  const NdbDictionary::Table* pTab = ctx->getTab();
  const char *tab_name= pTab->getName();
  if (pDict->dropTable(tab_name) != 0 &&
      pDict->getNdbError().code != 723) // No such table
  {
    g_err << "runCreateTable: Failed to drop table " << tab_name << endl
          << pDict->getNdbError() << endl;
    return NDBT_FAILED;
  }

  if(NDBT_Tables::createTable(&ndb, tab_name,
                              !ctx->getSuite()->getLogging()) != 0)
  {
    g_err << "runCreateTable: Failed to create table " << tab_name
          << pDict->getNdbError() << endl;
    return NDBT_FAILED;
  }

  // Update ctx with a pointer to the created table
  const NdbDictionary::Table* pTab2 = pDict->getTable(tab_name);
  ctx->setTab(pTab2);
  ctx->setProperty("$table", tab_name);

  return NDBT_OK;
}
int
runDDL(NDBT_Context* ctx, NDBT_Step* step){
  Ndb* pNdb= GETNDB(step);
  NdbDictionary::Dictionary* pDict = pNdb->getDictionary();

  const int tables = NDBT_Tables::getNumTables();
  while(!ctx->isTestStopped())
  {
    // Pick a random test table and give it a per-step unique name
    const int tab_no = rand() % (tables);
    NdbDictionary::Table tab = *NDBT_Tables::getTable(tab_no);
    BaseString name= tab.getName();
    name.appfmt("-%d", step->getStepNo());
    tab.setName(name.c_str());
    if(pDict->createTable(tab) == 0)
    {
      HugoTransactions hugoTrans(* pDict->getTable(name.c_str()));
      if (hugoTrans.loadTable(pNdb, 10000) != 0){
        return NDBT_FAILED;
      }

      // Retry the drop until it succeeds or the cluster fails (error 4009)
      while(pDict->dropTable(tab.getName()) != 0 &&
            pDict->getNdbError().code != 4009)
        g_err << pDict->getNdbError() << endl;

      sleep(1);
    }
  }
  return NDBT_OK;
}
static int
runDropTable(NDBT_Context* ctx, NDBT_Step* step)
{
  const char * tab_name = ctx->getProperty("$table", (const char*)0);
  if (tab_name)
  {
    Ndb ndb(&ctx->m_cluster_connection, "TEST_DB");
    ndb.init(1);

    NdbDictionary::Dictionary* pDict = ndb.getDictionary();
    pDict->dropTable(tab_name);
  }
  return NDBT_OK;
}
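// --- Illustrative wiring (assumption, not part of the original source) ---
// A minimal sketch of how step functions such as runCreateTable, runDDL and
// runDropTable are typically registered with the standard NDBT macros. The
// suite name "testDict" and the test-case name/description are hypothetical.
NDBT_TESTSUITE(testDict);
TESTCASE("CreateAndDropWithDDL",
         "Create a table, run concurrent DDL, then drop the table"){
  INITIALIZER(runCreateTable);
  STEP(runDDL);
  FINALIZER(runDropTable);
}
NDBT_TESTSUITE_END(testDict);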
int
NDBT_TestSuite::dropTables(Ndb_cluster_connection& con) const
{
  Ndb ndb(&con, "TEST_DB");
  ndb.init(1);

  NdbDictionary::Dictionary* pDict = ndb.getDictionary();
  for(unsigned i = 0; i < m_tables_in_test.size(); i++)
  {
    const char *tab_name= m_tables_in_test[i].c_str();
    pDict->dropTable(tab_name);
  }
  return NDBT_OK;
}
static int
drop_all_tables()
{
  NdbDictionary::Dictionary * dict = g_ndb->getDictionary();
  require(dict);

  BaseString db = g_ndb->getDatabaseName();
  BaseString schema = g_ndb->getSchemaName();

  NdbDictionary::Dictionary::List list;
  if (dict->listObjects(list, NdbDictionary::Object::TypeUndefined) == -1){
    g_err << "Failed to list tables: " << endl
          << dict->getNdbError() << endl;
    return -1;
  }
  for (unsigned i = 0; i < list.count; i++) {
    NdbDictionary::Dictionary::List::Element& elt = list.elements[i];
    switch (elt.type) {
    case NdbDictionary::Object::SystemTable:
    case NdbDictionary::Object::UserTable:
      g_ndb->setDatabaseName(elt.database);
      g_ndb->setSchemaName(elt.schema);
      if(dict->dropTable(elt.name) != 0){
        g_err << "Failed to drop table: "
              << elt.database << "/" << elt.schema << "/" << elt.name << endl;
        g_err << dict->getNdbError() << endl;
        return -1;
      }
      break;
    case NdbDictionary::Object::UniqueHashIndex:
    case NdbDictionary::Object::OrderedIndex:
    case NdbDictionary::Object::HashIndexTrigger:
    case NdbDictionary::Object::IndexTrigger:
    case NdbDictionary::Object::SubscriptionTrigger:
    case NdbDictionary::Object::ReadOnlyConstraint:
    default:
      // Indexes and triggers are dropped implicitly with their tables
      break;
    }
  }

  g_ndb->setDatabaseName(db.c_str());
  g_ndb->setSchemaName(schema.c_str());
  return 0;
}
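// Hedged usage sketch (assumption): drop_all_tables() relies on the global
// g_ndb having been initialised against a live cluster, along the lines of
// the connect sequence used elsewhere in this code:
//
//   Ndb_cluster_connection con(opt_connect_str);
//   if (con.connect(12, 5, 1) != 0)
//     return -1;                        // could not reach the cluster
//   g_ndb = new Ndb(&con, "TEST_DB");
//   g_ndb->init();
//   if (g_ndb->waitUntilReady(30) != 0)
//     return -1;                        // data nodes not ready
//   if (drop_all_tables() != 0)
//     return -1;                        // listing or dropping failed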
int
main(int argc, char** argv)
{
  NDB_INIT(argv[0]);
  const char *load_default_groups[]= { "mysql_cluster", 0 };
  load_defaults("my", load_default_groups, &argc, &argv);
  int ho_error;
#ifndef DBUG_OFF
  opt_debug= "d:t:F:L";
#endif
  if ((ho_error=handle_options(&argc, &argv, my_long_options,
                               ndb_std_get_one_option)))
    return NDBT_ProgramExit(NDBT_WRONGARGS);

  DBUG_ENTER("main");

  Ndb_cluster_connection con(opt_connect_str);
  if(con.connect(12, 5, 1))
  {
    DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
  }

  Ndb ndb(&con, _dbname);
  ndb.init();
  while (ndb.waitUntilReady() != 0);

  NdbDictionary::Dictionary * dict = ndb.getDictionary();
  int no_error= 1;
  int i;

  // create all tables
  Vector<const NdbDictionary::Table*> pTabs;
  if (argc == 0)
  {
    NDBT_Tables::dropAllTables(&ndb);
    NDBT_Tables::createAllTables(&ndb);
    for (i= 0; no_error && i < NDBT_Tables::getNumTables(); i++)
    {
      const NdbDictionary::Table *pTab=
        dict->getTable(NDBT_Tables::getTable(i)->getName());
      if (pTab == 0)
      {
        ndbout << "Failed to create table" << endl;
        ndbout << dict->getNdbError() << endl;
        no_error= 0;
        break;
      }
      pTabs.push_back(pTab);
    }
  }
  else
  {
    for (i= 0; no_error && argc; argc--, i++)
    {
      dict->dropTable(argv[i]);
      NDBT_Tables::createTable(&ndb, argv[i]);
      const NdbDictionary::Table *pTab= dict->getTable(argv[i]);
      if (pTab == 0)
      {
        ndbout << "Failed to create table" << endl;
        ndbout << dict->getNdbError() << endl;
        no_error= 0;
        break;
      }
      pTabs.push_back(pTab);
    }
  }
  pTabs.push_back(NULL);

  // create an event for each table
  for (i= 0; no_error && pTabs[i]; i++)
  {
    HugoTransactions ht(*pTabs[i]);
    if (ht.createEvent(&ndb)){
      no_error= 0;
      break;
    }
  }

  // create an event operation for each event
  Vector<NdbEventOperation *> pOps;
  for (i= 0; no_error && pTabs[i]; i++)
  {
    char buf[1024];
    sprintf(buf, "%s_EVENT", pTabs[i]->getName());
    NdbEventOperation *pOp= ndb.createEventOperation(buf, 1000);
    if (pOp == NULL)
    {
      no_error= 0;
      break;
    }
    pOps.push_back(pOp);
  }

  // get storage for each event operation
  for (i= 0; no_error && pTabs[i]; i++)
  {
    int n_columns= pTabs[i]->getNoOfColumns();
    for (int j = 0; j < n_columns; j++)
    {
      pOps[i]->getValue(pTabs[i]->getColumn(j)->getName());
      pOps[i]->getPreValue(pTabs[i]->getColumn(j)->getName());
    }
  }

  // start receiving events
  for (i= 0; no_error && pTabs[i]; i++)
  {
    if (pOps[i]->execute())
    {
      no_error= 0;
      break;
    }
  }

  // create a "shadow" table for each table
  Vector<const NdbDictionary::Table*> pShadowTabs;
  for (i= 0; no_error && pTabs[i]; i++)
  {
    char buf[1024];
    sprintf(buf, "%s_SHADOW", pTabs[i]->getName());

    dict->dropTable(buf);
    if (dict->getTable(buf))
    {
      no_error= 0;
      break;
    }

    NdbDictionary::Table table_shadow(*pTabs[i]);
    table_shadow.setName(buf);
    dict->createTable(table_shadow);
    pShadowTabs.push_back(dict->getTable(buf));
    if (!pShadowTabs[i])
    {
      no_error= 0;
      break;
    }
  }

  // create a hugo operation per table
  Vector<HugoOperations *> hugo_ops;
  for (i= 0; no_error && pTabs[i]; i++)
  {
    hugo_ops.push_back(new HugoOperations(*pTabs[i]));
  }

  int n_records= 3;
  // insert n_records records per table
  do {
    if (start_transaction(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
    for (i= 0; no_error && pTabs[i]; i++)
    {
      hugo_ops[i]->pkInsertRecord(&ndb, 0, n_records);
    }
    if (execute_commit(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
    if(close_transaction(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
  } while(0);

  // copy events and verify
  do {
    if (copy_events(&ndb) < 0)
    {
      no_error= 0;
      break;
    }
    if (verify_copy(&ndb, pTabs, pShadowTabs))
    {
      no_error= 0;
      break;
    }
  } while (0);

  // update n_records-1 records in first table
  do {
    if (start_transaction(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }

    hugo_ops[0]->pkUpdateRecord(&ndb, n_records-1);

    if (execute_commit(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
    if(close_transaction(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
  } while(0);

  // copy events and verify
  do {
    if (copy_events(&ndb) < 0)
    {
      no_error= 0;
      break;
    }
    if (verify_copy(&ndb, pTabs, pShadowTabs))
    {
      no_error= 0;
      break;
    }
  } while (0);

  {
    NdbRestarts restarts;
    for (int j= 0; j < 10; j++)
    {
      // restart a node
      if (no_error)
      {
        int timeout = 240;
        if (restarts.executeRestart("RestartRandomNodeAbort", timeout))
        {
          no_error= 0;
          break;
        }
      }

      // update all n_records records on all tables
      if (start_transaction(&ndb, hugo_ops))
      {
        no_error= 0;
        break;
      }

      for (int r= 0; r < n_records; r++)
      {
        for (i= 0; pTabs[i]; i++)
        {
          hugo_ops[i]->pkUpdateRecord(&ndb, r);
        }
      }
      if (execute_commit(&ndb, hugo_ops))
      {
        no_error= 0;
        break;
      }
      if(close_transaction(&ndb, hugo_ops))
      {
        no_error= 0;
        break;
      }

      // copy events and verify
      if (copy_events(&ndb) < 0)
      {
        no_error= 0;
        break;
      }
      if (verify_copy(&ndb, pTabs, pShadowTabs))
      {
        no_error= 0;
        break;
      }
    }
  }

  // drop the event operations
  for (i= 0; i < (int)pOps.size(); i++)
  {
    if (ndb.dropEventOperation(pOps[i]))
    {
      no_error= 0;
    }
  }

  if (no_error)
    DBUG_RETURN(NDBT_ProgramExit(NDBT_OK));
  DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
}
bool
BackupRestore::table(const TableS & table){
  if (!m_restore && !m_restore_meta)
    return true;

  const char * name = table.getTableName();

  /**
   * Ignore blob tables
   */
  if(match_blob(name) >= 0)
    return true;

  const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable);
  if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined){
    m_indexes.push_back(table.m_dictTable);
    return true;
  }

  BaseString tmp(name);
  Vector<BaseString> split;
  if(tmp.split(split, "/") != 3){
    err << "Invalid table name format `" << name << "`" << endl;
    return false;
  }

  m_ndb->setDatabaseName(split[0].c_str());
  m_ndb->setSchemaName(split[1].c_str());

  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  if(m_restore_meta)
  {
    NdbDictionary::Table copy(*table.m_dictTable);
    copy.setName(split[2].c_str());
    Uint32 id;
    if (copy.getTablespace(&id))
    {
      debug << "Connecting " << name << " to tablespace oldid: " << id << flush;
      NdbDictionary::Tablespace* ts = m_tablespaces[id];
      debug << " newid: " << ts->getObjectId() << endl;
      copy.setTablespace(* ts);
    }

    if (copy.getDefaultNoPartitionsFlag())
    {
      /*
        Table was defined with the default number of partitions. We can
        restore it with whatever is the default in this cluster. We use the
        max_rows parameter in calculating the default number.
      */
      Uint32 no_nodes = m_cluster_connection->no_db_nodes();
      copy.setFragmentCount(get_no_fragments(copy.getMaxRows(), no_nodes));
      set_default_nodegroups(&copy);
    }
    else
    {
      /*
        Table was defined with a specific number of partitions. It should be
        restored with the same number of partitions. It will either be
        restored in the same node groups as when the backup was taken, or by
        using a node group map supplied to the ndb_restore program.
      */
      Uint16 *ng_array = (Uint16*)copy.getFragmentData();
      Uint16 no_parts = copy.getFragmentCount();
      if (map_nodegroups(ng_array, no_parts))
      {
        if (translate_frm(&copy))
        {
          err << "Create table " << table.getTableName() << " failed: ";
          err << "Translate frm error" << endl;
          return false;
        }
      }
      copy.setFragmentData((const void *)ng_array, no_parts << 1);
    }

    /**
     * Force of varpart was introduced in 5.1.18, telco 6.1.7 and 6.2.1.
     * Since the default from mysqld is to add force of varpart (disable with
     * ROW_FORMAT=FIXED) we force varpart onto tables when they are restored
     * from backups taken with older versions. This will be wrong if
     * ROW_FORMAT=FIXED was used on the original table, however the
     * likelihood of this is low, since ROW_FORMAT= was a NOOP in older
     * versions.
     */
    if (table.getBackupVersion() < MAKE_VERSION(5,1,18))
      copy.setForceVarPart(true);
    else if (getMajor(table.getBackupVersion()) == 6 &&
             (table.getBackupVersion() < MAKE_VERSION(6,1,7) ||
              table.getBackupVersion() == MAKE_VERSION(6,2,0)))
      copy.setForceVarPart(true);

    /*
      Update min and max rows to reflect the table, this to ensure that
      memory is allocated properly in the ndb kernel
    */
    copy.setMinRows(table.getNoOfRecords());
    if (table.getNoOfRecords() > copy.getMaxRows())
    {
      copy.setMaxRows(table.getNoOfRecords());
    }

    NdbTableImpl &tableImpl = NdbTableImpl::getImpl(copy);
    if (table.getBackupVersion() < MAKE_VERSION(5,1,0) && !m_no_upgrade){
      for(int i= 0; i < copy.getNoOfColumns(); i++)
      {
        NdbDictionary::Column::Type t = copy.getColumn(i)->getType();

        if (t == NdbDictionary::Column::Varchar ||
            t == NdbDictionary::Column::Varbinary)
          tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeShortVar);
        if (t == NdbDictionary::Column::Longvarchar ||
            t == NdbDictionary::Column::Longvarbinary)
          tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeMediumVar);
      }
    }

    if (dict->createTable(copy) == -1)
    {
      err << "Create table `" << table.getTableName() << "` failed: "
          << dict->getNdbError() << endl;
      if (dict->getNdbError().code == 771)
      {
        /*
          The user on the cluster where the backup was created had specified
          specific node groups for partitions. Some of these node groups
          didn't exist in this cluster; warn the user and point out the
          ndb-nodegroup-map option.
        */
        err << "The node groups defined in the table didn't exist in this";
        err << " cluster." << endl << "There is an option to use the";
        err << " parameter ndb-nodegroup-map to define a mapping from";
        err << endl << "the old node groups to new node groups" << endl;
      }
      return false;
    }
    info << "Successfully restored table `"
         << table.getTableName() << "`" << endl;
  }

  const NdbDictionary::Table* tab = dict->getTable(split[2].c_str());
  if(tab == 0){
    err << "Unable to find table: `" << split[2].c_str() << "`" << endl;
    return false;
  }
  if(m_restore_meta)
  {
    if (tab->getFrmData())
    {
      // a MySQL Server table is restored, thus an event should be created
      BaseString event_name("REPL$");
      event_name.append(split[0].c_str());
      event_name.append("/");
      event_name.append(split[2].c_str());

      NdbDictionary::Event my_event(event_name.c_str());
      my_event.setTable(*tab);
      my_event.addTableEvent(NdbDictionary::Event::TE_ALL);

      // add all columns to the event
      bool has_blobs = false;
      for(int a= 0; a < tab->getNoOfColumns(); a++)
      {
        my_event.addEventColumn(a);
        NdbDictionary::Column::Type t = tab->getColumn(a)->getType();
        if (t == NdbDictionary::Column::Blob ||
            t == NdbDictionary::Column::Text)
          has_blobs = true;
      }
      if (has_blobs)
        my_event.mergeEvents(true);

      while (dict->createEvent(my_event)) // Add event to database
      {
        if (dict->getNdbError().classification == NdbError::SchemaObjectExists)
        {
          info << "Event for table " << table.getTableName()
               << " already exists, removing.\n";
          if (!dict->dropEvent(my_event.getName()))
            continue;
        }
        err << "Create table event for " << table.getTableName()
            << " failed: " << dict->getNdbError() << endl;
        dict->dropTable(split[2].c_str());
        return false;
      }
      info << "Successfully restored table event " << event_name << endl;
    }
  }
  const NdbDictionary::Table* null = 0;
  m_new_tables.fill(table.m_dictTable->getTableId(), null);
  m_new_tables[table.m_dictTable->getTableId()] = tab;
  return true;
}
int
runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){
  NdbRestarter restarter;
  NdbBackup backup(GETNDB(step)->getNodeId()+1);
  unsigned minBackupId = ctx->getProperty("MinBackupId");
  unsigned maxBackupId = ctx->getProperty("MaxBackupId");
  unsigned backupId = minBackupId;
  int result = NDBT_OK;
  int errSumAccounts = 0;
  int errValidateGL = 0;

  ndbout << " maxBackupId = " << maxBackupId << endl;
  ndbout << " minBackupId = " << minBackupId << endl;

  while (backupId <= maxBackupId){

    // TEMPORARY FIX
    // To erase all tables from cache(s)
    // To be removed, maybe replaced by ndb.invalidate();
    {
      Bank bank(ctx->m_cluster_connection);

      if (bank.dropBank() != NDBT_OK){
        result = NDBT_FAILED;
        break;
      }
    }
    // END TEMPORARY FIX

    ndbout << "Performing restart" << endl;
    if (restarter.restartAll(false) != 0)
      return NDBT_FAILED;

    if (restarter.waitClusterStarted() != 0)
      return NDBT_FAILED;

    ndbout << "Dropping " << tabname << endl;
    NdbDictionary::Dictionary* pDict = GETNDB(step)->getDictionary();
    pDict->dropTable(tabname);

    ndbout << "Restoring backup " << backupId << endl;
    if (backup.restore(backupId) == -1){
      return NDBT_FAILED;
    }
    ndbout << "Backup " << backupId << " restored" << endl;

    // Let the bank verify
    Bank bank(ctx->m_cluster_connection);

    int wait = 0;
    int yield = 1;
    if (bank.performSumAccounts(wait, yield) != 0){
      ndbout << "bank.performSumAccounts FAILED" << endl;
      ndbout << "  backupId = " << backupId << endl << endl;
      result = NDBT_FAILED;
      errSumAccounts++;
    }

    if (bank.performValidateAllGLs() != 0){
      ndbout << "bank.performValidateAllGLs FAILED" << endl;
      ndbout << "  backupId = " << backupId << endl << endl;
      result = NDBT_FAILED;
      errValidateGL++;
    }

    backupId++;
  }

  if (result != NDBT_OK){
    ndbout << "Verification of backup failed" << endl
           << "  errValidateGL=" << errValidateGL << endl
           << "  errSumAccounts=" << errSumAccounts << endl << endl;
  }
  return result;
}
int
NDBT_TestSuite::execute(int argc, const char** argv){
  int res = NDBT_FAILED;
  /*
    Arguments:
      Run only a subset of tests:
        -n testname     Which test to run

    Recommendations for test functions:
      --records  Number of records to use (default: 10000)
      --loops    Number of loops to execute in the test (default: 100)

    Other parameters should:
      * be calculated from the above two parameters
      * be divided into different test cases, e.g. one test case runs with
        FragmentType = Single and another performs the same test with
        FragmentType = Large
      * let the test case iterate over all/a subset of appropriate
        parameters, e.g. iterate over FragmentType = Single to
        FragmentType = AllLarge

    Remember that the intention is that it should be _easy_ to run a
    complete test suite without any greater knowledge of what should be
    tested, i.e. keep arguments to a minimum.
  */

  char **_argv= (char **)argv;

  if (!my_progname)
    my_progname= _argv[0];

  const char *load_default_groups[]= { "mysql_cluster", 0 };
  load_defaults("my", load_default_groups, &argc, &_argv);

  int ho_error;
#ifndef DBUG_OFF
  opt_debug= "d:t:i:F:L";
#endif
  if ((ho_error=handle_options(&argc, &_argv, my_long_options,
                               ndb_std_get_one_option)))
  {
    usage();
    return NDBT_ProgramExit(NDBT_WRONGARGS);
  }

  if (opt_print == true){
    printExecutionTree();
    return 0;
  }

  if (opt_print_html == true){
    printExecutionTreeHTML();
    return 0;
  }

  if (opt_print_cases == true){
    printCases();
    return 0;
  }

  if (opt_verbose)
    setOutputLevel(2); // Show g_info
  else
    setOutputLevel(0); // Show only g_err

  remote_mgm = opt_remote_mgm;
  records = opt_records;
  loops = opt_loops;
  timer = opt_timer;

  Ndb_cluster_connection con;
  if(con.connect(12, 5, 1))
  {
    return NDBT_ProgramExit(NDBT_FAILED);
  }

  if (opt_seed == 0)
  {
    opt_seed = NdbTick_CurrentMillisecond();
  }
  ndbout_c("random seed: %u", opt_seed);
  srand(opt_seed);
  srandom(opt_seed);

  global_flag_skip_invalidate_cache = 1;

  {
    Ndb ndb(&con, "TEST_DB");
    ndb.init(1024);
    if (ndb.waitUntilReady(500)){
      g_err << "Ndb was not ready" << endl;
      return NDBT_ProgramExit(NDBT_FAILED);
    }
    NdbDictionary::Dictionary* pDict = ndb.getDictionary();

    int num_tables= argc;
    if (argc == 0)
      num_tables = NDBT_Tables::getNumTables();

    for(int i = 0; i < num_tables; i++)
    {
      if (argc == 0)
        m_tables_in_test.push_back(NDBT_Tables::getTable(i)->getName());
      else
        m_tables_in_test.push_back(_argv[i]);
      if (createAllTables == true)
      {
        const char *tab_name= m_tables_in_test[i].c_str();
        const NdbDictionary::Table* pTab = pDict->getTable(tab_name);
        if (pTab && pDict->dropTable(tab_name) != 0)
        {
          g_err << "ERROR0: Failed to drop table " << tab_name
                << pDict->getNdbError() << endl;
          return NDBT_ProgramExit(NDBT_FAILED);
        }
        if(NDBT_Tables::createTable(&ndb, tab_name) != 0)
        {
          g_err << "ERROR1: Failed to create table " << tab_name
                << pDict->getNdbError() << endl;
          return NDBT_ProgramExit(NDBT_FAILED);
        }
      }
    }
  }

  if(argc == 0){
    // No table specified
    res = executeAll(con, opt_testname);
  } else {
    testSuiteTimer.doStart();
    for(int i = 0; i < argc; i++){
      executeOne(con, _argv[i], opt_testname);
    }
    testSuiteTimer.doStop();
    res = report(opt_testname);
  }

  if (res == NDBT_OK && createAllTables == true)
  {
    Ndb ndb(&con, "TEST_DB");
    ndb.init(1024);
    if (ndb.waitUntilReady(500)){
      g_err << "Ndb was not ready" << endl;
      return NDBT_ProgramExit(NDBT_FAILED);
    }
    NdbDictionary::Dictionary* pDict = ndb.getDictionary();
    for(unsigned i = 0; i < m_tables_in_test.size(); i++)
    {
      pDict->dropTable(m_tables_in_test[i].c_str());
    }
  }
  return NDBT_ProgramExit(res);
}
void
NDBT_TestSuite::execute(Ndb_cluster_connection& con, Ndb* ndb,
                        const NdbDictionary::Table* pTab,
                        const char* _testname){
  int result;

  for (unsigned t = 0; t < tests.size(); t++){
    if (_testname != NULL &&
        strcasecmp(tests[t]->getName(), _testname) != 0)
      continue;

    if (tests[t]->m_all_tables && tests[t]->m_has_run)
    {
      continue;
    }

    if (tests[t]->isVerify(pTab) == false)
    {
      continue;
    }

    tests[t]->initBeforeTest();

    NdbDictionary::Dictionary* pDict = ndb->getDictionary();
    const NdbDictionary::Table* pTab2 = pDict->getTable(pTab->getName());
    if (createTable == true){

      if(pTab2 != 0 && pDict->dropTable(pTab->getName()) != 0){
        numTestsFail++;
        numTestsExecuted++;
        g_err << "ERROR0: Failed to drop table " << pTab->getName() << endl;
        tests[t]->saveTestResult(pTab, FAILED_TO_CREATE);
        continue;
      }

      if (NDBT_Tables::createTable(ndb, pTab->getName(), false, false,
                                   g_create_hook, this) != 0) {
        numTestsFail++;
        numTestsExecuted++;
        g_err << "ERROR1: Failed to create table " << pTab->getName()
              << pDict->getNdbError() << endl;
        tests[t]->saveTestResult(pTab, FAILED_TO_CREATE);
        continue;
      }
      pTab2 = pDict->getTable(pTab->getName());
    }
    else if(!pTab2)
    {
      pTab2 = pTab;
    }

    ctx = new NDBT_Context(con);
    ctx->setTab(pTab2);
    ctx->setNumRecords(records);
    ctx->setNumLoops(loops);
    if(remote_mgm != NULL)
      ctx->setRemoteMgm(remote_mgm);
    ctx->setSuite(this);

    result = tests[t]->execute(ctx);
    tests[t]->saveTestResult(pTab, result);
    if (result != NDBT_OK)
      numTestsFail++;
    else
      numTestsOk++;
    numTestsExecuted++;

    if (result == NDBT_OK && createTable == true && createAllTables == false){
      pDict->dropTable(pTab->getName());
    }
    tests[t]->m_has_run = true;

    delete ctx;
  }
}
int
runPostUpgradeChecks(NDBT_Context* ctx, NDBT_Step* step)
{
  /**
   * Table will be dropped/recreated automatically by NDBT...
   * so when we enter here, this is already tested
   */
  NdbBackup backup;

  ndbout << "Starting backup..." << flush;
  if (backup.start() != 0)
  {
    ndbout << "Failed" << endl;
    return NDBT_FAILED;
  }
  ndbout << "done" << endl;

  if ((ctx->getProperty("NoDDL", Uint32(0)) == 0) &&
      (ctx->getProperty("KeepFS", Uint32(0)) != 0))
  {
    /**
     * Bug48227
     * Upgrade with FS 6.3->7.0, followed by table create,
     * followed by system restart resulted in table loss.
     */
    Ndb* pNdb = GETNDB(step);
    NdbDictionary::Dictionary *pDict = pNdb->getDictionary();
    {
      NdbDictionary::Dictionary::List l;
      pDict->listObjects(l);
      for (Uint32 i = 0; i < l.count; i++)
        ndbout_c("found %u : %s", l.elements[i].id, l.elements[i].name);
    }

    pDict->dropTable("I3");
    if (NDBT_Tables::createTable(pNdb, "I3"))
    {
      ndbout_c("Failed to create table!");
      ndbout << pDict->getNdbError() << endl;
      return NDBT_FAILED;
    }

    {
      NdbDictionary::Dictionary::List l;
      pDict->listObjects(l);
      for (Uint32 i = 0; i < l.count; i++)
        ndbout_c("found %u : %s", l.elements[i].id, l.elements[i].name);
    }

    NdbRestarter res;
    if (res.restartAll() != 0)
    {
      ndbout_c("restartAll() failed");
      return NDBT_FAILED;
    }

    if (res.waitClusterStarted() != 0)
    {
      ndbout_c("waitClusterStarted() failed");
      return NDBT_FAILED;
    }

    if (pDict->getTable("I3") == 0)
    {
      ndbout_c("Table disappeared");
      return NDBT_FAILED;
    }
  }
  return NDBT_OK;
}
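// Sketch (assumed wiring, not from the original file) of how the NoDDL and
// KeepFS properties read above via ctx->getProperty() are usually supplied
// from the test-case definition, using the standard TC_PROPERTY macro; the
// test-case name and description are hypothetical:
//
//   TESTCASE("PostUpgradeChecksKeepFS",
//            "Verify DDL and system restart after an upgrade that kept FS"){
//     TC_PROPERTY("KeepFS", 1);
//     INITIALIZER(runPostUpgradeChecks);
//   }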