bool BackupRestore::createSystable(const TableS & tables)
{
  if (!m_restore && !m_restore_meta && !m_restore_epoch)
    return true;
  const char *tablename = tables.getTableName();

  if (strcmp(tablename, NDB_REP_DB "/def/" NDB_APPLY_TABLE) != 0 &&
      strcmp(tablename, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE) != 0) {
    return true;
  }

  BaseString tmp(tablename);
  Vector<BaseString> split;
  if (tmp.split(split, "/") != 3) {
    err << "Invalid table name format " << tablename << endl;
    return false;
  }

  m_ndb->setDatabaseName(split[0].c_str());
  m_ndb->setSchemaName(split[1].c_str());

  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  if (dict->getTable(split[2].c_str()) != NULL) {
    return true;
  }
  return table(tables);
}
static void createIndex(Ndb &myNdb, bool includePrimary, unsigned int noOfIndexes)
{
  Uint64 before, after;
  NdbDictionary::Dictionary* dict = myNdb.getDictionary();
  char indexName[] = "PNUMINDEX0000";
  int res;

  for (unsigned int indexNum = 0; indexNum < noOfIndexes; indexNum++) {
    sprintf(indexName, "PNUMINDEX%.4u", indexNum);
    NdbDictionary::Index index(indexName);
    index.setTable("PERSON");
    index.setType(NdbDictionary::Index::UniqueHashIndex);

    if (includePrimary) {
      const char* attr_arr[] = {"NAME", "PNUM1", "PNUM3"};
      index.addIndexColumns(3, attr_arr);
    } else {
      const char* attr_arr[] = {"PNUM1", "PNUM3"};
      index.addIndexColumns(2, attr_arr);
    }

    before = NdbTick_CurrentMillisecond();
    if ((res = dict->createIndex(index)) == -1) {
      error_handler(dict->getNdbError());
    }
    after = NdbTick_CurrentMillisecond();
    ndbout << "Created index " << indexName << ", "
           << after - before << " msec" << endl;
  }
}
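/* A minimal teardown sketch for the indexes created above (not part of the
 * original example), assuming the same PNUMINDEX%.4u naming scheme, the
 * PERSON table, and the error_handler() used by createIndex(). */
static void dropIndexes(Ndb &myNdb, unsigned int noOfIndexes)
{
  NdbDictionary::Dictionary* dict = myNdb.getDictionary();
  char indexName[] = "PNUMINDEX0000";
  for (unsigned int indexNum = 0; indexNum < noOfIndexes; indexNum++) {
    sprintf(indexName, "PNUMINDEX%.4u", indexNum);
    // dropIndex(indexName, tableName) removes the named index from the table
    if (dict->dropIndex(indexName, "PERSON") != 0) {
      error_handler(dict->getNdbError());
    }
  }
}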
int runCreateIndexT1(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
  const NdbDictionary::Table* pTab = pDict->getTable("T1");
  if (pTab == 0) {
    g_err << "getTable(T1) error: " << pDict->getNdbError() << endl;
    return NDBT_FAILED;
  }
  NdbDictionary::Index ind;
  ind.setName("T1X1");
  ind.setTable("T1");
  ind.setType(NdbDictionary::Index::OrderedIndex);
  ind.setLogging(false);
  ind.addColumn("KOL2");
  ind.addColumn("KOL3");
  ind.addColumn("KOL4");
  if (pDict->createIndex(ind, *pTab) != 0) {
    g_err << "createIndex(T1X1) error: " << pDict->getNdbError() << endl;
    return NDBT_FAILED;
  }
  return NDBT_OK;
}
static int createDropEvent(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary *myDict = pNdb->getDictionary();

  if (ctx->getProperty("NoDDL", Uint32(0)) == 0) {
    for (unsigned i = 0; i < table_list.size(); i++) {
      int res = NDBT_OK;
      const NdbDictionary::Table* tab = myDict->getTable(table_list[i].c_str());
      if (tab == 0) {
        continue;
      }
      if ((res = createEvent(pNdb, *tab)) != NDBT_OK) {
        return res;
      }
      if ((res = dropEvent(pNdb, *tab)) != NDBT_OK) {
        return res;
      }
    }
  }
  return NDBT_OK;
}
static int runCheckTableExists(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb ndb(&ctx->m_cluster_connection, "TEST_DB");
  ndb.init(1);
  NdbDictionary::Dictionary* pDict = ndb.getDictionary();
  const NdbDictionary::Table* pTab = ctx->getTab();
  const char *tab_name = pTab->getName();

  const NdbDictionary::Table* pDictTab = pDict->getTable(tab_name);
  if (pDictTab == NULL) {
    g_err << "runCheckTableExists : Failed to find table "
          << tab_name << endl;
    g_err << "Required schema : " << *((NDBT_Table*)pTab) << endl;
    return NDBT_FAILED;
  }

  /* Todo : better check that table in DB is same as
   * table we expect (see the sketch below)
   */

  // Update ctx with a pointer to dict table
  ctx->setTab(pDictTab);
  ctx->setProperty("$table", tab_name);

  return NDBT_OK;
}
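/* A sketch of the "better check" mentioned in the Todo above (a hypothetical
 * helper, not part of the original test): compare the expected definition
 * against the dictionary copy column by column, the same way
 * BackupRestore::table_equal does it. */
static bool tablesLookEqual(const NdbDictionary::Table* pExpect,
                            const NdbDictionary::Table* pDictTab)
{
  if (pExpect->getNoOfColumns() != pDictTab->getNoOfColumns())
    return false;
  for (int i = 0; i < pExpect->getNoOfColumns(); i++) {
    // Column::equal() compares name, type, length and nullability
    if (!pExpect->getColumn(i)->equal(*pDictTab->getColumn(i)))
      return false;
  }
  return true;
}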
void TableTailer::createListenerEvent()
{
  NdbDictionary::Dictionary *myDict = mNdbConnection->getDictionary();
  if (!myDict)
    LOG_NDB_API_ERROR(mNdbConnection->getNdbError());

  const NdbDictionary::Table *table = myDict->getTable(mTable.mTableName.c_str());
  if (!table)
    LOG_NDB_API_ERROR(myDict->getNdbError());

  NdbDictionary::Event myEvent(mEventName.c_str(), *table);
  for (int i = 0; i < mTable.mNoEvents; i++) {
    myEvent.addTableEvent(mTable.mWatchEvents[i]);
  }
  const char* columns[mTable.mNoColumns];
  for (int i = 0; i < mTable.mNoColumns; i++) {
    columns[i] = mTable.mColumnNames[i].c_str();
  }
  myEvent.addEventColumns(mTable.mNoColumns, columns);
  //myEvent.mergeEvents(merge_events);

  // Add event to database
  if (myDict->createEvent(myEvent) == 0) {
    myEvent.print();
  } else if (myDict->getNdbError().classification ==
             NdbError::SchemaObjectExists) {
    LOG_ERROR("Event creation failed, event exists, dropping Event...");
    if (myDict->dropEvent(mEventName.c_str()))
      LOG_NDB_API_ERROR(myDict->getNdbError());
    // try again: add event to database
    if (myDict->createEvent(myEvent))
      LOG_NDB_API_ERROR(myDict->getNdbError());
  } else {
    LOG_NDB_API_ERROR(myDict->getNdbError());
  }
}
bool BackupRestore::table_equal(const TableS &tableS)
{
  if (!m_restore)
    return true;

  const char *tablename = tableS.getTableName();

  if (tableS.m_dictTable == NULL) {
    ndbout << "Table " << tablename << " has no m_dictTable" << endl;
    return false;
  }
  /**
   * Ignore blob tables
   */
  if (match_blob(tablename) >= 0)
    return true;

  const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* tableS.m_dictTable);
  if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined) {
    return true;
  }

  BaseString tmp(tablename);
  Vector<BaseString> split;
  if (tmp.split(split, "/") != 3) {
    err << "Invalid table name format " << tablename << endl;
    return false;
  }

  m_ndb->setDatabaseName(split[0].c_str());
  m_ndb->setSchemaName(split[1].c_str());

  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  const NdbDictionary::Table* tab = dict->getTable(split[2].c_str());
  if (tab == 0) {
    err << "Unable to find table: " << split[2].c_str() << endl;
    return false;
  }

  if (tab->getNoOfColumns() != tableS.m_dictTable->getNoOfColumns()) {
    ndbout_c("m_columns.size %d != %d", tab->getNoOfColumns(),
             tableS.m_dictTable->getNoOfColumns());
    return false;
  }

  for (int i = 0; i < tab->getNoOfColumns(); i++) {
    if (!tab->getColumn(i)->equal(*(tableS.m_dictTable->getColumn(i)))) {
      ndbout_c("m_columns %s != %s", tab->getColumn(i)->getName(),
               tableS.m_dictTable->getColumn(i)->getName());
      return false;
    }
  }
  return true;
}
int NDBT_TestSuite::dropTables(Ndb_cluster_connection& con) const
{
  Ndb ndb(&con, "TEST_DB");
  ndb.init(1);
  NdbDictionary::Dictionary* pDict = ndb.getDictionary();
  for (unsigned i = 0; i < m_tables_in_test.size(); i++) {
    const char *tab_name = m_tables_in_test[i].c_str();
    pDict->dropTable(tab_name);
  }
  return NDBT_OK;
}
static int runDropTable(NDBT_Context* ctx, NDBT_Step* step)
{
  const char * tab_name = ctx->getProperty("$table", (const char*)0);
  if (tab_name) {
    Ndb ndb(&ctx->m_cluster_connection, "TEST_DB");
    ndb.init(1);
    NdbDictionary::Dictionary* pDict = ndb.getDictionary();
    pDict->dropTable(tab_name);
  }
  return NDBT_OK;
}
int runInterpretedUKLookup(NDBT_Context* ctx, NDBT_Step* step)
{
  const NdbDictionary::Table * pTab = ctx->getTab();
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary * dict = pNdb->getDictionary();

  const NdbDictionary::Index* pIdx = dict->getIndex(pkIdxName, pTab->getName());
  CHK_RET_FAILED(pIdx != 0);

  const NdbRecord * pRowRecord = pTab->getDefaultRecord();
  CHK_RET_FAILED(pRowRecord != 0);
  const NdbRecord * pIdxRecord = pIdx->getDefaultRecord();
  CHK_RET_FAILED(pIdxRecord != 0);

  const Uint32 len = NdbDictionary::getRecordRowLength(pRowRecord);
  Uint8 * pRow = new Uint8[len];
  bzero(pRow, len);

  HugoCalculator calc(* pTab);
  calc.equalForRow(pRow, pRowRecord, 0);

  NdbTransaction* pTrans = pNdb->startTransaction();
  CHK_RET_FAILED(pTrans != 0);

  NdbInterpretedCode code;
  code.interpret_exit_ok();
  code.finalise();

  NdbOperation::OperationOptions opts;
  bzero(&opts, sizeof(opts));
  opts.optionsPresent = NdbOperation::OperationOptions::OO_INTERPRETED;
  opts.interpretedCode = &code;

  const NdbOperation * pOp = pTrans->readTuple(pIdxRecord, (char*)pRow,
                                               pRowRecord, (char*)pRow,
                                               NdbOperation::LM_Read,
                                               0,
                                               &opts,
                                               sizeof(opts));
  CHK_RET_FAILED(pOp);
  int res = pTrans->execute(Commit, AbortOnError);
  CHK_RET_FAILED(res == 0);

  delete [] pRow;
  return NDBT_OK;
}
int create_table()
{
  NdbDictionary::Dictionary* dict = g_ndb->getDictionary();
  assert(dict);
  if (g_paramters[P_CREATE].value) {
    g_ndb->getDictionary()->dropTable(g_tablename);
    const NdbDictionary::Table * pTab = NDBT_Tables::getTable(g_tablename);
    assert(pTab);
    NdbDictionary::Table copy = * pTab;
    copy.setLogging(false);
    if (dict->createTable(copy) != 0) {
      g_err << "Failed to create table: " << g_tablename << endl;
      return -1;
    }

    NdbDictionary::Index x(g_indexname);
    x.setTable(g_tablename);
    x.setType(NdbDictionary::Index::OrderedIndex);
    x.setLogging(false);
    for (int k = 0; k < copy.getNoOfColumns(); k++) {
      if (copy.getColumn(k)->getPrimaryKey()) {
        x.addColumnName(copy.getColumn(k)->getName());
      }
    }

    if (dict->createIndex(x) != 0) {
      g_err << "Failed to create index: " << g_indexname << endl;
      return -1;
    }
  }
  g_table = dict->getTable(g_tablename);
  g_index = dict->getIndex(g_indexname, g_tablename);
  assert(g_table);
  assert(g_index);

  if (g_paramters[P_CREATE].value) {
    int rows = g_paramters[P_ROWS].value;
    HugoTransactions hugoTrans(* g_table);
    if (hugoTrans.loadTable(g_ndb, rows)) {
      g_err.println("Failed to load %s with %d rows",
                    g_table->getName(), rows);
      return -1;
    }
  }
  return 0;
}
bool BackupRestore::table(const TableS & table)
{
  if (!m_restore_meta) {
    return true;
  }
  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  if (dict->createTable(*table.m_dictTable) == -1) {
    err << "Create table " << table.getTableName() << " failed: "
        << dict->getNdbError() << endl;
    return false;
  }
  info << "Successfully restored table " << table.getTableName() << endl;
  return true;
}
void ListTablesCall::doAsyncCallback(Local<Object> ctx)
{
  DEBUG_MARKER(UDEB_DETAIL);
  Handle<Value> cb_args[2];
  const char * & dbName = arg1;

  DEBUG_PRINT("RETURN VAL: %d", return_val);
  if (return_val == -1) {
    cb_args[0] = NdbError_Wrapper(dict->getNdbError());
    cb_args[1] = Null();
  }
  else {
    cb_args[0] = Null();  // no error
    /* ListObjects has returned tables in all databases;
       we need to filter here on database name.
    */
    int * stack = new int[list.count];
    unsigned int nmatch = 0;
    for (unsigned i = 0; i < list.count; i++) {
      if (strcmp(dbName, list.elements[i].database) == 0) {
        stack[nmatch++] = i;
      }
    }
    DEBUG_PRINT("arg1/nmatch/list.count: %s/%d/%d", arg1, nmatch, list.count);

    Local<Array> cb_list = Array::New(nmatch);
    for (unsigned int i = 0; i < nmatch; i++) {
      cb_list->Set(i, String::New(list.elements[stack[i]].name));
    }
    cb_args[1] = cb_list;
    delete[] stack;
  }
  callback->Call(ctx, 2, cb_args);
}
void GetTableCall::run()
{
  DEBUG_PRINT("GetTableCall::run() [%s.%s]", arg1, arg2);
  NdbDictionary::Dictionary * dict;
  return_val = -1;

  if (strlen(arg1)) {
    arg0->ndb->setDatabaseName(arg1);
  }
  dict = arg0->ndb->getDictionary();
  ndb_table = dict->getTable(arg2);
  if (ndb_table) {
    return_val = dict->listIndexes(idx_list, arg2);
  }
  if (return_val == 0) {
    /* Fetch the indexes now.  These calls may perform network IO, populating
       the (connection) global and (Ndb) local dictionary caches.  Later,
       in the JavaScript main thread, we will call getIndex() again, knowing
       that the caches are populated.
    */
    for (unsigned int i = 0; i < idx_list.count; i++) {
      const NdbDictionary::Index * idx =
        dict->getIndex(idx_list.elements[i].name, arg2);
      /* It is possible to get an index for a recently dropped table rather
         than the desired table.  This is a known bug likely to be fixed
         later.
      */
      if (ndb_table->getObjectVersion() !=
          dict->getTable(idx->getTable())->getObjectVersion()) {
        dict->invalidateIndex(idx);
        idx = dict->getIndex(idx_list.elements[i].name, arg2);
      }
    }
  }
}
int runLoadAll(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary * pDict = pNdb->getDictionary();
  int records = ctx->getNumRecords();
  int result = NDBT_OK;

  for (unsigned i = 0; i < table_list.size(); i++) {
    const NdbDictionary::Table* tab = pDict->getTable(table_list[i].c_str());
    HugoTransactions trans(* tab);
    trans.loadTable(pNdb, records);
    trans.scanUpdateRecords(pNdb, records);
  }
  return result;
}
static int drop_all_tables()
{
  NdbDictionary::Dictionary * dict = g_ndb->getDictionary();
  require(dict);

  BaseString db = g_ndb->getDatabaseName();
  BaseString schema = g_ndb->getSchemaName();

  NdbDictionary::Dictionary::List list;
  if (dict->listObjects(list, NdbDictionary::Object::TypeUndefined) == -1) {
    g_err << "Failed to list tables: " << endl
          << dict->getNdbError() << endl;
    return -1;
  }
  for (unsigned i = 0; i < list.count; i++) {
    NdbDictionary::Dictionary::List::Element& elt = list.elements[i];
    switch (elt.type) {
    case NdbDictionary::Object::SystemTable:
    case NdbDictionary::Object::UserTable:
      g_ndb->setDatabaseName(elt.database);
      g_ndb->setSchemaName(elt.schema);
      if (dict->dropTable(elt.name) != 0) {
        g_err << "Failed to drop table: "
              << elt.database << "/" << elt.schema << "/" << elt.name << endl;
        g_err << dict->getNdbError() << endl;
        return -1;
      }
      break;
    case NdbDictionary::Object::UniqueHashIndex:
    case NdbDictionary::Object::OrderedIndex:
    case NdbDictionary::Object::HashIndexTrigger:
    case NdbDictionary::Object::IndexTrigger:
    case NdbDictionary::Object::SubscriptionTrigger:
    case NdbDictionary::Object::ReadOnlyConstraint:
    default:
      break;
    }
  }

  g_ndb->setDatabaseName(db.c_str());
  g_ndb->setSchemaName(schema.c_str());
  return 0;
}
static int dropEvent(Ndb *pNdb, const NdbDictionary::Table &tab)
{
  char eventName[1024];
  sprintf(eventName, "%s_EVENT", tab.getName());
  NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
  if (!myDict) {
    g_err << "Dictionary not found "
          << pNdb->getNdbError().code << " "
          << pNdb->getNdbError().message << endl;
    return NDBT_FAILED;
  }
  if (myDict->dropEvent(eventName)) {
    g_err << "Failed to drop event: "
          << myDict->getNdbError().code << " : "
          << myDict->getNdbError().message << endl;
    return NDBT_FAILED;
  }
  return NDBT_OK;
}
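/* A hedged sketch of the createEvent() counterpart used together with
 * dropEvent() above; the exact original helper may differ. It subscribes to
 * all operations on all columns of tab, using the same "<table>_EVENT"
 * naming that dropEvent() expects. */
static int createEvent(Ndb *pNdb, const NdbDictionary::Table &tab)
{
  char eventName[1024];
  sprintf(eventName, "%s_EVENT", tab.getName());
  NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
  if (!myDict) {
    g_err << "Dictionary not found "
          << pNdb->getNdbError().code << " "
          << pNdb->getNdbError().message << endl;
    return NDBT_FAILED;
  }
  NdbDictionary::Event myEvent(eventName, tab);
  myEvent.addTableEvent(NdbDictionary::Event::TE_ALL);
  for (int a = 0; a < tab.getNoOfColumns(); a++)
    myEvent.addEventColumn(a);
  if (myDict->createEvent(myEvent)) {
    g_err << "Failed to create event: "
          << myDict->getNdbError().code << " : "
          << myDict->getNdbError().message << endl;
    return NDBT_FAILED;
  }
  return NDBT_OK;
}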
static int runCreateTable(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb ndb(&ctx->m_cluster_connection, "TEST_DB");
  ndb.init(1);
  NdbDictionary::Dictionary* pDict = ndb.getDictionary();

  const NdbDictionary::Table* pTab = ctx->getTab();
  const char *tab_name = pTab->getName();
  if (pDict->dropTable(tab_name) != 0 &&
      pDict->getNdbError().code != 723) // No such table
  {
    g_err << "runCreateTable: Failed to drop table " << tab_name << endl
          << pDict->getNdbError() << endl;
    return NDBT_FAILED;
  }

  if (NDBT_Tables::createTable(&ndb, tab_name,
                               !ctx->getSuite()->getLogging()) != 0) {
    g_err << "runCreateTable: Failed to create table " << tab_name << endl
          << pDict->getNdbError() << endl;
    return NDBT_FAILED;
  }

  // Update ctx with a pointer to the created table
  const NdbDictionary::Table* pTab2 = pDict->getTable(tab_name);
  ctx->setTab(pTab2);
  ctx->setProperty("$table", tab_name);

  return NDBT_OK;
}
int NDBT_TestSuite::createTables(Ndb_cluster_connection& con) const
{
  Ndb ndb(&con, "TEST_DB");
  ndb.init(1);
  NdbDictionary::Dictionary* pDict = ndb.getDictionary();
  for (unsigned i = 0; i < m_tables_in_test.size(); i++) {
    const char *tab_name = m_tables_in_test[i].c_str();
    if (pDict->dropTable(tab_name) != 0 &&
        pDict->getNdbError().code != 723) // No such table
    {
      g_err << "createTables: Failed to drop table " << tab_name << endl
            << pDict->getNdbError() << endl;
      return NDBT_FAILED;
    }
    if (NDBT_Tables::createTable(&ndb, tab_name, !getLogging()) != 0) {
      g_err << "createTables: Failed to create table " << tab_name << endl
            << pDict->getNdbError() << endl;
      return NDBT_FAILED;
    }
    if (i == 0) {
      // Update ctx with a pointer to the first created table
      const NdbDictionary::Table* pTab2 = pDict->getTable(tab_name);
      ctx->setTab(pTab2);
    }
    g_info << "created " << tab_name << endl;
  }
  return NDBT_OK;
}
int runDDL(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
  const int tables = NDBT_Tables::getNumTables();
  while (!ctx->isTestStopped()) {
    const int tab_no = rand() % tables;
    NdbDictionary::Table tab = *NDBT_Tables::getTable(tab_no);
    BaseString name = tab.getName();
    name.appfmt("-%d", step->getStepNo());
    tab.setName(name.c_str());
    if (pDict->createTable(tab) == 0) {
      HugoTransactions hugoTrans(* pDict->getTable(name.c_str()));
      if (hugoTrans.loadTable(pNdb, 10000) != 0) {
        return NDBT_FAILED;
      }
      while (pDict->dropTable(tab.getName()) != 0 &&
             pDict->getNdbError().code != 4009)
        g_err << pDict->getNdbError() << endl;
      sleep(1);
    }
  }
  return NDBT_OK;
}
static int runDropIndex(NDBT_Context* ctx, NDBT_Step* step)
{
  const NdbDictionary::Table* pTab = ctx->getTab();
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary* pDic = pNdb->getDictionary();
  NdbDictionary::Dictionary::List list;
  if (pDic->listIndexes(list, pTab->getName()) != 0) {
    g_err << pTab->getName() << ": listIndexes failed" << endl;
    ERR(pDic->getNdbError());
    return NDBT_FAILED;
  }
  for (unsigned i = 0; i < list.count; i++) {
    NDBT_Index* pInd = new NDBT_Index(list.elements[i].name);
    pInd->setTable(pTab->getName());
    g_info << "Drop index:" << endl << *pInd;
    if (pInd->dropIndexInDb(pNdb) != 0) {
      return NDBT_FAILED;
    }
  }
  return NDBT_OK;
}
int stressNDB_rep1(NDBT_Context* ctx, NDBT_Step* step)
{
  Ndb* ndb = GETNDB(step);
  NdbDictionary::Dictionary* myDict = ndb->getDictionary();
  const NdbDictionary::Table * table = myDict->getTable("rep1");
  if (table == 0) {
    g_err << "Failed to find table rep1: " << myDict->getNdbError() << endl;
    return NDBT_FAILED;
  }
  HugoTransactions hugoTrans(* table);
  while (!ctx->isTestStopped()) {
    if (hugoTrans.pkUpdateRecords(GETNDB(step), ctx->getNumRecords(),
                                  1, 30) == NDBT_FAILED) {
      g_err << "pkUpdate Failed!" << endl;
      return NDBT_FAILED;
    }
    if (hugoTrans.scanUpdateRecords(GETNDB(step), ctx->getNumRecords(),
                                    1, 30) == NDBT_FAILED) {
      g_err << "scanUpdate Failed!" << endl;
      return NDBT_FAILED;
    }
  }
  return NDBT_OK;
}
int create_table()
{
  NdbDictionary::Dictionary* dict = g_ndb->getDictionary();
  assert(dict);
  if (g_paramters[P_CREATE].value) {
    const NdbDictionary::Table * pTab = NDBT_Tables::getTable(g_table);
    assert(pTab);
    NdbDictionary::Table copy = * pTab;
    copy.setLogging(false);
    if (dict->createTable(copy) != 0) {
      g_err << "Failed to create table: " << g_table << endl;
      return -1;
    }

    NdbDictionary::Index x(g_ordered);
    x.setTable(g_table);
    x.setType(NdbDictionary::Index::OrderedIndex);
    x.setLogging(false);
    for (int k = 0; k < copy.getNoOfColumns(); k++) {
      if (copy.getColumn(k)->getPrimaryKey()) {
        x.addColumn(copy.getColumn(k)->getName());
      }
    }

    if (dict->createIndex(x) != 0) {
      g_err << "Failed to create index: " << g_ordered << endl;
      return -1;
    }

    x.setName(g_unique);
    x.setType(NdbDictionary::Index::UniqueHashIndex);
    if (dict->createIndex(x) != 0) {
      g_err << "Failed to create index: " << g_unique << endl;
      return -1;
    }
  }
  g_tab = dict->getTable(g_table);
  g_i_unique = dict->getIndex(g_unique, g_table);
  g_i_ordered = dict->getIndex(g_ordered, g_table);
  assert(g_tab);
  assert(g_i_unique);
  assert(g_i_ordered);
  return 0;
}
/*
 * ForeignKeyMetadata = {
 *   name             : ""   ,  // constraint name
 *   columnNames      : null ,  // an ordered array of child column names
 *   targetTable      : ""   ,  // referenced table name
 *   targetDatabase   : ""   ,  // referenced database name
 *   targetColumnNames: null ,  // an ordered array of target column names
 * };
 */
Handle<Object> GetTableCall::buildDBForeignKey(const NdbDictionary::ForeignKey *fk)
{
  HandleScope scope;
  DictionaryNameSplitter localSplitter;
  Local<Object> js_fk = Object::New();

  localSplitter.splitName(fk->getName());  // e.g. "12/20/fkname"
  js_fk->Set(String::NewSymbol("name"), String::New(localSplitter.part3));

  // get child column names
  unsigned int childColumnCount = fk->getChildColumnCount();
  Local<Array> fk_child_column_names = Array::New(childColumnCount);
  for (unsigned i = 0; i < childColumnCount; ++i) {
    int columnNumber = fk->getChildColumnNo(i);
    const NdbDictionary::Column * column = ndb_table->getColumn(columnNumber);
    fk_child_column_names->Set(i, String::New(column->getName()));
  }
  js_fk->Set(String::NewSymbol("columnNames"), fk_child_column_names);

  // get parent table (which might be in a different database)
  const char * fk_parent_name = fk->getParentTable();
  localSplitter.splitName(fk_parent_name);
  const char * parent_db_name = localSplitter.part1;
  const char * parent_table_name = localSplitter.part3;
  js_fk->Set(String::NewSymbol("targetTable"), String::New(parent_table_name));
  js_fk->Set(String::NewSymbol("targetDatabase"), String::New(parent_db_name));
  ndb->setDatabaseName(parent_db_name);
  const NdbDictionary::Table * parent_table = dict->getTable(parent_table_name);
  ndb->setDatabaseName(dbName);

  // get parent column names
  unsigned int parentColumnCount = fk->getParentColumnCount();
  Local<Array> fk_parent_column_names = Array::New(parentColumnCount);
  for (unsigned i = 0; i < parentColumnCount; ++i) {
    int columnNumber = fk->getParentColumnNo(i);
    const NdbDictionary::Column * column = parent_table->getColumn(columnNumber);
    fk_parent_column_names->Set(i, String::New(column->getName()));
  }
  js_fk->Set(String::NewSymbol("targetColumnNames"), fk_parent_column_names);

  return scope.Close(js_fk);
}
int main(int argc, char** argv)
{
  NDB_INIT(argv[0]);
  const char *load_default_groups[]= { "mysql_cluster", 0 };
  load_defaults("my", load_default_groups, &argc, &argv);
  int ho_error;
#ifndef DBUG_OFF
  opt_debug= "d:t:F:L";
#endif
  if ((ho_error= handle_options(&argc, &argv, my_long_options,
                                ndb_std_get_one_option)))
    return NDBT_ProgramExit(NDBT_WRONGARGS);

  DBUG_ENTER("main");

  Ndb_cluster_connection con(opt_connect_str);
  if (con.connect(12, 5, 1)) {
    DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
  }

  Ndb ndb(&con, _dbname);
  ndb.init();
  while (ndb.waitUntilReady() != 0);

  NdbDictionary::Dictionary * dict = ndb.getDictionary();
  int no_error= 1;
  int i;

  // create all tables
  Vector<const NdbDictionary::Table*> pTabs;
  if (argc == 0) {
    NDBT_Tables::dropAllTables(&ndb);
    NDBT_Tables::createAllTables(&ndb);
    for (i= 0; no_error && i < NDBT_Tables::getNumTables(); i++) {
      const NdbDictionary::Table *pTab=
        dict->getTable(NDBT_Tables::getTable(i)->getName());
      if (pTab == 0) {
        ndbout << "Failed to create table" << endl;
        ndbout << dict->getNdbError() << endl;
        no_error= 0;
        break;
      }
      pTabs.push_back(pTab);
    }
  }
  else {
    for (i= 0; no_error && argc; argc--, i++) {
      dict->dropTable(argv[i]);
      NDBT_Tables::createTable(&ndb, argv[i]);
      const NdbDictionary::Table *pTab= dict->getTable(argv[i]);
      if (pTab == 0) {
        ndbout << "Failed to create table" << endl;
        ndbout << dict->getNdbError() << endl;
        no_error= 0;
        break;
      }
      pTabs.push_back(pTab);
    }
  }
  pTabs.push_back(NULL);

  // create an event for each table
  for (i= 0; no_error && pTabs[i]; i++) {
    HugoTransactions ht(*pTabs[i]);
    if (ht.createEvent(&ndb)) {
      no_error= 0;
      break;
    }
  }

  // create an event operation for each event
  Vector<NdbEventOperation *> pOps;
  for (i= 0; no_error && pTabs[i]; i++) {
    char buf[1024];
    sprintf(buf, "%s_EVENT", pTabs[i]->getName());
    NdbEventOperation *pOp= ndb.createEventOperation(buf, 1000);
    if (pOp == NULL) {
      no_error= 0;
      break;
    }
    pOps.push_back(pOp);
  }

  // get storage for each event operation
  for (i= 0; no_error && pTabs[i]; i++) {
    int n_columns= pTabs[i]->getNoOfColumns();
    for (int j = 0; j < n_columns; j++) {
      pOps[i]->getValue(pTabs[i]->getColumn(j)->getName());
      pOps[i]->getPreValue(pTabs[i]->getColumn(j)->getName());
    }
  }

  // start receiving events
  for (i= 0; no_error && pTabs[i]; i++) {
    if (pOps[i]->execute()) {
      no_error= 0;
      break;
    }
  }

  // create a "shadow" table for each table
  Vector<const NdbDictionary::Table*> pShadowTabs;
  for (i= 0; no_error && pTabs[i]; i++) {
    char buf[1024];
    sprintf(buf, "%s_SHADOW", pTabs[i]->getName());

    dict->dropTable(buf);
    if (dict->getTable(buf)) {
      no_error= 0;
      break;
    }

    NdbDictionary::Table table_shadow(*pTabs[i]);
    table_shadow.setName(buf);
    dict->createTable(table_shadow);
    pShadowTabs.push_back(dict->getTable(buf));
    if (!pShadowTabs[i]) {
      no_error= 0;
      break;
    }
  }

  // create a hugo operation per table
  Vector<HugoOperations *> hugo_ops;
  for (i= 0; no_error && pTabs[i]; i++) {
    hugo_ops.push_back(new HugoOperations(*pTabs[i]));
  }

  int n_records= 3;
  // insert n_records records per table
  do {
    if (start_transaction(&ndb, hugo_ops)) {
      no_error= 0;
      break;
    }
    for (i= 0; no_error && pTabs[i]; i++) {
      hugo_ops[i]->pkInsertRecord(&ndb, 0, n_records);
    }
    if (execute_commit(&ndb, hugo_ops)) {
      no_error= 0;
      break;
    }
    if (close_transaction(&ndb, hugo_ops)) {
      no_error= 0;
      break;
    }
  } while(0);

  // copy events and verify
  do {
    if (copy_events(&ndb) < 0) {
      no_error= 0;
      break;
    }
    if (verify_copy(&ndb, pTabs, pShadowTabs)) {
      no_error= 0;
      break;
    }
  } while (0);

  // update n_records-1 records in first table
  do {
    if (start_transaction(&ndb, hugo_ops)) {
      no_error= 0;
      break;
    }
    hugo_ops[0]->pkUpdateRecord(&ndb, n_records-1);
    if (execute_commit(&ndb, hugo_ops)) {
      no_error= 0;
      break;
    }
    if (close_transaction(&ndb, hugo_ops)) {
      no_error= 0;
      break;
    }
  } while(0);

  // copy events and verify
  do {
    if (copy_events(&ndb) < 0) {
      no_error= 0;
      break;
    }
    if (verify_copy(&ndb, pTabs, pShadowTabs)) {
      no_error= 0;
      break;
    }
  } while (0);

  {
    NdbRestarts restarts;
    for (int j= 0; j < 10; j++) {
      // restart a node
      if (no_error) {
        int timeout = 240;
        if (restarts.executeRestart("RestartRandomNodeAbort", timeout)) {
          no_error= 0;
          break;
        }
      }

      // update all n_records records on all tables
      if (start_transaction(&ndb, hugo_ops)) {
        no_error= 0;
        break;
      }
      for (int r= 0; r < n_records; r++) {
        for (i= 0; pTabs[i]; i++) {
          hugo_ops[i]->pkUpdateRecord(&ndb, r);
        }
      }
      if (execute_commit(&ndb, hugo_ops)) {
        no_error= 0;
        break;
      }
      if (close_transaction(&ndb, hugo_ops)) {
        no_error= 0;
        break;
      }

      // copy events and verify
      if (copy_events(&ndb) < 0) {
        no_error= 0;
        break;
      }
      if (verify_copy(&ndb, pTabs, pShadowTabs)) {
        no_error= 0;
        break;
      }
    }
  }

  // drop the event operations
  for (i= 0; i < (int)pOps.size(); i++) {
    if (ndb.dropEventOperation(pOps[i])) {
      no_error= 0;
    }
  }

  if (no_error)
    DBUG_RETURN(NDBT_ProgramExit(NDBT_OK));
  DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
}
bool BackupRestore::endOfTables()
{
  if (!m_restore_meta)
    return true;

  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  for (size_t i = 0; i < m_indexes.size(); i++) {
    NdbTableImpl & indtab = NdbTableImpl::getImpl(* m_indexes[i]);

    Vector<BaseString> split;
    {
      BaseString tmp(indtab.m_primaryTable.c_str());
      if (tmp.split(split, "/") != 3) {
        err << "Invalid table name format `"
            << indtab.m_primaryTable.c_str() << "`" << endl;
        return false;
      }
    }

    m_ndb->setDatabaseName(split[0].c_str());
    m_ndb->setSchemaName(split[1].c_str());

    const NdbDictionary::Table * prim = dict->getTable(split[2].c_str());
    if (prim == 0) {
      err << "Unable to find base table `" << split[2].c_str()
          << "` for index `" << indtab.getName() << "`" << endl;
      return false;
    }
    NdbTableImpl& base = NdbTableImpl::getImpl(*prim);
    NdbIndexImpl* idx;
    Vector<BaseString> split_idx;
    {
      BaseString tmp(indtab.getName());
      if (tmp.split(split_idx, "/") != 4) {
        err << "Invalid index name format `" << indtab.getName() << "`" << endl;
        return false;
      }
    }
    if (NdbDictInterface::create_index_obj_from_table(&idx, &indtab, &base)) {
      err << "Failed to create index `" << split_idx[3].c_str()
          << "` on " << split[2].c_str() << endl;
      return false;
    }
    idx->setName(split_idx[3].c_str());
    if (dict->createIndex(* idx) != 0) {
      delete idx;
      err << "Failed to create index `" << split_idx[3].c_str()
          << "` on `" << split[2].c_str() << "`" << endl
          << dict->getNdbError() << endl;
      return false;
    }
    delete idx;
    info << "Successfully created index `" << split_idx[3].c_str()
         << "` on `" << split[2].c_str() << "`" << endl;
  }
  return true;
}
bool BackupRestore::object(Uint32 type, const void * ptr)
{
  if (!m_restore_meta)
    return true;

  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  switch (type) {
  case DictTabInfo::Tablespace:
  {
    NdbDictionary::Tablespace old(*(NdbDictionary::Tablespace*)ptr);

    Uint32 id = old.getObjectId();

    if (!m_no_restore_disk) {
      NdbDictionary::LogfileGroup * lg =
        m_logfilegroups[old.getDefaultLogfileGroupId()];
      old.setDefaultLogfileGroup(* lg);
      info << "Creating tablespace: " << old.getName() << "..." << flush;
      int ret = dict->createTablespace(old);
      if (ret) {
        NdbError errobj= dict->getNdbError();
        info << "FAILED" << endl;
        err << "Create tablespace failed: " << old.getName()
            << ": " << errobj << endl;
        return false;
      }
      info << "done" << endl;
    }

    NdbDictionary::Tablespace curr = dict->getTablespace(old.getName());
    NdbError errobj = dict->getNdbError();
    if ((int) errobj.classification == (int) ndberror_cl_none) {
      NdbDictionary::Tablespace* currptr = new NdbDictionary::Tablespace(curr);
      NdbDictionary::Tablespace * null = 0;
      m_tablespaces.set(currptr, id, null);
      debug << "Retrieved tablespace: " << currptr->getName()
            << " oldid: " << id << " newid: " << currptr->getObjectId()
            << " " << (void*)currptr << endl;
      return true;
    }

    err << "Failed to retrieve tablespace \"" << old.getName() << "\": "
        << errobj << endl;
    return false;
    break;
  }
  case DictTabInfo::LogfileGroup:
  {
    NdbDictionary::LogfileGroup old(*(NdbDictionary::LogfileGroup*)ptr);

    Uint32 id = old.getObjectId();

    if (!m_no_restore_disk) {
      info << "Creating logfile group: " << old.getName() << "..." << flush;
      int ret = dict->createLogfileGroup(old);
      if (ret) {
        NdbError errobj= dict->getNdbError();
        info << "FAILED" << endl;
        err << "Create logfile group failed: " << old.getName()
            << ": " << errobj << endl;
        return false;
      }
      info << "done" << endl;
    }

    NdbDictionary::LogfileGroup curr = dict->getLogfileGroup(old.getName());
    NdbError errobj = dict->getNdbError();
    if ((int) errobj.classification == (int) ndberror_cl_none) {
      NdbDictionary::LogfileGroup* currptr =
        new NdbDictionary::LogfileGroup(curr);
      NdbDictionary::LogfileGroup * null = 0;
      m_logfilegroups.set(currptr, id, null);
      debug << "Retrieved logfile group: " << currptr->getName()
            << " oldid: " << id << " newid: " << currptr->getObjectId()
            << " " << (void*)currptr << endl;
      return true;
    }

    err << "Failed to retrieve logfile group \"" << old.getName() << "\": "
        << errobj << endl;
    return false;
    break;
  }
  case DictTabInfo::Datafile:
  {
    if (!m_no_restore_disk) {
      NdbDictionary::Datafile old(*(NdbDictionary::Datafile*)ptr);
      NdbDictionary::ObjectId objid;
      old.getTablespaceId(&objid);
      NdbDictionary::Tablespace * ts = m_tablespaces[objid.getObjectId()];
      debug << "Connecting datafile " << old.getPath()
            << " to tablespace: oldid: " << objid.getObjectId()
            << " newid: " << ts->getObjectId() << endl;
      old.setTablespace(* ts);
      info << "Creating datafile \"" << old.getPath() << "\"..." << flush;
      if (dict->createDatafile(old)) {
        NdbError errobj= dict->getNdbError();
        info << "FAILED" << endl;
        err << "Create datafile failed: " << old.getPath()
            << ": " << errobj << endl;
        return false;
      }
      info << "done" << endl;
    }
    return true;
    break;
  }
  case DictTabInfo::Undofile:
  {
    if (!m_no_restore_disk) {
      NdbDictionary::Undofile old(*(NdbDictionary::Undofile*)ptr);
      NdbDictionary::ObjectId objid;
      old.getLogfileGroupId(&objid);
      NdbDictionary::LogfileGroup * lg = m_logfilegroups[objid.getObjectId()];
      debug << "Connecting undofile " << old.getPath()
            << " to logfile group: oldid: " << objid.getObjectId()
            << " newid: " << lg->getObjectId()
            << " " << (void*)lg << endl;
      old.setLogfileGroup(* lg);
      info << "Creating undofile \"" << old.getPath() << "\"..." << flush;
      if (dict->createUndofile(old)) {
        NdbError errobj= dict->getNdbError();
        info << "FAILED" << endl;
        err << "Create undofile failed: " << old.getPath()
            << ": " << errobj << endl;
        return false;
      }
      info << "done" << endl;
    }
    return true;
    break;
  }
  }
  return true;
}
bool BackupRestore::table(const TableS & table)
{
  if (!m_restore && !m_restore_meta)
    return true;

  const char * name = table.getTableName();

  /**
   * Ignore blob tables
   */
  if (match_blob(name) >= 0)
    return true;

  const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable);
  if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined) {
    m_indexes.push_back(table.m_dictTable);
    return true;
  }

  BaseString tmp(name);
  Vector<BaseString> split;
  if (tmp.split(split, "/") != 3) {
    err << "Invalid table name format `" << name << "`" << endl;
    return false;
  }

  m_ndb->setDatabaseName(split[0].c_str());
  m_ndb->setSchemaName(split[1].c_str());

  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  if (m_restore_meta) {
    NdbDictionary::Table copy(*table.m_dictTable);
    copy.setName(split[2].c_str());
    Uint32 id;
    if (copy.getTablespace(&id)) {
      debug << "Connecting " << name << " to tablespace oldid: " << id << flush;
      NdbDictionary::Tablespace* ts = m_tablespaces[id];
      debug << " newid: " << ts->getObjectId() << endl;
      copy.setTablespace(* ts);
    }

    if (copy.getDefaultNoPartitionsFlag()) {
      /*
        Table was defined with default number of partitions. We can restore
        it with whatever is the default in this cluster. We use the max_rows
        parameter in calculating the default number.
      */
      Uint32 no_nodes = m_cluster_connection->no_db_nodes();
      copy.setFragmentCount(get_no_fragments(copy.getMaxRows(), no_nodes));
      set_default_nodegroups(&copy);
    }
    else {
      /*
        Table was defined with specific number of partitions. It should be
        restored with the same number of partitions. It will either be
        restored in the same node groups as when the backup was taken or by
        using a node group map supplied to the ndb_restore program.
      */
      Uint16 *ng_array = (Uint16*)copy.getFragmentData();
      Uint16 no_parts = copy.getFragmentCount();
      if (map_nodegroups(ng_array, no_parts)) {
        if (translate_frm(&copy)) {
          err << "Create table " << table.getTableName() << " failed: ";
          err << "Translate frm error" << endl;
          return false;
        }
      }
      copy.setFragmentData((const void *)ng_array, no_parts << 1);
    }

    /**
     * Force of varpart was introduced in 5.1.18, telco 6.1.7 and 6.2.1.
     * Since the default from mysqld is to add force of varpart (disable with
     * ROW_FORMAT=FIXED), we force varpart onto tables when they are restored
     * from backups taken with older versions. This will be wrong if
     * ROW_FORMAT=FIXED was used on the original table, however the likelihood
     * of this is low, since ROW_FORMAT= was a NOOP in older versions.
     */
    if (table.getBackupVersion() < MAKE_VERSION(5,1,18))
      copy.setForceVarPart(true);
    else if (getMajor(table.getBackupVersion()) == 6 &&
             (table.getBackupVersion() < MAKE_VERSION(6,1,7) ||
              table.getBackupVersion() == MAKE_VERSION(6,2,0)))
      copy.setForceVarPart(true);

    /*
      Update min and max rows to reflect the table; this ensures that
      memory is allocated properly in the ndb kernel.
    */
    copy.setMinRows(table.getNoOfRecords());
    if (table.getNoOfRecords() > copy.getMaxRows()) {
      copy.setMaxRows(table.getNoOfRecords());
    }

    NdbTableImpl &tableImpl = NdbTableImpl::getImpl(copy);
    if (table.getBackupVersion() < MAKE_VERSION(5,1,0) && !m_no_upgrade) {
      for (int i= 0; i < copy.getNoOfColumns(); i++) {
        NdbDictionary::Column::Type t = copy.getColumn(i)->getType();

        if (t == NdbDictionary::Column::Varchar ||
            t == NdbDictionary::Column::Varbinary)
          tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeShortVar);
        if (t == NdbDictionary::Column::Longvarchar ||
            t == NdbDictionary::Column::Longvarbinary)
          tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeMediumVar);
      }
    }

    if (dict->createTable(copy) == -1) {
      err << "Create table `" << table.getTableName() << "` failed: "
          << dict->getNdbError() << endl;
      if (dict->getNdbError().code == 771) {
        /*
          The user on the cluster where the backup was created had specified
          specific node groups for partitions. Some of these node groups
          didn't exist on this cluster. We will warn the user of this and
          inform him of his option.
        */
        err << "The node groups defined in the table didn't exist in this";
        err << " cluster." << endl << "There is an option to use the";
        err << " parameter ndb-nodegroup-map to define a mapping from";
        err << endl << "the old nodegroups to new nodegroups" << endl;
      }
      return false;
    }
    info << "Successfully restored table `"
         << table.getTableName() << "`" << endl;
  }

  const NdbDictionary::Table* tab = dict->getTable(split[2].c_str());
  if (tab == 0) {
    err << "Unable to find table: `" << split[2].c_str() << "`" << endl;
    return false;
  }
  if (m_restore_meta) {
    if (tab->getFrmData()) {
      // a MySQL Server table is restored, thus an event should be created
      BaseString event_name("REPL$");
      event_name.append(split[0].c_str());
      event_name.append("/");
      event_name.append(split[2].c_str());

      NdbDictionary::Event my_event(event_name.c_str());
      my_event.setTable(*tab);
      my_event.addTableEvent(NdbDictionary::Event::TE_ALL);

      // add all columns to the event
      bool has_blobs = false;
      for (int a= 0; a < tab->getNoOfColumns(); a++) {
        my_event.addEventColumn(a);
        NdbDictionary::Column::Type t = tab->getColumn(a)->getType();
        if (t == NdbDictionary::Column::Blob ||
            t == NdbDictionary::Column::Text)
          has_blobs = true;
      }
      if (has_blobs)
        my_event.mergeEvents(true);

      while (dict->createEvent(my_event)) // Add event to database
      {
        if (dict->getNdbError().classification == NdbError::SchemaObjectExists) {
          info << "Event for table " << table.getTableName()
               << " already exists, removing.\n";
          if (!dict->dropEvent(my_event.getName()))
            continue;
        }
        err << "Create table event for " << table.getTableName()
            << " failed: " << dict->getNdbError() << endl;
        dict->dropTable(split[2].c_str());
        return false;
      }
      info << "Successfully restored table event " << event_name << endl;
    }
  }
  const NdbDictionary::Table* null = 0;
  m_new_tables.fill(table.m_dictTable->getTableId(), null);
  m_new_tables[table.m_dictTable->getTableId()] = tab;
  return true;
}
static int copy_events(Ndb *ndb)
{
  DBUG_ENTER("copy_events");
  int r= 0;
  NdbDictionary::Dictionary * dict = ndb->getDictionary();
  while (1) {
    int res= ndb->pollEvents(1000); // wait for event or 1000 ms
    DBUG_PRINT("info", ("pollEvents res=%d", res));
    if (res <= 0) {
      break;
    }
    int error= 0;
    NdbEventOperation *pOp;
    while ((pOp= ndb->nextEvent(&error))) {
      char buf[1024];
      sprintf(buf, "%s_SHADOW", pOp->getTable()->getName());
      const NdbDictionary::Table *table= dict->getTable(buf);
      if (table == 0) {
        g_err << "unable to find table " << buf << endl;
        DBUG_RETURN(-1);
      }
      if (pOp->isOverrun()) {
        g_err << "buffer overrun\n";
        DBUG_RETURN(-1);
      }
      r++;
      Uint32 gci= pOp->getGCI();
      if (!pOp->isConsistent()) {
        g_err << "A node failure has occurred and events might be missing\n";
        DBUG_RETURN(-1);
      }
      int noRetries= 0;
      do {
        NdbTransaction *trans= ndb->startTransaction();
        if (trans == 0) {
          g_err << "startTransaction failed "
                << ndb->getNdbError().code << " "
                << ndb->getNdbError().message << endl;
          DBUG_RETURN(-1);
        }
        NdbOperation *op= trans->getNdbOperation(table);
        if (op == 0) {
          g_err << "getNdbOperation failed "
                << trans->getNdbError().code << " "
                << trans->getNdbError().message << endl;
          DBUG_RETURN(-1);
        }
        switch (pOp->getEventType()) {
        case NdbDictionary::Event::TE_INSERT:
          if (op->insertTuple()) {
            g_err << "insertTuple "
                  << op->getNdbError().code << " "
                  << op->getNdbError().message << endl;
            DBUG_RETURN(-1);
          }
          break;
        case NdbDictionary::Event::TE_DELETE:
          if (op->deleteTuple()) {
            g_err << "deleteTuple "
                  << op->getNdbError().code << " "
                  << op->getNdbError().message << endl;
            DBUG_RETURN(-1);
          }
          break;
        case NdbDictionary::Event::TE_UPDATE:
          if (op->updateTuple()) {
            g_err << "updateTuple "
                  << op->getNdbError().code << " "
                  << op->getNdbError().message << endl;
            DBUG_RETURN(-1);
          }
          break;
        default:
          abort();
        }
        {
          for (const NdbRecAttr *pk= pOp->getFirstPkAttr(); pk; pk= pk->next()) {
            if (pk->isNULL()) {
              g_err << "internal error: primary key isNull()="
                    << pk->isNULL() << endl;
              DBUG_RETURN(NDBT_FAILED);
            }
            if (op->equal(pk->getColumn()->getColumnNo(), pk->aRef())) {
              g_err << "equal " << pk->getColumn()->getColumnNo() << " "
                    << op->getNdbError().code << " "
                    << op->getNdbError().message << endl;
              DBUG_RETURN(NDBT_FAILED);
            }
          }
        }
        switch (pOp->getEventType()) {
        case NdbDictionary::Event::TE_INSERT:
        {
          for (const NdbRecAttr *data= pOp->getFirstDataAttr(); data;
               data= data->next()) {
            if (data->isNULL() < 0 ||
                op->setValue(data->getColumn()->getColumnNo(),
                             data->isNULL() ? 0 : data->aRef())) {
              g_err << "setValue(insert) "
                    << data->getColumn()->getColumnNo() << " "
                    << op->getNdbError().code << " "
                    << op->getNdbError().message << endl;
              DBUG_RETURN(-1);
            }
          }
          break;
        }
        case NdbDictionary::Event::TE_DELETE:
          break;
        case NdbDictionary::Event::TE_UPDATE:
        {
          for (const NdbRecAttr *data= pOp->getFirstDataAttr(); data;
               data= data->next()) {
            if (data->isNULL() >= 0 &&
                op->setValue(data->getColumn()->getColumnNo(),
                             data->isNULL() ? 0 : data->aRef())) {
              g_err << "setValue(update) "
                    << data->getColumn()->getColumnNo() << " "
                    << op->getNdbError().code << " "
                    << op->getNdbError().message << endl;
              DBUG_RETURN(NDBT_FAILED);
            }
          }
          break;
        }
        case NdbDictionary::Event::TE_ALL:
          abort();
        }
        if (trans->execute(Commit) == 0) {
          trans->close();
          // everything ok
          break;
        }
        if (noRetries++ == 10 ||
            trans->getNdbError().status != NdbError::TemporaryError) {
          g_err << "execute " << r << " failed "
                << trans->getNdbError().code << " "
                << trans->getNdbError().message << endl;
          trans->close();
          DBUG_RETURN(-1);
        }
        trans->close();
        NdbSleep_MilliSleep(100); // sleep before retrying
      } while(1);
    } // while pOp
    if (error) {
      g_err << "nextEvent()\n";
      DBUG_RETURN(-1);
    }
  } // while(1)
  DBUG_RETURN(r);
}
int NDBT_Tables::create_default_tablespace(Ndb* pNdb)
{
  NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
  int res;
  NdbDictionary::LogfileGroup lg = pDict->getLogfileGroup("DEFAULT-LG");
  if (strcmp(lg.getName(), "DEFAULT-LG") != 0) {
    lg.setName("DEFAULT-LG");
    lg.setUndoBufferSize(8*1024*1024);
    res = pDict->createLogfileGroup(lg);
    if (res != 0) {
      g_err << "Failed to create logfilegroup:" << endl
            << pDict->getNdbError() << endl;
      return NDBT_FAILED;
    }
  }
  {
    NdbDictionary::Undofile uf = pDict->getUndofile(0, "undofile01.dat");
    if (strcmp(uf.getPath(), "undofile01.dat") != 0) {
      uf.setPath("undofile01.dat");
      uf.setSize(32*1024*1024);
      uf.setLogfileGroup("DEFAULT-LG");
      res = pDict->createUndofile(uf, true);
      if (res != 0) {
        g_err << "Failed to create undofile:" << endl
              << pDict->getNdbError() << endl;
        return NDBT_FAILED;
      }
    }
  }
  {
    NdbDictionary::Undofile uf = pDict->getUndofile(0, "undofile02.dat");
    if (strcmp(uf.getPath(), "undofile02.dat") != 0) {
      uf.setPath("undofile02.dat");
      uf.setSize(32*1024*1024);
      uf.setLogfileGroup("DEFAULT-LG");
      res = pDict->createUndofile(uf, true);
      if (res != 0) {
        g_err << "Failed to create undofile:" << endl
              << pDict->getNdbError() << endl;
        return NDBT_FAILED;
      }
    }
  }
  NdbDictionary::Tablespace ts = pDict->getTablespace("DEFAULT-TS");
  if (strcmp(ts.getName(), "DEFAULT-TS") != 0) {
    ts.setName("DEFAULT-TS");
    ts.setExtentSize(1024*1024);
    ts.setDefaultLogfileGroup("DEFAULT-LG");
    res = pDict->createTablespace(ts);
    if (res != 0) {
      g_err << "Failed to create tablespace:" << endl
            << pDict->getNdbError() << endl;
      return NDBT_FAILED;
    }
  }
  {
    NdbDictionary::Datafile df = pDict->getDatafile(0, "datafile01.dat");
    if (strcmp(df.getPath(), "datafile01.dat") != 0) {
      df.setPath("datafile01.dat");
      df.setSize(64*1024*1024);
      df.setTablespace("DEFAULT-TS");
      res = pDict->createDatafile(df, true);
      if (res != 0) {
        g_err << "Failed to create datafile:" << endl
              << pDict->getNdbError() << endl;
        return NDBT_FAILED;
      }
    }
  }
  {
    NdbDictionary::Datafile df = pDict->getDatafile(0, "datafile02.dat");
    if (strcmp(df.getPath(), "datafile02.dat") != 0) {
      df.setPath("datafile02.dat");
      df.setSize(64*1024*1024);
      df.setTablespace("DEFAULT-TS");
      res = pDict->createDatafile(df, true);
      if (res != 0) {
        g_err << "Failed to create datafile:" << endl
              << pDict->getNdbError() << endl;
        return NDBT_FAILED;
      }
    }
  }
  return NDBT_OK;
}