bool
BackupRestore::table(const TableS & table)
{
  if (!m_restore && !m_restore_meta)
    return true;

  const char * name = table.getTableName();

  /**
   * Ignore blob tables
   */
  if (match_blob(name) >= 0)
    return true;

  const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable);
  if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined)
  {
    m_indexes.push_back(table.m_dictTable);
    return true;
  }

  BaseString tmp(name);
  Vector<BaseString> split;
  if (tmp.split(split, "/") != 3)
  {
    err << "Invalid table name format `" << name << "`" << endl;
    return false;
  }

  m_ndb->setDatabaseName(split[0].c_str());
  m_ndb->setSchemaName(split[1].c_str());

  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  if (m_restore_meta)
  {
    NdbDictionary::Table copy(*table.m_dictTable);

    copy.setName(split[2].c_str());
    Uint32 id;
    if (copy.getTablespace(&id))
    {
      debug << "Connecting " << name << " to tablespace oldid: " << id << flush;
      NdbDictionary::Tablespace* ts = m_tablespaces[id];
      debug << " newid: " << ts->getObjectId() << endl;
      copy.setTablespace(* ts);
    }

    if (copy.getDefaultNoPartitionsFlag())
    {
      /*
        Table was defined with the default number of partitions. We can
        restore it with whatever is the default in this cluster. We use
        the max_rows parameter in calculating the default number.
      */
      Uint32 no_nodes = m_cluster_connection->no_db_nodes();
      copy.setFragmentCount(get_no_fragments(copy.getMaxRows(),
                                             no_nodes));
      set_default_nodegroups(&copy);
    }
    else
    {
      /*
        Table was defined with a specific number of partitions. It should
        be restored with the same number of partitions. It will either be
        restored in the same node groups as when the backup was taken, or
        by using a node group map supplied to the ndb_restore program.
      */
      Uint16 *ng_array = (Uint16*)copy.getFragmentData();
      Uint16 no_parts = copy.getFragmentCount();
      if (map_nodegroups(ng_array, no_parts))
      {
        if (translate_frm(&copy))
        {
          err << "Create table " << table.getTableName() << " failed: ";
          err << "Translate frm error" << endl;
          return false;
        }
      }
      copy.setFragmentData((const void *)ng_array, no_parts << 1);
    }

    /**
     * Forced varpart was introduced in 5.1.18, telco 6.1.7 and 6.2.1.
     * Since the default from mysqld is to add forced varpart (disable with
     * ROW_FORMAT=FIXED), we force varpart onto tables when they are
     * restored from backups taken with older versions. This will be wrong
     * if ROW_FORMAT=FIXED was used on the original table; however, the
     * likelihood of this is low, since ROW_FORMAT= was a NOOP in older
     * versions.
     */
    if (table.getBackupVersion() < MAKE_VERSION(5,1,18))
      copy.setForceVarPart(true);
    else if (getMajor(table.getBackupVersion()) == 6 &&
             (table.getBackupVersion() < MAKE_VERSION(6,1,7) ||
              table.getBackupVersion() == MAKE_VERSION(6,2,0)))
      copy.setForceVarPart(true);

    /*
      Update min and max rows to reflect the table; this ensures that
      memory is allocated properly in the ndb kernel.
    */
    copy.setMinRows(table.getNoOfRecords());
    if (table.getNoOfRecords() > copy.getMaxRows())
    {
      copy.setMaxRows(table.getNoOfRecords());
    }

    NdbTableImpl &tableImpl = NdbTableImpl::getImpl(copy);
    if (table.getBackupVersion() < MAKE_VERSION(5,1,0) && !m_no_upgrade)
    {
      for (int i = 0; i < copy.getNoOfColumns(); i++)
      {
        NdbDictionary::Column::Type t = copy.getColumn(i)->getType();

        if (t == NdbDictionary::Column::Varchar ||
            t == NdbDictionary::Column::Varbinary)
          tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeShortVar);
        if (t == NdbDictionary::Column::Longvarchar ||
            t == NdbDictionary::Column::Longvarbinary)
          tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeMediumVar);
      }
    }

    if (dict->createTable(copy) == -1)
    {
      err << "Create table `" << table.getTableName() << "` failed: "
          << dict->getNdbError() << endl;
      if (dict->getNdbError().code == 771)
      {
        /*
          The user on the cluster where the backup was created had
          specified specific node groups for partitions. Some of these
          node groups didn't exist on this cluster. We warn the user of
          this and inform them of their options.
        */
        err << "The node groups defined in the table didn't exist in this";
        err << " cluster." << endl << "There is an option to use the";
        err << " parameter ndb-nodegroup-map to define a mapping from";
        err << endl << "the old nodegroups to new nodegroups" << endl;
      }
      return false;
    }
    info << "Successfully restored table `"
         << table.getTableName() << "`" << endl;
  }

  const NdbDictionary::Table* tab = dict->getTable(split[2].c_str());
  if (tab == 0)
  {
    err << "Unable to find table: `" << split[2].c_str() << "`" << endl;
    return false;
  }
  if (m_restore_meta)
  {
    if (tab->getFrmData())
    {
      // a MySQL Server table is restored, thus an event should be created
      BaseString event_name("REPL$");
      event_name.append(split[0].c_str());
      event_name.append("/");
      event_name.append(split[2].c_str());

      NdbDictionary::Event my_event(event_name.c_str());
      my_event.setTable(*tab);
      my_event.addTableEvent(NdbDictionary::Event::TE_ALL);

      // add all columns to the event
      bool has_blobs = false;
      for (int a = 0; a < tab->getNoOfColumns(); a++)
      {
        my_event.addEventColumn(a);
        NdbDictionary::Column::Type t = tab->getColumn(a)->getType();
        if (t == NdbDictionary::Column::Blob ||
            t == NdbDictionary::Column::Text)
          has_blobs = true;
      }
      if (has_blobs)
        my_event.mergeEvents(true);

      while (dict->createEvent(my_event)) // Add event to database
      {
        if (dict->getNdbError().classification == NdbError::SchemaObjectExists)
        {
          info << "Event for table " << table.getTableName()
               << " already exists, removing.\n";
          if (!dict->dropEvent(my_event.getName()))
            continue;
        }
        err << "Create table event for " << table.getTableName()
            << " failed: " << dict->getNdbError() << endl;
        dict->dropTable(split[2].c_str());
        return false;
      }
      info << "Successfully restored table event " << event_name << endl;
    }
  }
  const NdbDictionary::Table* null = 0;
  m_new_tables.fill(table.m_dictTable->getTableId(), null);
  m_new_tables[table.m_dictTable->getTableId()] = tab;

  return true;
}
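
/*
 * get_no_fragments() is a helper defined elsewhere in ndb_restore and is
 * not shown here. As a rough illustration of the idea behind the default
 * partition count (a minimal sketch only, NOT the actual implementation;
 * the per-fragment capacity constant below is an illustrative assumption):
 */
static Uint32
example_no_fragments(Uint64 max_rows, Uint32 no_nodes)
{
  // Hypothetical capacity: how many rows one fragment is assumed to hold.
  const Uint64 rows_per_fragment = 64 * 1024 * 1024;
  if (no_nodes == 0)
    no_nodes = 1; // guard against a degenerate cluster size

  // Grow the partition count in multiples of the data node count until
  // the estimated row load fits across the fragments.
  Uint32 no_parts = no_nodes;
  while ((Uint64)no_parts * rows_per_fragment < max_rows)
    no_parts += no_nodes;
  return no_parts;
}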
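
/*
 * map_nodegroups() is likewise defined elsewhere: it rewrites each
 * fragment's node group id through the map the user supplies via
 * --ndb-nodegroup-map and reports whether anything changed, which is what
 * triggers the translate_frm() call above. A minimal sketch of that
 * contract (the map representation here is hypothetical; the real option
 * parsing and data layout live elsewhere in ndb_restore):
 */
static bool
example_map_nodegroups(Uint16 *ng_array, Uint32 no_parts,
                       const Uint16 *ng_map, Uint32 map_size)
{
  bool changed = false;
  for (Uint32 i = 0; i < no_parts; i++)
  {
    Uint16 ng = ng_array[i];
    if (ng < map_size && ng_map[ng] != ng)
    {
      ng_array[i] = ng_map[ng]; // translate old node group to new one
      changed = true;
    }
  }
  return changed; // if true, the caller must retranslate the frm blob
}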