Example #1
int
runDDL(NDBT_Context* ctx, NDBT_Step* step){
  Ndb* pNdb= GETNDB(step);
  NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
  
  const int tables = NDBT_Tables::getNumTables();
  while(!ctx->isTestStopped())
  {
    const int tab_no = rand() % (tables);
    NdbDictionary::Table tab = *NDBT_Tables::getTable(tab_no);
    BaseString name= tab.getName();
    name.appfmt("-%d", step->getStepNo());
    tab.setName(name.c_str());
    if(pDict->createTable(tab) == 0)
    {
      HugoTransactions hugoTrans(* pDict->getTable(name.c_str()));
      if (hugoTrans.loadTable(pNdb, 10000) != 0){
	return NDBT_FAILED;
      }
      
      while(pDict->dropTable(tab.getName()) != 0 &&
	    pDict->getNdbError().code != 4009)
	g_err << pDict->getNdbError() << endl;
      
      sleep(1);

    }
  }
  return NDBT_OK;
}
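
For reference, the dictionary work in the step above boils down to a short create/inspect/drop cycle. Below is a minimal standalone sketch of that cycle, assuming an already initialized Ndb object (connection setup as in the main() example further down); the table and column names are placeholders, not taken from the example.

#include <NdbApi.hpp>
#include <cstdio>

// Minimal sketch: create a small table, fetch its dictionary copy, drop it.
// Errors are reported through dict->getNdbError(), as in the examples.
static int create_and_drop_sketch(Ndb &ndb)
{
  NdbDictionary::Dictionary *dict = ndb.getDictionary();

  NdbDictionary::Table tab("t_sketch");            // placeholder name
  NdbDictionary::Column pk("ID");
  pk.setType(NdbDictionary::Column::Unsigned);
  pk.setPrimaryKey(true);
  pk.setNullable(false);
  tab.addColumn(pk);

  NdbDictionary::Column val("VAL");
  val.setType(NdbDictionary::Column::Unsigned);
  val.setNullable(false);
  tab.addColumn(val);

  if (dict->createTable(tab) != 0) {
    fprintf(stderr, "createTable failed: %d %s\n",
            dict->getNdbError().code, dict->getNdbError().message);
    return -1;
  }

  // The object passed to createTable() is only a description; the copy
  // actually stored in the cluster is fetched back with getTable().
  const NdbDictionary::Table *created = dict->getTable("t_sketch");
  if (created == 0)
    return -1;

  if (dict->dropTable("t_sketch") != 0) {
    fprintf(stderr, "dropTable failed: %d %s\n",
            dict->getNdbError().code, dict->getNdbError().message);
    return -1;
  }
  return 0;
}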
Example #2
int
create_table(){
  NdbDictionary::Dictionary* dict = g_ndb->getDictionary();
  assert(dict);
  if(g_paramters[P_CREATE].value){
    g_ndb->getDictionary()->dropTable(g_tablename);
    const NdbDictionary::Table * pTab = NDBT_Tables::getTable(g_tablename);
    assert(pTab);
    NdbDictionary::Table copy = * pTab;
    copy.setLogging(false);
    if(dict->createTable(copy) != 0){
      g_err << "Failed to create table: " << g_tablename << endl;
      return -1;
    }

    NdbDictionary::Index x(g_indexname);
    x.setTable(g_tablename);
    x.setType(NdbDictionary::Index::OrderedIndex);
    x.setLogging(false);
    for (unsigned k = 0; k < (unsigned) copy.getNoOfColumns(); k++){
      if(copy.getColumn(k)->getPrimaryKey()){
	x.addColumnName(copy.getColumn(k)->getName());
      }
    }

    if(dict->createIndex(x) != 0){
      g_err << "Failed to create index: " << endl;
      return -1;
    }
  }
  g_table = dict->getTable(g_tablename);
  g_index = dict->getIndex(g_indexname, g_tablename);
  assert(g_table);
  assert(g_index);

  if(g_paramters[P_CREATE].value)
  {
    int rows = g_paramters[P_ROWS].value;
    HugoTransactions hugoTrans(* g_table);
    if (hugoTrans.loadTable(g_ndb, rows)){
      g_err.println("Failed to load %s with %d rows", 
		    g_table->getName(), rows);
      return -1;
    }
  }
  
  return 0;
}
Example #3
bool
BackupRestore::table(const TableS & table){
  if (!m_restore_meta) 
  {
    return true;
  }
  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  if (dict->createTable(*table.m_dictTable) == -1) 
  {
    err << "Create table " << table.getTableName() << " failed: "
	<< dict->getNdbError() << endl;
    return false;
  }
  info << "Successfully restored table " << table.getTableName()<< endl ;
  return true;
}
Example #4
int
create_table() {
    NdbDictionary::Dictionary* dict = g_ndb->getDictionary();
    assert(dict);
    if(g_paramters[P_CREATE].value) {
        const NdbDictionary::Table * pTab = NDBT_Tables::getTable(g_table);
        assert(pTab);
        NdbDictionary::Table copy = * pTab;
        copy.setLogging(false);
        if(dict->createTable(copy) != 0) {
            g_err << "Failed to create table: " << g_table << endl;
            return -1;
        }

        NdbDictionary::Index x(g_ordered);
        x.setTable(g_table);
        x.setType(NdbDictionary::Index::OrderedIndex);
        x.setLogging(false);
        for (unsigned k = 0; k < (unsigned) copy.getNoOfColumns(); k++) {
            if(copy.getColumn(k)->getPrimaryKey()) {
                x.addColumnName(copy.getColumn(k)->getName());
            }
        }

        if(dict->createIndex(x) != 0) {
            g_err << "Failed to create index: " << endl;
            return -1;
        }

        x.setName(g_unique);
        x.setType(NdbDictionary::Index::UniqueHashIndex);
        if(dict->createIndex(x) != 0) {
            g_err << "Failed to create index: " << endl;
            return -1;
        }
    }
    g_tab = dict->getTable(g_table);
    g_i_unique = dict->getIndex(g_unique, g_table);
    g_i_ordered = dict->getIndex(g_ordered, g_table);
    assert(g_tab);
    assert(g_i_unique);
    assert(g_i_ordered);
    return 0;
}
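
Both index variants above follow the same recipe: collect the primary-key columns of the table, then create an index of the wanted type over them. A compact sketch of just that recipe, assuming an existing table handle (the helper name and the fprintf error reporting are illustrative, not from the example):

#include <NdbApi.hpp>
#include <cstdio>

// Sketch: build an ordered index over the primary-key columns of a table.
static int create_pk_ordered_index(NdbDictionary::Dictionary *dict,
                                   const NdbDictionary::Table *tab,
                                   const char *index_name)
{
  NdbDictionary::Index idx(index_name);
  idx.setTable(tab->getName());
  idx.setType(NdbDictionary::Index::OrderedIndex);
  idx.setLogging(false);                 // as in the examples above

  for (int k = 0; k < tab->getNoOfColumns(); k++) {
    if (tab->getColumn(k)->getPrimaryKey())
      idx.addColumnName(tab->getColumn(k)->getName());
  }

  if (dict->createIndex(idx) != 0) {
    fprintf(stderr, "createIndex %s failed: %d %s\n", index_name,
            dict->getNdbError().code, dict->getNdbError().message);
    return -1;
  }
  return 0;
}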
Example #5
int
main(int argc, char** argv)
{
  NDB_INIT(argv[0]);
  const char *load_default_groups[]= { "mysql_cluster",0 };
  load_defaults("my",load_default_groups,&argc,&argv);

  int ho_error;
#ifndef DBUG_OFF
  opt_debug= "d:t:F:L";
#endif
  if ((ho_error=handle_options(&argc, &argv, my_long_options, 
			       ndb_std_get_one_option)))
    return NDBT_ProgramExit(NDBT_WRONGARGS);

  DBUG_ENTER("main");
  Ndb_cluster_connection con(opt_connect_str);
  if(con.connect(12, 5, 1))
  {
    DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
  }
  

  Ndb ndb(&con,_dbname);
  ndb.init();
  while (ndb.waitUntilReady() != 0);

  NdbDictionary::Dictionary * dict = ndb.getDictionary();
  int no_error= 1;
  int i;

  // create all tables
  Vector<const NdbDictionary::Table*> pTabs;
  if (argc == 0)
  {
    NDBT_Tables::dropAllTables(&ndb);
    NDBT_Tables::createAllTables(&ndb);
    for (i= 0; no_error && i < NDBT_Tables::getNumTables(); i++)
    {
      const NdbDictionary::Table *pTab= dict->getTable(NDBT_Tables::getTable(i)->getName());
      if (pTab == 0)
      {
	ndbout << "Failed to create table" << endl;
	ndbout << dict->getNdbError() << endl;
	no_error= 0;
	break;
      }
      pTabs.push_back(pTab);
    }
  }
  else
  {
    for (i= 0; no_error && argc; argc--, i++)
    {
      dict->dropTable(argv[i]);
      NDBT_Tables::createTable(&ndb, argv[i]);
      const NdbDictionary::Table *pTab= dict->getTable(argv[i]);
      if (pTab == 0)
      {
	ndbout << "Failed to create table" << endl;
	ndbout << dict->getNdbError() << endl;
	no_error= 0;
	break;
      }
      pTabs.push_back(pTab);
    }
  }
  pTabs.push_back(NULL); // NULL sentinel; the loops below stop at pTabs[i] == 0

  // create an event for each table
  for (i= 0; no_error && pTabs[i]; i++)
  {
    HugoTransactions ht(*pTabs[i]);
    if (ht.createEvent(&ndb)){
      no_error= 0;
      break;
    }
  }

  // create an event operation for each event
  Vector<NdbEventOperation *> pOps;
  for (i= 0; no_error && pTabs[i]; i++)
  {
    char buf[1024];
    sprintf(buf, "%s_EVENT", pTabs[i]->getName());
    NdbEventOperation *pOp= ndb.createEventOperation(buf, 1000);
    if ( pOp == NULL )
    {
      no_error= 0;
      break;
    }
    pOps.push_back(pOp);
  }

  // get storage for each event operation
  for (i= 0; no_error && pTabs[i]; i++)
  {
    int n_columns= pTabs[i]->getNoOfColumns();
    for (int j = 0; j < n_columns; j++) {
      pOps[i]->getValue(pTabs[i]->getColumn(j)->getName());
      pOps[i]->getPreValue(pTabs[i]->getColumn(j)->getName());
    }
  }

  // start receiving events
  for (i= 0; no_error && pTabs[i]; i++)
  {
    if ( pOps[i]->execute() )
    {
      no_error= 0;
      break;
    }
  }

  // create a "shadow" table for each table
  Vector<const NdbDictionary::Table*> pShadowTabs;
  for (i= 0; no_error && pTabs[i]; i++)
  {
    char buf[1024];
    sprintf(buf, "%s_SHADOW", pTabs[i]->getName());

    dict->dropTable(buf);
    if (dict->getTable(buf))
    {
      no_error= 0;
      break;
    }

    NdbDictionary::Table table_shadow(*pTabs[i]);
    table_shadow.setName(buf);
    dict->createTable(table_shadow);
    pShadowTabs.push_back(dict->getTable(buf));
    if (!pShadowTabs[i])
    {
      no_error= 0;
      break;
    }
  }

  // create a hugo operation per table
  Vector<HugoOperations *> hugo_ops;
  for (i= 0; no_error && pTabs[i]; i++)
  {
    hugo_ops.push_back(new HugoOperations(*pTabs[i]));
  }

  int n_records= 3;
  // insert n_records records per table
  do {
    if (start_transaction(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
    for (i= 0; no_error && pTabs[i]; i++)
    {
      hugo_ops[i]->pkInsertRecord(&ndb, 0, n_records);
    }
    if (execute_commit(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
    if(close_transaction(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
  } while(0);

  // copy events and verify
  do {
    if (copy_events(&ndb) < 0)
    {
      no_error= 0;
      break;
    }
    if (verify_copy(&ndb, pTabs, pShadowTabs))
    {
      no_error= 0;
      break;
    }
  } while (0);

  // update n_records-1 records in first table
  do {
    if (start_transaction(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }

    hugo_ops[0]->pkUpdateRecord(&ndb, n_records-1);

    if (execute_commit(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
    if(close_transaction(&ndb, hugo_ops))
    {
      no_error= 0;
      break;
    }
  } while(0);

  // copy events and verify
  do {
    if (copy_events(&ndb) < 0)
    {
      no_error= 0;
      break;
    }
    if (verify_copy(&ndb, pTabs, pShadowTabs))
    {
      no_error= 0;
      break;
    }
  } while (0);


  {
    NdbRestarts restarts;
    for (int j= 0; j < 10; j++)
    {
      // restart a node
      if (no_error)
      {
	int timeout = 240;
	if (restarts.executeRestart("RestartRandomNodeAbort", timeout))
	{
	  no_error= 0;
	  break;
	}
      }

      // update all n_records records on all tables
      if (start_transaction(&ndb, hugo_ops))
      {
	no_error= 0;
	break;
      }

      for (int r= 0; r < n_records; r++)
      {
	for (i= 0; pTabs[i]; i++)
	{
	  hugo_ops[i]->pkUpdateRecord(&ndb, r);
	}
      }
      if (execute_commit(&ndb, hugo_ops))
      {
	no_error= 0;
	break;
      }
      if(close_transaction(&ndb, hugo_ops))
      {
	no_error= 0;
	break;
      }

      // copy events and verify
      if (copy_events(&ndb) < 0)
      {
	no_error= 0;
	break;
      }
      if (verify_copy(&ndb, pTabs, pShadowTabs))
      {
	no_error= 0;
	break;
      }
    }
  }

  // drop the event operations
  for (i= 0; i < (int)pOps.size(); i++)
  {
    if (ndb.dropEventOperation(pOps[i]))
    {
      no_error= 0;
    }
  }

  if (no_error)
    DBUG_RETURN(NDBT_ProgramExit(NDBT_OK));
  DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
}
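
The event plumbing exercised above reduces to four steps per table: define the event in the dictionary, create an event operation, register the columns to receive, and call execute(). Below is a sketch of those steps for a single table, assuming an initialized Ndb object; the <table>_EVENT naming and the two-argument createEventOperation() call simply mirror the code above.

#include <NdbApi.hpp>
#include <cstdio>

// Sketch: subscribe to all change events on one table.
static NdbEventOperation *subscribe_sketch(Ndb &ndb,
                                           const NdbDictionary::Table *tab)
{
  NdbDictionary::Dictionary *dict = ndb.getDictionary();

  // 1. Define the event (tolerate an already existing definition).
  char event_name[1024];
  sprintf(event_name, "%s_EVENT", tab->getName());

  NdbDictionary::Event ev(event_name);
  ev.setTable(*tab);
  ev.addTableEvent(NdbDictionary::Event::TE_ALL);
  for (int a = 0; a < tab->getNoOfColumns(); a++)
    ev.addEventColumn(a);

  if (dict->createEvent(ev) != 0 &&
      dict->getNdbError().classification != NdbError::SchemaObjectExists)
    return NULL;

  // 2. Create the event operation (buffer argument as in main() above).
  NdbEventOperation *op = ndb.createEventOperation(event_name, 1000);
  if (op == NULL)
    return NULL;

  // 3. Register current and pre-update values for every column.
  for (int a = 0; a < tab->getNoOfColumns(); a++) {
    op->getValue(tab->getColumn(a)->getName());
    op->getPreValue(tab->getColumn(a)->getName());
  }

  // 4. Start receiving; drop the operation again if execute() fails.
  if (op->execute() != 0) {
    ndb.dropEventOperation(op);
    return NULL;
  }
  return op;
}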
Example #6
bool
BackupRestore::table(const TableS & table){
  if (!m_restore && !m_restore_meta)
    return true;

  const char * name = table.getTableName();
 
  /**
   * Ignore blob tables
   */
  if(match_blob(name) >= 0)
    return true;
  
  const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable);
  if ((int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined){
    m_indexes.push_back(table.m_dictTable);
    return true;
  }
  
  BaseString tmp(name);
  Vector<BaseString> split;
  if(tmp.split(split, "/") != 3){
    err << "Invalid table name format `" << name << "`" << endl;
    return false;
  }

  m_ndb->setDatabaseName(split[0].c_str());
  m_ndb->setSchemaName(split[1].c_str());
  
  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  if(m_restore_meta)
  {
    NdbDictionary::Table copy(*table.m_dictTable);

    copy.setName(split[2].c_str());
    Uint32 id;
    if (copy.getTablespace(&id))
    {
      debug << "Connecting " << name << " to tablespace oldid: " << id << flush;
      NdbDictionary::Tablespace* ts = m_tablespaces[id];
      debug << " newid: " << ts->getObjectId() << endl;
      copy.setTablespace(* ts);
    }
    
    if (copy.getDefaultNoPartitionsFlag())
    {
      /*
        Table was defined with default number of partitions. We can restore
        it with whatever is the default in this cluster.
        We use the max_rows parameter in calculating the default number.
      */
      Uint32 no_nodes = m_cluster_connection->no_db_nodes();
      copy.setFragmentCount(get_no_fragments(copy.getMaxRows(),
                            no_nodes));
      set_default_nodegroups(&copy);
    }
    else
    {
      /*
        Table was defined with specific number of partitions. It should be
        restored with the same number of partitions. It will either be
        restored in the same node groups as when backup was taken or by
        using a node group map supplied to the ndb_restore program.
      */
      Uint16 *ng_array = (Uint16*)copy.getFragmentData();
      Uint16 no_parts = copy.getFragmentCount();
      if (map_nodegroups(ng_array, no_parts))
      {
        if (translate_frm(&copy))
        {
          err << "Create table " << table.getTableName() << " failed: ";
          err << "Translate frm error" << endl;
          return false;
        }
      }
      copy.setFragmentData((const void *)ng_array, no_parts << 1);
    }

    /**
     * Forced varpart was introduced in 5.1.18, telco 6.1.7 and 6.2.1.
     * Since the mysqld default is to force varpart (disable with
     * ROW_FORMAT=FIXED), we force varpart onto tables when they are restored
     * from backups taken with older versions. This will be wrong if
     * ROW_FORMAT=FIXED was used on the original table, but the likelihood of
     * that is low, since ROW_FORMAT= was a no-op in older versions.
     */

    if (table.getBackupVersion() < MAKE_VERSION(5,1,18))
      copy.setForceVarPart(true);
    else if (getMajor(table.getBackupVersion()) == 6 &&
             (table.getBackupVersion() < MAKE_VERSION(6,1,7) ||
              table.getBackupVersion() == MAKE_VERSION(6,2,0)))
      copy.setForceVarPart(true);

    /*
      update min and max rows to reflect the table, this to
      ensure that memory is allocated properly in the ndb kernel
    */
    copy.setMinRows(table.getNoOfRecords());
    if (table.getNoOfRecords() > copy.getMaxRows())
    {
      copy.setMaxRows(table.getNoOfRecords());
    }
    
    NdbTableImpl &tableImpl = NdbTableImpl::getImpl(copy);
    if (table.getBackupVersion() < MAKE_VERSION(5,1,0) && !m_no_upgrade){
      for(int i= 0; i < copy.getNoOfColumns(); i++)
      {
        NdbDictionary::Column::Type t = copy.getColumn(i)->getType();

        if (t == NdbDictionary::Column::Varchar ||
          t == NdbDictionary::Column::Varbinary)
          tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeShortVar);
        if (t == NdbDictionary::Column::Longvarchar ||
          t == NdbDictionary::Column::Longvarbinary)
          tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeMediumVar);
      }
    }

    if (dict->createTable(copy) == -1) 
    {
      err << "Create table `" << table.getTableName() << "` failed: "
          << dict->getNdbError() << endl;
      if (dict->getNdbError().code == 771)
      {
        /*
          The user on the cluster where the backup was created specified
          explicit node groups for partitions, and some of those node groups
          do not exist on this cluster. Warn the user and point at the
          ndb-nodegroup-map option.
        */
        err << "The node groups defined in the table didn't exist in this";
        err << " cluster." << endl << "There is an option to use the";
        err << " parameter ndb-nodegroup-map to define a mapping from";
        err << endl << "the old nodegroups to new nodegroups." << endl;
      }
      return false;
    }
    info << "Successfully restored table `"
         << table.getTableName() << "`" << endl;
  }  
  
  const NdbDictionary::Table* tab = dict->getTable(split[2].c_str());
  if(tab == 0){
    err << "Unable to find table: `" << split[2].c_str() << "`" << endl;
    return false;
  }
  if(m_restore_meta)
  {
    if (tab->getFrmData())
    {
      // a MySQL Server table is restored, thus an event should be created
      BaseString event_name("REPL$");
      event_name.append(split[0].c_str());
      event_name.append("/");
      event_name.append(split[2].c_str());

      NdbDictionary::Event my_event(event_name.c_str());
      my_event.setTable(*tab);
      my_event.addTableEvent(NdbDictionary::Event::TE_ALL);

      // add all columns to the event
      bool has_blobs = false;
      for(int a= 0; a < tab->getNoOfColumns(); a++)
      {
	my_event.addEventColumn(a);
        NdbDictionary::Column::Type t = tab->getColumn(a)->getType();
        if (t == NdbDictionary::Column::Blob ||
            t == NdbDictionary::Column::Text)
          has_blobs = true;
      }
      if (has_blobs)
        my_event.mergeEvents(true);

      while ( dict->createEvent(my_event) ) // Add event to database
      {
	if (dict->getNdbError().classification == NdbError::SchemaObjectExists)
	{
	  info << "Event for table " << table.getTableName()
	       << " already exists, removing.\n";
	  if (!dict->dropEvent(my_event.getName()))
	    continue;
	}
	err << "Create table event for " << table.getTableName() << " failed: "
	    << dict->getNdbError() << endl;
	dict->dropTable(split[2].c_str());
	return false;
      }
      info << "Successfully restored table event " << event_name << endl ;
    }
  }
  const NdbDictionary::Table* null = 0;
  m_new_tables.fill(table.m_dictTable->getTableId(), null);
  m_new_tables[table.m_dictTable->getTableId()] = tab;
  return true;
}
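
A small piece worth isolating from the function above is the handling of internal `db/schema/table` names: the name is split, the Ndb object is pointed at the right database and schema, and only then is the dictionary consulted. A sketch of just that step, assuming the BaseString and Vector utility classes used above (these are NDB-internal helpers; the header names below are assumptions based on the NDB source tree, not part of the public NdbApi.hpp):

#include <NdbApi.hpp>
// BaseString and Vector are NDB-internal utility classes; the header
// names are assumed from the NDB source tree layout.
#include <BaseString.hpp>
#include <Vector.hpp>

// Sketch: resolve an internal "db/schema/table" name to a dictionary table.
static const NdbDictionary::Table *
lookup_by_internal_name(Ndb *ndb, const char *internal_name)
{
  BaseString tmp(internal_name);
  Vector<BaseString> split;
  if (tmp.split(split, "/") != 3)
    return 0;                            // not in db/schema/table form

  ndb->setDatabaseName(split[0].c_str());
  ndb->setSchemaName(split[1].c_str());
  return ndb->getDictionary()->getTable(split[2].c_str());
}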
Example #7
int
create_table(){
  NdbDictionary::Dictionary* dict = g_ndb->getDictionary();
  assert(dict);
  if(g_paramters[P_CREATE].value){
    g_ndb->getDictionary()->dropTable(g_tablename);
    const NdbDictionary::Table * pTab = NDBT_Tables::getTable(g_tablename);
    assert(pTab);
    NdbDictionary::Table copy = * pTab;
    copy.setLogging(false);
    if(dict->createTable(copy) != 0){
      g_err << "Failed to create table: " << g_tablename << endl;
      return -1;
    }

    NdbDictionary::Index x(g_indexname);
    x.setTable(g_tablename);
    x.setType(NdbDictionary::Index::OrderedIndex);
    x.setLogging(false);
    for (unsigned k = 0; k < (unsigned) copy.getNoOfColumns(); k++){
      if(copy.getColumn(k)->getPrimaryKey()){
	x.addColumnName(copy.getColumn(k)->getName());
      }
    }

    if(dict->createIndex(x) != 0){
      g_err << "Failed to create index: " << endl;
      return -1;
    }
  }
  g_table = dict->getTable(g_tablename);
  g_index = dict->getIndex(g_indexname, g_tablename);
  assert(g_table);
  assert(g_index);

  /* Obtain NdbRecord instances for the table and index */
  {
    NdbDictionary::RecordSpecification spec[ NDB_MAX_ATTRIBUTES_IN_TABLE ];
    
    Uint32 offset=0;
    Uint32 cols= g_table->getNoOfColumns();
    for (Uint32 colNum=0; colNum<cols; colNum++)
    {
      const NdbDictionary::Column* col= g_table->getColumn(colNum);
      Uint32 colLength= col->getLength();
      
      spec[colNum].column= col;
      spec[colNum].offset= offset;

      offset+= colLength;

      spec[colNum].nullbit_byte_offset= offset++;
      spec[colNum].nullbit_bit_in_byte= 0;
    }
  
    g_table_record= dict->createRecord(g_table,
                                       &spec[0],
                                       cols,
                                       sizeof(NdbDictionary::RecordSpecification));

    assert(g_table_record);
  }
  {
    NdbDictionary::RecordSpecification spec[ NDB_MAX_ATTRIBUTES_IN_TABLE ];
    
    Uint32 offset=0;
    Uint32 cols= g_index->getNoOfColumns();
    for (Uint32 colNum=0; colNum<cols; colNum++)
    {
      /* Get column from the underlying table */
      // TODO : Add this mechanism to dict->createRecord
      // TODO : Add NdbRecord queryability methods so that an NdbRecord can
      // be easily built and later used to read out data.
      const NdbDictionary::Column* col= 
        g_table->getColumn(g_index->getColumn(colNum)->getName());
      Uint32 colLength= col->getLength();
      
      spec[colNum].column= col;
      spec[colNum].offset= offset;

      offset+= colLength;

      spec[colNum].nullbit_byte_offset= offset++;
      spec[colNum].nullbit_bit_in_byte= 0;
    }
  
    g_index_record= dict->createRecord(g_index,
                                       &spec[0],
                                       cols,
                                       sizeof(NdbDictionary::RecordSpecification));

    assert(g_index_record);
  }


  if(g_paramters[P_CREATE].value)
  {
    int rows = g_paramters[P_ROWS].value;
    HugoTransactions hugoTrans(* g_table);
    if (hugoTrans.loadTable(g_ndb, rows)){
      g_err.println("Failed to load %s with %d rows", 
		    g_table->getName(), rows);
      return -1;
    }
  }
  
  return 0;
}
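
The NdbRecord handles created above stay valid until they are explicitly handed back to the dictionary with Dictionary::releaseRecord(). A sketch of the matching teardown, assuming g_table_record and g_index_record are the NdbRecord* globals implied by the assignments above (the helper name is made up):

// Sketch: release the NdbRecord instances set up in create_table().
static void release_records_sketch(NdbDictionary::Dictionary *dict)
{
  if (g_table_record != NULL) {
    dict->releaseRecord(g_table_record);   // hand the record back to the dictionary
    g_table_record = NULL;
  }
  if (g_index_record != NULL) {
    dict->releaseRecord(g_index_record);
    g_index_record = NULL;
  }
}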
Example #8
static void createTable(Ndb &myNdb, bool storeInACC, bool twoKey, bool longKey)
{
  NdbDictionary::Dictionary* dict = myNdb.getDictionary();
  NdbDictionary::Table table("PERSON");
  //NdbDictionary::Column column(); // Bug: the parentheses declare a function (most vexing parse), not a Column
  NdbDictionary::Column column;
  int res;

  column.setName("NAME");
  column.setType(NdbDictionary::Column::Char);
  column.setLength((longKey)?
		   1024       // 1KB => long key
		   :12);
  column.setPrimaryKey(true);
  column.setNullable(false);
  table.addColumn(column);

  if (twoKey) {
    column.setName("KEY2");
    column.setType(NdbDictionary::Column::Unsigned);
    column.setLength(1);
    column.setPrimaryKey(true);
    column.setNullable(false);
    table.addColumn(column);
  }

  column.setName("PNUM1");
  column.setType(NdbDictionary::Column::Unsigned);
  column.setLength(1);
  column.setPrimaryKey(false);
  column.setNullable(false);
  table.addColumn(column);

  column.setName("PNUM2");
  column.setType(NdbDictionary::Column::Unsigned);
  column.setLength(1);
  column.setPrimaryKey(false);
  column.setNullable(false);
  table.addColumn(column);

  column.setName("PNUM3");
  column.setType(NdbDictionary::Column::Unsigned);
  column.setLength(1);
  column.setPrimaryKey(false);
  column.setNullable(false);
  table.addColumn(column);

  column.setName("PNUM4");
  column.setType(NdbDictionary::Column::Unsigned);
  column.setLength(1);
  column.setPrimaryKey(false);
  column.setNullable(false);
  table.addColumn(column);

  column.setName("AGE");
  column.setType(NdbDictionary::Column::Unsigned);
  column.setLength(1);
  column.setPrimaryKey(false);
  column.setNullable(false);
  table.addColumn(column);

  column.setName("STRING_AGE");
  column.setType(NdbDictionary::Column::Char);
  column.setLength(256);
  column.setPrimaryKey(false);
  column.setNullable(false);
  table.addColumn(column);

  if ((res = dict->createTable(table)) == -1) {
    error_handler(dict->getNdbError());
  }
  else
      ndbout << "Created table" << ((longKey)?" with long key":"") <<endl;
}
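
The column definitions above are deliberately spelled out one setter at a time; the same layout can be written more compactly with a small helper around the setters already used. A sketch (the helper itself is made up; the setters are the ones from createTable() above):

// Sketch: thin wrapper around the NdbDictionary::Column setters used above.
static void addSimpleColumn(NdbDictionary::Table &table, const char *name,
                            NdbDictionary::Column::Type type, int length,
                            bool primaryKey)
{
  NdbDictionary::Column column(name);
  column.setType(type);
  column.setLength(length);
  column.setPrimaryKey(primaryKey);
  column.setNullable(false);
  table.addColumn(column);
}

// Usage, mirroring part of the PERSON layout:
//   addSimpleColumn(table, "PNUM1",      NdbDictionary::Column::Unsigned, 1,   false);
//   addSimpleColumn(table, "AGE",        NdbDictionary::Column::Unsigned, 1,   false);
//   addSimpleColumn(table, "STRING_AGE", NdbDictionary::Column::Char,     256, false);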
Example #9
bool
BackupRestore::table(const TableS & table){
  if (!m_restore && !m_restore_meta)
    return true;

  const char * name = table.getTableName();
  
  /**
   * Ignore blob tables
   */
  if(match_blob(name) >= 0)
    return true;
  
  const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable);
  if(tmptab.m_indexType != NdbDictionary::Index::Undefined){
    m_indexes.push_back(table.m_dictTable);
    return true;
  }
  
  BaseString tmp(name);
  Vector<BaseString> split;
  if(tmp.split(split, "/") != 3){
    err << "Invalid table name format `" << name << "`" << endl;
    return false;
  }

  m_ndb->setDatabaseName(split[0].c_str());
  m_ndb->setSchemaName(split[1].c_str());
  
  NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
  if(m_restore_meta){
    NdbDictionary::Table copy(*table.m_dictTable);

    copy.setName(split[2].c_str());

    /*
      update min and max rows to reflect the table, this to
      ensure that memory is allocated properly in the ndb kernel
    */
    copy.setMinRows(table.getNoOfRecords());
    if (table.getNoOfRecords() > copy.getMaxRows())
    {
      copy.setMaxRows(table.getNoOfRecords());
    }

    if (dict->createTable(copy) == -1) 
    {
      err << "Create table `" << table.getTableName() << "` failed: "
	  << dict->getNdbError() << endl;
      return false;
    }
    info << "Successfully restored table `"
         << table.getTableName() << "`" << endl;
  }  
  
  const NdbDictionary::Table* tab = dict->getTable(split[2].c_str());
  if(tab == 0){
    err << "Unable to find table: `" << split[2].c_str() << "`" << endl;
    return false;
  }
  const NdbDictionary::Table* null = 0;
  m_new_tables.fill(table.m_dictTable->getTableId(), null);
  m_new_tables[table.m_dictTable->getTableId()] = tab;
  return true;
}