Code example #1
void GetTableCall::run() {
  DEBUG_PRINT("GetTableCall::run() [%s.%s]", arg1, arg2);
  NdbDictionary::Dictionary * dict;
  return_val = -1;
  
  if(strlen(arg1)) {
    arg0->ndb->setDatabaseName(arg1);
  }
  dict = arg0->ndb->getDictionary();
  ndb_table = dict->getTable(arg2);
  if(ndb_table) {
    return_val = dict->listIndexes(idx_list, arg2);
  }
  if(return_val == 0) {
    /* Fetch the indexes now.  These calls may perform network IO, populating 
       the (connection) global and (Ndb) local dictionary caches.  Later,
       in the JavaScript main thread, we will call getIndex() again knowing
       that the caches are populated.
    */
    for(unsigned int i = 0 ; i < idx_list.count ; i++) { 
      const NdbDictionary::Index * idx = dict->getIndex(idx_list.elements[i].name, arg2);
      /* It is possible to get an index for a recently dropped table rather 
         than the desired table.  This is a known bug likely to be fixed later.
      */
      const NdbDictionary::Table * idx_table = dict->getTable(idx->getTable());
      if(idx_table == 0 ||
         idx_table->getObjectVersion() != ndb_table->getObjectVersion())
      {
        dict->invalidateIndex(idx);
        idx = dict->getIndex(idx_list.elements[i].name, arg2);
      }
    }
  }
}
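
The loop above is the cache-priming pattern these examples share: listIndexes() enumerates the index names, then one getIndex() per element performs the (possibly network-bound) fetch that fills the connection-global and Ndb-local dictionary caches. As a minimal self-contained sketch of the same pattern outside the GetTableCall class, note the connect string "localhost:1186" and the names "test" and "t1" are placeholders, not values from the examples:

#include <NdbApi.hpp>
#include <cstdio>

/* Sketch: prime the dictionary caches for every index on one table.
   All connection parameters and object names here are placeholders. */
int prime_index_cache()
{
  ndb_init();
  Ndb_cluster_connection con("localhost:1186");
  if (con.connect(4 /* retries */, 5 /* delay */, 1 /* verbose */) != 0 ||
      con.wait_until_ready(30, 0) != 0)
    return -1;

  Ndb ndb(&con, "test");
  if (ndb.init() != 0)
    return -1;
  NdbDictionary::Dictionary * dict = ndb.getDictionary();

  NdbDictionary::Dictionary::List list;
  if (dict->listIndexes(list, "t1") != 0) {
    printf("listIndexes failed: %s\n", dict->getNdbError().message);
    return -1;
  }
  for (unsigned i = 0; i < list.count; i++) {
    /* Each getIndex() may perform network IO and populate the caches. */
    const NdbDictionary::Index * idx = dict->getIndex(list.elements[i].name, "t1");
    if (idx)
      printf("cached index: %s\n", idx->getName());
  }
  return 0;
}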
Code example #2
File: testOrderedIndex.cpp  Project: 4T-Shirt/mysql
static int
runDropIndex(NDBT_Context* ctx, NDBT_Step* step)
{
  const NdbDictionary::Table* pTab = ctx->getTab();
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary* pDic = pNdb->getDictionary();
  NdbDictionary::Dictionary::List list;
  if (pDic->listIndexes(list, pTab->getName()) != 0) {
    g_err << pTab->getName() << ": listIndexes failed" << endl;
    ERR(pDic->getNdbError());
    return NDBT_FAILED;
  }
  for (unsigned i = 0; i < list.count; i++) {
    NDBT_Index* pInd = new NDBT_Index(list.elements[i].name);
    pInd->setTable(pTab->getName());
    g_info << "Drop index:" << endl << *pInd;
    int res = pInd->dropIndexInDb(pNdb);
    delete pInd;  // the wrapper is only needed for the drop call; avoid leaking it
    if (res != 0) {
      return NDBT_FAILED;
    }
  }
  return NDBT_OK;
}
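
NDBT_Context, NDBT_Step and NDBT_Index above belong to the internal NDB test framework. The same drop-all-indexes loop can be written against the public Dictionary API alone; a sketch under that assumption (the Ndb/Dictionary setup is as in the sketch after example #1):

#include <NdbApi.hpp>
#include <cstdio>

/* Sketch: drop every index listed for a table using only public API calls.
   Assumes dict comes from an initialized Ndb object; tab is the table name. */
static int drop_all_indexes(NdbDictionary::Dictionary * dict, const char * tab)
{
  NdbDictionary::Dictionary::List list;
  if (dict->listIndexes(list, tab) != 0) {
    printf("listIndexes failed: %s\n", dict->getNdbError().message);
    return -1;
  }
  for (unsigned i = 0; i < list.count; i++) {
    if (dict->dropIndex(list.elements[i].name, tab) != 0) {
      printf("dropIndex(%s) failed: %s\n",
             list.elements[i].name, dict->getNdbError().message);
      return -1;
    }
  }
  return 0;
}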
Code example #3
File: DBDictionaryImpl.cpp  Project: bear-qv/mysql-js
void GetTableCall::run() {
  DEBUG_PRINT("GetTableCall::run() [%s.%s]", arg1, arg2);
  return_val = -1;

  /* dbName is optional; if not present, set it from ndb database name */
  if(strlen(dbName)) {
    ndb->setDatabaseName(dbName);
  } else {
    dbName = ndb->getDatabaseName();
  }
  dict = ndb->getDictionary();
  ndb_table = dict->getTable(tableName);
  if(ndb_table) {
    /* Ndb object used to create NdbRecords and to cache auto-increment values */
    per_table_ndb = new Ndb(& ndb->get_ndb_cluster_connection());
    DEBUG_PRINT("per_table_ndb %s.%s %p\n", dbName, tableName, per_table_ndb);
    per_table_ndb->init();

    /* List the indexes */
    return_val = dict->listIndexes(idx_list, tableName);
  }
  if(return_val == 0) {
    /* Fetch the indexes now.  These calls may perform network IO, populating 
       the (connection) global and (Ndb) local dictionary caches.  Later,
       in the JavaScript main thread, we will call getIndex() again knowing
       that the caches are populated.
    */
    for(unsigned int i = 0 ; i < idx_list.count ; i++) { 
      const NdbDictionary::Index * idx = dict->getIndex(idx_list.elements[i].name, tableName);
      /* It is possible to get an index for a recently dropped table rather 
         than the desired table.  This is a known bug likely to be fixed later.
      */
      const char * idx_table_name = idx->getTable();
      const NdbDictionary::Table * idx_table = dict->getTable(idx_table_name);
      if(idx_table == 0 || idx_table->getObjectVersion() != ndb_table->getObjectVersion()) 
      {
        dict->invalidateIndex(idx);
        idx = dict->getIndex(idx_list.elements[i].name, tableName);
      }
    }
  }
  else {
    DEBUG_PRINT("listIndexes() returned %i", return_val);
    ndbError = & dict->getNdbError();
    return;
  }
  /* List the foreign keys and keep the list around for doAsyncCallback to
   * create JS objects.  Currently there is no listForeignKeys(), so we use
   * the more generic listDependentObjects(), specifying the table metadata
   * object.
   */
  return_val = dict->listDependentObjects(fk_list, *ndb_table);
  if (return_val == 0) {
    /* Fetch the foreign keys and associated parent tables now.
     * These calls may perform network IO, populating
     * the (connection) global and (Ndb) local dictionary caches.  Later,
     * in the JavaScript main thread, we will call getForeignKey() again knowing
     * that the caches are populated.
     * We only care about foreign keys where this table is the child table, not the parent table.
     */
    for(unsigned int i = 0 ; i < fk_list.count ; i++) {
      NdbDictionary::ForeignKey fk;
      if (fk_list.elements[i].type == NdbDictionary::Object::ForeignKey) {
        const char * fk_name = fk_list.elements[i].name;
        int fkGetCode = dict->getForeignKey(fk, fk_name);
        DEBUG_PRINT("getForeignKey for %s returned %i", fk_name, fkGetCode);
        // see if the foreign key child table is this table
        if(splitNameMatchesDbAndTable(fk.getChildTable())) {
          // the foreign key child table is this table; get the parent table
          ++fk_count;
          DEBUG_PRINT("Getting ParentTable");
          splitter.splitName(fk.getParentTable());
          ndb->setDatabaseName(splitter.part1);  // temp for next call
          const NdbDictionary::Table * parent_table = dict->getTable(splitter.part3);
          ndb->setDatabaseName(dbName);  // back to expected value
          DEBUG_PRINT("Parent table getTable returned %s", parent_table->getName());
        }
      }
    }
  }
  else {
    DEBUG_PRINT("listDependentObjects() returned %i", return_val);
    ndbError = & dict->getNdbError();
  }
}
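
The foreign-key section of this function leans on one API detail: there is no listForeignKeys(), so the listDependentObjects() result is filtered by object type. The same filter in isolation, as a sketch (assumes dict and a table already fetched with getTable(); the fully qualified name format, e.g. "db/def/t1", is why the code above needs a name splitter):

#include <NdbApi.hpp>
#include <cstdio>

/* Sketch: enumerate the foreign keys on a table via listDependentObjects().
   The dependent-object list also contains other object types,
   so filter on NdbDictionary::Object::ForeignKey. */
static int list_foreign_keys(NdbDictionary::Dictionary * dict,
                             const NdbDictionary::Table * tab)
{
  NdbDictionary::Dictionary::List list;
  if (dict->listDependentObjects(list, *tab) != 0)
    return -1;
  for (unsigned i = 0; i < list.count; i++) {
    if (list.elements[i].type != NdbDictionary::Object::ForeignKey)
      continue;
    NdbDictionary::ForeignKey fk;
    if (dict->getForeignKey(fk, list.elements[i].name) == 0) {
      /* Child and parent table names come back fully qualified. */
      printf("fk %s: child %s -> parent %s\n",
             fk.getName(), fk.getChildTable(), fk.getParentTable());
    }
  }
  return 0;
}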
Code example #4
File: sizer.cpp  Project: billyxue/sizer
int supersizeme(Ndb * ndb,char * db, char * tbl, bool ftScan, bool ignoreData)
{
  bool varFound = false;
  bool varFoundui = false;  // only set when a unique hash index is analysed; initialize to avoid reading garbage below
  int dm_per_rec=0;
  int im_per_rec=0;
  int disk_per_rec=0;
  int noOfOrderedIndexes=0, noOfUniqueHashIndexes=0, noOfBlobs=0;
  int tmpDm=0,tmpIm=0, tmpDisk=0;
  ndb->setDatabaseName(db);
  NdbDictionary::Dictionary * dict  = ndb->getDictionary();
  const NdbDictionary::Table * table  = dict->getTable(tbl);
  if(table == 0)
    {
      printf( "table %s in database %s not found!\n", tbl,db);
      return -1;
    }

  bool isTable=false;
  
  
  printf("\nCalculating storage cost per record for table %s\n", table->getName());
  
  calculate_dm( ndb, table, NULL, tmpDm, tmpDisk, ftScan, noOfBlobs,ignoreData,varFound );
  // Gerald: there is always at least one PK (hidden or real), and it is not
  // returned by listIndexes(), so add the corresponding overhead and
  // increment noOfUniqueHashIndexes (see the sketch after this example).
  tmpIm = OH_PK;
  noOfUniqueHashIndexes++;
  
  dm_per_rec +=tmpDm;
  disk_per_rec +=tmpDisk;
  im_per_rec +=tmpIm;

  NdbDictionary::Dictionary::List list;
  if (dict->listIndexes(list, *table) != 0) {
    printf("listIndexes failed: %s\n", dict->getNdbError().message);
    return -1;
  }
  int no_attrs=table->getNoOfColumns();
  for (unsigned i = 0; i < list.count; i++) 
  {      
    NdbDictionary::Dictionary::List::Element& elt = list.elements[i];

    if (verbose) 
    {
        printf("Analysing element : %s, Type : %s \n",
               elt.name, elementTypeStr[elt.type] );
    }
    switch (elt.type)
    {
      case NdbDictionary::Object::UniqueHashIndex:
      {
        const NdbDictionary::Index * ix = dict->getIndex(elt.name, table->getName());
        printf("---\tWARNING! Unique Index found named (\"%s\"): \n", elt.name);
        int pk_cols = 0;
        calculate_dm_ui(ndb, table, ix, tmpDm, tmpDisk, ftScan, noOfBlobs, pk_cols, varFoundui);
        printf("---\t\tUnique Index Cost -  DataMemory per record = %d and IndexMemory  = %d\n", tmpDm, tmpIm);
        // Gerald: OH_PK is already included, and the unique-hash-index overhead is added by calculate_dm_ui()
        // tmpIm = OH_PK;
        dm_per_rec += tmpDm;
        disk_per_rec += tmpDisk;
        im_per_rec += tmpIm;
        isTable = true;
        noOfUniqueHashIndexes++;
        //no_attrs+=(ix->getNoOfColumns()+pk_cols);
      }
      break;

      case NdbDictionary::Object::OrderedIndex:
        tmpDm = OH_ORDERED_INDEX;
        tmpIm = 0;
        printf("---\tOrdered Index found named (%s). Additional cost per record is = %d"
               " bytes of DataMemory.\n", elt.name, tmpDm);
        dm_per_rec += tmpDm;
        isTable = true;
        noOfOrderedIndexes++;
        break;

      default:
        break;
    }
  }


  int rows = 0;
  
  if (select_count(ndb, table, 240, &rows, 
		   NdbOperation::LM_CommittedRead) <0){
    printf( "counting rows failed\n" );
    return 0;
  }
  
  printf("\nRecord size (incl OH):"
	 "\n\t#Rows found=%d records "  
	 "\n\t#OrderedIndexes=%d"  
	 "\n\t#UniqueHashIndexes=%d "  
	 "\n\t#blob/text=%d "  
	 "\n\t#attributes=%d "  
	 "\n\tDataMemory=%d bytes "  
	 "\n\tIndexMemory=%d bytes" 
	 "\n\tDiskspace=%d bytes\n\n",
	 rows,
	 noOfOrderedIndexes,
	 noOfUniqueHashIndexes,
	 noOfBlobs,
	 no_attrs,
	 dm_per_rec, 
	 im_per_rec,
	 disk_per_rec);

  printf("\n\nAppending the following to %s.csv \n",db);
  printf("%s,%s,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n\n",
	 db,
	 table->getName(), 
	 rows,
	 1,
	 noOfOrderedIndexes,
	 noOfUniqueHashIndexes,
	 noOfBlobs,
	 no_attrs,
	 im_per_rec,
	 dm_per_rec,
	 disk_per_rec,
     varFound ? 1:0,
     varFoundui ? 1:0);


  
  char filename[255];
  if (g_analyze_all) {
    if (!g_multi_db)
      sprintf(filename, "%s.csv", db);
    else
      strcpy(filename, "all_databases.csv");
  }
  else
    sprintf(filename, "%s_%s.csv", db, tbl);

  FILE * fh = fopen(filename, "a+");
  if (fh == NULL) {
    printf("could not open %s for appending\n", filename);
    return -1;
  }
  char row[512];  // 128 bytes could overflow with long db/table names
  snprintf(row, sizeof(row), "%s,%s,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
           db,
           table->getName(),
           rows,
           1,
           noOfOrderedIndexes,
           noOfUniqueHashIndexes,
           noOfBlobs,
           no_attrs,
           im_per_rec,
           dm_per_rec,
           disk_per_rec,
           varFound ? 1 : 0,
           varFoundui ? 1 : 0);
  fwrite(row, strlen(row), 1, fh);
  fclose(fh);

  return 1;
}
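
One assumption in supersizeme() deserves a standalone illustration: as the comment above the OH_PK line notes, the implicit primary-key index is not an object returned by listIndexes(), so its overhead must be added unconditionally. A sketch of the corresponding check, under the same setup assumptions as the earlier sketches:

#include <NdbApi.hpp>

/* Sketch: listIndexes() returns only secondary indexes. For a table whose
   only key is the primary key, list.count is 0, yet the PK overhead (OH_PK
   in supersizeme() above) still applies to every record. */
static int count_secondary_indexes(NdbDictionary::Dictionary * dict,
                                   const NdbDictionary::Table * tab)
{
  NdbDictionary::Dictionary::List list;
  if (dict->listIndexes(list, *tab) != 0)
    return -1;
  return (int) list.count;  /* excludes the hidden/real primary key */
}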