int runSetup(NDBT_Context* ctx, NDBT_Step* step, int waitGroupSize){

  int records = ctx->getNumRecords();
  int batchSize = ctx->getProperty("BatchSize", 1);
  int transactions = (records / 100) + 1;
  int operations = (records / transactions) + 1;
  Ndb* pNdb = GETNDB(step);

  HugoAsynchTransactions hugoTrans(*ctx->getTab());
  if (hugoTrans.loadTableAsynch(pNdb, records, batchSize,
				transactions, operations) != 0){
    return NDBT_FAILED;
  }

  Ndb_cluster_connection* conn = &pNdb->get_ndb_cluster_connection();

  /* The first call to create_ndb_wait_group() should succeed ... */
  global_poll_group = conn->create_ndb_wait_group(waitGroupSize);
  if(global_poll_group == 0) {
    return NDBT_FAILED;
  }

  /* ... and any subsequent call should fail, returning NULL */
  if(conn->create_ndb_wait_group(waitGroupSize) != 0) {
    return NDBT_FAILED;
  }

  return NDBT_OK;
}
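
The wait group created above is what later lets a single thread multiplex completion handling across many Ndb objects. A minimal usage sketch of that pattern follows, assuming global_poll_group is the NdbWaitGroup* returned by create_ndb_wait_group(); it uses only calls that also appear in runPkReadMultiBasic below, and the function name is illustrative:

/* Sketch only: prepare one asynchronous transaction, register its Ndb with
   the wait group, then wait for and handle the completion. */
static void waitGroupUsageSketch(Ndb* ndb, NdbTransaction* trans)
{
  trans->executeAsynchPrepare(NdbTransaction::Commit, NULL, NULL,
                              NdbOperation::AbortOnError);
  ndb->sendPreparedTransactions();

  global_poll_group->addNdb(ndb);      /* register with the wait group */

  Ndb ** ready_ndbs;
  int count = global_poll_group->wait(ready_ndbs, 10000);
  for (int n = 0; n < count; n++)
  {
    /* run completion handling for each ready Ndb object */
    ready_ndbs[n]->pollNdb(0, 1);
  }
}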
Handle<Value> getConnectionStatistics(const Arguments &args) {
  HandleScope scope;
  Uint64 ndb_stats[Ndb::NumClientStatistics];

  Ndb *ndb = unwrapPointer<Ndb *>(args.Holder());
  Ndb_cluster_connection & c = ndb->get_ndb_cluster_connection();

  c.collect_client_stats(ndb_stats, Ndb::NumClientStatistics);

  Local<Object> stats = Object::New();
  for(int i = 0 ; i < Ndb::NumClientStatistics ; i ++) {
    stats->Set(String::NewSymbol(ndb->getClientStatName(i)),
               Number::New(ndb_stats[i]),
               ReadOnly);
  }
  return scope.Close(stats);
}
int runCleanup(NDBT_Context* ctx, NDBT_Step* step){
  int records = ctx->getNumRecords();
  int batchSize = ctx->getProperty("BatchSize", 1);
  int transactions = (records / 100) + 1;
  int operations = (records / transactions) + 1;
  Ndb* pNdb = GETNDB(step);

  HugoAsynchTransactions hugoTrans(*ctx->getTab());
  if (hugoTrans.pkDelRecordsAsynch(pNdb,  records, batchSize,
				   transactions, operations) != 0){
    return NDBT_FAILED;
  }

  pNdb->get_ndb_cluster_connection().release_ndb_wait_group(global_poll_group);

  return NDBT_OK;
}
Example #4
void getConnectionStatistics(const Arguments &args) {
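  /* NOTE: "Arguments" is assumed here to be an alias for
     v8::FunctionCallbackInfo<v8::Value>: unlike the Handle<Value> version
     above, this body uses the post-Isolate V8 API (GetIsolate(),
     GetReturnValue()). */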
  EscapableHandleScope scope(args.GetIsolate());
  Uint64 ndb_stats[Ndb::NumClientStatistics];

  Ndb *ndb = unwrapPointer<Ndb *>(args.Holder());
  Ndb_cluster_connection & c = ndb->get_ndb_cluster_connection();

  c.collect_client_stats(ndb_stats, Ndb::NumClientStatistics);

  Local<Object> stats = Object::New(args.GetIsolate());
  for(int i = 0 ; i < Ndb::NumClientStatistics ; i ++) {
    stats->Set(String::NewFromUtf8(args.GetIsolate(), ndb->getClientStatName(i)),
               Number::New(args.GetIsolate(), ndb_stats[i]));
  }

  args.GetReturnValue().Set(scope.Escape(stats));
}
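
Both versions above wrap the same two NDB API calls; stripped of the V8 plumbing, the statistics collection amounts to the following (a minimal sketch, assuming an initialized Ndb object; the output formatting is illustrative):

#include <cstdio>
/* requires the NDB API headers (e.g. NdbApi.hpp) */

/* Print every connection-level client statistic with its API-provided name */
static void dumpConnectionStatistics(Ndb *ndb)
{
  Uint64 ndb_stats[Ndb::NumClientStatistics];
  Ndb_cluster_connection & c = ndb->get_ndb_cluster_connection();

  c.collect_client_stats(ndb_stats, Ndb::NumClientStatistics);

  for(int i = 0 ; i < Ndb::NumClientStatistics ; i++) {
    printf("%-40s %llu\n", ndb->getClientStatName(i),
           (unsigned long long) ndb_stats[i]);
  }
}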
Example #5
void GetTableCall::run() {
  DEBUG_PRINT("GetTableCall::run() [%s.%s]", arg1, arg2);
  return_val = -1;

  /* dbName is optional; if provided, use it as the Ndb database name,
     otherwise take the database name from the Ndb object */
  if(strlen(dbName)) {
    ndb->setDatabaseName(dbName);
  } else {
    dbName = ndb->getDatabaseName();
  }
  dict = ndb->getDictionary();
  ndb_table = dict->getTable(tableName);
  if(ndb_table) {
    /* Ndb object used to create NdbRecords and to cache auto-increment values */
    per_table_ndb = new Ndb(& ndb->get_ndb_cluster_connection());
    DEBUG_PRINT("per_table_ndb %s.%s %p\n", dbName, tableName, per_table_ndb);
    per_table_ndb->init();

    /* List the indexes */
    return_val = dict->listIndexes(idx_list, tableName);
  }
  if(return_val == 0) {
    /* Fetch the indexes now.  These calls may perform network IO, populating 
       the (connection) global and (Ndb) local dictionary caches.  Later,
       in the JavaScript main thread, we will call getIndex() again knowing
       that the caches are populated.
    */
    for(unsigned int i = 0 ; i < idx_list.count ; i++) { 
      const NdbDictionary::Index * idx = dict->getIndex(idx_list.elements[i].name, tableName);
      /* It is possible to get an index for a recently dropped table rather 
         than the desired table.  This is a known bug likely to be fixed later.
      */
      const char * idx_table_name = idx->getTable();
      const NdbDictionary::Table * idx_table = dict->getTable(idx_table_name);
      if(idx_table == 0 || idx_table->getObjectVersion() != ndb_table->getObjectVersion()) 
      {
        dict->invalidateIndex(idx);
        idx = dict->getIndex(idx_list.elements[i].name, tableName);
      }
    }
  }
  else {
    DEBUG_PRINT("listIndexes() returned %i", return_val);
    ndbError = & dict->getNdbError();
    return;
  }
  /* List the foreign keys and keep the list around for doAsyncCallback to
   * create JS objects.  There is currently no listForeignKeys(), so we use
   * the more generic listDependentObjects(), passing the table metadata
   * object.
   */
  return_val = dict->listDependentObjects(fk_list, *ndb_table);
  if (return_val == 0) {
    /* Fetch the foreign keys and associated parent tables now.
     * These calls may perform network IO, populating
     * the (connection) global and (Ndb) local dictionary caches.  Later,
     * in the JavaScript main thread, we will call getForeignKey() again knowing
     * that the caches are populated.
     * We only care about foreign keys where this table is the child table, not the parent table.
     */
    for(unsigned int i = 0 ; i < fk_list.count ; i++) {
      NdbDictionary::ForeignKey fk;
      if (fk_list.elements[i].type == NdbDictionary::Object::ForeignKey) {
        const char * fk_name = fk_list.elements[i].name;
        int fkGetCode = dict->getForeignKey(fk, fk_name);
        DEBUG_PRINT("getForeignKey for %s returned %i", fk_name, fkGetCode);
        // see if the foreign key child table is this table
        if(splitNameMatchesDbAndTable(fk.getChildTable())) {
          // the foreign key child table is this table; get the parent table
          ++fk_count;
          DEBUG_PRINT("Getting ParentTable");
          splitter.splitName(fk.getParentTable());
          ndb->setDatabaseName(splitter.part1);  // temp for next call
          const NdbDictionary::Table * parent_table = dict->getTable(splitter.part3);
          ndb->setDatabaseName(dbName);  // back to expected value
          DEBUG_PRINT("Parent table getTable returned %s", parent_table->getName());
        }
      }
    }
  }
  else {
    DEBUG_PRINT("listDependentObjects() returned %i", return_val);
    ndbError = & dict->getNdbError();
  }
}
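
The index loop in run() above exists to warm the dictionary caches and to work around the stale-index case described in its comment. The core of that workaround, pulled out as a standalone sketch (the helper name is hypothetical; the dictionary calls are exactly the ones used above):

/* Return the named index, re-fetching it if the cached copy belongs to a
   stale (e.g. recently dropped and recreated) incarnation of the table. */
static const NdbDictionary::Index *
getFreshIndex(NdbDictionary::Dictionary * dict,
              const char * indexName, const char * tableName,
              const NdbDictionary::Table * table)
{
  const NdbDictionary::Index * idx = dict->getIndex(indexName, tableName);
  if(idx) {
    const NdbDictionary::Table * idx_table = dict->getTable(idx->getTable());
    if(idx_table == 0 ||
       idx_table->getObjectVersion() != table->getObjectVersion())
    {
      dict->invalidateIndex(idx);                   /* drop the stale entry */
      idx = dict->getIndex(indexName, tableName);   /* fetch a fresh copy */
    }
  }
  return idx;
}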
int runPkReadMultiBasic(NDBT_Context* ctx, NDBT_Step* step){
  int loops = ctx->getNumLoops();
  int records = ctx->getNumRecords();
  const int MAX_NDBS = 200;
  Ndb* pNdb = GETNDB(step);
  Ndb_cluster_connection* conn = &pNdb->get_ndb_cluster_connection();

  int i = 0;
  HugoOperations hugoOps(*ctx->getTab());

  Ndb* ndbObjs[ MAX_NDBS ];
  NdbTransaction* transArray[ MAX_NDBS ];
  Ndb ** ready_ndbs;

  for (int j=0; j < MAX_NDBS; j++)
  {
    Ndb* ndb = new Ndb(conn);
    check(ndb->init() == 0, (*ndb));
    ndbObjs[ j ] = ndb;
  }

  while (i<loops) {
    ndbout << "Loop : " << i << ": ";
    int recordsLeft = records;

    do
    {
      /* Define and execute Pk read requests on
       * different Ndb objects
       */
      int ndbcnt = 0;
      int pollcnt = 0;
      int lumpsize = 1 + myRandom48(MIN(recordsLeft, MAX_NDBS));
      while(lumpsize &&
            recordsLeft &&
            ndbcnt < MAX_NDBS)
      {
        Ndb* ndb = ndbObjs[ ndbcnt ];
        NdbTransaction* trans = ndb->startTransaction();
        check(trans != NULL, (*ndb));
        NdbOperation* readOp = trans->getNdbOperation(ctx->getTab());
        check(readOp != NULL, (*trans));
        check(readOp->readTuple() == 0, (*readOp));
        check(hugoOps.equalForRow(readOp, recordsLeft) == 0, hugoOps);

        /* Read all other cols */
        for (int k=0; k < ctx->getTab()->getNoOfColumns(); k++)
        {
          check(readOp->getValue(ctx->getTab()->getColumn(k)) != NULL,
                (*readOp));
        }

        /* Now send em off */
        trans->executeAsynchPrepare(NdbTransaction::Commit,
                                    NULL,
                                    NULL,
                                    NdbOperation::AbortOnError);
        ndb->sendPreparedTransactions();

        transArray[ndbcnt] = trans;
        global_poll_group->addNdb(ndb);

        ndbcnt++;
        pollcnt++;
        recordsLeft--;
        lumpsize--;
      }

      /* Ok, now wait for the Ndbs to complete */
      while (pollcnt)
      {
        /* Occasionally check with no timeout */
        Uint32 timeout_millis = myRandom48(2)?10000:0;
        int count = global_poll_group->wait(ready_ndbs, timeout_millis);

        if (count > 0)
        {
          for (int y=0; y < count; y++)
          {
            Ndb *ndb = ready_ndbs[y];
            check(ndb->pollNdb(0, 1) != 0, (*ndb));
          }
          pollcnt -= count;
        }
      }

      /* Ok, now close the transactions */
      for (int t=0; t < ndbcnt; t++)
      {
        transArray[t]->close();
      }
    } while (recordsLeft);

    i++;
  }

  for (int j=0; j < MAX_NDBS; j++)
  {
    delete ndbObjs[ j ];
  }

  return NDBT_OK;
}
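
The test above deliberately passes NULL callbacks to executeAsynchPrepare() and drives completion through the wait group and pollNdb() return codes. Application code would normally supply a completion callback instead; here is a minimal sketch of that variant, assuming the standard NdbAsynchCallback signature (int result, NdbTransaction*, void* anyObject), with illustrative names:

/* Completion callback, invoked from within pollNdb() */
static void pkReadCallback(int result, NdbTransaction* trans, void* arg)
{
  if (result != 0)
    ndbout << "Async execute failed: " << trans->getNdbError().message << "\n";
  *static_cast<int*>(arg) += 1;          /* count completed transactions */
}

/* Prepare, send, and wait for a single asynchronous transaction */
static void executeOneAsynch(Ndb* ndb, NdbTransaction* trans)
{
  int completed = 0;
  trans->executeAsynchPrepare(NdbTransaction::Commit,
                              pkReadCallback,
                              &completed,          /* passed to the callback */
                              NdbOperation::AbortOnError);
  ndb->sendPreparedTransactions();
  while (completed == 0)
  {
    ndb->pollNdb(10, 1);                 /* wait up to 10 ms per poll */
  }
}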
/* Version 2 of the API will allow the wait group to grow on 
   demand, so we start small.
*/
int runSetup_v2(NDBT_Context* ctx, NDBT_Step* step) {
  Ndb* pNdb = GETNDB(step);
  Ndb_cluster_connection* conn = &pNdb->get_ndb_cluster_connection();
  global_ndb_pool = new NdbPool(conn);
  return runSetup(ctx, step, 7);
}