void GetTableCall::run() {
  DEBUG_PRINT("GetTableCall::run() [%s.%s]", arg1, arg2);
  NdbDictionary::Dictionary * dict;
  return_val = -1;

  if(strlen(arg1)) {
    arg0->ndb->setDatabaseName(arg1);
  }
  dict = arg0->ndb->getDictionary();
  ndb_table = dict->getTable(arg2);
  if(ndb_table) {
    return_val = dict->listIndexes(idx_list, arg2);
  }
  if(return_val == 0) {
    /* Fetch the indexes now.  These calls may perform network IO, populating
       the (connection) global and (Ndb) local dictionary caches.  Later,
       in the JavaScript main thread, we will call getIndex() again, knowing
       that the caches are populated.
    */
    for(unsigned int i = 0 ; i < idx_list.count ; i++) {
      const NdbDictionary::Index * idx =
        dict->getIndex(idx_list.elements[i].name, arg2);
      /* It is possible to get an index for a recently dropped table rather
         than the desired table.  This is a known bug likely to be fixed later.
      */
      if(ndb_table->getObjectVersion() !=
         dict->getTable(idx->getTable())->getObjectVersion())
      {
        dict->invalidateIndex(idx);
        idx = dict->getIndex(idx_list.elements[i].name, arg2);
      }
    }
  }
}
int create_table() {
  NdbDictionary::Dictionary* dict = g_ndb->getDictionary();
  assert(dict);
  if(g_paramters[P_CREATE].value) {
    const NdbDictionary::Table * pTab = NDBT_Tables::getTable(g_table);
    assert(pTab);
    NdbDictionary::Table copy = * pTab;
    copy.setLogging(false);
    if(dict->createTable(copy) != 0) {
      g_err << "Failed to create table: " << g_table << endl;
      return -1;
    }

    NdbDictionary::Index x(g_ordered);
    x.setTable(g_table);
    x.setType(NdbDictionary::Index::OrderedIndex);
    x.setLogging(false);
    for (unsigned k = 0; k < (unsigned) copy.getNoOfColumns(); k++) {
      if(copy.getColumn(k)->getPrimaryKey()) {
        x.addColumn(copy.getColumn(k)->getName());
      }
    }

    if(dict->createIndex(x) != 0) {
      g_err << "Failed to create index: " << g_ordered << endl;
      return -1;
    }

    x.setName(g_unique);
    x.setType(NdbDictionary::Index::UniqueHashIndex);
    if(dict->createIndex(x) != 0) {
      g_err << "Failed to create index: " << g_unique << endl;
      return -1;
    }
  }
  g_tab = dict->getTable(g_table);
  g_i_unique = dict->getIndex(g_unique, g_table);
  g_i_ordered = dict->getIndex(g_ordered, g_table);
  assert(g_tab);
  assert(g_i_unique);
  assert(g_i_ordered);
  return 0;
}
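/* Not part of the original source: a hedged sketch of a teardown counterpart
   to create_table() above.  dropIndex() and dropTable() are the standard
   NdbDictionary calls; the function name drop_table() and the reuse of the
   g_* globals are illustrative assumptions. */
int drop_table() {
  NdbDictionary::Dictionary* dict = g_ndb->getDictionary();
  assert(dict);
  /* Drop the indexes first, then the table they are defined on. */
  if(dict->dropIndex(g_ordered, g_table) != 0 ||
     dict->dropIndex(g_unique, g_table) != 0) {
    g_err << "Failed to drop index: " << dict->getNdbError() << endl;
    return -1;
  }
  if(dict->dropTable(g_table) != 0) {
    g_err << "Failed to drop table: " << g_table << endl;
    return -1;
  }
  return 0;
}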
int runInterpretedUKLookup(NDBT_Context* ctx, NDBT_Step* step) {
  const NdbDictionary::Table * pTab = ctx->getTab();
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary * dict = pNdb->getDictionary();

  const NdbDictionary::Index* pIdx = dict->getIndex(pkIdxName, pTab->getName());
  CHK_RET_FAILED(pIdx != 0);

  const NdbRecord * pRowRecord = pTab->getDefaultRecord();
  CHK_RET_FAILED(pRowRecord != 0);
  const NdbRecord * pIdxRecord = pIdx->getDefaultRecord();
  CHK_RET_FAILED(pIdxRecord != 0);

  const Uint32 len = NdbDictionary::getRecordRowLength(pRowRecord);
  Uint8 * pRow = new Uint8[len];
  bzero(pRow, len);

  HugoCalculator calc(* pTab);
  calc.equalForRow(pRow, pRowRecord, 0);

  NdbTransaction* pTrans = pNdb->startTransaction();
  CHK_RET_FAILED(pTrans != 0);

  /* Attach a trivial interpreted program to the read: it unconditionally
     returns OK, so the read behaves like a plain unique-key lookup. */
  NdbInterpretedCode code;
  code.interpret_exit_ok();
  code.finalise();

  NdbOperation::OperationOptions opts;
  bzero(&opts, sizeof(opts));
  opts.optionsPresent = NdbOperation::OperationOptions::OO_INTERPRETED;
  opts.interpretedCode = &code;

  const NdbOperation * pOp = pTrans->readTuple(pIdxRecord, (char*)pRow,
                                               pRowRecord, (char*)pRow,
                                               NdbOperation::LM_Read,
                                               0,
                                               &opts,
                                               sizeof(opts));
  CHK_RET_FAILED(pOp != 0);
  int res = pTrans->execute(Commit, AbortOnError);
  CHK_RET_FAILED(res == 0);

  pNdb->closeTransaction(pTrans);
  delete [] pRow;
  return NDBT_OK;
}
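/* Not part of the original test: the interpreted program above is a no-op
   (interpret_exit_ok), so the read behaves like a plain lookup.  As a hedged
   sketch of a non-trivial program, the fragment below builds a filter that
   accepts a row only when the column with attribute id 1 equals matchValue.
   branch_col_eq(), def_label(), interpret_exit_ok() and interpret_exit_nok()
   are documented NdbInterpretedCode methods, but the attribute id and the
   helper name are illustrative assumptions; note that column comparisons
   require the NdbInterpretedCode to be constructed with the table's
   NdbDictionary::Table pointer. */
void buildExampleFilter(NdbInterpretedCode& code, Uint32 matchValue) {
  code.branch_col_eq(&matchValue, sizeof(matchValue), 1, 0); // equal: goto label 0
  code.interpret_exit_nok();   // not equal: reject the row
  code.def_label(0);
  code.interpret_exit_ok();    // equal: accept the row
  code.finalise();
}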
int create_table() {
  NdbDictionary::Dictionary* dict = g_ndb->getDictionary();
  assert(dict);
  if(g_paramters[P_CREATE].value) {
    g_ndb->getDictionary()->dropTable(g_tablename);
    const NdbDictionary::Table * pTab = NDBT_Tables::getTable(g_tablename);
    assert(pTab);
    NdbDictionary::Table copy = * pTab;
    copy.setLogging(false);
    if(dict->createTable(copy) != 0) {
      g_err << "Failed to create table: " << g_tablename << endl;
      return -1;
    }

    NdbDictionary::Index x(g_indexname);
    x.setTable(g_tablename);
    x.setType(NdbDictionary::Index::OrderedIndex);
    x.setLogging(false);
    for (unsigned k = 0; k < (unsigned) copy.getNoOfColumns(); k++) {
      if(copy.getColumn(k)->getPrimaryKey()) {
        x.addColumnName(copy.getColumn(k)->getName());
      }
    }

    if(dict->createIndex(x) != 0) {
      g_err << "Failed to create index: " << g_indexname << endl;
      return -1;
    }
  }
  g_table = dict->getTable(g_tablename);
  g_index = dict->getIndex(g_indexname, g_tablename);
  assert(g_table);
  assert(g_index);

  if(g_paramters[P_CREATE].value) {
    int rows = g_paramters[P_ROWS].value;
    HugoTransactions hugoTrans(* g_table);
    if (hugoTrans.loadTable(g_ndb, rows)) {
      g_err.println("Failed to load %s with %d rows",
                    g_table->getName(), rows);
      return -1;
    }
  }
  return 0;
}
int create_table() {
  NdbDictionary::Dictionary* dict = g_ndb->getDictionary();
  assert(dict);
  if(g_paramters[P_CREATE].value) {
    g_ndb->getDictionary()->dropTable(g_tablename);
    const NdbDictionary::Table * pTab = NDBT_Tables::getTable(g_tablename);
    assert(pTab);
    NdbDictionary::Table copy = * pTab;
    copy.setLogging(false);
    if(dict->createTable(copy) != 0) {
      g_err << "Failed to create table: " << g_tablename << endl;
      return -1;
    }

    NdbDictionary::Index x(g_indexname);
    x.setTable(g_tablename);
    x.setType(NdbDictionary::Index::OrderedIndex);
    x.setLogging(false);
    for (unsigned k = 0; k < (unsigned) copy.getNoOfColumns(); k++) {
      if(copy.getColumn(k)->getPrimaryKey()) {
        x.addColumnName(copy.getColumn(k)->getName());
      }
    }

    if(dict->createIndex(x) != 0) {
      g_err << "Failed to create index: " << g_indexname << endl;
      return -1;
    }
  }
  g_table = dict->getTable(g_tablename);
  g_index = dict->getIndex(g_indexname, g_tablename);
  assert(g_table);
  assert(g_index);

  /* Obtain NdbRecord instances for the table and index */
  {
    NdbDictionary::RecordSpecification spec[ NDB_MAX_ATTRIBUTES_IN_TABLE ];

    Uint32 offset = 0;
    Uint32 cols = g_table->getNoOfColumns();
    for (Uint32 colNum = 0; colNum < cols; colNum++)
    {
      const NdbDictionary::Column* col = g_table->getColumn(colNum);
      Uint32 colLength = col->getLength();

      spec[colNum].column = col;
      spec[colNum].offset = offset;
      offset += colLength;
      spec[colNum].nullbit_byte_offset = offset++;
      spec[colNum].nullbit_bit_in_byte = 0;
    }

    g_table_record = dict->createRecord(g_table,
                                        &spec[0],
                                        cols,
                                        sizeof(NdbDictionary::RecordSpecification));
    assert(g_table_record);
  }
  {
    NdbDictionary::RecordSpecification spec[ NDB_MAX_ATTRIBUTES_IN_TABLE ];

    Uint32 offset = 0;
    Uint32 cols = g_index->getNoOfColumns();
    for (Uint32 colNum = 0; colNum < cols; colNum++)
    {
      /* Get column from the underlying table */
      // TODO : Add this mechanism to dict->createRecord
      // TODO : Add NdbRecord queryability methods so that an NdbRecord can
      //        be easily built and later used to read out data.
      const NdbDictionary::Column* col =
        g_table->getColumn(g_index->getColumn(colNum)->getName());
      Uint32 colLength = col->getLength();

      spec[colNum].column = col;
      spec[colNum].offset = offset;
      offset += colLength;
      spec[colNum].nullbit_byte_offset = offset++;
      spec[colNum].nullbit_bit_in_byte = 0;
    }

    g_index_record = dict->createRecord(g_index,
                                        &spec[0],
                                        cols,
                                        sizeof(NdbDictionary::RecordSpecification));
    assert(g_index_record);
  }

  if(g_paramters[P_CREATE].value) {
    int rows = g_paramters[P_ROWS].value;
    HugoTransactions hugoTrans(* g_table);
    if (hugoTrans.loadTable(g_ndb, rows)) {
      g_err.println("Failed to load %s with %d rows",
                    g_table->getName(), rows);
      return -1;
    }
  }
  return 0;
}
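/* Not part of the original source: a hedged sketch of how a row buffer laid
   out by the RecordSpecification above could be read back.  The static
   helpers NdbDictionary::isNull() and NdbDictionary::getValuePtr() resolve
   the per-column offsets recorded in the NdbRecord; printRowColumn() and its
   arguments are illustrative assumptions. */
void printRowColumn(const NdbRecord* record, const char* row, Uint32 attrId) {
  if(NdbDictionary::isNull(record, row, attrId)) {
    g_info << "column " << attrId << " is NULL" << endl;
  } else {
    /* Points at the column's data inside the row buffer, using the offsets
       supplied in the RecordSpecification passed to createRecord(). */
    const char* valuePtr = NdbDictionary::getValuePtr(record, row, attrId);
    (void) valuePtr;  /* interpret according to the column's declared type */
  }
}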
/* doAsyncCallback() runs in the main thread.  We don't want it to block.
   TODO: verify whether any IO is done by checking WaitMetaRequestCount
   at the start and end.
*/
void GetTableCall::doAsyncCallback(Local<Object> ctx) {
  const char *tableName;
  HandleScope scope;
  DEBUG_PRINT("GetTableCall::doAsyncCallback: return_val %d", return_val);

  /* User callback arguments */
  Handle<Value> cb_args[2];
  cb_args[0] = Null();
  cb_args[1] = Null();

  /* TableMetadata = {
       database     : ""  ,  // Database name
       name         : ""  ,  // Table Name
       columns      : []  ,  // ordered array of DBColumn objects
       indexes      : []  ,  // array of DBIndex objects
       partitionKey : []  ,  // ordered array of column numbers in the partition key
     };
  */
  if(ndb_table && ! return_val) {
    Local<Object> table = NdbDictTableEnv.newWrapper();
    const NdbDictionary::Table * js_ndb_table = ndb_table;
    wrapPointerInObject(js_ndb_table, NdbDictTableEnv, table);

    // database
    table->Set(String::NewSymbol("database"), String::New(arg1));

    // name
    tableName = ndb_table->getName();
    table->Set(String::NewSymbol("name"), String::New(tableName));

    // partitionKey
    int nPartitionKeys = 0;
    Handle<Array> partitionKeys = Array::New();
    table->Set(String::NewSymbol("partitionKey"), partitionKeys);

    // columns
    Local<Array> columns = Array::New(ndb_table->getNoOfColumns());
    for(int i = 0 ; i < ndb_table->getNoOfColumns() ; i++) {
      const NdbDictionary::Column *ndb_col = ndb_table->getColumn(i);
      Handle<Object> col = buildDBColumn(ndb_col);
      columns->Set(i, col);
      if(ndb_col->getPartitionKey()) {   /* partition key */
        partitionKeys->Set(nPartitionKeys++, String::New(ndb_col->getName()));
      }
    }
    table->Set(String::NewSymbol("columns"), columns);

    // indexes (primary key & secondary)
    Local<Array> js_indexes = Array::New(idx_list.count + 1);
    js_indexes->Set(0, buildDBIndex_PK());                  // primary key
    for(unsigned int i = 0 ; i < idx_list.count ; i++) {    // secondary indexes
      const NdbDictionary::Index * idx =
        dict->getIndex(idx_list.elements[i].name, arg2);
      js_indexes->Set(i+1, buildDBIndex(idx));
    }
    table->Set(String::NewSymbol("indexes"), js_indexes, ReadOnly);

    // Table Record (implementation artifact; not part of spec)
    DEBUG_PRINT("Creating Table Record");
    Record * rec = new Record(dict, ndb_table->getNoOfColumns());
    for(int i = 0 ; i < ndb_table->getNoOfColumns() ; i++) {
      rec->addColumn(ndb_table->getColumn(i));
    }
    rec->completeTableRecord(ndb_table);
    table->Set(String::NewSymbol("record"), Record_Wrapper(rec));

    // foreign keys (only foreign keys for which this table is the child)
    // now create the javascript foreign key metadata objects
    // for dictionary objects cached earlier
    Local<Array> js_fks = Array::New(fk_count);
    int fk_number = 0;
    for(unsigned int i = 0 ; i < fk_list.count ; i++) {
      NdbDictionary::ForeignKey fk;
      if (fk_list.elements[i].type == NdbDictionary::Object::ForeignKey) {
        const char * fk_name = fk_list.elements[i].name;
        int fkGetCode = dict->getForeignKey(fk, fk_name);
        DEBUG_PRINT("getForeignKey for %s returned %i", fk_name, fkGetCode);
        // see if the foreign key child table is this table
        if(splitNameMatchesDbAndTable(fk.getChildTable())) {
          // the foreign key child table is this table; build the fk object
          DEBUG_PRINT("Adding foreign key for %s at %i", fk.getName(), fk_number);
          js_fks->Set(fk_number++, buildDBForeignKey(&fk));
        }
      }
    }
    table->Set(String::NewSymbol("foreignKeys"), js_fks, ReadOnly);

    // Autoincrement Cache Impl (also not part of spec)
    if(per_table_ndb) {
      table->Set(String::NewSymbol("per_table_ndb"), Ndb_Wrapper(per_table_ndb));
    }

    // User Callback
    cb_args[1] = table;
  }
  else {
    cb_args[0] = NdbError_Wrapper(* ndbError);
  }

  callback->Call(ctx, 2, cb_args);
}
void GetTableCall::run() {
  DEBUG_PRINT("GetTableCall::run() [%s.%s]", arg1, arg2);
  return_val = -1;

  /* dbName is optional; if not present, set it from the Ndb's database name */
  if(strlen(dbName)) {
    ndb->setDatabaseName(dbName);
  } else {
    dbName = ndb->getDatabaseName();
  }
  dict = ndb->getDictionary();
  ndb_table = dict->getTable(tableName);
  if(ndb_table) {
    /* Ndb object used to create NdbRecords and to cache auto-increment values */
    per_table_ndb = new Ndb(& ndb->get_ndb_cluster_connection());
    DEBUG_PRINT("per_table_ndb %s.%s %p\n", dbName, tableName, per_table_ndb);
    per_table_ndb->init();

    /* List the indexes */
    return_val = dict->listIndexes(idx_list, tableName);
  }
  if(return_val == 0) {
    /* Fetch the indexes now.  These calls may perform network IO, populating
       the (connection) global and (Ndb) local dictionary caches.  Later,
       in the JavaScript main thread, we will call getIndex() again, knowing
       that the caches are populated.
    */
    for(unsigned int i = 0 ; i < idx_list.count ; i++) {
      const NdbDictionary::Index * idx =
        dict->getIndex(idx_list.elements[i].name, tableName);
      /* It is possible to get an index for a recently dropped table rather
         than the desired table.  This is a known bug likely to be fixed later.
      */
      const char * idx_table_name = idx->getTable();
      const NdbDictionary::Table * idx_table = dict->getTable(idx_table_name);
      if(idx_table == 0 ||
         idx_table->getObjectVersion() != ndb_table->getObjectVersion())
      {
        dict->invalidateIndex(idx);
        idx = dict->getIndex(idx_list.elements[i].name, tableName);
      }
    }
  }
  else {
    DEBUG_PRINT("listIndexes() returned %i", return_val);
    ndbError = & dict->getNdbError();
    return;
  }

  /* List the foreign keys and keep the list around for doAsyncCallback to
     create js objects.  Currently there is no listForeignKeys, so we use the
     more generic listDependentObjects, specifying the table metadata object.
  */
  return_val = dict->listDependentObjects(fk_list, *ndb_table);
  if (return_val == 0) {
    /* Fetch the foreign keys and associated parent tables now.  These calls
       may perform network IO, populating the (connection) global and (Ndb)
       local dictionary caches.  Later, in the JavaScript main thread, we will
       call getForeignKey() again, knowing that the caches are populated.
       We only care about foreign keys where this table is the child table,
       not the parent table.
    */
    for(unsigned int i = 0 ; i < fk_list.count ; i++) {
      NdbDictionary::ForeignKey fk;
      if (fk_list.elements[i].type == NdbDictionary::Object::ForeignKey) {
        const char * fk_name = fk_list.elements[i].name;
        int fkGetCode = dict->getForeignKey(fk, fk_name);
        DEBUG_PRINT("getForeignKey for %s returned %i", fk_name, fkGetCode);
        // see if the foreign key child table is this table
        if(splitNameMatchesDbAndTable(fk.getChildTable())) {
          // the foreign key child table is this table; get the parent table
          ++fk_count;
          DEBUG_PRINT("Getting ParentTable");
          splitter.splitName(fk.getParentTable());
          ndb->setDatabaseName(splitter.part1);  // temp for next call
          const NdbDictionary::Table * parent_table = dict->getTable(splitter.part3);
          ndb->setDatabaseName(dbName);          // back to expected value
          if(parent_table) {
            DEBUG_PRINT("Parent table getTable returned %s", parent_table->getName());
          }
        }
      }
    }
  }
  else {
    DEBUG_PRINT("listDependentObjects() returned %i", return_val);
    ndbError = & dict->getNdbError();
  }
}
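/* Not part of the original source: run() above refetches an index when its
   table's object version is stale.  A hedged sketch of the same cache-refresh
   pattern applied to a table: invalidateTable() drops the cached definition
   so the next getTable() fetches a fresh copy from the data nodes.
   refreshTable() is an illustrative name. */
const NdbDictionary::Table * refreshTable(NdbDictionary::Dictionary* dict,
                                          const char* name) {
  const NdbDictionary::Table * tab = dict->getTable(name);
  if(tab == 0) return 0;          /* no such table */
  dict->invalidateTable(name);    /* discard the cached definition */
  return dict->getTable(name);    /* refetch from the data nodes */
}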
int supersizeme(Ndb * ndb, char * db, char * tbl, bool ftScan, bool ignoreData) {
  bool varFound = false;
  bool varFoundui = false;
  int dm_per_rec = 0;
  int im_per_rec = 0;
  int disk_per_rec = 0;
  int noOfOrderedIndexes = 0, noOfUniqueHashIndexes = 0, noOfBlobs = 0;
  int tmpDm = 0, tmpIm = 0, tmpDisk = 0;
  ndb->setDatabaseName(db);
  NdbDictionary::Dictionary * dict = ndb->getDictionary();
  const NdbDictionary::Table * table = dict->getTable(tbl);
  if(table == 0) {
    printf("table %s in database %s not found!\n", tbl, db);
    return -1;
  }

  bool isTable = false;
  printf("\nCalculating storage cost per record for table %s\n", table->getName());
  calculate_dm(ndb, table, NULL, tmpDm, tmpDisk, ftScan, noOfBlobs, ignoreData, varFound);

  // Gerald: there is always at least one PK (hidden or real), and it is not
  // returned by listIndexes(), so add the corresponding overhead and
  // increment noOfUniqueHashIndexes.
  tmpIm = OH_PK;
  noOfUniqueHashIndexes++;

  dm_per_rec += tmpDm;
  disk_per_rec += tmpDisk;
  im_per_rec += tmpIm;

  NdbDictionary::Dictionary::List list;
  dict->listIndexes(list, *table);
  int no_attrs = table->getNoOfColumns();
  for (unsigned i = 0; i < list.count; i++) {
    NdbDictionary::Dictionary::List::Element& elt = list.elements[i];
    if (verbose) {
      printf("Analysing element : %s, Type : %s \n",
             elt.name, elementTypeStr[elt.type]);
    }
    switch (elt.type) {
    case NdbDictionary::Object::UniqueHashIndex:
      {
        const NdbDictionary::Index * ix = dict->getIndex(elt.name, table->getName());
        printf("---\tWARNING! Unique Index found named (\"%s\"): \n", elt.name);
        int pk_cols = 0;
        calculate_dm_ui(ndb, table, ix, tmpDm, tmpDisk, ftScan,
                        noOfBlobs, pk_cols, varFoundui);
        printf("---\t\tUnique Index Cost - DataMemory per record = %d"
               " and IndexMemory = %d\n", tmpDm, tmpIm);

        // Gerald: OH_PK is already included, and OH_UNIQUE_HASH_INDEX is
        // included by calculate_dm_ui, so don't add OH_PK again here.
        // tmpIm = OH_PK;
        dm_per_rec += tmpDm;
        disk_per_rec += tmpDisk;
        im_per_rec += tmpIm;
        isTable = true;
        noOfUniqueHashIndexes++;
        //no_attrs += (ix->getNoOfColumns() + pk_cols);
      }
      break;

    case NdbDictionary::Object::OrderedIndex:
      tmpDm = OH_ORDERED_INDEX;
      tmpIm = 0;
      printf("---\tOrdered Index found named (%s). Additional cost per record"
             " is = %d bytes of DataMemory.\n", elt.name, tmpDm);
      dm_per_rec += tmpDm;
      isTable = true;
      noOfOrderedIndexes++;
      break;

    default:
      break;
    }
  }

  int rows = 0;
  if (select_count(ndb, table, 240, &rows, NdbOperation::LM_CommittedRead) < 0) {
    printf("counting rows failed\n");
    return 0;
  }

  printf("\nRecord size (incl OH):"
         "\n\t#Rows found=%d records "
         "\n\t#OrderedIndexes=%d"
         "\n\t#UniqueHashIndexes=%d "
         "\n\t#blob/text=%d "
         "\n\t#attributes=%d "
         "\n\tDataMemory=%d bytes "
         "\n\tIndexMemory=%d bytes"
         "\n\tDiskspace=%d bytes\n\n",
         rows, noOfOrderedIndexes, noOfUniqueHashIndexes,
         noOfBlobs, no_attrs, dm_per_rec, im_per_rec, disk_per_rec);

  printf("\n\nAppending the following to %s.csv \n", db);
  printf("%s,%s,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n\n",
         db, table->getName(), rows, 1,
         noOfOrderedIndexes, noOfUniqueHashIndexes, noOfBlobs, no_attrs,
         im_per_rec, dm_per_rec, disk_per_rec,
         varFound ? 1 : 0, varFoundui ? 1 : 0);

  char filename[255];
  if(g_analyze_all) {
    if(!g_multi_db)
      snprintf(filename, sizeof(filename), "%s.csv", db);
    else
      strcpy(filename, "all_databases.csv");
  }
  else {
    snprintf(filename, sizeof(filename), "%s_%s.csv", db, tbl);
  }

  FILE * fh = fopen(filename, "a+");
  if(fh == 0) {
    printf("failed to open %s for appending\n", filename);
    return -1;
  }
  char row[128];
  snprintf(row, sizeof(row), "%s,%s,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
           db, table->getName(), rows, 1,
           noOfOrderedIndexes, noOfUniqueHashIndexes, noOfBlobs, no_attrs,
           im_per_rec, dm_per_rec, disk_per_rec,
           varFound ? 1 : 0, varFoundui ? 1 : 0);
  fwrite(row, strlen(row), 1, fh);
  fclose(fh);

  return 1;
}
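/* Not part of the original tool: supersizeme() calls a select_count() helper
   defined elsewhere.  A hedged sketch of what such a helper might look like,
   using a committed-read scan; countRows() and its error handling are
   illustrative assumptions, and a production version would presumably retry
   temporary errors as the real helper does. */
int countRows(Ndb* ndb, const NdbDictionary::Table* table, int* rows) {
  NdbTransaction* trans = ndb->startTransaction();
  if(trans == 0) return -1;
  NdbScanOperation* op = trans->getNdbScanOperation(table);
  if(op == 0 ||
     op->readTuples(NdbOperation::LM_CommittedRead) != 0 ||
     op->getValue(table->getColumn(0)->getName()) == 0 ||  /* fetch one column */
     trans->execute(NdbTransaction::NoCommit) != 0) {
    ndb->closeTransaction(trans);
    return -1;
  }
  int count = 0;
  int check;
  while((check = op->nextResult(true)) == 0) count++;  /* one row per result */
  ndb->closeTransaction(trans);
  if(check != 1) return -1;  /* 1 == normal end of scan */
  *rows = count;
  return 0;
}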
int runBasic(NDBT_Context* ctx, NDBT_Step* step) {
  Uint32 useRangeScanT1 = ctx->getProperty("UseRangeScanT1", (uint32)0);
  Ndb* pNdb = GETNDB(step);
  NdbDictionary::Dictionary * pDict = pNdb->getDictionary();
  int records = ctx->getNumRecords();
  int result = NDBT_OK;

  int l = 0;
  while (!ctx->isTestStopped()) {
    for (unsigned i = 0; i < table_list.size(); i++) {
      const NdbDictionary::Table* tab = pDict->getTable(table_list[i].c_str());
      HugoTransactions trans(* tab);
      switch(l % 4) {
      case 0:
        trans.loadTable(pNdb, records);
        trans.scanUpdateRecords(pNdb, records);
        trans.pkUpdateRecords(pNdb, records);
        trans.pkReadUnlockRecords(pNdb, records);
        break;
      case 1:
        trans.scanUpdateRecords(pNdb, records);
        // TODO make pkInterpretedUpdateRecords work on any table
        // (or check if it does)
        if (strcmp(tab->getName(), "T1") == 0)
          trans.pkInterpretedUpdateRecords(pNdb, records);
        if (strcmp(tab->getName(), "T1") == 0 && useRangeScanT1) {
          const NdbDictionary::Index* pInd = pDict->getIndex("T1X1", "T1");
          if (pInd == 0) {
            g_err << "getIndex(T1X1) error: " << pDict->getNdbError() << endl;
            return NDBT_FAILED;
          }
          // bug#13834481 - bound values do not matter
          const Uint32 lo = 0x11110000;
          const Uint32 hi = 0xaaaa0000;
          HugoTransactions::HugoBound bound_arr[6];
          int bound_cnt = 0;
          for (int j = 0; j <= 1; j++) {
            int n = rand() % 4;
            for (int k = 0; k < n; k++) {  // renamed from i to avoid shadowing
              HugoTransactions::HugoBound& b = bound_arr[bound_cnt++];
              b.attr = k;
              b.type = (j == 0 ? 0 : 2);   // LE/GE
              b.value = (j == 0 ? &lo : &hi);
            }
          }
          g_info << "range scan T1 with " << bound_cnt << " bounds" << endl;
          if (trans.scanReadRecords(pNdb, pInd, records, 0, 0,
                                    NdbOperation::LM_Read, 0,
                                    bound_cnt, bound_arr) != 0) {
            const NdbError& err = trans.getNdbError();
            /* bug#13834481 symptoms include timeouts and error 1231.
               Check for any non-temporary error. */
            if (err.status == NdbError::TemporaryError) {
              g_info << "range scan T1 temporary error: " << err << endl;
            }
            if (err.status != NdbError::TemporaryError) {
              g_err << "range scan T1 permanent error: " << err << endl;
              return NDBT_FAILED;
            }
          }
        }
        trans.clearTable(pNdb, records/2);
        trans.loadTable(pNdb, records/2);
        break;
      case 2:
        trans.clearTable(pNdb, records/2);
        trans.loadTable(pNdb, records/2);
        trans.clearTable(pNdb, records/2);
        break;
      case 3:
        if (createDropEvent(ctx, step)) {
          return NDBT_FAILED;
        }
        break;
      }
    }
    l++;
  }

  return result;
}
/* doAsyncCallback() runs in the main thread.  We don't want it to block.
   TODO: verify whether any IO is done by checking WaitMetaRequestCount
   at the start and end.
*/
void GetTableCall::doAsyncCallback(Local<Object> ctx) {
  const char *ndbTableName;
  EscapableHandleScope scope(isolate);
  DEBUG_PRINT("GetTableCall::doAsyncCallback: return_val %d", return_val);

  /* User callback arguments */
  Handle<Value> cb_args[2];
  cb_args[0] = Null(isolate);
  cb_args[1] = Null(isolate);

  /* TableMetadata = {
       database        : ""   ,  // Database name
       name            : ""   ,  // Table Name
       columns         : []   ,  // ordered array of DBColumn objects
       indexes         : []   ,  // array of DBIndex objects
       partitionKey    : []   ,  // ordered array of column numbers in the partition key
       sparseContainer : null    // default column for sparse fields
     };
  */
  if(ndb_table && ! return_val) {
    Local<Object> table = NdbDictTableEnv.wrap(ndb_table)->ToObject();

    // database
    table->Set(SYMBOL(isolate, "database"), String::NewFromUtf8(isolate, arg1));

    // name
    ndbTableName = ndb_table->getName();
    table->Set(SYMBOL(isolate, "name"), String::NewFromUtf8(isolate, ndbTableName));

    // partitionKey
    int nPartitionKeys = 0;
    Handle<Array> partitionKeys = Array::New(isolate);
    table->Set(SYMBOL(isolate, "partitionKey"), partitionKeys);

    // sparseContainer
    table->Set(SYMBOL(isolate, "sparseContainer"), Null(isolate));

    // columns
    Local<Array> columns = Array::New(isolate, ndb_table->getNoOfColumns());
    for(int i = 0 ; i < ndb_table->getNoOfColumns() ; i++) {
      const NdbDictionary::Column *ndb_col = ndb_table->getColumn(i);
      Handle<Object> col = buildDBColumn(ndb_col);
      columns->Set(i, col);
      if(ndb_col->getPartitionKey()) {   /* partition key */
        partitionKeys->Set(nPartitionKeys++,
                           String::NewFromUtf8(isolate, ndb_col->getName()));
      }
      if( ! strcmp(ndb_col->getName(), "SPARSE_FIELDS") &&
          ( (! strncmp(getColumnType(ndb_col), "VARCHAR", 7) &&
             (getEncoderCharsetForColumn(ndb_col)->isUnicode)) ||
            ( ! strncmp(getColumnType(ndb_col), "VARBINARY", 9) ||
              ! strncmp(getColumnType(ndb_col), "JSON", 4))))
      {
        table->Set(SYMBOL(isolate, "sparseContainer"),
                   String::NewFromUtf8(isolate, ndb_col->getName()));
      }
    }
    table->Set(SYMBOL(isolate, "columns"), columns);

    // indexes (primary key & secondary)
    Local<Array> js_indexes = Array::New(isolate, idx_list.count + 1);
    js_indexes->Set(0, buildDBIndex_PK());                  // primary key
    for(unsigned int i = 0 ; i < idx_list.count ; i++) {    // secondary indexes
      const NdbDictionary::Index * idx =
        dict->getIndex(idx_list.elements[i].name, arg2);
      js_indexes->Set(i+1, buildDBIndex(idx));
    }
    SET_RO_PROPERTY(table, SYMBOL(isolate, "indexes"), js_indexes);

    // foreign keys (only foreign keys for which this table is the child)
    // now create the javascript foreign key metadata objects
    // for dictionary objects cached earlier
    Local<Array> js_fks = Array::New(isolate, fk_count);
    int fk_number = 0;
    for(unsigned int i = 0 ; i < fk_list.count ; i++) {
      NdbDictionary::ForeignKey fk;
      if (fk_list.elements[i].type == NdbDictionary::Object::ForeignKey) {
        const char * fk_name = fk_list.elements[i].name;
        int fkGetCode = dict->getForeignKey(fk, fk_name);
        DEBUG_PRINT("getForeignKey for %s returned %i", fk_name, fkGetCode);
        // see if the foreign key child table is this table
        if(splitNameMatchesDbAndTable(fk.getChildTable())) {
          // the foreign key child table is this table; build the fk object
          DEBUG_PRINT("Adding foreign key for %s at %i", fk.getName(), fk_number);
          js_fks->Set(fk_number++, buildDBForeignKey(&fk));
        }
      }
    }
    SET_RO_PROPERTY(table, SYMBOL(isolate, "foreignKeys"), js_fks);

    // Autoincrement Cache Impl (also not part of spec)
    if(per_table_ndb) {
      table->Set(SYMBOL(isolate, "per_table_ndb"), Ndb_Wrapper(per_table_ndb));
    }

    // User Callback
    cb_args[1] = table;
  }
  else {
    cb_args[0] = NdbError_Wrapper(* ndbError);
  }

  ToLocal(& callback)->Call(ctx, 2, cb_args);
}