// Executes a result-producing query and returns a shared result set.
// Returns an empty DBResult_ptr when not connected or on any ODBC error.
DBResult_ptr DatabaseODBC::internalSelectQuery(const std::string &query)
{
	if(!m_connected)
		return DBResult_ptr();

	#ifdef __DEBUG_SQL__
	std::cout << "ODBC QUERY: " << query << std::endl;
	#endif

	// _parse() rewrites the query into ODBC-compatible syntax.
	std::string buf = _parse(query);

	SQLHSTMT stmt;
	SQLRETURN ret = SQLAllocHandle(SQL_HANDLE_STMT, m_handle, &stmt);
	if(!RETURN_SUCCESS(ret)){
		std::cout << "Failed to allocate ODBC SQLHSTMT statement." << std::endl;
		return DBResult_ptr();
	}

	ret = SQLExecDirect(stmt, (SQLCHAR*)buf.c_str(), buf.length() );
	if(!RETURN_SUCCESS(ret)){
		std::cout << "SQLExecDirect(): " << query << ": ODBC ERROR." << std::endl;
		// Fix: release the statement handle on failure (it was leaked before).
		SQLFreeHandle(SQL_HANDLE_STMT, stmt);
		return DBResult_ptr();
	}

	// ODBCResult takes ownership of stmt; the custom deleter routes
	// destruction through DatabaseDriver::freeResult.
	DBResult_ptr results(new ODBCResult(stmt), boost::bind(&DatabaseDriver::freeResult, this, _1));
	return verifyResult(results);
}
// Executes a result-producing query and returns a raw result pointer
// (caller frees via the driver). Returns NULL when not connected or on
// any ODBC error.
DBResult* DatabaseODBC::storeQuery(const std::string &query)
{
	if(!m_connected)
		return NULL;

	#ifdef __DEBUG_SQL__
	std::cout << "ODBC QUERY: " << query << std::endl;
	#endif

	// _parse() rewrites the query into ODBC-compatible syntax.
	std::string buf = _parse(query);

	SQLHSTMT stmt;
	SQLRETURN ret = SQLAllocHandle(SQL_HANDLE_STMT, m_handle, &stmt);
	if(!RETURN_SUCCESS(ret)){
		std::cout << "Failed to allocate ODBC SQLHSTMT statement." << std::endl;
		return NULL;
	}

	ret = SQLExecDirect(stmt, (SQLCHAR*)buf.c_str(), buf.length() );
	if(!RETURN_SUCCESS(ret)){
		std::cout << "SQLExecDirect(): " << query << ": ODBC ERROR." << std::endl;
		// Fix: release the statement handle on failure (it was leaked before).
		SQLFreeHandle(SQL_HANDLE_STMT, stmt);
		return NULL;
	}

	// ODBCResult takes ownership of stmt.
	DBResult* results = new ODBCResult(stmt);
	return verifyResult(results);
}
bool DatabaseODBC::internalQuery(const std::string &query) { if(!m_connected) return false; #ifdef __DEBUG_SQL__ std::cout << "ODBC QUERY: " << query << std::endl; #endif std::string buf = _parse(query); SQLHSTMT stmt; SQLRETURN ret = SQLAllocHandle(SQL_HANDLE_STMT, m_handle, &stmt); if(!RETURN_SUCCESS(ret)){ std::cout << "Failed to allocate ODBC SQLHSTMT statement." << std::endl; return false; } ret = SQLExecDirect(stmt, (SQLCHAR*)buf.c_str(), buf.length() ); if(!RETURN_SUCCESS(ret)){ std::cout << "SQLExecDirect(): " << query << ": ODBC ERROR." << std::endl; return false; } return true; }
// Establishes the ODBC environment and connection described by the
// SQL_DB / SQL_USER / SQL_PASS config entries. On any failure the
// partially-created handles are released and m_connected stays false.
DatabaseODBC::DatabaseODBC()
{
	m_connected = false;

	// Fix: the old code strcpy()'d the settings into fixed-size heap
	// buffers (SQL_MAX_DSN_LENGTH / 32 bytes) that could overflow and
	// were never delete[]d. std::string avoids both problems.
	const std::string dns = g_config.getString(ConfigManager::SQL_DB);
	const std::string user = g_config.getString(ConfigManager::SQL_USER);
	const std::string pass = g_config.getString(ConfigManager::SQL_PASS);

	SQLRETURN ret = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &m_env);
	if(!RETURN_SUCCESS(ret)){
		std::cout << "Failed to allocate ODBC SQLHENV enviroment handle." << std::endl;
		m_env = NULL;
		return;
	}

	// The driver manager requires the ODBC version to be declared before
	// a connection handle can be allocated.
	ret = SQLSetEnvAttr(m_env, SQL_ATTR_ODBC_VERSION, (SQLPOINTER*)SQL_OV_ODBC3, 0);
	if(!RETURN_SUCCESS(ret)){
		std::cout << "SQLSetEnvAttr(SQL_ATTR_ODBC_VERSION): Failed to switch to ODBC 3 version." << std::endl;
		SQLFreeHandle(SQL_HANDLE_ENV, m_env);
		m_env = NULL;
	}

	if(m_env == NULL){
		std::cout << "ODBC SQLHENV enviroment not initialized." << std::endl;
		return;
	}

	ret = SQLAllocHandle(SQL_HANDLE_DBC, m_env, &m_handle);
	if(!RETURN_SUCCESS(ret)){
		std::cout << "Failed to allocate ODBC SQLHDBC connection handle." << std::endl;
		m_handle = NULL;
		return;
	}

	ret = SQLSetConnectAttr(m_handle, SQL_ATTR_CONNECTION_TIMEOUT, (SQLPOINTER*)5, 0);
	if(!RETURN_SUCCESS(ret)){
		std::cout << "SQLSetConnectAttr(SQL_ATTR_CONNECTION_TIMEOUT): Failed to set connection timeout." << std::endl;
		SQLFreeHandle(SQL_HANDLE_DBC, m_handle);
		m_handle = NULL;
		return;
	}

	ret = SQLConnect(m_handle, (SQLCHAR*)dns.c_str(), SQL_NTS, (SQLCHAR*)user.c_str(), SQL_NTS, (SQLCHAR*)pass.c_str(), SQL_NTS);
	if(!RETURN_SUCCESS(ret)){
		std::cout << "Failed to connect to ODBC via DSN: " << dns << " (user " << user << ")" << std::endl;
		SQLFreeHandle(SQL_HANDLE_DBC, m_handle);
		m_handle = NULL;
		return;
	}

	m_connected = true;
}
// Remove `key` from the store.
// Preconditions: the database is open and the key exists in the index.
// NOTE(review): WARNING_ASSERT / RETHROW_ON_EXCEPTION / RETURN_SUCCESS are
// project macros -- presumably the middle one early-returns a failure
// Status when the wrapped call fails; confirm their expansion.
Status Engine::Remove(const String& key) {
    WARNING_ASSERT(data_file && index_file);
    WARNING_ASSERT(index_file->Exist(key));
    int page_id;
    RETHROW_ON_EXCEPTION(index_file->Get(key, page_id));
    // The highest bit: determine whether there is a conflict
    if (page_id & 0x80000000) {
        // Low 31 bits hold the actual page id when the conflict flag is set.
        if (data_file->Contains(page_id & 0x7fffffff, key)) {
            // Try to fetch major items here
            RETHROW_ON_EXCEPTION(data_file->Remove(page_id & 0x7fffffff, key));
        } else {
            // Otherwise, use the slow method as fallback
            RETHROW_ON_EXCEPTION(data_file->Remove(key));
        }
        // NOTE(review): on this conflict path the index entry is never
        // removed -- verify that this is intentional.
    } else {
        RETHROW_ON_EXCEPTION(index_file->Remove(key));
        RETHROW_ON_EXCEPTION(data_file->Remove(page_id, key));
    }
    RETURN_SUCCESS();
}
// Flush pending changes of both backing files to durable storage.
// Fails fast (via RETHROW_ON_EXCEPTION) if either flush fails.
Status Engine::UpdateChanges() {
    WARNING_ASSERT(data_file && index_file);
    RETHROW_ON_EXCEPTION(data_file->UpdateChanges());
    RETHROW_ON_EXCEPTION(index_file->UpdateChanges());
    RETURN_SUCCESS();
}
// Close both backing files and tear down the in-memory engine state.
// NOTE(review): if either Close() fails, RETHROW_ON_EXCEPTION presumably
// returns early, in which case the DataFile/IndexFile objects are never
// deleted and the pointers stay set -- confirm that is acceptable.
Status Engine::CloseDb() {
    WARNING_ASSERT(data_file && index_file);
    RETHROW_ON_EXCEPTION(data_file->Close());
    RETHROW_ON_EXCEPTION(index_file->Close());
    delete data_file;
    delete index_file;
    data_file = NULL;
    index_file = NULL;
    db_name = "";
    RETURN_SUCCESS();
}
// Bring the daemon up: open the database file, start the TCP server with
// our read callback, then start the epoll thread.
Status Daemon::Start(const String& file, int32_t port) {
    // RETHROW_ON_EXCEPTION(engine.CreateDb(file));
    RETHROW_ON_EXCEPTION(engine.OpenDb(file));
    // Bind the member callback so the TCP server can invoke it with its
    // two arguments (presumably connection + payload -- confirm signature).
    auto read_callback_bind = std::bind(&Daemon::read_callback, this, std::placeholders::_1, std::placeholders::_2);
    RETHROW_ON_EXCEPTION(tcpServer.Start(port, read_callback_bind));
    RETHROW_ON_EXCEPTION(epoll_thread.Start());
    RETURN_SUCCESS();
}
// Open an existing database: "<file>.DATA" and "<file>.INDEX".
// Precondition: no database is currently open.
// NOTE(review): if either OpenFile() fails, the early return leaks the two
// freshly new'd objects and leaves data_file/index_file non-NULL, which
// would break the WARNING_ASSERT on a retry -- confirm intended behavior.
Status Engine::OpenDb(const String& file) {
    WARNING_ASSERT(!data_file && !index_file);
    data_file = new DataFile(data_paged_file);
    index_file = new IndexFile(index_paged_file);
    RETHROW_ON_EXCEPTION(data_file->OpenFile(file + ".DATA"));
    RETHROW_ON_EXCEPTION(index_file->OpenFile(file + ".INDEX"));
    db_name = file;
    RETURN_SUCCESS();
}
// Fetch column `s` of the current row as a signed 32-bit integer.
// Returns 0 (and logs an error) if the column is unknown or the fetch fails.
int32_t ODBCResult::getDataInt(const std::string &s)
{
	listNames_t::iterator it = m_listNames.find(s);
	if(it != m_listNames.end() ){
		int32_t value;
		SQLRETURN ret = SQLGetData(m_handle, it->second, SQL_C_SLONG, &value, 0, NULL);
		if( RETURN_SUCCESS(ret) )
			return value;
		// Fix: fall through to the single error report below -- the old
		// code printed the same message twice on a SQLGetData failure.
	}

	std::cout << "Error during getDataInt(" << s << ")." << std::endl;
	return 0; // Failed
}
// Fetch column `s` of the current row as binary data (at most 1024 bytes);
// `size` receives the actual length. On success the caller owns the
// returned buffer (delete[]). Returns 0 and logs an error on failure.
// NOTE(review): casting &size (unsigned long) to SQLLEN* assumes the two
// types have the same width -- not true on 64-bit Windows; confirm target.
const char* ODBCResult::getDataStream(const std::string &s, unsigned long &size)
{
	listNames_t::iterator it = m_listNames.find(s);
	if(it != m_listNames.end() ) {
		char* value = new char[1024];
		SQLRETURN ret = SQLGetData(m_handle, it->second, SQL_C_BINARY, value, 1024, (SQLLEN*)&size);
		if( RETURN_SUCCESS(ret) )
			return value;
		// Fix: release the buffer on failure (it was leaked before).
		delete[] value;
	}

	std::cout << "Error during getDataStream(" << s << ")." << std::endl;
	return 0; // Failed
}
// Fetch column `s` of the current row as a NUL-terminated string
// (truncated to 1024 bytes). Returns "" and logs an error on failure.
std::string ODBCResult::getDataString(const std::string &s)
{
	listNames_t::iterator it = m_listNames.find(s);
	if(it != m_listNames.end() ) {
		// Fix: use a stack buffer -- the old heap buffer (new char[1024])
		// was never delete[]d, leaking 1 KiB per call.
		char value[1024];
		SQLRETURN ret = SQLGetData(m_handle, it->second, SQL_C_CHAR, value, 1024, NULL);
		if( RETURN_SUCCESS(ret) )
			return std::string(value);
	}

	std::cout << "Error during getDataString(" << s << ")." << std::endl;
	return std::string(""); // Failed
}
/*
 * kafka_consume_end_tr
 *
 * SQL-callable: stop the consumer processes for a (topic, relation) pair
 * and remove their bookkeeping entries from the shared hash tables.
 */
Datum kafka_consume_end_tr(PG_FUNCTION_ARGS) {
	text *topic;
	text *qualified;
	Relation consumers;
	Oid id;
	bool found;
	HASH_SEQ_STATUS iter;
	KafkaConsumerProc *proc;

	if (PG_ARGISNULL(0))
		elog(ERROR, "topic cannot be null");
	if (PG_ARGISNULL(1))
		elog(ERROR, "relation cannot be null");

	topic = PG_GETARG_TEXT_P(0);
	qualified = PG_GETARG_TEXT_P(1);
	consumers = open_pipeline_kafka_consumers();

	id = get_consumer_id(consumers, qualified, topic);
	if (!OidIsValid(id))
		elog(ERROR, "there are no consumers for that topic and relation");

	/* Drop the consumer-group entry; absence means nothing is running. */
	hash_search(consumer_groups, &id, HASH_REMOVE, &found);
	if (!found)
		elog(ERROR, "no consumer processes are running for that topic and relation");

	/* Terminate every worker registered under this consumer id. */
	hash_seq_init(&iter, consumer_procs);
	while ((proc = (KafkaConsumerProc *) hash_seq_search(&iter)) != NULL)
	{
		if (proc->consumer_id != id)
			continue;

		TerminateBackgroundWorker(&proc->worker);
		hash_search(consumer_procs, &id, HASH_REMOVE, NULL);
	}

	heap_close(consumers, NoLock);

	RETURN_SUCCESS();
}
// Insert or update `key` -> `value`.
// The index stores a page id per key; the high bit of the page id marks a
// "conflict" entry whose record may live outside the recorded page.
// NOTE(review): the `static int scan` below is shared mutable state across
// all Engine instances and is not thread-safe -- confirm single-threaded use.
Status Engine::Put(const String& key, const String& value) {
    WARNING_ASSERT(data_file && index_file);
    if (index_file->Exist(key)) {
        // If I just want to update it, or conflict in same page, behave
        // normally is ok.
        int page_id;
        RETHROW_ON_EXCEPTION(index_file->Get(key, page_id));
        if (data_file->Contains(page_id, key)) {
            // In-place update; if the page is full, relocate the record and
            // point the index at the new page.
            if (!(data_file->Put(page_id, key, value) == STATUS_SUCCESS)) {
                RETHROW_ON_EXCEPTION(data_file->Remove(page_id, key));
                RETHROW_ON_EXCEPTION(data_file->Put(key, value, page_id));
                RETHROW_ON_EXCEPTION(index_file->Update(key, page_id));
            }
        } else {
            // Otherwise it will fallback.
            RETHROW_ON_EXCEPTION(data_file->Put(key, value));
            // Mark the index entry with the conflict flag (high bit set).
            RETHROW_ON_EXCEPTION(index_file->Update(key, page_id | 0x80000000));
        }
    } else {
        // New entry here. Just insert it normally (very slow)
        int page_id;
        // XXX: To faster insert cost
        static int scan = 0;
        RETHROW_ON_EXCEPTION(data_file->Put(key, value, page_id, scan));
        // printf("XXX %d\n", page_id);
        // Remember the page we landed on as the starting point for the
        // next insertion scan.
        scan = page_id;
        RETHROW_ON_EXCEPTION(index_file->Put(key, page_id));
    }
    RETURN_SUCCESS();
}
int main (void) { ncptl_int i; debug_printf ("\tTesting ncptl_func_bits() ...\n"); for (i=0; i<BIGNUM; i++) { ncptl_int numbits = ncptl_func_bits (i); if (1<<numbits < i) { debug_printf ("\t ncptl_func_bits(%" NICS ") --> %" NICS " [too small]\n", i, numbits); RETURN_FAILURE(); } if (1<<(numbits-1) > i) { debug_printf ("\t ncptl_func_bits(%" NICS ") --> %" NICS " [too large]\n", i, numbits); RETURN_FAILURE(); } } RETURN_SUCCESS(); }
/* Self-test for ncptl_time(): time a one-second sleep three times and pass
 * if any measurement is within the error threshold of one million usecs.
 * NOTE(review): the success path (RETURN_SUCCESS inside the loop) skips
 * ncptl_finalize() -- confirm the macro handles cleanup, or that skipping
 * finalization is acceptable in a test. */
int main (int argc, char *argv[])
{
  uint64_t starttime, stoptime, elapsedtime;
  double timing_error;
  const double error_threshold = 5.0;   /* Allow a 5% error. */
  int i;

  /* Initialize the run-time library. */
  debug_printf ("\tTesting ncptl_time() ...\n");
  ncptl_init (NCPTL_RUN_TIME_VERSION, argv[0]);

  /* Measure what should be about a million microseconds. */
  for (i=3; i>0; i--) {
    starttime = ncptl_time();
    sleep (1);
    stoptime = ncptl_time();
    elapsedtime = stoptime - starttime;

    /* Complain if we're far off. */
    timing_error = 100.0 * fabs (((double)elapsedtime-1.0e+6) / 1.0e+6);
    debug_printf ("\t Starting time (usecs): %25" PRIu64 "\n", starttime);
    debug_printf ("\t Ending time (usecs): %25" PRIu64 "\n", stoptime);
    debug_printf ("\t Elapsed time (usecs): %25" PRIu64 "\n", elapsedtime);
    debug_printf ("\t Expected value (usecs): %25llu\n", 1000000ULL);
    debug_printf ("\t Error: %27.1lf%%\n", timing_error);
    if (timing_error <= error_threshold)
      RETURN_SUCCESS();
    if (i > 1)
      debug_printf ("\tTrying again ...\n");
    else
      debug_printf ("\tGiving up.\n");
  }

  /* Return successfully. */
  ncptl_finalize();
  argc = 0;    /* Try to avoid "unused parameter" warnings. */
  RETURN_FAILURE();
}
/*
 * kafka_consume_begin_all
 *
 * SQL-callable: launch a consumer group for every registered consumer,
 * starting from the end of each topic (RD_KAFKA_OFFSET_END).
 * NOTE(review): the failure return inside the loop skips heap_endscan()
 * and heap_close() -- presumably transaction abort cleans these up, but
 * confirm against the surrounding error-handling conventions.
 */
Datum kafka_consume_begin_all(PG_FUNCTION_ARGS) {
	HeapTuple tup = NULL;
	HeapScanDesc scan;
	Relation consumers = open_pipeline_kafka_consumers();

	scan = heap_beginscan(consumers, GetTransactionSnapshot(), 0, NULL);
	while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		Oid id = HeapTupleGetOid(tup);
		KafkaConsumer consumer;

		load_consumer_state(id, &consumer);
		if (!launch_consumer_group(consumers, &consumer, RD_KAFKA_OFFSET_END))
			RETURN_FAILURE();
	}

	heap_endscan(scan);
	heap_close(consumers, NoLock);

	RETURN_SUCCESS();
}
/* Self-test for ncptl_func_modulo(): for all n, d in [-10, 10) with d != 0,
 * check that the result is non-negative and that n - result is an integer
 * multiple of d (i.e. result is a true mathematical modulus). */
int main (void)
{
  ncptl_int n, d;

  debug_printf ("\tTesting ncptl_func_modulo() ...\n");
  for (n=-10; n<10; n++)
    for (d=-10; d<10; d++)
      if (d) {
        ncptl_int result = ncptl_func_modulo (n, d);   /* n modulo d */
        ncptl_int some_integer;   /* Integer that might make the modulo expression true */
        int found_match = 0;      /* 1 if some_integer makes the modulo expression true */

        /* coNCePTuaL guarantees a positive remainder. */
        if (result < 0) {
          debug_printf ("\t ncptl_func_modulo (%" NICS ", %" NICS ") --> %" NICS " [should not be negative]\n",
                        n, d, result);
          RETURN_FAILURE();
        }

        /* n=result (mod d) <--> n-result = some_integer*d for some
         * some_integer. */
        for (some_integer=-ncptl_func_abs(n); some_integer<=ncptl_func_abs(n); some_integer++)
          if (n-result == some_integer*d) {
            found_match = 1;
            break;
          }
        if (!found_match) {
          debug_printf ("\t ncptl_func_modulo (%" NICS ", %" NICS ") --> %" NICS " [incorrect result]\n",
                        n, d, result);
          RETURN_FAILURE();
        }
        debug_printf ("\t ncptl_func_modulo (%" NICS ", %" NICS ") --> %" NICS "\n",
                      n, d, result);
      }
  RETURN_SUCCESS();
}
/* Self-test for ncptl_parse_command_line(): verify default values, short
 * options, long options (when an option-parsing backend is available), and
 * numeric suffix expansion ("1011e+2" --> 101100). */
int main (int argc, char *argv[])
{
  char *test_argv[6];    /* Hardwired argv[] for testing. */
  ncptl_int testvar;     /* Variable that ncptl_parse_command_line() should set */
  char *stringvar;       /* A string variable for ncptl_parse_command_line() */
  NCPTL_CMDLINE arglist[] = {   /* Arguments to test. */
    { NCPTL_TYPE_INT, NULL, "testing", 't', "Test of ncptl_parse_command_line()", {0} },
    { NCPTL_TYPE_STRING, NULL, "somestring", 's', "Another test of ncptl_parse_command_line()", {0} }
  };

  /* Initialize the run-time library and the ARGLIST array. */
  debug_printf ("\tTesting ncptl_parse_command_line() ...\n");
  ncptl_fast_init = 1;    /* We don't need accurate timing for this test. */
  ncptl_init (NCPTL_RUN_TIME_VERSION, argv[0]);
  arglist[0].variable = (CMDLINE_VALUE *) &testvar;
  arglist[0].defaultvalue.intval = 123;
  arglist[1].variable = (CMDLINE_VALUE *) &stringvar;
  arglist[1].defaultvalue.stringval = "abc123";

  /* Ensure that testvar receives its default value when given an
   * empty command line. */
  test_argv[0] = argv[0];
  test_argv[1] = NULL;
  testvar = 999;
  stringvar = "xxx999";
  ncptl_parse_command_line(1, test_argv, arglist, 2);
  debug_printf ("\tExpected 123; got %" NICS ".\n", testvar);
  debug_printf ("\tExpected \"abc123\"; got \"%s\".\n", stringvar);
  if (testvar != 123)
    RETURN_FAILURE();
  if (strcmp (stringvar, "abc123"))
    RETURN_FAILURE();

  /* Ensure that short arguments work. */
  test_argv[1] = "-t";
  test_argv[2] = "456";
  test_argv[3] = "-s";
  test_argv[4] = "def456";
  test_argv[5] = NULL;
  testvar = 999;
  stringvar = "xxx999";
  ncptl_parse_command_line(5, test_argv, arglist, 2);
  debug_printf ("\tExpected 456; got %" NICS ".\n", testvar);
  debug_printf ("\tExpected \"def456\"; got \"%s\".\n", stringvar);
  if (testvar != 456)
    RETURN_FAILURE();
  if (strcmp (stringvar, "def456"))
    RETURN_FAILURE();

  /* Ensure that long arguments work (only when a long-option backend was
   * compiled in). */
#if defined(USE_POPT) || defined(USE_GETOPT_LONG)
  test_argv[1] = "--testing";
  test_argv[2] = "789";
  test_argv[3] = "--somestring";
  test_argv[4] = "ghi789";
  test_argv[5] = NULL;
  testvar = 999;
  stringvar = "xxx999";
  ncptl_parse_command_line(5, test_argv, arglist, 2);
  debug_printf ("\tExpected 789; got %" NICS ".\n", testvar);
  debug_printf ("\tExpected \"ghi789\"; got \"%s\".\n", stringvar);
  if (testvar != 789)
    RETURN_FAILURE();
  if (strcmp (stringvar, "ghi789"))
    RETURN_FAILURE();
#endif

  /* Ensure that suffixed arguments work. */
  test_argv[1] = "-t";
  test_argv[2] = "1011e+2";
  test_argv[3] = NULL;
  testvar = 999;
  ncptl_parse_command_line(3, test_argv, arglist, 2);
  debug_printf ("\tExpected 101100; got %" NICS ".\n", testvar);
  if (testvar != 101100)
    RETURN_FAILURE();

  /* Return successfully. */
  ncptl_finalize();
  argc = 0;    /* Try to avoid "unused parameter" warnings. */
  RETURN_SUCCESS();
}
// Delete the on-disk files of a database ("<file>.DATA" / "<file>.INDEX").
// Does not require (and does not touch) an open engine instance.
Status Engine::UnlinkDb(const String& file) {
    RETHROW_ON_EXCEPTION(PagedFile::Unlink(file + ".DATA"));
    RETHROW_ON_EXCEPTION(PagedFile::Unlink(file + ".INDEX"));
    RETURN_SUCCESS();
}
// Block the caller until the epoll thread terminates.
Status Daemon::Join() {
    epoll_thread.Join();
    RETURN_SUCCESS();
}
// Move to the next row of the result set. While a row is available this
// returns a shared pointer to *this (keeping the result alive); at the end
// of the set it returns an empty pointer.
DBResult_ptr ODBCResult::advance()
{
	m_rowAvailable = RETURN_SUCCESS(SQLFetch(m_handle));
	if(m_rowAvailable)
		return shared_from_this();

	return DBResult_ptr();
}
// Forward a flush request to the engine (see Engine::UpdateChanges).
Status Daemon::UpdateChanges() {
    RETHROW_ON_EXCEPTION(engine.UpdateChanges());
    RETURN_SUCCESS();
}
// Shut the daemon down: stop accepting connections first, then close the
// database so no request can arrive after the engine is gone.
Status Daemon::Stop() {
    RETHROW_ON_EXCEPTION(tcpServer.Stop());
    RETHROW_ON_EXCEPTION(engine.CloseDb());
    RETURN_SUCCESS();
}
/*
 * kafka_consume_begin_tr
 *
 * SQL-callable: register (or update) a consumer for a (topic, relation)
 * pair and launch its consumer group. Optional arguments supply the copy
 * format options, batch size, parallelism and starting offset.
 */
Datum kafka_consume_begin_tr(PG_FUNCTION_ARGS) {
	text *topic;
	text *qualified_name;
	RangeVar *relname;
	Relation rel;
	Relation consumers;
	Oid id;
	bool result;
	text *format;
	text *delimiter;
	text *quote;
	text *escape;
	int batchsize;
	int parallelism;
	int64 offset;
	KafkaConsumer consumer;

	if (PG_ARGISNULL(0))
		elog(ERROR, "topic cannot be null");
	if (PG_ARGISNULL(1))
		elog(ERROR, "relation cannot be null");
	if (PG_ARGISNULL(2))
		elog(ERROR, "format cannot be null");

	topic = PG_GETARG_TEXT_P(0);
	qualified_name = PG_GETARG_TEXT_P(1);
	format = PG_GETARG_TEXT_P(2);

	/* Optional arguments fall back to their documented defaults. */
	if (PG_ARGISNULL(3))
		delimiter = NULL;
	else
		delimiter = PG_GETARG_TEXT_P(3);

	if (PG_ARGISNULL(4))
		quote = NULL;
	else
		quote = PG_GETARG_TEXT_P(4);

	if (PG_ARGISNULL(5))
		escape = NULL;
	else
		escape = PG_GETARG_TEXT_P(5);

	if (PG_ARGISNULL(6))
		batchsize = 1000;
	else
		batchsize = PG_GETARG_INT32(6);

	if (PG_ARGISNULL(7))
		parallelism = 1;
	else
		parallelism = PG_GETARG_INT32(7);

	if (PG_ARGISNULL(8))
		offset = RD_KAFKA_OFFSET_NULL;
	else
		offset = PG_GETARG_INT64(8);

	/* there's no point in progressing if there aren't any brokers */
	if (!get_all_brokers())
		elog(ERROR, "add at least one broker with kafka_add_broker");

	/* verify that the target relation actually exists */
	relname = makeRangeVarFromNameList(textToQualifiedNameList(qualified_name));
	rel = heap_openrv(relname, NoLock);

	if (IsInferredStream(RelationGetRelid(rel)))
		ereport(ERROR,
				(errmsg("target stream must be static"),
				 errhint("Use CREATE STREAM to create a stream that can consume a Kafka topic.")));

	heap_close(rel, NoLock);

	consumers = open_pipeline_kafka_consumers();
	id = create_or_update_consumer(consumers, qualified_name, topic, format,
			delimiter, quote, escape, batchsize, parallelism);
	load_consumer_state(id, &consumer);
	result = launch_consumer_group(consumers, &consumer, offset);
	heap_close(consumers, NoLock);

	if (result)
		RETURN_SUCCESS();
	else
		RETURN_FAILURE();
}
/* Self-test for the coNCePTuaL topology functions: k-ary trees, mesh/torus
 * neighbors, mesh coordinates and distances, and k-nomial trees. Every
 * table below holds the expected answers; each loop compares the library's
 * output against the table and fails on the first mismatch. */
int main (void)
{
  ncptl_int parent2[] = {   /* Map to parent in a 2-ary tree */
    -1, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6
  };
  ncptl_int parent3[] = {   /* Map to parent in a 3-ary tree */
    -1, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6
  };
  ncptl_int child2[][2] = {   /* Map to children in a 2-ary tree */
    { 1, 2}, { 3, 4}, { 5, 6}, { 7, 8}, { 9, 10}, {11, 12}, {13, 14}
  };
  ncptl_int child3[][3] = {   /* Map to children in a 3-ary tree */
    { 1, 2, 3}, { 4, 5, 6}, { 7, 8, 9}, {10, 11, 12}
  };
  ncptl_int mesh_neighbor_pos[] = {   /* Map to {+x, +y, +z} neighbor in a 4x3x2 mesh */
    17, 18, 19, -1, 21, 22, 23, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
  };
  ncptl_int torus_neighbor_pos[] = {   /* Map to {+x, +y, +z} neighbor in a 4x3x2 torus */
    17, 18, 19, 16, 21, 22, 23, 20, 13, 14, 15, 12,
    5, 6, 7, 4, 9, 10, 11, 8, 1, 2, 3, 0
  };
  ncptl_int partial_torus_neighbor_pos[] = {   /* Map to {+x, +y, +z} neighbor in a 4x3x2 mesh that wraps in y only */
    17, 18, 19, -1, 21, 22, 23, -1, 13, 14, 15, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
  };
  ncptl_int parent2k[] = {   /* Map to parent in a 2-nomial tree */
    -1, 0, 0, 1, 0, 1, 2, 3
  };
  ncptl_int parent3k[] = {   /* Map to parent in a 3-nomial tree */
    -1, 0, 0, 0, 1, 2, 0, 1, 2, 0, 1, 2, 3, 4, 5, 6, 7, 8,
    0, 1, 2, 3, 4, 5, 6, 7, 8
  };
  ncptl_int child2k[][3] = {   /* Map to children in a 2-nomial tree */
    { 1, 2, 4}, { 3, 5, -1}, { 6, -1, -1}, { 7, -1, -1},
    {-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1}
  };
  ncptl_int child3k[][6] = {   /* Map to children in a 3-nomial tree */
    { 1, 2, 3, 6, 9, 18}, { 4, 7, 10, 19, -1, -1}, { 5, 8, 11, 20, -1, -1},
    {12, 21, -1, -1, -1, -1}, {13, 22, -1, -1, -1, -1}, {14, 23, -1, -1, -1, -1},
    {15, 24, -1, -1, -1, -1}, {16, 25, -1, -1, -1, -1}, {17, 26, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1, -1}
  };
  ncptl_int knomial_sizes[3];   /* Three different tree sizes to try */
  ncptl_int i, j, k;
  ncptl_int x, y, z;

  /* Test ncptl_func_tree_parent(). */
  debug_printf ("\tTesting ncptl_func_tree_parent() ...\n");
  for (i=0; i<(ncptl_int)(sizeof(parent2)/sizeof(ncptl_int)); i++) {
    debug_printf ("\t ncptl_func_tree_parent (%" NICS ", 2) --> %" NICS,
                  i, ncptl_func_tree_parent(i, 2));
    if (ncptl_func_tree_parent(i, 2) != parent2[i]) {
      debug_printf (" (should be %" NICS ")\n", parent2[i]);
      RETURN_FAILURE();
    }
    else
      debug_printf ("\n");
  }
  for (i=0; i<(ncptl_int)(sizeof(parent3)/sizeof(ncptl_int)); i++) {
    debug_printf ("\t ncptl_func_tree_parent (%" NICS ", 3) --> %" NICS,
                  i, ncptl_func_tree_parent(i, 3));
    if (ncptl_func_tree_parent(i, 3) != parent3[i]) {
      debug_printf (" (should be %" NICS ")\n", parent3[i]);
      RETURN_FAILURE();
    }
    else
      debug_printf ("\n");
  }
  debug_printf ("\n");

  /* Test ncptl_func_tree_child(). */
  debug_printf ("\tTesting ncptl_func_tree_child() ...\n");
  for (i=0; i<(ncptl_int)(sizeof(child2)/(2*sizeof(ncptl_int))); i++)
    for (j=0; j<2; j++) {
      debug_printf ("\t ncptl_func_tree_child (%" NICS ", %" NICS ", 2) --> %" NICS,
                    i, j, ncptl_func_tree_child(i, j, 2));
      if (ncptl_func_tree_child(i, j, 2) != child2[i][j]) {
        debug_printf (" (should be %" NICS ")\n", child2[i][j]);
        RETURN_FAILURE();
      }
      else
        debug_printf ("\n");
    }
  for (i=0; i<(ncptl_int)(sizeof(child3)/(3*sizeof(ncptl_int))); i++)
    for (j=0; j<3; j++) {
      debug_printf ("\t ncptl_func_tree_child (%" NICS ", %" NICS ", 3) --> %" NICS,
                    i, j, ncptl_func_tree_child(i, j, 3));
      if (ncptl_func_tree_child(i, j, 3) != child3[i][j]) {
        debug_printf (" (should be %" NICS ")\n", child3[i][j]);
        RETURN_FAILURE();
      }
      else
        debug_printf ("\n");
    }
  debug_printf ("\n");

  /* Test ncptl_func_mesh_neighbor() on a pure mesh, a full torus, and a
   * mesh that wraps in the y dimension only. */
  debug_printf ("\tTesting ncptl_func_mesh_neighbor() ...\n");
  for (i=0; i<(ncptl_int)(sizeof(mesh_neighbor_pos)/sizeof(ncptl_int)); i++) {
    ncptl_int neighbor = ncptl_func_mesh_neighbor (4, 3, 2, 0, 0, 0, i, +1, +1, +1);
    debug_printf ("\t ncptl_func_mesh_neighbor (4, 3, 2, 0, 0, 0, %2" NICS ", +1, +1, +1) --> %3" NICS,
                  i, neighbor);
    if (mesh_neighbor_pos[i] != neighbor) {
      debug_printf (" (should be %" NICS ")\n", mesh_neighbor_pos[i]);
      RETURN_FAILURE();
    }
    else
      debug_printf ("\n");
  }
  for (i=0; i<(ncptl_int)(sizeof(torus_neighbor_pos)/sizeof(ncptl_int)); i++) {
    ncptl_int neighbor = ncptl_func_mesh_neighbor (4, 3, 2, 1, 1, 1, i, +1, +1, +1);
    debug_printf ("\t ncptl_func_mesh_neighbor (4, 3, 2, 1, 1, 1, %2" NICS ", +1, +1, +1) --> %3" NICS,
                  i, neighbor);
    if (torus_neighbor_pos[i] != neighbor) {
      debug_printf (" (should be %" NICS ")\n", torus_neighbor_pos[i]);
      RETURN_FAILURE();
    }
    else
      debug_printf ("\n");
  }
  for (i=0; i<(ncptl_int)(sizeof(partial_torus_neighbor_pos)/sizeof(ncptl_int)); i++) {
    ncptl_int neighbor = ncptl_func_mesh_neighbor (4, 3, 2, 0, 1, 0, i, +1, +1, +1);
    debug_printf ("\t ncptl_func_mesh_neighbor (4, 3, 2, 0, 1, 0, %2" NICS ", +1, +1, +1) --> %3" NICS,
                  i, neighbor);
    if (partial_torus_neighbor_pos[i] != neighbor) {
      debug_printf (" (should be %" NICS ")\n", partial_torus_neighbor_pos[i]);
      RETURN_FAILURE();
    }
    else
      debug_printf ("\n");
  }
  debug_printf ("\n");

  /* Test ncptl_func_mesh_coord(): every task id must map back to the
   * (x, y, z) it was constructed from. */
  debug_printf ("\tTesting ncptl_func_mesh_coord() ...\n");
  for (z=0; z<GRIDDEPTH; z++)
    for (y=0; y<GRIDHEIGHT; y++)
      for (x=0; x<GRIDWIDTH; x++) {
        ncptl_int taskID = x + GRIDWIDTH*(y + GRIDHEIGHT*z);
        ncptl_int coords[3];

        for (i=0; i<3; i++)
          coords[i] = ncptl_func_mesh_coord (GRIDWIDTH, GRIDHEIGHT, GRIDDEPTH, taskID, i);
        debug_printf ("\t ncptl_func_mesh_coord (%d, %d, %d, %2" NICS ", {0,1,2}) --> {%" NICS ",%" NICS ",%" NICS "}",
                      GRIDWIDTH, GRIDHEIGHT, GRIDDEPTH, taskID,
                      coords[0], coords[1], coords[2]);
        if (x!=coords[0] || y!=coords[1] || z!=coords[2]) {
          debug_printf (" (should be {%" NICS ",%" NICS ",%" NICS "})\n", x, y, z);
          RETURN_FAILURE();
        }
        debug_printf ("\n");
      }
  debug_printf ("\n");

  /* Test ncptl_func_mesh_distance(): compare against Manhattan distance on
   * a mesh and wrap-aware Manhattan distance on a torus. */
  debug_printf ("\tTesting ncptl_func_mesh_distance() ...\n");
  for (z=0; z<GRIDDEPTH; z++)
    for (y=0; y<GRIDHEIGHT; y++)
      for (x=0; x<GRIDWIDTH; x++) {
        ncptl_int taskID_1 = x + GRIDWIDTH*(y + GRIDHEIGHT*z);
        ncptl_int xdelta, ydelta, zdelta;

        for (zdelta=0; zdelta<GRIDDEPTH; zdelta++)
          for (ydelta=0; ydelta<GRIDHEIGHT; ydelta++)
            for (xdelta=0; xdelta<GRIDWIDTH; xdelta++) {
              ncptl_int newz = (z + zdelta) % GRIDDEPTH;
              ncptl_int newy = (y + ydelta) % GRIDHEIGHT;
              ncptl_int newx = (x + xdelta) % GRIDWIDTH;
              ncptl_int abs_xdelta = (x <= newx) ? xdelta : GRIDWIDTH - xdelta;
              ncptl_int abs_ydelta = (y <= newy) ? ydelta : GRIDHEIGHT - ydelta;
              ncptl_int abs_zdelta = (z <= newz) ? zdelta : GRIDDEPTH - zdelta;
              ncptl_int taskID_2 = newx + GRIDWIDTH*(newy + GRIDHEIGHT*newz);
              ncptl_int expected_meshdist;
              ncptl_int meshdist;
              ncptl_int expected_torusdist;
              ncptl_int torusdist;

              /* Determine the correct distances. */
              expected_meshdist = abs_xdelta + abs_ydelta + abs_zdelta;
              expected_torusdist = 0;
              expected_torusdist += abs_xdelta <= GRIDWIDTH/2 ? abs_xdelta : GRIDWIDTH - abs_xdelta;
              expected_torusdist += abs_ydelta <= GRIDHEIGHT/2 ? abs_ydelta : GRIDHEIGHT - abs_ydelta;
              expected_torusdist += abs_zdelta <= GRIDDEPTH/2 ? abs_zdelta : GRIDDEPTH - abs_zdelta;

              /* Validate distance on a mesh. */
              meshdist = ncptl_func_mesh_distance (GRIDWIDTH, GRIDHEIGHT, GRIDDEPTH, 0, 0, 0,
                                                   taskID_1, taskID_2);
              debug_printf ("\t ncptl_func_mesh_distance (%d, %d, %d, 0, 0, 0, %" NICS ", %" NICS ") --> %" NICS,
                            GRIDWIDTH, GRIDHEIGHT, GRIDDEPTH, taskID_1, taskID_2, meshdist);
              if (meshdist != expected_meshdist) {
                debug_printf (" (should be %" NICS ")\n", expected_meshdist);
                RETURN_FAILURE();
              }
              debug_printf ("\n");

              /* Validate distance on a full torus. */
              torusdist = ncptl_func_mesh_distance (GRIDWIDTH, GRIDHEIGHT, GRIDDEPTH, 1, 1, 1,
                                                    taskID_1, taskID_2);
              debug_printf ("\t ncptl_func_mesh_distance (%d, %d, %d, 1, 1, 1, %" NICS ", %" NICS ") --> %" NICS,
                            GRIDWIDTH, GRIDHEIGHT, GRIDDEPTH, taskID_1, taskID_2, torusdist);
              if (torusdist != expected_torusdist) {
                debug_printf (" (should be %" NICS ")\n", expected_torusdist);
                RETURN_FAILURE();
              }
              debug_printf ("\n");
            }
      }
  debug_printf ("\n");

  /* Test ncptl_func_knomial_parent() against exact-size, oversized, and
   * one-short tree sizes; the j==2 pass patches the table entry that
   * falls outside the shrunken tree. */
  debug_printf ("\tTesting ncptl_func_knomial_parent() ...\n");
  knomial_sizes[0] = sizeof(parent2k)/sizeof(ncptl_int);
  knomial_sizes[1] = 1000;
  knomial_sizes[2] = knomial_sizes[0] - 1;
  for (j=0; j<3; j++) {
    if (j == 2)
      parent2k[sizeof(parent2k)/sizeof(ncptl_int) - 1] = -1;
    for (i=0; i<(ncptl_int)(sizeof(parent2k)/sizeof(ncptl_int)); i++) {
      debug_printf ("\t ncptl_func_knomial_parent (%" NICS ", 2, %" NICS ") --> %" NICS,
                    i, knomial_sizes[j], ncptl_func_knomial_parent(i, 2, knomial_sizes[j]));
      if (ncptl_func_knomial_parent(i, 2, knomial_sizes[j]) != parent2k[i]) {
        debug_printf (" (should be %" NICS ")\n", parent2k[i]);
        RETURN_FAILURE();
      }
      else
        debug_printf ("\n");
    }
  }
  knomial_sizes[0] = sizeof(parent3k)/sizeof(ncptl_int);
  knomial_sizes[1] = 1000;
  knomial_sizes[2] = knomial_sizes[0] - 1;
  for (j=0; j<3; j++) {
    if (j == 2)
      parent3k[sizeof(parent3k)/sizeof(ncptl_int) - 1] = -1;
    for (i=0; i<(ncptl_int)(sizeof(parent3k)/sizeof(ncptl_int)); i++) {
      debug_printf ("\t ncptl_func_knomial_parent (%" NICS ", 3, %" NICS ") --> %" NICS,
                    i, knomial_sizes[j], ncptl_func_knomial_parent(i, 3, knomial_sizes[j]));
      if (ncptl_func_knomial_parent(i, 3, knomial_sizes[j]) != parent3k[i]) {
        debug_printf (" (should be %" NICS ")\n", parent3k[i]);
        RETURN_FAILURE();
      }
      else
        debug_printf ("\n");
    }
  }
  debug_printf ("\n");

  /* Test ncptl_func_knomial_child() at the exact size and one short; the
   * k==1 pass patches the child entry that falls outside the smaller tree. */
  knomial_sizes[0] = sizeof(child2k)/(3*sizeof(ncptl_int));
  knomial_sizes[1] = knomial_sizes[0] - 1;
  debug_printf ("\tTesting ncptl_func_knomial_child() ...\n");
  for (k=0; k<2; k++) {
    if (k == 1)
      child2k[3][0] = -1;
    for (i=0; i<(ncptl_int)(sizeof(child2k)/(3*sizeof(ncptl_int))); i++)
      for (j=0; j<2; j++) {
        ncptl_int result = ncptl_func_knomial_child(i, j, 2, knomial_sizes[k], 0);
        debug_printf ("\t ncptl_func_knomial_child (%" NICS ", %" NICS ", 2, %" NICS ", 0) --> %" NICS,
                      i, j, knomial_sizes[k], result);
        if (result != child2k[i][j]) {
          debug_printf (" (should be %" NICS ")\n", child2k[i][j]);
          RETURN_FAILURE();
        }
        else
          debug_printf ("\n");
      }
  }
  knomial_sizes[0] = sizeof(child3k)/(6*sizeof(ncptl_int));
  knomial_sizes[1] = knomial_sizes[0] - 1;
  for (k=0; k<2; k++) {
    if (k == 1)
      child3k[8][1] = -1;
    for (i=0; i<(ncptl_int)(sizeof(child3k)/(6*sizeof(ncptl_int))); i++)
      for (j=0; j<2; j++) {
        ncptl_int result = ncptl_func_knomial_child(i, j, 3, knomial_sizes[k], 0);
        debug_printf ("\t ncptl_func_knomial_child (%" NICS ", %" NICS ", 3, %" NICS ", 0) --> %" NICS,
                      i, j, knomial_sizes[k], result);
        if (result != child3k[i][j]) {
          debug_printf (" (should be %" NICS ")\n", child3k[i][j]);
          RETURN_FAILURE();
        }
        else
          debug_printf ("\n");
      }
  }
  RETURN_SUCCESS();
}
bool ODBCResult::next() { SQLRETURN ret = SQLFetch(m_handle); return RETURN_SUCCESS(ret); }