/*
 * Resets the session/BIB bookkeeping arrays and injects the 16 canonical
 * test sessions (every combination the suite exercises), then validates
 * the database state.
 * Returns true on full success, false as soon as anything failed.
 */
static bool insert_test_sessions(void)
{
	/* Same 16 (index, a, b, c, d) tuples the suite has always used. */
	static const int tuples[16][4] = {
		{ 1, 2, 2, 2 }, { 1, 1, 2, 1 }, { 2, 1, 2, 1 }, { 2, 2, 2, 2 },
		{ 1, 1, 2, 2 }, { 2, 2, 1, 1 }, { 2, 1, 1, 1 }, { 1, 1, 1, 1 },
		{ 2, 2, 1, 2 }, { 1, 2, 1, 1 }, { 2, 1, 1, 2 }, { 1, 2, 1, 2 },
		{ 2, 1, 2, 2 }, { 1, 1, 1, 2 }, { 1, 2, 2, 1 }, { 2, 2, 2, 1 },
	};
	bool ok = true;
	int i;

	memset(session_instances, 0, sizeof(session_instances));
	memset(sessions, 0, sizeof(sessions));

	for (i = 0; i < 16; i++)
		ok &= inject(i, tuples[i][0], tuples[i][1], tuples[i][2],
				tuples[i][3]);

	/* Only bother validating the DB if every injection succeeded. */
	return ok ? test_db() : false;
}
/* Entry point: initializes the database layer and runs its test suite. */
int main(int argc, char **argv)
{
	(void)argc; /* command-line arguments are unused */
	(void)argv;

	db_init();
	test_db();

	return EXIT_SUCCESS;
}
/* Adds a new separate partition and loads all rules from database in shm */
dp_connection_list_p dp_add_connection(dp_head_p head)
{
	dp_connection_list_t *el;

	/* Reuse the existing connection for this partition, if any. */
	if ((el = dp_get_connection(&head->partition)) != NULL) {
		return el;
	}

	el = shm_malloc(sizeof(dp_connection_list_t));
	if (!el) {
		LM_ERR("No more shm mem\n");
		return NULL;
	}
	memset(el, 0, sizeof(dp_connection_list_t));

	/* create & init lock */
	if ((el->ref_lock = lock_init_rw()) == NULL) {
		LM_ERR("Failed to init lock\n");
		shm_free(el);
		return NULL;
	}

	/*Set table name*/
	el->table_name = head->dp_table_name;
	/*Set partition*/
	el->partition = head->partition;
	/*Set db_url*/
	el->db_url = head->dp_db_url;

	el->dp_db_handle = pkg_malloc(sizeof(db_con_t*));
	if (!el->dp_db_handle) {
		/* fix: this is a pkg allocation, and the shm chunk + lock
		 * allocated above must not leak on this path */
		LM_ERR("No more pkg mem\n");
		lock_destroy_rw(el->ref_lock);
		shm_free(el);
		return NULL;
	}
	*el->dp_db_handle = 0;

	/* *el->dp_db_handle is set to null at the end of test_db;
	 * no need to do it again here */
	if (test_db(el) != 0) {
		LM_ERR("Unable to test db\n");
		/* fix: release the pkg handle and the lock too, not just el */
		pkg_free(el->dp_db_handle);
		lock_destroy_rw(el->ref_lock);
		shm_free(el);
		return NULL;
	}

	/* Link the new partition at the head of the global connection list. */
	el->next = dp_conns;
	dp_conns = el;

	LM_DBG("Added dialplan partition [%.*s] table [%.*s].\n",
			head->partition.len, head->partition.s,
			head->dp_table_name.len, head->dp_table_name.s);
	return el;
}
/*
 * Empties the BIB database and the local session bookkeeping arrays,
 * then verifies that the database reports the now-empty state.
 */
static bool flush(void)
{
	log_debug("Flushing.");
	bib_flush(db);

	/* Mirror the flush in the expected-state arrays. */
	memset(session_instances, 0, sizeof(session_instances));
	memset(sessions, 0, sizeof(sessions));

	return test_db();
}
void build_data(const Options& opts) { std::ifstream synth_db(opts.synth_db); build_data_bin(synth_db, alphabet_synth, corpus_synth, labels_synth); std::ifstream test_db(opts.test_db); build_data_bin(test_db, tool::alphabet_test, tool::corpus_test, tool::labels_test); //tool::corpus_test = tool::corpus_test.testing_part(); consolidate_labels(tool::alphabet_synth, tool::labels_synth, tool::labels_all); consolidate_labels(tool::alphabet_test, tool::labels_test, tool::labels_all); }
// Smoke test for the LogManager: builds and populates a small table inside a
// throwaway database, brings up the NVM-WAL frontend logger in test mode, and
// drives one logical transaction (begin / insert / update / insert / commit)
// through the logging API.
TEST_F(LoggingTests, BasicLogManagerTest) {
  // Start from a clean, disabled logging state so this test controls the mode.
  peloton_logging_mode = LOGGING_TYPE_INVALID;
  auto &log_manager = logging::LogManager::GetInstance();
  log_manager.DropFrontendLoggers();
  log_manager.SetLoggingStatus(LOGGING_STATUS_TYPE_INVALID);
  // just start, write a few records and exit
  catalog::Schema *table_schema = new catalog::Schema(
      {ExecutorTestsUtil::GetColumnInfo(0), ExecutorTestsUtil::GetColumnInfo(1),
       ExecutorTestsUtil::GetColumnInfo(2),
       ExecutorTestsUtil::GetColumnInfo(3)});
  std::string table_name("TEST_TABLE");
  // Create table.
  bool own_schema = true;  // table takes ownership of table_schema
  bool adapt_table = false;
  storage::DataTable *table = storage::TableFactory::GetDataTable(
      12345, 123456, table_schema, table_name, 1, own_schema, adapt_table);
  // Register the table inside a database so tile-group lookups work.
  storage::Database test_db(12345);
  test_db.AddTable(table);
  catalog::Manager::GetInstance().AddDatabase(&test_db);
  // Populate 5 tuples under a real transaction before logging starts.
  concurrency::TransactionManager &txn_manager =
      concurrency::TransactionManagerFactory::GetInstance();
  txn_manager.BeginTransaction();
  ExecutorTestsUtil::PopulateTable(table, 5, true, false, false);
  txn_manager.CommitTransaction();
  // Switch to NVM write-ahead logging with synchronous commit.
  peloton_logging_mode = LOGGING_TYPE_NVM_WAL;
  log_manager.SetSyncCommit(true);
  EXPECT_FALSE(log_manager.ContainsFrontendLogger());
  log_manager.StartStandbyMode();
  log_manager.GetFrontendLogger(0)->SetTestMode(true);
  log_manager.StartRecoveryMode();
  // Block until the manager reports it has reached LOGGING status.
  log_manager.WaitForModeTransition(LOGGING_STATUS_TYPE_LOGGING, true);
  EXPECT_TRUE(log_manager.ContainsFrontendLogger());
  log_manager.SetGlobalMaxFlushedCommitId(4);
  concurrency::Transaction test_txn;  // NOTE(review): declared but never used
  cid_t commit_id = 5;  // one past the max flushed commit id set above
  log_manager.PrepareLogging();
  log_manager.LogBeginTransaction(commit_id);
  // One tuple location per operation, taken from distinct tile groups.
  ItemPointer insert_loc(table->GetTileGroup(1)->GetTileGroupId(), 0);
  ItemPointer delete_loc(table->GetTileGroup(2)->GetTileGroupId(), 0);
  ItemPointer update_old(table->GetTileGroup(3)->GetTileGroupId(), 0);
  ItemPointer update_new(table->GetTileGroup(4)->GetTileGroupId(), 0);
  log_manager.LogInsert(commit_id, insert_loc);
  log_manager.LogUpdate(commit_id, update_old, update_new);
  // NOTE(review): delete_loc is logged via LogInsert — presumably a
  // LogDelete call was intended here; confirm against the LogManager API.
  log_manager.LogInsert(commit_id, delete_loc);
  log_manager.LogCommitTransaction(commit_id);
  // TODO: Check the flushed commit id
  // since we are doing sync commit we should have reached 5 already
  //EXPECT_EQ(commit_id, log_manager.GetPersistentFlushedCommitId());
  log_manager.EndLogging();
}
/*
 * Entry point: runs the DB test (the bag test is currently disabled) with
 * an optional iteration count taken from argv[1] (default 10).
 */
int main(int argc, char *argv[])
{
	long numtrys = 10;

	setprogname(argv[0]);
	if (argc > 1)
		numtrys = strtol(argv[1], NULL, 0);

	Option.dir = "abc";

	if (0)
		test_bag(numtrys); /* disabled */
	if (1)
		test_db(numtrys);

	return 0;
}
static void test_generation (void) { for (iter = 0; iter < 20; iter++) { GNCPriceDB* db; g_message ("iter=%d", iter); session = qof_session_new (); db = get_random_pricedb (qof_session_get_book (session)); if (!db) { failure_args ("gnc_random_price_db returned NULL", __FILE__, __LINE__, "%d", iter); return; } if (gnc_pricedb_get_num_prices (db)) test_db (db); gnc_pricedb_destroy (db); qof_session_end (session); } }
/* Entry point: runs the database test suite and exits cleanly. */
int main()
{
	test_db();

	return 0;
}
int _tmain(int argc, _TCHAR* argv[]) { LOG_F("xTest main(), 测试 \n"); int arg = 1018; switch(arg) { case 1000: { test_util_ex(); }break; case 1: { test_util(); }break; case 2: { test_container(); }break; case 3: { test_thread(); }break; case 4: { test_net(true); }break; case 5: { test_boost(); }break; case 6: { test_kbe(); }break; case 1001: { test_db(); }break; case 1002: { test_math(); }break; case 1003: { test_design(); }break; case 1005: { //test_net(false); test_async(true); }break; case 1006: { test_lua(); }break; case 1007: { test_js(); }break; case 1008: { test_proto(); }break; case 1010: { test_graph(); }break; case 1011: { test_graph_d3d(); }break; case 1013: { test_cv(); }break; case 1015: { test_battle(); }break; case 1016: { test_hacker(); }break; case 1017: { test_yh(); }break; case 1018: { test_ai(); } case 1900: { test_tool(); }break; default: { MainEx(argc, argv); }break; } // { // int n = X::Init(NULL); // LOGD_F(" n = %d", n); // // X_HSER->Init(); // X_HSER->Start(); // // X_HSER->Stop(); // X_HSER->Destroy(); // } // { // // [4/26/2015 Administrator] // HINSTANCE hnst=LoadLibrary(_T("xService.dll")); // FreeLibrary(hnst); // // HINSTANCE hnst2=LoadLibrary(_T("xService.dll")); // FreeLibrary(hnst); // // } LOG_F("xTest main(), end"); while(true) { X::Sleep_f(1); } return 0; }
/*
 * Exercises bib_rm_range() against the injected test sessions:
 * removes sessions by BIB entry, then by address/port ranges, and after each
 * removal clears the matching slots in the expected-state array before
 * validating the database with test_db().
 *
 * NOTE(review): the four indices of sessions[a][b][c][d] appear to mirror
 * the four injection arguments used by insert_test_sessions() — confirm
 * against inject()'s definition.
 */
static bool simple_session(void)
{
	struct ipv4_range range;
	bool success = true;

	if (!insert_test_sessions())
		return false;

	/* ---------------------------------------------------------- */

	/* Remove the sessions behind one specific BIB entry:
	 * 203.0.113.1 (0xcb007101) port 1, exact /32 match. */
	log_debug("Deleting sessions by BIB.");
	range.prefix.address.s_addr = cpu_to_be32(0xcb007101u);
	range.prefix.len = 32;
	range.ports.min = 1;
	range.ports.max = 1;
	bib_rm_range(db, PROTO, &range);
	sessions[1][1][2][2] = NULL;
	sessions[1][1][2][1] = NULL;
	sessions[1][1][1][1] = NULL;
	sessions[1][1][1][2] = NULL;
	success &= test_db();

	/* ---------------------------------------------------------- */

	/* Removing the same range again must be a harmless no-op. */
	log_debug("Deleting again.");
	bib_rm_range(db, PROTO, &range);
	success &= test_db();

	/* ---------------------------------------------------------- */

	/* Start over from a full database. */
	success &= flush();
	if (!insert_test_sessions())
		return false;

	/* ---------------------------------------------------------- */

	/* /30 covers all test addresses; ports limited to 0-1. */
	log_debug("Deleting by range (all addresses, lower ports).");
	range.prefix.address.s_addr = cpu_to_be32(0xcb007100u);
	range.prefix.len = 30;
	range.ports.min = 0;
	range.ports.max = 1;
	bib_rm_range(db, PROTO, &range);
	sessions[2][1][2][1] = NULL;
	sessions[2][1][1][1] = NULL;
	sessions[1][1][2][2] = NULL;
	sessions[2][1][2][2] = NULL;
	sessions[2][1][1][2] = NULL;
	sessions[1][1][2][1] = NULL;
	sessions[1][1][1][1] = NULL;
	sessions[1][1][1][2] = NULL;
	success &= test_db();

	/* ---------------------------------------------------------- */

	/* Start over from a full database. */
	success &= flush();
	if (!insert_test_sessions())
		return false;

	/* ---------------------------------------------------------- */

	/* /31 covers only the lower two addresses; all ports. */
	log_debug("Deleting by range (lower addresses, all ports).");
	range.prefix.address.s_addr = cpu_to_be32(0xcb007100u);
	range.prefix.len = 31;
	range.ports.min = 0;
	range.ports.max = 65535;
	bib_rm_range(db, PROTO, &range);
	sessions[1][2][2][2] = NULL;
	sessions[1][1][2][2] = NULL;
	sessions[1][2][1][1] = NULL;
	sessions[1][1][2][1] = NULL;
	sessions[1][2][2][1] = NULL;
	sessions[1][2][1][2] = NULL;
	sessions[1][1][1][1] = NULL;
	sessions[1][1][1][2] = NULL;
	success &= test_db();

	/* ---------------------------------------------------------- */

	success &= flush();
	return success;
}
// Generates a synthetic shape-classification dataset: draws random red
// squares and green circles on a black 200x200 image, extracts a binarized
// 15x15 patch around every pixel (label 0 = background, 1 = circle,
// 2 = square), optionally balances/shuffles the samples, and writes them as
// caffe::Datum records into "<DB_NAME>_train" and "<DB_NAME>_test" databases
// split according to FLAGS_split.
int main(int argc, char* argv[]) {
  ::google::InitGoogleLogging(argv[0]);
#ifndef GFLAGS_GFLAGS_H_
  namespace gflags = google;
#endif
  gflags::SetUsageMessage("Generates random training data samples and puts it in\n"
        "the leveldb/lmdb format used as input for Caffe.\n"
        "Usage:\n"
        " generate-random-shape-training-data [FLAGS] DB_NAME\n");
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  // Exactly one positional argument: the output DB base name.
  if (argc != 2) {
    gflags::ShowUsageWithFlagsRestrict(argv[0],
        "generate-random-shape-training-data");
    return 1;
  }
  // seed random generator
  std::srand(std::time(NULL));
  // generate random data
  typedef float tInput;
  typedef std::vector<tInput> tData;           // one flattened patch
  typedef int tLabel;                          // 0=bg, 1=circle, 2=square
  typedef std::pair<tData, tLabel> tSample;
  typedef std::vector<tSample> tSamples;
  tSamples samples;
  {
    // create random input
    // generate image
    const int rows = 200;
    const int cols = 200;
    cv::Mat in_image_bgr = cv::Mat::zeros(rows, cols, CV_8UC3);
    // square (red)
    // NOTE(review): x is clamped against `rows` and y against `cols`
    // (apparently swapped); harmless here only because rows == cols.
    for (int i = 0; i < 3; i++) {
      const int x1 = std::min(std::max(((float)std::rand() / RAND_MAX)
          * in_image_bgr.cols, (float)20), (float)rows-20);
      const int y1 = std::min(std::max(((float)std::rand() / RAND_MAX)
          * in_image_bgr.rows, (float)20), (float)cols-20);
      const int x2 = x1 + 8;
      const int y2 = y1 + 8;
      cv::rectangle(in_image_bgr, cv::Point(x1, y1), cv::Point(x2, y2),
          cv::Scalar(0, 0, 255), -1, 8);
    }
    // circle (green)
    for (int i = 0; i < 3; i++) {
      const int x1 = std::min(std::max(((float)std::rand() / RAND_MAX)
          * in_image_bgr.cols, (float)20), (float)rows-20);
      const int y1 = std::min(std::max(((float)std::rand() / RAND_MAX)
          * in_image_bgr.rows, (float)20), (float)cols-20);
      cv::circle(in_image_bgr, cv::Point(x1, y1), 5, cv::Scalar(0, 255, 0),
          -1, 8);
    }
    // Show the generated source image (window stays up until waitKey below).
    cv::namedWindow("in_image_bgr", CV_WINDOW_AUTOSIZE);
    cv::moveWindow("in_image_bgr", 20, 20);
    cv::imshow("in_image_bgr", in_image_bgr);
    std::cout << "generating training data from image..." << std::endl;
    // generate training data from input image
    const int kernel = 15;               // patch side length
    const int h_kernel = kernel / 2;     // half-width; keeps patches in-bounds
    int iBackgroundCount = 0;
    for (int y = h_kernel; y < in_image_bgr.rows - h_kernel - 1; y++) {
      for (int x = h_kernel; x < in_image_bgr.cols - h_kernel - 1; x++) {
        const cv::Vec3b vec = in_image_bgr.at<cv::Vec3b>(y, x);
        if (vec[1] > 0) // circle
        {
          const tLabel label = 1;
          // Binarize the 15x15 neighborhood: 1 where any channel is lit.
          tData data;
          for (int yk = y - h_kernel; yk < y + h_kernel + 1; yk++) {
            for (int xk = x - h_kernel; xk < x + h_kernel + 1; xk++) {
              const cv::Vec3b veck = in_image_bgr.at<cv::Vec3b>(yk, xk);
              if (veck[1] > 0 || veck[2] > 0 || veck[0] > 0) // binarize
                data.push_back(1.0f);
              else
                data.push_back(0.0f);
            }
          }
          samples.push_back(std::make_pair(data, label));
        }
        else if (vec[2] > 0) // square
        {
          const tLabel label = 2;
          tData data;
          for (int yk = y - h_kernel; yk < y + h_kernel + 1; yk++) {
            for (int xk = x - h_kernel; xk < x + h_kernel + 1; xk++) {
              const cv::Vec3b veck = in_image_bgr.at<cv::Vec3b>(yk, xk);
              if (veck[1] > 0 || veck[2] > 0 || veck[0] > 0) // binarize
                data.push_back(1.0f);
              else
                data.push_back(0.0f);
            }
          }
          samples.push_back(std::make_pair(data, label));
        }
        else // background
        {
          // if balance is true, the background samples that are added are
          // mostly around the squares and circles and some (but not all)
          // others
          if (FLAGS_balance == true) {
            // A background pixel is "near an object" if any 4-neighbour has
            // a red or green channel lit.
            bool bHasNeighbourObject = false;
            const cv::Vec3b vec_h1 = in_image_bgr.at<cv::Vec3b>(y, x-1);
            const cv::Vec3b vec_h2 = in_image_bgr.at<cv::Vec3b>(y, x+1);
            const cv::Vec3b vec_v1 = in_image_bgr.at<cv::Vec3b>(y-1, x);
            const cv::Vec3b vec_v2 = in_image_bgr.at<cv::Vec3b>(y+1, x);
            if (vec_h1[1] > 0 || vec_h1[2] > 0 ||
                vec_v1[1] > 0 || vec_v1[2] > 0 ||
                vec_h2[1] > 0 || vec_h2[2] > 0 ||
                vec_v2[1] > 0 || vec_v2[2] > 0) {
              bHasNeighbourObject = true;
            }
            iBackgroundCount++;
            // only take a part of the background and when background is near
            // objects
            if (bHasNeighbourObject == false && iBackgroundCount % 50 != 0)
              continue;
          }
          const tLabel label = 0;
          tData data;
          for (int yk = y - h_kernel; yk < y + h_kernel + 1; yk++) {
            for (int xk = x - h_kernel; xk < x + h_kernel + 1; xk++) {
              const cv::Vec3b veck = in_image_bgr.at<cv::Vec3b>(yk, xk);
              if (veck[1] > 0 || veck[2] > 0 || veck[0] > 0) // binarize
                data.push_back(1.0f);
              else
                data.push_back(0.0f);
            }
          }
          samples.push_back(std::make_pair(data, label));
        }
      }
    }
  }
  // count classes and number of occurences
  typedef std::map<int, int> tCounts;
  tCounts counts;
  for (tSamples::const_iterator itrSample = samples.begin()
      ; itrSample != samples.end()
      ; ++itrSample) {
    counts[itrSample->second]++;
  }
  // show counts
  for (tCounts::const_iterator itr = counts.begin()
      ; itr != counts.end()
      ; ++itr) {
    std::cout << "class: " << itr->first << " count: " << itr->second
        << std::endl;
  }
  // shuffle the data
  if (FLAGS_shuffle == true) {
    // randomly shuffle samples
    std::random_shuffle(samples.begin(), samples.end());
  }
  // Create new train and test DB
  boost::scoped_ptr<caffe::db::DB> train_db(caffe::db::GetDB(FLAGS_backend));
  std::string dbTrainName = argv[1];
  dbTrainName += "_train";
  train_db->Open(dbTrainName.c_str(), caffe::db::NEW);
  boost::scoped_ptr<caffe::db::Transaction> train_txn(
      train_db->NewTransaction());
  boost::scoped_ptr<caffe::db::DB> test_db(caffe::db::GetDB(FLAGS_backend));
  std::string dbTestName = argv[1];
  dbTestName += "_test";
  test_db->Open(dbTestName.c_str(), caffe::db::NEW);
  boost::scoped_ptr<caffe::db::Transaction> test_txn(test_db->NewTransaction());
  // divide the train/test data, determine spliting tactic
  // (split > 0: `split` train per 1 test; split < 0: |split| test per 1
  //  train; split == 0: everything to train)
  const int iSplitRate = FLAGS_split;
  int iNumberPutToTrain = 0;
  int iNumberPutToTest = 0;
  enum eNextSample {eNSTrain, eNSTest};
  eNextSample nextSample = eNSTrain; // always start with train samples
  // convert samples to caffe::Datum
  int iCount = 0, iCountTrain = 0, iCountTest = 0;
  for (tSamples::const_iterator itrSample = samples.begin()
      ; itrSample != samples.end()
      ; ++itrSample) {
    // extract label from sample
    const int iLabel = itrSample->second;
    // convert sample to protobuf Datum
    // Patch is stored as `channels` float values with height = width = 1.
    caffe::Datum datum;
    datum.set_channels(itrSample->first.size());
    datum.set_height(1);
    datum.set_width(1);
    datum.set_label(iLabel);
    for (tData::const_iterator itrInputData = itrSample->first.begin()
        ; itrInputData != itrSample->first.end()
        ; ++itrInputData) {
      datum.add_float_data(*itrInputData);
    }
    // write datum to db use the sample number as key for db
    std::string out;
    CHECK(datum.SerializeToString(&out));
    std::stringstream ss;
    ss << iCount;
    // put sample
    if (nextSample == eNSTrain) // always start with train samples
    {
      train_txn->Put(ss.str(), out);
      iNumberPutToTrain++;
      iCountTrain++;
    } else if (nextSample == eNSTest) {
      test_txn->Put(ss.str(), out);
      iNumberPutToTest++;
      iCountTest++;
    }
    // determine where next sample should go
    if (iSplitRate == 0) {
      nextSample = eNSTrain;
    } else if (iSplitRate < 0) {
      if (iNumberPutToTest == std::abs(iSplitRate)) {
        nextSample = eNSTrain;
        iNumberPutToTest = 0;
      } else {
        nextSample = eNSTest;
      }
    } else if (iSplitRate > 0) {
      if (iNumberPutToTrain == iSplitRate) {
        nextSample = eNSTest;
        iNumberPutToTrain = 0;
      } else {
        nextSample = eNSTrain;
      }
    }
    // every 1000 samples commit to db
    // NOTE(review): these trigger whenever a counter *sits* at a multiple of
    // 1000 (including 0 before the first put), re-committing an unchanged
    // transaction on consecutive iterations; appears harmless but wasteful —
    // confirm against caffe::db::Transaction semantics.
    if (iCountTrain % 1000 == 0) {
      train_txn->Commit();
      train_txn.reset(train_db->NewTransaction());
    }
    if (iCountTest % 1000 == 0) {
      test_txn->Commit();
      test_txn.reset(test_db->NewTransaction());
    }
    iCount++;
  }
  // commit the last unwritten batch
  if (iCountTrain % 1000 != 0) {
    train_txn->Commit();
  }
  if (iCountTest % 1000 != 0) {
    test_txn->Commit();
  }
  std::cout << "Total of " << iCount << " samples generated, put "
      << iCountTrain << " to TRAIN DB and " << iCountTest << " to TEST DB"
      << std::endl;
  // Block until a key is pressed so the preview window stays visible.
  cv::waitKey(0);
  return 0;
}
/* Adds a new separate partition and loads all rules from database in shm */
dp_connection_list_p dp_add_connection(dp_head_p head)
{
	dp_connection_list_t *el;
	int all_size;

	/* Reuse the existing connection for this partition, if any. */
	if ((el = dp_get_connection(&head->partition)) != NULL) {
		return el;
	}

	/* One shm chunk holds the struct plus the three strings it owns. */
	all_size = sizeof(dp_connection_list_t) + head->dp_table_name.len +
			head->partition.len + head->dp_db_url.len;
	el = shm_malloc(all_size);
	if (!el) {
		/* fix: collapsed the duplicated NULL check / double error log */
		LM_ERR("No more shm mem\n");
		return NULL;
	}
	memset(el, 0, all_size);

	/* create & init lock */
	if ((el->ref_lock = lock_init_rw()) == NULL) {
		LM_ERR("Failed to init lock\n");
		shm_free(el);
		return NULL;
	}

	/*Set table name*/
	el->table_name.s = (char *)el + sizeof(*el);
	el->table_name.len = head->dp_table_name.len;
	memcpy(el->table_name.s, head->dp_table_name.s, head->dp_table_name.len);

	/*Set partition*/
	el->partition.s = el->table_name.s + el->table_name.len;
	el->partition.len = head->partition.len;
	memcpy(el->partition.s, head->partition.s, head->partition.len);

	/*Set db_url*/
	el->db_url.s = el->partition.s + el->partition.len;
	el->db_url.len = head->dp_db_url.len;
	memcpy(el->db_url.s, head->dp_db_url.s, head->dp_db_url.len);

	el->dp_db_handle = pkg_malloc(sizeof(db_con_t*));
	if (!el->dp_db_handle) {
		/* fix: this is a pkg allocation, and the shm chunk + lock
		 * allocated above must not leak on this path */
		LM_ERR("No more pkg mem\n");
		lock_destroy_rw(el->ref_lock);
		shm_free(el);
		return NULL;
	}
	*el->dp_db_handle = 0;

	if (test_db(el) != 0) {
		LM_ERR("Unable to test db\n");
		/* fix: release the pkg handle and the lock too, not just el */
		pkg_free(el->dp_db_handle);
		lock_destroy_rw(el->ref_lock);
		shm_free(el);
		return NULL;
	}
	/* fix: dropped the redundant second `*el->dp_db_handle = 0;` —
	 * test_db already leaves the handle NULL */

	/* Link the new partition at the head of the global connection list. */
	el->next = dp_conns;
	dp_conns = el;

	LM_DBG("Added dialplan partition [%.*s] table [%.*s].\n",
			head->partition.len, head->partition.s,
			head->dp_table_name.len, head->dp_table_name.s);
	return el;
}