int main(int argc, char* argv[]) { Block *blocks; StreetSelecter *streetSelecter; int citySize, blockCount, numTrips, numWidened; Trip *trips, widenedBlocks[100]; char filename[80], command[80]; CPUTimer ct; if(argc != 2) { cout << "usage: RunCity.out CityFilename\n"; return 1; } ifstream inf(argv[1]); strcpy(filename, argv[1]); strtok(filename, "-"); strtok(NULL, "-"); numTrips = atoi(strtok(NULL, "-")); trips = new Trip[numTrips]; blocks = readFile(inf, citySize, blockCount, trips, numTrips); ct.reset(); streetSelecter = new StreetSelecter(blocks, blockCount, citySize, numTrips); delete [] blocks; streetSelecter->select(trips, numTrips, widenedBlocks, numWidened); // fills widenedBlocks cout << "CPU Time: " << ct.cur_CPUTime() << endl; writeSolution(argv[1], widenedBlocks, numWidened); // writes widenedBlocks to filename.ans sprintf(command,"./RunTrips.out %s", argv[1]); system(command); // runs trips of file using original and widened streets return 0; }
int main() { char a[1000]; cout << "Filename >> "; cin >> a; int choice; CPUTimer ct; do { cout << endl; choice = getChoice(); ct.reset(); switch (choice) { case 1: RunList(a); break; case 2: RunCursorList(a); break; case 3: RunStackAr(a); break; case 4: RunStackLi(a); break; case 5: RunQueueAr(a); break; case 6: RunSkipList(a); break; } cout << "CPU time: " << ct.cur_CPUTime() << endl ; } while(choice > 0); }
// Fills one prefetch batch: reshapes the batch to the geometry of the next
// sample, then copies batch_size transformed samples (and their labels)
// from the in-memory samples_ list, wrapping — and optionally shuffling —
// when the list is exhausted.  Timing stats are emitted via DLOG.
void MyDataLayer<Dtype>::load_batch(Batch<Dtype>* batch){
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  MyDataParameter my_data_param = this-> layer_param_.my_data_param();
  // Get batch size
  const int batch_size = my_data_param.batch_size();
  // Reshape according to the first image of each batch
  // on single input batches allows for inputs of varying dimension
  cv::Mat cv_img = samples_[lines_id_].first;
  CHECK(cv_img.data) << "Could not load "<<lines_id_<<" sample";
  // Use data_transformer to infer the expected blob shape from a cv_img
  vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
  this->transformed_data_.Reshape(top_shape);
  // Reshape batch according to the batch_size
  top_shape[0] = batch_size;
  batch->data_.Reshape(top_shape);
  Dtype* prefetch_data = batch->data_.mutable_cpu_data();
  Dtype* prefetch_label= batch->label_.mutable_cpu_data();
  // datum scales
  int samples_size = samples_.size();
  for(int item_id=0;item_id<batch_size;++item_id){
    // get a blob
    timer.Start();
    CHECK_GT(samples_size, lines_id_);
    cv::Mat sample = samples_[lines_id_].first;   // shallow cv::Mat header copy; pixel data is shared
    CHECK(sample.data) << "Could not load "<<lines_id_<<" sample";
    read_time += timer.MicroSeconds();
    timer.Start();
    // apply transformations to the image
    // transformed_data_ is pointed at this item's slot so Transform writes in place.
    int offset = batch->data_.offset(item_id);
    this->transformed_data_.set_cpu_data(prefetch_data + offset);
    this->data_transformer_->Transform(sample,&(this->transformed_data_));
    trans_time += timer.MicroSeconds();
    prefetch_label[item_id] = samples_[lines_id_].second;
    // go to the next sample
    lines_id_++;
    if(lines_id_>=samples_size){
      // We have reached the end. restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_=0;
      if(my_data_param.shuffle()){
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
int main( ) { char filename[FILENAME_MAX]; int choice; CPUTimer ct; cout << "Filename >> "; cin >> filename; do { choice = getChoice(); ct.reset(); switch( choice ) { case 1: RunList( filename ); break; case 2: RunCursorList( filename ); break; case 3: RunStackAr( filename ); break; case 4: RunStackLi( filename ); break; case 5: RunQueueAr( filename ); break; case 6: RunSkipList( filename ); break; } cout << "CPU time: " << ct.cur_CPUTime() << endl; } while( choice > 0 ); return 0; }
/* Duplicates str on the heap and inserts the copy into the binary search
 * tree, timing only the bst_insert call itself.  Returns OP_OK on a new
 * insertion, PREV_INSERTED when the key already existed (the copy is
 * freed), or BST_FULL otherwise.  Exits the program if memory runs out. */
int insert(bst * h, char * str){
   char * copy = (char *) malloc(strlen(str) + 1);
   if(copy == NULL){
      printf("ERROR: No memory for new string!\n");
      exit(1);
   }
   strcpy(copy, str);

   /* Only the tree operation is measured, not the string duplication. */
   insTime.start();
   bst_ret status = bst_insert(h, copy);
   insTime.stop();

   if(status == bst_NoMem){
      printf("ERROR: No memory for new node!\n");
      exit(1);
   }
   if(status == bst_Ok)
      return OP_OK;
   if(status == bst_PrevInserted){
      free(copy);   /* duplicate key: the tree did not take our copy */
      return PREV_INSERTED;
   }
   return BST_FULL;   /* the only remaining outcome */
}
int main(int argc, char* argv[]) { short **map, **map2; int width, cityCount, pathCounts[50]; Coordinates *cityPos; Router *router; CPUTimer ct; readFile(argv[1], &map, &map2, &width, &cityPos, &cityCount); Coordinates **paths = new Coordinates*[cityCount]; for(int i = 0; i < cityCount; i++) { paths[i] = new Coordinates[width * width]; // maximum number of edges possible pathCounts[i] = 0; } ct.reset(); router = new Router(map, width); for(int i = 0; i < width; i++) delete [] map[i]; delete [] map; router->findRoutes((const Coordinates*) cityPos, cityCount, paths, pathCounts); double time = ct.cur_CPUTime(); int trainTime = checkRoutes(map2, cityPos, cityCount, paths, pathCounts, width, argc); cout << "CPU time: " << time << " Train time: " << trainTime << endl; return 0; }
int main(){ string filename; cout << "Filename >> "; cin >> filename; CPUTimer ct; //cpu timer instance int choice; do { choice = getChoice(); ct.reset(); switch (choice) { case 1: RunList(filename); break; case 2: RunCursorList(filename); break; case 3: RunStackAr(filename); break; case 4: RunStackLi(filename); break; case 5: RunQueueAr(filename); break; case 6: RunSkipList(filename); break; } //switch cout << "CPU time: " << ct.cur_CPUTime() << endl; } while(choice > 0); }
int main(int argc, char **argv) { int operationCount, count; short scores[600]; const Operation *operations; operations = readFile(argv[1], &operationCount); CPUTimer ct; ct.reset(); GradeBook *gradeBook = new GradeBook(); if(argv[2][0] != '0') runTests(gradeBook, operations, operationCount); else { for(int i = 0; i < operationCount; i++) { switch(operations[i].type) { case LIST_STUDENT : gradeBook->listStudent(operations[i].CRN, operations[i].SID, &count, scores); break; case ADD_STUDENT : gradeBook->addStudent(operations[i].CRN, operations[i].SID); break; case REMOVE_STUDENT : gradeBook->removeStudent(operations[i].CRN, operations[i].SID); break; case UPDATE: gradeBook->update(operations[i].CRN, operations[i].title, operations[i].SID, operations[i].score); break; case LIST_ASSIGNMENT : gradeBook->listAssignment(operations[i].CRN, operations[i].title, &count, scores); break; case ENTER_SCORES : gradeBook->enterScores(operations[i].CRN, operations[i].title, operations[i].scores); break; case ADD_ASSIGNMENT : gradeBook->addAssignment(operations[i].CRN, operations[i].title, operations[i].maxScore); break; case ADD_COURSE : gradeBook->addCourse(operations[i].CRN); for (int j = 0; j < operations[i].count ; j++ ) gradeBook->addStudent(operations[i].CRN, operations[i].SIDs[j]); break; } // switch } // for i } // else no tests cout << "CPU Time: " << ct.cur_CPUTime() << endl; return 0; } // main()
// Benchmarks naive vs. vectorised PLP label propagation over the
// tissue-specific subgraphs, optionally printing cluster-size histograms,
// and finally writes "naive;fast" timings to stdout.
void benchmark_ts_PLP(tsppi::TsPpiGraph& tsppi, bool printHist=false)
{
    CPUTimer stopwatch;

    LOG("Start Benchmark: Naive");
    stopwatch.start();
    std::vector<NetworKit::Partition > partitions = tsppi::algo::subgraph_PLP(tsppi.subgraphs);
    stopwatch.stop();
    double naive_time = stopwatch.getTime();
    LOG("Time for Naive: " << stopwatch.getTime() << " s");

    LOG("Start Benchmark: Fast");
    stopwatch.start();
    std::vector<NetworKit::Partition > partitions_2 = tsppi::algo::subgraph_PLP_vec(tsppi.subgraphs);
    stopwatch.stop();
    double fast_time = stopwatch.getTime();
    LOG("Time for Fast: " << stopwatch.getTime() << " s");

    // print histogram
    if (printHist)
    {
        std::cout << "Histogram of cluster sizes for Naive:" << std::endl;
        clusterSizeHist(partitions);
        std::cout << "Histogram of cluster sizes for fast:" << std::endl;
        clusterSizeHist(partitions_2);
    }

    // print timings
    std::cout << naive_time << ";" << fast_time;
}
/* Looks str up in the binary search tree, timing only the bst_search call.
 * Returns OP_OK when the key is present, NOT_FOUND otherwise. */
int search(bst * h, char * str){
   searchTime.start();
   bst_ret outcome = bst_search(h, str);
   searchTime.stop();
   return (outcome == bst_Found) ? OP_OK : NOT_FOUND;
}
/* Removes str from the hash table, timing only the hash_remove call.
 * Returns OP_OK on removal, NOT_FOUND when the key was absent. */
int _delete(hash * h, char * str){
   remTime.start();
   hash_ret outcome = hash_remove(h, str);
   remTime.stop();
   return (outcome == hash_NotFound) ? NOT_FOUND : OP_OK;
}
// Prefetch thread body: fills prefetch_data_/prefetch_label_ with
// batch_size transformed images read from the lines_ list, wrapping (and
// optionally shuffling) at the end of the list.  Timing stats go to DLOG.
void ImageDataLayer<Dtype>::InternalThreadEntry() {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(this->prefetch_data_.count());
  CHECK(this->transformed_data_.count());
  Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* top_label = this->prefetch_label_.mutable_cpu_data();
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  const int new_height = image_data_param.new_height();
  const int new_width = image_data_param.new_width();
  const bool is_color = image_data_param.is_color();
  string root_folder = image_data_param.root_folder();
  // datum scales
  const int lines_size = lines_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, new_height, new_width, is_color);
    if (!cv_img.data) {
      // NOTE(review): a failed read skips this slot WITHOUT advancing
      // lines_id_, so the same bad file is retried on the next item and the
      // batch entry keeps whatever data was already there — confirm intended.
      continue;
    }
    read_time += timer.MicroSeconds();
    timer.Start();
    // Apply transformations (mirror, crop...) to the image
    int offset = this->prefetch_data_.offset(item_id);
    this->transformed_data_.set_cpu_data(top_data + offset);
    this->data_transformer_.Transform(cv_img, &(this->transformed_data_));
    trans_time += timer.MicroSeconds();
    top_label[item_id] = lines_[lines_id_].second;
    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
/* Looks str up in the hash table, timing only the hash_search call.
 * Returns OP_OK when the key is present, NOT_FOUND otherwise. */
int search(hash * h, char * str){
   searchTime.start();
   hash_ret outcome = hash_search(h, str);
   searchTime.stop();
   return (outcome == hash_Found) ? OP_OK : NOT_FOUND;
}
/* Removes str from the binary search tree, timing only the bst_remove
 * call.  Returns OP_OK on removal, NOT_FOUND when the key was absent. */
int _delete(bst * h, char * str){
   remTime.start();
   bst_ret outcome = bst_remove(h, str);
   remTime.stop();
   return (outcome == bst_NotFound) ? NOT_FOUND : OP_OK;
}
// Reads a genealogy file (header: generations-pairs-queryCount), builds a
// FamilyTree, times the ancestor queries, then diffs every answer against
// the answer key and prints any disagreement in full detail.
int main(int argc, char* argv[])
{
   ifstream inf(argv[1]);
   int generations, pairs, queryCount, familyCount;
   char dummy;   // consumes the separator characters in the header
   inf >> generations >> dummy >> pairs >> dummy >> queryCount;
   inf.ignore(10, '\n');
   // NOTE(review): fixed capacity of 200000 families — readFamilies must not exceed it; confirm.
   Family *families = new Family[200000];
   Query *queries = new Query[queryCount];
   Person *answers = new Person[queryCount];
   Person *answerKeys = new Person[queryCount];
   readQueries(inf, queries, answerKeys, queryCount);
   familyCount = readFamilies(inf, families);
   CPUTimer ct;
   ct.reset();
   FamilyTree *familyTree = new FamilyTree(families, familyCount);
   // Raw family records freed right away — presumably FamilyTree copies
   // what it needs in its constructor; verify against FamilyTree.
   delete [] families;
   familyTree->runQueries(queries, answers, queryCount);
   cout << "CPU Time: " << ct.cur_CPUTime() << endl;
   // Verification: a key year of -1 means "no common ancestor exists".
   for(int i = 0; i < queryCount; i++)
      if(answerKeys[i].year == -1)
      {
         if(answers[i].year != -1)
         {
            cout << "You found an ancestor when there was none on query #" << i << endl;
            cout << "Descendent 1: " << queries[i].person1.year << ' ' << queries[i].person1.lastName << ',' << queries[i].person1.firstName << endl;
            cout << "Descendent 2: " << queries[i].person2.year << ' ' << queries[i].person2.lastName << ',' << queries[i].person2.firstName << endl;
            cout << "Your answer:" << answers[i].year << ' ' << answers[i].lastName << ',' << answers[i].firstName << endl;
         }
      }
      else // An ancestor should be found
         // Every field (year, names, gender) must match the key exactly.
         if(answers[i].year != answerKeys[i].year || strcmp(answers[i].lastName, answerKeys[i].lastName) != 0 || strcmp(answers[i].firstName, answerKeys[i].firstName) != 0 || answers[i].gender != answerKeys[i].gender)
         {
            cout << "Disagreement on query #" << i << endl;
            cout << "Descendent 1: " << queries[i].person1.year << ' ' << queries[i].person1.lastName << ',' << queries[i].person1.firstName << endl;
            cout << "Descendent 2: " << queries[i].person2.year << ' ' << queries[i].person2.lastName << ',' << queries[i].person2.firstName << endl;
            cout << "Proper answer: " << answerKeys[i].year << ' ' << answerKeys[i].lastName << ',' << answerKeys[i].firstName << endl;
            cout << "Your answer:" << answers[i].year << ' ' << answers[i].lastName << ',' << answers[i].firstName << endl;
         }
   return 0;
} // main()
// Times the defragmentation of the disk image named on the command line,
// then reports CPU time, disk accesses, and peak RAM before validating
// the resulting disk layout.
int main(int argc, char *argv[])
{
   DiskDrive diskDrive;
   CPUTimer ct;

   diskDrive.readFile(argv[1]);
   currentRAM = maxRAM = 0;        // reset the global simulated-RAM counters
   ct.reset();
   // The constructor performs the whole defragmentation; the object itself
   // is intentionally not kept.
   new Defragmenter(&diskDrive);
   cout << "CPU Time: " << ct.cur_CPUTime()
        << " Disk accesses: " << diskDrive.getDiskAccesses()
        << " RAM: " << maxRAM << endl;
   diskDrive.check();
   return 0;
} // main
// Fills one prefetch batch with raw binary blobs: each listed file is read
// directly into its slot of the batch blob (shape taken from top_shape_),
// together with its label, wrapping and optionally shuffling at list end.
void BinaryDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  static int time_idx = 0;   // NOTE(review): never used in this function — confirm before removing
  CPUTimer timer;
  CHECK(batch->data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  string root_folder = image_data_param.root_folder();
  const int batch_size = this->layer_param_.image_data_param().batch_size();
  const vector<int> & top_shape = this->top_shape_;
  // Reshape batch according to the batch_size.
  batch->data_.Reshape(top_shape);
  Dtype* prefetch_data = batch->data_.mutable_cpu_data();
  Dtype* prefetch_label = batch->label_.mutable_cpu_data();
  // datum scales
  const int lines_size = lines_.size();
  // Elements per item: product of the non-batch dimensions of top_shape.
  const int count = top_shape[1] * top_shape[2] * top_shape[3];
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    int offset = batch->data_.offset(item_id);
    // Read the file straight into this item's slot of the batch blob.
    int ret = ReadBinaryBlob(root_folder + lines_[lines_id_].first, prefetch_data + offset, count);
    read_time += timer.MicroSeconds();
    CHECK(ret == 0) << "Could not load " << lines_[lines_id_].first;
    prefetch_label[item_id] = lines_[lines_id_].second;
    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
}
int main (int argc, char** argv) { CPUTimer ct; int choice; char filename[79]; cout << "Filename: "; cin >> filename; do { choice = getChoice(); ct.reset(); switch (choice) //Switch statement based on user's choice { case 1: RunList(filename); break; case 2: RunCursorList(filename); break; case 3: RunStackAr(filename); break; case 4: RunStackLi(filename); break; case 5: RunQueueAr(filename); break; case 6: RunSkipList(filename); break; } // end switch cout << "CPU time: " << ct.cur_CPUTime() << endl; } while (choice > 0); //end doWhile return 0; }//main
// Prefetch thread body: reshapes prefetch_data_ to the geometry of the
// datum currently under the DB cursor, then transforms batch_size
// consecutive datums (copying labels when enabled), seeking back to the
// first record when the cursor runs out.  Timing stats go to DLOG.
void DataLayer<Dtype>::InternalThreadEntry() {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(this->prefetch_data_.count());
  CHECK(this->transformed_data_.count());
  // Reshape according to the first datum of each batch
  // on single input batches allows for inputs of varying dimension.
  const int batch_size = this->layer_param_.data_param().batch_size();
  Datum datum;
  datum.ParseFromString(cursor_->value());
  // Use data_transformer to infer the expected blob shape from datum.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
  this->transformed_data_.Reshape(top_shape);
  // Reshape prefetch_data according to the batch_size.
  top_shape[0] = batch_size;
  this->prefetch_data_.Reshape(top_shape);
  Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables
  if (this->output_labels_) {
    top_label = this->prefetch_label_.mutable_cpu_data();
  }
  timer.Start();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a datum (shadows the shape-probe datum above; the cursor has not
    // advanced yet, so the first iteration re-parses the same record)
    Datum datum;
    datum.ParseFromString(cursor_->value());
    read_time += timer.MicroSeconds();
    timer.Start();
    // Apply data transformations (mirror, scale, crop...)
    int offset = this->prefetch_data_.offset(item_id);
    this->transformed_data_.set_cpu_data(top_data + offset);
    this->data_transformer_->Transform(datum, &(this->transformed_data_));
    // Copy label.
    if (this->output_labels_) {
      top_label[item_id] = datum.label();
    }
    trans_time += timer.MicroSeconds();
    timer.Start();
    // go to the next item.
    cursor_->Next();
    if (!cursor_->valid()) {
      DLOG(INFO) << "Restarting data prefetching from start.";
      cursor_->SeekToFirst();
    }
  }
  timer.Stop();
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
int main(int argc, char** argv) { int numTrips, numFlights, numCities; Flight *flights, *flights2; Trip *trips; readFile(&flights, &trips, &numTrips, &numFlights, &numCities, argv[1], &flights2); Itinerary *itineraries = new Itinerary[numTrips]; CPUTimer ct; Router *router = new Router(numCities, numFlights, flights2); delete [] flights2; for(int i = 0 ; i < numTrips; i++) router->findRoute((const Trip*) &trips[i], &itineraries[i]); cout << "CPU Time: " << ct.cur_CPUTime(); checkRoutes(flights, trips, numTrips, numFlights, itineraries, argc, argv); return 0; } // main()
// Replays a music-sales operation log (argv[1]) against a Sales structure
// built from the track file, checks each operation's returned trackID
// against the expected one, and reports CPU time and peak simulated RAM.
int main(int argc, char** argv)
{
   int numOperations, numInserts;
   const char *result;
   Operation *operations;
   CPUTimer ct;
   initializeNew(); // determines if malloc uses 1 or 2 ints for size.
   // NOTE(review): fixed capacity of 1e6 tracks — readTrackFile must not exceed it.
   SongInfo *songInfos = new SongInfo[1000000];
   readTrackFile(songInfos);
   SongInfo2 *songInfos2 = readSalesFile(argv[1], &numInserts, &numOperations, songInfos, &operations);
   ct.reset();
   Sales *sales = new Sales(songInfos2, numInserts);
   delete [] songInfos2;
   maxRAM = currentRAM; // eliminated the songInfos2 size
   for(int i = 0; i < numOperations; i++)
   {
      // if(i == 465)
      //    cout << i << endl;
      if(operations[i].operation == 'A')
      {
         // 'A' = artist listing; the last index is presumably the expected
         // resulting track — confirm against readSalesFile's format.
         result = sales->artistList( songInfos[operations[i].indices[0]].artist, operations[i].indices[1]);
         checkResult(result, songInfos[operations[i].indices[2]].trackID, i);
      } // if artistList
      else // purchase
      {
         result = sales->purchase( songInfos[operations[i].indices[0]].song, songInfos[operations[i].indices[0]].artist, songInfos[operations[i].indices[1]].song, songInfos[operations[i].indices[1]].artist, operations[i].indices[2]);
         checkResult(result, songInfos[operations[i].indices[3]].trackID, i);
      } // else purchase
   } // for each operation
   cout << "CPU Time: " << ct.cur_CPUTime() << " maxRAM: " << maxRAM << endl;
   return 0;
} // main()
void run_all_periodic_test(int nx, int ny, int nz, float hx, float hy, float hz, double tol) { BoundaryConditionSet bc; set_bc(bc, BC_PERIODIC, 0); Sol_PCGPressure3DDeviceD solver; Grid3DDeviceD rhs, coeff; init_rhs(rhs, nx, ny, nz, hx, hy, hz, -1, false); // init to sin waves, no axis init_coeff(coeff, nx, ny, nz, hx, hy, hz); init_solver(solver, rhs, coeff, bc, nx, ny, nz, hx, hy, hz); init_search_vector(solver, nx, ny, nz, false); // init to zero double residual; CPUTimer timer; timer.start(); UNITTEST_ASSERT_TRUE(solver.solve(residual,tol,1000)); timer.stop(); printf("%f sec\n", timer.elapsed_sec()); UNITTEST_ASSERT_EQUAL_DOUBLE(residual, 0, tol); }
// Exercises Quociente::FindFor over every pair (x, y) with |x|,|y| < n_itrs
// (x != y) and every k in [1, n_itrs), where n_itrs comes from argv[1]
// (default MAX).  Compile-time switches: TIME adds per-trial and total
// timing, SHOW_QS prints each computed quotient.
int main( int argc, const char * argv[] )
{
   int n_itrs = MAX;
#ifdef SHOW_QS
   mpz_class quoc;   // GMP arbitrary-precision result holder
#endif
   if( argc > 1 )
      n_itrs = atoi( argv[1] );
#ifdef TIME
   CPUTimer timer;
   unsigned int runs = 0;   // trial counter for the average at the end
#endif
   for(int x = -n_itrs; x < n_itrs; ++x)
      for(int y = -n_itrs; y < n_itrs; ++y)
      {
         if( y == x ) continue;   // a quotient needs two distinct values
         Quociente q( x , y ) ;
         for(int k = 1 ; k < n_itrs; ++k)
         {
            std::cout << "x[" << x << "] y[" << y << "] k[" << k << "]\n";
#ifdef TIME
            timer.start();
#endif
#ifdef SHOW_QS
            quoc = q.FindFor( k );
#else
            q.FindFor( k );
#endif
#ifdef TIME
            timer.stop();
            runs++;
            std::cout << "\tTrial time: " << timer.getCPUCurrSecs() << "s" << std::endl;
#endif
#ifdef SHOW_QS
            std::cout << "\tQuocient: " << quoc << std::endl;
#endif
         }
      }
#ifdef TIME
   std::cout << "\n\nTotal time: " << timer.getCPUTotalSecs() << "s" << std::endl;
   std::cout << "Avg. time : " << timer.getCPUTotalSecs()/runs << "s" << std::endl;
#endif
   return 0;
}
int main(){ ifstream inputFile; //char strin[20]; char fileName[20]; char iord; int number; int choice; CPUTimer ct; cout << "Filename >> "; cin >> fileName; inputFile.open(fileName); do { choice = getChoice(); ct.reset(); //string dummy; //getline(thestream, dummy); //for(int i = 0; i < 10; i++){ //thestream >> iord >> number; //cout << iord; //cout << number << endl; //} //PassStream(inputFile); //RunList(); switch(choice) { case 1: RunList(inputFile); break; case 2: RunCursorList(inputFile); break; case 3: RunStackAr(inputFile); break; case 4: RunStackLi(inputFile); break; case 5: RunQueueAr(inputFile); break; case 6: RunSkipList(inputFile); break; } cout << "CPU time: " << ct.cur_CPUTime() << endl; }while(choice > 0); return 0; }
int main(int argc, char* argv[]) { char c; int numStations, numCars, numActions; Action *actions = new Action[1000000]; ifstream inf(argv[1]); inf >> numStations >> c >> numCars; Station *stations = new Station[numStations]; Station *stations2 = new Station[numStations]; Car *cars = new Car[numCars]; readFile(inf, stations, numStations, numCars, cars); memcpy(stations2, stations, sizeof(Station) * numStations); CPUTimer ct; Train *train = new Train(stations, numStations); delete [] stations; train->run((const Car*) cars, numCars, actions, &numActions); double time = ct.cur_CPUTime(); int totalDistance = checkActions(actions, numActions, stations2, numStations,cars, numCars); cout << "CPU time: " << time << " Total distance: " << totalDistance << endl; return 0; } // main()
int main(int argc, char **argv) { char word[MAX_LENGTH + 1], matchingWords[100][MAX_LENGTH + 1]; int numWords, count; DictionaryWord *words = readWordsFile(); MatchingWords *matchingWordsKey = readTesterFile(argv[1], &numWords); CPUTimer ct; Checker *checker = new Checker((const DictionaryWord*) words, NUM_WORDS); delete words; for(int i = 0; i < numWords; i++) { strcpy(word, matchingWordsKey[i].word); checker->findWord(word, matchingWords, &count); if(count != matchingWordsKey[i].count) { cout << "Incorrect count for trial# " << i << " for " << matchingWordsKey[i].word << " should be " << matchingWordsKey[i].count << " but received " << count << endl; } else // correct count { for(int j = 0; j < count; j++) if(strcmp(matchingWordsKey[i].matches[j], matchingWords[j]) != 0) { cout << "Words don't match for trial# " << i << " for " << matchingWordsKey[i].word << " match# " << j << " should be " << matchingWordsKey[i].matches[j] << " but received " << matchingWords[j] << endl; } // if invalid match } // else correct count } // for each word cout << "CPU Time: " << ct.cur_CPUTime() << endl; return 0; }
// Fills one prefetch batch of optical-flow data: each source datum expands
// to num_test_views_ slots in the batch, transformed differently for TRAIN
// (single varied-size transform) vs TEST (multi-view transform).  Consumed
// datums are recycled back to the reader's free queue.
void FlowDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  // Reshape according to the first datum of each batch
  // on single input batches allows for inputs of varying dimension.
  const int batch_size = this->layer_param_.flow_data_param().batch_size();
  Datum& datum = *(reader_.full().peek());
  // Use data_transformer to infer the expected blob shape from datum.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
  top_shape[0] = num_test_views_;
  this->transformed_data_.Reshape(top_shape);
  // Reshape batch according to the batch_size.
  top_shape[0] = batch_size * num_test_views_;
  batch->data_.Reshape(top_shape);
  Dtype* top_data = batch->data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables
  if (this->output_labels_) {
    top_label = batch->label_.mutable_cpu_data();
  }
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    timer.Start();
    // get a datum (blocks until the reader has one ready)
    Datum& datum = *(reader_.full().pop("Waiting for flow data"));
    read_time += timer.MicroSeconds();
    // DLOG(INFO) << "number of data in full queue: " << reader_.full().size();
    timer.Start();
    // Apply data transformations (mirror, scale, crop...)
    // Each item owns num_test_views_ consecutive slots in the batch blob.
    int offset = batch->data_.offset(item_id * num_test_views_);
    this->transformed_data_.set_cpu_data(top_data + offset);
    if (this->phase_ == TRAIN)
      this->data_transformer_->TransformVariedSizeDatum(datum, &(this->transformed_data_));
    else if (this->phase_ == TEST)
      this->data_transformer_->TransformVariedSizeTestDatum(datum, &(this->transformed_data_), num_test_views_);
    // Copy label.
    if (this->output_labels_) {
      top_label[item_id] = datum.label();
    }
    trans_time += timer.MicroSeconds();
    // Return the consumed datum to the free queue for reuse.
    reader_.free().push(const_cast<Datum*>(&datum));
  }
  timer.Stop();
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
// Compares the naive and fast per-subgraph betweenness implementations,
// logging each phase's runtime and writing "naive;fast" to stdout.
void benchmark_ts_betweenness(tsppi::TsPpiGraph& tsppi)
{
    CPUTimer stopwatch;

    LOG("Start Benchmark: Naive");
    stopwatch.start();
    std::vector<std::vector<double> > naive_ts_bw = tsppi::algo::subgraph_betweenness(tsppi.subgraphs);
    stopwatch.stop();
    double naive_time = stopwatch.getTime();
    LOG("Time for Naive: " << stopwatch.getTime() << " s");

    LOG("Start Benchmark: Fast");
    stopwatch.start();
    std::vector<std::vector<double> > fast_ts_bw = tsppi::algo::subgraph_betweenness_fast(tsppi.subgraphs);
    stopwatch.stop();
    double fast_time = stopwatch.getTime();
    LOG("Time for Fast: " << stopwatch.getTime() << " s");

    std::cout << naive_time << ";" << fast_time;
}
// Fills one prefetch batch for segmentation: each datum is transformed
// jointly into a data slot and a per-pixel label slot via SegTransform,
// then recycled back to the reader's free queue.
void SegDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double deque_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  // Reshape on single input batches for inputs of varying dimension.
  const int batch_size = this->layer_param_.data_param().batch_size();
  Dtype* top_data = batch->data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables
  if (this->output_labels_) {
    top_label = batch->label_.mutable_cpu_data();
  }
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob (blocks until the reader has one ready)
    timer.Start();
    Datum& datum = *(reader_.full().pop("Waiting for data"));
    deque_time += timer.MicroSeconds();
    // Apply data transformations (mirror, scale, crop...)
    timer.Start();
    const int offset_data = batch->data_.offset(item_id);
    const int offset_label = batch->label_.offset(item_id);
    this->transformed_data_.set_cpu_data(top_data + offset_data);
    // NOTE(review): top_label is offset and handed to SegTransform even when
    // output_labels_ is false (top_label == NULL) — confirm labels are
    // always enabled for this layer.
    this->transformed_label_.set_cpu_data(top_label + offset_label);
    this->data_transformer_->SegTransform(datum, &(this->transformed_data_), &(this->transformed_label_));
    trans_time += timer.MicroSeconds();
    // Return the consumed datum to the free queue for reuse.
    reader_.free().push(const_cast<Datum*>(&datum));
  }
  timer.Stop();
  batch_timer.Stop();
#ifdef BENCHMARK_DATA
  LOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  LOG(INFO) << " Dequeue time: " << deque_time / 1000 << " ms.";
  LOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
#endif
}
// Fills one prefetch batch for the CPM (pose) layer: datums may arrive
// JPEG/PNG-encoded (decoded here, optionally forcing color) or raw, in
// which case Transform_nv produces both the data and the pose label maps.
// Consumed datums are recycled back to the reader's free queue.
void CPMDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double deque_time = 0;
  double decod_time = 0;
  double trans_time = 0;
  static int cnt = 0;   // running sample counter passed to Transform_nv
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  // Reshape on single input batches for inputs of varying dimension.
  const int batch_size = this->layer_param_.cpmdata_param().batch_size();
  const int crop_size = this->layer_param_.transform_param().crop_size();
  bool force_color = this->layer_param_.cpmdata_param().force_encoded_color();
  if (batch_size == 1 && crop_size == 0) {
    // Only in this uncropped single-item case is the batch reshaped to the
    // incoming datum's own dimensions.
    Datum& datum = *(reader_.full().peek());
    if (datum.encoded()) {
      if (force_color) {
        DecodeDatum(&datum, true);
      } else {
        DecodeDatumNative(&datum);
      }
    }
    batch->data_.Reshape(1, datum.channels(), datum.height(), datum.width());
    this->transformed_data_.Reshape(1, datum.channels(), datum.height(), datum.width());
  }
  Dtype* top_data = batch->data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables
  if (this->output_labels_) {
    top_label = batch->label_.mutable_cpu_data();
  }
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob (blocks until the reader has one ready)
    timer.Start();
    Datum& datum = *(reader_.full().pop("Waiting for data"));
    deque_time += timer.MicroSeconds();
    timer.Start();
    cv::Mat cv_img;
    if (datum.encoded()) {
      if (force_color) {
        cv_img = DecodeDatumToCVMat(datum, true);
      } else {
        cv_img = DecodeDatumToCVMatNative(datum);
      }
      if (cv_img.channels() != this->transformed_data_.channels()) {
        LOG(WARNING) << "Your dataset contains encoded images with mixed " << "channel sizes. Consider adding a 'force_color' flag to the " << "model definition, or rebuild your dataset using " << "convert_imageset.";
      }
    }
    decod_time += timer.MicroSeconds();
    // Apply data transformations (mirror, scale, crop...)
    timer.Start();
    const int offset_data = batch->data_.offset(item_id);
    const int offset_label = batch->label_.offset(item_id);
    this->transformed_data_.set_cpu_data(top_data + offset_data);
    // NOTE(review): top_label is offset even when output_labels_ is false
    // (top_label == NULL) — confirm labels are always enabled here.
    this->transformed_label_.set_cpu_data(top_label + offset_label);
    if (datum.encoded()) {
      // Encoded path transforms the decoded image only (no label generation).
      this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    } else {
      // Raw path produces both data and pose-label blobs.
      this->data_transformer_->Transform_nv(datum, &(this->transformed_data_), &(this->transformed_label_), cnt);
      ++cnt;
    }
    // if (this->output_labels_) {
    //   top_label[item_id] = datum.label();
    // }
    trans_time += timer.MicroSeconds();
    // Return the consumed datum to the free queue for reuse.
    reader_.free().push(const_cast<Datum*>(&datum));
  }
  batch_timer.Stop();
#ifdef BENCHMARK_DATA
  LOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  LOG(INFO) << " Dequeue time: " << deque_time / 1000 << " ms.";
  LOG(INFO) << "  Decode time: " << decod_time / 1000 << " ms.";
  LOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
#endif
}