// Executes a tokenized SELECT statement of the form:
//   SELECT [DISTINCT] col[, col...] FROM table[, table...]
//          [WHERE <condition tokens>] [ORDER BY col]
// `words` holds the whitespace-split statement (words[0] == "SELECT").
// Column names are qualified with their table names, a logical query
// plan is generated, and the resulting view is printed and returned.
Relation* Select(vector<string> &words, SchemaManager &schema_manager, MainMemory &mem){
    vector<string> select_list, from_list, where_list, order_list;
    bool has_distinct = false, has_where = false, has_orderby = false;
    int i = 1; // words[0] is "SELECT"
    if (words[i] == "DISTINCT"){
        has_distinct = true;
        i++;
    }
    // Collect select columns until FROM.
    while (i < words.size() && words[i] != "FROM"){
        // drop comma
        select_list.push_back(splitBy(words[i], ",")[0]);
        i++;
    }
    i++; // skip FROM
    // Collect table names until an optional WHERE / ORDER clause starts.
    while ( i < words.size() && words[i] != "WHERE" && words[i] != "ORDER"){
        from_list.push_back(splitBy(words[i], ",")[0]);
        i++;
    }
    if (i < words.size()){
        if (words[i] == "WHERE"){
            has_where = true;
            i++; // skip WHERE
            // WHERE tokens run until ORDER (or end of statement).
            while (i < words.size() && words[i] != "ORDER"){
                where_list.push_back(words[i]);
                i++;
            }
        }
        if (i < words.size() && words[i] == "ORDER"){
            has_orderby = true;
            i = i + 2; // skip ORDER BY
            // NOTE(review): assumes a column token follows "ORDER BY";
            // a statement ending in "ORDER BY" would index past the end
            // of `words` here — confirm upstream validation.
            order_list.push_back(words[i]);
            i++;
        }
    }
    // add table name to each column name
    preProcess(from_list, select_list, schema_manager);
    preProcess(from_list, where_list, schema_manager);
    preProcess(from_list, order_list, schema_manager);
    /*
    print(select_list);
    print(from_list);
    print(where_list);
    print(order_list);
    */
    Relation* view = generateLQP(has_distinct, select_list, from_list, where_list, order_list, schema_manager, mem);
    cout<<*view<<endl;
    return view;
}
// Picks the next packet to transmit.  To keep the link responsive,
// at most 3 control packets are interleaved between data packets so
// that requests can always get through; preference flips to data once
// that budget is used up.
Packet::Ptr PacketSocket::selectPacket()
{
    QMutexLocker locker(&mutex);
    const bool haveControl = control_packets.size() > 0;
    const bool haveData = data_packets.size() > 0;
    Packet::Ptr chosen(0);
    if (ctrl_packets_sent < 3) {
        // Control traffic still has budget: prefer it, fall back to data.
        if (haveControl)
            chosen = control_packets.front();
        else if (haveData)
            chosen = data_packets.front();
    } else if (haveData) {
        // Budget exhausted: send data and reset the control counter.
        ctrl_packets_sent = 0;
        chosen = data_packets.front();
    } else if (haveControl) {
        chosen = control_packets.front();
    }
    if (chosen)
        preProcess(chosen);
    return chosen;
}
/*
 * Entry point: reads resources/Entrada.txt, loads the automaton from
 * resources/Automato.txt and writes the processing result to
 * resources/Saida.txt.  Each failure is reported to stdout.
 *
 * Fix: the automaton file handle was opened but never closed (leak).
 */
int main(int argc, char **argv) {
    FILE *entrada;
    FILE *saida;
    FILE *automatoFile;
    TArquivo *context;
    Automato *automato;
    if ((entrada = fopen("resources/Entrada.txt", "r+")) != NULL) {
        context = preProcess(entrada);
        if ((automatoFile = fopen("resources/Automato.txt", "r+")) != NULL) {
            automato = inicializaAutomato(automatoFile);
            if ((saida = fopen("resources/Saida.txt", "w+")) != NULL) {
                process(automato, context, entrada, saida);
                fclose(saida);
            } else {
                printf("Falha ao criar arquivo de saida");
            }
            fclose(automatoFile); /* fix: was never closed */
        } else {
            printf("Falha ao carregar automato");
        }
        fclose(entrada);
    } else {
        printf("Falha ao abrir o arquivo");
    }
    return EXIT_SUCCESS;
}
/**
 * Method to preprocess the scenegraph before exporting.
 * The method will find the total number of nodes in the
 * scene and convert all the standard MAX materials in the scene to
 * OSG materials and textures.
 *
 * @param node  current MAX scenegraph node; children are visited
 *              recursively.
 * @param t     time value passed through to material conversion.
 * @return TRUE to continue the traversal (including after a user
 *         cancel), FALSE if a recursive call failed.
 */
BOOL OSGExp::preProcess(INode* node, TimeValue t){
    // User cancelled the export: stop descending.
    if (_ip->GetCancel())
        return TRUE;
    // Only export material if hole scene is to be exported or
    // this node is choosen to be exported.
    if(!_onlyExportSelected || node->Selected()) {
        // Add to the total number of nodes.
        _nTotalNodeCount++;
        // Add the nodes material to out material list
        // Null entries are ignored when added...
        if(_options->getExportMaterials()){
            BOOL mtlAdded = _mtlList->addMtl(node->GetMtl(), _options, t);
            if(mtlAdded){
                // Update material exporting progress bar.
                _nCurMtl++;
                _ip->ProgressUpdate((int)((float)_nCurMtl/_nTotalMtlCount*100.0f));
            }
        }
    }
    // For each child of this node, we recurse into ourselves
    // and increment the counter until no more children are found.
    for (int c = 0; c < node->NumberOfChildren(); c++) {
        if(!preProcess(node->GetChildNode(c),t))
            return FALSE;
    }
    return TRUE;
}
// Manacher's algorithm: finds the longest palindromic substring of s
// in O(n).  T = preProcess(s) is the sentinel-interleaved transform
// (so every palindrome in T has odd length and the expansion loop is
// bounded by the sentinels); P[i] is the palindrome radius around T[i].
//
// Fix: the radius array is now a std::vector instead of a raw
// new[]/delete[] pair — the original leaked P if substr (or anything
// after the allocation) threw, and RAII removes the manual delete.
string longestPalindrome(string s) {
    string T = preProcess(s);
    int n = T.length();
    vector<int> P(n, 0);
    int C = 0, R = 0;  // center and right edge of the rightmost palindrome seen
    for (int i = 1; i < n-1; i++) {
        int i_mirror = 2*C-i; // equals to i' = C - (i-C)
        // Reuse the mirrored radius, clipped to the known right edge.
        P[i] = (R > i) ? min(R-i, P[i_mirror]) : 0;
        // Attempt to expand palindrome centered at i
        while (T[i + 1 + P[i]] == T[i - 1 - P[i]])
            P[i]++;
        // If palindrome centered at i expands past R,
        // adjust center based on expanded palindrome.
        if (i + P[i] > R) {
            C = i;
            R = i + P[i];
        }
    }
    // Find the maximum element in P.
    int maxLen = 0;
    int centerIndex = 0;
    for (int i = 1; i < n-1; i++) {
        if (P[i] > maxLen) {
            maxLen = P[i];
            centerIndex = i;
        }
    }
    // Map the center in T back to the start index in s.
    return s.substr((centerIndex - 1 - maxLen)/2, maxLen);
}
// Runs one GL image-processing pass: (re)allocates the output image if
// the format or size changed, binds the input color texture, activates
// the shader with the subclass hooks preProcess()/postProcess() around
// a full-plane draw.
void ImageGLProcessor::process() {
    if (internalInvalid_) {
        internalInvalid_ = false;
        const DataFormatBase* format = inport_.getData()->getDataFormat();
        size2_t dimensions;
        // Output dimensions follow the outport when it handles resize
        // events (or the inport does not dictate the size); otherwise
        // the input image's own dimensions are used.
        if (outport_.isHandlingResizeEvents() || !inport_.isOutportDeterminingSize())
            dimensions = outport_.getData()->getDimensions();
        else
            dimensions = inport_.getData()->getDimensions();
        // Reallocate only when format or size actually differ.
        if (!outport_.hasData() || format != outport_.getData()->getDataFormat() ||
            dimensions != outport_.getData()->getDimensions()){
            // NOTE(review): raw new — presumably setData takes ownership
            // of the Image; confirm against the outport API.
            Image *img = new Image(dimensions, format);
            img->copyMetaDataFrom(*inport_.getData());
            outport_.setData(img);
        }
    }
    TextureUnit imgUnit;
    utilgl::bindColorTexture(inport_, imgUnit);
    utilgl::activateTargetAndCopySource(outport_, inport_, ImageType::ColorOnly);
    shader_.activate();
    utilgl::setShaderUniforms(shader_, outport_, "outportParameters_");
    shader_.setUniform("inport_", imgUnit.getUnitNumber());
    preProcess();   // subclass hook: extra uniforms/state before drawing
    utilgl::singleDrawImagePlaneRect();
    shader_.deactivate();
    utilgl::deactivateCurrentTarget();
    postProcess();  // subclass hook: cleanup after drawing
}
// Longest palindromic substring via Manacher's algorithm, O(n).
// `t` is the sentinel-interleaved transform of s (from preProcess),
// `radius[i]` is the palindrome radius around t[i].
string longestPalindrome(string s) {
    const string t = preProcess(s);
    const int m = t.length();
    vector<int> radius(m);
    int center = 0, right = 0;  // rightmost palindrome seen so far
    for (int i = 1; i < m - 1; ++i) {
        const int mirror = 2 * center - i;  // i reflected across center
        if (right > i) {
            // Start from the mirrored radius, clipped to the right edge.
            radius[i] = min(right - i, radius[mirror]);
        } else {
            radius[i] = 0;
        }
        // Grow the palindrome around i as far as it matches.
        while (t[i + 1 + radius[i]] == t[i - 1 - radius[i]]) {
            ++radius[i];
        }
        // Track the palindrome reaching furthest right.
        if (i + radius[i] > right) {
            center = i;
            right = i + radius[i];
        }
    }
    // Locate the largest radius found.
    int best_len = 0;
    int best_center = 0;
    for (int i = 1; i < m - 1; ++i) {
        if (radius[i] > best_len) {
            best_len = radius[i];
            best_center = i;
        }
    }
    // Translate the transformed-string center back to an index in s.
    return s.substr((best_center - 1 - best_len) / 2, best_len);
}
// Pulls one buffer of audio from the graph and submits it to the
// XAudio2 source voice.
void OutputDeviceNodeXAudio::submitNextBuffer()
{
    auto ctx = getContext();
    if( ! ctx )
        return;
    lock_guard<mutex> lock( ctx->getMutex() );
    // verify context still exists, since its destructor may have been holding the lock
    ctx = getContext();
    if( ! ctx )
        return;
    ctx->preProcess();
    auto internalBuffer = getInternalBuffer();
    internalBuffer->zero();
    pullInputs( internalBuffer );
    // On clipping, output silence instead of the clipped samples.
    if( checkNotClipping() )
        internalBuffer->zero();
    // Stereo output is interleaved before submission.
    if( getNumChannels() == 2 )
        dsp::interleaveStereoBuffer( internalBuffer, &mBufferInterleaved );
    HRESULT hr = mSourceVoice->SubmitSourceBuffer( &mXAudioBuffer );
    CI_ASSERT( hr == S_OK );
    ctx->postProcess();
}
/*
 * For each query f, prints the largest value below f accepted by
 * preProcess() (a primality test per generatePrime's sieve —
 * see those definitions).
 *
 * Fix: for f < 5 the answer (2) was printed without a newline and
 * control then FELL THROUGH into the search loop, repeatedly
 * decrementing the unsigned value past zero (wrap-around).  The small
 * case now prints consistently and continues to the next query.
 */
int main() {
    int t;
    unsigned long long f;
    generatePrime();
    scanf("%d", &t);
    while (t--) {
        scanf("%llu", &f);
        if (f < 5) {
            printf("%d\n", 2);
            continue;  /* fix: do not fall into the loop below */
        }
        while (1) {
            /* step to the next smaller odd candidate (strictly < f) */
            if (f % 2 == 0) f--;
            else f -= 2;
            if (preProcess(f)) {
                printf("%llu\n", f);
                break;
            }
        }
    }
    return 0;
}
/**
 * @name processAutomatic(EngineData *inp)
 * @param inp pointer to EngineData structure
 * processes the whole sequence from reading inputs to writing outputs
 */
void TruthTableEngine::processAutomatic(EngineData *inp) {
  readInputs(inp);   // read inputs
  preProcess(inp);   // pre process into testbyte
  process(inp);      // search for valid combination
  postProcess(inp);  // copy result to output array
  writeOutputs(inp); // write digital pins based on active levels
}
bool ProcessObject::start() { if(m_pProcInfo.hProcess) return true; //Pre-Process preProcess(); // start a process with given index STARTUPINFO startUpInfo = { sizeof(STARTUPINFO),NULL,_T(""),NULL,0,0,0,0,0,0,0,STARTF_USESHOWWINDOW,0,0,NULL,0,0,0}; if(m_isUserInterface) startUpInfo.wShowWindow = SW_SHOW; else startUpInfo.wShowWindow = SW_HIDE; startUpInfo.lpDesktop = NULL; // set the correct desktop for the process to be started if(m_isImpersonate==false) { // create the process if(CreateProcess(NULL, const_cast<TCHAR*>(m_commandLine.GetString()),NULL,NULL,FALSE,NORMAL_PRIORITY_CLASS, NULL,NULL,&startUpInfo,&m_pProcInfo)) { Sleep(m_delayStartTime); return true; } else { TCHAR pTemp[256]; long nError = GetLastError(); _stprintf(pTemp,_T("Failed to start program '%s', error code = %d"), m_commandLine.GetString(), nError); LOG_WRITER_INSTANCE.WriteLog( pTemp); return false; } } else { HANDLE hToken = NULL; if(LogonUser(m_userName.GetString(),(m_domainName.GetLength()==0)?_T("."):m_domainName.GetString(),m_userPassword.GetString(),LOGON32_LOGON_SERVICE,LOGON32_PROVIDER_DEFAULT,&hToken)) { if(CreateProcessAsUser(hToken,NULL,const_cast<TCHAR*>(m_commandLine.GetString()),NULL,NULL,TRUE,NORMAL_PRIORITY_CLASS,NULL,NULL,&startUpInfo,&m_pProcInfo)) { Sleep(m_delayStartTime); return true; } long nError = GetLastError(); TCHAR pTemp[256]; _stprintf(pTemp,_T("Failed to start program '%s' as user '%s', error code = %d"), m_commandLine.GetString(), m_userName.GetString(), nError); LOG_WRITER_INSTANCE.WriteLog( pTemp); return false; } long nError = GetLastError(); TCHAR pTemp[256]; _stprintf(pTemp,_T("Failed to logon as user '%s', error code = %d"), m_userName.GetString(), nError); LOG_WRITER_INSTANCE.WriteLog( pTemp); return false; } }
/** @brief called to process everything */
virtual void process(void)
{
    // If _dstImg was set, check that the _renderWindow is lying into dstBounds
    if (_dstImg) {
        const OfxRectI& dstBounds = _dstImg->getBounds();
        // is the renderWindow within dstBounds ?
        assert(dstBounds.x1 <= _renderWindow.x1 && _renderWindow.x2 <= dstBounds.x2 &&
               dstBounds.y1 <= _renderWindow.y1 && _renderWindow.y2 <= dstBounds.y2);
        // exit gracefully in case of error (also rejects empty/inverted
        // render windows so the threading math below stays valid)
        if (!(dstBounds.x1 <= _renderWindow.x1 && _renderWindow.x2 <= dstBounds.x2 &&
              dstBounds.y1 <= _renderWindow.y1 && _renderWindow.y2 <= dstBounds.y2) ||
            (_renderWindow.x1 >= _renderWindow.x2) ||
            (_renderWindow.y1 >= _renderWindow.y2)) {
            return;
        }
    }
    // call the pre MP pass
    preProcess();
    // make sure there are at least 4096 pixels per CPU and at least 1 line par CPU
    unsigned int nCPUs = ((std::min)(_renderWindow.x2 - _renderWindow.x1, 4096) *
                          (_renderWindow.y2 - _renderWindow.y1)) / 4096;
    // make sure the number of CPUs is valid (and use at least 1 CPU)
    nCPUs = std::max(1u, (std::min)(nCPUs, OFX::MultiThread::getNumCPUs()));
    // call the base multi threading code, should put a pre & post thread calls in too
    multiThread(nCPUs);
    // call the post MP pass
    postProcess();
}
/**
 * Recognizes the face in testFace and returns the matching person's ID.
 *
 * The probe image is preprocessed (resized), projected onto the PCA
 * (eigenface) subspace, and matched against the training set by
 * nearest-neighbor search.
 *
 * Fix: the projection buffer from cvAlloc() was never freed, leaking
 * nEigens floats on every call.
 */
int EigenFace::recognize(IplImage* testFace) {
    // Preprocess the probe image to the training geometry.
    IplImage* resizedFaceImage = preProcess(testFace);

    // project the test images onto the PCA subspace
    float * projectedTestFace = (float *)cvAlloc( nEigens*sizeof(float) );

    int iNearest, nearest;
    // project the test image onto the PCA subspace
    cvEigenDecomposite(
            resizedFaceImage,
            nEigens,
            eigenVectArr,
            0, 0,
            pAvgTrainImg,
            projectedTestFace);

    iNearest = findNearestNeighbor(projectedTestFace);
    nearest  = trainPersonNumMat->data.i[iNearest];

    cvFree(&projectedTestFace);       // fix: was leaked before
    cvReleaseImage(&resizedFaceImage);
    return nearest;
}
int ProcessImg(int argc,char** argv) { char fn_in[MAX_FNAME_LEN] = {0}; char fn_out[MAX_FNAME_LEN] = {0}; int i = 0; IplImage * pImgIn, *pImgOut; if(argc ==3){ strcpy(fn_in,argv[1]); strcpy(fn_out,argv[2]); //whichKernel = 7; }else if(argc ==4){ strcpy(fn_in,argv[1]); strcpy(fn_out,argv[2]); whichKernel = atoi(argv[3]); }else{ exit(0); } pImgIn = cvLoadImage( fn_in,-1); VIEDOW = pImgIn ->width; VIEDOH = pImgIn ->height; CvSize ImageSize = cvSize(VIEDOW,VIEDOH); pImgOut = cvCreateImage( ImageSize , IPL_DEPTH_8U, 3 ); pImagePool[0] = cvCreateImage( ImageSize , IPL_DEPTH_8U, 3 ); preProcess(0,pImgIn,0,pImgOut); cvSaveImage(fn_out,pImgOut); return 0; }
//wraps it all together void FindGoal::finalize() { loadSrc(); cv::Mat blurred = preProcess(src); std::vector<cv::Vec4i> houghLines = edgeDetection(blurred, srcCopy); cv::Vec4i goalCors = shapeValidation(houghLines); graphics(goalCors, srcCopy); }
/*! @brief Runs the current behaviour. Note that this may not be the current behaviour.
    @param jobs the nubot job list
    @param data the nubot sensor data
    @param actions the nubot actionators data
    @param fieldobjects the nubot world model
    @param gameinfo the nubot game information
    @param teaminfo the nubot team information
 */
void BehaviourProvider::process(JobList* jobs, NUSensorsData* data, NUActionatorsData* actions, FieldObjects* fieldobjects, GameInformation* gameinfo, TeamInformation* teaminfo)
{
    // preProcess() gates execution; doBehaviour()/postProcess() only
    // run when it reports success.
    if (preProcess(jobs, data, actions, fieldobjects, gameinfo, teaminfo))
    {
        doBehaviour();
        postProcess();
    }
}
/*
 * Demo driver for the range-query helpers: builds the preprocessing
 * tables for a fixed sample string, then prints average/middle queries
 * over the index range [2, 9].
 *
 * Fix: main previously returned 1, signalling failure to the shell on
 * a successful run; it now returns 0.
 */
int main(int argc, char **argv){
    char array[] = "asdfasdfasdfasdf";
    int *p = preProcess(array, 16);
    double *pd = preProcess2(array, 16);
    printf("find average 2 to 9: %d\n", findAverage(array, 16, 2, 9, p));
    /* NOTE(review): if findAverage2 returns double, "%d" is the wrong
     * conversion specifier (undefined behavior) — verify its return
     * type and use "%f" if so. */
    printf("find average 2 to 9: %d\n", findAverage2(array, 16, 2, 9, pd));
    printf("find middle 2 to 9: %d\n", findMiddle(array, 16, 2, 9));
    /* NOTE(review): p/pd are presumably heap-allocated by preProcess*;
     * confirm the allocator and free them here. */
    return 0;
}
// Returns every palindrome partitioning of s.  A precomputed table
// isPal[i][j] (filled by preProcess) marks palindromic substrings, and
// nextPartition enumerates the partitions by backtracking from index 0.
vector<vector<string>> partition(string s) {
    const int n = s.length();
    vector<vector<bool>> isPal(n, vector<bool>(n, false));
    preProcess(isPal, s);
    vector<vector<string>> result;
    vector<string> path;
    nextPartition(result, isPal, path, s, 0);
    return result;
}
bool GLPadding::process( const QList<Effect*> &el, double pts, Frame *src, Profile *p ) { Q_UNUSED( pts ); preProcess( src, p ); Effect* e = el[0]; return e->set_int( "width", src->glWidth ) && e->set_int( "height", src->glHeight ) && e->set_float( "top", top ) && e->set_float( "left", left ); }
// Loads mesh data from a Wavefront .obj file.  The call order matters:
// the file is visited twice — once to gather sizing info, once to fill
// the allocated buffers — with subclass hooks around the whole load.
void MeshBase::loadFromObj( const std::string& filename )
{
  preProcess();                 // hook before any loading begins
  loadInfoFromObj( filename );  // first pass: info needed by allocateData()
  allocateData();               // allocate storage based on the info pass
  startWritingData();           // open/map buffers for writing
  loadDataFromObj( filename );  // second pass: fill the buffers
  computeAabb();                // bounding box from the loaded data
  postProcess();                // hook after loading completes
  finishWritingData();          // close/unmap buffers
}
// Splits a combined shader source into its sections.  The preprocessed
// source is scanned line by line; a line that exactly matches one of
// the section keywords ("vertex", "geometry", "fragment", "depth",
// "blend") dispatches the following lines to the matching handler,
// which returns the index where scanning should resume.  Lines outside
// any section accumulate into myShaderSources[0] (shared/common code).
void ShaderParser::parse(const std::string& source)
{
    mySource = preProcess(source);

    std::array<std::string, 5> shaderNames = {{ "vertex", "geometry", "fragment", "depth", "blend" }};

    std::size_t startIndex = 0;
    std::size_t endIndex = 0;
    while(endIndex != std::string::npos)
    {
        endIndex = mySource.find('\n', startIndex);
        if(endIndex != std::string::npos)
        {
            // Current line, without its trailing newline.
            std::string line = mySource.substr(startIndex, endIndex - startIndex);
            tyr::ShaderType type = tyr::ShaderType::None;
            for(std::size_t typeIndex = 0; typeIndex < 5; typeIndex++)
            {
                if(line == shaderNames[typeIndex])
                {
                    if(typeIndex <= 2)
                    {
                        // vertex/geometry/fragment map to ShaderType 1..3;
                        // the handler consumes the section body and returns
                        // the resume position.
                        type = static_cast<tyr::ShaderType>(typeIndex + 1);
                        endIndex = appendSourceTo(type, endIndex + 1);
                    }
                    else if(typeIndex == 3)
                    {
                        endIndex = parseDepthSettings(endIndex + 1);
                        // Count marks "handled, but not a shader stage".
                        type = tyr::ShaderType::Count;
                    }
                    else if(typeIndex == 4)
                    {
                        endIndex = parseBlendSettings(endIndex + 1);
                        type = tyr::ShaderType::Count;
                    }
                    break;
                }
            }
            if(type == tyr::ShaderType::None)
            {
                // Not a section keyword: keep as common source.
                myShaderSources[0] += line + "\n";
            }
        }
        // npos + 1 wraps to 0, but the loop condition has already
        // exited by then; otherwise resume after the consumed span.
        startIndex = endIndex + 1;
    }
}
// Fingerprint matching experiment over the CASIA-FingerprintV5 set.
// For each user, loads imgTraningSize left-hand images, extracts the
// nearest-minutiae distance vectors, and scores each image against the
// others by counting distances that fall within +/- threshold, printing
// a per-image hit-rate table.
int main(int argc, char *argv[]){
    minutiaeNearestSize = 100;
    double threshold = 0.5;
    int imgTraningSize = 5;
    cout << "USUARIO DEDO TX0 TX1 TX2 TX3 TX4 MEDIA\n" << endl;
    for(int user=0;user < 500;user++){
        vector< vector<double> > mat;
        for(int i = 0; i < imgTraningSize; i++){
            string str = "CASIA-FingerprintV5/"+intToString(user, true)+"/L/"+intToString(user, true)+"_L0_"+intToString(i, false)+".bmp";
            Mat img = imread(str, CV_LOAD_IMAGE_GRAYSCALE); // read the image
            Mat res = preProcess(img); // preprocessing: filtering and skeletonization
            mat.push_back(nearestMinutiaes(res)); // store distances of the nearest minutiae
        }
        // Leave-one-out: score every image j against the held-out image.
        for(int ignorar = 0; ignorar < imgTraningSize; ++ignorar){
            printf("%03d L0 ", user);
            double avg = 0;
            for(int j = 0; j < mat.size(); j++){
                int hit = 0;
                if(j != ignorar){
                    // Two-pointer sweep over both (presumably sorted —
                    // TODO confirm) distance vectors, counting matches
                    // within the +/- threshold window.
                    for(int k = 0, l = 0; k < mat[j].size() && l < mat[ignorar].size();){
                        double inf = mat[j][k] - threshold;
                        double sup = mat[j][k] + threshold;
                        if(mat[ignorar][l] >= inf && mat[ignorar][l] <= sup){
                            hit++;
                            k++;
                            l++;
                        } else {
                            if(mat[ignorar][l] < inf)
                                l++;
                            else
                                k++;
                        }
                    }
                    avg+=(hit*100)/(double)mat[ignorar].size();
                    printf("%.5lf\t", hit/(double)mat[ignorar].size());
                } else{
                    printf("XXX\t"); // an image is never scored against itself
                }
            }
            printf("%.5lf\n", (avg / 4.0)); // mean over the 4 comparisons
        }
        printf("\n");
    }
}
/*
 * Reads a text line and a pattern from stdin, builds the pattern's
 * border/failure table via preProcess(), and runs matching().
 *
 * Fixes: the scanf conversions had no field widths, so oversized input
 * overflowed the fixed buffers (text[100], pat[50]); the malloc'd
 * border table was never freed; malloc failure is now checked.
 */
int main(void){
    char pat[50];
    char text[100];
    int M, N;
    int *b;
    /* field widths leave room for the terminating NUL */
    scanf("%99[^\n]", text);
    scanf("%49s", pat);
    M = strlen(pat);
    N = strlen(text);
    b = (int *)malloc(sizeof(int) * (M + 1));
    if (b == NULL)
        return 1;
    preProcess(pat, b, M);
    matching(text, pat, b, N, M);
    free(b); /* fix: was leaked */
    return 0;
}
// One iteration of the packet-processing loop: updates the per-second
// packet-rate averages, waits briefly for packets when the queue is
// empty, then drains the queue outside the lock and processes each
// packet with the pre/mid/post hooks.
// Returns whether the processor should keep running.
bool ReceivedPacketProcessor::process() {
    quint64 now = usecTimestampNow();
    quint64 sinceLastWindow = now - _lastWindowAt;

    // Roll the one-second statistics window.
    if (sinceLastWindow > USECS_PER_SECOND) {
        lock();
        float secondsSinceLastWindow = sinceLastWindow / USECS_PER_SECOND;
        float incomingPacketsPerSecondInWindow = (float)_lastWindowIncomingPackets / secondsSinceLastWindow;
        _incomingPPS.updateAverage(incomingPacketsPerSecondInWindow);

        float processedPacketsPerSecondInWindow = (float)_lastWindowProcessedPackets / secondsSinceLastWindow;
        _processedPPS.updateAverage(processedPacketsPerSecondInWindow);

        _lastWindowAt = now;
        _lastWindowIncomingPackets = 0;
        _lastWindowProcessedPackets = 0;
        unlock();
    }

    // Nothing queued: block (bounded by getMaxWait) until packets arrive.
    if (_packets.size() == 0) {
        _waitingOnPacketsMutex.lock();
        _hasPackets.wait(&_waitingOnPacketsMutex, getMaxWait());
        _waitingOnPacketsMutex.unlock();
    }

    preProcess();
    if (!_packets.size()) {
        return isStillRunning();
    }

    // Swap the queue out under the lock so processing happens unlocked.
    lock();
    std::list<NodeSharedReceivedMessagePair> currentPackets;
    currentPackets.swap(_packets);
    unlock();

    for(auto& packetPair : currentPackets) {
        processPacket(packetPair.second, packetPair.first);
        _lastWindowProcessedPackets++;
        midProcess();
    }

    // Decrement per-node counts for everything we just consumed.
    lock();
    for(auto& packetPair : currentPackets) {
        _nodePacketCounts[packetPair.first->getUUID()]--;
    }
    unlock();

    postProcess();

    return isStillRunning();  // keep running till they terminate us
}
// Homework driver: loads an image, runs the student's GPU Gaussian
// blur, times it, and writes/validates the blurred result.
int main(int argc, char **argv) {
  uchar4 *h_inputImageRGBA, *d_inputImageRGBA;
  uchar4 *h_outputImageRGBA, *d_outputImageRGBA;
  unsigned char *d_redBlurred, *d_greenBlurred, *d_blueBlurred;

  float *h_filter;
  int filterWidth;

  std::string input_file;
  std::string output_file;
  if (argc == 3) {
    input_file  = std::string(argv[1]);
    output_file = std::string(argv[2]);
  }
  else {
    std::cerr << "Usage: ./hw input_file output_file" << std::endl;
    exit(1);
  }
  //load the image and give us our input and output pointers
  preProcess(&h_inputImageRGBA, &h_outputImageRGBA, &d_inputImageRGBA, &d_outputImageRGBA,
             &d_redBlurred, &d_greenBlurred, &d_blueBlurred,
             &h_filter, &filterWidth, input_file);

  allocateMemoryAndCopyToGPU(numRows(), numCols(), h_filter, filterWidth);
  GpuTimer timer;
  timer.Start();
  //call the students' code
  your_gaussian_blur(h_inputImageRGBA, d_inputImageRGBA, d_outputImageRGBA, numRows(), numCols(),
                     d_redBlurred, d_greenBlurred, d_blueBlurred, filterWidth);
  timer.Stop();
  // ensure all kernels finished before reading the timer result
  cudaDeviceSynchronize();
  //checkCudaErrors(cudaGetLastError());
  int err = printf("%f msecs.\n", timer.Elapsed());

  if (err < 0) {
    //Couldn't print! Probably the student closed stdout - bad news
    std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
    exit(1);
  }

  cleanup();
  //check results and output the blurred image
  postProcess(output_file);

  // device scratch buffers owned by this driver
  checkCudaErrors(cudaFree(d_redBlurred));
  checkCudaErrors(cudaFree(d_greenBlurred));
  checkCudaErrors(cudaFree(d_blueBlurred));

  return 0;
}
// Constructs the 1-D Laplace mode problem (Q1 elements) on a domain of
// length _Lx discretized with _nX elements.  Map/K/M/x start null;
// preProcess() presumably builds them from the communicator — confirm
// against its definition.
ModeLaplace1DQ1::ModeLaplace1DQ1(const Epetra_Comm &_Comm, double _Lx, int _nX)
                : myVerify(_Comm),
                  MyComm(_Comm),
                  mySort(),
                  Map(0),
                  K(0),
                  M(0),
                  Lx(_Lx),
                  nX(_nX),
                  x(0)
                {
                  preProcess();
                }
/**
 * Estimates the room number digits by smartly analyzing an input ROI or window
 * and applying template matching to sub-windows representing the digits' images
 * respectively in order to find the best match.
 * @param in: Datastructure containing input image, circle radius and center.
 */
void calcRoom(PointImage* in) {
    char path[20]; //temp to hold path to template images
    // NOTE(review): this initial cvCreateImage result is overwritten by
    // preProcess() below without being released — likely a leak; confirm.
    IplImage* tpl, *tempRes, *img = cvCreateImage(cvSize(0, 0), 8, 1);
    IplImage templates[10]; //array of loaded template images
    int room[3], matchIndex = -1; //array holding room detected digits
    CvRect rect;
    // CV_TM_SQDIFF_NORMED: lower score = better match; range is
    // typically [0,1], so 1.5 acts as "worse than any real match".
    double maxMatch = 1.5, currMatch = 1.5; //their max range is typically 1.0.
    //Load the templates
    // NOTE(review): copying *cvLoadImage(...) into a by-value IplImage
    // loses the header pointer returned by cvLoadImage; the release
    // loop at the bottom casts &templates[l] back, which does not free
    // the original headers — verify this cleanup actually works.
    for (int k = 0; k < 10; k++) {
        sprintf(path, "%s%d%s", "templates/", k, ".png");
        templates[k] = *cvLoadImage(path, CV_LOAD_IMAGE_GRAYSCALE);
    }
    //find best match for every sub-window (the three subimages from ROI)
    for (int j = 0; j < 3; j++) {
        for (int i = 0; i <= 9; i++) {
            tpl = &templates[i];
            // NOTE(review): preProcess() returns a fresh image each
            // iteration; the previous `img` is never released — leak?
            img = preProcess(in, tpl);
            // j-th third of the preprocessed image holds the j-th digit.
            rect = cvRect(j * cvRound(img->width / 3), 0, cvRound(img->width / 3), img->height);
            cvSetImageROI(img, rect);
            tempRes = cvCreateImage(cvSize(rect.width - tpl->width + 1, rect.height - tpl->height + 1), IPL_DEPTH_32F, 1);
            cvMatchTemplate(img, tpl, tempRes, CV_TM_SQDIFF_NORMED);
            currMatch = calcMatchingPercent(tempRes);
            // SQDIFF: smaller is better.
            if (currMatch < maxMatch) {
                maxMatch = currMatch;
                matchIndex = i;
            }
            cvReleaseImage(&tempRes);
        }
        //Assign current detected digit.
        room[j] = matchIndex;
        //Reset temporary vars.
        currMatch = 1.5;
        maxMatch = 1.5;
        matchIndex = -1;
    }
    //print the room no.
    // NOTE(review): `(CV_TM_CCOEFF_NORMED | 6)` bit-ORs a matching-mode
    // constant with 6 — the intent of this comparison is unclear; confirm
    // against the original author/tests.
    room[1] = (room[1] == (CV_TM_CCOEFF_NORMED | 6)) ? 1 : room[1];
    printf("Room no: %d %d %d\n", room[0], room[1], room[2]);
    //Memory cleanup.
    cvReleaseImage(&img);
    cvReleaseImage(&in->img);
    for (int l = 0; l < 10; l++) {
        cvReleaseImage((IplImage**) &templates[l]);
    }
}
// Executes "DELETE FROM <table> [WHERE ...]" (tokens in `words`,
// words[2] is the table name).  Without WHERE the relation's blocks are
// simply dropped; with WHERE the surviving tuples are materialized,
// the relation is recreated, and the survivors are copied back.
// Returns the (possibly recreated) relation.
//
// Fix: the final output printed the pointer value (`cout<<relation_ptr`)
// instead of the relation's contents — now prints *relation_ptr,
// matching Select()'s `cout<<*view`.
Relation* Delete(vector<string> &words, SchemaManager &schema_manager, MainMemory &mem){
    Relation* relation_ptr = schema_manager.getRelation(words[2]);
    vector<string>::iterator it = find(words.begin(), words.end(), "WHERE");
    // no WHERE, delete everything
    if (it == words.end()){
        relation_ptr->deleteBlocks(0);
    }
    // with WHERE clause
    else{
        vector<string> where_list(it, words.end());
        // qualify column names with the table name
        preProcess(vector<string> (1, words[2]), where_list, schema_manager);
        // materialize the tuples that survive the WHERE condition
        Relation * new_relation = generateDLQP(where_list, words[2], schema_manager, mem);
        // drop and recreate the target relation with the same schema
        schema_manager.deleteRelation(words[2]);
        Relation* newRR = schema_manager.createRelation(words[2], new_relation->getSchema());
        assert(!free_blocks.empty());
        int memory_block_index = free_blocks.front();
        free_blocks.pop();
        int dBlocks = new_relation->getNumOfBlocks();
        int size = 0;
        Block * block_ptr = NULL;
        while(size < dBlocks){
            // read the relation block by block and copy tuples back
            new_relation->getBlock(size, memory_block_index);
            block_ptr = mem.getBlock(memory_block_index);
            vector<Tuple> tuples = block_ptr->getTuples();
            if(tuples.empty()){
                cerr<<"Warning In Delete: No tuples in the current mem block!"<<endl;
            }
            for(size_t i = 0; i < tuples.size(); ++i){
                Tuple t = tuples[i];
                appendTupleToRelation(newRR, mem, t);
            }
            size++;
        }
        free_blocks.push(memory_block_index);
    }
    relation_ptr = schema_manager.getRelation(words[2]);
    cout<<*relation_ptr<<endl;
    return relation_ptr;
}
// Homework driver: loads source and destination images, runs the
// student's GPU Poisson blend, times it, and writes/validates the
// blended result.
int main(int argc, char **argv) {
  uchar4 *h_sourceImg, *h_destImg, *h_blendedImg;
  size_t numRowsSource, numColsSource;

  std::string input_source_file;
  std::string input_dest_file;
  std::string output_file;
  if (argc == 4) {
    input_source_file = std::string(argv[1]);
    input_dest_file   = std::string(argv[2]);
    output_file       = std::string(argv[3]);
  }
  else {
    std::cerr << "Usage: ./hw input_source_file input_dest_file output_file" << std::endl;
    exit(1);
  }
  //load the image and give us our input and output pointers
  preProcess(&h_sourceImg, numRowsSource, numColsSource,
             &h_destImg,
             &h_blendedImg, input_source_file, input_dest_file);

  GpuTimer timer;
  timer.Start();

  //call the students' code
  your_blend(h_sourceImg, numRowsSource, numColsSource,
             h_destImg,
             h_blendedImg);

  timer.Stop();
  // ensure all kernels finished and surface any deferred errors
  cudaDeviceSynchronize();
  checkCudaErrors(cudaGetLastError());
  // tagged line so the grader can parse the timing out of stdout
  int err = printf("e57__TIMING__f82 %f msecs.\n", timer.Elapsed());

  if (err < 0) {
    //Couldn't print! Probably the student closed stdout - bad news
    std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
    exit(1);
  }

  //check results and output the tone-mapped image
  postProcess(h_blendedImg, numRowsSource, numColsSource, output_file);

  // host buffers allocated by preProcess — freed here
  delete[] h_destImg;
  delete[] h_sourceImg;
  delete[] h_blendedImg;
  return 0;
}
// Tests if a ClusterGraph is C-planar bool CconnectClusterPlanar::call(ClusterGraph &C) { Graph G; ClusterGraph Cp(C,G); OGDF_ASSERT(Cp.consistencyCheck()); m_clusterPQTree.init(Cp,0); bool cPlanar = preProcess(Cp,G); m_parallelEdges.init(); m_isParallel.init(); m_clusterPQTree.init(); return cPlanar; }