void testApp::postProcess(vector<Triangle>& triangles, float scale) {
    // apply the per-vertex postProcess to each vertex of every triangle
    for(int i = 0; i < triangles.size(); i++) {
        Triangle& cur = triangles[i];
        postProcess(cur.vert1, scale);
        postProcess(cur.vert2, scale);
        postProcess(cur.vert3, scale);
    }
}
void FeatureDemo::onFrameRender() {
    beginTestFrame();
    if (mpSceneRenderer) {
        beginFrame();
        {
            PROFILE(updateScene);
            mpSceneRenderer->update(mCurrentTime);
        }
        depthPass();
        shadowPass();
        mpState->setFbo(mpMainFbo);
        renderSkyBox();
        lightingPass();
        antiAliasing();
        postProcess();
        ambientOcclusion();
        endFrame();
    } else {
        mpRenderContext->clearFbo(mpDefaultFBO.get(), vec4(0.2f, 0.4f, 0.5f, 1), 1, 0);
    }
    endTestFrame();
}
void ImageGLProcessor::process() {
    if (internalInvalid_) {
        internalInvalid_ = false;
        const DataFormatBase* format = inport_.getData()->getDataFormat();
        size2_t dimensions;
        if (outport_.isHandlingResizeEvents() || !inport_.isOutportDeterminingSize())
            dimensions = outport_.getData()->getDimensions();
        else
            dimensions = inport_.getData()->getDimensions();
        if (!outport_.hasData() || format != outport_.getData()->getDataFormat() ||
            dimensions != outport_.getData()->getDimensions()) {
            Image* img = new Image(dimensions, format);
            img->copyMetaDataFrom(*inport_.getData());
            outport_.setData(img);
        }
    }
    TextureUnit imgUnit;
    utilgl::bindColorTexture(inport_, imgUnit);
    utilgl::activateTargetAndCopySource(outport_, inport_, ImageType::ColorOnly);
    shader_.activate();
    utilgl::setShaderUniforms(shader_, outport_, "outportParameters_");
    shader_.setUniform("inport_", imgUnit.getUnitNumber());
    preProcess();
    utilgl::singleDrawImagePlaneRect();
    shader_.deactivate();
    utilgl::deactivateCurrentTarget();
    postProcess();
}
void PreviewToggleCommand::enable() {
    // Cleanup Preview and Camera Test are exclusive; if the latter is
    // enabled, disable it.
    // NOTE: This is done *before* attaching, since attach may invoke a preview
    // rebuild.
    CameraTestCheck *tc = CameraTestCheck::instance();
    tc->setIsEnabled(false);

    // Attach to the model
    CleanupSettingsModel *model = CleanupSettingsModel::instance();
    model->attach(CleanupSettingsModel::LISTENER | CleanupSettingsModel::PREVIEWER);

    // Connect signals
    bool ret = true;
    ret = ret && connect(model, SIGNAL(previewDataChanged()), this,
                         SLOT(onPreviewDataChanged()));
    ret = ret && connect(model, SIGNAL(modelChanged(bool)), this,
                         SLOT(onModelChanged(bool)));
    ret = ret && connect(&m_timer, SIGNAL(timeout()), this, SLOT(postProcess()));

    TPaletteHandle *ph =
        TApp::instance()->getPaletteController()->getCurrentCleanupPalette();
    ret = ret && connect(ph, SIGNAL(colorStyleChanged()), &m_timer, SLOT(start()));
    ret = ret && connect(ph, SIGNAL(paletteChanged()), &m_timer, SLOT(start()));
    assert(ret);

    onPreviewDataChanged();

    // In preview cleanup mode, tools are forbidden! Revert to the Hand tool...
    TApp::instance()->getCurrentTool()->setTool(T_Hand);
}
void ColladaDocumentImporter::finish() {
    // the comma operator keeps the assert expression true; the trace output
    // only fires in debug builds, where assert is evaluated
    assert((std::cout << "MCB: ColladaDocumentImporter::finish() entered" << std::endl, true));
    postProcess();
    mState = FINISHED;
}
/**
 * @brief processAutomatic(EngineData *inp)
 * @param inp pointer to an EngineData structure
 *
 * Processes the whole sequence, from reading inputs to writing outputs.
 */
void TruthTableEngine::processAutomatic(EngineData *inp) {
    readInputs(inp);   // read inputs
    preProcess(inp);   // pre-process into the test byte
    process(inp);      // search for a valid combination
    postProcess(inp);  // copy the result to the output array
    writeOutputs(inp); // write digital pins based on active levels
}
void ProcessObject::stop() {
    if (m_pProcInfo.hProcess) {
        // post a WM_QUIT message first
        PostThreadMessage(m_pProcInfo.dwThreadId, WM_QUIT, 0, 0);
        // sleep for a while so that the process has a chance to terminate itself
        ::Sleep(m_delayPauseEndTime > 0 ? m_delayPauseEndTime : 50);
        // terminate the process by force
        TerminateProcess(m_pProcInfo.hProcess, 0);
        try // close handles to avoid ERROR_NO_SYSTEM_RESOURCES
        {
            ::CloseHandle(m_pProcInfo.hThread);
            ::CloseHandle(m_pProcInfo.hProcess);
        }
        catch (...) {}
        TCHAR pTemp[121];
        _stprintf(pTemp, _T("Process%d ended"), m_objIndex);
        LOG_WRITER_INSTANCE.WriteLog(pTemp);
        m_pProcInfo.hProcess = 0;
        m_pProcInfo.hThread = 0;
        postProcess();
    }
}
/** @brief called to process everything */
virtual void process(void) {
    // If _dstImg was set, check that _renderWindow lies within its bounds
    if (_dstImg) {
        const OfxRectI& dstBounds = _dstImg->getBounds();
        // is the renderWindow within dstBounds?
        assert(dstBounds.x1 <= _renderWindow.x1 && _renderWindow.x2 <= dstBounds.x2 &&
               dstBounds.y1 <= _renderWindow.y1 && _renderWindow.y2 <= dstBounds.y2);
        // exit gracefully in case of error
        if (!(dstBounds.x1 <= _renderWindow.x1 && _renderWindow.x2 <= dstBounds.x2 &&
              dstBounds.y1 <= _renderWindow.y1 && _renderWindow.y2 <= dstBounds.y2) ||
            (_renderWindow.x1 >= _renderWindow.x2) ||
            (_renderWindow.y1 >= _renderWindow.y2)) {
            return;
        }
    }

    // call the pre MP pass
    preProcess();

    // make sure there are at least 4096 pixels per CPU and at least 1 line per CPU
    unsigned int nCPUs = ((std::min)(_renderWindow.x2 - _renderWindow.x1, 4096) *
                          (_renderWindow.y2 - _renderWindow.y1)) / 4096;
    // make sure the number of CPUs is valid (and use at least 1 CPU)
    nCPUs = std::max(1u, (std::min)(nCPUs, OFX::MultiThread::getNumCPUs()));

    // call the base multi threading code, which should put pre & post thread calls in too
    multiThread(nCPUs);

    // call the post MP pass
    postProcess();
}
bool ArtistInfo::parse(std::string &data) {
    size_t a, b;
    bool parse_failed = false;
    if ((a = data.find("<content>")) != std::string::npos) {
        a += static_strlen("<content>");
        if ((b = data.find("</content>")) == std::string::npos)
            parse_failed = true;
    } else
        parse_failed = true;
    if (parse_failed) {
        data = msgParseFailed;
        return false;
    }
    if (a == b) {
        data = "No description available for this artist.";
        return false;
    }

    std::vector< std::pair<std::string, std::string> > similars;
    for (size_t i = data.find("<name>"), j, k = data.find("<url>"), l;
         i != std::string::npos;
         i = data.find("<name>", i), k = data.find("<url>", k)) {
        j = data.find("</name>", i);
        i += static_strlen("<name>");
        l = data.find("</url>", k);
        k += static_strlen("<url>");
        similars.push_back(std::make_pair(data.substr(i, j-i), data.substr(k, l-k)));
        StripHtmlTags(similars.back().first);
    }

    a += static_strlen("<![CDATA[");
    b -= static_strlen("]]>");
    data = data.substr(a, b-a);

    postProcess(data);

    data += "\n\nSimilar artists:\n";
    for (size_t i = 1; i < similars.size(); ++i) {
        data += "\n * ";
        data += similars[i].first;
        data += " (";
        data += similars[i].second;
        data += ")";
    }
    data += "\n\n";
    data += similars.front().second;
    return true;
}
void MergeInfoSection::writePass( MergeFile * outFile,
                                  WCPtrOrderedVector<MergeFile> & inFiles )
//-------------------------------------------------------------------------
{
    uint_32     len;
    uint_32     size;
    char *      block;
    const int   BufSize = 512;

    MergeStringHdl::ragnarok();
    _diesByName->freeDirectory();

    MergeInfoPP postProcess( inFiles.entries() );
    relocPass( postProcess );

    block = new char [ BufSize ];
    memset( block, 0, BufSize );
    len = _compunitHdr->_infoLength + sizeof(uint_32);
    while( len ) {
        size = (BufSize < len) ? BufSize : len;
        outFile->writeBlock( block, size );
        len -= size;
    }
    delete [] block;

    outFile->seekSect( DR_DEBUG_INFO, 0 );
    outFile->writeBlock( _compunitHdr, sizeof(MergeCompunitHdr) );
    postProcess.execute( this, *outFile, inFiles );
}
cv::Mat_<int>* Segmentation_SLIC::segmentImage_SLIC(cv::Mat* in, int k, float M, int iteration) {
    this->k = k;                 // number of clusters
    N = width * height;          // number of points
    S = (int)sqrt(1.0 * N / k);  // grid (window) size
    this->M = M;                 // weighting between color and spatial distance

    if (!initialized) {
        initMemory_SLIC();       // init GPU memory
        initialized = true;
    }
    initLD_SLIC();               // init pixel parameters
    copyInputImage_SLIC(in);     // set input image
    computeGrd();                // compute gradients
    sampleInitialClusters();     // sample clusters, move centers

    /*cudaDeviceSynchronize();
    unsigned t1 = clock();*/
    for (int i = 0; i < iteration; i++) {
        set_ld();                // set cluster IDs
        aggregateClusters();     // analyze cluster clouds
        resetAggregationUnit();
    }
    //cudaDeviceSynchronize();
    //std::cout << (clock() - t1) / 1000.0 << std::endl;

    postProcess();
    setOutputImage();
    return &bw;
}
void PreviewToggleCommand::disable() {
    CleanupSettingsModel *model = CleanupSettingsModel::instance();
    model->detach(CleanupSettingsModel::LISTENER | CleanupSettingsModel::PREVIEWER);

    bool ret = true;
    ret = ret && disconnect(model, SIGNAL(previewDataChanged()), this,
                            SLOT(onPreviewDataChanged()));
    ret = ret && disconnect(model, SIGNAL(modelChanged(bool)), this,
                            SLOT(onModelChanged(bool)));
    ret = ret && disconnect(&m_timer, SIGNAL(timeout()), this, SLOT(postProcess()));

    // Cleanup palette changes all fall under post-processing and do not
    // involve the model.
    TPaletteHandle *ph =
        TApp::instance()->getPaletteController()->getCurrentCleanupPalette();
    ret = ret && disconnect(ph, SIGNAL(colorStyleChanged()), &m_timer, SLOT(start()));
    ret = ret && disconnect(ph, SIGNAL(paletteChanged()), &m_timer, SLOT(start()));
    assert(ret);

    clean();
    TApp::instance()->getCurrentLevel()->notifyLevelChange();
}
void OutputDeviceNodeXAudio::submitNextBuffer() {
    auto ctx = getContext();
    if( ! ctx )
        return;

    lock_guard<mutex> lock( ctx->getMutex() );

    // verify context still exists, since its destructor may have been holding the lock
    ctx = getContext();
    if( ! ctx )
        return;

    ctx->preProcess();

    auto internalBuffer = getInternalBuffer();
    internalBuffer->zero();
    pullInputs( internalBuffer );

    if( checkNotClipping() )
        internalBuffer->zero();

    if( getNumChannels() == 2 )
        dsp::interleaveStereoBuffer( internalBuffer, &mBufferInterleaved );

    HRESULT hr = mSourceVoice->SubmitSourceBuffer( &mXAudioBuffer );
    CI_ASSERT( hr == S_OK );

    ctx->postProcess();
}
/*! @brief Runs the current behaviour. Note that this provider may not be the
           one currently selected.
    @param jobs the nubot job list
    @param data the nubot sensor data
    @param actions the nubot actionators data
    @param fieldobjects the nubot world model
    @param gameinfo the nubot game information
    @param teaminfo the nubot team information
 */
void BehaviourProvider::process(JobList* jobs, NUSensorsData* data, NUActionatorsData* actions,
                                FieldObjects* fieldobjects, GameInformation* gameinfo,
                                TeamInformation* teaminfo) {
    if (preProcess(jobs, data, actions, fieldobjects, gameinfo, teaminfo)) {
        doBehaviour();
        postProcess();
    }
}
void TraceDataByRank::getData(Time timeStart, Time timeRange, double pixelLength) {
    // get the start location
    FileOffset startLoc = findTimeInInterval(timeStart, minloc, maxloc);

    // get the end location
    Time endTime = timeStart + timeRange;
    FileOffset endLoc = min(
        findTimeInInterval(endTime, minloc, maxloc) + SIZE_OF_TRACE_RECORD, maxloc);

    // get the number of records to display
    Long numRec = 1 + getNumberOfRecords(startLoc, endLoc);

    // ----------------------------------------------------------------------
    // if the data to display fits in the display zone, we don't need the
    // recursive binary search; we simply display everything from the file
    // ----------------------------------------------------------------------
    if (numRec <= numPixelsH) {
        // display all the records
        for (FileOffset i = startLoc; i <= endLoc;) {
            listCPID->push_back(getData(i));
            // one record of data consists of an integer (cpid) and a long (time)
            i = i + SIZE_OF_TRACE_RECORD;
        }
    } else {
        // the data is too big: try to fit the "big" data into the display;
        // fills in the rest of the data for this process timeline
        sampleTimeLine(startLoc, endLoc, 0, numPixelsH, 0, pixelLength, timeStart);
    }

    // ----------------------------------------------------------------------
    // get the last data if necessary: the rightmost time is still less than
    // the upper limit, so we can add the rightmost data to the list of samples
    // ----------------------------------------------------------------------
    if (endLoc < maxloc) {
        TimeCPID dataLast = getData(endLoc);
        addSample(listCPID->size(), dataLast);
    }

    // ----------------------------------------------------------------------
    // get the first data if necessary: the leftmost time is still bigger than
    // the lower limit; similarly, we add it to the list
    // ----------------------------------------------------------------------
    if (startLoc > minloc) {
        TimeCPID dataFirst = getData(startLoc - SIZE_OF_TRACE_RECORD);
        addSample(0, dataFirst);
    }
    postProcess();
}
Response *Controller::handleRequest(Request &request) {
    Response *response = process(request);
    // only post-process when the handler actually produced a response
    if (response != NULL) {
        postProcess(request, *response);
    }
    return response;
}
QList<QSharedDataPointer<QNetworkInterfacePrivate> > QNetworkInterfaceManager::allInterfaces() {
    QList<QNetworkInterfacePrivate *> list = postProcess(scan());
    QList<QSharedDataPointer<QNetworkInterfacePrivate> > result;

    // wrap each raw pointer in a shared data pointer
    foreach (QNetworkInterfacePrivate *ptr, list)
        result << QSharedDataPointer<QNetworkInterfacePrivate>(ptr);

    return result;
}
void MeshBase::loadFromObj( const std::string& filename ) {
    preProcess();
    loadInfoFromObj( filename );  // first pass: gather mesh info
    allocateData();
    startWritingData();
    loadDataFromObj( filename );  // second pass: load the actual data
    computeAabb();
    postProcess();
    finishWritingData();
}
/** Handles the receiver by storing received bits and post-processing them **/
void rx_handler() {
    // Indices 0..67 are written before post-processing triggers, so the
    // buffer needs 68 entries (the original [67] overflowed on the last write)
    static uint32_t edge_timeStamp[68] = {0};
    static int rx_current_state = -1;

    // If this is the first interrupt indicating signal start, init the timer
    // for subsequent interrupts
    if (rx_current_state == -1) {
        init_timer_1A();
        rx_current_state++;
        return;
    }

    // Read the amount of time taken between edges, then set the timer back to 0
    edge_timeStamp[rx_current_state] = TIMER1->TAR;
    TIMER1->TAV = 0;

    // Determine whether the header is valid
    if (rx_current_state == 0) {
        edge_timeStamp[0] = (625 * edge_timeStamp[0]) / 10000;
        // If the 1st, 16T-long header is not between 8.8 ms and 9.2 ms, it is invalid
        if (!(8800 < edge_timeStamp[0] && edge_timeStamp[0] < 9200)) {
            rx_current_state = -1;
            return;
        }
    } else if (rx_current_state == 1) {
        edge_timeStamp[1] = (625 * edge_timeStamp[1]) / 10000;
        // If the 2nd, 8T-long header is not between 4.3 ms and 4.7 ms, it is invalid or empty
        if (!(4300 < edge_timeStamp[1] && edge_timeStamp[1] < 4700)) {
            rx_current_state = -1;
            return;
        }
    }
    rx_current_state++;

    // If we've reached the stop bit, do the post-processing, then reset the state to inactive
    if (rx_current_state == 68) {
        postProcess(edge_timeStamp);
        // New
        delay_timer_0A(300);
        rx_interrupt_flag = 0;
        rx_current_state = -1;
        return;
    }
}
bool ReceivedPacketProcessor::process() {
    quint64 now = usecTimestampNow();
    quint64 sinceLastWindow = now - _lastWindowAt;

    if (sinceLastWindow > USECS_PER_SECOND) {
        lock();
        float secondsSinceLastWindow = sinceLastWindow / USECS_PER_SECOND;
        float incomingPacketsPerSecondInWindow = (float)_lastWindowIncomingPackets / secondsSinceLastWindow;
        _incomingPPS.updateAverage(incomingPacketsPerSecondInWindow);

        float processedPacketsPerSecondInWindow = (float)_lastWindowProcessedPackets / secondsSinceLastWindow;
        _processedPPS.updateAverage(processedPacketsPerSecondInWindow);

        _lastWindowAt = now;
        _lastWindowIncomingPackets = 0;
        _lastWindowProcessedPackets = 0;
        unlock();
    }

    if (_packets.size() == 0) {
        _waitingOnPacketsMutex.lock();
        _hasPackets.wait(&_waitingOnPacketsMutex, getMaxWait());
        _waitingOnPacketsMutex.unlock();
    }

    preProcess();
    if (!_packets.size()) {
        return isStillRunning();
    }

    lock();
    std::list<NodeSharedReceivedMessagePair> currentPackets;
    currentPackets.swap(_packets);
    unlock();

    for (auto& packetPair : currentPackets) {
        processPacket(packetPair.second, packetPair.first);
        _lastWindowProcessedPackets++;
        midProcess();
    }

    lock();
    for (auto& packetPair : currentPackets) {
        _nodePacketCounts[packetPair.first->getUUID()]--;
    }
    unlock();

    postProcess();
    return isStillRunning(); // keep running till they terminate us
}
int main(int argc, char **argv) {
    uchar4 *h_inputImageRGBA,  *d_inputImageRGBA;
    uchar4 *h_outputImageRGBA, *d_outputImageRGBA;
    unsigned char *d_redBlurred, *d_greenBlurred, *d_blueBlurred;

    float *h_filter;
    int filterWidth;

    std::string input_file;
    std::string output_file;
    if (argc == 3) {
        input_file = std::string(argv[1]);
        output_file = std::string(argv[2]);
    } else {
        std::cerr << "Usage: ./hw input_file output_file" << std::endl;
        exit(1);
    }

    // load the image and give us our input and output pointers
    preProcess(&h_inputImageRGBA, &h_outputImageRGBA, &d_inputImageRGBA, &d_outputImageRGBA,
               &d_redBlurred, &d_greenBlurred, &d_blueBlurred,
               &h_filter, &filterWidth, input_file);

    allocateMemoryAndCopyToGPU(numRows(), numCols(), h_filter, filterWidth);
    GpuTimer timer;
    timer.Start();
    // call the students' code
    your_gaussian_blur(h_inputImageRGBA, d_inputImageRGBA, d_outputImageRGBA,
                       numRows(), numCols(),
                       d_redBlurred, d_greenBlurred, d_blueBlurred, filterWidth);
    timer.Stop();
    cudaDeviceSynchronize();
    //checkCudaErrors(cudaGetLastError());
    int err = printf("%f msecs.\n", timer.Elapsed());

    if (err < 0) {
        // Couldn't print! Probably the student closed stdout - bad news
        std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
        exit(1);
    }

    cleanup();
    // check results and output the blurred image
    postProcess(output_file);

    checkCudaErrors(cudaFree(d_redBlurred));
    checkCudaErrors(cudaFree(d_greenBlurred));
    checkCudaErrors(cudaFree(d_blueBlurred));

    return 0;
}
// comment in header
void Builder::postProcess() {
    std::unordered_set<const Block*> reachableBlocks;
    std::unordered_set<Id> unreachableDefinitions;
    // Collect IDs defined in unreachable blocks. For each function, label the
    // reachable blocks first. Then for each unreachable block, collect the
    // result IDs of the instructions in it.
    for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
        Function* f = *fi;
        Block* entry = f->getEntryBlock();
        inReadableOrder(entry, [&reachableBlocks](const Block* b) { reachableBlocks.insert(b); });
        for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) {
            Block* b = *bi;
            if (reachableBlocks.count(b) == 0) {
                for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ii++)
                    unreachableDefinitions.insert(ii->get()->getResultId());
            }
        }
    }

    // Remove unneeded decorations, for unreachable instructions
    decorations.erase(std::remove_if(decorations.begin(), decorations.end(),
        [&unreachableDefinitions](std::unique_ptr<Instruction>& I) -> bool {
            Id decoration_id = I.get()->getIdOperand(0);
            return unreachableDefinitions.count(decoration_id) != 0;
        }),
        decorations.end());

    // Add per-instruction capabilities, extensions, etc.,
    // process all reachable instructions...
    for (auto bi = reachableBlocks.cbegin(); bi != reachableBlocks.cend(); ++bi) {
        const Block* block = *bi;
        const auto function = [this](const std::unique_ptr<Instruction>& inst) {
            postProcessReachable(*inst.get());
        };
        std::for_each(block->getInstructions().begin(), block->getInstructions().end(), function);
    }

    // process all block-contained instructions
    for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
        Function* f = *fi;
        for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) {
            Block* b = *bi;
            for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ii++)
                postProcess(*ii->get());
        }
    }
}
int main(int argc, char **argv) {
    uchar4 *h_sourceImg, *h_destImg, *h_blendedImg;
    size_t numRowsSource, numColsSource;

    std::string input_source_file;
    std::string input_dest_file;
    std::string output_file;
    if (argc == 4) {
        input_source_file = std::string(argv[1]);
        input_dest_file = std::string(argv[2]);
        output_file = std::string(argv[3]);
    } else {
        std::cerr << "Usage: ./hw input_source_file input_dest_file output_file" << std::endl;
        exit(1);
    }

    // load the image and give us our input and output pointers
    preProcess(&h_sourceImg, numRowsSource, numColsSource,
               &h_destImg, &h_blendedImg,
               input_source_file, input_dest_file);

    GpuTimer timer;
    timer.Start();
    // call the students' code
    your_blend(h_sourceImg, numRowsSource, numColsSource, h_destImg, h_blendedImg);
    timer.Stop();
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
    int err = printf("%f msecs.\n", timer.Elapsed());

    if (err < 0) {
        // Couldn't print! Probably the student closed stdout - bad news
        std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
        exit(1);
    }

    // check results and output the blended image
    postProcess(h_blendedImg, numRowsSource, numColsSource, output_file);

    delete[] h_destImg;
    delete[] h_sourceImg;
    delete[] h_blendedImg;
    return 0;
}
LyricsFetcher::Result LyricsFetcher::fetch(const std::string &artist, const std::string &title) {
    Result result;
    result.first = false;

    Regex::RE artist_exp("%artist%");
    Regex::RE title_exp("%title%");

    std::string url = urlTemplate();
    artist_exp.ReplaceAll(artist, url);
    title_exp.ReplaceAll(title, url);

    std::string data;
    CURLcode code = Curl::perform(data, url);

    if (code != CURLE_OK) {
        result.second = curl_easy_strerror(code);
        return result;
    }

    auto lyrics = getContent(regex(), data);

    if (lyrics.empty() || notLyrics(data)) {
        result.second = msgNotFound;
        return result;
    }

    data.clear();
    for (auto it = lyrics.begin(); it != lyrics.end(); ++it) {
        postProcess(*it);
        if (!it->empty()) {
            data += *it;
            if (it != lyrics.end()-1)
                data += "\n\n----------\n\n";
        }
    }

    result.second = data;
    result.first = true;
    return result;
}
String demangleAndSimplify(const String & mangled, int isDataMember, int entryType) {
    String buildingResult;
    const char *m = mangled;
    //int mlen = mangled.length();
    String saveMangled = mangled; // dem may modify the string
    //const char *d = demangle_withlen(m, mlen);
    char buf[MAXDBUF];
    char sbuf[MAXDBUF];
    DEM dm;
    int putbackUnder = 0;

    if (*m == '_' && hasextra_(entryType) && !isDataMember) {
        putbackUnder = 1;
    }

    if( dem((char*)m, &dm, sbuf) < 0 || // error
        dm.type == DEM_PTBL ||
        dm.type == DEM_STI ||
        dm.type == DEM_STD ) {
        buildingResult = saveMangled;
        beVerbose(saveMangled, saveMangled, entryType, isDataMember);
        return buildingResult;
    }

    dem_print( &dm, buf );
    postProcess(isDataMember, saveMangled, buildingResult, buf);
    // postProcess may decide to use the mangled name even if it is
    // demangleable, for instance after the first instance of an overloaded
    // function name when using the -d option

    if( ( buildingResult != saveMangled ) && ( putbackUnder == 1 ) )
        buildingResult = "_" + buildingResult;

    // convert any non-C function-name characters in the name to a C string
    if( prepareDebug )
        translate( buildingResult );

    beVerbose(saveMangled, buildingResult, entryType, isDataMember);
    return buildingResult;
}
/** @brief called to process everything */
virtual void process() {
    // is it OK?
    if( _renderArgs.renderWindow.x2 - _renderArgs.renderWindow.x1 == 0 ||
        _renderArgs.renderWindow.y2 - _renderArgs.renderWindow.y1 == 0 ) {
        BOOST_THROW_EXCEPTION( exception::ImageFormat()
            << exception::user( "RenderWindow empty !" ) );
    }
    // call the pre MP pass
    preProcess();
    // call the base multi threading code, which should put pre & post thread calls in too
    multiThread( _nbThreads );
    // call the post MP pass
    postProcess();
}
Uint32 TrafficShapedSocket::read(bt::Uint32 max_bytes_to_read, bt::TimeStamp now) {
    Uint32 br = 0;
    bool no_limit = (max_bytes_to_read == 0);
    Uint32 ba = sock->bytesAvailable();
    if (ba == 0) {
        // For some strange reason, sometimes bytesAvailable returns 0 while there
        // are bytes to read, so give ba the maximum value it can be
        ba = max_bytes_to_read > 0 ? max_bytes_to_read : OUTPUT_BUFFER_SIZE;
    }

    while ((br < max_bytes_to_read || no_limit) && ba > 0) {
        Uint32 tr = ba;
        if (tr > OUTPUT_BUFFER_SIZE)
            tr = OUTPUT_BUFFER_SIZE;
        if (!no_limit && tr + br > max_bytes_to_read)
            tr = max_bytes_to_read - br;

        int ret = sock->recv(input_buffer, tr);
        if (ret > 0) {
            mutex.lock();
            down_speed->onData(ret, now);
            mutex.unlock();
            if (rdr) {
                postProcess(input_buffer, ret);
                rdr->onDataReady(input_buffer, ret);
            }
            br += ret;
            ba -= ret;
        } else if (ret < 0) {
            return br;
        } else {
            sock->close();
            return br;
        }
    }
    return br;
}
int main(int argc, char **argv) {
    float *d_luminance;
    unsigned int *d_cdf;

    size_t numRows, numCols;
    unsigned int numBins;

    std::string input_file;
    std::string output_file;
    if (argc == 3) {
        input_file = std::string(argv[1]);
        output_file = std::string(argv[2]);
    } else {
        std::cerr << "Usage: ./hw input_file output_file" << std::endl;
        exit(1);
    }

    // load the image and give us our input and output pointers
    preProcess(&d_luminance, &d_cdf, &numRows, &numCols, &numBins, input_file);

    GpuTimer timer;
    float min_logLum, max_logLum;
    min_logLum = 0.f;
    max_logLum = 1.f;
    timer.Start();
    // call the students' code
    your_histogram_and_prefixsum(d_luminance, d_cdf, min_logLum, max_logLum,
                                 numRows, numCols, numBins);
    timer.Stop();
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
    int err = printf("%f msecs.\n", timer.Elapsed());

    if (err < 0) {
        // Couldn't print! Probably the student closed stdout - bad news
        std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
        exit(1);
    }

    // check results and output the tone-mapped image
    postProcess(output_file, numRows, numCols, min_logLum, max_logLum);

    return 0;
}
void DarkenManager::render() {
    m_frames++;
#ifdef _WIN32
    {
        // update the window title with an FPS counter once per second
        UINT64 ticks;
        QueryPerformanceCounter((LARGE_INTEGER*)&ticks);
        UINT64 curr_ticks = ticks - m_prev_fps_ticks;
        double t = (double)curr_ticks / m_ticksPerSecond;
        if (t >= 1) {
            float fps = (float)(m_frames / t);
            std::stringstream caption;
            caption << "cl_gl@" << (int)fps << "fps";
            glutSetWindowTitle(caption.str().c_str());
            m_prev_fps_ticks = ticks;
            m_frames = 0;
        }
    }
#endif //_WIN32

    // render the scene into the offscreen FBO
    glBindFramebuffer( GL_FRAMEBUFFER, m_fbo );
    glClearColor( 0.2f, 0.2f, 0.2f, 0.0f );
    glClear( GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT );
    glUseProgram(m_const_col_prog);
    glBindVertexArray( m_geom_vao );
    glDrawArrays( GL_TRIANGLES, 0, 3 );

    // do OpenCL
    postProcess();

    // draw the post-processed texture to the default framebuffer
    glBindFramebuffer( GL_FRAMEBUFFER, 0 );
    glClearColor( 0.2f, 0.0f, 0.0f, 0.0f );
    glClear( GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT );
    glUseProgram(m_tex_prog);
    glBindVertexArray( m_quad_vao );
    glBindTexture(GL_TEXTURE_2D, m_tex);
    glDrawArrays( GL_TRIANGLE_STRIP, 0, 4 );
    glUseProgram(0);
}
void CameraTestToggleCommand::onPreviewDataChanged() {
    CleanupSettingsModel *model = CleanupSettingsModel::instance();

    // Retrieve level under cleanup
    TXshSimpleLevel *sl;
    TFrameId fid;
    model->getCleanupFrame(sl, fid);

    // In case the level changes, release all previously previewed images
    if (m_sl.getPointer() != sl)
        clean();
    m_sl = sl;

    if (sl) {
        if (!(sl->getFrameStatus(fid) & TXshSimpleLevel::CleanupPreview)) {
            m_fids.push_back(fid);
            sl->setFrameStatus(
                fid, sl->getFrameStatus(fid) | TXshSimpleLevel::CleanupPreview);
        }
        postProcess();
    }
}