int main() { CImg<double> image("../../../../images/futbol.jpg"); CImg<double> img=image.get_noise(25,0);//gaussiano img.noise(5,2);//impulsivo CImg<double> imgFiltrada=denoise(img,3,12,0,4); cout<<"********ECM*********\n"; cout<<"ECM limpia-degradada: "<<image.MSE(img)<<endl; cout<<"ECM limpia-filtrada : "<<image.MSE(imgFiltrada)<<endl<<endl; (image,img,imgFiltrada).display("original -- img ruidosa -- imgFiltrada"); // //Pruebo el pseudoinverso // img = filtrar3(image,gaussian_mask(image,25.0)); // CImg<double> imgArreglada = filtrado_pseudoinverso3(img,gaussian_mask(image,25.0),0.0000001); // cout<<"********ECM*********\n"; // cout<<"ECM limpia-degradada: "<<image.MSE(img)<<endl; // cout<<"ECM limpia-filtrada : "<<image.MSE(imgArreglada)<<endl<<endl; // (image,img,imgArreglada).display("original -- img desenfocada -- imgFiltrada"); return 0; }
/* Worker entry point: dispatch one device task (film convert, shader, or
 * render). For RENDER tasks, tiles are pulled from the task until none remain;
 * each PATH_TRACE tile is integrated sample-by-sample and DENOISE tiles are
 * handed to the denoiser. */
void thread_run(DeviceTask *task)
{
    if(task->type == DeviceTask::FILM_CONVERT) {
        film_convert(*task, task->buffer, task->rgba_byte, task->rgba_half);
    }
    else if(task->type == DeviceTask::SHADER) {
        shader(*task);
    }
    else if(task->type == DeviceTask::RENDER) {
        RenderTile tile;
        DenoisingTask denoising(this, *task);

        /* Keep rendering tiles until done. */
        while(task->acquire_tile(this, tile)) {
            if(tile.task == RenderTile::PATH_TRACE) {
                int start_sample = tile.start_sample;
                int end_sample = tile.start_sample + tile.num_samples;

                for(int sample = start_sample; sample < end_sample; sample++) {
                    /* On cancel, stop early — unless the task asked for the
                     * queue to be finished, in which case keep going. */
                    if(task->get_cancel()) {
                        if(task->need_finish_queue == false)
                            break;
                    }

                    path_trace(tile, sample);

                    /* tile.sample records how many samples are complete, so
                     * progress/preview reflects the just-finished sample. */
                    tile.sample = sample + 1;

                    task->update_progress(&tile, tile.w*tile.h);
                }

                /* Complete kernel execution before release tile */
                /* This helps in multi-device render;
                 * The device that reaches the critical-section function
                 * release_tile waits (stalling other devices from entering
                 * release_tile) for all kernels to complete. If device1 (a
                 * slow-render device) reaches release_tile first then it would
                 * stall device2 (a fast-render device) from proceeding to render
                 * next tile. */
                clFinish(cqCommandQueue);
            }
            else if(tile.task == RenderTile::DENOISE) {
                /* Denoising operates on the fully-sampled tile. */
                tile.sample = tile.start_sample + tile.num_samples;
                denoise(tile, denoising);
                task->update_progress(&tile, tile.w*tile.h);
            }

            task->release_tile(tile);
        }
    }
}
/* Worker entry point (split-kernel variant): flush pending texture uploads,
 * then dispatch the task. RENDER tasks allocate a device-side dummy
 * KernelGlobals buffer used by the split kernel, then loop over tiles. */
void thread_run(DeviceTask *task)
{
    /* Make sure texture updates are visible to kernels before any work. */
    flush_texture_buffers();

    if(task->type == DeviceTask::FILM_CONVERT) {
        film_convert(*task, task->buffer, task->rgba_byte, task->rgba_half);
    }
    else if(task->type == DeviceTask::SHADER) {
        shader(*task);
    }
    else if(task->type == DeviceTask::RENDER) {
        RenderTile tile;
        DenoisingTask denoising(this);

        /* Allocate buffer for kernel globals */
        device_only_memory<KernelGlobalsDummy> kgbuffer(this, "kernel_globals");
        kgbuffer.alloc_to_device(1);

        /* Keep rendering tiles until done. */
        while(task->acquire_tile(this, tile)) {
            if(tile.task == RenderTile::PATH_TRACE) {
                /* NOTE(review): assert duplicates the branch condition — harmless,
                 * likely kept from an earlier refactor. */
                assert(tile.task == RenderTile::PATH_TRACE);
                /* Measure per-tile render time via RAII timer. */
                scoped_timer timer(&tile.buffers->render_time);

                /* Split kernel drives the whole sample loop internally;
                 * progress updates presumably happen inside — TODO confirm. */
                split_kernel->path_trace(task, tile, kgbuffer, *const_mem_map["__data"]);

                /* Complete kernel execution before release tile. */
                /* This helps in multi-device render;
                 * The device that reaches the critical-section function
                 * release_tile waits (stalling other devices from entering
                 * release_tile) for all kernels to complete. If device1 (a
                 * slow-render device) reaches release_tile first then it would
                 * stall device2 (a fast-render device) from proceeding to render
                 * next tile. */
                clFinish(cqCommandQueue);
            }
            else if(tile.task == RenderTile::DENOISE) {
                /* Denoising operates on the fully-sampled tile. */
                tile.sample = tile.start_sample + tile.num_samples;
                denoise(tile, denoising, *task);
                task->update_progress(&tile, tile.w*tile.h);
            }

            task->release_tile(tile);
        }

        kgbuffer.free();
    }
}
void DriveSystem::Estimate(float _deltaTime, SystemRequierements& _requirements) { // Compute how much of the required force can by provided by this drive system? // Decompose the vectors float currentThrustAbs = denoise(DAMPING * len( _requirements.thrust )); float currentTorqueAbs = denoise(DAMPING * len( _requirements.torque )); if( currentThrustAbs + currentTorqueAbs > 1e-5f ) { // Get maximal drive properties for current directions Vec4 maxThrust(0.0f), maxTorque(0.0f); if( currentThrustAbs > 0.0f ) maxThrust = m_maxThrust(_requirements.thrust); if( currentTorqueAbs > 0.0f ) maxTorque = m_maxTorque(_requirements.torque); // Assumption: both the maxThrust and maxTorque require full energy. // Compute a relation how much is needed. float relTh = min(1.0f, currentThrustAbs / max(1e-6f, maxThrust[0])); float relTo = min(1.0f, currentTorqueAbs / max(1e-6f, maxTorque[0])); // Scale both down if they need more than 100% float sum = max(1.0f, relTh + relTo); relTh /= sum; relTo /= sum; // Apply what is remaining, additionally add the side effects from both components. // Use normalized direction _requirements.thrust / currentThrustAbs with new scale. m_currentThrust = maxThrust[0] * _requirements.thrust * (relTh / max(1e-6f, currentThrustAbs)); m_currentThrust[0] += denoise(maxTorque[1] * relTo * TORQUE_FORCE_COUPLING); m_currentThrust[1] += denoise(maxTorque[2] * relTo * TORQUE_FORCE_COUPLING); m_currentThrust[2] += denoise(maxTorque[3] * relTo * TORQUE_FORCE_COUPLING); m_currentTorque = maxTorque[0] * _requirements.torque * (relTo / max(1e-6f, currentTorqueAbs)); m_currentTorque[0] += denoise(maxThrust[1] * relTh * TORQUE_FORCE_COUPLING); m_currentTorque[1] += denoise(maxThrust[2] * relTh * TORQUE_FORCE_COUPLING); m_currentTorque[2] += denoise(maxThrust[3] * relTh * TORQUE_FORCE_COUPLING); // TODO: the current resulting Torque and thrust are not optimal. Iterative refinement might help! 
// How large is a percentage of how much energy must be used now m_energyDemand = m_maxEnergyDrain * _deltaTime * (relTh + relTo); } else { m_currentThrust = m_currentTorque = Vec3(0.0f); m_energyDemand = 0.0f; } }
/* Filter one input frame through the hqdn3d denoiser.
 * In-place when the frame is writable and the filter is enabled; otherwise a
 * new output buffer is allocated. Takes ownership of `in`. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    HQDN3DContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int direct, c;

    /* In-place only when writable AND enabled — a disabled filter must not
     * modify the frame it passes through. */
    if (av_frame_is_writable(in) && !ctx->is_disabled) {
        direct = 1;
        out = in;
    } else {
        direct = 0;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    /* Denoise the three planes; chroma planes use the subsampled dimensions
     * and their own coefficient tables. frame_prev keeps temporal state. */
    for (c = 0; c < 3; c++) {
        denoise(s, in->data[c], out->data[c],
                s->line, &s->frame_prev[c],
                FF_CEIL_RSHIFT(in->width,  (!!c * s->hsub)),
                FF_CEIL_RSHIFT(in->height, (!!c * s->vsub)),
                in->linesize[c], out->linesize[c],
                s->coefs[c ? CHROMA_SPATIAL : LUMA_SPATIAL],
                s->coefs[c ? CHROMA_TMP     : LUMA_TMP]);
    }

    /* When disabled, discard the denoised buffer and forward the original.
     * NOTE(review): denoise() still ran above, presumably so frame_prev stays
     * warm for when the filter is re-enabled — confirm before "optimizing". */
    if (ctx->is_disabled) {
        av_frame_free(&out);
        return ff_filter_frame(outlink, in);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
/* Build a binary foreground mask for the current frame and, optionally, track
 * blobs into `targets`. Returns the mask (also kept in mMask).
 * The detection thresholds below are hand-tuned per input video. */
Mat TargetExtractor::extract(const Mat& frame, map<int, Target>& targets, bool track)
{
    mFrame = frame;

    /* for 2.avi:
     *   movement: 0.008;
     *   color: 120, 0.2;
     *   regionGrow: enable;
     * for 6.avi:
     *   movement: 0.012;
     *   color: 150, 0.4;
     *   regionGrow: disable;
     */
    movementDetect(0.012);      // motion threshold (tuned for 6.avi)
    colorDetect(150, 0.4);      // color gate (tuned for 6.avi)
    denoise(7, 5);              // morphological noise removal
    fill(7, 5);                 // close holes in the mask
    medianBlur(mMask, mMask, 3);

    // TODO: make use of accumulate result
    //regionGrow();
    //fill(7, 6);
    //medianBlur(mMask, mMask, 3);
    //Mat element = getStructuringElement(MORPH_CROSS, Size(3, 3));
    //erode(mMask, mMask, element);
    //dilate(mMask, mMask, element);

    // Drop connected components too small to be real targets.
    smallAreaFilter(12, 8);

#ifdef DEBUG_OUTPUT
    namedWindow("mask");
    moveWindow("mask", 350, 120);
    imshow("mask", mMask);
#endif

    if (track) {
        blobTrack(targets);
    }

    return mMask;
}
int main( int argc, char* argv[] ) { std::string input_file; std::string output_file; float variance; int patchRadius; int searchRadius; po::options_description desc("Allowed options"); desc.add_options() ("help", "produce help message") ("input,i", po::value<std::string>(&input_file), "input file") ("output,o", po::value<std::string>(&output_file), "output file") ("variance,v", po::value<float>(&variance)->default_value(10.0), "variance") ("patch-radius,p", po::value<int>(&patchRadius)->default_value(2), "patch radius") ("search-radius,s", po::value<int>(&searchRadius)->default_value(4), "search radius") ; po::variables_map vm; po::store(po::parse_command_line(argc, argv, desc), vm); po::notify(vm); if (vm.count("help")) { std::cout << desc << "\n"; return 1; } if (vm.count("input") == 0) { std::cout << "Missing input filename\n" << desc << "\n"; return 1; } if (vm.count("output") == 0) { std::cout << "Missing output filename\n" << desc << "\n"; return 1; } const unsigned int Dimension = 3; //typedef float PixelType; //typedef itk::Image< PixelType, Dimension > ImageType; typedef itk::ImageFileReader<ImageType> ReaderType; typedef itk::ImageFileWriter<ImageType> WriterType; ReaderType::Pointer reader = ReaderType::New(); reader->SetFileName(input_file); reader->Update(); ImageType::Pointer image = reader->GetOutput(); ImageType::Pointer output_image = ImageType::New(); output_image->SetRegions(image->GetLargestPossibleRegion()); output_image->Allocate(); output_image->SetSpacing(image->GetSpacing()); //denoise(image, output_image); denoise( image->GetPixelContainer()->GetBufferPointer(), output_image->GetPixelContainer()->GetBufferPointer(), image->GetLargestPossibleRegion().GetSize()[0], image->GetLargestPossibleRegion().GetSize()[1], image->GetLargestPossibleRegion().GetSize()[2], variance, patchRadius, searchRadius); WriterType::Pointer writer = WriterType::New(); writer->SetFileName(output_file); writer->SetInput(output_image); try { writer->Update(); } catch( 
itk::ExceptionObject & error ) { std::cerr << "Error: " << error << std::endl; return EXIT_FAILURE; } return EXIT_SUCCESS; }
/* Worker entry point (older split-kernel variant): dispatch one device task.
 * RENDER tasks allocate a device buffer sized like the OpenCL KernelGlobals
 * (mirrored locally below just to obtain sizeof), then loop over tiles. */
void thread_run(DeviceTask *task)
{
    if(task->type == DeviceTask::FILM_CONVERT) {
        film_convert(*task, task->buffer, task->rgba_byte, task->rgba_half);
    }
    else if(task->type == DeviceTask::SHADER) {
        shader(*task);
    }
    else if(task->type == DeviceTask::RENDER) {
        RenderTile tile;

        /* Copy dummy KernelGlobals related to OpenCL from kernel_globals.h to
         * fetch its size.
         */
        typedef struct KernelGlobals {
            ccl_constant KernelData *data;
#define KERNEL_TEX(type, ttype, name) \
            ccl_global type *name;
#include "kernel/kernel_textures.h"
#undef KERNEL_TEX
            SplitData split_data;
            SplitParams split_param_data;
        } KernelGlobals;

        /* Allocate buffer for kernel globals */
        device_memory kgbuffer;
        kgbuffer.resize(sizeof(KernelGlobals));
        mem_alloc("kernel_globals", kgbuffer, MEM_READ_WRITE);

        /* Keep rendering tiles until done. */
        while(task->acquire_tile(this, tile)) {
            if(tile.task == RenderTile::PATH_TRACE) {
                /* NOTE(review): assert duplicates the branch condition. */
                assert(tile.task == RenderTile::PATH_TRACE);

                /* Split kernel drives the whole sample loop internally. */
                split_kernel->path_trace(task, tile, kgbuffer, *const_mem_map["__data"]);

                /* Complete kernel execution before release tile. */
                /* This helps in multi-device render;
                 * The device that reaches the critical-section function
                 * release_tile waits (stalling other devices from entering
                 * release_tile) for all kernels to complete. If device1 (a
                 * slow-render device) reaches release_tile first then it would
                 * stall device2 (a fast-render device) from proceeding to render
                 * next tile. */
                clFinish(cqCommandQueue);
            }
            else if(tile.task == RenderTile::DENOISE) {
                /* Denoising operates on the fully-sampled tile. */
                tile.sample = tile.start_sample + tile.num_samples;
                denoise(tile, *task);
                task->update_progress(&tile, tile.w*tile.h);
            }

            task->release_tile(tile);
        }

        mem_free(kgbuffer);
    }
}
void denoise_test(const QString& signalWithNoiseFileName, const QString& noiseFileName, const QString& outputFileName) { WavFile signalFile(signalWithNoiseFileName); signalFile.open(WavFile::ReadOnly); WavFile noiseFile(noiseFileName); noiseFile.open(WavFile::ReadOnly); if (signalFile.getHeader() != noiseFile.getHeader()) { qDebug() << "Signal and noise files have the different headers!"; return; } WavFile outputFile(outputFileName); outputFile.open(WavFile::WriteOnly, signalFile.getHeader()); const int frameSize = 1024; int minSize = qMin(signalFile.size(), noiseFile.size()); int frameNum = minSize / frameSize; float adaptation_rate = 1.65; float error = 0.1; float* signal_with_noise = new float[frameSize]; float* noise = new float[frameSize]; float* signal = new float[frameSize]; float* filter = new float[frameSize]; memset(signal, 0, frameSize * sizeof(float)); memset(filter, 0, frameSize * sizeof(float)); for (int i = 0; i < frameNum; i++) { qDebug() << "Frame #" << i; Signal signalFrame = signalFile.read(frameSize); Signal noiseFrame = noiseFile.read(frameSize); for (int j = 0; j < frameSize; j++) { signal_with_noise[j] = static_cast<float>(signalFrame[j].toInt()) * pow(2, -15); noise[j] = static_cast<float>(noiseFrame[j].toInt()) * pow(2, -15); } memset(filter, 0, frameSize * sizeof(float)); float final_err = error; final_err = denoise(signal_with_noise, noise, filter, signal, frameSize, final_err, adaptation_rate); qDebug() << "Adapt error: " << final_err; Signal outputFrame(frameSize, signalFile.getHeader()); for (int j = 0; j < frameSize; j++) { try { outputFrame[j] = static_cast<int>(signal[j] * pow(2, 15)); } catch (Sample::OutOfRangeValue exc) { int tmp = static_cast<int>(signal[j] * pow(2, 15)); if (tmp > 0) { outputFrame[j] = 32767; } else { outputFrame[j] = -32768; } } } outputFile.write(outputFrame); } delete[] signal_with_noise; delete[] noise; delete[] signal; delete[] filter; }
/* CPU render worker: constructs per-thread KernelGlobals in a device-memory
 * buffer via placement new, optionally loads the split kernel, then pulls and
 * processes tiles (path trace or denoise) until the task is done or canceled. */
void thread_render(DeviceTask &task)
{
    /* Bail out early on cancel, unless the queue must be drained first. */
    if (task_pool.canceled()) {
        if (task.need_finish_queue == false)
            return;
    }

    /* allocate buffer for kernel globals */
    device_only_memory<KernelGlobals> kgbuffer(this, "kernel_globals");
    kgbuffer.alloc_to_device(1);
    /* Placement-new the per-thread globals into the device buffer; must be
     * destroyed explicitly at the end of this function. */
    KernelGlobals *kg = new ((void *)kgbuffer.device_pointer)
        KernelGlobals(thread_kernel_globals_init());

    profiler.add_state(&kg->profiler);

    CPUSplitKernel *split_kernel = NULL;
    if (use_split_kernel) {
        split_kernel = new CPUSplitKernel(this);
        if (!split_kernel->load_kernels(requested_features)) {
            /* NOTE(review): this early exit frees the globals and buffer but
             * does not call profiler.remove_state() or kg->~KernelGlobals()
             * like the normal path below — confirm whether that is intended. */
            thread_kernel_globals_free((KernelGlobals *)kgbuffer.device_pointer);
            kgbuffer.free();
            delete split_kernel;
            return;
        }
    }

    RenderTile tile;
    DenoisingTask denoising(this, task);
    denoising.profiler = &kg->profiler;

    while (task.acquire_tile(this, tile)) {
        if (tile.task == RenderTile::PATH_TRACE) {
            if (use_split_kernel) {
                device_only_memory<uchar> void_buffer(this, "void_buffer");
                split_kernel->path_trace(&task, tile, kgbuffer, void_buffer);
            }
            else {
                path_trace(task, tile, kg);
            }
        }
        else if (tile.task == RenderTile::DENOISE) {
            denoise(denoising, tile);
            task.update_progress(&tile, tile.w * tile.h);
        }

        task.release_tile(tile);

        /* Re-check cancellation between tiles. */
        if (task_pool.canceled()) {
            if (task.need_finish_queue == false)
                break;
        }
    }

    /* Teardown order matters: unregister profiler state, free per-thread
     * kernel data, run the globals destructor, then release the buffer. */
    profiler.remove_state(&kg->profiler);
    thread_kernel_globals_free((KernelGlobals *)kgbuffer.device_pointer);
    kg->~KernelGlobals();
    kgbuffer.free();
    delete split_kernel;
}