void SensorSpecHamamatsu::takeMeasurementAveraging() { boolean isAvgReady = takeAverageMeasurement(); if (isAvgReady) { if (averageQueue == QUEUE_BASELINE) { // Store measurement in baseline (higher priority but infrequent condition) for (int i=0; i<SPEC_CHANNELS; i++) { baseline[i] = average[i]; } baselineOverexposed = postProcessing(&baseline[0]); hasBaseline = true; averageQueue = QUEUE_EMPTY; } else { // Store measurement in experimental data (normal condition) for (int i=0; i<SPEC_CHANNELS; i++) { data[i] = average[i]; } dataOverexposed = postProcessing(&data[0]); // Find peak channel value uint16_t maxValue = 0; for (int i=0; i<SPEC_CHANNELS; i++) { if (data[i] > maxValue) { peakChannel = i; maxValue = data[i]; } } averageQueue = QUEUE_EMPTY; } } }
/// Drives the number-parsing finite-state machine over the input stream.
/// Characters are consumed one at a time and dispatched to the per-state
/// handler, each of which returns the next state. The loop stops when a
/// handler (or EOF) yields States::Quit; the input is then rewound to the
/// position saved *before* the terminating character so the caller sees it
/// again. Finally the sign and exponent recorded during parsing are applied
/// to the accumulated value.
void NumberParser::run() {
    prepare();
    States state = States::Init;
    Position before;
    while ( true ) {
        // Remember where this character started so the last (non-number)
        // character can be pushed back after the loop.
        before = _input.position();
        char c{};
        if ( _input.eof() )
            state = States::Quit;   // EOF terminates the machine; switch falls to default
        else
            c = _input.readChar();
        // Dispatch to the handler for the current state; each returns the successor state.
        switch ( state ) {
            case States::Init: state = stateInit( c ); break;
            case States::Minus: state = stateMinus( c ); break;
            case States::Zero: state = stateZero( c ); break;
            case States::Digits: state = stateDigits( c ); break;
            case States::Point: state = statePoint( c ); break;
            case States::DecimalDigits: state = stateDecimalDigits( c ); break;
            case States::E: state = stateE( c ); break;
            case States::EPlusMinus: state = stateEplusMinus( c ); break;
            case States::EDigits: state = stateEDigits( c ); break;
            default: break;
        }
        if ( state == States::Quit )
            break;
    }
    _input.position( before );// return back the last character
    if ( _isE )
        postProcessing();   // fold the parsed exponent into the value
    if ( _isMinus ) {
        // Apply the sign recorded when the leading '-' was seen.
        if ( isReal() )
            _real *= -1;
        if ( isInteger() )
            _integer *= -1;
    }
}
// Feeds n samples of audio into the onset detector, one hop at a time.
// Samples accumulate in `vec`; every time `hopsize` samples have been
// collected a phase-vocoder frame is analysed and the onset detection /
// peak-picking pipeline runs. Returns true if at least one full analysis
// frame was completed during this call (NOT whether an onset was found —
// that is reported via the aubioOnsetFound member).
bool AubioOnsetDetector :: processframe(float* frame, const int& n){
    bool newFrameResult = false;
    //Paul Brossier's aubioonsetclass~ code ported from Pd
    int j,isonset;
    for (j=0;j<n;j++) {
        // write input to datanew
        fvec_write_sample(vec, frame[j], 0, pos);//vec->data[0][pos] = frame[j]
        //time for fft
        if (pos == hopsize-1) {
            //hopsize is 512
            newFrameResult = true;
            aubioOnsetFound = false;
            // block loop: run the phase vocoder on the filled hop buffer
            aubio_pvoc_do (pv,vec, fftgrain);
            fftgrain->norm[0][0] = fabs(fftgrain->norm[0][0]); //added hack to solve bug that norm[0][0] is negative sometimes.
            aubio_onsetdetection(o, fftgrain, onset);
            rawDetectionValue = onset->data[0][0];
            //Paul Brossier's method to return value of peak picking process
            postProcessing();
            // smpl_t my_sample_value;
            //peakPickedDetectionValue = aubio_peakpick_pimrt_getval(parms);
            //peakPickedDetectionValue = my_sample_value;
            //Paul Brossier's onset detection method
            isonset = aubio_peakpick_pimrt(onset,parms);
            if (isonset) {
                // test for silence: suppress onsets detected during silent input
                if (aubio_silence_detection(vec, threshold2)==1){
                    isonset=0;
                }
                else{
                    // outlet_bang(x->bangoutlet);
                    aubioOnsetFound = true;
                }
            }//end if (isonset)
            // end of block loop
            pos = -1; // so it will be zero next j loop (pos++ below wraps it to 0)
        }
        pos++;
    }//end for j
    //end of Paul's code
    return newFrameResult;
}
/**
 * Applies the Zhang transformation to a formula.
 * The newly generated predicates s and t are intensional predicates;
 * succ and max are extensional predicates.
 * @param _originalFml the formula to transform
 * @return the set of formulas produced by the transformation
 */
Formulas HengZhang::transform(const Formula& _originalFml) {
    Formula preprocessed = preProcessing(_originalFml);

    // Build the result set in the fixed order 1, 2, 3, 4.1, 4.2, 5.1, 5.2.
    Formulas result;
    result.pushBack(createFormula_1());
    result.pushBack(createFormula_2(preprocessed));
    result.pushBack(createFormula_3(preprocessed));
    result.pushBack(createFormula_4_1(preprocessed));
    result.pushBack(createFormula_4_2(preprocessed));
    result.pushBack(createFormula_5_1(preprocessed));
    result.pushBack(createFormula_5_2(preprocessed));

    postProcessing();
    return result;
}
// Data methods void SensorSpecHamamatsu::takeMeasurement() { Serial.println ("Starting measurement..."); readSpectrometer(&data[0], false); Serial.println ("Measurement complete..."); dataOverexposed = postProcessing(&data[0]); // Find peak channel value uint16_t maxValue = 0; for (int i=0; i<SPEC_CHANNELS; i++) { if (data[i] > maxValue) { peakChannel = i; maxValue = data[i]; } } }
void SMTController::run() { assert(m_uiNumber); assert(m_szFile); fillFileList(); m_pUPThread->start(); if (!makeThreads()) return; for (size_t x=0; x<m_vWorkerList.size(); x++) m_vWorkerList[x]->workThread->start(); while (true) { doPause(); if (isStopped()) break; //wait here as we have nothing else to do m_WaitCond.wait(2); if (m_iRunningWorkers==0) break; } m_pUPThread->stop(); for (size_t x=0; x<m_vWorkerList.size(); x++) m_vWorkerList[x]->workThread->stop(); if (!isStopped()) postProcessing(); safe_delete(m_vWorkerList); }
// Renders `world` from `camera` through the deferred pipeline:
//  1. fill the G-buffer MRTs (color / normal / misc / depth),
//  2. accumulate lighting per light-material group on top,
//  3. optionally run post-processing.
// NOTE(review): `dt` is currently unused by the active code path.
void TGen::Engine::DeferredRenderer::renderWorld(TGen::Engine::World & world, TGen::Camera * camera, scalar dt) {
	/*if (!world) {
		renderWorldless(dt);
		return;
	} */

	mainCamera = camera; //world.getCamera("maincam");
	// TODO: mainCamera?!
	// TODO: early-z, z-pass first. should be toggleable through a variable
	// pack together all lights that use the same material (and that share the same timer)
	// render their fill quads in a method above render, swapping lights in between. but how do we reach that level?
	// then there are cameras that render to textures, but those are handled the other way around, i.e.
	// a material is bound to a camera
	// i.e. texture sources must become more advanced in the material system; should support an envmap updated in real time
	// should also be able to define the cameras bound to a material, how often they update, etc.

	// what manages the map and enemies etc.? the map?
	currentMap->fillRenderList(thisList);
	// World!!!... where is the camera anyway? remember the camera should be handled by a player-movement
	// class and cameras should be portable
	// lights in a portal map are gathered by taking lights from all visible rooms + adjoining rooms
	// (including the ones that are not visible); can later be optimized by checking the light box
	world.prepareLists(mainCamera);

	TGen::RenderList & renderList = world.getRenderList();
	TGen::Engine::LightList & lightList = world.getLightList();

	renderList.sort(*mainCamera, "default");
	//std::cout << "renderlist: " << renderList.getNumFaces() << std::endl;

	// Debug meta geometry (world axes; normals/portals currently disabled).
	TGen::Engine::MetaCreator mc;
	metaLines.beginBatch();
	mc.writeAxes(TGen::Matrix4x4::Identity, metaLines);
	//renderList.writeMeta(TGen::MetaNormals, TGen::Matrix4x4::Identity, metaLines);
	//renderList.writeMeta(TGen::MetaPortals, TGen::Matrix4x4::Identity, metaLines);
	metaLines.endBatch();

	// Saved so the post-processing pass can restore the on-screen viewport.
	TGen::Rectangle viewport = renderer.getViewport();

	// UPDATE MAPS (color, normal, spec, depth, etc)
	renderer.setViewport(mrtSize);
	renderer.setClearColor(TGen::Color::Black);
	renderTarget->reset();
	renderTarget->setColorUnit(0, colorMap);
	renderTarget->setColorUnit(1, normalMap);
	renderTarget->setColorUnit(2, miscMap);
	renderTarget->setDepthUnit(0, depthMap);
	renderer.setRenderTarget(renderTarget);
	renderer.clearBuffers(TGen::ColorBuffer | TGen::DepthBuffer);
	renderer.setAmbientLight(world.getAmbientLight());

	renderer.setTransform(TGen::TransformProjection, mainCamera->getProjection());
	renderList.render(renderer, mainCamera->getTransform(), mainCamera->getLod(), "default");

	renderer.setTransform(TGen::TransformWorldView, mainCamera->getTransform());
	metaNormalMaterial->render(renderer, metaLines, TGen::MaterialRenderMetadata("default", 9, NULL, 0, NULL));
	// TODO: where should this actually live....

	//vars.postProcessing = false; // postprocessing costs 110 fps

	/*if (vars.postProcessing) {
		renderer.setRenderTarget(postTargets1);
		renderer.setViewport(mrtSize);
	}
	else {
		renderer.setRenderTarget(NULL);
		renderer.setViewport(viewport);
	}*/

	//app.renderer.clearBuffers(TGen::DepthBuffer);

	// AMBIENT TO RESULT
	//renderFillQuad(lightAmbientMaterial);

	// LIGHTS TO RESULT — lights are grouped by material; each group is
	// rendered with the G-buffer maps bound as texture inputs.
	TGen::Engine::LightList::LightMap & lightsByMaterial = lightList.getLightsByMaterial();

	for (TGen::Engine::LightList::LightMap::iterator iter = lightsByMaterial.begin(); iter != lightsByMaterial.end(); ++iter) {
		//std::cout << "MATERIAL " << iter->first << std::endl;
		// TODO: IF A LIGHT HAS FACES, RENDER THEM instead of the fill quad!!!! those are bounding boxes
		// if inside a light's bbox draw backfaces, otherwise only frontfaces
		// for smaller lights a bbox is enough; test against bounding-sphere or aabb.

		TGen::Texture * textures[] = {NULL, colorMap, normalMap, miscMap, depthMap};

		renderer.setTransform(TGen::TransformProjection, mainCamera->getProjection()); // shouldn't be necessary...

		TGen::Engine::LightList::LightArray * lights = iter->second;

		if (lights) {
			lightBatchSize = 1; // NOTE(review): batching effectively disabled; inner loop runs one light at a time

			for (int i = 0; i < lights->size(); i += lightBatchSize) {
				int a = 0;

				for (; a < lightBatchSize && i + a < lights->size(); ++a) {
					renderer.setTransform(TGen::TransformWorldView, mainCamera->getTransform() * (*lights)[a + i]->getTransform());

					// Directional lights get a w=0 direction vector; positional lights a w=1 origin.
					if ((*lights)[a + i]->getType() == TGen::Engine::LightDirectional)
						(*lights)[a + i]->getLightProperties().position = TGen::Vector4((*lights)[a + i]->getTransform().getZ().normalize(), 0.0f);
					else
						(*lights)[a + i]->getLightProperties().position = TGen::Vector4(0.0f, 0.0f, 0.0f, 1.0f); //(*lights)[a]->getWorldPosition();

					renderer.setLight(a, (*lights)[a + i]->getLightProperties());

					if ((*lights)[a + i]->getType() == TGen::Engine::LightDirectional)
						renderFillQuad(iter->first, TGen::lexical_cast<std::string>(a + 1 + i) + "lights"); // TODO: optimize
					else {
						// glIsTexture(1);
						(*lights)[a + i]->getMaterial()->render(renderer, TGen::SceneNodeRenderable(*(*lights)[a + i]), TGen::MaterialRenderMetadata(TGen::lexical_cast<std::string>(a + 1 + i) + "lights", 9, textures, 0, this));
					}

					//lightPositionalMaterial->render(renderer, TGen::SceneNodeRenderable(*(*lights)[a]), TGen::lexical_cast<std::string>(a + 1) + "lights", 9, NULL, NULL);
				}

				//lightPositionalMaterial->render(renderer, *model, TGen::lexical_cast<std::string>(a) + "lights", 9, NULL, NULL);
			}
		}
	}

	// TODO: bbox for lights... i.e. genCube
	//}

	//lightType = 1;

	/*TGen::VertexBuffer * vb = app.renderer.createVertexBuffer(LightVertexDecl(), sizeof(LightVertexDecl::Type) * 24 * 8, TGen::UsageStream);

	for (int i = 0; i < lights[lightType].size(); i += lightBatchSize) {
		std::vector<LightVertexDecl::Type> vertices;
		vertices.reserve(24 * 8);

		int a = 0;
		for (; a < lightBatchSize && i + a < lights[lightType].size(); ++a) {
			TGen::Vector3 min = lights[lightType][i + a]->boundingBox.getMin();
			TGen::Vector3 max = lights[lightType][i + a]->boundingBox.getMax();

			vertices.push_back(TGen::Vector3(min.x, min.y, max.z));
			vertices.push_back(TGen::Vector3(max.x, min.y, max.z));
			vertices.push_back(TGen::Vector3(max.x, max.y, max.z));
			vertices.push_back(TGen::Vector3(min.x, max.y, max.z));

			vertices.push_back(TGen::Vector3(max.x, min.y, min.z));
			vertices.push_back(TGen::Vector3(min.x, min.y, min.z));
			vertices.push_back(TGen::Vector3(min.x, max.y, min.z));
			vertices.push_back(TGen::Vector3(max.x, max.y, min.z));

			vertices.push_back(TGen::Vector3(max.x, min.y, max.z));
			vertices.push_back(TGen::Vector3(max.x, min.y, min.z));
			vertices.push_back(TGen::Vector3(max.x, max.y, min.z));
			vertices.push_back(TGen::Vector3(max.x, max.y, max.z));

			vertices.push_back(TGen::Vector3(min.x, min.y, min.z));
			vertices.push_back(TGen::Vector3(min.x, min.y, max.z));
			vertices.push_back(TGen::Vector3(min.x, max.y, max.z));
			vertices.push_back(TGen::Vector3(min.x, max.y, min.z));

			vertices.push_back(TGen::Vector3(min.x, min.y, min.z));
			vertices.push_back(TGen::Vector3(max.x, min.y, min.z));
			vertices.push_back(TGen::Vector3(max.x, min.y, max.z));
			vertices.push_back(TGen::Vector3(min.x, min.y, max.z));

			app.renderer.setLight(a, lights[lightType][i + a]->light);
		}

		vb->bufferData(&vertices[0], sizeof(LightVertexDecl::Type) * vertices.size(), 0);

		//renderFillQuad(lightDirectionalMaterial, TGen::lexical_cast<std::string>(a) + "lights");
		TGen::Texture * textures[] = {NULL, colorMap, normalMap, miscMap, depthMap};

		//
		app.renderer.setRenderTarget(depthTarget);
		lightPositionalMaterial->render(app.renderer, TGen::Engine::VBRenderable(vb, TGen::PrimitiveQuads, vertices.size()), "1lights", 9, textures, this);
	}

	delete vb;*/

	// TODO: fix the specialization handling
	// cache each material's specs; cachedSpecializations[getMaterial("directional")][2] points at "2light"
	// the entry is built when the material is first introduced

	//renderFillQuad(lightMaterials[lightType * lightBatchSize + a ]);

	// TODO: if "pass" appears in the material block (before lod), lod 9 is created
	// TODO: specializations/lods should carry a flag saying whether that level should really be linked at link time.
	// If the flag is set, it is not linked. Or rather: there should be a list consulted when parsing materials;
	// if a lod is in that list it is not loaded. Another flag can then record whether that lod was ever actually used.
	// Workflow: walk around a world looking at everything every possible way, as when playing; then save the flags
	// of all lods that are still false to a file, used later when loading all materials.
	// Some materials must never be culled this way, e.g. lights — for those set a param: noPreCull (in the param block)
	// TODO: #loop 0 to r_lightBatchSize (read only)

	if (vars.postProcessing) {
		postProcessing(viewport);
	}
}
int main(void) { UT_array * dictionary; UT_array * specialChar; UT_array * WholeString = NULL; CharProfile NewCharProfile; CharProbability NewCharProbability; /*utarray_new(dictionary, &ut_str_icd); utarray_new(specialChar, &ut_str_icd);*/ dictionary = postProcessingInitializeDictionary(); specialChar = postProcessingInitializeSpecialChar(); /*initialize string profile*/ UT_icd StringProfile_icd = {sizeof(CharProfile), NULL, NULL, CharProfile_free}; utarray_new(WholeString, &StringProfile_icd); /*first char spot*/ UT_icd CharProbability_icd = {sizeof(CharProbability), NULL, NULL, NULL}; utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = 'f'; NewCharProbability.Probability = 25; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = 't'; NewCharProbability.Probability = 75; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*second char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = 'o'; NewCharProbability.Probability = 35; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = '0'; NewCharProbability.Probability = 85; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*third char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = 't'; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = 'r'; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, 
&NewCharProfile); /*third char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = '('; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = '='; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*fifth char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = 'm'; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = 'n'; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*sixth char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = 'y'; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = 't'; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*seventh char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = ' '; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = 'u'; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*eigth char spot*/ utarray_new(NewCharProfile.CharChoices, 
&CharProbability_icd); /*first possibility*/ NewCharProbability.Char = 'h'; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = 'w'; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*ninth char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = ')'; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = '}'; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*tenth char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = '\n'; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = 'u'; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*sixth char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = '{'; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = '['; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*sixth char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = 'd'; 
NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = 'a'; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*sixth char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = 'u'; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = 'd'; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*sixth char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = 't'; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = 'y'; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*sixth char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = 'o'; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*second probability*/ NewCharProbability.Char = 't'; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); /*sixth char spot*/ utarray_new(NewCharProfile.CharChoices, &CharProbability_icd); /*first possibility*/ NewCharProbability.Char = '}'; NewCharProbability.Probability = 50; utarray_push_back(NewCharProfile.CharChoices, 
&NewCharProbability); /*second probability*/ NewCharProbability.Char = ']'; NewCharProbability.Probability = 100; utarray_push_back(NewCharProfile.CharChoices, &NewCharProbability); /*push to the whole string*/ utarray_push_back(WholeString, &NewCharProfile); UT_string * advance_string = postProcessingAdvance(WholeString,dictionary,specialChar); UT_string * normal_string = postProcessing(WholeString); printf("Result from highest probability catch is:\n%s\nResult from keyword match is:\n%s\n", utstring_body(normal_string), utstring_body(advance_string)); utstring_free(advance_string); utstring_free(normal_string); postProcessingCleanUP(dictionary, specialChar); return 0; }
/* Decodes one 10 ms G.729 frame (80 bits in bitStream) into 80 PCM samples
 * in signal[]. When frameErasureFlag is set, bitStream may be NULL and the
 * decoder conceals the lost frame (pseudo-random fixed-codebook parameters,
 * spec 4.4). Processing per frame: parse parameters -> LSP decode/interpolate
 * -> LP conversion -> per-subframe adaptive+fixed codebook excitation, gains,
 * LP synthesis, post-filter and post-processing -> state buffers shifted for
 * the next frame. */
void bcg729Decoder(bcg729DecoderChannelContextStruct *decoderChannelContext, uint8_t bitStream[], uint8_t frameErasureFlag, int16_t signal[])
{
	int i;
	uint16_t parameters[NB_PARAMETERS];

	/* internal buffers which we do not need to keep between calls */
	word16_t qLSP[NB_LSP_COEFF]; /* store the qLSP coefficients in Q0.15 */
	word16_t interpolatedqLSP[NB_LSP_COEFF]; /* store the interpolated qLSP coefficient in Q0.15 */
	word16_t LP[2*NB_LSP_COEFF]; /* store the 2 sets of LP coefficients in Q12 */
	int16_t intPitchDelay; /* store the Pitch Delay in and out of decodeAdaptativeCodeVector, in for decodeFixedCodeVector */
	word16_t fixedCodebookVector[L_SUBFRAME]; /* the fixed Codebook Vector in Q1.13*/
	word16_t postFilteredSignal[L_SUBFRAME]; /* store the postfiltered signal in Q0 */

	uint8_t parityErrorFlag;
	int subframeIndex;
	int parametersIndex = 4; /* this is used to select the right parameter according to the subframe currently computed, start pointing to P1 */
	int LPCoefficientsIndex = 0; /* this is used to select the right LP Coefficients according to the subframe currently computed */

	/*** parse the bitstream and get all parameter into an array as in spec 4 - Table 8 ***/
	/* parameters buffer mapping : */
	/*  0 -> L0 (1 bit)   */
	/*  1 -> L1 (7 bits)  */
	/*  2 -> L2 (5 bits)  */
	/*  3 -> L3 (5 bits)  */
	/*  4 -> P1 (8 bit)   */
	/*  5 -> P0 (1 bits)  */
	/*  6 -> C1 (13 bits) */
	/*  7 -> S1 (4 bits)  */
	/*  8 -> GA1(3 bits)  */
	/*  9 -> GB1(4 bits)  */
	/* 10 -> P2 (5 bits)  */
	/* 11 -> C2 (13 bits) */
	/* 12 -> S2 (4 bits)  */
	/* 13 -> GA2(3 bits)  */
	/* 14 -> GB2(4 bits)  */
	if (bitStream!=NULL) { /* bitStream might be null in case of frameErased (which shall be set in the appropriated flag)*/
		parametersBitStream2Array(bitStream, parameters);
	} else { /* erased frame: zero the parameters so no uninitialized value is ever read */
		for (i=0; i<NB_PARAMETERS; i++) {
			parameters[i]=0;
		}
	}

	/*****************************************************************************************/
	/*** on frame basis : decodeLSP, interpolate them with previous ones and convert to LP ***/
	decodeLSP(decoderChannelContext, parameters, qLSP, frameErasureFlag); /* decodeLSP need the first 4 parameters: L0-L3 */

	interpolateqLSP(decoderChannelContext->previousqLSP, qLSP, interpolatedqLSP);

	/* copy the currentqLSP to previousqLSP buffer */
	for (i=0; i<NB_LSP_COEFF; i++) {
		decoderChannelContext->previousqLSP[i] = qLSP[i];
	}

	/* call the qLSP2LP function for first subframe */
	qLSP2LP(interpolatedqLSP, LP);
	/* call the qLSP2LP function for second subframe */
	qLSP2LP(qLSP, &(LP[NB_LSP_COEFF]));

	/* check the parity on the adaptativeCodebookIndexSubframe1(P1) with the received one (P0)*/
	parityErrorFlag = (uint8_t)(computeParity(parameters[4]) ^ parameters[5]);

	/* loop over the two subframes */
	for (subframeIndex=0; subframeIndex<L_FRAME; subframeIndex+=L_SUBFRAME) {
		/* decode the adaptative Code Vector */
		decodeAdaptativeCodeVector(decoderChannelContext,
		                           subframeIndex,
		                           parameters[parametersIndex],
		                           parityErrorFlag,
		                           frameErasureFlag,
		                           &intPitchDelay,
		                           &(decoderChannelContext->excitationVector[L_PAST_EXCITATION + subframeIndex]));
		if (subframeIndex==0) { /* at first subframe we have P0 between P1 and C1 */
			parametersIndex+=2;
		} else {
			parametersIndex++;
		}

		/* in case of frame erasure we shall generate pseudoRandom signs and index for fixed code vector decoding according to spec 4.4.4 */
		if (frameErasureFlag) {
			parameters[parametersIndex] = pseudoRandom(decoderChannelContext)&(uint16_t)0x1fff; /* fixed codebook index (C): 13 LSB of the first pseudoRandom number */
			parameters[parametersIndex+1] = pseudoRandom(decoderChannelContext)&(uint16_t)0x000f; /* signs (S): 4 LSB of the second pseudoRandom number */
		}
		/* decode the fixed Code Vector: args are signs (S), index (C), pitch delay, bounded pitch gain */
		decodeFixedCodeVector(parameters[parametersIndex+1], parameters[parametersIndex], intPitchDelay, decoderChannelContext->boundedAdaptativeCodebookGain, fixedCodebookVector);
		parametersIndex+=2;

		/* decode gains (GA, GB) */
		decodeGains(decoderChannelContext, parameters[parametersIndex], parameters[parametersIndex+1], fixedCodebookVector, frameErasureFlag,
		            &(decoderChannelContext->adaptativeCodebookGain), &(decoderChannelContext->fixedCodebookGain));
		parametersIndex+=2;

		/* update bounded Adaptative Codebook Gain (in Q14) according to eq47 */
		decoderChannelContext->boundedAdaptativeCodebookGain = decoderChannelContext->adaptativeCodebookGain;
		if (decoderChannelContext->boundedAdaptativeCodebookGain>BOUNDED_PITCH_GAIN_MAX) {
			decoderChannelContext->boundedAdaptativeCodebookGain = BOUNDED_PITCH_GAIN_MAX;
		}
		if (decoderChannelContext->boundedAdaptativeCodebookGain<BOUNDED_PITCH_GAIN_MIN) {
			decoderChannelContext->boundedAdaptativeCodebookGain = BOUNDED_PITCH_GAIN_MIN;
		}

		/* compute excitation vector according to eq75 */
		/* excitationVector = adaptative Codebook Vector * adaptativeCodebookGain + fixed Codebook Vector * fixedCodebookGain */
		/* the adaptative Codebook Vector is in the excitationVector buffer [L_PAST_EXCITATION + subframeIndex] */
		/* with adaptative Codebook Vector in Q0, adaptativeCodebookGain in Q14, fixed Codebook Vector in Q1.13 and fixedCodebookGain in Q14.1 -> result in Q14 on 32 bits */
		/* -> shift right 14 bits and store the value in Q0 in a 16 bits type */
		for (i=0; i<L_SUBFRAME; i++) {
			decoderChannelContext->excitationVector[L_PAST_EXCITATION + subframeIndex + i] = (word16_t)(SATURATE(PSHR(ADD32(
				MULT16_16(decoderChannelContext->excitationVector[L_PAST_EXCITATION + subframeIndex + i], decoderChannelContext->adaptativeCodebookGain),
				MULT16_16(fixedCodebookVector[i], decoderChannelContext->fixedCodebookGain)), 14), MAXINT16));
		}

		/* reconstruct speech using LP synthesis filter spec 4.1.6 eq77 */
		/* excitationVector in Q0, LP in Q12, recontructedSpeech in Q0 -> +NB_LSP_COEFF on the index of this one because the first NB_LSP_COEFF elements store the previous frame filter output */
		LPSynthesisFilter(&(decoderChannelContext->excitationVector[L_PAST_EXCITATION + subframeIndex]), &(LP[LPCoefficientsIndex]), &(decoderChannelContext->reconstructedSpeech[NB_LSP_COEFF+subframeIndex]) );
		/* NOTE: ITU code check for overflow after LP Synthesis Filter computation and if it happened, divide excitation buffer by 2 and recompute the LP Synthesis Filter */
		/* here, possible overflows are managed directly inside the Filter by saturation at MAXINT16 on each result */

		/* postFilter */
		postFilter(decoderChannelContext, &(LP[LPCoefficientsIndex]), /* select the LP coefficients for this subframe */
		           &(decoderChannelContext->reconstructedSpeech[NB_LSP_COEFF+subframeIndex]), intPitchDelay, subframeIndex, postFilteredSignal);

		/* postProcessing (high-pass filter + scaling) */
		postProcessing(decoderChannelContext, postFilteredSignal);

		/* copy postProcessing Output to the signal output buffer */
		for (i=0; i<L_SUBFRAME; i++) {
			signal[subframeIndex+i] = postFilteredSignal[i];
		}

		/* increase LPCoefficient Indexes */
		LPCoefficientsIndex+=NB_LSP_COEFF;
	}

	/* Shift Excitation Vector by L_FRAME left */
	memmove(decoderChannelContext->excitationVector, &(decoderChannelContext->excitationVector[L_FRAME]), L_PAST_EXCITATION*sizeof(word16_t));
	/* Copy the last 10 words of reconstructed Speech to the begining of the array for next frame computation */
	memcpy(decoderChannelContext->reconstructedSpeech, &(decoderChannelContext->reconstructedSpeech[L_FRAME]), NB_LSP_COEFF*sizeof(word16_t));

	return;
}
// Slides a detection window over every position and scale of `image`, runs the
// strong classifier at each placement, post-processes the raw hits, and draws
// a rectangle on the image's pixbuf for every surviving face.
// NOTE(review): `faces` is a static array of 20,000,000 entries -- a very
// large BSS allocation that also makes this function non-reentrant; nbFaces is
// never bounds-checked against it (overflow is unlikely but unguarded).
// NOTE(review): grey, squaredGrey, intImage and squaredImage are never
// released here -- presumably a leak unless the *New() constructors register
// ownership elsewhere; verify against the image API.
void detectFaces(GtkImage *image, StrongClassifier *sc) {
    static DetectedFace faces[20000000];
    int nbFaces = 0;
    GdkPixbuf *pixbuf = gtk_image_get_pixbuf(image);
    GreyImage *grey = GreyImageNewFromImage(image);
    IntegralImage *intImage = IntegralImageNew(grey);
    int imgW = intImage->width;
    int imgH = intImage->height;
    int maxX = imgW - WIN_WIDTH;   // last x where the base window still fits
    int maxY = imgH - WIN_HEIGHT;  // last y where the base window still fits
    // Integral image of squared pixels, used below for the per-window
    // deviation (variance) normalisation.
    GreyImage *squaredGrey = GreyImageNew(imgW, imgH);
    for(int i = 0, count = imgW*imgH; i < count; ++i)
        squaredGrey->pixels[i] = grey->pixels[i]*grey->pixels[i];
    IntegralImage *squaredImage = IntegralImageNew(squaredGrey);
    for(int x = 0; x < maxX; ++x) {
        for(int y = 0; y < maxY; ++y) {
            // Grow the window geometrically (factor WIN_RATIO per step) while
            // it still fits inside the image from this top-left corner.
            double scale = 1;
            int maxWidth = imgW - x;
            int maxHeight = imgH - y;
            int width = WIN_WIDTH;
            int height = WIN_HEIGHT;
            while(width <= maxWidth && height <= maxHeight) {
                int deviation = IntegralImageGetDeviation(intImage, squaredImage, scale ,x, y, width, height);
                if(StrongClassifierCheck(sc, intImage, x, y, scale, deviation)) {
                    // Record the raw detection at this position/scale.
                    faces[nbFaces].x = x;
                    faces[nbFaces].y = y;
                    faces[nbFaces].w = width;
                    faces[nbFaces].h = height;
                    ++nbFaces;
                }
                scale *= WIN_RATIO;
                // Truncation to int is intentional: width/height track the
                // scaled base window size in whole pixels.
                width = WIN_WIDTH * scale;
                height = WIN_HEIGHT * scale;
            }
        }
    }
    // Merge/filter overlapping raw detections; nbFaces is updated in place.
    DetectedFace *trueFaces = postProcessing(faces, nbFaces, &nbFaces);
    for(int i = 0; i < nbFaces; ++i)
        addRect(pixbuf, trueFaces[i].x, trueFaces[i].y, trueFaces[i].w, trueFaces[i].h);
}
// Verifies that reusing information from previous time steps in the Anderson
// post-processing (nbReuse = 0, 1, 2) never increases the total number of FSI
// coupling iterations: the per-run totals must be non-increasing.
TEST( SDCFsiTest, reuse )
{
    std::vector<int> nbIter;  // total FSI iterations for each reuse setting

    for ( int nbReuse = 0; nbReuse < 3; nbReuse++ )
    {
        // --- physical/numerical parameters of the 1d tube flow problem ---
        scalar r0 = 0.2;
        scalar a0 = M_PI * r0 * r0;
        scalar u0 = 0.1;
        scalar p0 = 0;
        scalar dt = 0.01;
        int N = 20;
        scalar L = 1;
        scalar T = 1;
        scalar dx = L / N;
        scalar rho = 1.225;
        scalar E = 490;
        scalar h = 1.0e-3;
        scalar cmk = std::sqrt( E * h / (2 * rho * r0) );
        scalar c0 = std::sqrt( cmk * cmk - p0 / (2 * rho) );
        scalar kappa = c0 / u0;

        // --- coupling settings ---
        bool parallel = false;
        int extrapolation = 0;
        scalar tol = 1.0e-5;
        int maxIter = 50;
        scalar initialRelaxation = 1.0e-3;
        int maxUsedIterations = 50;
        scalar singularityLimit = 1.0e-13;
        int reuseInformationStartingFromTimeIndex = 0;
        bool scaling = false;
        bool updateJacobian = false;
        scalar beta = 0.1;
        int minIter = 5;

        // Sanity checks on the derived quantities.
        ASSERT_NEAR( kappa, 10, 1.0e-13 );
        ASSERT_TRUE( dx > 0 );

        std::shared_ptr<tubeflow::SDCTubeFlowFluidSolver> fluid( new tubeflow::SDCTubeFlowFluidSolver( a0, u0, p0, dt, cmk, N, L, T, rho ) );
        std::shared_ptr<tubeflow::SDCTubeFlowSolidSolver> solid( new tubeflow::SDCTubeFlowSolidSolver( a0, cmk, p0, rho, L, N ) );

        // Fluid-side RBF interpolation. rbfFunction/rbfInterpolator are
        // deliberately re-created before each coarsening object: every
        // RBFCoarsening owns its own interpolator chain.
        shared_ptr<RBFFunctionInterface> rbfFunction;
        shared_ptr<RBFInterpolation> rbfInterpolator;
        shared_ptr<RBFCoarsening> rbfInterpToCouplingMesh;
        shared_ptr<RBFCoarsening> rbfInterpToMesh;
        rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
        rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
        rbfInterpToCouplingMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
        rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
        rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
        rbfInterpToMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
        shared_ptr<MultiLevelSolver> fluidSolver( new MultiLevelSolver( fluid, fluid, rbfInterpToCouplingMesh, rbfInterpToMesh, 0, 0 ) );

        // Solid-side RBF interpolation (same pattern, fresh chains).
        rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
        rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
        rbfInterpToCouplingMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
        rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
        rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
        rbfInterpToMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
        shared_ptr<MultiLevelSolver> solidSolver( new MultiLevelSolver( solid, fluid, rbfInterpToCouplingMesh, rbfInterpToMesh, 1, 0 ) );

        // Convergence: relative tolerance plus a minimum iteration count.
        std::shared_ptr< std::list<std::shared_ptr<ConvergenceMeasure> > > convergenceMeasures;
        convergenceMeasures = std::shared_ptr<std::list<std::shared_ptr<ConvergenceMeasure> > >( new std::list<std::shared_ptr<ConvergenceMeasure> > );
        convergenceMeasures->push_back( std::shared_ptr<ConvergenceMeasure>( new RelativeConvergenceMeasure( 0, false, tol ) ) );
        convergenceMeasures->push_back( std::shared_ptr<ConvergenceMeasure>( new MinIterationConvergenceMeasure( 0, false, minIter ) ) );

        shared_ptr<MultiLevelFsiSolver> fsi( new MultiLevelFsiSolver( fluidSolver, solidSolver, convergenceMeasures, parallel, extrapolation ) );

        // nbReuse (the loop variable) is the quantity under test here.
        shared_ptr<PostProcessing> postProcessing( new AndersonPostProcessing( fsi, maxIter, initialRelaxation, maxUsedIterations, nbReuse, singularityLimit, reuseInformationStartingFromTimeIndex, scaling, beta, updateJacobian ) );

        std::shared_ptr<sdc::SDCFsiSolverInterface> sdcFluidSolver = std::dynamic_pointer_cast<sdc::SDCFsiSolverInterface>( fluid );
        std::shared_ptr<sdc::SDCFsiSolverInterface> sdcSolidSolver = std::dynamic_pointer_cast<sdc::SDCFsiSolverInterface>( solid );
        assert( sdcFluidSolver );
        assert( sdcSolidSolver );

        std::shared_ptr<fsi::SDCFsiSolver> fsiSolver( new fsi::SDCFsiSolver( sdcFluidSolver, sdcSolidSolver, postProcessing, extrapolation ) );

        // SDC time integration with a uniform quadrature rule.
        int nbNodes = 3;
        std::shared_ptr<fsi::quadrature::IQuadrature<scalar> > quadrature;
        quadrature = std::shared_ptr<fsi::quadrature::IQuadrature<scalar> >( new fsi::quadrature::Uniform<scalar>( nbNodes ) );
        std::shared_ptr<sdc::SDC> sdc( new sdc::SDC( fsiSolver, quadrature, 1.0e-10, nbNodes, nbNodes ) );

        sdc->run();

        nbIter.push_back( fsi->nbIter );
    }

    // More reuse must never cost more iterations.
    // iprev is only read after index > 0, i.e. after it has been assigned.
    int iprev;
    int index = 0;

    for ( int i : nbIter )
    {
        std::cout << "nbIter = " << i << std::endl;

        if ( index > 0 )
            ASSERT_LE( i, iprev );

        iprev = i;
        index++;
    }
}
// Capture a single (non-averaged) spectrometer frame directly into the
// baseline buffer, post-process it in place, record whether the reading was
// overexposed, and mark the baseline as available.
void SensorSpecHamamatsu::takeBaseline() {
    readSpectrometer(&baseline[0], false);
    const auto overexposed = postProcessing(&baseline[0]);
    baselineOverexposed = overexposed;
    hasBaseline = true;
}
// Convergence study for the ESDIRK53PR time integrator coupled to the
// linearized solid model: three computations with successively doubled time
// step counts must exhibit (approximately) third-order convergence.
TEST( SDIRKFsiSolidTest, linearized )
{
    // --- physical parameters ---
    scalar r0 = 3.0e-3;
    scalar h = 3.0e-4;
    scalar L = 0.126;
    scalar rho_s = 1000;
    scalar E0 = 4.0e5;
    scalar G = 4.0e5;
    scalar nu = 0.5;
    scalar a0 = M_PI * r0 * r0;
    scalar u0 = 0.26;
    scalar p0 = 0;
    int N = 5;
    scalar T = 1;
    scalar rho_f = 1060;
    scalar E = 490;
    scalar cmk = std::sqrt( E * h / (2 * rho_f * r0) );

    // --- coupling settings ---
    bool parallel = false;
    int extrapolation = 0;
    scalar tol = 1.0e-8;
    int maxIter = 50;
    scalar initialRelaxation = 1.0e-3;
    int maxUsedIterations = 50;
    int nbReuse = 0;
    scalar singularityLimit = 1.0e-13;
    int reuseInformationStartingFromTimeIndex = 0;
    bool scaling = false;
    scalar beta = 0.01;
    bool updateJacobian = false;
    int minIter = 5;

    int nbComputations = 3;

    // Keep every computation's solvers so the finest run can serve as the
    // reference solution in the order estimates below.
    std::deque<std::shared_ptr<tubeflow::SDCTubeFlowFluidSolver> > fluidSolvers;
    std::deque<std::shared_ptr<tubeflow::SDCTubeFlowLinearizedSolidSolver> > solidSolvers;
    std::deque<int> nbTimeStepsList;

    for ( int iComputation = 0; iComputation < nbComputations; iComputation++ )
    {
        // Time step halves (step count doubles) with each computation.
        int nbTimeSteps = 120 * std::pow( 2, iComputation );
        std::cout << "nbTimeSteps = " << nbTimeSteps << std::endl;
        scalar dt = T / nbTimeSteps;

        std::shared_ptr<tubeflow::SDCTubeFlowFluidSolver> fluid( new tubeflow::SDCTubeFlowFluidSolver( a0, u0, p0, dt, cmk, N, L, T, rho_f ) );
        std::shared_ptr<tubeflow::SDCTubeFlowLinearizedSolidSolver> solid( new tubeflow::SDCTubeFlowLinearizedSolidSolver( N, nu, rho_s, h, L, dt, G, E0, r0, T ) );

        // Fluid-side RBF interpolation; each coarsening owns a fresh chain.
        shared_ptr<RBFFunctionInterface> rbfFunction;
        shared_ptr<RBFInterpolation> rbfInterpolator;
        shared_ptr<RBFCoarsening> rbfInterpToCouplingMesh;
        shared_ptr<RBFCoarsening> rbfInterpToMesh;
        rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
        rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
        rbfInterpToCouplingMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
        rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
        rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
        rbfInterpToMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
        shared_ptr<MultiLevelSolver> fluidSolver( new MultiLevelSolver( fluid, fluid, rbfInterpToCouplingMesh, rbfInterpToMesh, 0, 0 ) );

        // Solid-side RBF interpolation.
        rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
        rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
        rbfInterpToCouplingMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
        rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
        rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
        rbfInterpToMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
        shared_ptr<MultiLevelSolver> solidSolver( new MultiLevelSolver( solid, fluid, rbfInterpToCouplingMesh, rbfInterpToMesh, 1, 0 ) );

        // Convergence: tight relative tolerance plus a minimum iteration count.
        std::shared_ptr< std::list<std::shared_ptr<ConvergenceMeasure> > > convergenceMeasures;
        convergenceMeasures = std::shared_ptr<std::list<std::shared_ptr<ConvergenceMeasure> > >( new std::list<std::shared_ptr<ConvergenceMeasure> >() );
        convergenceMeasures->push_back( std::shared_ptr<ConvergenceMeasure>( new RelativeConvergenceMeasure( 0, true, tol ) ) );
        convergenceMeasures->push_back( std::shared_ptr<ConvergenceMeasure>( new MinIterationConvergenceMeasure( 0, false, minIter ) ) );

        shared_ptr<MultiLevelFsiSolver> fsi( new MultiLevelFsiSolver( fluidSolver, solidSolver, convergenceMeasures, parallel, extrapolation ) );
        shared_ptr<PostProcessing> postProcessing( new AndersonPostProcessing( fsi, maxIter, initialRelaxation, maxUsedIterations, nbReuse, singularityLimit, reuseInformationStartingFromTimeIndex, scaling, beta, updateJacobian ) );

        std::shared_ptr<sdc::SDCFsiSolverInterface> sdcFluidSolver = std::dynamic_pointer_cast<sdc::SDCFsiSolverInterface>( fluid );
        std::shared_ptr<sdc::SDCFsiSolverInterface> sdcSolidSolver = std::dynamic_pointer_cast<sdc::SDCFsiSolverInterface>( solid );
        assert( sdcFluidSolver );
        assert( sdcSolidSolver );

        std::shared_ptr<fsi::SDCFsiSolver> fsiSolver( new fsi::SDCFsiSolver( sdcFluidSolver, sdcSolidSolver, postProcessing, extrapolation ) );

        // ESDIRK53PR with adaptive time stepping disabled: fixed dt per run.
        std::shared_ptr<sdc::AdaptiveTimeStepper> adaptiveTimeStepper( new sdc::AdaptiveTimeStepper( false ) );
        std::string method = "ESDIRK53PR";
        std::shared_ptr<sdc::ESDIRK> esdirk( new sdc::ESDIRK( fsiSolver, method, adaptiveTimeStepper ) );

        esdirk->run();

        fluidSolvers.push_back( fluid );
        solidSolvers.push_back( solid );
        nbTimeStepsList.push_back( nbTimeSteps );
    }

    // Observed convergence order for the solid unknowns (i == 0: r, else: u),
    // using the finest computation as the reference solution.
    std::cout << "solid" << std::endl;

    for ( int i = 0; i < 2; i++ )
    {
        fsi::vector ref;

        if ( i == 0 )
            ref = solidSolvers.back()->r;
        else
            ref = solidSolvers.back()->u;

        std::deque<scalar> errors;

        for ( int iComputation = 0; iComputation < nbComputations - 1; iComputation++ )
        {
            fsi::vector data;

            if ( i == 0 )
                data = solidSolvers.at( iComputation )->r;
            else
                data = solidSolvers.at( iComputation )->u;

            scalar error = (ref - data).norm() / ref.norm();
            errors.push_back( error );
        }

        for ( int iComputation = 0; iComputation < nbComputations - 2; iComputation++ )
        {
            // Order estimate from consecutive errors on a log-log scale.
            scalar order = ( std::log10( errors.at( iComputation ) ) - std::log10( errors.at( iComputation + 1 ) ) ) / ( std::log10( nbTimeStepsList.at( iComputation + 1 ) ) - std::log10( nbTimeStepsList.at( iComputation ) ) );
            std::cout << "order = " << order << std::endl;

            if ( i == 0 )
                ASSERT_NEAR( order, 3, 0.1 );
        }
    }

    // Observed order for the fluid unknowns (u, a, p); only a and p (i == 1,
    // i == 2) are asserted.
    std::cout << "fluid" << std::endl;

    for ( int i = 0; i < 3; i++ )
    {
        fsi::vector ref;

        if ( i == 0 )
            ref = fluidSolvers.back()->u;

        if ( i == 1 )
            ref = fluidSolvers.back()->a;

        if ( i == 2 )
            ref = fluidSolvers.back()->p;

        std::deque<scalar> errors;

        for ( int iComputation = 0; iComputation < nbComputations - 1; iComputation++ )
        {
            fsi::vector data;

            if ( i == 0 )
                data = fluidSolvers.at( iComputation )->u;

            if ( i == 1 )
                data = fluidSolvers.at( iComputation )->a;

            if ( i == 2 )
                data = fluidSolvers.at( iComputation )->p;

            scalar error = (ref - data).norm() / ref.norm();
            errors.push_back( error );
        }

        for ( int iComputation = 0; iComputation < nbComputations - 2; iComputation++ )
        {
            scalar order = ( std::log10( errors.at( iComputation ) ) - std::log10( errors.at( iComputation + 1 ) ) ) / ( std::log10( nbTimeStepsList.at( iComputation + 1 ) ) - std::log10( nbTimeStepsList.at( iComputation ) ) );
            std::cout << "order = " << order << std::endl;

            if ( i == 1 || i == 2 )
                ASSERT_NEAR( order, 3, 0.1 );
        }
    }
}
// Test fixture setup: builds the full FSI solver stack (tube-flow fluid
// solver, linearized solid solver behind the BaseMultiLevelSolver interface,
// RBF interpolation, Anderson post-processing) and stores an ESDIRK53PR
// integrator in the fixture member `esdirk` for the tests to run.
virtual void SetUp()
{
    // --- physical parameters ---
    scalar r0 = 3.0e-3;
    scalar h = 3.0e-4;
    scalar L = 0.126;
    scalar rho_s = 1000;
    scalar E0 = 4.0e5;
    scalar G = 4.0e5;
    scalar nu = 0.5;
    scalar a0 = M_PI * r0 * r0;
    scalar u0 = 0.26;
    scalar p0 = 0;
    scalar dt = 1;
    int N = 5;
    scalar T = 1;
    scalar rho_f = 1060;
    scalar E = 490;
    scalar cmk = std::sqrt( E * h / (2 * rho_f * r0) );

    // --- coupling settings ---
    bool parallel = false;
    int extrapolation = 0;
    scalar tol = 1.0e-3;
    int maxIter = 20;
    scalar initialRelaxation = 1.0e-3;
    int maxUsedIterations = 50;
    int nbReuse = 0;
    scalar singularityLimit = 1.0e-13;
    int reuseInformationStartingFromTimeIndex = 0;
    bool scaling = false;
    bool updateJacobian = false;
    scalar beta = 0.5;
    int minIter = 5;

    std::shared_ptr<tubeflow::SDCTubeFlowFluidSolver> fluid( new tubeflow::SDCTubeFlowFluidSolver( a0, u0, p0, dt, cmk, N, L, T, rho_f ) );

    // The solid solver is held through the BaseMultiLevelSolver interface.
    std::shared_ptr<fsi::BaseMultiLevelSolver> solid;
    solid = std::shared_ptr<fsi::BaseMultiLevelSolver>( new tubeflow::SDCTubeFlowLinearizedSolidSolver( N, nu, rho_s, h, L, dt, G, E0, r0, T ) );
    assert( solid );

    // Fluid-side RBF interpolation; each coarsening owns a fresh chain.
    shared_ptr<RBFFunctionInterface> rbfFunction;
    shared_ptr<RBFInterpolation> rbfInterpolator;
    shared_ptr<RBFCoarsening> rbfInterpToCouplingMesh;
    shared_ptr<RBFCoarsening> rbfInterpToMesh;
    rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
    rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
    rbfInterpToCouplingMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
    rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
    rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
    rbfInterpToMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
    shared_ptr<MultiLevelSolver> fluidSolver( new MultiLevelSolver( fluid, fluid, rbfInterpToCouplingMesh, rbfInterpToMesh, 0, 0 ) );

    // Solid-side RBF interpolation.
    rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
    rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
    rbfInterpToCouplingMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
    rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
    rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
    rbfInterpToMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
    shared_ptr<MultiLevelSolver> solidSolver( new MultiLevelSolver( solid, fluid, rbfInterpToCouplingMesh, rbfInterpToMesh, 1, 0 ) );

    // Convergence: relative tolerance plus a minimum iteration count.
    std::shared_ptr< std::list<std::shared_ptr<ConvergenceMeasure> > > convergenceMeasures;
    convergenceMeasures = std::shared_ptr<std::list<std::shared_ptr<ConvergenceMeasure> > >( new std::list<std::shared_ptr<ConvergenceMeasure> >() );
    convergenceMeasures->push_back( std::shared_ptr<ConvergenceMeasure>( new RelativeConvergenceMeasure( 0, true, tol ) ) );
    convergenceMeasures->push_back( std::shared_ptr<ConvergenceMeasure>( new MinIterationConvergenceMeasure( 0, false, minIter ) ) );

    shared_ptr<MultiLevelFsiSolver> fsi( new MultiLevelFsiSolver( fluidSolver, solidSolver, convergenceMeasures, parallel, extrapolation ) );
    shared_ptr<PostProcessing> postProcessing( new AndersonPostProcessing( fsi, maxIter, initialRelaxation, maxUsedIterations, nbReuse, singularityLimit, reuseInformationStartingFromTimeIndex, scaling, beta, updateJacobian ) );

    std::shared_ptr<sdc::SDCFsiSolverInterface> sdcFluidSolver = std::dynamic_pointer_cast<sdc::SDCFsiSolverInterface>( fluid );
    std::shared_ptr<sdc::SDCFsiSolverInterface> sdcSolidSolver = std::dynamic_pointer_cast<sdc::SDCFsiSolverInterface>( solid );
    assert( sdcFluidSolver );
    assert( sdcSolidSolver );

    std::shared_ptr<fsi::SDCFsiSolver> fsiSolver( new fsi::SDCFsiSolver( sdcFluidSolver, sdcSolidSolver, postProcessing, extrapolation ) );

    // ESDIRK53PR with adaptive time stepping disabled.
    std::shared_ptr<sdc::AdaptiveTimeStepper> adaptiveTimeStepper( new sdc::AdaptiveTimeStepper( false ) );
    std::string method = "ESDIRK53PR";

    // `esdirk` is a fixture member (not a local): kept alive for the tests.
    esdirk = std::shared_ptr<sdc::ESDIRK> ( new sdc::ESDIRK( fsiSolver, method, adaptiveTimeStepper ) );
}
/*******************************************************************************
 * Function:    extractFGTargets
 * Description: extract FG targets with given conditions and return objects
 * Arguments:   inImg       - input image
 *              fgImg       - output FG mask image
 *              seLength    - length of structuring elements (opening)
 *              threshVal   - threshold value for converting to binary image
 *              minArea     - minimum area of FG targets
 *              maxArea     - maximum area of FG targets
 *              minAspRatio - minimum aspect ratio of FG targets
 *              maxAspRatio - maximum aspect ratio of FG targets
 * Returns:     vector<FGObject>* - all extracted FG targets; heap-allocated
 *                                  and owned by the caller (NULL if inImg is
 *                                  empty, empty vector on failure/no targets)
 * Comments:
 * Revision:
 *******************************************************************************/
vector<FGObject>* FGExtraction::extractFGTargets(InputArray inImg, OutputArray fgImg, int seLength, int threshVal,
                                                 double minArea, double maxArea, double minAspRatio, double maxAspRatio)
{
    // weight of the histogram-backprojection merge of the two threshold masks
    double theta = 0.4;
    if(!inImg.obj) return NULL;

    _inImg = inImg.getMat();
    this->init();

    // background subtraction by opening
    int err = subtractBGOpenDiagonal(inImg, _bgsImg, threshVal, seLength);
    if (err > 0) {
        // background subtraction failed: return an empty, caller-owned list
        return new vector<FGObject>;
    }

    // get the contours of the background-subtracted mask
    vector<vector<Point>> contours = extractContours(_bgsImg);

    // FIX: the code below indexes contours[argMax] (undefined behavior on an
    // empty vector) and later calls fitEllipse() on that contour; bail out
    // early when no foreground contour was found.
    if (contours.empty()) {
        return new vector<FGObject>;
    }

    // find the contour with the largest upright bounding box
    Mat mask = Mat::zeros(_bgsImg.size(), CV_8U);
    vector<int> areas(contours.size());
    int cnt = 0;
    int argMax = 0;
    int max_area = 0;
    for(vector<vector<Point> >::const_iterator it = contours.begin(); it != contours.end(); ++it){
        Rect uprightBox = boundingRect(*it);
        areas[cnt] = uprightBox.height*uprightBox.width;
        if (areas[cnt] > max_area) {
            max_area = areas[cnt];
            argMax = cnt;
        }
        cnt++;
    }
    vector<Point> largestContour = contours[argMax];    //***** only use the largest contour

    // paint an enlarged oriented bounding ellipse of the largest contour into
    // the mask used for the double local thresholding below
    RotatedRect orientedBox = orientedBoundingBox(largestContour);
    orientedBox.size.width *= 1.5;
    orientedBox.size.height *= 1.5;
    ellipse(mask, orientedBox, Scalar(255), -1);

    // double local thresholding inside the masked region
    double percentage = 0.8;
    doubleThresholdByValue(percentage, mask);

    // remove noise by a median filter
    medianBlur(_fgHighImg, _fgHighImg, 3);
    medianBlur(_fgLowImg, _fgLowImg, 3);

    // merge the two threshold masks using histogram backprojection
    updateByHistBackproject(theta, mask);

    // clear the ellipse region again before the area/variance thresholding
    ellipse(mask, orientedBox, Scalar(0), -1);
    ellipse(_fgHighImg, orientedBox, Scalar(0), -1);
    ellipse(_fgLowImg, orientedBox, Scalar(0), -1);

    // thresholding by area and variance
#ifdef IMAGE_DOWNSAMPLING
    int dilateSESize = 3;
    int erodeSESize = 3;
    int varThresh = 30;
#else
    int dilateSESize = 7;
    int erodeSESize = 7;
    int varThresh = 30;
#endif
    thresholdByAreaRatioVar(minArea, maxArea, dilateSESize, erodeSESize, minAspRatio, maxAspRatio, varThresh);

    // post-processing
    postProcessing(_fgImg, _fgImg);

    // fill the holes of the fgImg: flood-fill the background from (0,0),
    // invert so only the enclosed holes remain set, and OR them back in
    _fgImg.copyTo(fgImg);
    floodFill(fgImg, cv::Point(0, 0), Scalar(255));
    bitwise_not(fgImg, fgImg);
    bitwise_or(fgImg, _fgImg, _fgImg);

    // opening/closing with a structuring element scaled to the object height
    // NOTE(review): fitEllipse() requires at least 5 contour points, and
    // seSize can round down to 0 for very small objects -- confirm upstream
    // guarantees (the early guards above only ensure a contour exists).
    RotatedRect rotatedR = fitEllipse(Mat(largestContour));
    float objHeight = min(rotatedR.size.height, rotatedR.size.width);
    int seSize = int(objHeight/10.0 + 0.5);
    Mat se = getStructuringElement(MORPH_ELLIPSE, Size(seSize, seSize));    //***** choose different size according to object height
    morphologyEx(_fgImg, _fgImg, MORPH_OPEN, se);
    morphologyEx(_fgImg, _fgImg, MORPH_CLOSE, se);

    thresholdByAreaRatioVar(0.5*minArea, maxArea, 1, 1, minAspRatio, maxAspRatio, 30);

#ifdef IMAGE_DOWNSAMPLING
    // NOTE(review): largeInImg is not declared in this scope; this branch
    // does not compile with IMAGE_DOWNSAMPLING defined -- confirm intent.
    resize(_fgImg, _fgImg, Size(), 2, 2, INTER_LINEAR);
    resize(_inImg, largeInImg, Size(), 2, 2, INTER_LINEAR);
#endif

    // push the surviving targets into the result vector
    contours = extractContours(_fgImg);
    vector<FGObject>* fgObjects = new vector<FGObject>;
    for (size_t i = 0; i < contours.size(); i++){
        double area = contourArea(contours[i]);
        RotatedRect orientedRect = orientedBoundingBox(contours[i]);
        Point2f points[4];
        orientedRect.points(points);

        // FIX: build the object on the stack instead of new/delete per contour
        FGObject obj;
        obj.setObjectProperties(area, orientedRect.angle, contours[i], points, SOURCE_UNRECTIFIED);
        if(!obj.isPartialOut(_fgImg.cols, _fgImg.rows)){
            fgObjects->push_back(obj);
        }
    }

    // eliminate artifacts with width of 1 at the border
    rectangle(_fgImg, Point(0, 0), Point(_fgImg.cols - 1, _fgImg.rows - 1), Scalar(0));
    fgImg.getMatRef() = _fgImg.clone();

    return fgObjects;
}
// Convergence/timing benchmark: runs the BDF-solid IDC/SDC coupled solver for
// every combination of node count (1..5) and time step count (1,2,4,...,32),
// in parallel over the combinations via OpenMP, and writes one .log file plus
// the final fluid/solid fields per combination.
int main()
{
    int nbComputations = 6;
    int nbNodes = 5;

    // collapse(2): the two loops form one iteration space so dynamic
    // scheduling can balance the very uneven run times per combination.
    #pragma omp parallel for collapse(2), schedule(dynamic,1)

    for ( int iNodes = 0; iNodes < nbNodes; iNodes++ )
    {
        for ( int iComputation = 0; iComputation < nbComputations; iComputation++ )
        {
            // Deliberately shadows the outer nbNodes: per-iteration node count.
            unsigned int nbNodes = iNodes + 1;
            unsigned int nbTimeSteps = std::pow( 2, iComputation );

            // Declared outside the setup scope so they survive it; the inner
            // block lets the intermediate setup objects be released early.
            std::shared_ptr<sdc::TimeIntegrationScheme> timeIntegrationScheme;
            std::shared_ptr<tubeflow::SDCTubeFlowFluidSolver> fluid;
            std::shared_ptr<tubeflow::SDCTubeFlowBDFLinearizedSolidSolver> solid;
            std::shared_ptr<MultiLevelFsiSolver> fsi;

            {
                // --- physical and coupling parameters ---
                scalar r0 = 0.2;
                scalar a0 = M_PI * r0 * r0;
                scalar u0 = 0.1;
                scalar p0 = 0;
                scalar L = 1;
                scalar T = 1;
                scalar dt = T / nbTimeSteps;
                scalar rho_f = 1.225;
                scalar rho_s = 1.225;
                scalar E0 = 490;
                scalar G = 490;
                scalar h = 1.0e-3;
                scalar nu = 0.5;
                scalar cmk = std::sqrt( E0 * h / (2 * rho_f * r0) );
                int N = 250;
                bool parallel = false;
                int extrapolation = 0;
                int maxIter = 100;
                scalar initialRelaxation = 1.0e-3;
                int maxUsedIterations = 50;
                int nbReuse = 0;
                scalar tol = 1.0e-5;
                scalar absoluteTol = 1.0e-13;
                scalar singularityLimit = 1.0e-13;
                int reuseInformationStartingFromTimeIndex = 0;
                bool scaling = false;
                bool updateJacobian = false;
                scalar beta = 0.1;

                // BDF order of the solid solver: first order only for the
                // single-node case.
                int timeOrder = 1;

                if ( nbNodes > 1 )
                    timeOrder = 2;

                fluid = std::shared_ptr<tubeflow::SDCTubeFlowFluidSolver> ( new tubeflow::SDCTubeFlowFluidSolver( a0, u0, p0, dt, cmk, N, L, T, rho_f ) );
                solid = std::shared_ptr<tubeflow::SDCTubeFlowBDFLinearizedSolidSolver>( new tubeflow::SDCTubeFlowBDFLinearizedSolidSolver( N, nu, rho_s, h, L, dt, G, E0, r0, T, timeOrder ) );

                // No RBF interpolation here: solvers are coupled directly.
                shared_ptr<MultiLevelSolver> fluidSolver( new MultiLevelSolver( fluid, fluid, 0, 0 ) );
                shared_ptr<MultiLevelSolver> solidSolver( new MultiLevelSolver( solid, fluid, 1, 0 ) );

                // Convergence: relative residual plus a tightened absolute bound.
                std::shared_ptr< std::list<std::shared_ptr<ConvergenceMeasure> > > convergenceMeasures;
                convergenceMeasures = std::shared_ptr<std::list<std::shared_ptr<ConvergenceMeasure> > >( new std::list<std::shared_ptr<ConvergenceMeasure> > );
                convergenceMeasures->push_back( std::shared_ptr<ConvergenceMeasure>( new ResidualRelativeConvergenceMeasure( 0, true, tol ) ) );
                convergenceMeasures->push_back( std::shared_ptr<ConvergenceMeasure>( new AbsoluteConvergenceMeasure( 0, true, 0.1 * absoluteTol ) ) );

                fsi = shared_ptr<MultiLevelFsiSolver> ( new MultiLevelFsiSolver( fluidSolver, solidSolver, convergenceMeasures, parallel, extrapolation ) );
                shared_ptr<PostProcessing> postProcessing( new AndersonPostProcessing( fsi, maxIter, initialRelaxation, maxUsedIterations, nbReuse, singularityLimit, reuseInformationStartingFromTimeIndex, scaling, beta, updateJacobian ) );

                std::shared_ptr<sdc::SDCFsiSolverInterface> sdcFluidSolver = std::dynamic_pointer_cast<sdc::SDCFsiSolverInterface>( fluid );
                std::shared_ptr<sdc::SDCFsiSolverInterface> sdcSolidSolver = std::dynamic_pointer_cast<sdc::SDCFsiSolverInterface>( solid );
                assert( sdcFluidSolver );
                assert( sdcSolidSolver );

                std::shared_ptr<fsi::SDCFsiSolver> fsiSolver( new fsi::SDCFsiSolver( sdcFluidSolver, sdcSolidSolver, postProcessing ) );

                // SDC scheme on uniform quadrature nodes.
                // NOTE(review): the output label says "IDC_BDF" while an
                // sdc::SDC scheme is built here -- confirm the naming.
                std::shared_ptr<fsi::quadrature::IQuadrature<scalar> > quadrature;
                quadrature = std::shared_ptr<fsi::quadrature::IQuadrature<scalar> >( new fsi::quadrature::Uniform<scalar>( nbNodes ) );
                timeIntegrationScheme = std::shared_ptr<sdc::TimeIntegrationScheme> ( new sdc::SDC( fsiSolver, quadrature, absoluteTol, nbNodes, 50 ) );
            }

            assert( timeIntegrationScheme );
            assert( fluid );
            assert( solid );
            assert( fsi );

            // Time the integration run for this combination.
            std::chrono::time_point<std::chrono::high_resolution_clock> start, end;
            start = std::chrono::high_resolution_clock::now();

            timeIntegrationScheme->run();

            end = std::chrono::high_resolution_clock::now();
            std::chrono::duration<double> elapsed_seconds = end - start;

            // One set of output files per (nbNodes, nbTimeSteps) combination;
            // unique names keep the parallel iterations from clashing.
            std::string label = "IDC_BDF";
            label += "_nbNodes_" + std::to_string( nbNodes );
            label += "_nbTimeSteps_" + std::to_string( nbTimeSteps );

            ofstream log_file( label + ".log" );
            ofstream data_fluid_u( label + "_data_fluid_u.log" );
            ofstream data_fluid_a( label + "_data_fluid_a.log" );
            ofstream data_fluid_p( label + "_data_fluid_p.log" );
            ofstream data_solid_u( label + "_data_solid_u.log" );
            ofstream data_solid_r( label + "_data_solid_r.log" );

            log_file << "label = " << label << std::endl;
            log_file << "nbNodes = " << nbNodes << std::endl;
            log_file << "nbTimeSteps = " << nbTimeSteps << std::endl;
            log_file << "nbIterations = " << fsi->nbIter << std::endl;
            log_file << "timing = " << elapsed_seconds.count() << std::endl;

            data_fluid_u << std::setprecision( 20 ) << fluid->u << std::endl;
            data_fluid_a << std::setprecision( 20 ) << fluid->a << std::endl;
            data_fluid_p << std::setprecision( 20 ) << fluid->p << std::endl;
            data_solid_u << std::setprecision( 20 ) << solid->u << std::endl;
            data_solid_r << std::setprecision( 20 ) << solid->r << std::endl;

            log_file.close();
            data_fluid_u.close();
            data_fluid_a.close();
            data_fluid_p.close();
            data_solid_u.close();
            data_solid_r.close();
        }
    }
}
// Convergence-order study for the SDC-coupled FSI solver on Gauss-Lobatto
// nodes: five computations with doubled time step counts must show at least
// order 3.8 for the fluid velocity and cross-section fields.
TEST( SDCFsiTest, order )
{
    // --- physical/numerical parameters of the 1d tube flow problem ---
    int N = 5;
    scalar r0 = 0.2;
    scalar a0 = M_PI * r0 * r0;
    scalar u0 = 0.1;
    scalar p0 = 0;
    scalar L = 1;
    scalar T = 1;
    scalar dx = L / N;
    scalar rho = 1.225;
    scalar E = 490;
    scalar h = 1.0e-3;
    scalar cmk = std::sqrt( E * h / (2 * rho * r0) );
    scalar c0 = std::sqrt( cmk * cmk - p0 / (2 * rho) );
    scalar kappa = c0 / u0;

    // --- coupling settings ---
    bool parallel = false;
    int extrapolation = 0;
    scalar tol = 1.0e-5;
    int maxIter = 20;
    scalar initialRelaxation = 1.0e-3;
    int maxUsedIterations = 50;
    int nbReuse = 0;
    scalar singularityLimit = 1.0e-13;
    int reuseInformationStartingFromTimeIndex = 0;
    bool scaling = false;
    scalar beta = 0.01;
    bool updateJacobian = false;
    int minIter = 5;

    // Sanity checks on the derived quantities.
    ASSERT_NEAR( kappa, 10, 1.0e-13 );
    ASSERT_TRUE( dx > 0 );

    int nbComputations = 5;

    // Keep every computation's fluid solver so the finest run can serve as
    // the reference solution in the order estimate below.
    std::deque<std::shared_ptr<tubeflow::SDCTubeFlowFluidSolver> > fluidSolvers;
    std::deque<int> nbTimeStepsList;

    for ( int iComputation = 0; iComputation < nbComputations; iComputation++ )
    {
        // Time step halves (step count doubles) with each computation.
        int nbTimeSteps = 4 * std::pow( 2, iComputation );
        std::cout << "nbTimeSteps = " << nbTimeSteps << std::endl;
        scalar dt = T / nbTimeSteps;

        std::shared_ptr<tubeflow::SDCTubeFlowFluidSolver> fluid( new tubeflow::SDCTubeFlowFluidSolver( a0, u0, p0, dt, cmk, N, L, T, rho ) );
        std::shared_ptr<tubeflow::SDCTubeFlowSolidSolver> solid( new tubeflow::SDCTubeFlowSolidSolver( a0, cmk, p0, rho, L, N ) );

        // Fluid-side RBF interpolation; each coarsening owns a fresh chain.
        shared_ptr<RBFFunctionInterface> rbfFunction;
        shared_ptr<RBFInterpolation> rbfInterpolator;
        shared_ptr<RBFCoarsening> rbfInterpToCouplingMesh;
        shared_ptr<RBFCoarsening> rbfInterpToMesh;
        rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
        rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
        rbfInterpToCouplingMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
        rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
        rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
        rbfInterpToMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
        shared_ptr<MultiLevelSolver> fluidSolver( new MultiLevelSolver( fluid, fluid, rbfInterpToCouplingMesh, rbfInterpToMesh, 0, 0 ) );

        // Solid-side RBF interpolation.
        rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
        rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
        rbfInterpToCouplingMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
        rbfFunction = shared_ptr<RBFFunctionInterface>( new TPSFunction() );
        rbfInterpolator = shared_ptr<RBFInterpolation>( new RBFInterpolation( rbfFunction ) );
        rbfInterpToMesh = shared_ptr<RBFCoarsening> ( new RBFCoarsening( rbfInterpolator ) );
        shared_ptr<MultiLevelSolver> solidSolver( new MultiLevelSolver( solid, fluid, rbfInterpToCouplingMesh, rbfInterpToMesh, 1, 0 ) );

        // Convergence: relative residual plus a minimum iteration count.
        std::shared_ptr< std::list<std::shared_ptr<ConvergenceMeasure> > > convergenceMeasures;
        convergenceMeasures = std::shared_ptr<std::list<std::shared_ptr<ConvergenceMeasure> > >( new std::list<std::shared_ptr<ConvergenceMeasure> > );
        convergenceMeasures->push_back( std::shared_ptr<ConvergenceMeasure>( new ResidualRelativeConvergenceMeasure( 0, false, tol ) ) );
        convergenceMeasures->push_back( std::shared_ptr<ConvergenceMeasure>( new MinIterationConvergenceMeasure( 0, false, minIter ) ) );

        shared_ptr<MultiLevelFsiSolver> fsi( new MultiLevelFsiSolver( fluidSolver, solidSolver, convergenceMeasures, parallel, extrapolation ) );
        shared_ptr<PostProcessing> postProcessing( new AndersonPostProcessing( fsi, maxIter, initialRelaxation, maxUsedIterations, nbReuse, singularityLimit, reuseInformationStartingFromTimeIndex, scaling, beta, updateJacobian ) );

        std::shared_ptr<sdc::SDCFsiSolverInterface> sdcFluidSolver = std::dynamic_pointer_cast<sdc::SDCFsiSolverInterface>( fluid );
        std::shared_ptr<sdc::SDCFsiSolverInterface> sdcSolidSolver = std::dynamic_pointer_cast<sdc::SDCFsiSolverInterface>( solid );
        assert( sdcFluidSolver );
        assert( sdcSolidSolver );

        std::shared_ptr<fsi::SDCFsiSolver> fsiSolver( new fsi::SDCFsiSolver( sdcFluidSolver, sdcSolidSolver, postProcessing, extrapolation ) );

        // SDC on Gauss-Lobatto quadrature nodes.
        int nbNodes = 3;
        std::shared_ptr<fsi::quadrature::IQuadrature<scalar> > quadrature;
        quadrature = std::shared_ptr<fsi::quadrature::IQuadrature<scalar> >( new fsi::quadrature::GaussLobatto<scalar>( nbNodes ) );
        std::shared_ptr<sdc::SDC> sdc( new sdc::SDC( fsiSolver, quadrature, 1.0e-13, 10, 200 ) );

        sdc->run();
        ASSERT_TRUE( sdc->isConverged() );

        fluidSolvers.push_back( fluid );
        nbTimeStepsList.push_back( nbTimeSteps );
    }

    // Observed convergence order for the fluid unknowns (i == 0: u, else: a),
    // using the finest computation as reference.
    for ( int i = 0; i < 2; i++ )
    {
        fsi::vector ref;

        if ( i == 0 )
            ref = fluidSolvers.back()->u;
        else
            ref = fluidSolvers.back()->a;

        std::deque<scalar> errors;

        for ( int iComputation = 0; iComputation < nbComputations - 1; iComputation++ )
        {
            fsi::vector data;

            if ( i == 0 )
                data = fluidSolvers.at( iComputation )->u;
            else
                data = fluidSolvers.at( iComputation )->a;

            // NOTE(review): normalized by data.norm(), unlike the sibling
            // tests which divide by ref.norm() -- confirm which is intended.
            scalar error = (ref - data).norm() / data.norm();
            std::cout << "error = " << error << std::endl;
            errors.push_back( error );
        }

        for ( int iComputation = 0; iComputation < nbComputations - 2; iComputation++ )
        {
            // Order estimate from consecutive errors on a log-log scale.
            scalar order = ( std::log10( errors.at( iComputation ) ) - std::log10( errors.at( iComputation + 1 ) ) ) / ( std::log10( nbTimeStepsList.at( iComputation + 1 ) ) - std::log10( nbTimeStepsList.at( iComputation ) ) );
            std::cout << "order = " << order << std::endl;
            ASSERT_GE( order, 3.8 );
        }
    }
}