Result SoundSourceWV::tryOpen(const AudioSourceConfig& audioSrcCfg) {
    DEBUG_ASSERT(!m_wpc);
    char msg[80]; // hold possible error message
    int openFlags = OPEN_WVC | OPEN_NORMALIZE;
    if ((kChannelCountMono == audioSrcCfg.channelCountHint) ||
            (kChannelCountStereo == audioSrcCfg.channelCountHint)) {
        openFlags |= OPEN_2CH_MAX;
    }
    m_wpc = WavpackOpenFileInput(
            getLocalFileNameBytes().constData(), msg, openFlags, 0);
    if (!m_wpc) {
        qDebug() << "SSWV::open: failed to open file : " << msg;
        return ERR;
    }
    setChannelCount(WavpackGetReducedChannels(m_wpc));
    setFrameRate(WavpackGetSampleRate(m_wpc));
    setFrameCount(WavpackGetNumSamples(m_wpc));
    if (WavpackGetMode(m_wpc) & MODE_FLOAT) {
        m_sampleScaleFactor = CSAMPLE_PEAK;
    } else {
        const int bitsPerSample = WavpackGetBitsPerSample(m_wpc);
        const uint32_t wavpackPeakSampleValue = uint32_t(1) << (bitsPerSample - 1);
        m_sampleScaleFactor = CSAMPLE_PEAK / CSAMPLE(wavpackPeakSampleValue);
    }
    return OK;
}
Result SoundSourceSndFile::tryOpen(const AudioSourceConfig& /*audioSrcCfg*/) {
    DEBUG_ASSERT(!m_pSndFile);
    SF_INFO sfInfo;
    memset(&sfInfo, 0, sizeof(sfInfo)); // must be zeroed before opening for reading
#ifdef __WINDOWS__
    // Pointer is valid as long as the QString is not modified
    const QString fileName(getLocalFileName());
    LPCWSTR lpcwFilename = (LPCWSTR) fileName.utf16();
    m_pSndFile = sf_wchar_open(lpcwFilename, SFM_READ, &sfInfo);
#else
    m_pSndFile = sf_open(getLocalFileNameBytes().constData(), SFM_READ, &sfInfo);
#endif
    if (!m_pSndFile) { // sf_format_check is only for writes
        qWarning() << "Error opening libsndfile file:" << getUrlString()
                << sf_strerror(m_pSndFile);
        return ERR;
    }
    if (sf_error(m_pSndFile) > 0) {
        qWarning() << "Error opening libsndfile file:" << getUrlString()
                << sf_strerror(m_pSndFile);
        return ERR;
    }
    setChannelCount(sfInfo.channels);
    setFrameRate(sfInfo.samplerate);
    setFrameCount(sfInfo.frames);
    return OK;
}
SoundSourceMediaFoundation::SoundSourceMediaFoundation(QUrl url)
        : SoundSourcePlugin(url, "m4a"),
          m_hrCoInitialize(E_FAIL),
          m_hrMFStartup(E_FAIL),
          m_pReader(NULL),
          m_pAudioType(NULL),
          m_wcFilename(NULL),
          m_nextFrame(0),
          m_leftoverBuffer(NULL),
          m_leftoverBufferSize(0),
          m_leftoverBufferLength(0),
          m_leftoverBufferPosition(0),
          m_mfDuration(0),
          m_iCurrentPosition(0),
          m_dead(false),
          m_seeking(false) {
    // these are always the same, might as well just stick them here
    // -bkgood
    // AudioSource properties
    setFrameRate(kSampleRate);
    // The presentation attribute MF_PD_AUDIO_ENCODING_BITRATE only exists for
    // presentation descriptors, and MFSourceReader is not one.
    // Therefore, we calculate the bitrate ourselves, assuming 16 bits per sample.
    setBitrate((frames2samples(getFrameRate()) * kBitsPerSample) / 1000);
}
bool SnesEmu::init(const QString &diskPath, QString *error) {
    S9xSetSoundMute(FALSE);
    setDefaultSettings();
    S9xSetPlaybackRate();

    m_frame = QImage(512, 239, QImage::Format_RGB16);
    setVideoSrcRect(QRectF(0.0f, 0.0f, 256.0f, 224.0f));
    setFrameRate(60);

    GFX.Pitch = 512 * 2;
    GFX.RealPitch = 512 * 2;
    GFX.PPL = GFX.Pitch >> 1;
    GFX.PPLx2 = GFX.Pitch;
    GFX.Screen = (u8 *)m_frame.bits();
    GFX.SubScreen = (u8 *)malloc(GFX.RealPitch * 239);
    GFX.ZBuffer = (u8 *)malloc(GFX.PPL * 239);
    GFX.SubZBuffer = (u8 *)malloc(GFX.PPL * 239);
    GFX.Delta = (GFX.SubScreen - GFX.Screen) >> 1;

    // Check the buffer allocations before running the memory/APU init
    if (!GFX.Screen || !GFX.SubScreen || !GFX.ZBuffer || !GFX.SubZBuffer
            || !Memory.Init() || !S9xInitAPU()) {
        *error = tr("SNES emulation init failed!");
        return false;
    }
    S9xInitSound();
    if (!S9xGraphicsInit()) {
        *error = tr("SNES emulation init failed!");
        return false;
    }
    S9xReset();
    setDefaultSettings();

    *error = setDisk(diskPath);
    return error->isEmpty();
}
void ReymentaHapPlayerApp::setup() {
    g_Width = 640;
    g_Height = 480;
    // parameters
    mParameterBag = ParameterBag::create();
    // utils
    mBatchass = Batchass::create(mParameterBag);
    // if AutoLayout, try to position the window on the 2nd screen
    if (mParameterBag->mAutoLayout) {
        mBatchass->getWindowsResolution();
    }
    setWindowSize(mParameterBag->mRenderWidth, mParameterBag->mRenderHeight);
    setWindowPos(ivec2(mParameterBag->mRenderX, mParameterBag->mRenderY));
    setFullScreen(false);
    //enableHighDensityDisplay();
    setFrameRate(60);
    mBatchass->setup();
    mLoopVideo = false;
    // -------- SPOUT -------------
    // Set up the texture we will use to send out.
    // We grab the screen, so it has to be the same size.
    bInitialized = false;
}
void setup() {
    size(1024, 768);
    background(0);
    setFrameRate(60);

    // Setup lighting
    ambientLight(30);
    light.init(200, 200, 200, 0, 3, 0);
    // Uncomment this line to see the position of the light
    light.drawDebug(true);

    // Init 3d objects' properties
    box.init(50, 25, 75);
    box.setPosition(width / 2.0f, height / 2.0f);
    sphere1.init(30);
    sphere2.init(60);
    sphere3.init(100);

    // Now, we make the spheres children of the box (scene node)
    box.addChild(sphere1);
    box.addChild(sphere2);
    box.addChild(sphere3);

    // Translate the spheres (relative to their parent, the box).
    // This way, when we rotate the box (parent object), the spheres will orbit around it.
    sphere1.setPosition(2, 0, 0);
    sphere2.setPosition(5, 0, 0);
    sphere3.setPosition(7, 0, 0);

    // Add the second light as a child of one of the spheres
    sphere1.addChild(light);
}
NaoCamera::NaoCamera(const char* device, CameraInfo::Camera camera, int width, int height, bool flip,
                     const CameraSettings::CameraSettingsCollection& settings,
                     const Matrix5uc& autoExposureWeightTable) :
    camera(camera),
    WIDTH(width),
    HEIGHT(height)
{
    VERIFY((fd = open(device, O_RDWR | O_NONBLOCK)) != -1);

    // The V4L2 image format must be configured before buffers are requested and mapped
    setImageFormat();
    mapBuffers();
    queueBuffers();

    setFrameRate(1, 30);

    checkSettingsAvailability();

    specialSettings.horizontalFlip.value = flip ? 1 : 0;
    setControlSetting(specialSettings.horizontalFlip);
    specialSettings.verticalFlip.value = flip ? 1 : 0;
    setControlSetting(specialSettings.verticalFlip);

    setSettings(settings, autoExposureWeightTable);
    writeCameraSettings();
    readCameraSettings();

    startCapturing();
}
bool WaveformWidgetFactory::setConfig(ConfigObject<ConfigValue>* config) {
    m_config = config;
    if (!m_config) {
        return false;
    }

    bool ok = false;

    int frameRate = m_config->getValueString(ConfigKey("[Waveform]","FrameRate")).toInt(&ok);
    if (ok) {
        setFrameRate(frameRate);
    } else {
        m_config->set(ConfigKey("[Waveform]","FrameRate"), ConfigValue(m_frameRate));
    }

    int vsync = m_config->getValueString(ConfigKey("[Waveform]","VSync"), "0").toInt();
    setVSyncType(vsync);

    int defaultZoom = m_config->getValueString(ConfigKey("[Waveform]","DefaultZoom")).toInt(&ok);
    if (ok) {
        setDefaultZoom(defaultZoom);
    } else {
        m_config->set(ConfigKey("[Waveform]","DefaultZoom"), ConfigValue(m_defaultZoom));
    }

    int zoomSync = m_config->getValueString(ConfigKey("[Waveform]","ZoomSynchronization")).toInt(&ok);
    if (ok) {
        setZoomSync(static_cast<bool>(zoomSync));
    } else {
        m_config->set(ConfigKey("[Waveform]","ZoomSynchronization"), ConfigValue(m_zoomSync));
    }

    WaveformWidgetType::Type type = static_cast<WaveformWidgetType::Type>(
            m_config->getValueString(ConfigKey("[Waveform]","WaveformType")).toInt(&ok));
    if (!ok || !setWidgetType(type)) {
        setWidgetType(autoChooseWidgetType());
    }

    for (int i = 0; i < FilterCount; i++) {
        double visualGain = m_config->getValueString(
                ConfigKey("[Waveform]","VisualGain_" + QString::number(i))).toDouble(&ok);
        if (ok) {
            setVisualGain(FilterIndex(i), visualGain);
        } else {
            m_config->set(ConfigKey("[Waveform]","VisualGain_" + QString::number(i)),
                          QString::number(m_visualGain[i]));
        }
    }

    int overviewNormalized = m_config->getValueString(ConfigKey("[Waveform]","OverviewNormalized")).toInt(&ok);
    if (ok) {
        setOverviewNormalized(static_cast<bool>(overviewNormalized));
    } else {
        m_config->set(ConfigKey("[Waveform]","OverviewNormalized"), ConfigValue(m_overviewNormalized));
    }

    return true;
}
Player& Player::init() {
    setChannelCount(kDefaultChannelCount);
    setFrameRate(kDefaultFrameRate);
    setFrameSize(kDefaultFrameSize);
    setSource(NULL);
    setTick(0);
    return *this;
}
bool DirectShowCamera::setupDevice() {
    setResolution(_width, _height);
    setFrameRate(_frameRate);
    _videoInput->setAutoReconnectOnFreeze(_id, true, 3);
    return true;
}
void Framework::update(){
    setFrameRate( 60 );
    // Original vertex array
    Vector3 p[ 4 ];
    unsigned c[ 4 ]; // colors, to make the output easy to read
    p[ 0 ].set( -1000.0, 0.0, -1000.0 );
    p[ 1 ].set( -1000.0, 0.0, 1000.0 );
    p[ 2 ].set( 1000.0, 0.0, -1000.0 );
    p[ 3 ].set( 1000.0, 0.0, 1000.0 );
    c[ 0 ] = 0xffff0000; // red
    c[ 1 ] = 0xff00ff00; // green
    c[ 2 ] = 0xff0000ff; // blue
    c[ 3 ] = 0xffffffff; // white

    // Build the view matrix
    Matrix34 m;
    m.setViewTransform( gEyePosition, gEyeTarget ); // the real work happens inside; see that function

    // Multiply each vertex by the matrix
    for ( int i = 0; i < 4; ++i ){
        m.multiply( &p[ i ], p[ i ] );
    }

    // Near clip and far clip
    const double nearClip = 1.0;
    const double farClip = 10000.0;
    // Build the z-range conversion coefficients from near and far
    const double zConvA = 1.0 / ( nearClip - farClip ); // 1/(n-f)
    const double zConvB = nearClip * zConvA;            // n/(n-f)

    // Prepare the vertices for the hardware: extend them to 4D
    double p4[ 4 ][ 4 ];
    for ( int i = 0; i < 4; ++i ){
        p4[ i ][ 0 ] = p[ i ].x;
        // Multiply y by 640/480 to adjust the aspect ratio
        p4[ i ][ 1 ] = p[ i ].y * 640.0 / 480.0;
        // Store the pre-conversion z in w, negated. This negation is needed
        // because the Z axis points toward the viewer.
        p4[ i ][ 3 ] = -p[ i ].z;
        // z-range conversion (also affected by the Z-axis direction)
        p4[ i ][ 2 ] = zConvA * p[ i ].z + zConvB;
        // Premultiply z by w
        p4[ i ][ 2 ] *= p4[ i ][ 3 ];
    }

    // Draw the quad as two triangles.
    drawTriangle3DH( p4[ 0 ], p4[ 1 ], p4[ 2 ], 0, 0, 0, c[ 0 ], c[ 1 ], c[ 2 ] );
    drawTriangle3DH( p4[ 3 ], p4[ 1 ], p4[ 2 ], 0, 0, 0, c[ 3 ], c[ 1 ], c[ 2 ] );

    ++gCount;
    // Wiggle the eye position and the look-at target
    gEyePosition.x = sin( gCount ) * 2000;
    gEyePosition.z = cos( gCount ) * 2000;
    gEyePosition.y = 1000.f;
    gEyeTarget.x = gCount % 100;
    gEyeTarget.y = gCount % 200;
    gEyeTarget.z = gCount % 300;
}
QString SnesEmu::setDisk(const QString &path) {
    if (!Memory.LoadROM(path.toAscii().constData()))
        return tr("Load disk failed.");
    Memory.ROMFramesPerSecond = Settings.PAL ? 50 : 60;
    setFrameRate(Memory.ROMFramesPerSecond);
    romLoaded = true;
    S9xMainLoop();
    return QString();
}
OptiTrackCamera::OptiTrackCamera(int id, int width, int height, int frameRate, int cameraCollectionIndex) :
    AbstractCamera(id, width, height, frameRate),
    _camera(NULL),
    _NPFrame(NULL),
    _cameraCollectionIndex(cameraCollectionIndex)
{
    // TODO: Dynamically load the OptiTrack library
    CAMERA_COLLECTION->Item(cameraCollectionIndex, &_camera);
    setupDevice();
    setResolution(_width, _height);
    setFrameRate(_frameRate);
    // Start camera capture
    _camera->Start();
}
NaoCamera::NaoCamera(const char* device, CameraInfoBH::Camera camera, int width, int height, bool flip) :
    timeWaitedForLastImage(0),
    WIDTH(width * 2),
    HEIGHT(height * 2),
#ifndef NDEBUG
    SIZE(WIDTH * HEIGHT * 2),
#endif
    currentBuf(0),
    timeStamp(0),
    camera(camera),
    first(true),
    lastCameraSettingTimestamp(0),
    cameraSettingApplicationRate(16000)
{
    initOpenVideoDevice(device);
    initRequestAndMapBuffers();
    initQueueAllBuffers();
    initSetImageFormat();
    setFrameRate(1, 15);
    setFrameRate(1, 30);
    initDefaultControlSettings(flip);
    startCapturing();
}
void Framework::update(){
    if ( !Sequence::Parent::instance() ){
        Sequence::Parent::create();
        setFrameRate( 60 );
    }
    Sequence::Parent::instance()->update();
    // End check: was 'q' pressed, or was the window close button clicked?
    if ( isKeyOn( 'q' ) ){
        requestEnd();
    }
    if ( isEndRequested() ){
        Sequence::Parent::destroy();
    }
}
void Framework::update() {
    if( bFirstFrame ) {
        Pad::Create();
        bFirstFrame = false;
        g_ResourceStage = new GraphicData("data.txt");
        g_ResourceRobot = new GraphicData("robo.txt");
        g_Model[0] = g_ResourceRobot->CreateModel("robo");
        g_Model[1] = g_ResourceRobot->CreateModel("robo");

        Document doc;
        Element* root = doc.GetRoot();
        Element* tmp = new Element("Test");
        double p[3] = { 10, 0, -10 };
        Attribute* a = new Attribute("Pos", "1,1,1");
        a->Set("Hehe", p, 3);
        tmp->AddAttribute(a);
        root->AddElement(tmp);
        doc.Write("MyFirst.txt");
    }
    setFrameRate( 60 );
    g_cnt++;

    // View matrix
    //camera.SetPos( Vector3(sin(g_cnt)*10, 1, cos(g_cnt)*10) );
    camera.SetPos( Vector3(0, 1, 10) );
    camera.SetTarget( Vector3(0, 0, 0) );
    Matrix44 matProjView;
    camera.CreateProjViewMatrix(&matProjView);

    g_ResourceStage->GetBatch("batch")->Draw(matProjView);
    g_Model[0]->SetPos(Vector3(2, 0, 0));
    g_Model[0]->Draw(matProjView);
    g_Model[1]->SetPos(Vector3(-2, 1, 0));
    g_Model[1]->SetScale(Vector3(2, 1, 2));
    g_Model[1]->Draw(matProjView);
    enableDepthTest( true );

    if ( isEndRequested() ){
        SAFE_DELETE( g_ResourceStage );
        SAFE_DELETE( g_ResourceRobot );
        Pad::Destroy();
    }
}
void Framework::update(){
    setFrameRate( 60 );
    double p0[ 3 ] = { 100.0, 100.0, 0.0 };
    double p1[ 3 ] = { 400.0, 200.0, 0.4 };
    double p2[ 3 ] = { 200.0, 400.0, 0.8 };
    // Sweep the triangle through the z range [-1, +1) over 200 frames
    double zOffset = ( gCount % 200 ) * 0.01 - 1.0;
    p0[ 2 ] += zOffset;
    p1[ 2 ] += zOffset;
    p2[ 2 ] += zOffset;
    drawTriangle3D( p0, p1, p2 );
    ++gCount;
}
void ofxFenster::setup() {
    setActive();
    ofAddListener(ofEvents().update, this, &ofxFenster::update);
    ofAddListener(ofEvents().draw, this, &ofxFenster::draw);
    ofxFensterListenerList::iterator it = listeners.begin();
    ofNotifyEvent(ofEvents().setup, voidEventArgs);
    ofNotifyEvent(events.setup, voidEventArgs);
    while (it != listeners.end()) {
        (*it)->setup(this);
        ++it;
    }
    setFrameRate(60);
}
bool CvCaptureCAM_CMU::setProperty(int property_id, double value) {
    bool retval = false;
    int ival = cvRound(value);
    C1394Camera* cmucam = camera();
    if (!cmucam)
        return false;

    switch (property_id) {
    case CV_CAP_PROP_FRAME_WIDTH:
    case CV_CAP_PROP_FRAME_HEIGHT:
        {
            // Keep the 4:3 aspect ratio when only one dimension is given
            int width, height;
            if (property_id == CV_CAP_PROP_FRAME_WIDTH) {
                width = ival;
                height = width * 3 / 4;
            } else {
                height = ival;
                width = height * 4 / 3;
            }
            retval = setVideoSize(width, height);
        }
        break;
    case CV_CAP_PROP_FPS:
        retval = setFrameRate(ival);
        break;
    case CV_CAP_PROP_MODE:
        retval = setMode(ival);
        break;
    case CV_CAP_PROP_FORMAT:
        retval = setFormat(ival);
        break;
    }

    // Resize the image if it's not the right size anymore
    CvSize size = getSize();
    if (!image || image->width != size.width || image->height != size.height) {
        cvReleaseImage(&image);
        image = cvCreateImage(size, 8, 3);
    }
    return retval;
}
bool SDLEnvironment::init(Game *_game) {
    Environment::init(_game);

    openLogFile();
    printLog("--------------------------------------------------------\n");
    printLog(" %s - %s\n", m_game->name(), m_game->version());
    printLog("--------------------------------------------------------\n");

    SDL_Init(0);

    m_screen = new SDLScreen();
    m_screen->setCaption(m_game->name());
    if (!m_screen->preinit()) {
        printLog("ERROR: Couldn't init screen.\n");
        return false;
    }

    m_audio = emyl::manager::get_instance();
    m_input = new SDLInput();
    setFrameRate();

    m_game->init();

    if (!m_screen->init()) {
        printLog("ERROR: Couldn't init screen.\n");
        return false;
    }
    if (!m_audio->init()) {
        printLog("ERROR: Couldn't run sound.\n");
    }

    emyl::setErrorCallback([](const std::string &error) {
        printLog("emyl error: %s\n", error.c_str());
    });

    m_game->load();
    return true;
}
/*!
 *=============================================================================
 *
 * \brief Grabber::Grabber
 * \param depthCamera
 * \param frameFlag
 * \param sys
 *
 *=============================================================================
 */
Grabber::Grabber(DepthCameraPtr depthCamera, FrameFlag frameFlag, CameraSystem &sys) :
    _depthCamera(depthCamera), _frameFlag(frameFlag), _sys(sys) {
    if (!_depthCamera->isInitialized()) {
        logger(LOG_ERROR) << "Grabber: camera not initialized." << endl;
        return;
    }

    FrameSize sz;
    _depthCamera->getFrameSize(sz);
    _rows = sz.height;
    _cols = sz.width;
    _frameCount = 0;
    _updateDone = false;

    setFrameRate(30.0);

    // Register the frame callback for every frame type requested in frameFlag
    if (_frameFlag & FRAMEFLAG_XYZI_POINT_CLOUD_FRAME)
        _depthCamera->registerCallback(DepthCamera::FRAME_XYZI_POINT_CLOUD_FRAME,
                std::bind(&Grabber::_callback, this,
                        std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
    if (_frameFlag & FRAMEFLAG_DEPTH_FRAME)
        _depthCamera->registerCallback(DepthCamera::FRAME_DEPTH_FRAME,
                std::bind(&Grabber::_callback, this,
                        std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
    if (_frameFlag & FRAMEFLAG_RAW_PROCESSED)
        _depthCamera->registerCallback(DepthCamera::FRAME_RAW_FRAME_PROCESSED,
                std::bind(&Grabber::_callback, this,
                        std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
    if (_frameFlag & FRAMEFLAG_RAW_UNPROCESSED)
        _depthCamera->registerCallback(DepthCamera::FRAME_RAW_FRAME_UNPROCESSED,
                std::bind(&Grabber::_callback, this,
                        std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
}
Result SoundSourceWV::tryOpen(const AudioSourceConfig& audioSrcCfg) {
    DEBUG_ASSERT(!m_wpc);
    char msg[80]; // hold possible error message
    int openFlags = OPEN_WVC | OPEN_NORMALIZE;
    if ((kChannelCountMono == audioSrcCfg.channelCountHint) ||
            (kChannelCountStereo == audioSrcCfg.channelCountHint)) {
        openFlags |= OPEN_2CH_MAX;
    }

    // We use WavpackOpenFileInputEx to support Unicode paths on Windows
    // http://www.wavpack.com/lib_use.txt
    QString wavPackFileName = getLocalFileName();
    m_pWVFile = new QFile(wavPackFileName);
    m_pWVFile->open(QFile::ReadOnly);
    QString correctionFileName(wavPackFileName + "c");
    if (QFile::exists(correctionFileName)) {
        // If there is a correction file, open it as well
        m_pWVCFile = new QFile(correctionFileName);
        m_pWVCFile->open(QFile::ReadOnly);
    }
    m_wpc = WavpackOpenFileInputEx(&s_streamReader, m_pWVFile, m_pWVCFile,
            msg, openFlags, 0);
    if (!m_wpc) {
        qDebug() << "SSWV::open: failed to open file : " << msg;
        return ERR;
    }
    setChannelCount(WavpackGetReducedChannels(m_wpc));
    setFrameRate(WavpackGetSampleRate(m_wpc));
    setFrameCount(WavpackGetNumSamples(m_wpc));
    if (WavpackGetMode(m_wpc) & MODE_FLOAT) {
        m_sampleScaleFactor = CSAMPLE_PEAK;
    } else {
        const int bitsPerSample = WavpackGetBitsPerSample(m_wpc);
        const uint32_t wavpackPeakSampleValue = 1u << (bitsPerSample - 1);
        m_sampleScaleFactor = CSAMPLE_PEAK / wavpackPeakSampleValue;
    }
    return OK;
}
// Initialize the codec if needed
static void initializeCodec(VP8EncoderGlobals glob, ICMCompressorSourceFrameRef sourceFrame) {
    if (glob->codec != NULL)
        return;
    dbg_printf("[vp8e - %08lx] initializeCodec\n", (UInt32)glob);
    glob->codec = calloc(1, sizeof(vpx_codec_ctx_t));
    // Done here because we don't know the frame rate until we have a source image
    setBitrate(glob, sourceFrame);
    setMaxKeyDist(glob);
    setFrameRate(glob);
    setCustom(glob);
    glob->cfg.g_pass = glob->currentPass;
    dbg_printEncoderSettings(&glob->cfg);
    if (vpx_codec_enc_init(glob->codec, &vpx_codec_vp8_cx_algo, &glob->cfg, 0)) {
        const char *detail = vpx_codec_error_detail(glob->codec);
        dbg_printf("[vp8e - %08lx] Failed to initialize encoder pass = %d %s\n",
                   (UInt32)glob, glob->currentPass, detail);
    }
    setCustomPostInit(glob);
}
void Graphics::reset() {
    /* Dispose all live Disposables */
    IntruListLink<Disposable> *iter;
    for (iter = p->dispList.begin(); iter != p->dispList.end(); iter = iter->next) {
        iter->data->dispose();
    }
    p->dispList.clear();

    /* Reset attributes (frame count not included) */
    p->fpsLimiter.resetFrameAdjust();
    p->frozen = false;
    p->screen.getPP().clearBuffers();

    setFrameRate(DEF_FRAMERATE);
    setBrightness(255);
}
void msaFluidParticlesApp::setup() {
    console() << "ciMSAFluid Demo | (c) 2009 Mehmet Akten | www.memo.tv" << std::endl;

    // setup fluid stuff
    fluidSolver.setup(100, 100);
    fluidSolver.enableRGB(true).setFadeSpeed(0.004).setDeltaT(0.5)
            .setVisc(0.00019).setColorDiffusion(0);
    fluidDrawer.setup(&fluidSolver);
    particleSystem.setFluidSolver(&fluidSolver);

    fluidCellsX = 150;
    drawFluid = true;
    drawParticles = true;
    renderUsingVA = true;

    width = 1024;
    height = 768;
    setFrameRate(60.0f);
    setWindowSize(width, height);
    pMouse = getWindowCenter();
    resizeFluid = true;

    gl::enableAlphaBlending();

    gl::Fbo::Format format;
    format.setSamples(8);
    //format.setWrap(GL_CLAMP_TO_EDGE, GL_CLAMP_TO_EDGE);
    mFbo = gl::Fbo(width, height, format);
    drawFluidTex = false;

    loadShader();
}
void Framework::update(){
    if ( !gRootSequence ){
        gRootSequence = new Sequence::Parent();
    }
    // Frame-rate control. Calling this once would be enough,
    // but it is less hassle to just call it every frame.
    setFrameRate( 60 );
    if ( gCounter % 60 == 0 ){
        // Print the frame rate once every 60 frames
        cout << " FrameRate:" << frameRate() << endl;
    }
    ++gCounter;
    gRootSequence->update();
    // End check: was 'q' pressed, or was the window close button clicked?
    if ( isKeyOn( 'q' ) ){
        requestEnd();
    }
    if ( isEndRequested() ){
        SAFE_DELETE( gRootSequence );
    }
}
Result SoundSourceOggVorbis::tryOpen(const AudioSourceConfig& /*audioSrcCfg*/) {
    const QByteArray qbaFilename(getLocalFileNameBytes());
    if (0 != ov_fopen(qbaFilename.constData(), &m_vf)) {
        qWarning() << "Failed to open OggVorbis file:" << getUrlString();
        return ERR;
    }
    if (!ov_seekable(&m_vf)) {
        qWarning() << "OggVorbis file is not seekable:" << getUrlString();
        return ERR;
    }

    // Look up the Ogg stream's channel count and sample rate
    const vorbis_info* vi = ov_info(&m_vf, kCurrentBitstreamLink);
    if (!vi) {
        qWarning() << "Failed to read OggVorbis file:" << getUrlString();
        return ERR;
    }
    setChannelCount(vi->channels);
    setFrameRate(vi->rate);
    if (0 < vi->bitrate_nominal) {
        setBitrate(vi->bitrate_nominal / 1000);
    } else {
        if ((0 < vi->bitrate_lower) && (vi->bitrate_lower == vi->bitrate_upper)) {
            setBitrate(vi->bitrate_lower / 1000);
        }
    }

    ogg_int64_t pcmTotal = ov_pcm_total(&m_vf, kEntireBitstreamLink);
    if (0 <= pcmTotal) {
        setFrameCount(pcmTotal);
    } else {
        qWarning() << "Failed to read total length of OggVorbis file:" << getUrlString();
        return ERR;
    }
    return OK;
}
Animation::Animation(float frameRate, int numFrames) :
    active(false),
    frame(0) {
    setFrameRate(frameRate);
    setNumFrames(numFrames);
}
bool DepthCamera::_applyConfigParams(const ConfigSet *params) {
    for (std::size_t i = 0; i < params->paramNames.size(); i++) {
        if (params->paramNames[i].compare(0, 2, "0x") == 0) {
            // Parameter name is a raw register address
            logger(LOG_INFO) << "DepthCamera: Setting register '" << params->paramNames[i] << "'" << std::endl;
            char *endptr;
            uint32_t reg = (uint32_t)strtol(params->paramNames[i].c_str(), &endptr, 16);
            uint32_t value = (uint32_t)strtol(params->get(params->paramNames[i]).c_str(), &endptr, 0);
            if (!_programmer->writeRegister(reg, value)) {
                logger(LOG_ERROR) << "Failed to write to register @0x" << std::hex << reg
                                  << " = 0x" << value << std::dec << std::endl;
            }
            continue;
        } else if (params->paramNames[i] == "frame_rate") {
            // Convert the floating-point rate into a reduced numerator/denominator pair
            float rate = params->getFloat(params->paramNames[i]);
            FrameRate r;
            r.numerator = rate * 10000;
            r.denominator = 10000;
            uint g = gcd(r.numerator, r.denominator);
            r.numerator /= g;
            r.denominator /= g;
            if (!setFrameRate(r)) {
                logger(LOG_ERROR) << "DepthCamera: Failed to set frame rate to " << rate << "fps" << std::endl;
                return false;
            }
            continue;
        }

        logger(LOG_INFO) << "DepthCamera: Setting parameter '" << params->paramNames[i] << "'" << std::endl;
        const Parameter *p = getParam(params->paramNames[i]).get();
        if (!p) {
            logger(LOG_ERROR) << "DepthCamera: Ignoring unknown parameter " << params->paramNames[i] << std::endl;
            return false;
        }

        // Dispatch on the concrete parameter type
        const BoolParameter *bp = dynamic_cast<const BoolParameter *>(p);
        const EnumParameter *ep = dynamic_cast<const EnumParameter *>(p);
        const IntegerParameter *ip = dynamic_cast<const IntegerParameter *>(p);
        const UnsignedIntegerParameter *up = dynamic_cast<const UnsignedIntegerParameter *>(p);
        const FloatParameter *fp = dynamic_cast<const FloatParameter *>(p);

        if (bp) {
            if (!set(params->paramNames[i], params->getBoolean(params->paramNames[i])))
                return false;
        } else if (ip || ep) {
            if (!set(params->paramNames[i], params->getInteger(params->paramNames[i])))
                return false;
        } else if (up) {
            if (!set(params->paramNames[i], (uint)params->getInteger(params->paramNames[i])))
                return false;
        } else if (fp) {
            if (!set(params->paramNames[i], params->getFloat(params->paramNames[i])))
                return false;
        } else {
            logger(LOG_ERROR) << "DepthCamera: Parameter type unknown for " << params->paramNames[i] << std::endl;
            return false;
        }
    }
    return true;
}
Result SoundSourceM4A::tryOpen(const AudioSourceConfig& audioSrcCfg) {
    DEBUG_ASSERT(MP4_INVALID_FILE_HANDLE == m_hFile);
    // Open the MP4 file (the MP4Read() signature changed in version 1.9.1)
#if MP4V2_PROJECT_version_hex <= 0x00010901
    m_hFile = MP4Read(getLocalFileNameBytes().constData(), 0);
#else
    m_hFile = MP4Read(getLocalFileNameBytes().constData());
#endif
    if (MP4_INVALID_FILE_HANDLE == m_hFile) {
        qWarning() << "Failed to open file for reading:" << getUrlString();
        return ERR;
    }

    m_trackId = findFirstAudioTrackId(m_hFile);
    if (MP4_INVALID_TRACK_ID == m_trackId) {
        qWarning() << "No AAC track found:" << getUrlString();
        return ERR;
    }

    const MP4SampleId numberOfSamples =
            MP4GetTrackNumberOfSamples(m_hFile, m_trackId);
    if (0 >= numberOfSamples) {
        qWarning() << "Failed to read number of samples from file:" << getUrlString();
        return ERR;
    }
    m_maxSampleBlockId = kSampleBlockIdMin + (numberOfSamples - 1);

    // Determine the maximum input size (in bytes) of a
    // sample block for the selected track.
    const u_int32_t maxSampleBlockInputSize =
            MP4GetTrackMaxSampleSize(m_hFile, m_trackId);
    m_inputBuffer.resize(maxSampleBlockInputSize, 0);

    DEBUG_ASSERT(NULL == m_hDecoder); // not already opened
    m_hDecoder = NeAACDecOpen();
    if (!m_hDecoder) {
        qWarning() << "Failed to open the AAC decoder!";
        return ERR;
    }
    NeAACDecConfigurationPtr pDecoderConfig =
            NeAACDecGetCurrentConfiguration(m_hDecoder);
    pDecoderConfig->outputFormat = FAAD_FMT_FLOAT;
    if ((kChannelCountMono == audioSrcCfg.channelCountHint) ||
            (kChannelCountStereo == audioSrcCfg.channelCountHint)) {
        pDecoderConfig->downMatrix = 1;
    } else {
        pDecoderConfig->downMatrix = 0;
    }
    pDecoderConfig->defObjectType = LC;
    if (!NeAACDecSetConfiguration(m_hDecoder, pDecoderConfig)) {
        qWarning() << "Failed to configure AAC decoder!";
        return ERR;
    }

    u_int8_t* configBuffer = NULL;
    u_int32_t configBufferSize = 0;
    if (!MP4GetTrackESConfiguration(m_hFile, m_trackId,
            &configBuffer, &configBufferSize)) {
        // Failed to get the MPEG-4 audio config. This is ok:
        // NeAACDecInit2() will simply use default values instead.
        qWarning() << "Failed to read the MP4 audio configuration."
                << "Continuing with default values.";
    }

    SAMPLERATE_TYPE sampleRate;
    unsigned char channelCount;
    if (0 > NeAACDecInit2(m_hDecoder, configBuffer, configBufferSize,
            &sampleRate, &channelCount)) {
        free(configBuffer);
        qWarning() << "Failed to initialize the AAC decoder!";
        return ERR;
    } else {
        free(configBuffer);
    }

    setChannelCount(channelCount);
    setFrameRate(sampleRate);
    setFrameCount(getFrameCountForSampleBlockId(m_maxSampleBlockId));

    // Resize the temporary buffer for decoded sample data
    const SINT sampleBufferCapacity = frames2samples(kFramesPerSampleBlock);
    m_sampleBuffer.resetCapacity(sampleBufferCapacity);

    // Invalidate the current position to enforce the following seek operation
    m_curFrameIndex = getMaxFrameIndex();

    // (Re-)start decoding at the beginning of the file
    seekSampleFrame(getMinFrameIndex());

    return OK;
}