Example #1
/*/////////////////////////////////////////////////////////////////*/
void OgreOggStreamSound::_playImpl() {
  if(isPlaying())
    return;

  // Grab a source if not already attached
  if(mSource == AL_NONE)
    if(!OgreOggSoundManager::getSingleton()._requestSoundSource(this))
      return;

  alGetError(); // clear any previous AL error so the check below reflects alSourcePlay()
  // Play source
  alSourcePlay(mSource);
  if(alGetError()) {
    Ogre::LogManager::getSingleton().logMessage("Unable to play sound");
    return;
  }
  // Set play flag
  mPlay = true;

  // Notify listener
  if(mSoundListener) mSoundListener->soundPlayed(this);
}
Example #2
	void MidiService::close(bool closeAll, string file)
	{
		if (!closeAll && ((int)file.length() == 0))
			return;
		if (closeAll || (midiFile == file)) {
			if (isPlaying(midiFile)) {
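				// Stop and close the MCI device referenced by the alias "MidiFile"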
				MCIERROR err = mciSendString(L"stop MidiFile", NULL, 0, NULL);
				if (err != 0)
				{
					showError("Не удалось остановить MIDI-файл: " + midiFile);
					return;
				}
				err = mciSendString(L"close MidiFile", NULL, 0, NULL);
				if (err != 0)
				{
					showError("Не удалось закрыть MIDI-файл: " + midiFile);
					return;
				}
			}
			midiFile = "";
		}
	}
Example #3
StreamSound::~StreamSound()
{
    // stop sound
    if( isPlaying() ) stop();
    // release sound interfaces
    if( _iDirectSound3DBuffer8 ) _iDirectSound3DBuffer8->Release();
    _iDirectSoundBuffer8->Release();    
    _iDirectSoundBuffer->Release();
    // release OGG file
    delete _oggFile;    
    // unregister
    for( StreamSoundI streamSoundI = _streamSounds.begin();
                      streamSoundI != _streamSounds.end();
                      streamSoundI++ )
    {
        if( *streamSoundI == this )
        {
            _streamSounds.erase( streamSoundI );
            break;
        }
    }
}
Example #4
bool NetDemo::startRecording(const std::string &filename)
{
    this->filename = filename;

    if (isPlaying() || isPaused())
    {
        error("Cannot record a netdemo while not connected to a server.");
        return false;
    }

    // Already recording so just ignore the command
    if (isRecording())
        return true;

    if (demofp != NULL)		// file is already open for some reason
    {
        fclose(demofp);
        demofp = NULL;
    }

    demofp = fopen(filename.c_str(), "wb");
    if (!demofp)
    {
        error("Unable to create netdemo file.");
        return false;
    }

    memset(&header, 0, NetDemo::HEADER_SIZE);
    // Note: The header is not finalized at this point.  Write it anyway to
    // reserve space in the output file for it and overwrite it later.
    if (!writeHeader())
    {
        error("Unable to write netdemo header.");
        return false;
    }

    state = NetDemo::recording;
    return true;
}
Example #5
    void SoundSource::update()
    {
        if (!mStarted)
        {
            mStarted = true;
            mSourceBinding = requestAudioSource();

            if (mSourceBinding)
            {
                mSourceBinding->buffer = mSoundBuffer.mHandle;
                alSourcei(mSourceBinding->source, AL_BUFFER,
                    mSourceBinding->buffer);
                alSourcePlay(mSourceBinding->source);
            }
        }
        else if (!isPlaying() && isActive())
        {
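            // The source stopped on its own: detach the buffer and drop our binding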
            alSourcei(mSourceBinding->source, AL_BUFFER, 0);
            mSourceBinding->buffer = 0;
            mSourceBinding = NULL;
        }
    }
Example #6
bool AudioSpeech::playSpeech(const char *name, int balance) {
	// debug("AudioSpeech::playSpeech(\"%s\")", name);
	Common::ScopedPtr<Common::SeekableReadStream> r(_vm->getResourceStream(name));

	if (!r) {
		warning("AudioSpeech::playSpeech: AUD resource \"%s\" not found", name);
		return false;
	}

	if (r->size() > BUFFER_SIZE) {
		warning("AudioSpeech::playSpeech: AUD larger than buffer size (%d > %d)", r->size(), BUFFER_SIZE);
		return false;
	}

	if (isPlaying()) {
		stopSpeech();
	}

	r->read(_data, r->size());
	if (r->err()) {
		warning("AudioSpeech::playSpeech: Error reading resource \"%s\"", name);
		return false;
	}

	AudStream *audioStream = new AudStream(_data);

	_vm->_mixer->playStream(
		Audio::Mixer::kPlainSoundType,
		&_soundHandle,
		audioStream,
		-1,
		_volume * 255 / 100,
		balance);

	_isMaybeActive = true;

	return true;
}
Example #7
bool OggStream::update()
{
	if(bstop)
		return false;

	if (!source) return false;

    int processed;
    bool active = true;

	if (!isPlaying())
	{
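		// Source is not playing (stopped or never started), e.g. after a buffer underrun; (re)start it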
		alSourcePlay(source);
	}

    alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);

    while(processed--)
    {
        ALuint buffer;
        
        alSourceUnqueueBuffers(source, 1, &buffer);
        check();

        active = stream(buffer);

        alSourceQueueBuffers(source, 1, &buffer);
        check();
    }

	if (!active)
	{
		release();
	}

    return active;
}
Example #8
void TheoraTexture::play()
{
   if( isPlaying() )
      return;
      
   if( !mAsyncState )
      setFile( mFilename, mSFXDescription );
      
   // Construct a playback queue that syncs to our time source,
   // writes to us, and drops outdated packets.
   
   if( !mPlaybackQueue )
      mPlaybackQueue = new PlaybackQueueType( 1, _getTimeSource(), this, 0, true );
         
   // Start playback.
   
   if( mSFXSource )
      mSFXSource->play();
   else
      mPlaybackTimer.start();
      
   mIsPaused = false;
}
Example #9
void StreamSoundSource::update()
{
    SoundSource::update();

    ALint processed = 0;
    alGetSourcei(m_sourceId, AL_BUFFERS_PROCESSED, &processed);
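    // Unqueue each processed buffer and refill/requeue it via fillBufferAndQueue()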
    for(ALint i = 0; i < processed; ++i) {
        ALuint buffer;
        alSourceUnqueueBuffers(m_sourceId, 1, &buffer);
        //SoundManager::check_al_error("Couldn't unqueue audio buffer: ");

        if(!fillBufferAndQueue(buffer))
            break;
    }

    if(!isPlaying()) {
        if(processed == 0 || !m_looping)
            return;

        g_logger.traceError("restarting audio source because of buffer underrun");
        play();
    }
}
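Example #10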
void QQuickAnimatedImage::setSource(const QUrl &url)
{
    Q_D(QQuickAnimatedImage);
    if (url == d->url)
        return;

    if (d->reply) {
        d->reply->deleteLater();
        d->reply = 0;
    }

    d->oldPlaying = isPlaying();
    if (d->_movie) {
        delete d->_movie;
        d->_movie = 0;
    }

    d->url = url;
    emit sourceChanged(d->url);

    if (isComponentComplete())
        load();
}
Example #11
//FIXME: why does the demuxer not get an EOF when replaying via seek(0)?
void AVPlayer::play()
{
    if (isPlaying())
        stop();
    /*
     * avoid loading multiple times when replaying the same seekable file
     * TODO: force load unseekable stream? avio.seekable. currently you
     * must setFile() again to reload an unseekable stream
     */
    //FIXME: seek(0) for audio without video crashes, why?
    //TODO: no eof if replay by seek(0)
    if (true || !isLoaded() || !vCodecCtx) { //if (!isLoaded() && !load())
        if (!load()) {
            mStatistics.reset();
            return;
        } else {
            initStatistics();
        }
    } else {
        qDebug("seek(0)");
        demuxer.seek(0); //FIXME: now assume it is seekable. for unseekable, setFile() again
    }
    Q_ASSERT(clock != 0);
    clock->reset();

    if (vCodecCtx && video_thread) {
        qDebug("Starting video thread...");
        video_thread->start();
    }
    if (aCodecCtx && audio_thread) {
        qDebug("Starting audio thread...");
        audio_thread->start();
    }
    demuxer_thread->start();
    //blockSignals(false);
    emit started();
}
Example #12
void VideoDecoder::addTrack(Track *track, bool isExternal) {
	_tracks.push_back(track);

	if (isExternal)
		_externalTracks.push_back(track);
	else
		_internalTracks.push_back(track);

	if (track->getTrackType() == Track::kTrackTypeAudio) {
		// Update volume settings if it's an audio track
		((AudioTrack *)track)->setVolume(_audioVolume);
		((AudioTrack *)track)->setBalance(_audioBalance);

		if (!isExternal && supportsAudioTrackSwitching()) {
			if (_mainAudioTrack) {
				// The main audio track has already been found
				((AudioTrack *)track)->setMute(true);
			} else {
				// First audio track found -> now the main one
				_mainAudioTrack = (AudioTrack *)track;
				_mainAudioTrack->setMute(false);
			}
		}
	} else if (track->getTrackType() == Track::kTrackTypeVideo) {
		// If this track has a better time, update _nextVideoTrack
		if (!_nextVideoTrack || ((VideoTrack *)track)->getNextFrameStartTime() < _nextVideoTrack->getNextFrameStartTime())
			_nextVideoTrack = (VideoTrack *)track;
	}

	// Keep the track paused if we're paused
	if (isPaused())
		track->pause(true);

	// Start the track if we're playing
	if (isPlaying() && track->getTrackType() == Track::kTrackTypeAudio)
		((AudioTrack *)track)->start();
}
Example #13
bool Player::computeCurrentFrame() {
    if (!isPlaying()) {
        _currentFrame = INVALID_FRAME;
        return false;
    }
    if (_currentFrame < 0) {
        _currentFrame = 0;
    }
    
    qint64 elapsed = glm::clamp(Player::elapsed() - _audioOffset, (qint64)0, (qint64)_recording->getLength());
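    // Step _currentFrame backward, then forward, until it brackets the elapsed playback time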
    while(_currentFrame >= 0 &&
          _recording->getFrameTimestamp(_currentFrame) > elapsed) {
        --_currentFrame;
    }
    
    while (_currentFrame < _recording->getFrameNumber() &&
           _recording->getFrameTimestamp(_currentFrame) < elapsed) {
        ++_currentFrame;
    }
    --_currentFrame;
    
    if (_currentFrame == _recording->getFrameNumber() - 1) {
        --_currentFrame;
        _frameInterpolationFactor = 1.0f;
    } else {
        qint64 currentTimestamps = _recording->getFrameTimestamp(_currentFrame);
        qint64 nextTimestamps = _recording->getFrameTimestamp(_currentFrame + 1);
        _frameInterpolationFactor = (float)(elapsed - currentTimestamps) /
                                    (float)(nextTimestamps - currentTimestamps);
    }
    
    if (_frameInterpolationFactor < 0.0f || _frameInterpolationFactor > 1.0f) {
        _frameInterpolationFactor = 0.0f;
        qCDebug(avatars) << "Invalid frame interpolation value: overriding";
    }
    return true;
}
Example #14
void AnimatedImage::paintFrames(const CPoint &p, UINT last) {
  if(!isLoaded() || isPlaying()) {
    return;
  }
  if(last >= (UINT)m_frameTable.size()) {
    last = (UINT)m_frameTable.size()-1;
  }
  if(hasSavedBackground() && (m_background->getSize() != m_size)) {
    hide();
  }
  if(!hasSavedBackground()) {
    saveBackground(p);
  }
  if(m_lastPaintedFrame != NULL) {
    m_lastPaintedFrame->dispose();
  }
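  // Paint frames 0..last in order, disposing every intermediate frame but keeping the last one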
  for(UINT i = 0; i <= last; i++) {
    const GifFrame &frame = m_frameTable[i];
    frame.paint();
    if(i < last) {
      frame.dispose();
    }
  }
}
Example #15
void NetDemo::readMessages(buf_t* netbuffer)
{
    if (!isPlaying())
    {
        return;
    }

    netdemo_message_t type;
    uint32_t len = 0, tic = 0;

    // get the values for type, len and tic
    readMessageHeader(type, len, tic);

    while (type == NetDemo::msg_snapshot)
    {
        // skip over snapshots and read the next message instead
        fseek(demofp, len, SEEK_CUR);
        readMessageHeader(type, len, tic);
    }

    // read from the input file and put the data into netbuffer
    gametic = tic;
    readMessageBody(netbuffer, len);
}
Example #16
bool TFrameHandle::scrub(int r0, int r1, double framePerSecond) {
  if (isPlaying() || isScrubbing()) return false;
  bool onlyOneFrame = (r0 == r1);

  if (!isScrubbing() || !onlyOneFrame) emit scrubStarted();
  if (!onlyOneFrame) {
    m_fps        = framePerSecond;
    m_scrubRange = std::make_pair(r0, r1);
  }
  setFrame(r0);

  if (m_audioColumn)
    m_audioColumn->scrub(r0, r1);
  else if (m_xsheet) {
    int i;
    for (i = r0; i <= r1; i++) m_xsheet->scrub(i);
  }

  if (onlyOneFrame) return false;

  m_clock.start();
  m_timerId = startTimer(40);  // fire a timer event roughly every 40 ms (~25 fps)
  return true;
}
Example #17
/*!
	Destroys the stream.
*/
	Stream::~Stream()
	{
		if( isPlaying() )
		{
			stopSound();
		}

		release();

		alDeleteBuffers( NUMBUFFERSOGG, _uiBuffers );

		if( _pDecodeBuffer )
		{
			free( _pDecodeBuffer );
			_pDecodeBuffer = NULL;
		}

		//fn_ov_clear(sOggVorbisFile);
		if( _sOggVorbisFile )
		{
			delete( _sOggVorbisFile );
		}

	}
Example #18
int Tune::play(char* trackName)
{
	if (isPlaying()) return 1;
	
	// Exit if track not found
	if (!track.open(trackName, O_READ))
	{
		sd.errorHalt("Track not found !");
		return 3;
	}
	
	playState = playback;
	
	// Reset decode time & bitrate from previous playback
	writeSCI(SCI_DECODE_TIME, 0);
	delay(100);
	
	skipTag(); // Skip ID3v2 tag if there's one
	
	feed(); // Feed VS1011e
	attachInterrupt(0, feed, RISING); // Let the interrupt handle the rest of the process
	
	return 0;
}
Example #19
void VideoDecoder::close() {
	if (isPlaying())
		stop();

	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
		delete *it;

	_tracks.clear();
	_internalTracks.clear();
	_externalTracks.clear();
	_dirtyPalette = false;
	_palette = 0;
	_startTime = 0;
	_audioVolume = Audio::Mixer::kMaxChannelVolume;
	_audioBalance = 0;
	_pauseLevel = 0;
	_needsUpdate = false;
	_lastTimeChange = 0;
	_endTime = 0;
	_endTimeSet = false;
	_nextVideoTrack = 0;
	_mainAudioTrack = 0;
	_canSetDither = true;
}
Example #20
/**
 * Retrieve the value of a CDAudio-interface property.
 */
int DM_CDAudio_Get(int prop, void* ptr)
{
    if(!cdInited)
        return false;

    switch(prop)
    {
    case MUSIP_ID:
        if(ptr)
        {
            strcpy((char*) ptr, "WinMM::CD");
            return true;
        }
        break;

    case MUSIP_PLAYING:
        return (cdInited && isPlaying()? true : false);

    default:
        break;
    }

    return false;
}
Example #21
void VideoDecoder::start() {
	if (!isPlaying())
		setRate(1);
}
Example #22
void Motor::playVibAlert( uint8_t waveform, uint8_t pwr, uint8_t onTime, uint8_t offTime )
{
    stopPlaying = false;

    if( isPlaying() || isPlayingVib() )
    {
        Serial.println( F( "Already playing effect" ) );
        return;
    }

    if( currentMotor.LRA )
    {
        Serial.println( F("No Vib Alerts on LRA motor" ));
        return;
    }

    Serial.println( F("Playing Vib Alert" ));
    playing = true;
    playingVIB = true;
    uint8_t on = true;
    uint8_t lTime = onTime;      // contains time in ms * 100

    uint8_t pwr_range = currentMotor.rated_duty - currentMotor.min_duty;
    uint16_t currentPWR;

    // Setup PWM for 31.25kHz and specified % duty.
    TCCR1A |= (1<<COM1A1) | (1<<WGM10);   // 8 bit fast  PWM, not inverted, output on OC1A (D9) A.K.A. PWM_OUT
    TCCR1B |= (1<<WGM12) | (1<<CS10);    //  8 bit fast PWM, prescaler /1
    OCR1A = 0;

    long startTime = millis();

    while( !stopPlaying )
    {
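        // Busy-wait loop: alternate on/off phases until stopPlaying is set elsewhere (presumably by an interrupt or another task)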
        int t = (millis() - startTime) / lTime ;

        if( on )
        {
            currentPWR = ( (uint16_t) pwr * pwr_range ) / ( uint16_t ) 100;  // pwr is 0 to 100
            currentPWR = (uint16_t) (currentPWR * calculatePWR( waveform, t )) / ( uint16_t ) 100;  // calculate power returns 0 - 100
            currentPWR += currentMotor.min_duty;
            OCR1A = currentPWR;	// OCR1 is 0-255

            if( t >= 100 )	// 30ms * 100( min time period)
            {
                if( offTime > 0 )
                {
                    lTime = offTime;
                    on = false;
                    OCR1A = 0;
                }

                startTime = millis();
            }
        }

        else if( t >= 100 )        // off phase elapsed: switch back to the on phase
        {
            startTime = millis();
            lTime = onTime;
            on = true;
        }
    }//while

    TCCR1A = 0x00; // No PWM
    TCCR1B = 0x00; // stop timer 1
    OCR1A=0;
    digitalWrite( PWM_OUT, LOW ); // PWM output low until further notice.
    playingVIB = false;
    playing = false;
}
Example #23
 bool SoundManager::getSoundPlaying(const MWWorld::Ptr &ptr, const std::string& soundId) const
 {
     return isPlaying(ptr, soundId);
 }
Example #24
 bool SoundManager::sayDone(const MWWorld::Ptr &ptr) const
 {
     return !isPlaying(ptr, "_say_sound");
 }
Example #25
static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
                              void *drv_opaque)
{
    OSStatus status;
    coreaudioVoiceOut *core = (coreaudioVoiceOut *) hw;
    UInt32 propertySize;
    int err;
    const char *typ = "playback";
    AudioValueRange frameRange;
    CoreaudioConf *conf = drv_opaque;

    /* create mutex */
    err = pthread_mutex_init(&core->mutex, NULL);
    if (err) {
        dolog("Could not create mutex\nReason: %s\n", strerror (err));
        return -1;
    }

    audio_pcm_init_info (&hw->info, as);

    /* open default output device */
    propertySize = sizeof(core->outputDeviceID);
    status = AudioHardwareGetProperty(
        kAudioHardwarePropertyDefaultOutputDevice,
        &propertySize,
        &core->outputDeviceID);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get default output Device\n");
        return -1;
    }
    if (core->outputDeviceID == kAudioDeviceUnknown) {
        dolog ("Could not initialize %s - Unknown Audiodevice\n", typ);
        return -1;
    }

    /* get minimum and maximum buffer frame sizes */
    propertySize = sizeof(frameRange);
    status = AudioDeviceGetProperty(
        core->outputDeviceID,
        0,
        0,
        kAudioDevicePropertyBufferFrameSizeRange,
        &propertySize,
        &frameRange);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get device buffer frame range\n");
        return -1;
    }

    if (frameRange.mMinimum > conf->buffer_frames) {
        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMinimum;
        dolog ("warning: Upsizing Buffer Frames to %f\n", frameRange.mMinimum);
    }
    else if (frameRange.mMaximum < conf->buffer_frames) {
        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMaximum;
        dolog ("warning: Downsizing Buffer Frames to %f\n", frameRange.mMaximum);
    }
    else {
        core->audioDevicePropertyBufferFrameSize = conf->buffer_frames;
    }

    /* set Buffer Frame Size */
    propertySize = sizeof(core->audioDevicePropertyBufferFrameSize);
    status = AudioDeviceSetProperty(
        core->outputDeviceID,
        NULL,
        0,
        false,
        kAudioDevicePropertyBufferFrameSize,
        propertySize,
        &core->audioDevicePropertyBufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not set device buffer frame size %" PRIu32 "\n",
                           (uint32_t)core->audioDevicePropertyBufferFrameSize);
        return -1;
    }

    /* get Buffer Frame Size */
    propertySize = sizeof(core->audioDevicePropertyBufferFrameSize);
    status = AudioDeviceGetProperty(
        core->outputDeviceID,
        0,
        false,
        kAudioDevicePropertyBufferFrameSize,
        &propertySize,
        &core->audioDevicePropertyBufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get device buffer frame size\n");
        return -1;
    }
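    /* size the emulated buffer as nbuffers chunks of the negotiated frame size */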
    hw->samples = conf->nbuffers * core->audioDevicePropertyBufferFrameSize;

    /* get StreamFormat */
    propertySize = sizeof(core->outputStreamBasicDescription);
    status = AudioDeviceGetProperty(
        core->outputDeviceID,
        0,
        false,
        kAudioDevicePropertyStreamFormat,
        &propertySize,
        &core->outputStreamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get Device Stream properties\n");
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Samplerate */
    core->outputStreamBasicDescription.mSampleRate = (Float64) as->freq;
    propertySize = sizeof(core->outputStreamBasicDescription);
    status = AudioDeviceSetProperty(
        core->outputDeviceID,
        0,
        0,
        0,
        kAudioDevicePropertyStreamFormat,
        propertySize,
        &core->outputStreamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set samplerate %d\n",
                           as->freq);
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Callback */
    status = AudioDeviceAddIOProc(core->outputDeviceID, audioDeviceIOProc, hw);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set IOProc\n");
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* start Playback */
    if (!isPlaying(core->outputDeviceID)) {
        status = AudioDeviceStart(core->outputDeviceID, audioDeviceIOProc);
        if (status != kAudioHardwareNoError) {
            coreaudio_logerr2 (status, typ, "Could not start playback\n");
            AudioDeviceRemoveIOProc(core->outputDeviceID, audioDeviceIOProc);
            core->outputDeviceID = kAudioDeviceUnknown;
            return -1;
        }
    }

    return 0;
}
Example #26
bool StandardDynamicStimulus::needDraw() {
    return isPlaying();
}
Example #27
//-----------------------------------------------------------------------------
bool MusicOggStream::load(const std::string& filename)
{
    if (isPlaying()) stopMusic();

    m_error = true;
    m_fileName = filename;
    if(m_fileName=="") return false;

    m_oggFile = fopen(m_fileName.c_str(), "rb");

    if(!m_oggFile)
    {
        Log::error("MusicOgg", "Loading Music: %s failed (fopen returned NULL)",
                   m_fileName.c_str());
        return false;
    }

#if defined( WIN32 ) || defined( WIN64 )
    const int result = ov_open_callbacks((void *)m_oggFile, &m_oggStream, NULL,
                                         0, OV_CALLBACKS_DEFAULT             );
#else
    const int result = ov_open(m_oggFile, &m_oggStream, NULL, 0);
#endif

    if (result < 0)
    {
        fclose(m_oggFile);


        const char* errorMessage;
        switch (result)
        {
            case OV_EREAD:
                errorMessage = "OV_EREAD";
                break;
            case OV_ENOTVORBIS:
                errorMessage = "OV_ENOTVORBIS";
                break;
            case OV_EVERSION:
                errorMessage = "OV_EVERSION";
                break;
            case OV_EBADHEADER:
                errorMessage = "OV_EBADHEADER";
                break;
            case OV_EFAULT:
                errorMessage = "OV_EFAULT";
                break;
            default:
                errorMessage = "Unknown Error";
        }

        Log::error("MusicOgg", "Loading Music: %s failed : "
                               "ov_open returned error code %i (%s)",
               m_fileName.c_str(), result, errorMessage);
        return false;
    }

    m_vorbisInfo = ov_info(&m_oggStream, -1);

    // Pick the OpenAL buffer format matching the stream's channel count
    if (m_vorbisInfo->channels == 1) nb_channels = AL_FORMAT_MONO16;
    else                             nb_channels = AL_FORMAT_STEREO16;

    alGenBuffers(2, m_soundBuffers);
    if (check("alGenBuffers") == false) return false;

    alGenSources(1, &m_soundSource);
    if (check("alGenSources") == false) return false;

    alSource3f(m_soundSource, AL_POSITION,        0.0, 0.0, 0.0);
    alSource3f(m_soundSource, AL_VELOCITY,        0.0, 0.0, 0.0);
    alSource3f(m_soundSource, AL_DIRECTION,       0.0, 0.0, 0.0);
    alSourcef (m_soundSource, AL_ROLLOFF_FACTOR,  0.0          );
    alSourcef (m_soundSource, AL_GAIN,            1.0          );
    alSourcei (m_soundSource, AL_SOURCE_RELATIVE, AL_TRUE      );

    m_error=false;
    return true;
}   // load
Example #28
//--------------------------------------------------------------
void MoviePlayer::play(){
    if (!isPlaying() && !isDone && !isPaused) {
        videos.at(whichMov)->play();
    }
}
Example #29
	/*/////////////////////////////////////////////////////////////////*/
	void OgreOggStreamSound::_updateAudioBuffers()
	{
		if (!isPlaying()) return;

		ALenum state;
		alGetSourcei(mSource, AL_SOURCE_STATE, &state);

		if (state == AL_PAUSED) return;

		// Ran out of buffer data?
		if (state == AL_STOPPED)
		{
			if(mStreamEOF)
			{
				stop();
				
				// Finished callback
				if ( mSoundListener ) 
					mSoundListener->soundFinished(this);
				
				return;
			}
			else
			{
				// Clear audio data already played...
				_dequeue();

				// Fill with next chunk of audio...
				_prebuffer();

				// Play...
				alSourcePlay(mSource);
			}
		}

		int processed;

		alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);

		while(processed--)
		{
			ALuint buffer;
			ALint size, bits, channels, freq;

			alSourceUnqueueBuffers(mSource, 1, &buffer);

			// Get buffer details
			alGetBufferi(buffer, AL_SIZE, &size);
			alGetBufferi(buffer, AL_BITS, &bits);
			alGetBufferi(buffer, AL_CHANNELS, &channels);
			alGetBufferi(buffer, AL_FREQUENCY, &freq);    

			// Update offset (in seconds)
			mLastOffset += ((ALuint)size/channels/(bits/8)) / (ALfloat)freq;
			if ( mLastOffset>=mPlayTime )
			{
				mLastOffset = mLastOffset-mPlayTime;
				
				/**	This is the closest we can get to a loop trigger.
				@remarks 
					If played data size exceeds audio data size trigger callback.
				*/
				if ( mSoundListener ) mSoundListener->soundLooping(this);
			}

			if ( _stream(buffer) ) alSourceQueueBuffers(mSource, 1, &buffer);
		}

		// handle play position change 
		if ( mPlayPosChanged ) 
		{
			_updatePlayPosition();
		}
	}
Example #30
void MidiPlayer::run()
{
    // Workaround to fix errors with the Microsoft GS Wavetable Synth on
    // Windows 10 - see http://stackoverflow.com/a/32553208/586978
#ifdef _WIN32
    CoInitializeEx(nullptr, COINIT_MULTITHREADED);
    BOOST_SCOPE_EXIT(this_) {
        CoUninitialize();
    } BOOST_SCOPE_EXIT_END
#endif

    boost::signals2::scoped_connection connection(
        mySettingsManager.subscribeToChanges([&]() {
            auto settings = mySettingsManager.getReadHandle();
            myMetronomeEnabled = settings->get(Settings::MetronomeEnabled);
        }));

    setIsPlaying(true);

    MidiFile::LoadOptions options;
    options.myEnableMetronome = true;
    options.myRecordPositionChanges = true;

    // Load MIDI settings.
    int api;
    int port;
    {
        auto settings = mySettingsManager.getReadHandle();
        myMetronomeEnabled = settings->get(Settings::MetronomeEnabled);

        api = settings->get(Settings::MidiApi);
        port = settings->get(Settings::MidiPort);

        options.myMetronomePreset = settings->get(Settings::MetronomePreset) +
                                    Midi::MIDI_PERCUSSION_PRESET_OFFSET;
        options.myStrongAccentVel =
            settings->get(Settings::MetronomeStrongAccent);
        options.myWeakAccentVel = settings->get(Settings::MetronomeWeakAccent);
        options.myVibratoStrength = settings->get(Settings::MidiVibratoLevel);
        options.myWideVibratoStrength =
            settings->get(Settings::MidiWideVibratoLevel);
    }

    MidiFile file;
    file.load(myScore, options);

    const int ticks_per_beat = file.getTicksPerBeat();

    // Merge the MIDI events from each track.
    MidiEventList events;
    for (MidiEventList &track : file.getTracks())
    {
        track.convertToAbsoluteTicks();
        events.concat(track);
    }

    // TODO - since each track is already sorted, an n-way merge should be faster.
    std::stable_sort(events.begin(), events.end());
    events.convertToDeltaTicks();

    // Initialize RtMidi and set the port.
    MidiOutputDevice device;
    if (!device.initialize(api, port))
    {
        emit error(tr("Error initializing MIDI output device."));
        return;
    }

    bool started = false;
    int beat_duration = Midi::BEAT_DURATION_120_BPM;
    const SystemLocation start_location(myStartLocation.getSystemIndex(),
                                        myStartLocation.getPositionIndex());
    SystemLocation current_location = start_location;

    for (auto event = events.begin(); event != events.end(); ++event)
    {
        if (!isPlaying())
            break;

        if (event->isTempoChange())
            beat_duration = event->getTempo();

        // Skip events before the start location, except for events such as
        // instrument changes. Tempo changes are tracked above.
        if (!started)
        {
            if (event->getLocation() < start_location)
            {
                if (event->isProgramChange())
                    device.sendMessage(event->getData());

                continue;
            }
            else
            {
                performCountIn(device, event->getLocation(), beat_duration);

                started = true;
            }
        }

        const int delta = event->getTicks();
        assert(delta >= 0);

        const int duration_us = boost::rational_cast<int>(
            boost::rational<int>(delta, ticks_per_beat) * beat_duration);

        usleep(duration_us * (100.0 / myPlaybackSpeed)); // scale the delay by playback speed (100 = normal)

        // Don't play metronome events if the metronome is disabled.
        if (event->isNoteOnOff() && event->getChannel() == METRONOME_CHANNEL &&
            !myMetronomeEnabled)
        {
            continue;
        }

        device.sendMessage(event->getData());

        // Notify listeners of the current playback position.
        if (event->getLocation() != current_location)
        {
            const SystemLocation &new_location = event->getLocation();

            // Don't move backwards unless a repeat occurred.
            if (new_location < current_location && !event->isPositionChange())
                    continue;

            if (new_location.getSystem() != current_location.getSystem())
                emit playbackSystemChanged(new_location.getSystem());

            emit playbackPositionChanged(new_location.getPosition());

            current_location = new_location;
        }
    }
}