/**
 * Start playback of a compressed speech sample on the speech channel.
 *
 * @param speechId  id of the speech sample to play
 * @param vol       game volume, 0..16
 * @param pan       game pan, -16..16
 * @return RD_OK on success (or when speech is muted), RDERR_SPEECHPLAYING
 *         if speech is already playing, RDERR_INVALIDID if the sample
 *         could not be opened.
 */
int32 Sound::playCompSpeech(uint32 speechId, uint8 vol, int8 pan) {
	if (_speechMuted)
		return RD_OK;

	if (getSpeechStatus() == RDERR_SPEECHPLAYING)
		return RDERR_SPEECHPLAYING;

	int cd = _vm->_resman->getCD();
	SoundFileHandle *fh = (cd == 1) ? &_speechFile[0] : &_speechFile[1];

	Audio::AudioStream *input = getAudioStream(fh, "speech", cd, speechId, NULL);
	if (!input)
		return RDERR_INVALIDID;

	// Modify the volume according to the master volume. The old
	// "_speechMuted ? 0 : ..." ternary was dead code here: the muted case
	// already returned RD_OK at the top of the function.
	byte volume = vol * Audio::Mixer::kMaxChannelVolume / 16;
	// Map the -16..16 game pan onto the mixer's -127..127 balance range.
	int8 p = (pan * 127) / 16;

	if (isReverseStereo())
		p = -p;

	// Start the speech playing
	_vm->_mixer->playStream(Audio::Mixer::kSpeechSoundType, &_soundHandleSpeech, input, -1, volume, p);
	return RD_OK;
}
/**
 * Decompress an entire speech sample up front into a malloc'd 16-bit
 * PCM buffer owned by the caller.
 *
 * @param speechId  id of the speech sample to fetch
 * @param buf       receives the allocated sample buffer (NULL on failure)
 * @return number of bytes actually decoded, or 0 on any failure
 */
uint32 Sound::preFetchCompSpeech(uint32 speechId, uint16 **buf) {
	const int disc = _vm->_resman->getCD();
	SoundFileHandle *handle = (disc == 1) ? &_speechFile[0] : &_speechFile[1];

	uint32 sampleCount;
	Audio::AudioStream *stream = getAudioStream(handle, "speech", disc, speechId, &sampleCount);

	if (!stream)
		return 0;

	*buf = NULL;

	// Decompress data into speech buffer; each sample is two bytes.
	const uint32 byteSize = 2 * sampleCount;
	*buf = (uint16 *)malloc(byteSize);
	if (!*buf) {
		// Allocation failed: release the decoder and the file before bailing.
		delete stream;
		handle->file.close();
		return 0;
	}

	const uint32 samplesRead = stream->readBuffer((int16 *)*buf, sampleCount);

	handle->file.close();
	delete stream;

	return 2 * samplesRead;
}
void VideoDecoder::AudioTrack::start() { stop(); Audio::AudioStream *stream = getAudioStream(); assert(stream); g_system->getMixer()->playStream(getSoundType(), &_handle, stream, -1, _muted ? 0 : getVolume(), getBalance(), DisposeAfterUse::NO); // Pause the audio again if we're still paused if (isPaused()) g_system->getMixer()->pauseHandle(_handle, true); }
void VideoDecoder::AudioTrack::start(const Audio::Timestamp &limit) {
	// Start playback of this track, but only up to the given timestamp.
	stop();

	Audio::AudioStream *stream = getAudioStream();
	assert(stream);

	// Wrap the track's stream so playback stops at 'limit'. The wrapper is
	// disposed by the mixer (YES) but must not free the underlying track
	// stream (NO).
	stream = Audio::makeLimitingAudioStream(stream, limit, DisposeAfterUse::NO);

	// Honor _muted here too, matching the unlimited start() overload; the
	// old code passed getVolume() unconditionally, so a muted track started
	// audible when begun with a limit.
	g_system->getMixer()->playStream(getSoundType(), &_handle, stream, -1,
	                                 _muted ? 0 : getVolume(), getBalance(), DisposeAfterUse::YES);

	// Pause the audio again if we're still paused
	if (isPaused())
		g_system->getMixer()->pauseHandle(_handle, true);
}
/**
 * Query the length of an audio sample without starting playback.
 *
 * @param module  audio module number
 * @param tuple   audio tuple identifying the sample
 * @return sample length, or 0 if the stream could not be created
 */
int AudioPlayer::wPlayAudio(uint16 module, uint32 tuple) {
	// Get the audio sample length and set the wPlay flag so we return 0 on
	// position. SSCI pre-loads the audio here, but it's much easier for us to
	// just get the sample length and return that. wPlayAudio should *not*
	// actually start the sample.

	int sampleLen = 0;
	Audio::AudioStream *audioStream = getAudioStream(tuple, module, &sampleLen);
	if (!audioStream)
		// 'tuple' is uint32, so it needs %u, not %d (which expects a signed int).
		warning("wPlayAudio: unable to create stream for audio tuple %u, module %d", tuple, module);
	// Deleting a null stream is safe; we never start playback here.
	delete audioStream;
	_wPlayFlag = true;
	return sampleLen;
}
/**
 * Start playback of the given audio resource on the speech channel.
 *
 * @param module  audio module number
 * @param number  audio resource number
 * @return the sample length, or 0 if the stream could not be created
 */
int AudioPlayer::startAudio(uint16 module, uint32 number) {
	int sampleLen;
	Audio::AudioStream *audioStream = getAudioStream(number, module, &sampleLen);

	if (!audioStream) {
		// Don't throw a warning in this case. getAudioStream() already has. Some games
		// do miss audio entries (perhaps because of a typo, or because they were simply
		// forgotten).
		return 0;
	}

	_wPlayFlag = false;
	_mixer->playStream(Audio::Mixer::kSpeechSoundType, &_audioHandle, audioStream);
	return sampleLen;
}
uint64 ResourceTreeItem::getSoundDuration() const {
	// Lazily determine (and cache) the duration of this sound resource.
	// Only one attempt is ever made; any failure leaves the cached default.
	if (_triedDuration)
		return _duration;

	_triedDuration = true;

	try {
		std::unique_ptr<Sound::AudioStream> sound(getAudioStream());

		// Only rewindable streams know their total duration; anything else
		// leaves _duration at its default.
		Sound::RewindableAudioStream *rewindable =
			dynamic_cast<Sound::RewindableAudioStream *>(sound.get());
		if (rewindable)
			_duration = rewindable->getDuration();
	} catch (...) {
		// Best effort: if the stream can't be opened, keep the default.
	}

	return _duration;
}
void AVIDecoder::AVIAudioTrack::skipAudio(const Audio::Timestamp &time, const Audio::Timestamp &frameTime) {
	// Discard decoded audio between frameTime and time so that audio and
	// video stay in sync after a seek/skip.
	const Audio::Timestamp delta =
		time.convertToFramerate(_wvInfo.samplesPerSec) - frameTime.convertToFramerate(_wvInfo.samplesPerSec);

	int samplesToSkip = delta.totalNumberOfFrames();
	if (samplesToSkip <= 0)
		return;

	Audio::AudioStream *stream = getAudioStream();
	if (!stream)
		return;

	// Stereo streams interleave two samples per frame.
	if (stream->isStereo())
		samplesToSkip *= 2;

	// Decode the unwanted samples into a scratch buffer and throw them away.
	int16 *scratch = new int16[samplesToSkip];
	stream->readBuffer(scratch, samplesToSkip);
	delete[] scratch;
}
/**
 * Construct a music input stream for the given music id.
 *
 * Opens the decoder, starts a fade-up and pre-fills the sample buffer.
 * If the decoder cannot be created, the stream is left in a safe,
 * fully-initialized "empty" state.
 */
MusicInputStream::MusicInputStream(int cd, SoundFileHandle *fh, uint32 musicId, bool looping) {
	_cd = cd;
	_fh = fh;
	_musicId = musicId;
	_looping = looping;

	_bufferEnd = _buffer + BUFFER_SIZE;
	_remove = false;
	_fading = 0;

	// Initialize the sample counters up front: getAudioStream() may not
	// write its out-parameter on failure, and the old code then left
	// _numSamples, _samplesLeft and _fadeSamples uninitialized.
	_numSamples = 0;
	_samplesLeft = 0;
	_fadeSamples = 0;

	_decoder = getAudioStream(_fh, "music", _cd, _musicId, &_numSamples);
	if (_decoder) {
		_samplesLeft = _numSamples;
		// Number of samples covered by a FADE_LENGTH-millisecond fade.
		_fadeSamples = (getRate() * FADE_LENGTH) / 1000;
		fadeUp();

		// Read in initial data
		refill();
	}
}
IOReturn AREngine::performFormatChange(IOAudioStream* inStream, const IOAudioStreamFormat* inNewFormat, const IOAudioSampleRate* inNewSampleRate) {
	// Keep the time stamp generator in step with any sample rate change.
	if(inNewSampleRate != NULL)
	{
		mTimeStampGenerator.SetSampleRate(inNewSampleRate->whole);
	}

	// A format change on one stream has to be mirrored on the matching
	// stream running in the opposite direction, or the reflection would
	// mangle the channel layout.
	if(inNewFormat != NULL)
	{
		// Bracket the format/control updates in a configuration change.
		beginConfigurationChange();

		// The mirror stream shares this stream's starting channel.
		UInt32 theStartingChannel = inStream->getStartingChannelID();

		// Flip the stream's direction to find its mirror.
		IOAudioStreamDirection theDirection =
			(inStream->getDirection() == kIOAudioStreamDirectionOutput) ? kIOAudioStreamDirectionInput : kIOAudioStreamDirectionOutput;

		// Look up and update the stream in the opposite direction, if any.
		IOAudioStream* theOppositeStream = getAudioStream(theDirection, theStartingChannel);
		if(theOppositeStream != NULL)
		{
			theOppositeStream->setFormat(inNewFormat, false);
		}

		completeConfigurationChange();
	}

	return kIOReturnSuccess;
}
bool VideoDecoder::AudioTrack::endOfTrack() const {
	// The track is finished when there is no stream, when the mixer has
	// stopped our handle, or when the stream itself has run dry.
	Audio::AudioStream *stream = getAudioStream();

	if (!stream)
		return true;

	if (!g_system->getMixer()->isSoundHandleActive(_handle))
		return true;

	return stream->endOfData();
}
// Refill the internal sample buffer from the decoder, applying any
// in-progress fade-up or fade-down, scheduling the end-of-track fade for
// non-looping music, and restarting the decoder when looping music runs out.
// Side effects: updates _samplesLeft, _fading, _looping, _remove, _decoder,
// _pos and _bufferEnd.
void MusicInputStream::refill() {
	int16 *buf = _buffer;
	uint32 numSamples = 0;
	uint32 len_left;
	bool endFade = false;

	len_left = BUFFER_SIZE;

	// While fading down, never read past the end of the fade
	// (_fading counts remaining fade samples when positive).
	if (_fading > 0 && (uint32)_fading < len_left)
		len_left = _fading;

	// Never read past the end of the track.
	if (_samplesLeft < len_left)
		len_left = _samplesLeft;

	if (!_looping) {
		// Non-looping music is faded out at the end. If this fade
		// out would have started somewhere within the len_left samples
		// to read, we only read up to that point. This way, we can
		// treat this fade as any other.

		if (!_fading) {
			uint32 currentlyAt = _numSamples - _samplesLeft;
			uint32 fadeOutAt = _numSamples - _fadeSamples;
			uint32 readTo = currentlyAt + len_left;

			if (fadeOutAt == currentlyAt)
				fadeDown();
			else if (fadeOutAt > currentlyAt && fadeOutAt <= readTo) {
				// Truncate this read at the fade-out point and start
				// the fade after the read (see endFade below).
				len_left = fadeOutAt - currentlyAt;
				endFade = true;
			}
		}
	}

	int desired = len_left - numSamples;
	int len = _decoder->readBuffer(buf, desired);

	// Shouldn't happen, but if it does it could cause an infinite loop.
	// Of course there were bugs that caused it to happen several times
	// during development. :-)
	if (len < desired) {
		warning("Expected %d samples, but got %d", desired, len);
		_samplesLeft = len;
	}

	buf += len;
	numSamples += len;
	len_left -= len;
	_samplesLeft -= len;

	int16 *ptr;

	if (_fading > 0) {
		// Fade down: scale each sample by the shrinking _fading/_fadeSamples
		// ratio; once the fade hits zero, silence the rest and mark the
		// stream for removal.
		for (ptr = _buffer; ptr < buf; ptr++) {
			if (_fading > 0) {
				_fading--;
				*ptr = (*ptr * _fading) / _fadeSamples;
			}
			if (_fading == 0) {
				_looping = false;
				_remove = true;
				*ptr = 0;
			}
		}
	} else if (_fading < 0) {
		// Fade up: _fading counts down from ~0 to -_fadeSamples, so the
		// -(_fading)/_fadeSamples ratio grows from 0 towards 1.
		for (ptr = _buffer; ptr < buf; ptr++) {
			_fading--;
			*ptr = -(*ptr * _fading) / _fadeSamples;
			if (_fading <= -_fadeSamples) {
				_fading = 0;
				break;
			}
		}
	}

	// The end-of-track fade scheduled above starts with the next refill.
	if (endFade)
		fadeDown();

	if (!_samplesLeft) {
		if (_looping) {
			// Looping track exhausted: reopen the decoder from the start.
			delete _decoder;
			_decoder = getAudioStream(_fh, "music", _cd, _musicId, &_numSamples);
			_samplesLeft = _numSamples;
		} else
			_remove = true;
	}

	_pos = _buffer;
	_bufferEnd = buf;
}
// Fetch the next command from the SCI1 song iterator.
// Returns a positive delta time (ticks to wait), SI_PCM when a PCM sample
// is announced, SI_LOOP when all active channels have looped, or
// SI_FINISHED when the song is over. MIDI bytes are written to 'buf' with
// the byte count in 'result' (via processMidi).
int Sci1SongIterator::nextCommand(byte *buf, int *result) {
	// Lazily initialise the song on first use.
	if (!_initialised) {
		//printf("[iterator] DEBUG: Initialising for %d\n", _deviceId);
		_initialised = true;
		if (initSong())
			return SI_FINISHED;
	}

	// A previously stored delay is consumed before anything else.
	if (_delayRemaining) {
		int delay = _delayRemaining;
		_delayRemaining = 0;
		return delay;
	}

	int retval = 0;
	do { /* All delays must be processed separately */
		int chan = getCommandIndex();

		if (chan == COMMAND_INDEX_NONE) {
			return SI_FINISHED;
		}

		if (chan == COMMAND_INDEX_PCM) {
			if (_samples.begin()->announced) {
				/* Already announced; let's discard it */
				Audio::AudioStream *feed = getAudioStream();
				delete feed;
			} else {
				// Announce the PCM: first honour its delta time, then
				// report SI_PCM exactly once.
				int delay = _samples.begin()->delta;

				if (delay) {
					updateDelta(delay);
					return delay;
				}
				/* otherwise we're touching a PCM */
				_samples.begin()->announced = true;
				return SI_PCM;
			}
		} else { /* Not a PCM */
			retval = processMidi(buf, result, &(_channels[chan]), PARSE_FLAG_LOOPS_UNLIMITED);

			if (retval == SI_LOOP) {
				// A channel reached its loop point; park it until every
				// active channel has looped.
				_numLoopedChannels++;
				_channels[chan].state = SI_STATE_PENDING;
				_channels[chan].delay = 0;

				if (_numLoopedChannels == _numActiveChannels) {
					int i;

					/* Everyone's ready: Let's loop */
					for (i = 0; i < _numChannels; i++)
						if (_channels[i].state == SI_STATE_PENDING)
							_channels[i].state = SI_STATE_DELTA_TIME;

					_numLoopedChannels = 0;
					return SI_LOOP;
				}
			} else if (retval == SI_FINISHED) {
#ifdef DEBUG
				fprintf(stderr, "FINISHED some channel\n");
#endif
			} else if (retval > 0) {
				int sd;
				sd = getSmallestDelta();

				if (noDeltaTime() && sd) { /* No other channel is ready */
					updateDelta(sd);

					/* Only from here do we return delta times */
					return sd;
				}
			}
		} /* Not a PCM */

	} while (retval > 0);

	return retval;
}