Example #1
SubSeekableAudioStream::SubSeekableAudioStream(SeekableAudioStream *parent, const Timestamp start, const Timestamp end, DisposeAfterUse::Flag disposeAfterUse)
    : _parent(parent), _disposeAfterUse(disposeAfterUse),
      _start(convertTimeToStreamPos(start, getRate(), isStereo())),
      _pos(0, getRate() * (isStereo() ? 2 : 1)),
      _length(convertTimeToStreamPos(end - start, getRate(), isStereo())) {

	assert(_length.totalNumberOfFrames() % (isStereo() ? 2 : 1) == 0);
	_parent->seek(_start);
}
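A note on the position math above: ScummVM audio streams address data in interleaved samples, so a time position is scaled by both the sample rate and the channel count. Below is a minimal standalone sketch of that mapping for millisecond inputs; timeToStreamPos is a hypothetical stand-in, since the real convertTimeToStreamPos operates on Timestamp objects.

#include <cstdint>

// Hypothetical stand-in for convertTimeToStreamPos: maps a time in
// milliseconds to an interleaved-sample index at the given rate.
static uint64_t timeToStreamPos(uint64_t millis, uint32_t rate, bool stereo) {
	// frames = millis * rate / 1000; a stereo frame holds two samples
	return millis * rate / 1000 * (stereo ? 2 : 1);
}
// e.g. timeToStreamPos(500, 44100, true) == 44100 interleaved samples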
Example #2
SubLoopingAudioStream::SubLoopingAudioStream(SeekableAudioStream *stream,
                                             uint loops,
                                             const Timestamp loopStart,
                                             const Timestamp loopEnd,
                                             DisposeAfterUse::Flag disposeAfterUse)
    : _parent(stream), _disposeAfterUse(disposeAfterUse), _loops(loops),
      _pos(0, getRate() * (isStereo() ? 2 : 1)),
      _loopStart(convertTimeToStreamPos(loopStart, getRate(), isStereo())),
      _loopEnd(convertTimeToStreamPos(loopEnd, getRate(), isStereo())),
      _done(false) {
	assert(loopStart < loopEnd);

	if (!_parent->rewind())
		_done = true;
}
Example #3
	EMISubLoopingAudioStream(Audio::SeekableAudioStream *stream, uint loops,
		const Audio::Timestamp start,
		const Audio::Timestamp loopStart,
		const Audio::Timestamp loopEnd,
		DisposeAfterUse::Flag disposeAfterUse = DisposeAfterUse::YES)
		: _parent(stream, disposeAfterUse),
		_pos(convertTimeToStreamPos(start, getRate(), isStereo())),
		_loopStart(convertTimeToStreamPos(loopStart, getRate(), isStereo())),
		_loopEnd(convertTimeToStreamPos(loopEnd, getRate(), isStereo())),
		_done(false), _hasLooped(false) {
		assert(loopStart < loopEnd);

		if (!_parent->seek(_pos))
			_done = true;
	}
Example #4
int SCXStream::readBuffer(int16 *buffer, const int numSamples) {
	if (isStereo()) {
		// Needs to be divisible by the channel count
		assert((numSamples % 2) == 0);

		// TODO: As per above, this probably should do more actual streaming

		// Decode enough data from each channel
		int samplesPerChannel = numSamples / 2;
		int16 *leftSamples = new int16[samplesPerChannel];
		int16 *rightSamples = new int16[samplesPerChannel];

		int samplesDecodedLeft = _xaStreams[0]->readBuffer(leftSamples, samplesPerChannel);
		int samplesDecodedRight = _xaStreams[1]->readBuffer(rightSamples, samplesPerChannel);
		assert(samplesDecodedLeft == samplesDecodedRight);

		// Now re-interleave the data; copy only as many frames as were
		// actually decoded, in case the substreams returned fewer samples.
		int samplesDecoded = 0;
		int16 *leftSrc = leftSamples, *rightSrc = rightSamples;
		for (; samplesDecoded < samplesDecodedLeft * 2; samplesDecoded += 2) {
			*buffer++ = *leftSrc++;
			*buffer++ = *rightSrc++;
		}

		delete[] leftSamples;
		delete[] rightSamples;
		return samplesDecoded;
	}

	// Just read from the stream directly for mono
	return _xaStreams[0]->readBuffer(buffer, numSamples);
}
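The re-interleaving step above produces the standard interleaved layout: frame i occupies buffer[2*i] (left) and buffer[2*i+1] (right). A minimal standalone sketch of the same pattern, with hypothetical names:

#include <cstdint>

// Hypothetical helper mirroring the loop above: merge two planar channel
// buffers into a single interleaved output buffer.
static void interleaveStereo(const int16_t *left, const int16_t *right,
                             int16_t *out, int frames) {
	for (int i = 0; i < frames; ++i) {
		out[2 * i]     = left[i];  // left sample of frame i
		out[2 * i + 1] = right[i]; // right sample of frame i
	}
}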
Example #5
void AudioMixerClientData::setupCodec(CodecPluginPointer codec, const QString& codecName) {
    cleanupCodec(); // cleanup any previously allocated coders first
    _codec = codec;
    _selectedCodecName = codecName;
    if (codec) {
        _encoder = codec->createEncoder(AudioConstants::SAMPLE_RATE, AudioConstants::STEREO);
        _decoder = codec->createDecoder(AudioConstants::SAMPLE_RATE, AudioConstants::MONO);
    }

    auto avatarAudioStream = getAvatarAudioStream();
    if (avatarAudioStream) {
        avatarAudioStream->setupCodec(codec, codecName, avatarAudioStream->isStereo() ? AudioConstants::STEREO : AudioConstants::MONO);
        qCDebug(audio) << "setting AvatarAudioStream... codec:" << _selectedCodecName << "isStereo:" << avatarAudioStream->isStereo();
    }

#if INJECTORS_SUPPORT_CODECS
    // fixup codecs for any active injectors...
    auto it = _audioStreams.begin();
    while (it != _audioStreams.end()) {
        SharedStreamPointer stream = it->second;
        if (stream->getType() == PositionalAudioStream::Injector) {
            stream->setupCodec(codec, codecName, stream->isStereo() ? AudioConstants::STEREO : AudioConstants::MONO);
        }
        ++it;
    }
#endif
}
Example #6
void QueuingAudioStreamImpl::queueAudioStream(AudioStream *stream, DisposeAfterUse::Flag disposeAfterUse) {
	assert(!_finished);
	if ((stream->getRate() != getRate()) || (stream->isStereo() != isStereo()))
		error("QueuingAudioStreamImpl::queueAudioStream: stream has mismatched parameters");

	Common::StackLock lock(_mutex);
	_queue.push(StreamHolder(stream, disposeAfterUse));
}
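For context, a hedged usage sketch of the queuing API, assuming ScummVM's public factory in audio/audiostream.h; chunkA and chunkB are placeholders for streams decoded elsewhere, and both must match the queue's rate and channel layout or the error() above fires.

#include "audio/audiostream.h"

// Hedged sketch: chunkA/chunkB stand in for 44.1 kHz stereo streams
// obtained elsewhere (e.g. from a decoder).
void playChunks(Audio::AudioStream *chunkA, Audio::AudioStream *chunkB) {
	Audio::QueuingAudioStream *queue = Audio::makeQueuingAudioStream(44100, true);
	queue->queueAudioStream(chunkA, DisposeAfterUse::YES); // queue takes ownership
	queue->queueAudioStream(chunkB, DisposeAfterUse::YES);
	queue->finish(); // no further streams will be queued
}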
Example #7
void AvatarManager::handleCollisionEvents(const CollisionEvents& collisionEvents) {
    bool playedCollisionSound { false };
    for (Collision collision : collisionEvents) {
        // TODO: The plan is to handle MOTIONSTATE_TYPE_AVATAR, and then MOTIONSTATE_TYPE_MYAVATAR. As it is, other
        // people's avatars will have an id that doesn't match any entities, and one's own avatar will have
        // an id of null. Thus this code handles any collision in which one of the participating objects is
        // my avatar. (Other user machines will make a similar analysis and inject sound for their collisions.)
        if (collision.idA.isNull() || collision.idB.isNull()) {
            auto myAvatar = getMyAvatar();
            myAvatar->collisionWithEntity(collision);

            if (!playedCollisionSound) {
                playedCollisionSound = true;
                auto collisionSound = myAvatar->getCollisionSound();
                if (collisionSound) {
                    const auto characterController = myAvatar->getCharacterController();
                    const float avatarVelocityChange =
                        (characterController ? glm::length(characterController->getVelocityChange()) : 0.0f);
                    const float velocityChange = glm::length(collision.velocityChange) + avatarVelocityChange;
                    const float MIN_AVATAR_COLLISION_ACCELERATION = 2.4f;  // walking speed
                    const bool isSound =
                        (collision.type == CONTACT_EVENT_TYPE_START) && (velocityChange > MIN_AVATAR_COLLISION_ACCELERATION);

                    if (!isSound) {
                        return;  // No sense iterating for others. We only have one avatar.
                    }
                    // Your avatar sound is personal to you, so let's say the "mass" part of the kinetic energy is already accounted for.
                    const float energy = velocityChange * velocityChange;
                    const float COLLISION_ENERGY_AT_FULL_VOLUME = 10.0f;
                    const float energyFactorOfFull = fmin(1.0f, energy / COLLISION_ENERGY_AT_FULL_VOLUME);

                    // For general entity collisionSoundURL, playSound supports changing the pitch for the sound based on the size of the object,
                    // but most avatars are roughly the same size, so let's not be so fancy yet.
                    const float AVATAR_STRETCH_FACTOR = 1.0f;

                    _collisionInjectors.remove_if([](const AudioInjectorPointer& injector) { return !injector; });

                    static const int MAX_INJECTOR_COUNT = 3;
                    if (_collisionInjectors.size() < MAX_INJECTOR_COUNT) {
                        AudioInjectorOptions options;
                        options.stereo = collisionSound->isStereo();
                        options.position = myAvatar->getWorldPosition();
                        options.volume = energyFactorOfFull;
                        options.pitch = 1.0f / AVATAR_STRETCH_FACTOR;

                        auto injector = DependencyManager::get<AudioInjectorManager>()->playSound(collisionSound, options, true);
                        _collisionInjectors.emplace_back(injector);
                    }
                }
            }
        }
    }
}
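The volume mapping above is quadratic in the velocity change and clamps at COLLISION_ENERGY_AT_FULL_VOLUME. A hypothetical standalone restatement with a worked case:

#include <algorithm>

// Hypothetical restatement of the collision-volume curve above; the "mass"
// term of the kinetic energy is deliberately dropped, as in the source.
static float collisionVolume(float velocityChange) {
	const float COLLISION_ENERGY_AT_FULL_VOLUME = 10.0f;
	float energy = velocityChange * velocityChange;
	return std::min(1.0f, energy / COLLISION_ENERGY_AT_FULL_VOLUME);
}
// e.g. collisionVolume(2.5f) == 0.625f; changes above ~3.16 m/s play at full volume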
Example #8
bool SubSeekableAudioStream::seek(const Timestamp &where) {
	_pos = convertTimeToStreamPos(where, getRate(), isStereo());
	if (_pos > _length) {
		_pos = _length;
		return false;
	}

	if (_parent->seek(_pos + _start)) {
		return true;
	} else {
		_pos = _length;
		return false;
	}
}
Example #9
	uint64_t SoundEmitter::getDuration() {
		if (m_soundClip) {
			// sample rate in samples per millisecond, so the result below is in ms
			double samplerate = static_cast<double>(getSampleRate()) / 1000.0;
			double bitres = static_cast<double>(getBitResolution());
			// convert to bits
			double size = static_cast<double>(getDecodedLength()) * 8.0;
			double stereo = (isStereo() ? 2.0 : 1.0);
			double time = ( size / (samplerate * bitres) ) / stereo;

			return static_cast<uint64_t>(time);
		}
		return 0;
	}
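With units spelled out, the formula reads: duration in milliseconds = total bits / (samples per millisecond × bits per sample) / channels. A hypothetical standalone restatement with a worked case:

#include <cstdint>

// Hypothetical restatement of SoundEmitter::getDuration()'s arithmetic.
static uint64_t durationMillis(uint64_t decodedBytes, double sampleRate,
                               double bitsPerSample, bool stereo) {
	double samplesPerMs = sampleRate / 1000.0;  // e.g. 44.1 at 44.1 kHz
	double totalBits    = static_cast<double>(decodedBytes) * 8.0;
	double channels     = stereo ? 2.0 : 1.0;
	return static_cast<uint64_t>(totalBits / (samplesPerMs * bitsPerSample) / channels);
}
// e.g. 441000 bytes of 16-bit stereo at 44.1 kHz:
//   (441000 * 8) / (44.1 * 16) / 2 = 2500 ms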
Example #10
void GlWindow::setViewBuffer(vrj::Viewport::View view)
{
   if(!isStereo())
   {
      glDrawBuffer(GL_BACK);
   }
   else if(Viewport::LEFT_EYE == view)
   {
      glDrawBuffer(GL_BACK_LEFT);
   }
   else if(Viewport::RIGHT_EYE == view)
   {
      glDrawBuffer(GL_BACK_RIGHT);
   }
}
Example #11
bool RawStream<is16Bit, isUnsigned, isLE>::seek(const Timestamp &where) {
	_endOfData = true;

	if (where > _playtime)
		return false;

	const uint32 seekSample = convertTimeToStreamPos(where, getRate(), isStereo()).totalNumberOfFrames();
	_stream->seek(seekSample * (is16Bit ? 2 : 1), SEEK_SET);

	// In case of an error we will not continue stream playback.
	if (!_stream->err() && !_stream->eos() && _stream->pos() != _stream->size())
		_endOfData = false;

	return true;
}
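The offset handed to _stream->seek() already folds in the channel count (via convertTimeToStreamPos) as well as the sample width. A hypothetical numeric restatement:

#include <cstdint>

// Hypothetical restatement of the byte offset computed above. Seeking to
// 1 s in a 22050 Hz, 16-bit stereo raw stream:
//   samples = 1000 * 22050 / 1000 * 2 = 44100
//   bytes   = 44100 * 2              = 88200 from the start of the data
static uint32_t rawByteOffset(uint32_t millis, uint32_t rate, bool stereo, bool is16Bit) {
	uint32_t samples = millis * rate / 1000 * (stereo ? 2 : 1);
	return samples * (is16Bit ? 2 : 1);
}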
Example #12
void JitterSample::run(const render::RenderContextPointer& renderContext) {
    auto& current = _sampleSequence.currentIndex;
    if (!_freeze) {
        if (current >= 0) {
            current = (current + 1) % SEQUENCE_LENGTH;
        } else {
            current = -1;
        }
    }
    auto args = renderContext->args;
    auto viewFrustum = args->getViewFrustum();

    auto jit = _sampleSequence.offsets[(current < 0 ? SEQUENCE_LENGTH : current)];
    auto width = (float)args->_viewport.z;
    auto height = (float)args->_viewport.w;

    auto jx = 2.0f * jit.x / width;
    auto jy = 2.0f * jit.y / height;

    if (!args->isStereo()) {
        auto projMat = viewFrustum.getProjection();

        projMat[2][0] += jx;
        projMat[2][1] += jy;

        viewFrustum.setProjection(projMat);
        viewFrustum.calculate();
        args->pushViewFrustum(viewFrustum);
    } else {
        mat4 projMats[2];
        args->_context->getStereoProjections(projMats);

        jx *= 2.0f;

        for (int i = 0; i < 2; i++) {
            auto& projMat = projMats[i];
            projMat[2][0] += jx;
            projMat[2][1] += jy;
        }

        args->_context->setStereoProjections(projMats);
    }
}
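Note the extra doubling of jx on the stereo path: with two eye projections rendered into one shared viewport, each eye presumably spans half the full width, so the same pixel jitter is twice as large in per-eye NDC units. A hypothetical restatement of that scaling:

// Hypothetical restatement of the jitter-to-NDC conversion above.
static float ndcJitterX(float jitterPixels, float viewportWidthPixels, bool stereo) {
	float jx = 2.0f * jitterPixels / viewportWidthPixels; // pixels -> NDC (span of 2)
	return stereo ? jx * 2.0f : jx; // assumption: each eye covers half the width
}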
Example #13
Sample* Wav::makeSample(int chan)
{
    if(!isok)
    {
        DERROR("not loaded");
        return NULL;
    }

    Sample* samp;

    if(chan==EXTRACT_ALL)
    {
        // head.nbytes/2: total count of 16-bit samples across both channels
        samp = new Sample(head.nbytes/2);
        memcpy(samp->data, bytes, head.nbytes);
    }
    else
    {
        DASSERTP(isStereo(),"(W): trying to extract a single channel from mono!");

        // head.nbytes/4: 16-bit samples per channel of interleaved stereo
        samp = new Sample(head.nbytes/4);
        short* dp = (short*)bytes;

        if(chan==EXTRACT_LEFT)
        {
            for(int i=0;i<head.nbytes/4;i++)
            {
                samp->data[i] = dp[i<<1];       // even indices hold the left channel
            }
        }
        if(chan==EXTRACT_RIGHT)
        {
            for(int i=0;i<head.nbytes/4;i++)
            {
                samp->data[i] = dp[1+(i<<1)];   // odd indices hold the right channel
            }
        }
    }

    return samp;
}
Example #14
bool SCXStream::rewind() {
	if (!_xaStreams[0]->rewind())
		return false;

	return !isStereo() || _xaStreams[1]->rewind();
}
Example #15
SCXStream::SCXStream(Common::SeekableReadStream *stream, DisposeAfterUse::Flag disposeAfterUse) {
	static const uint32 stereoChannelNames[MAX_CHANNELS] = { MKTAG('L', 'E', 'F', 'T'), MKTAG('R', 'G', 'H', 'T') };

	stream->readUint32BE(); // 'SCRX'
	stream->readUint32LE();

	_blockSize = stream->readUint16LE();
	/* totalBlockSize = */ stream->readUint16LE();

	if (_blockSize & 0xf)
		error("Bad SCX block size %04x", _blockSize);

	// Derive the channel count from the block size (a zero block size means mono)
	_channels = (_blockSize == 0) ? 1 : 2;

	stream->skip(12);

	uint32 channelSize[MAX_CHANNELS];
	for (int i = 0; i < _channels; i++) {
		uint32 tag = stream->readUint32BE();

		if (isStereo()) {
			if (tag != stereoChannelNames[i])
				error("Bad stereo channel tag found '%s'", tag2str(tag));
		} else if (tag != MKTAG('M', 'O', 'N', 'O'))
			error("Bad mono channel tag found '%s'", tag2str(tag));

		channelSize[i] = stream->readUint32LE();
	}

	stream->seek(0x80);

	uint32 leftRate = 0, rightRate = 0;
	for (int i = 0; i < _channels; i++) {
		if (stream->readUint32BE() != MKTAG('V', 'A', 'G', 'p'))
			error("Bad VAG header");

		/* uint32 version = */ stream->readUint32BE();
		stream->readUint32BE();
		stream->readUint32BE();

		if (i == 0)
			leftRate = stream->readUint32BE();
		else
			rightRate = stream->readUint32BE();

		stream->skip(12); // skip useless info
		stream->skip(16); // skip name
		stream->skip(16); // skip zeroes
	}

	if (isStereo() && leftRate != rightRate)
		error("Mismatching SCX rates");

	_rate = leftRate;

	if (isStereo()) {
		// TODO: Make XAStream allow for appending data (similar to how ScummVM
		// handles AAC/QDM2). For now, we de-interleave the XA ADPCM data and then
		// re-interleave it in readBuffer().
		// Of course, anything that streamed more lazily would disturb the XA loop
		// points, so it is not clear which approach is best yet.
		byte *leftOut = new byte[channelSize[0]];
		byte *rightOut = new byte[channelSize[1]];
		Common::MemoryWriteStream *leftStream = new Common::MemoryWriteStream(leftOut, channelSize[0]);
		Common::MemoryWriteStream *rightStream = new Common::MemoryWriteStream(rightOut, channelSize[1]);
		byte *buf = new byte[_blockSize];

		while (stream->pos() < stream->size()) {
			stream->read(buf, _blockSize);
			leftStream->write(buf, _blockSize);
			stream->read(buf, _blockSize);
			rightStream->write(buf, _blockSize);
		}

		_xaStreams[0] = Audio::makeXAStream(new Common::MemoryReadStream(leftOut, channelSize[0], DisposeAfterUse::YES), _rate);
		_xaStreams[1] = Audio::makeXAStream(new Common::MemoryReadStream(rightOut, channelSize[1], DisposeAfterUse::YES), _rate);

		delete[] buf;
		delete leftStream;
		delete rightStream;
	} else {
		_xaStreams[0] = Audio::makeXAStream(stream->readStream(channelSize[0]), _rate);
		_xaStreams[1] = 0;
	}

	if (disposeAfterUse == DisposeAfterUse::YES)
		delete stream;
}
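The stereo branch above consumes block-interleaved data; per the alternating reads, the on-disk layout is presumably:

//   [left block: _blockSize bytes][right block: _blockSize bytes][left block]...
// The constructor splits these into two planar XA streams, and readBuffer()
// later re-interleaves the decoded samples frame by frame.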
Example #16
void Tool::play(char **argv)
{
	AudioDevice *dev;
	PlayStream playfile;
	const char *path = *argv;
	Linear buffer;
	Info info;
	unsigned bufcount, pages;
	
	dev = getDevice();

	if(!hasDevice() && !dev)
	{
		cerr << "no device supported" << endl;
		exit(-1);
	}
	else if(!dev)
	{
		cerr << "device unavailable" << endl;
		exit(-1);
	}

	playfile.open(argv);

	if(!playfile.isOpen())
	{
		cerr << "audiotool: " << path << ": unable to access" << endl;
		exit(-1);
	}

	if(!playfile.isStreamable())
	{
		cerr << "audiotool: " << path << ": missing needed codec" << endl;
		exit(-1);
	}

	playfile.getInfo(&info);
	if(!dev->setAudio((Rate)info.rate, isStereo(info.encoding), 10))
	{
		cerr << "audiotool: sound device does not support rate" << endl;
		exit(-1);
	}

	bufcount = playfile.getCount();
	if(isStereo(info.encoding))
		buffer = new Sample[bufcount * 2];
	else
		buffer = new Sample[bufcount];

	for(;;)
	{
		if(isStereo(info.encoding))
			pages = playfile.getStereo(buffer, 1);
		else
			pages = playfile.getMono(buffer, 1);

		if(!pages)
			break;

		dev->putSamples(buffer, bufcount);
	}

	dev->sync();
	delete dev;
	playfile.close();		
	exit(0);
}
Example #17
void Tremolo::process( AudioBuffer* sampleBuffer, bool isMonoSource )
{
    int bufferSize = sampleBuffer->bufferSize;
    bool doStereo  = ( sampleBuffer->amountOfChannels > 1 ) && isStereo();

    SAMPLE_TYPE* envelopeTable;
    SAMPLE_TYPE volume;

    for ( int c = 0, ca = sampleBuffer->amountOfChannels; c < ca; ++c )
    {
        envelopeTable = getTableForChannel( c );
        SAMPLE_TYPE* channelBuffer = sampleBuffer->getBufferForChannel( c );
        bool useLeft = !doStereo || c % 2 == 0;

        for ( int i = 0; i < bufferSize; ++i )
        {
            if ( useLeft )
            {
                if ( _leftState == 0 )
                {
                    if (( _leftTableIndex += _leftAttackIncr ) >= ENVELOPE_PRECISION )
                    {
                        _leftTableIndex = ENVELOPE_PRECISION - 1;
                        _leftState      = 1;
                    }
                }
                else if ( _leftState == 1 )
                {
                    if (( _leftTableIndex -= _leftDecayIncr ) <= 0 )
                    {
                        _leftTableIndex = 0;

                        // optional: hold state
                        // if resulting volume is now smaller than sustain amp > increment _leftState
                        _leftState = 0;
                    }
                }
                volume = envelopeTable[( int ) _leftTableIndex ];
            }
            else {

                // right channel

                if ( _rightState == 0 )
                {
                    if (( _rightTableIndex += _rightAttackIncr ) >= ENVELOPE_PRECISION )
                    {
                        _rightTableIndex = ENVELOPE_PRECISION - 1;
                        _rightState      = 1;
                    }
                }
                else if ( _rightState == 1 )
                {
                    if (( _rightTableIndex -= _rightDecayIncr ) <= 0 )
                    {
                        _rightTableIndex = 0;

                        // optional: hold state
                        // if resulting volume is now smaller than sustain amp > increment _rightState
                        _rightState = 0;
                    }
                }
                volume = envelopeTable[( int ) _rightTableIndex ];
            }
            channelBuffer[ i ] *= volume;
        }

        // save CPU cycles when source and output are mono
        if ( isMonoSource && !doStereo )
        {
            sampleBuffer->applyMonoSource();
            break;
        }
    }
}