// Interleaves the member streams into buffer: one frame per pass, where a
// frame is one sample for every channel of every stream.  Returns the number
// of samples written, or kSizeInvalid if any sub-stream reports an error.
size_t Interleaver::readBuffer(int16 *buffer, const size_t numSamples) {
	size_t written = 0;

	while (written < numSamples && !endOfData()) {
		// Pull one sample per channel from each stream in turn.
		for (std::vector<AudioStream *>::iterator it = _streams.begin(); it != _streams.end(); ++it) {
			const size_t chanCount = (*it)->getChannels();
			const size_t got = (*it)->readBuffer(buffer, chanCount);

			if (got == kSizeInvalid)
				return kSizeInvalid;

			// On a short read, blank this stream's whole slot (2 bytes/sample).
			if (got != chanCount)
				memset(buffer, 0, 2 * chanCount);

			buffer += chanCount;
			written += chanCount;
		}
	}

	return written;
}
// Refills the internal sample buffer from the underlying byte stream.
// Reads at most min(maxSamples, kSampleBufferLength) samples and returns
// the number of samples actually buffered (0 on immediate EOF/error).
int RawStream<is16Bit, isUnsigned, isLE>::fillBuffer(int maxSamples) {
	int bufferedSamples = 0;
	byte *dst = _buffer;

	// We can only read up to "kSampleBufferLength" samples
	// so we take this into consideration, when trying to
	// read up to maxSamples.
	maxSamples = MIN<int>(kSampleBufferLength, maxSamples);

	// We will only read up to maxSamples
	while (maxSamples > 0 && !endOfData()) {
		// Try to read all the sample data and update the
		// destination pointer.  (2 bytes/sample for 16-bit data.)
		const int bytesRead = _stream->read(dst, maxSamples * (is16Bit ? 2 : 1));
		dst += bytesRead;

		// Calculate how many samples we actually read.
		const int samplesRead = bytesRead / (is16Bit ? 2 : 1);

		// Update all status variables
		bufferedSamples += samplesRead;
		maxSamples -= samplesRead;

		// We stop stream playback, when we reached the end of the data stream.
		// We also stop playback when an error occurs.
		if (_stream->pos() == _stream->size() || _stream->err() || _stream->eos())
			_endOfData = true;
	}

	return bufferedSamples;
}
// Transitions the receiver state machine from m_state to `state`:
// first runs the exit actions of the current state, then the entry
// actions of the new one.
void CGMSKClientThread::setState(RX_STATE state) {
	// This is the "from" state: exit actions
	switch (m_state) {
		case RXS_LISTENING:
			m_syncCount = 0U;
			break;
		case RXS_PROCESS_DATA:
			// Leaving data processing: flush pending data, idle the dongle.
			endOfData();
			m_dongle->setMode(DVDMODE_IDLE);
			break;
	}

	// This is the "to" state: entry actions
	switch (state) {
		case RXS_LISTENING:
			m_state = RXS_LISTENING;
			break;
		case RXS_PROCESS_DATA:
			::wxGetApp().showBusy(new CBusyData(true));
			m_slowDataDecoder.reset();
			m_decodeData.clear();
			m_decodeAudio.clear();
			m_dongle->setMode(DVDMODE_DECODE);
			m_ambeLength = 0U;
			m_syncCount = 20U;
			m_state = RXS_PROCESS_DATA;
			break;
	}
}
// Decodes up to numSamples samples into buffer by draining per-packet
// sub-streams, creating the next one on demand.  Returns the samples
// written, or kSizeInvalid on a sub-stream error.
size_t ASFStream::readBuffer(int16 *buffer, const size_t numSamples) {
	size_t total = 0;

	while (true) {
		if (_curAudioStream) {
			const size_t got = _curAudioStream->readBuffer(buffer + total, numSamples - total);
			if (got == kSizeInvalid)
				return kSizeInvalid;
			total += got;

			// Dispose of a fully drained sub-stream.
			if (_curAudioStream->endOfData()) {
				delete _curAudioStream;
				_curAudioStream = 0;
			}
		}

		if (total == numSamples || endOfData())
			break;

		// Lazily build the next sub-stream from the next packet.
		if (!_curAudioStream)
			_curAudioStream = createAudioStream();
	}

	return total;
}
/*
 * Compare the data portions of two lines: the text after the first ';'
 * up to the end of data (end of string or start of comment), as located
 * by endOfData().  Returns non-zero when both portions are identical.
 * Each line is assumed to contain at least one semicolon.
 */
static int sameData(const char *l1, const char *l2) {
    const char *p1;
    const char *p2;
    char *stop1, *stop2;
    int len1, len2;

    /* skip to just past the first semicolon - there must be one */
    p1 = strchr(l1, ';') + 1;
    p2 = strchr(l2, ';') + 1;

    /* locate end of data on each line */
    stop1 = endOfData(p1);
    stop2 = endOfData(p2);

    len1 = stop1 - p1;
    len2 = stop2 - p2;
    if (len1 != len2)
        return 0;
    return memcmp(p1, p2, len1) == 0;
}
void PullStream::needData() { const QByteArray data = getMediaData(); if (data.isEmpty()) { endOfData(); } else { writeData(data); } }
void IODeviceStream::needData() { Q_D(IODeviceStream); const QByteArray data = d->ioDevice->read(4096); if (data.isEmpty() && !d->ioDevice->atEnd()) { error(Phonon::NormalError, d->ioDevice->errorString()); } writeData(data); if (d->ioDevice->atEnd()) { endOfData(); } }
// Consumes a run of whitespace characters from the input, stopping at the
// first non-whitespace character or at end of data.  Returns whether any
// whitespace was consumed.
bool Parser::parseWhitespace() {
	bool consumed = false;

	while (isInSet(whitespaceSet, current)) {
		consumed = true;
		readChar();
		if (endOfData())
			break;
	}

	return consumed;
}
// Decodes up to numSamples OKI ADPCM samples into buffer.  Each input byte
// carries two 4-bit codes, decoded high nibble first via decodeOKI().
int Oki_ADPCMStream::readBuffer(int16 *buffer, const int numSamples) {
	int samples;
	byte data;

	for (samples = 0; samples < numSamples && !endOfData(); samples++) {
		if (_decodedSampleCount == 0) {
			// Pull one byte and decode both nibbles into a 2-deep FIFO.
			data = _stream->readByte();
			_decodedSamples[0] = decodeOKI((data >> 4) & 0x0f);
			_decodedSamples[1] = decodeOKI((data >> 0) & 0x0f);
			_decodedSampleCount = 2;
		}

		// (1 - (count - 1)) ensures that _decodedSamples acts as a FIFO of depth 2
		buffer[samples] = _decodedSamples[1 - (_decodedSampleCount - 1)];
		_decodedSampleCount--;
	}
	// NOTE(review): the function appears truncated in this chunk — the
	// closing brace / `return samples;` is not visible here; confirm
	// against the full file.
// Parses an XML processing instruction ("<?name ... ?>") and attaches it
// to `node` as a ProcessingInstruction child.  Returns false when the input
// does not start with "<?"; reports an error for a missing name, the
// reserved name "xml", or an unterminated instruction.
bool Parser::parseProcessingInstruction (Node * node) {
	// Processing instructions start with <?
	if (!doesStreamMatchString ("<?"))
		return false;

	std::string name;
	if (!parseName (name))
		reportError ("Valid processing instructions have to have a name.");

	if (name == "xml")
		reportError ("Processing instructions cannot have the reserved name 'xml'");

	Node *thisNode = new Node;
	thisNode->setName (name);
	thisNode->setNodeType (ProcessingInstruction);

	try {
		parseWhitespace ();

		// Accumulate the instruction's character data up to the closing "?>".
		// BUG FIX: the accumulator `data` was never declared — the original
		// declared an unused local `chars` while appending to `data`.
		std::string data;
		while (!doesStreamMatchString ("?>")) {
			readChar (false);
			data += current;

			// We're in the middle of parsing a processing instruction
			if (endOfData ())
				reportError ("Unterminated processing instruction!");
		}
		thisNode->setData (data);
	} catch (...) {
		// Don't leak the node when parsing throws.
		delete thisNode;
		throw;
	}

	node->addNode (thisNode);
	return true;
}
// Decodes up to numSamples 16-bit samples of AAC audio into buffer and
// returns the number of samples written.  numSamples must be a multiple
// of the channel count.
int AACStream::readBuffer(int16 *buffer, const int numSamples) {
	int samples = 0;

	assert((numSamples % _channels) == 0);

	// Dip into our remaining samples pool if it's available
	if (_remainingSamples) {
		samples = MIN<int>(numSamples, _remainingSamplesSize - _remainingSamplesPos);
		// 2 bytes per 16-bit sample
		memcpy(buffer, _remainingSamples + _remainingSamplesPos, samples * 2);
		_remainingSamplesPos += samples;

		// Pool fully drained — release it.
		if (_remainingSamplesPos == _remainingSamplesSize) {
			delete[] _remainingSamples;
			_remainingSamples = 0;
		}
	}

	// Decode until we have enough samples (or there's no more left)
	while (samples < numSamples && !endOfData()) {
		NeAACDecFrameInfo frameInfo;
		uint16 *decodedSamples = (uint16 *)NeAACDecDecode(_handle, &frameInfo, _inBuffer + _inBufferPos, _inBufferSize - _inBufferPos);

		if (frameInfo.error != 0)
			error("Failed to decode AAC frame: %s", NeAACDecGetErrorMessage(frameInfo.error));

		int decodedSampleSize = frameInfo.samples;
		// Copy no more than the caller still wants.
		int copySamples = (decodedSampleSize > (numSamples - samples)) ? (numSamples - samples) : decodedSampleSize;

		memcpy(buffer + samples, decodedSamples, copySamples * 2);
		samples += copySamples;

		// Copy leftover samples for use in a later readBuffer() call
		// NOTE(review): this overwrites _remainingSamples without freeing it;
		// relies on the pool having been drained above — confirm this
		// invariant always holds, otherwise the old buffer leaks.
		if (copySamples != decodedSampleSize) {
			_remainingSamplesSize = decodedSampleSize - copySamples;
			_remainingSamples = new int16[_remainingSamplesSize];
			_remainingSamplesPos = 0;
			memcpy(_remainingSamples, decodedSamples + copySamples, _remainingSamplesSize * 2);
		}

		// Advance past the compressed bytes the decoder consumed.
		_inBufferPos += frameInfo.bytesconsumed;
	}

	return samples;
}
// This is kind of a fetch-ahead bool Parser::doesStreamMatchString( std::string str ) { unsigned int i = 0; bool match = true; State state = SaveState(); if( str.length() == 0 ) return false; do { match = ( current == str[i] ); ++i; readChar(); } while( match && !endOfData() && i < str.length() ); if( !match ) RestoreState( state ); return match; }
// Decodes up to numSamples samples into buffer by draining per-packet
// sub-streams and creating the next one on demand.  Returns the number of
// samples written.
int ASFStream::readBuffer(int16 *buffer, const int numSamples) {
	int total = 0;

	while (true) {
		if (_curAudioStream) {
			total += _curAudioStream->readBuffer(buffer + total, numSamples - total);

			// Dispose of a fully drained sub-stream.
			if (_curAudioStream->endOfData()) {
				delete _curAudioStream;
				_curAudioStream = 0;
			}
		}

		if (total == numSamples || endOfData())
			break;

		// Lazily build the next sub-stream from the next packet.
		if (!_curAudioStream)
			_curAudioStream = createAudioStream();
	}

	return total;
}
// Interleaves the member streams into buffer, one frame (one sample per
// channel of every stream) per pass, and returns the sample count written.
int Interleaver::readBuffer(int16 *buffer, const int numSamples) {
	int maxSamples = numSamples;

	int samples = 0;
	// NOTE(review): each pass writes one full frame; this assumes numSamples
	// is a multiple of the total channel count across all streams, otherwise
	// the final frame can run past the requested count — confirm with callers.
	for (; samples < maxSamples; ) {
		if (endOfData())
			break;

		// Read one sample of each channel of each stream
		for (std::vector<AudioStream *>::iterator s = _streams.begin(); s != _streams.end(); ++s) {
			const int channels = (*s)->getChannels();
			const bool success = (*s)->readBuffer(buffer, channels) == channels;
			// On a short read, zero this stream's slot (2 bytes per sample).
			if (!success)
				memset((byte *) buffer, 0, 2 * channels);

			buffer += channels;
			samples += (*s)->getChannels();
		}
	}

	return samples;
}
// Writes the requested amount (or less) of samples into buffer and returns the amount of samples, that got written int Audio3DO_ADP4_Stream::readBuffer(int16 *buffer, const int numSamples) { int8 byteCache[AUDIO_3DO_CACHE_SIZE]; int8 *byteCachePtr = NULL; int byteCacheSize = 0; int requestedBytesLeft = 0; int decodedSamplesCount = 0; int8 compressedByte = 0; if (endOfData()) return 0; // no more bytes left if (_callerDecoderData) { // copy caller decoder data over memcpy(&_curDecoderData, _callerDecoderData, sizeof(_curDecoderData)); if (_initialRead) { _initialRead = false; memcpy(&_initialDecoderData, &_curDecoderData, sizeof(_initialDecoderData)); } } requestedBytesLeft = numSamples >> 1; // 1 byte for 2 16-bit sample if (requestedBytesLeft > _streamBytesLeft) requestedBytesLeft = _streamBytesLeft; // not enough bytes left // in case caller requests an uneven amount of samples, we will return an even amount // buffering, so that direct decoding of files and such runs way faster while (requestedBytesLeft) { if (requestedBytesLeft > AUDIO_3DO_CACHE_SIZE) { byteCacheSize = AUDIO_3DO_CACHE_SIZE; } else { byteCacheSize = requestedBytesLeft; } requestedBytesLeft -= byteCacheSize; _streamBytesLeft -= byteCacheSize; // Fill our byte cache _stream->read(byteCache, byteCacheSize); byteCachePtr = byteCache; // Mono while (byteCacheSize) { compressedByte = *byteCachePtr++; byteCacheSize--; buffer[decodedSamplesCount] = decodeSample(compressedByte >> 4); decodedSamplesCount++; buffer[decodedSamplesCount] = decodeSample(compressedByte & 0x0f); decodedSamplesCount++; } } if (_callerDecoderData) { // copy caller decoder data back memcpy(_callerDecoderData, &_curDecoderData, sizeof(_curDecoderData)); } return decodedSamplesCount; }
// Writes the requested amount (or less) of samples into buffer and returns the amount of samples, that got written.
// SDX2: one compressed byte per 16-bit sample; stereo data alternates
// left/right and keeps a separate predictor per channel.
int Audio3DO_SDX2_Stream::readBuffer(int16 *buffer, const int numSamples) {
	int8 byteCache[AUDIO_3DO_CACHE_SIZE];
	int8 *byteCachePtr = NULL;
	int byteCacheSize = 0;
	int requestedBytesLeft = numSamples; // 1 byte per 16-bit sample
	int decodedSamplesCount = 0;
	int8 compressedByte = 0;
	uint8 squareTableOffset = 0;
	int16 decodedSample = 0;

	if (endOfData())
		return 0; // no more bytes left

	if (_stereo) {
		// We expect numSamples to be even in case of Stereo audio
		assert((numSamples & 1) == 0);
	}

	if (_callerDecoderData) {
		// copy caller decoder data over
		memcpy(&_curDecoderData, _callerDecoderData, sizeof(_curDecoderData));
		if (_initialRead) {
			// Remember the very first decoder state for later rewinds.
			_initialRead = false;
			memcpy(&_initialDecoderData, &_curDecoderData, sizeof(_initialDecoderData));
		}
	}

	requestedBytesLeft = numSamples;
	if (requestedBytesLeft > _streamBytesLeft)
		requestedBytesLeft = _streamBytesLeft; // not enough bytes left

	// buffering, so that direct decoding of files and such runs way faster
	while (requestedBytesLeft) {
		if (requestedBytesLeft > AUDIO_3DO_CACHE_SIZE) {
			byteCacheSize = AUDIO_3DO_CACHE_SIZE;
		} else {
			byteCacheSize = requestedBytesLeft;
		}
		requestedBytesLeft -= byteCacheSize;
		_streamBytesLeft -= byteCacheSize;

		// Fill our byte cache
		_stream->read(byteCache, byteCacheSize);

		byteCachePtr = byteCache;

		if (!_stereo) {
			// Mono
			while (byteCacheSize) {
				compressedByte = *byteCachePtr++;
				byteCacheSize--;
				squareTableOffset = compressedByte + 128;

				// An even compressed byte resets the predictor, so the table
				// value is taken as an absolute sample; odd bytes accumulate.
				if (!(compressedByte & 1))
					_curDecoderData.lastSample1 = 0;

				decodedSample = _curDecoderData.lastSample1 + audio_3DO_SDX2_SquareTable[squareTableOffset];
				_curDecoderData.lastSample1 = decodedSample;

				buffer[decodedSamplesCount] = decodedSample;
				decodedSamplesCount++;
			}
		} else {
			// Stereo: even output indices are channel 1, odd are channel 2.
			while (byteCacheSize) {
				compressedByte = *byteCachePtr++;
				byteCacheSize--;
				squareTableOffset = compressedByte + 128;

				if (!(decodedSamplesCount & 1)) {
					// First channel
					if (!(compressedByte & 1))
						_curDecoderData.lastSample1 = 0;

					decodedSample = _curDecoderData.lastSample1 + audio_3DO_SDX2_SquareTable[squareTableOffset];
					_curDecoderData.lastSample1 = decodedSample;
				} else {
					// Second channel
					if (!(compressedByte & 1))
						_curDecoderData.lastSample2 = 0;

					decodedSample = _curDecoderData.lastSample2 + audio_3DO_SDX2_SquareTable[squareTableOffset];
					_curDecoderData.lastSample2 = decodedSample;
				}

				buffer[decodedSamplesCount] = decodedSample;
				decodedSamplesCount++;
			}
		}
	}

	if (_callerDecoderData) {
		// copy caller decoder data back
		memcpy(_callerDecoderData, &_curDecoderData, sizeof(_curDecoderData));
	}

	return decodedSamplesCount;
}
// XXX Some of this should go into EndOfData() void CGMSKClientThread::resetReceiver() { setState(RXS_LISTENING); endOfData(); }