Example 1
  OutputStream*
  DSAudioDevice::openStream(SampleSource* source) {
    if (!source) {
      return 0;
    }

    ADR_GUARD("DSAudioDevice::openStream");

    int channel_count, sample_rate;
    SampleFormat sample_format;
    source->getFormat(channel_count, sample_rate, sample_format);

    const int frame_size = channel_count * GetSampleSize(sample_format);

    // calculate an ideal buffer size
    const int buffer_length = sample_rate * m_buffer_length / 1000;

    // define the wave format
    WAVEFORMATEX wfx;
    memset(&wfx, 0, sizeof(wfx));
    wfx.wFormatTag      = WAVE_FORMAT_PCM;
    wfx.nChannels       = channel_count;
    wfx.nSamplesPerSec  = sample_rate;
    wfx.nAvgBytesPerSec = sample_rate * frame_size;
    wfx.nBlockAlign     = frame_size;
    wfx.wBitsPerSample  = GetSampleSize(sample_format) * 8;
    wfx.cbSize          = sizeof(wfx);

    DSBUFFERDESC dsbd;
    memset(&dsbd, 0, sizeof(dsbd));
    dsbd.dwSize        = sizeof(dsbd);
    dsbd.dwFlags       = DSBCAPS_GETCURRENTPOSITION2 | DSBCAPS_CTRLPAN |
                         DSBCAPS_CTRLVOLUME | DSBCAPS_CTRLFREQUENCY;
    if (m_global_focus) {
      dsbd.dwFlags |= DSBCAPS_GLOBALFOCUS;
    }
    dsbd.dwBufferBytes = frame_size * buffer_length;
    dsbd.lpwfxFormat   = &wfx;

    // create the DirectSound buffer
    IDirectSoundBuffer* buffer;
    HRESULT result = m_direct_sound->CreateSoundBuffer(&dsbd, &buffer, NULL);
    if (FAILED(result) || !buffer) {
      return 0;
    }

    ADR_LOG("CreateSoundBuffer succeeded");

    // now create the output stream
    DSOutputStream* stream = new DSOutputStream(
      this, buffer, buffer_length, source);

    // add it to the list of streams and return
    SYNCHRONIZED(this);
    m_open_streams.push_back(stream);
    return stream;
  }
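
The buffer sizing above turns a millisecond budget (m_buffer_length) into frames, then bytes. A minimal standalone sketch of the same arithmetic, using illustrative values (44.1 kHz stereo 16-bit PCM, 250 ms) that are assumptions rather than values from the library:

#include <iostream>

int main() {
    const int sample_rate      = 44100; // frames per second
    const int channel_count    = 2;
    const int bytes_per_sample = 2;     // 16-bit PCM
    const int buffer_ms        = 250;   // stands in for m_buffer_length

    const int frame_size    = channel_count * bytes_per_sample; // 4 bytes
    const int buffer_frames = sample_rate * buffer_ms / 1000;   // 11025 frames
    const int buffer_bytes  = buffer_frames * frame_size;       // 44100 bytes

    std::cout << "frame size:    " << frame_size    << " bytes\n"
              << "buffer frames: " << buffer_frames << "\n"
              << "buffer bytes:  " << buffer_bytes  << "\n";
}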
Example 2
void
AudioSegment::WriteTo(nsAudioStream* aOutput)
{
  NS_ASSERTION(mChannels == aOutput->GetChannels(), "Wrong number of channels");
  nsAutoTArray<uint8_t,STATIC_AUDIO_BUFFER_BYTES> buf;
  uint32_t frameSize = GetSampleSize(aOutput->GetFormat())*mChannels;
  for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
    AudioChunk& c = *ci;
    if (frameSize*c.mDuration > PR_UINT32_MAX) {
      NS_ERROR("Buffer overflow");
      return;
    }
    buf.SetLength(int32_t(frameSize*c.mDuration));
    if (c.mBuffer) {
      InterleaveAndConvertBuffer(c.mBuffer->Data(), c.mBufferFormat, c.mBufferLength,
                                 c.mOffset, int32_t(c.mDuration),
                                 c.mVolume,
                                 aOutput->GetChannels(),
                                 buf.Elements(), aOutput->GetFormat());
    } else {
      // Assumes that a bit pattern of zeroes == 0.0f
      memset(buf.Elements(), 0, buf.Length());
    }
    aOutput->Write(buf.Elements(), int32_t(c.mDuration));
  }
}
Example 3
double Robot::GetRangeSample(int n) {
    if ( ranger ) {
        return (*rp)[30 + n];
    } else {
        return (*sp)[GetSampleSize() - n];
    }
}
Example 4
void Robot::UpdateRangeData() {

    for (int i = 0; i < rdata.size(); i++ ) {
        Vector vAvg = (Vector){0.0, 0.0};
        double c = 0.0;

        for ( int j = 0; j < bunchSize; j++ ) {
            double sample = GetRangeSample((i * bunchSize) + j);

            Vector vTemp = (Vector){ ((3.14159 * ((i * bunchSize) + j)) / GetSampleSize()), 
                sample };

            if ( sample > 0.0 ) {
                if ( j == 0 ) {
                    vAvg = vTemp;
                } else {
                    vAvg.direction += vTemp.direction;
                    vAvg.magnitude += vTemp.magnitude;
                }
                c = c + 1.0;
            }
        }

        if ( c > 1.0 ) {
            vAvg.magnitude /= c;
            vAvg.direction /= c;
        }

        rdata[i] = vAvg;
    }
    
}
Example 5
AAFRESULT STDMETHODCALLTYPE
    ImplAAFTimecodeStream::SetPositionTimecode (
      aafPosition_t  position,
      aafTimecode_t  timecode)
{
	aafUInt32		sampleSize, bytesRead, bytesWritten;
	aafUInt8		*packedBuf = NULL;

	XPROTECT()
	{
		CHECK(GetSampleSize(&sampleSize));
		packedBuf = new aafUInt8[sampleSize];
		if(packedBuf == NULL)
			RAISE(AAFRESULT_NOMEMORY);
		CHECK(SetPosition(position * sampleSize));
		(void)Read(sampleSize, packedBuf, &bytesRead);
		CHECK(PackTimecode(&timecode, packedBuf, sampleSize));
		CHECK(SetPosition(position * sampleSize));
		CHECK(Write(sampleSize, packedBuf, &bytesWritten));
		if(sampleSize != bytesWritten)
			RAISE(AAFRESULT_CONTAINERWRITE);
		delete [] packedBuf;
	}
	XEXCEPT
	{
		if(packedBuf != NULL)
			delete [] packedBuf;
	}
	XEND;
	
	return AAFRESULT_SUCCESS;
}
Example 6
u_int64_t MP4Track::GetSampleFileOffset(MP4SampleId sampleId)
{
	u_int32_t stscIndex =
		GetSampleStscIndex(sampleId);

	u_int32_t firstChunk = 
		m_pStscFirstChunkProperty->GetValue(stscIndex);

	MP4SampleId firstSample = 
		m_pStscFirstSampleProperty->GetValue(stscIndex);

	u_int32_t samplesPerChunk = 
		m_pStscSamplesPerChunkProperty->GetValue(stscIndex);

	MP4ChunkId chunkId = firstChunk +
		((sampleId - firstSample) / samplesPerChunk);

	u_int64_t chunkOffset = m_pChunkOffsetProperty->GetValue(chunkId - 1);

	MP4SampleId firstSampleInChunk = 
		sampleId - ((sampleId - firstSample) % samplesPerChunk);

	// need cumulative sample sizes from firstSampleInChunk to sampleId - 1
	u_int32_t sampleOffset = 0;
	for (MP4SampleId i = firstSampleInChunk; i < sampleId; i++) {
		sampleOffset += GetSampleSize(i);
	}

	return chunkOffset + sampleOffset;
}
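
The stsc arithmetic above maps a sample id to its chunk and to the first sample of that chunk. Below is a self-contained sketch of the same mapping over an invented two-run table; the struct and the values are hypothetical, not taken from the MP4 library:

#include <cstdint>
#include <iostream>
#include <vector>

// One stsc run: starting at firstChunk, every chunk holds
// samplesPerChunk samples, and firstSample is the id of the first
// sample covered by the run.
struct StscEntry { uint32_t firstChunk, samplesPerChunk, firstSample; };

int main() {
    std::vector<StscEntry> stsc = {
        {1, 4, 1},  // chunks 1-2: 4 samples each (samples 1-8)
        {3, 2, 9},  // chunks 3+ : 2 samples each (samples 9...)
    };
    const uint32_t sampleId = 10; // 1-based, as in MP4Track

    // find the run containing sampleId (linear scan for clarity)
    size_t idx = 0;
    while (idx + 1 < stsc.size() && stsc[idx + 1].firstSample <= sampleId)
        ++idx;

    const StscEntry& e = stsc[idx];
    uint32_t chunkId = e.firstChunk + (sampleId - e.firstSample) / e.samplesPerChunk;
    uint32_t firstSampleInChunk = sampleId - (sampleId - e.firstSample) % e.samplesPerChunk;

    // sample 10 lands in chunk 3, whose first sample is 9
    std::cout << "sample " << sampleId << " -> chunk " << chunkId
              << ", chunk starts at sample " << firstSampleInChunk << "\n";
}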
Example 7
AAFRESULT STDMETHODCALLTYPE
    ImplAAFTimecodeStream::SetUserDataAtPosition (
      aafPosition_t  position,
      aafInt32  buflen,
      aafDataBuffer_t  buffer)
{
	aafUInt32		sampleSize, bytesRead, bytesWritten;
	aafUInt8		*packedBuf = NULL;

	XPROTECT()
	{
		CHECK(GetSampleSize(&sampleSize));
		packedBuf = new aafUInt8[sampleSize];
		if(packedBuf == NULL)
			RAISE(AAFRESULT_NOMEMORY);
		CHECK(SetPosition(position * sampleSize));
		CHECK(Read(sampleSize, packedBuf, &bytesRead));
		CHECK(PackUserBits(buffer, buflen, packedBuf, sampleSize));
		CHECK(SetPosition(position * sampleSize));
		CHECK(Write(sampleSize, packedBuf, &bytesWritten));
		if(sampleSize != bytesWritten)
			RAISE(AAFRESULT_CONTAINERWRITE);
		delete [] packedBuf;
	}
	XEXCEPT
	{
		if(packedBuf != NULL)
			delete [] packedBuf;
	}
	XEND;
	
	return AAFRESULT_SUCCESS;
}		
Example 8
  int
  AIFFInputStream::doRead(int frame_count, void* buffer) {
    if (m_frames_left_in_chunk == 0) {
      return 0;
    }

    const int frames_to_read = std::min(frame_count, m_frames_left_in_chunk);
    const int frame_size = m_channel_count * GetSampleSize(m_sample_format);
    const int bytes_to_read = frames_to_read * frame_size;
  
    const int read = m_file->read(buffer, bytes_to_read);
    const int frames_read = read / frame_size;

#ifndef WORDS_BIGENDIAN
    if (m_sample_format == SF_S16) {
      // make little endian into host endian
      u8* out = (u8*)buffer;
      for (int i = 0; i < frames_read * m_channel_count; ++i) {
        std::swap(out[0], out[1]);
        out += 2;
      }
    }
#endif

    // assume that if we didn't get a full read, we're done
    if (read != bytes_to_read) {
      m_frames_left_in_chunk = 0;
      return frames_read;
    }

    m_frames_left_in_chunk -= frames_read;
    return frames_read;
  }
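
On little-endian hosts the loop above swaps each 16-bit sample in place, because AIFF stores sample data big-endian. A standalone sketch of that swap over two invented sample values:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    uint8_t buf[] = {0x12, 0x34, 0xAB, 0xCD}; // two big-endian 16-bit samples
    const int sample_count = 2;

    // swap each pair of bytes, exactly as in doRead above
    uint8_t* out = buf;
    for (int i = 0; i < sample_count; ++i) {
        std::swap(out[0], out[1]);
        out += 2;
    }

    // prints 1234 ABCD: same sample values, now little-endian in memory
    std::printf("%02X%02X %02X%02X\n", buf[1], buf[0], buf[3], buf[2]);
}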
Example 9
  int
  FLACInputStream::doRead(int frame_count, void* samples) {
    const int frame_size = m_channel_count * GetSampleSize(m_sample_format);
    u8* out = (u8*)samples;
    
    // we keep reading till we finish topping up!
    int frames_read = 0;
    while (frames_read < frame_count) {
      
      // if the buffer is empty, ask FLAC to fill it up
      if (m_buffer.getSize() < frame_size) {
        if (!FLAC__seekable_stream_decoder_process_single(m_decoder)) {
          return frames_read;
        }

      // if the buffer still doesn't hold a full frame, we are
      // probably at the end of the stream
        if (m_buffer.getSize() < frame_size) {
          return frames_read;
        }
      }

      // read what we've got!
      const int to_read = std::min(
        frame_count - frames_read,
        m_buffer.getSize() / frame_size);
      m_buffer.read(out, to_read * frame_size);
      out += to_read * frame_size;
      frames_read += to_read;
    }

    return frames_read;
  }
Example 10
  bool
  AIFFInputStream::findSoundChunk() {
    ADR_GUARD("AIFFInputStream::findSoundChunk");

    // seek to just after the IFF header
    m_file->seek(12, File::BEGIN);

    // search for a sound chunk
    while (true) {
      u8 chunk_header[8];
      if (m_file->read(chunk_header, 8) != 8) {
        ADR_LOG("Couldn't read SSND chunk header");
        return false;
      }
      u32 chunk_length = read32_be(chunk_header + 4);

      // if we found a data chunk, excellent!
      if (memcmp(chunk_header, "SSND", 4) == 0) {
        ADR_LOG("Found sound chunk");

        u8 chunk_contents[8];
        if (m_file->read(chunk_contents, 8) != 8) {
          ADR_LOG("Couldn't read SSND chunk contents");
          return false;
        }
        if (read32_be(chunk_contents + 0) != 0 ||
            read32_be(chunk_contents + 4) != 0)
        {
          ADR_LOG("Block-aligned AIFF files not supported!");
          return false;
        }

        // calculate the frame size so we can convert the chunk length to frames
        int frame_size = m_channel_count * GetSampleSize(m_sample_format);

        m_data_chunk_location  = m_file->tell();
        m_data_chunk_length    = (chunk_length - 8) / frame_size;
        m_frames_left_in_chunk = m_data_chunk_length;
        return true;

      } else {

        ADR_IF_DEBUG {
          const u8* ci = chunk_header;
          char str[80];
          sprintf(str, "Skipping: %d bytes in chunk '%c%c%c%c'",
                  (int)chunk_length, ci[0], ci[1], ci[2], ci[3]);
          ADR_LOG(str);
        }

        // skip the rest of the chunk
        if (!skipBytes(chunk_length)) {
          // oops, end of stream
          return false;
        }

      }
    }
  }
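
The chunk walk above depends on read32_be to decode the 4-byte big-endian length that follows each 4-byte chunk id. A plausible sketch of such a decoder (the library's actual helper may differ):

#include <cstdint>
#include <cstdio>

// assemble a 32-bit value from four bytes, most significant first
static uint32_t read32_be(const uint8_t* b) {
    return (uint32_t(b[0]) << 24) | (uint32_t(b[1]) << 16) |
           (uint32_t(b[2]) << 8)  |  uint32_t(b[3]);
}

int main() {
    // invented chunk header: id "SSND", length 0x00001008 = 4104 bytes
    const uint8_t header[8] = {'S', 'S', 'N', 'D', 0x00, 0x00, 0x10, 0x08};
    std::printf("chunk '%c%c%c%c', %u bytes\n",
                header[0], header[1], header[2], header[3],
                read32_be(header + 4));
}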
Example 11
SequencePacker::SequencePacker(
    MemoryProviderPtr memoryProvider,
    TransformerPtr transformer,
    size_t minibatchSize,
    const std::vector<StreamDescriptionPtr>& streams) : m_transformer(transformer),
    m_minibatchSize(minibatchSize),
    m_outputStreams(streams),
    m_memoryProvider(memoryProvider)
{
    m_inputStreams = m_transformer->GetStreamDescriptions();
    assert(m_inputStreams.size() == m_outputStreams.size());
    // Currently do not support sparse output.
    // TODO: Will be supported in the future.
    auto sparseOutput = std::find_if(
        m_outputStreams.begin(),
        m_outputStreams.end(),
        [](const StreamDescriptionPtr& s)
        {
            return s->m_storageType == StorageType::sparse_csc;
        });

    if (sparseOutput != m_outputStreams.end())
    {
        RuntimeError("Sparse sequences are currently not supported.");
    }

    assert(m_minibatchSize > 0);
    for (int i = 0; i < m_outputStreams.size(); ++i)
    {
        const auto& stream = m_outputStreams[i];
        UNUSED(stream);

        // Input and output should match in everything except for sparse/dense storage type.
        assert(stream->m_elementType == ElementType::tfloat || stream->m_elementType == ElementType::tdouble);
        assert(stream->m_name == m_inputStreams[i]->m_name);
        assert(stream->m_id == m_inputStreams[i]->m_id);
        assert(GetSampleSize(m_inputStreams[i]) == GetSampleSize(stream));

        m_streamBufferSizes.push_back(0);
        m_streamBuffers.push_back(nullptr);
    }
}
Example 12
/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::ToSampleDescription
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_AudioSampleEntry::ToSampleDescription()
{
    // create a sample description
    return new AP4_GenericAudioSampleDescription(
        m_Type,
        GetSampleRate(),
        GetSampleSize(),
        GetChannelCount(),
        this);
}
Example 13
float Sound::GetLength() const
{
    if (!compressed_)
    {
        if (!frequency_)
            return 0.0f;
        else
            return ((float)dataSize_) / GetSampleSize() / frequency_;
    }
    else
        return compressedLength_;
}
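
For example, assuming 16-bit mono data (so GetSampleSize() returns 2) at a frequency of 44100 Hz, a 176400-byte buffer gives 176400 / 2 / 44100 = 2.0 seconds.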
Example 14
void CaptureSoundDlg::OnButtonRecord() 
{
	if (!m_bSoundCapture)
	{
		m_bSoundCapture = TRUE;

		if (m_fpSoundFile != NULL)
		{
			fclose(m_fpSoundFile);
			m_fpSoundFile = NULL;
		}

		GetDlgItem(IDC_EDIT_SOUNDFILE)->GetWindowText(m_csSoundFile);

		m_fpSoundFile = fopen(m_csSoundFile.GetBuffer(0), "wb");
		m_csSoundFile.ReleaseBuffer();

		if (m_fpSoundFile == NULL)
		{
			MessageBox("Create sound file failed!!");
			return;
		}

		BOOL bRet = PLAY_OpenAudioRecord(AudioCallFunction, m_lBitPerSample, m_lSampleSPerSec, 
			GetSampleSize(m_lBitPerSample, m_lSampleSPerSec), 0, (long)this);
		if (!bRet)
		{
			fclose(m_fpSoundFile);
			m_fpSoundFile = NULL;

			MessageBox("Open sound capture failed!!");
			return;
		}
		
		GetDlgItem(IDC_BUTTON_RECORD)->SetWindowText("stop");
	}
	else
	{
		m_bSoundCapture = FALSE;
		PLAY_CloseAudioRecord();

		if (m_fpSoundFile != NULL)
		{
			fclose(m_fpSoundFile);
			m_fpSoundFile = NULL;
		}

		GetDlgItem(IDC_BUTTON_RECORD)->SetWindowText("Record");
	}
	
}
Example 15
/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::InspectFields
+---------------------------------------------------------------------*/
AP4_Result
AP4_AudioSampleEntry::InspectFields(AP4_AtomInspector& inspector)
{
    // dump the fields from the base class
    AP4_SampleEntry::InspectFields(inspector);

    // fields
    inspector.AddField("channel_count", GetChannelCount());
    inspector.AddField("sample_size", GetSampleSize());
    inspector.AddField("sample_rate", GetSampleRate());
    if (m_QtVersion) {
        inspector.AddField("qt_version", m_QtVersion);
    }
    
    return AP4_SUCCESS;
}
Example 16
/*----------------------------------------------------------------------
|   AP4_MpegAudioSampleEntry::ToSampleDescription
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_MpegAudioSampleEntry::ToSampleDescription()
{
    // find the esds atom
    AP4_EsdsAtom* esds = AP4_DYNAMIC_CAST(AP4_EsdsAtom, GetChild(AP4_ATOM_TYPE_ESDS));
    if (esds == NULL) {
        // check if this is a quicktime style sample description
        if (m_QtVersion > 0) {
            esds = AP4_DYNAMIC_CAST(AP4_EsdsAtom, FindChild("wave/esds"));
        }
    }
    
    // create a sample description
    return new AP4_MpegAudioSampleDescription(GetSampleRate(),
                                              GetSampleSize(),
                                              GetChannelCount(),
                                              esds);
}
Example 17
AAFRESULT STDMETHODCALLTYPE
    ImplAAFTimecodeStream::GetPositionTimecode (
      aafPosition_t  position,
      aafTimecode_t *timecode)
{
	aafUInt32		sampleSize, bytesRead, fps;
	aafUInt8		*buffer = NULL;
	aafRational_t	rate;
	double			floatRate;

	XPROTECT()
	{
		if(timecode == NULL)
			RAISE(AAFRESULT_NULL_PARAM);

		CHECK(GetSampleRate(&rate));
		//!!! Need a better algorithm here
		floatRate = (double)rate.numerator / (double)rate.denominator;
		if((floatRate >= 29.96) && (floatRate <= 30.0))
			fps = 30;
		else
			fps = (aafUInt32)floatRate;
		timecode->fps = (aafUInt16)fps;
		CHECK(GetSampleSize(&sampleSize));
		buffer = new aafUInt8[sampleSize];
		if(buffer == NULL)
			RAISE(AAFRESULT_NOMEMORY);
		CHECK(SetPosition(position * sampleSize));
		CHECK(Read(sampleSize, buffer, &bytesRead));
		CHECK(UnpackTimecode(buffer, sampleSize, fps, timecode));
		delete [] buffer;
	}
	XEXCEPT
	{
		if(buffer != NULL)
			delete [] buffer;
	}
	XEND;
	
	return AAFRESULT_SUCCESS;
}
Example 18
void Sound::SetLoop(unsigned repeatOffset, unsigned endOffset)
{
    if (!compressed_)
    {
        if (repeatOffset > dataSize_)
            repeatOffset = dataSize_;
        if (endOffset > dataSize_)
            endOffset = dataSize_;
        
        // Align repeat and end on sample boundaries
        int sampleSize = GetSampleSize();
        repeatOffset &= -sampleSize;
        endOffset &= -sampleSize;
        
        repeat_ = data_.Get() + repeatOffset;
        end_ = data_.Get() + endOffset;
        looped_ = true;
        
        FixInterpolation();
    }
    else
        looped_ = true;
}
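
The statement repeatOffset &= -sampleSize aligns the offset down to a multiple of sampleSize, which is only valid when the sample size is a power of two (1, 2 or 4 bytes for the usual PCM formats); otherwise offset -= offset % sampleSize would be needed. A short sketch with an assumed 4-byte sample size:

#include <cassert>
#include <iostream>

int main() {
    unsigned offset = 1003;
    const int sampleSize = 4;                     // assumed power of two
    assert((sampleSize & (sampleSize - 1)) == 0); // the trick requires this

    // -4 is ...11111100 in two's complement, so the AND clears the low bits
    unsigned aligned = offset & (unsigned)-sampleSize;
    std::cout << offset << " aligned down to " << aligned << "\n"; // 1000
}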
Example 19
u_int32_t MP4Track::GetChunkSize(MP4ChunkId chunkId)
{
	u_int32_t stscIndex = GetChunkStscIndex(chunkId);

	MP4ChunkId firstChunkId = 
		m_pStscFirstChunkProperty->GetValue(stscIndex);

	MP4SampleId firstSample = 
		m_pStscFirstSampleProperty->GetValue(stscIndex);

	u_int32_t samplesPerChunk = 
		m_pStscSamplesPerChunkProperty->GetValue(stscIndex);

	MP4SampleId firstSampleInChunk = 
		firstSample + ((chunkId - firstChunkId) * samplesPerChunk);

	// need cumulative sizes of samples in chunk 
	u_int32_t chunkSize = 0;
	for (u_int32_t i = 0; i < samplesPerChunk; i++) {
		chunkSize += GetSampleSize(firstSampleInChunk + i);
	}

	return chunkSize;
}
Example 20
u_int32_t MP4Track::GetMaxBitrate()
{
	u_int32_t timeScale = GetTimeScale();
	MP4SampleId numSamples = GetNumberOfSamples();
	u_int32_t maxBytesPerSec = 0;
	u_int32_t bytesThisSec = 0;
	MP4Timestamp thisSec = 0;

	for (MP4SampleId sid = 1; sid <= numSamples; sid++) {
		u_int32_t sampleSize;
		MP4Timestamp sampleTime;

		sampleSize = GetSampleSize(sid);

		GetSampleTimes(sid, &sampleTime, NULL);

		// sample counts for current second
		if (sampleTime < thisSec + timeScale) {
			bytesThisSec += sampleSize;
		} else { // sample is in a future second
			if (bytesThisSec > maxBytesPerSec) {
				maxBytesPerSec = bytesThisSec;
			}

			thisSec = sampleTime - (sampleTime % timeScale);
			bytesThisSec = sampleSize;
		}
	}

	// last second (or partial second) 
	if (bytesThisSec > maxBytesPerSec) {
		maxBytesPerSec = bytesThisSec;
	}

	return maxBytesPerSec * 8;
}
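
The scan above buckets sample bytes into one-second windows, re-anchoring the window at each sample that falls past the current one, and keeps the largest bucket. A compact re-statement over an invented sample table, with an assumed timescale of 1000 units per second:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    const uint32_t timeScale = 1000; // units per second (assumed)
    struct Sample { uint64_t time; uint32_t size; };
    std::vector<Sample> samples = {
        {0, 500}, {400, 700}, {900, 300}, // second 0: 1500 bytes
        {1200, 2000},                     // second 1: 2000 bytes
        {2500, 100},                      // second 2: 100 bytes
    };

    uint32_t maxBytesPerSec = 0, bytesThisSec = 0;
    uint64_t thisSec = 0;
    for (const Sample& s : samples) {
        if (s.time < thisSec + timeScale) {
            bytesThisSec += s.size;       // still in the current second
        } else {
            if (bytesThisSec > maxBytesPerSec) maxBytesPerSec = bytesThisSec;
            thisSec = s.time - (s.time % timeScale);
            bytesThisSec = s.size;
        }
    }
    if (bytesThisSec > maxBytesPerSec) maxBytesPerSec = bytesThisSec;

    std::cout << "max bitrate: " << maxBytesPerSec * 8 << " bits/sec\n"; // 16000
}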
Example 21
  OutputStream*
  DSAudioDevice::openBuffer(
    void* samples, int frame_count,
    int channel_count, int sample_rate, SampleFormat sample_format)
  {
    ADR_GUARD("DSAudioDevice::openBuffer");

    const int frame_size = channel_count * GetSampleSize(sample_format);

    WAVEFORMATEX wfx;
    memset(&wfx, 0, sizeof(wfx));
    wfx.wFormatTag      = WAVE_FORMAT_PCM;
    wfx.nChannels       = channel_count;
    wfx.nSamplesPerSec  = sample_rate;
    wfx.nAvgBytesPerSec = sample_rate * frame_size;
    wfx.nBlockAlign     = frame_size;
    wfx.wBitsPerSample  = GetSampleSize(sample_format) * 8;
    wfx.cbSize          = sizeof(wfx);

    DSBUFFERDESC dsbd;
    memset(&dsbd, 0, sizeof(dsbd));
    dsbd.dwSize  = sizeof(dsbd);
    dsbd.dwFlags = DSBCAPS_GETCURRENTPOSITION2 | DSBCAPS_CTRLPAN |
                   DSBCAPS_CTRLVOLUME | DSBCAPS_CTRLFREQUENCY |
                   DSBCAPS_STATIC | DSBCAPS_CTRLPOSITIONNOTIFY;
    if (m_global_focus) {
      dsbd.dwFlags |= DSBCAPS_GLOBALFOCUS;
    }

    const int buffer_frame_count = std::max(m_min_buffer_length, frame_count);
    const int buffer_size = buffer_frame_count * frame_size;
    dsbd.dwBufferBytes = buffer_size;
    dsbd.lpwfxFormat   = &wfx;

    // create the DS buffer
    IDirectSoundBuffer* buffer;
    HRESULT result = m_direct_sound->CreateSoundBuffer(
      &dsbd, &buffer, NULL);
    if (FAILED(result) || !buffer) {
      return 0;
    }

    ADR_IF_DEBUG {
      DSBCAPS caps;
      caps.dwSize = sizeof(caps);
      result = buffer->GetCaps(&caps);
      if (FAILED(result)) {
        buffer->Release();
        return 0;
      } else {
        std::ostringstream ss;
        ss << "actual buffer size: " << caps.dwBufferBytes << std::endl
           << "buffer_size: " << buffer_size;
        ADR_LOG(ss.str().c_str());
      }
    }

    void* data;
    DWORD data_size;
    result = buffer->Lock(0, buffer_size, &data, &data_size, 0, 0, 0);
    if (FAILED(result)) {
      buffer->Release();
      return 0;
    }

    ADR_IF_DEBUG {
      std::ostringstream ss;
      ss << "buffer size: " << buffer_size << std::endl
         << "data size:   " << data_size << std::endl
         << "frame count: " << frame_count;
      ADR_LOG(ss.str().c_str());
    }

    const int actual_size = frame_count * frame_size;
    memcpy(data, samples, actual_size);
    memset((u8*)data + actual_size, 0, buffer_size - actual_size);

    buffer->Unlock(data, data_size, 0, 0);

    DSOutputBuffer* b = new DSOutputBuffer(
      this, buffer, buffer_frame_count, frame_size);
    SYNCHRONIZED(this);
    m_open_buffers.push_back(b);
    return b;
  }
Example 22
  // Override from AAFSegment
AAFRESULT STDMETHODCALLTYPE
    ImplAAFTimecodeStream::SegmentTCToOffset (/*[in]*/ aafTimecode_t *pTimecode,
      /*[in]*/ aafRational_t *pEditRate,
      /*[out]*/ aafFrameOffset_t *pOffset)
{
	//!!! Is there a better algorithm assuming "mostly increasing"?
	// Since timecode need not be contiguous, or even increasing, we can try to optimize the search,
	// but we may have to search linearly.
	aafPosition_t	testOffset, scanStart;
	aafTimecode_t	baseTimecode, testTimecode;
	aafPosition_t	increment = pTimecode->fps * 10;	// Skip at 10 second intervals
	aafBool			found = kAAFFalse;
	aafInt32		error;
	aafLength_t		len, numSamples;
	aafUInt32		sampleSize;

	XPROTECT()
	{
		if(pEditRate == NULL || pOffset == NULL)
			RAISE(AAFRESULT_NULL_PARAM);
		scanStart = 0;
		CHECK(GetSampleRate(pEditRate));
		CHECK(GetPositionTimecode (0, &baseTimecode));

		// Assume that TC is contiguous, and prove otherwise
		testOffset = pTimecode->startFrame - baseTimecode.startFrame;
		CHECK(GetSize (&len));
		CHECK(GetSampleSize(&sampleSize));
		numSamples = len / sampleSize;
		testTimecode.startFrame = baseTimecode.startFrame;
		if((testOffset >= 0) && (testOffset < numSamples))
		{
			CHECK(GetPositionTimecode (testOffset, &testTimecode));
			if(	(pTimecode->drop == testTimecode.drop) && 
				(pTimecode->fps == testTimecode.fps) && 
				(pTimecode->startFrame == testTimecode.startFrame))
			{
				// TEST: Path #1 through code
				*pOffset = testOffset;
				found = kAAFTrue;
			}
			else
			{
				// See if timecodes are monotonically increasing anywhere near here.
				// See if we can compute an offset from the error in reaching here.
				testOffset += pTimecode->startFrame - testTimecode.startFrame;
				if(testOffset >= 0)
				{
					CHECK(GetPositionTimecode (testOffset, &testTimecode));
					if(	(pTimecode->drop == testTimecode.drop) && 
						(pTimecode->fps == testTimecode.fps) && 
						(pTimecode->startFrame == testTimecode.startFrame))
					{
						// TEST: Path #2 through code
						*pOffset = testOffset;
						found = kAAFTrue;
					}
				}
			}
		}

		
		if(!found)
		{
			// Start at zero, and skip forward at increments, until a timecode is found within
			// (increment) of the goal.
			testOffset = increment;
			scanStart = 0;
			if(testOffset > numSamples)
			{
				// TEST: Path #3 through code
				testOffset = 0;				// It's short, skip the scan phase	
			}
			else
			{
				// TEST: Path #4 through code
				for( ; testOffset < numSamples; testOffset += increment)
				{
					CHECK(GetPositionTimecode (testOffset, &testTimecode));
					error = (aafInt32)(pTimecode->startFrame - testTimecode.startFrame);
					if((error >= -increment) && (error <= increment))
					{
						if(error < 0)
							testOffset -= increment;	// Need to get a running start
						scanStart = testOffset;
						break;
					}
				}

				if(testOffset >= numSamples)
				{
					// TEST: Path #5 through code
					testOffset -= increment;	// Need to get a running start
					scanStart = testOffset;
				}
			}

			// Now finished scanning, start looking at testOffset, incrementing until the end
			for( ; testOffset < numSamples; testOffset++)
			{
				CHECK(GetPositionTimecode (testOffset, &testTimecode));
				if(	(pTimecode->drop == testTimecode.drop) && 
					(pTimecode->fps == testTimecode.fps) && 
					(pTimecode->startFrame == testTimecode.startFrame))
				{
					// TEST: Path #6 through code
					*pOffset = testOffset;
					found = kAAFTrue;
					break;
				}
			}
		}

		if(!found)
		{
			// Still not found: wrap around and scan the region skipped earlier
			for(testOffset = 0; testOffset < scanStart; testOffset++)
			{
				CHECK(GetPositionTimecode (testOffset, &testTimecode));
				if(	(pTimecode->drop == testTimecode.drop) && 
					(pTimecode->fps == testTimecode.fps) && 
					(pTimecode->startFrame == testTimecode.startFrame))
				{
					// TEST: Path #7 through code
					*pOffset = testOffset;
					found = kAAFTrue;
					break;
				}
			}
		}

		if(found == kAAFFalse)
		{
			// TEST: Path #8 through code
			return AAFRESULT_TIMECODE_NOT_FOUND;
		}
	}
	XEXCEPT
	XEND;

	return AAFRESULT_SUCCESS;
}
Example 23
void MP4Track::ReadSample(
	MP4SampleId sampleId,
	u_int8_t** ppBytes, 
	u_int32_t* pNumBytes, 
	MP4Timestamp* pStartTime, 
	MP4Duration* pDuration,
	MP4Duration* pRenderingOffset, 
	bool* pIsSyncSample)
{
	if (sampleId == MP4_INVALID_SAMPLE_ID) {
		throw new MP4Error("sample id can't be zero", 
			"MP4Track::ReadSample");
	}

	// handle unusual case of wanting to read a sample
	// that is still sitting in the write chunk buffer
	if (m_pChunkBuffer && sampleId >= m_writeSampleId - m_chunkSamples) {
		WriteChunkBuffer();
	}

	FILE* pFile = GetSampleFile(sampleId);

	if (pFile == (FILE*)-1) {
		throw new MP4Error("sample is located in an inaccessible file",
			"MP4Track::ReadSample");
	}

	u_int64_t fileOffset = GetSampleFileOffset(sampleId);

	u_int32_t sampleSize = GetSampleSize(sampleId);
	if (*ppBytes != NULL && *pNumBytes < sampleSize) {
		throw new MP4Error("sample buffer is too small",
			 "MP4Track::ReadSample");
	}
	*pNumBytes = sampleSize;

	VERBOSE_READ_SAMPLE(m_pFile->GetVerbosity(),
		printf("ReadSample: track %u id %u offset 0x"LLX" size %u (0x%x)\n",
			m_trackId, sampleId, fileOffset, *pNumBytes, *pNumBytes));

	bool bufferMalloc = false;
	if (*ppBytes == NULL) {
		*ppBytes = (u_int8_t*)MP4Malloc(*pNumBytes);
		bufferMalloc = true;
	}

	u_int64_t oldPos = m_pFile->GetPosition(pFile); // only used in mode == 'w'
	try { 
		m_pFile->SetPosition(fileOffset, pFile);
		m_pFile->ReadBytes(*ppBytes, *pNumBytes, pFile);

		if (pStartTime || pDuration) {
			GetSampleTimes(sampleId, pStartTime, pDuration);

			VERBOSE_READ_SAMPLE(m_pFile->GetVerbosity(),
				printf("ReadSample:  start "LLU" duration "LLD"\n",
					(pStartTime ? *pStartTime : 0), 
					(pDuration ? *pDuration : 0)));
		}
		if (pRenderingOffset) {
			*pRenderingOffset = GetSampleRenderingOffset(sampleId);

			VERBOSE_READ_SAMPLE(m_pFile->GetVerbosity(),
				printf("ReadSample:  renderingOffset "LLD"\n",
					*pRenderingOffset));
		}
		if (pIsSyncSample) {
			*pIsSyncSample = IsSyncSample(sampleId);

			VERBOSE_READ_SAMPLE(m_pFile->GetVerbosity(),
				printf("ReadSample:  isSyncSample %u\n",
					*pIsSyncSample));
		}
	}

	catch (MP4Error* e) {
		if (bufferMalloc) {
			// let's not leak memory
			MP4Free(*ppBytes);
			*ppBytes = NULL;
		}
		if (m_pFile->GetMode() == 'w') {
			m_pFile->SetPosition(oldPos, pFile);
		}
		throw e;
	}

	if (m_pFile->GetMode() == 'w') {
		m_pFile->SetPosition(oldPos, pFile);
	}
}
Example 24
MBLayoutPtr SequencePacker::PackDenseStream(const StreamBatch& batch, size_t streamIndex)
{
    assert(m_outputStreamDescriptions[streamIndex]->m_storageType == StorageType::dense);
    const auto& stream = m_inputStreamDescriptions[streamIndex];
    auto& buffer = m_streamBuffers[m_currentBufferIndex][streamIndex];
    size_t sampleSize = GetSampleSize(m_outputStreamDescriptions[streamIndex]);
    auto pMBLayout = CreateMBLayout(batch);
    size_t requiredSize = pMBLayout->GetNumCols() * sampleSize;
    if (buffer.m_size < requiredSize)
    {
        buffer.Resize(requiredSize);
    }

    auto elementSize = GetSizeByType(stream->m_elementType);

    const auto& sequenceInfos = pMBLayout->GetAllSequences();

    // Iterate over sequences in the layout, copy samples from the
    // source sequences into the buffer (at appropriate offsets).
    for (int i = 0; i < sequenceInfos.size(); ++i)
    {
        const auto& sequenceInfo = sequenceInfos[i];
        // skip gaps
        if (sequenceInfo.seqId == GAP_SEQUENCE_ID)
        {
            continue;
        }

        const auto& sequence = batch[sequenceInfo.seqId];
        size_t numSamples = sequence->m_numberOfSamples;
        assert(numSamples == sequenceInfo.GetNumTimeSteps());

        char* bufferPtr = buffer.m_data.get();
        // Iterate over all samples in the sequence, keep track of the sample offset (which is especially
        // important for sparse input, where offset == number of preceding nnz elements).
        for (size_t sampleIndex = 0, sampleOffset = 0; sampleIndex < numSamples; ++sampleIndex)
        {
            // Compute the offset into the destination buffer, using the layout information 
            // to get the column index corresponding to the given sample.
            auto destinationOffset = pMBLayout->GetColumnIndex(sequenceInfo, sampleIndex) * sampleSize;
            // verify that there's enough space left in the buffer to fit a full sample.
            assert(destinationOffset <= buffer.m_size - sampleSize);
            auto* destination = bufferPtr + destinationOffset;
            if (stream->m_storageType == StorageType::dense)
            {
                // verify the offset invariant for dense input (offset == sampleIndex * sampleSize).
                assert(sampleOffset == sampleIndex * sampleSize);
                PackDenseSample(destination, sequence, sampleOffset, sampleSize);
                sampleOffset += sampleSize;
            }
            else if (stream->m_storageType == StorageType::sparse_csc)
            {
                // TODO: make type casts members of the SparseSequenceData
                SparseSequenceDataPtr sparseSequence = static_pointer_cast<SparseSequenceData>(sequence);
                // make sure that the sequence meta-data is correct.
                assert(numSamples == sparseSequence->m_nnzCounts.size());
                PackSparseSampleAsDense(destination, sparseSequence, sampleIndex, sampleOffset, sampleSize, elementSize);
                // move the offset by nnz count of the sample.
                sampleOffset += sparseSequence->m_nnzCounts[sampleIndex];
                // verify that the offset is within the bounds (less or equal 
                // to the total nnz count of the sequence).
                assert(sampleOffset <= sparseSequence->m_totalNnzCount);
            }
            else
            {
                RuntimeError("Storage type %d is not supported.", (int)stream->m_storageType);
            }
        }
    }

    return pMBLayout;
}
Example 25
StreamMinibatchPtr SequencePacker::PackStreamMinibatch(const std::vector<SequenceDataPtr>& sequences, size_t streamId)
{
    // Create sequence info for each sequence received from the transformer.

    std::vector<MBLayout::SequenceInfo> inputSequences;
    for (size_t index = 0; index < sequences.size(); ++index)
    {
        MBLayout::SequenceInfo info;

        // In each minibatch sequence ids should be unique.
        // They have to match between different input streams in the same minibatch.
        // We are using sequence index in the set of received sequences.
        // TODO: should we use m_key as sequence id and pass it with data?
        info.seqId = index;

        info.tBegin = 0;
        info.tEnd = sequences[index]->m_numberOfSamples;
        inputSequences.push_back(info);
    }

    std::vector<std::pair<size_t, size_t>> placement;
    std::vector<size_t> rowAllocations;

    // Creating the minibatch layout.
    MBLayoutPtr layout = std::make_shared<MBLayout>();
    layout->InitAsPackedSequences(inputSequences, placement, rowAllocations);

    // Allocating necessary data buffer for the stream.
    size_t sampleSize = GetSampleSize(m_inputStreams[streamId]);
    size_t totalNumberOfSamplesInBytes = layout->GetNumCols() * sampleSize;
    if (m_streamBufferSizes[streamId] < totalNumberOfSamplesInBytes)
    {
        m_streamBuffers[streamId] = AllocateBuffer(layout->GetNumCols(), sampleSize);
        m_streamBufferSizes[streamId] = totalNumberOfSamplesInBytes;
    }

    // Packing the actual data.
    StorageType storageType = m_inputStreams[streamId]->m_storageType;
    size_t elementSize = GetSizeByType(m_inputStreams[streamId]->m_elementType);
    const auto& packedSequences = layout->GetAllSequences();
    char* streamBuffer = m_streamBuffers[streamId].get();
    for (const auto& sequence : packedSequences)
    {
        if (sequence.seqId == GAP_SEQUENCE_ID)
            continue;
        const auto& data = sequences[sequence.seqId];

        // Packing the sequence
        for (size_t sampleIndex = 0; sampleIndex < sequence.GetNumTimeSteps(); ++sampleIndex)
        {
            char* destination = streamBuffer + layout->GetColumnIndex(sequence, sampleIndex) * sampleSize;
            if (storageType == StorageType::dense)
            {
                PackDenseSample(destination, data, sampleIndex, elementSize, sampleSize);
            }
            else // sparse
            {
                assert(storageType == StorageType::sparse_csc);
                PackSparseSample(destination, data, sampleIndex, elementSize, sampleSize);
            }
        }
    }

    // Ok, minibatch is ready, give it out.
    StreamMinibatchPtr result = std::make_shared<StreamMinibatch>();
    result->m_data = m_streamBuffers[streamId].get();
    result->m_layout = layout;
    return result;
}
Example 26
 int
 FLACInputStream::getPosition() {
   int bytes_per_frame = m_channel_count * GetSampleSize(m_sample_format);
   return m_position - (m_buffer.getSize() / bytes_per_frame);
 }
Example 27
float BufferedSoundStream::GetBufferLength() const
{
    return (float)GetBufferNumBytes() / (GetFrequency() * (float)GetSampleSize());
}
Example 28
 void
 AIFFInputStream::setPosition(int position) {
   int frame_size = m_channel_count * GetSampleSize(m_sample_format);
   m_frames_left_in_chunk = m_data_chunk_length - position;
   m_file->seek(m_data_chunk_location + position * frame_size, File::BEGIN);
 }