Code Example #1
OSStatus SetMovieAudioExtractionCurrentTime(MovieAudioExtractionRef inSessionRef,
                                            TimeRecord *inTimeRecord)
{
	// Set the current time for the audio extraction session 
	return(MovieAudioExtractionSetProperty( inSessionRef,
                                            kQTPropertyClass_MovieAudioExtraction_Movie,
                                            kQTMovieAudioExtractionMoviePropertyID_CurrentTime,
                                            sizeof(TimeRecord),
                                            (ConstQTPropertyValuePtr)inTimeRecord));
}
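
A minimal usage sketch for the helper above (not part of the original example). It assumes movie is an already-opened Movie and session is a MovieAudioExtractionRef obtained from MovieAudioExtractionBegin; it seeks extraction to the 10-second mark using the movie's own time scale.

// Usage sketch (assumed context: movie is an open Movie, session came
// from MovieAudioExtractionBegin). Position extraction at t = 10 seconds.
TimeRecord tr;
tr.scale    = GetMovieTimeScale(movie);      // movie time units per second
tr.base     = GetMovieTimeBase(movie);       // the movie's time base
SInt64 pos  = (SInt64)10 * tr.scale;         // 10 seconds in movie time units
tr.value.hi = (SInt32)(pos >> 32);
tr.value.lo = (UInt32)(pos & 0xFFFFFFFF);

OSStatus err = SetMovieAudioExtractionCurrentTime(session, &tr);
// err == noErr on success; subsequent FillBuffer calls read from 10 s onward.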
Code Example #2
File: fsp_quicktime.cpp Project: Angeldude/pd
    virtual bool Seek(double pos)
    {
        FLEXT_ASSERT(movie);
        FLEXT_ASSERT(extractionSessionRef != nil);

        QTThread qt(movie);

        TimeRecord timeRec;
        timeRec.scale	= GetMovieTimeScale(movie);
        timeRec.base	= NULL;
        unsigned long long fpos = (long long)(pos*timeRec.scale);
        timeRec.value.hi = int(fpos>>32);
        timeRec.value.lo = int(fpos&((1LL<<32)-1));

        // Set the extraction current time.  The duration will 
        // be determined by how much is pulled.
        OSStatus err = MovieAudioExtractionSetProperty(extractionSessionRef,
                    kQTPropertyClass_MovieAudioExtraction_Movie,
                    kQTMovieAudioExtractionMoviePropertyID_CurrentTime,
                    sizeof(TimeRecord), &timeRec);

        return err == 0;
    }
Code Example #3
// Prepare the specified movie for extraction:
//	Open an extraction session.
//	Set the "All Channels Discrete" property if required.
//	Set the ASBD and output layout, if specified.
//	Set the extraction start time.
// Return the audioExtractionSessionRef.
OSStatus PrepareMovieForExtraction( Movie movie, 
                                    MovieAudioExtractionRef* extractionRefPtr, 
                                    Boolean discrete, 
                                    AudioStreamBasicDescription asbd, 
                                    AudioChannelLayout** layout, 
                                    UInt32* layoutSizePtr, 
                                    TimeRecord startTime)
{
	OSStatus	err = noErr;
	
	if (extractionRefPtr == nil)
	{
		err = paramErr;
		goto bail;
	}
	
	// Movie extraction begin: Open an extraction session
	err = MovieAudioExtractionBegin(movie, 0, extractionRefPtr);
	require(err == noErr, bail);	
	
	// If we need to extract all discrete channels, set that property
	if (discrete)
	{
            err = MovieAudioExtractionSetProperty(*extractionRefPtr,
                                                    kQTPropertyClass_MovieAudioExtraction_Movie,
                                                    kQTMovieAudioExtractionMoviePropertyID_AllChannelsDiscrete,
                                                    sizeof (discrete), 
                                                    &discrete);
            require(err == noErr, bail);	
	}
	// Set the extraction ASBD
	err = MovieAudioExtractionSetProperty(*extractionRefPtr,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                            sizeof (asbd), &asbd);
	require(err == noErr, bail);	

	// Set the output layout, if supplied
	if (*layout)
	{
		err = MovieAudioExtractionSetProperty(*extractionRefPtr,
                                                kQTPropertyClass_MovieAudioExtraction_Audio,
                                                kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                                *layoutSizePtr, *layout);
                require(err == noErr, bail);	
	}

	// Set the extraction start time.  The duration will be determined by how much is pulled.
	err = MovieAudioExtractionSetProperty(*extractionRefPtr,
                                            kQTPropertyClass_MovieAudioExtraction_Movie,
                                            kQTMovieAudioExtractionMoviePropertyID_CurrentTime,
                                            sizeof(TimeRecord), &startTime);

bail:
	// If error, close the extraction session
	if (err != noErr)
	{
		if (*extractionRefPtr != nil)
			MovieAudioExtractionEnd(*extractionRefPtr);
	}	
	return err;
}
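
A hedged sketch of how PrepareMovieForExtraction above could be driven, assuming movie is an open Movie and asbd already describes the desired LPCM output format. No explicit channel layout is supplied and extraction starts at the beginning of the movie; the amount of audio extracted is then determined by how much is pulled with MovieAudioExtractionFillBuffer.

// Usage sketch (assumed context: movie is an open Movie, asbd describes the
// desired LPCM output). Start extraction at time zero with no explicit layout.
MovieAudioExtractionRef  session    = nil;
AudioChannelLayout      *layout     = NULL;   // no output layout supplied
UInt32                   layoutSize = 0;
Boolean                  discrete   = false;  // keep the default channel mixdown

TimeRecord start;
start.scale    = GetMovieTimeScale(movie);
start.base     = GetMovieTimeBase(movie);
start.value.hi = 0;                           // begin at the start of the movie
start.value.lo = 0;

OSStatus err = PrepareMovieForExtraction(movie, &session, discrete,
                                         asbd, &layout, &layoutSize, start);
if (err == noErr)
{
    // ... pull audio with MovieAudioExtractionFillBuffer(session, ...) ...
    MovieAudioExtractionEnd(session);
}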
Code Example #4
File: ImportQT.cpp Project: finefin/audacity
ProgressResult QTImportFileHandle::Import(TrackFactory *trackFactory,
                               TrackHolders &outTracks,
                               Tags *tags)
{
   outTracks.clear();

   OSErr err = noErr;
   MovieAudioExtractionRef maer = NULL;
   auto updateResult = ProgressResult::Success;
   auto totSamples =
      (sampleCount) GetMovieDuration(mMovie); // convert from TimeValue
   decltype(totSamples) numSamples = 0;
   Boolean discrete = true;
   UInt32 quality = kQTAudioRenderQuality_Max;
   AudioStreamBasicDescription desc;
   UInt32 maxSampleSize;
   bool res = false;

   auto cleanup = finally( [&] {
      if (maer) {
         MovieAudioExtractionEnd(maer);
      }
   } );

   CreateProgress();

   do
   {
      err = MovieAudioExtractionBegin(mMovie, 0, &maer);
      if (err != noErr) {
         AudacityMessageBox(_("Unable to start QuickTime extraction"));
         break;
      }
   
      err = MovieAudioExtractionSetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTMovieAudioExtractionAudioPropertyID_RenderQuality,
                                            sizeof(quality),
                                            &quality);
      if (err != noErr) {
         AudacityMessageBox(_("Unable to set QuickTime render quality"));
         break;
      }
   
      err = MovieAudioExtractionSetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Movie,
                                            kQTMovieAudioExtractionMoviePropertyID_AllChannelsDiscrete,
                                            sizeof(discrete),
                                            &discrete);
      if (err != noErr) {
         AudacityMessageBox(_("Unable to set QuickTime discrete channels property"));
         break;
      }
   
      err = MovieAudioExtractionGetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTAudioPropertyID_MaxAudioSampleSize,
                                            sizeof(maxSampleSize),
                                            &maxSampleSize,
                                            NULL);
      if (err != noErr) {
         AudacityMessageBox(_("Unable to get QuickTime sample size property"));
         break;
      }
   
      err = MovieAudioExtractionGetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                            sizeof(desc),
                                            &desc,
                                            NULL);
      if (err != noErr) {
         AudacityMessageBox(_("Unable to retrieve stream description"));
         break;
      }
   
      auto numchan = desc.mChannelsPerFrame;
      const size_t bufsize = 5 * desc.mSampleRate;
   
      // determine sample format
      sampleFormat format;
      switch (maxSampleSize)
      {
         case 16:
            format = int16Sample;
            break;
   
         case 24:
            format = int24Sample;
            break;
   
         default:
            format = floatSample;
            break;
      }

      // Allocate an array of pointers, whose size is not known statically,
      // and prefixed with the AudioBufferList structure.
      MallocPtr< AudioBufferList > abl{
         static_cast< AudioBufferList * >(
            calloc( 1, offsetof( AudioBufferList, mBuffers ) +
               (sizeof(AudioBuffer) * numchan))) };
      abl->mNumberBuffers = numchan;
   
      TrackHolders channels{ numchan };

      const auto size = sizeof(float) * bufsize;
      ArraysOf<unsigned char> holders{ numchan, size };
      for (size_t c = 0; c < numchan; c++) {
         auto &buffer = abl->mBuffers[c];
         auto &holder = holders[c];
         auto &channel = channels[c];

         buffer.mNumberChannels = 1;
         buffer.mDataByteSize = size;

         buffer.mData = holder.get();
   
         channel = trackFactory->NewWaveTrack( format );
         channel->SetRate( desc.mSampleRate );
   
         if (numchan == 2) {
            if (c == 0) {
               channel->SetChannel(Track::LeftChannel);
               channel->SetLinked(true);
            }
            else if (c == 1) {
               channel->SetChannel(Track::RightChannel);
            }
         }
      }
   
      do {
         UInt32 flags = 0;
         UInt32 numFrames = bufsize;
   
         err = MovieAudioExtractionFillBuffer(maer,
                                              &numFrames,
                                              abl.get(),
                                              &flags);
         if (err != noErr) {
            AudacityMessageBox(_("Unable to get fill buffer"));
            break;
         }
   
         for (size_t c = 0; c < numchan; c++) {
            channels[c]->Append((char *) abl->mBuffers[c].mData, floatSample, numFrames);
         }
   
         numSamples += numFrames;
   
         updateResult = mProgress->Update(
            numSamples.as_long_long(),
            totSamples.as_long_long() );
   
         if (numFrames == 0 || flags & kQTMovieAudioExtractionComplete) {
            break;
         }
      } while (updateResult == ProgressResult::Success);
   
      res = (updateResult == ProgressResult::Success && err == noErr);
   
      if (res) {
         for (const auto &channel: channels) {
            channel->Flush();
         }
   
         outTracks.swap(channels);
      }

      //
      // Extract any metadata
      //
      if (res) {
         AddMetadata(tags);
      }
   } while (false);

// done:

   return (res ? ProgressResult::Success : ProgressResult::Failed);
}
Code Example #5
File: qtsplitter.c Project: Kelimion/wine
static DWORD WINAPI QTSplitter_thread(LPVOID data)
{
    QTSplitter *This = (QTSplitter *)data;
    HRESULT hr = S_OK;
    TimeValue next_time;
    CVPixelBufferRef pixelBuffer = NULL;
    OSStatus err;
    TimeRecord tr;

    WaitForSingleObject(This->runEvent, -1);

    EnterCriticalSection(&This->csReceive);
    if (!This->pQTMovie)
    {
        LeaveCriticalSection(&This->csReceive);
        return 0;
    }

    This->state = State_Running;
    /* Prime the pump:  Needed for MPEG streams */
    GetMovieNextInterestingTime(This->pQTMovie, nextTimeEdgeOK | nextTimeStep, 0, NULL, This->movie_time, 1, &next_time, NULL);

    GetMovieTime(This->pQTMovie, &tr);

    if (This->pAudio_Pin)
        QT_Create_Extract_Session(This);

    LeaveCriticalSection(&This->csReceive);

    do
    {
        LONGLONG tStart=0, tStop=0;
        LONGLONG mStart=0, mStop=0;
        float time;

        EnterCriticalSection(&This->csReceive);
        if (!This->pQTMovie)
        {
            LeaveCriticalSection(&This->csReceive);
            return 0;
        }

        GetMovieNextInterestingTime(This->pQTMovie, nextTimeStep, 0, NULL, This->movie_time, 1, &next_time, NULL);

        if (next_time == -1)
        {
            TRACE("No next time\n");
            LeaveCriticalSection(&This->csReceive);
            break;
        }

        tr.value = SInt64ToWide(next_time);
        SetMovieTime(This->pQTMovie, &tr);
        MoviesTask(This->pQTMovie,0);
        QTVisualContextTask(This->vContext);

        TRACE("In loop at time %ld\n",This->movie_time);
        TRACE("In Next time %ld\n",next_time);

        mStart = This->movie_time;
        mStop = next_time;

        time = (float)(This->movie_time - This->movie_start) / This->movie_scale;
        tStart = time * 10000000;
        time = (float)(next_time - This->movie_start) / This->movie_scale;
        tStop = time * 10000000;

        /* Deliver Audio */
        if (This->pAudio_Pin && This->pAudio_Pin->pin.pin.pConnectedTo && This->aSession)
        {
            int data_size=0;
            BYTE* ptr;
            IMediaSample *sample = NULL;
            AudioBufferList aData;
            UInt32 flags;
            UInt32 frames;
            WAVEFORMATEX* pvi;
            float duration;

            pvi = (WAVEFORMATEX*)This->pAudio_Pin->pmt->pbFormat;

            hr = BaseOutputPinImpl_GetDeliveryBuffer(&This->pAudio_Pin->pin, &sample, NULL, NULL, 0);

            if (FAILED(hr))
            {
                ERR("Audio: Unable to get delivery buffer (%x)\n", hr);
                goto audio_error;
            }

            hr = IMediaSample_GetPointer(sample, &ptr);
            if (FAILED(hr))
            {
                ERR("Audio: Unable to get pointer to buffer (%x)\n", hr);
                goto audio_error;
            }

            duration = (float)next_time / This->movie_scale;
            time = (float)This->movie_time / This->movie_scale;
            duration -= time;
            frames = pvi->nSamplesPerSec * duration;
            TRACE("Need audio for %f seconds (%li frames)\n",duration,frames);

            data_size = IMediaSample_GetSize(sample);
            if (data_size < frames * pvi->nBlockAlign)
                FIXME("Audio buffer is too small\n");

            aData.mNumberBuffers = 1;
            aData.mBuffers[0].mNumberChannels = pvi->nChannels;
            aData.mBuffers[0].mDataByteSize = data_size;
            aData.mBuffers[0].mData = ptr;

            err = MovieAudioExtractionFillBuffer(This->aSession, &frames, &aData, &flags);
            if (frames == 0)
            {
                TimeRecord etr;

                /* Ran out of frames, Restart the extraction session */
                TRACE("Restarting extraction session\n");
                MovieAudioExtractionEnd(This->aSession);
                This->aSession = NULL;
                QT_Create_Extract_Session(This);

                etr = tr;
                etr.value = SInt64ToWide(This->movie_time);
                MovieAudioExtractionSetProperty(This->aSession,
                    kQTPropertyClass_MovieAudioExtraction_Movie,
                    kQTMovieAudioExtractionMoviePropertyID_CurrentTime,
                    sizeof(TimeRecord), &etr );

                frames = pvi->nSamplesPerSec * duration;
                aData.mNumberBuffers = 1;
                aData.mBuffers[0].mNumberChannels = pvi->nChannels;
                aData.mBuffers[0].mDataByteSize = data_size;
                aData.mBuffers[0].mData = ptr;

                MovieAudioExtractionFillBuffer(This->aSession, &frames, &aData, &flags);
            }

            TRACE("Got %i frames\n",(int)frames);

            IMediaSample_SetActualDataLength(sample, frames * pvi->nBlockAlign);

            IMediaSample_SetMediaTime(sample, &mStart, &mStop);
            IMediaSample_SetTime(sample, &tStart, &tStop);

            hr = OutputQueue_Receive(This->pAudio_Pin->queue, sample);
            TRACE("Audio Delivered (%x)\n",hr);

audio_error:
            if (sample)
                IMediaSample_Release(sample);
        }
        else
            TRACE("Audio Pin not connected or no Audio\n");

        /* Deliver Video */
        if (This->pVideo_Pin && QTVisualContextIsNewImageAvailable(This->vContext,0))
        {
            err = QTVisualContextCopyImageForTime(This->vContext, NULL, NULL, &pixelBuffer);
            if (err == noErr)
            {
                int data_size=0;
                BYTE* ptr;
                IMediaSample *sample = NULL;

                hr = BaseOutputPinImpl_GetDeliveryBuffer(&This->pVideo_Pin->pin, &sample, NULL, NULL, 0);
                if (FAILED(hr))
                {
                    ERR("Video: Unable to get delivery buffer (%x)\n", hr);
                    goto video_error;
                }

                data_size = IMediaSample_GetSize(sample);
                if (data_size < This->outputSize)
                {
                    ERR("Sample size is too small %d < %d\n", data_size, This->outputSize)
    ;
                    hr = E_FAIL;
                    goto video_error;
                }

                hr = IMediaSample_GetPointer(sample, &ptr);
                if (FAILED(hr))
                {
                    ERR("Video: Unable to get pointer to buffer (%x)\n", hr);
                    goto video_error;
                }

                hr = AccessPixelBufferPixels( pixelBuffer, ptr);
                if (FAILED(hr))
                {
                    ERR("Failed to access Pixels\n");
                    goto video_error;
                }

                IMediaSample_SetActualDataLength(sample, This->outputSize);

                IMediaSample_SetMediaTime(sample, &mStart, &mStop);
                IMediaSample_SetTime(sample, &tStart, &tStop);

                hr = OutputQueue_Receive(This->pVideo_Pin->queue, sample);
                TRACE("Video Delivered (%x)\n",hr);

    video_error:
                if (sample)
                    IMediaSample_Release(sample);
                if (pixelBuffer)
                    CVPixelBufferRelease(pixelBuffer);
            }
        }
        else
            TRACE("No video to deliver\n");

        This->movie_time = next_time;
        LeaveCriticalSection(&This->csReceive);
    } while (hr == S_OK);

    This->state = State_Stopped;
    if (This->pAudio_Pin)
        OutputQueue_EOS(This->pAudio_Pin->queue);
    if (This->pVideo_Pin)
        OutputQueue_EOS(This->pVideo_Pin->queue);

    return hr;
}
Code Example #6
File: qtsplitter.c Project: Kelimion/wine
static OSErr QT_Create_Extract_Session(QTSplitter *filter)
{
    AudioStreamBasicDescription aDesc;
    OSErr err;
    WAVEFORMATEX* pvi;

    pvi = (WAVEFORMATEX*)filter->pAudio_Pin->pmt->pbFormat;

    err = MovieAudioExtractionBegin(filter->pQTMovie, 0, &filter->aSession);
    if (err != noErr)
    {
        ERR("Failed to begin Extraction session %i\n",err);
        return err;
    }

    err = MovieAudioExtractionGetProperty(filter->aSession,
            kQTPropertyClass_MovieAudioExtraction_Audio,
            kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
            sizeof(AudioStreamBasicDescription), &aDesc, NULL);

    if (err != noErr)
    {
        MovieAudioExtractionEnd(filter->aSession);
        filter->aSession = NULL;
        ERR("Failed to get session description %i\n",err);
        return err;
    }

    aDesc.mFormatID = kAudioFormatLinearPCM;
    aDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger +
        kAudioFormatFlagIsPacked;
    aDesc.mFramesPerPacket = 1;
    aDesc.mChannelsPerFrame = pvi->nChannels;
    aDesc.mBitsPerChannel = pvi->wBitsPerSample;
    aDesc.mSampleRate = pvi->nSamplesPerSec;
    aDesc.mBytesPerFrame = (aDesc.mBitsPerChannel * aDesc.mChannelsPerFrame) / 8;
    aDesc.mBytesPerPacket = aDesc.mBytesPerFrame * aDesc.mFramesPerPacket;

    err = MovieAudioExtractionSetProperty(filter->aSession,
        kQTPropertyClass_MovieAudioExtraction_Audio,
        kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
        sizeof(AudioStreamBasicDescription), &aDesc);

    if (aDesc.mFormatID != kAudioFormatLinearPCM)
    {
        ERR("Not PCM Wave\n");
        err = -1;
    }
    if (aDesc.mFormatFlags != kLinearPCMFormatFlagIsSignedInteger +
        kAudioFormatFlagIsPacked)
    {
        ERR("Unhandled Flags\n");
        err = -1;
    }
    if (aDesc.mFramesPerPacket != 1)
    {
        ERR("Unhandled Frames per packet %li\n",aDesc.mFramesPerPacket);
        err = -1;
    }
    if (aDesc.mChannelsPerFrame != pvi->nChannels)
    {
        ERR("Unhandled channel count %li\n",aDesc.mChannelsPerFrame);
        err = -1;
    }
    if (aDesc.mBitsPerChannel != pvi->wBitsPerSample)
    {
        ERR("Unhandled bits per channel %li\n",aDesc.mBitsPerChannel);
        err = -1;
    }
    if (aDesc.mSampleRate != pvi->nSamplesPerSec)
    {
        ERR("Unhandled sample rate %f\n",aDesc.mSampleRate);
        err = -1;
    }

    if (err != noErr)
    {
        ERR("Failed to create Extraction Session\n");
        MovieAudioExtractionEnd(filter->aSession);
        filter->aSession = NULL;
    }

    return err;
}
Code Example #7
    QTAudioReader (InputStream* const input_, const int trackNum_)
        : AudioFormatReader (input_, TRANS (quickTimeFormatName)),
          ok (false),
          movie (0),
          trackNum (trackNum_),
          lastSampleRead (0),
          lastThreadId (0),
          extractor (0),
          dataHandle (0)
    {
        JUCE_AUTORELEASEPOOL
        bufferList.calloc (256, 1);

       #if JUCE_WINDOWS
        if (InitializeQTML (0) != noErr)
            return;
       #endif

        if (EnterMovies() != noErr)
            return;

        bool opened = juce_OpenQuickTimeMovieFromStream (input_, movie, dataHandle);

        if (! opened)
            return;

        {
            const int numTracks = GetMovieTrackCount (movie);
            int trackCount = 0;

            for (int i = 1; i <= numTracks; ++i)
            {
                track = GetMovieIndTrack (movie, i);
                media = GetTrackMedia (track);

                OSType mediaType;
                GetMediaHandlerDescription (media, &mediaType, 0, 0);

                if (mediaType == SoundMediaType
                     && trackCount++ == trackNum_)
                {
                    ok = true;
                    break;
                }
            }
        }

        if (! ok)
            return;

        ok = false;

        lengthInSamples = GetMediaDecodeDuration (media);
        usesFloatingPointData = false;

        samplesPerFrame = (int) (GetMediaDecodeDuration (media) / GetMediaSampleCount (media));

        trackUnitsPerFrame = GetMovieTimeScale (movie) * samplesPerFrame
                                / GetMediaTimeScale (media);

        OSStatus err = MovieAudioExtractionBegin (movie, 0, &extractor);

        unsigned long output_layout_size;
        err = MovieAudioExtractionGetPropertyInfo (extractor,
                                                   kQTPropertyClass_MovieAudioExtraction_Audio,
                                                   kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                                   0, &output_layout_size, 0);
        if (err != noErr)
            return;

        HeapBlock <AudioChannelLayout> qt_audio_channel_layout;
        qt_audio_channel_layout.calloc (output_layout_size, 1);

        err = MovieAudioExtractionGetProperty (extractor,
                                               kQTPropertyClass_MovieAudioExtraction_Audio,
                                               kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                               output_layout_size, qt_audio_channel_layout, 0);

        qt_audio_channel_layout[0].mChannelLayoutTag = kAudioChannelLayoutTag_Stereo;

        err = MovieAudioExtractionSetProperty (extractor,
                                               kQTPropertyClass_MovieAudioExtraction_Audio,
                                               kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                               output_layout_size,
                                               qt_audio_channel_layout);

        err = MovieAudioExtractionGetProperty (extractor,
                                               kQTPropertyClass_MovieAudioExtraction_Audio,
                                               kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                               sizeof (inputStreamDesc),
                                               &inputStreamDesc, 0);
        if (err != noErr)
            return;

        inputStreamDesc.mFormatFlags = kAudioFormatFlagIsSignedInteger
                                        | kAudioFormatFlagIsPacked
                                        | kAudioFormatFlagsNativeEndian;
        inputStreamDesc.mBitsPerChannel = sizeof (SInt16) * 8;
        inputStreamDesc.mChannelsPerFrame = jmin ((UInt32) 2, inputStreamDesc.mChannelsPerFrame);
        inputStreamDesc.mBytesPerFrame = sizeof (SInt16) * inputStreamDesc.mChannelsPerFrame;
        inputStreamDesc.mBytesPerPacket = inputStreamDesc.mBytesPerFrame;

        err = MovieAudioExtractionSetProperty (extractor,
                                               kQTPropertyClass_MovieAudioExtraction_Audio,
                                               kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                               sizeof (inputStreamDesc),
                                               &inputStreamDesc);
        if (err != noErr)
            return;

        Boolean allChannelsDiscrete = false;
        err = MovieAudioExtractionSetProperty (extractor,
                                               kQTPropertyClass_MovieAudioExtraction_Movie,
                                               kQTMovieAudioExtractionMoviePropertyID_AllChannelsDiscrete,
                                               sizeof (allChannelsDiscrete),
                                               &allChannelsDiscrete);

        if (err != noErr)
            return;

        bufferList->mNumberBuffers = 1;
        bufferList->mBuffers[0].mNumberChannels = inputStreamDesc.mChannelsPerFrame;
        bufferList->mBuffers[0].mDataByteSize =  jmax ((UInt32) 4096, (UInt32) (samplesPerFrame * inputStreamDesc.mBytesPerFrame) + 16);

        dataBuffer.malloc (bufferList->mBuffers[0].mDataByteSize);
        bufferList->mBuffers[0].mData = dataBuffer;

        sampleRate = inputStreamDesc.mSampleRate;
        bitsPerSample = 16;
        numChannels = inputStreamDesc.mChannelsPerFrame;

        detachThread();
        ok = true;
    }
Code Example #8
    bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
                      int64 startSampleInFile, int numSamples)
    {
        JUCE_AUTORELEASEPOOL
        checkThreadIsAttached();
        bool ok = true;

        while (numSamples > 0)
        {
            if (lastSampleRead != startSampleInFile)
            {
                TimeRecord time;
                time.scale = (TimeScale) inputStreamDesc.mSampleRate;
                time.base = 0;
                time.value.hi = 0;
                time.value.lo = (UInt32) startSampleInFile;

                OSStatus err = MovieAudioExtractionSetProperty (extractor,
                                                                kQTPropertyClass_MovieAudioExtraction_Movie,
                                                                kQTMovieAudioExtractionMoviePropertyID_CurrentTime,
                                                                sizeof (time), &time);

                if (err != noErr)
                {
                    ok = false;
                    break;
                }
            }

            int framesToDo = jmin (numSamples, (int) (bufferList->mBuffers[0].mDataByteSize / inputStreamDesc.mBytesPerFrame));
            bufferList->mBuffers[0].mDataByteSize = inputStreamDesc.mBytesPerFrame * framesToDo;

            UInt32 outFlags = 0;
            UInt32 actualNumFrames = framesToDo;
            OSStatus err = MovieAudioExtractionFillBuffer (extractor, &actualNumFrames, bufferList, &outFlags);
            if (err != noErr)
            {
                ok = false;
                break;
            }

            lastSampleRead = startSampleInFile + actualNumFrames;
            const int samplesReceived = actualNumFrames;

            for (int j = numDestChannels; --j >= 0;)
            {
                if (destSamples[j] != nullptr)
                {
                    const short* src = ((const short*) bufferList->mBuffers[0].mData) + j;

                    for (int i = 0; i < samplesReceived; ++i)
                    {
                        destSamples[j][startOffsetInDestBuffer + i] = (*src << 16);
                        src += numChannels;
                    }
                }
            }

            startOffsetInDestBuffer += samplesReceived;
            startSampleInFile += samplesReceived;
            numSamples -= samplesReceived;

            if (((outFlags & kQTMovieAudioExtractionComplete) != 0 || samplesReceived == 0) && numSamples > 0)
            {
                for (int j = numDestChannels; --j >= 0;)
                    if (destSamples[j] != nullptr)
                        zeromem (destSamples[j] + startOffsetInDestBuffer, sizeof (int) * numSamples);

                break;
            }
        }

        detachThread();
        return ok;
    }
Code Example #9
File: ImportQT.cpp Project: ScorpioJonesy/audacity
int QTImportFileHandle::Import(TrackFactory *trackFactory,
                               Track ***outTracks,
                               int *outNumTracks,
                               Tags *tags)
{
   OSErr err = noErr;
   MovieAudioExtractionRef maer = NULL;
   int updateResult = eProgressSuccess;
   sampleCount totSamples = (sampleCount) GetMovieDuration(mMovie);
   sampleCount numSamples = 0;
   Boolean discrete = true;
   UInt32 quality = kQTAudioRenderQuality_Max;
   AudioStreamBasicDescription desc;
   UInt32 maxSampleSize;
   UInt32 numchan;
   UInt32 bufsize;
   bool res = false;

   CreateProgress();

   do
   {
      err = MovieAudioExtractionBegin(mMovie, 0, &maer);
      if (err != noErr) {
         wxMessageBox(_("Unable to start QuickTime extraction"));
         break;
      }
   
      err = MovieAudioExtractionSetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTMovieAudioExtractionAudioPropertyID_RenderQuality,
                                            sizeof(quality),
                                            &quality);
      if (err != noErr) {
         wxMessageBox(_("Unable to set QuickTime render quality"));
         break;
      }
   
      err = MovieAudioExtractionSetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Movie,
                                            kQTMovieAudioExtractionMoviePropertyID_AllChannelsDiscrete,
                                            sizeof(discrete),
                                            &discrete);
      if (err != noErr) {
         wxMessageBox(_("Unable to set QuickTime discrete channels property"));
         break;
      }
   
      err = MovieAudioExtractionGetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTAudioPropertyID_MaxAudioSampleSize,
                                            sizeof(maxSampleSize),
                                            &maxSampleSize,
                                            NULL);
      if (err != noErr) {
         wxMessageBox(_("Unable to get QuickTime sample size property"));
         break;
      }
   
      err = MovieAudioExtractionGetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                            sizeof(desc),
                                            &desc,
                                            NULL);
      if (err != noErr) {
         wxMessageBox(_("Unable to retrieve stream description"));
         break;
      }
   
      numchan = desc.mChannelsPerFrame;
      bufsize = 5 * desc.mSampleRate;
   
      // determine sample format
      sampleFormat format;
      switch (maxSampleSize)
      {
         case 16:
            format = int16Sample;
            break;
   
         case 24:
            format = int24Sample;
            break;
   
         default:
            format = floatSample;
            break;
      }
   
      AudioBufferList *abl = (AudioBufferList *)
         calloc(1, offsetof(AudioBufferList, mBuffers) + (sizeof(AudioBuffer) * numchan));
      abl->mNumberBuffers = numchan;
   
      WaveTrack **channels = new WaveTrack *[numchan];
   
      int c;
      for (c = 0; c < numchan; c++) {
         abl->mBuffers[c].mNumberChannels = 1;
         abl->mBuffers[c].mDataByteSize = sizeof(float) * bufsize;
         abl->mBuffers[c].mData = malloc(abl->mBuffers[c].mDataByteSize);
   
         channels[c] = trackFactory->NewWaveTrack(format);
         channels[c]->SetRate(desc.mSampleRate);
   
         if (numchan == 2) {
            if (c == 0) {
               channels[c]->SetChannel(Track::LeftChannel);
               channels[c]->SetLinked(true);
            }
            else if (c == 1) {
               channels[c]->SetChannel(Track::RightChannel);
            }
         }
      }
   
      do {
         UInt32 flags = 0;
         UInt32 numFrames = bufsize;
   
         err = MovieAudioExtractionFillBuffer(maer,
                                              &numFrames,
                                              abl,
                                              &flags);
         if (err != noErr) {
            wxMessageBox(_("Unable to get fill buffer"));
            break;
         }
   
         for (c = 0; c < numchan; c++) {
            channels[c]->Append((char *) abl->mBuffers[c].mData, floatSample, numFrames);
         }
   
         numSamples += numFrames;
   
         updateResult = mProgress->Update((wxULongLong_t)numSamples,
                                          (wxULongLong_t)totSamples);
   
         if (numFrames == 0 || flags & kQTMovieAudioExtractionComplete) {
            break;
         }
      } while (updateResult == eProgressSuccess);
   
      res = (updateResult == eProgressSuccess && err == noErr);
   
      if (res) {
         for (c = 0; c < numchan; c++) {
            channels[c]->Flush();
         }
   
         *outTracks = (Track **) channels;
         *outNumTracks = numchan;
      }
      else {
         for (c = 0; c < numchan; c++) {
            delete channels[c];
         }
   
         delete [] channels;
      }
   
      for (c = 0; c < numchan; c++) {
         free(abl->mBuffers[c].mData);
      }
      free(abl);
   
      //
      // Extract any metadata
      //
      if (res) {
         AddMetadata(tags);
      }
   } while (false);

done:

   if (maer) {
      MovieAudioExtractionEnd(maer);
   }

   return (res ? eProgressSuccess : eProgressFailed);
}
Code Example #10
QuickTimeFileReader::QuickTimeFileReader(FileSource source,
                                         DecodeMode decodeMode,
                                         CacheMode mode,
                                         size_t targetRate,
                                         ProgressReporter *reporter) :
    CodedAudioFileReader(mode, targetRate),
    m_source(source),
    m_path(source.getLocalFilename()),
    m_d(new D),
    m_reporter(reporter),
    m_cancelled(false),
    m_completion(0),
    m_decodeThread(0)
{
    m_channelCount = 0;
    m_fileRate = 0;

    Profiler profiler("QuickTimeFileReader::QuickTimeFileReader", true);

SVDEBUG << "QuickTimeFileReader: path is \"" << m_path << "\"" << endl;

    long QTversion;

#ifdef WIN32
    InitializeQTML(0); // FIXME should check QT version
#else
    m_d->err = Gestalt(gestaltQuickTime,&QTversion);
    if ((m_d->err != noErr) || (QTversion < 0x07000000)) {
        m_error = QString("Failed to find compatible version of QuickTime (version 7 or above required)");
        return;
    }
#endif 

    EnterMovies();
	
    Handle dataRef; 
    OSType dataRefType;

//    CFStringRef URLString = CFStringCreateWithCString
 //       (0, m_path.toLocal8Bit().data(), 0);


    QByteArray ba = m_path.toLocal8Bit();

    CFURLRef url = CFURLCreateFromFileSystemRepresentation
        (kCFAllocatorDefault,
         (const UInt8 *)ba.data(),
         (CFIndex)ba.length(),
         false);


//    m_d->err = QTNewDataReferenceFromURLCFString
    m_d->err = QTNewDataReferenceFromCFURL
        (url, 0, &dataRef, &dataRefType);

    if (m_d->err) { 
        m_error = QString("Error creating data reference for QuickTime decoder: code %1").arg(m_d->err);
        return;
    }
    
    short fileID = movieInDataForkResID; 
    short flags = 0; 
    m_d->err = NewMovieFromDataRef
        (&m_d->movie, flags, &fileID, dataRef, dataRefType);

    DisposeHandle(dataRef);
    if (m_d->err) { 
        m_error = QString("Error creating new movie for QuickTime decoder: code %1").arg(m_d->err); 
        return;
    }

    Boolean isProtected = 0;
    Track aTrack = GetMovieIndTrackType
        (m_d->movie, 1, SoundMediaType,
         movieTrackMediaType | movieTrackEnabledOnly);

    if (aTrack) {
        Media aMedia = GetTrackMedia(aTrack);	// get the track media
        if (aMedia) {
            MediaHandler mh = GetMediaHandler(aMedia);	// get the media handler we can query
            if (mh) {
                m_d->err = QTGetComponentProperty(mh,
                                                  kQTPropertyClass_DRM,
                                                  kQTDRMPropertyID_IsProtected,
                                                  sizeof(Boolean), &isProtected,nil);
            } else {
                m_d->err = 1;
            }
        } else {
            m_d->err = 1;
        }
    } else {
        m_d->err = 1;
    }
	
    if (m_d->err && m_d->err != kQTPropertyNotSupportedErr) { 
        m_error = QString("Error checking for DRM in QuickTime decoder: code %1").arg(m_d->err);
        return;
    } else if (!m_d->err && isProtected) { 
        m_error = QString("File is protected with DRM");
        return;
    } else if (m_d->err == kQTPropertyNotSupportedErr && !isProtected) {
        std::cerr << "QuickTime: File is not protected with DRM" << std::endl;
    }

    if (m_d->movie) {
        SetMovieActive(m_d->movie, TRUE);
        m_d->err = GetMoviesError();
        if (m_d->err) {
            m_error = QString("Error in QuickTime decoder activation: code %1").arg(m_d->err);
            return;
        }
    } else {
	m_error = QString("Error in QuickTime decoder: Movie object not valid");
	return;
    }
    
    m_d->err = MovieAudioExtractionBegin
        (m_d->movie, 0, &m_d->extractionSessionRef);
    if (m_d->err) {
        m_error = QString("Error in QuickTime decoder extraction init: code %1").arg(m_d->err);
        return;
    }

    m_d->err = MovieAudioExtractionGetProperty
        (m_d->extractionSessionRef,
         kQTPropertyClass_MovieAudioExtraction_Audio, kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
         sizeof(m_d->asbd),
         &m_d->asbd,
         nil);

    if (m_d->err) {
        m_error = QString("Error in QuickTime decoder property get: code %1").arg(m_d->err);
        return;
    }
	
    m_channelCount = m_d->asbd.mChannelsPerFrame;
    m_fileRate = m_d->asbd.mSampleRate;

    std::cerr << "QuickTime: " << m_channelCount << " channels, " << m_fileRate << " kHz" << std::endl;

    m_d->asbd.mFormatFlags =
        kAudioFormatFlagIsFloat |
        kAudioFormatFlagIsPacked |
        kAudioFormatFlagsNativeEndian;
    m_d->asbd.mBitsPerChannel = sizeof(float) * 8;
    m_d->asbd.mBytesPerFrame = sizeof(float) * m_d->asbd.mChannelsPerFrame;
    m_d->asbd.mBytesPerPacket = m_d->asbd.mBytesPerFrame;
	
    m_d->err = MovieAudioExtractionSetProperty
        (m_d->extractionSessionRef,
         kQTPropertyClass_MovieAudioExtraction_Audio,
         kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
         sizeof(m_d->asbd),
         &m_d->asbd);

    if (m_d->err) {
        m_error = QString("Error in QuickTime decoder property set: code %1").arg(m_d->err);
        m_channelCount = 0;
        return;
    }
    m_d->buffer.mNumberBuffers = 1;
    m_d->buffer.mBuffers[0].mNumberChannels = m_channelCount;
    m_d->buffer.mBuffers[0].mDataByteSize =
        sizeof(float) * m_channelCount * m_d->blockSize;
    m_d->data = new float[m_channelCount * m_d->blockSize];
    m_d->buffer.mBuffers[0].mData = m_d->data;

    initialiseDecodeCache();

    if (decodeMode == DecodeAtOnce) {

        if (m_reporter) {
            connect(m_reporter, SIGNAL(cancelled()), this, SLOT(cancelled()));
            m_reporter->setMessage
                (tr("Decoding %1...").arg(QFileInfo(m_path).fileName()));
        }

        while (1) {
            
            UInt32 framesRead = m_d->blockSize;
            UInt32 extractionFlags = 0;
            m_d->err = MovieAudioExtractionFillBuffer
                (m_d->extractionSessionRef, &framesRead, &m_d->buffer,
                 &extractionFlags);
            if (m_d->err) {
                m_error = QString("Error in QuickTime decoding: code %1")
                    .arg(m_d->err);
                break;
            }

            //!!! progress?

//    std::cerr << "Read " << framesRead << " frames (block size " << m_d->blockSize << ")" << std::endl;

            // QuickTime buffers are interleaved unless specified otherwise
            addSamplesToDecodeCache(m_d->data, framesRead);

            if (framesRead < m_d->blockSize) break;
        }
        
        finishDecodeCache();
        endSerialised();

        m_d->err = MovieAudioExtractionEnd(m_d->extractionSessionRef);
        if (m_d->err) {
            m_error = QString("Error ending QuickTime extraction session: code %1").arg(m_d->err);
        }

        m_completion = 100;

    } else {
        if (m_reporter) m_reporter->setProgress(100);

        if (m_channelCount > 0) {
            m_decodeThread = new DecodeThread(this);
            m_decodeThread->start();
        }
    }

    std::cerr << "QuickTimeFileReader::QuickTimeFileReader: frame count is now " << getFrameCount() << ", error is \"\"" << m_error << "\"" << std::endl;
}