/*
 * Returns the ImageDescriptionHandle for the track wrapped by `obj`,
 * or NULL when the track is not a video track.
 *
 * Raises eQuickTime on allocation failure or Movie Toolbox error.
 * On success the caller owns the returned handle and must DisposeHandle() it.
 */
static ImageDescriptionHandle track_image_description(VALUE obj)
{
  OSErr osErr;
  SampleDescriptionHandle sample_description;

  /* restrict reporting to video tracks */
  if (track_get_media_type(obj) != VideoMediaType)
    return NULL;

  sample_description = (SampleDescriptionHandle)NewHandle(sizeof(SampleDescription));
  /* NewHandle() returns NULL on failure; LMGetMemErr() carries the reason. */
  if (sample_description == NULL || LMGetMemErr() != noErr) {
    rb_raise(eQuickTime, "Memory Error %d when determining image description",
             LMGetMemErr());
    return NULL; /* not reached: rb_raise() longjmps and does not return */
  }

  GetMediaSampleDescription(TRACK_MEDIA(obj), 1, sample_description);
  osErr = GetMoviesError();
  if (osErr != noErr) {
    /* BUG FIX: dispose BEFORE raising. rb_raise() never returns, so the
     * original's DisposeHandle() after it was dead code and leaked the
     * handle on every error. */
    DisposeHandle((Handle)sample_description);
    rb_raise(eQuickTime, "Movie Error %d when determining image description", osErr);
    return NULL; /* not reached */
  }

  return (ImageDescriptionHandle)sample_description;
}
/*
 * Opens a 3gp/QuickTime movie for reading and caches the video track's
 * dimensions, depth and frame rate in the reader's TImageInfo.
 *
 * Failure modes set m_IOError (QTNotInstalled / QTUnableToOpenFile)
 * and return early; the object is then unusable for reading.
 */
TLevelReader3gp::TLevelReader3gp(const TFilePath &path)
	: TLevelReader(path)
	, m_IOError(QTNoError)
	, m_track(0)
	, m_movie(0)
	, m_depth(0)
// ,m_timeScale(0)
{
	FSSpec fspec;
	QDErr err;
	Boolean dataRefWasChanged;

	if (QuickTimeStuff::instance()->getStatus() != noErr) {
		m_IOError = QTNotInstalled;
		return;
	}

	// BUG FIX: the original did `const char *pStr = toString(...).c_str();`,
	// taking a pointer into a temporary std::string that is destroyed at the
	// end of the statement -- pStr dangled for the two calls below. Keep the
	// string alive in a named local for as long as the pointer is used.
	const std::string posixPath = toString(m_path.getWideString());
	const char *pStr = posixPath.c_str();
	FSMakeFSSpec(0, 0, (const unsigned char *)pStr, &fspec);
	getFSSpecFromPosixPath(pStr, &fspec, false);
	pStr = 0;

	if ((err = OpenMovieFile(&fspec, &m_refNum, fsRdPerm))) {
		m_IOError = QTUnableToOpenFile;
		return;
	}

	m_resId = 0;
	Str255 name;
	err = NewMovieFromFile(&m_movie, m_refNum, &m_resId, name, fsRdPerm, &dataRefWasChanged);

	int numTracks = GetMovieTrackCount(m_movie);
	assert(numTracks == 1 || numTracks == 2);

	m_track = GetMovieIndTrackType(m_movie, 1, VideoMediaType, movieTrackMediaType);
	//m_track=GetMovieTrack(m_movie,numTracks);

	// Read width/height/depth out of the first sample's ImageDescription.
	ImageDescriptionHandle imageH;
	imageH = (ImageDescriptionHandle)NewHandleClear(sizeof(ImageDescription));
	TINT32 index = 1;
	Media theMedia = GetTrackMedia(m_track);
	GetMediaSampleDescription(theMedia, index, (SampleDescriptionHandle)imageH);
	ImageDescriptionPtr imagePtr = *imageH;
	m_lx = imagePtr->width;
	m_ly = imagePtr->height;
	m_depth = imagePtr->depth;

	m_info = new TImageInfo();
	m_info->m_lx = m_lx;
	m_info->m_ly = m_ly;
	Tiio::MovWriterProperties *prop = new Tiio::MovWriterProperties();
	m_info->m_properties = prop;

	DisposeHandle((Handle)imageH);
	m_info->m_frameRate = GetMediaTimeScale(theMedia);
}
/*
 * Allocates a QuickTime pixel-buffer visual context for HAP-encoded movies
 * and records the video track's HAP variant in mCodec.
 *
 * NOTE(review): assumes getObj()->mMovie is already open and valid — confirm
 * against the call site.
 */
void MovieGlHap::allocateVisualContext()
{
	// Load HAP Movie
	if( HapQTQuickTimeMovieHasHapTrackPlayable( getObj()->mMovie ) ) {
		// QT Visual Context attributes
		OSStatus err = noErr;
		QTVisualContextRef * visualContext = (QTVisualContextRef*)&getObj()->mVisualContext;
		// Pixel-buffer attributes suitable for HAP decoding; we own this
		// dictionary until the CFRelease below.
		CFDictionaryRef pixelBufferOptions = HapQTCreateCVPixelBufferOptionsDictionary();

		const CFStringRef keys[] = { kQTVisualContextPixelBufferAttributesKey };
		CFDictionaryRef visualContextOptions = ::CFDictionaryCreate(kCFAllocatorDefault, (const void**)&keys, (const void**)&pixelBufferOptions, sizeof(keys)/sizeof(keys[0]), &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
		err = QTPixelBufferContextCreate( kCFAllocatorDefault, visualContextOptions, visualContext );
		// The context retains what it needs; release our references whether
		// or not creation succeeded.
		::CFRelease( pixelBufferOptions );
		::CFRelease( visualContextOptions );

		if( err != noErr ) {
			CI_LOG_E( "HAP ERROR :: " << err << " couldnt create visual context." );
			return;
		}
		// Set the movie's visual context
		err = SetMovieVisualContext( getObj()->mMovie, *visualContext );
		if( err != noErr ) {
			CI_LOG_E( "HAP ERROR :: " << err << " SetMovieVisualContext." );
			return;
		}
	}

	// Get codec name
	for (long i = 1; i <= GetMovieTrackCount(getObj()->mMovie); i++) {
		Track track = GetMovieIndTrack(getObj()->mMovie, i);
		Media media = GetTrackMedia(track);
		OSType mediaType;
		GetMediaHandlerDescription(media, &mediaType, NULL, NULL);
		if (mediaType == VideoMediaType) {
			// Get the codec-type of this track
			ImageDescriptionHandle imageDescription = (ImageDescriptionHandle)NewHandle(0); // GetMediaSampleDescription will resize it
			GetMediaSampleDescription(media, 1, (SampleDescriptionHandle)imageDescription);
			OSType codecType = (*imageDescription)->cType;
			DisposeHandle((Handle)imageDescription);
			// Map the four-char codec code to our Codec enum.
			switch (codecType) {
				case 'Hap1': mCodec = Codec::HAP;         break;
				case 'Hap5': mCodec = Codec::HAP_A;       break;
				case 'HapY': mCodec = Codec::HAP_Q;       break;
				default:     mCodec = Codec::UNSUPPORTED; break;
			}
		}
	}

	// Set framerate callback
	this->setNewFrameCallback( updateMovieFPS, (void*)this );
}
/*
 * Creates the audio output pin for a QuickTime sound track.
 *
 * Reads the track's SoundDescription to build a PCM WAVEFORMATEX,
 * clamping obviously bogus values to CD-audio defaults, then registers
 * the pin via QT_AddPin().  Returns the HRESULT of pin creation.
 */
static HRESULT QT_Process_Audio_Track(QTSplitter* filter, Track trk)
{
    AM_MEDIA_TYPE amt;
    WAVEFORMATEX* pvi;
    PIN_INFO piOutput;
    HRESULT hr = S_OK;
    static const WCHAR szwAudioOut[] = {'A','u','d','i','o',0};
    Media audioMedia;

    SoundDescriptionHandle aDesc = (SoundDescriptionHandle) NewHandle(sizeof(SoundDescription));

    audioMedia = GetTrackMedia(trk);
    GetMediaSampleDescription(audioMedia, 1, (SampleDescriptionHandle)aDesc);

    ZeroMemory(&amt, sizeof(amt));
    amt.formattype = FORMAT_WaveFormatEx;
    amt.majortype = MEDIATYPE_Audio;
    amt.subtype = MEDIASUBTYPE_PCM;
    amt.bTemporalCompression = 0;

    amt.cbFormat = sizeof(WAVEFORMATEX);
    amt.pbFormat = CoTaskMemAlloc(amt.cbFormat);
    ZeroMemory(amt.pbFormat, amt.cbFormat);
    pvi = (WAVEFORMATEX*)amt.pbFormat;

    pvi->cbSize = sizeof(WAVEFORMATEX);
    pvi->wFormatTag = WAVE_FORMAT_PCM;
    pvi->nChannels = ((SoundDescription)**aDesc).numChannels;
    if (pvi->nChannels < 1 || pvi->nChannels > 2)
        pvi->nChannels = 2;
    /* sampleRate is 16.16 fixed point; the integer part is the rate in Hz. */
    pvi->nSamplesPerSec = (((SoundDescription)**aDesc).sampleRate/65536);
    /* BUG FIX: the upper-bound check compared nChannels against 48000,
     * which (after the clamp above) is always false, so absurd sample
     * rates slipped through.  Validate nSamplesPerSec itself. */
    if (pvi->nSamplesPerSec < 8000 || pvi->nSamplesPerSec > 48000)
        pvi->nSamplesPerSec = 44100;
    pvi->wBitsPerSample = ((SoundDescription)**aDesc).sampleSize;
    if (pvi->wBitsPerSample < 8 || pvi->wBitsPerSample > 32)
        pvi->wBitsPerSample = 16;
    pvi->nBlockAlign = (pvi->nChannels * pvi->wBitsPerSample) / 8;
    pvi->nAvgBytesPerSec = pvi->nSamplesPerSec * pvi->nBlockAlign;

    DisposeHandle((Handle)aDesc);

    piOutput.dir = PINDIR_OUTPUT;
    piOutput.pFilter = &filter->filter.IBaseFilter_iface;
    lstrcpyW(piOutput.achName,szwAudioOut);

    hr = QT_AddPin(filter, &piOutput, &amt, FALSE);
    if (FAILED(hr))
        ERR("Failed to add Audio Track\n");
    else
        TRACE("Audio Pin %p\n",filter->pAudio_Pin);
    return hr;
}
int main( int argc, char **argv ) { Movie movie; Track track; Media media; short refNum; short resID = 0; Boolean wasChanged; OSErr err = noErr; FSSpec fsspec; AudioFormatAtomPtr outAudioAtom; CmpSoundHeader outSoundInfo; SoundComponentData theInputFormat, theOutputFormat; SoundConverter mySoundConverter = NULL; // SCFillBufferData scFillBufferData = { NULL }; Ptr pDecomBuffer0 = NULL, pDecomBuffer1 = NULL; long kMaxOutputBuffer = 64 * 1024; long noFrames = 0, niFrames = 0, noBytes = 0, noSamples = 0; #define MAX_BUFFER_SIZE 256 * 1024 * 1024 /** Initialise MovieToolbox */ EnterMovies(); /** Open the movie file from the first argument */ printf( "opening audio file: '%s'\n", argv[1] ); path2fss( &fsspec, argv[1] ); err = OpenMovieFile( &fsspec, &refNum, fsRdPerm ); if ( err != noErr ) { printf( "failed to open audio: %d\n", GetMoviesError() ); exit( -1 ); } /** Instantiate the movie */ err = NewMovieFromFile( &movie, refNum, &resID, NULL, newMovieActive, &wasChanged ); if ( err ) { printf( "failed to instantiate movie\n" ); exit( -1 ); } CloseMovieFile( refNum ); refNum = 0; /** Get the first sound track */ track = GetMovieIndTrackType( movie, 1, SoundMediaType, movieTrackMediaType ); if ( track == NULL ) { printf( "failed to get sound track\n" ); exit( -1 ); } /** Get the sound track media */ media = GetTrackMedia( track ); if ( media == NULL ) { printf( "failed to get media from audio track\n" ); exit( -1 ); } Size size; Handle extension; SoundDescriptionHandle sourceSoundDescription; sourceSoundDescription = (SoundDescriptionHandle)NewHandle(0); /** Get the description of the sample data */ GetMediaSampleDescription( media, 1, (SampleDescriptionHandle)sourceSoundDescription ); err = GetMoviesError(); if ( err ) { printf( "failed to get description of sample data\n" ); exit( -1 ); } extension = NewHandle( 0 ); // get the "magic" decompression atom // This extension to the SoundDescription information stores // data specific to a given audio decompressor. 
Some audio // decompression algorithms require a set of out-of-stream // values to configure the decompressor. err = GetSoundDescriptionExtension( (SoundDescriptionHandle)sourceSoundDescription, &extension, siDecompressionParams ); if ( noErr == err ) { size = GetHandleSize( extension ); printf( "transferring data to audio buffer: %d bytes\n", size ); HLock( extension ); outAudioAtom = (AudioFormatAtom*)NewPtr( size ); err = MemError(); // copy the atom data to our buffer... BlockMoveData( *extension, outAudioAtom, size ); HUnlock( extension ); } else { // if it doesn't have an atom, that's ok outAudioAtom = NULL; err = noErr; } /** Setup our sound header */ outSoundInfo.format = (*sourceSoundDescription)->dataFormat; outSoundInfo.numChannels = (*sourceSoundDescription)->numChannels; outSoundInfo.sampleSize = (*sourceSoundDescription)->sampleSize; outSoundInfo.sampleRate = (*sourceSoundDescription)->sampleRate; outSoundInfo.compressionID = (*sourceSoundDescription)->compressionID; float db = ((float)outSoundInfo.sampleRate)/(1<<16); printf( "sample: %d\tchannels: %d\tsample size: %d\tsample rate: %f\tcompressionID: %d\n", outSoundInfo.format, outSoundInfo.numChannels, outSoundInfo.sampleSize, db, outSoundInfo.compressionID ); DisposeHandle( extension ); DisposeHandle( (Handle)sourceSoundDescription ); /** * Now that we've figured out what the audio file is, allocate buffers * and so on for conversion and playback */ printf( "initialising input/output conversion buffers\n" ); /** setup input/output format for sound converter */ theInputFormat.flags = 0; theInputFormat.format = outSoundInfo.format; theInputFormat.numChannels = outSoundInfo.numChannels; theInputFormat.sampleSize = outSoundInfo.sampleSize; theInputFormat.sampleRate = outSoundInfo. 
sampleRate; theInputFormat.sampleCount = 0; theInputFormat.buffer = NULL; theInputFormat.reserved = 0; theOutputFormat.flags = 0; theOutputFormat.format = kSoundNotCompressed; theOutputFormat.numChannels = theInputFormat.numChannels; theOutputFormat.sampleSize = theInputFormat.sampleSize; theOutputFormat.sampleRate = theInputFormat.sampleRate; theOutputFormat.sampleCount = 0; theOutputFormat.buffer = NULL; theOutputFormat.reserved = 0; // variableCompression means we're going to use the commonFrameSize field and the kExtendedSoundCommonFrameSizeValid flag // scFillBufferData.isSourceVBR = (outSoundInfo.compressionID == variableCompression ); err = SoundConverterOpen( &theInputFormat, &theOutputFormat, &mySoundConverter ); if ( err != noErr ) { printf( "failed to open sound converter\n" ); exit( -1 ); } else { printf( "opened sound converter ok\n" ); } // this isn't crucial or even required for decompression only, but it does tell // the sound converter that we're cool with VBR audio Ptr tptr = NewPtr( 1 ); tptr[0] = 1; SoundConverterSetInfo( mySoundConverter, siClientAcceptsVBR, tptr ); free( tptr ); /** * Set up the sound converters decompresson 'environment' by passing * in the 'magic' decompression atom */ err = SoundConverterSetInfo( mySoundConverter, siDecompressionParams, outAudioAtom ); if ( err != noErr ) { printf( "failed to set sound converter info\n" ); exit( -1 ); } else { printf( "set sound converter info ok\n" ); } if ( outAudioAtom ) { DisposePtr( (Ptr)outAudioAtom ); } if ( siUnknownInfoType == err ) { // clear this error, the decompressor didn't // need the decompression atom and that's OK err = noErr; } else { // BailErr(err); } /** * The input buffer has to be large enough so GetMediaSample isn't * going to fail, your mileage may vary */ Handle inputBuffer = NewHandle( MAX_BUFFER_SIZE ); // HLock( inputBuffer ); /** Start the sound conversion */ err = SoundConverterBeginConversion(mySoundConverter); // BailErr(err); /** Extract compressed audio 
from media track */ TimeValue tperSample = 0; err = GetMediaSample( media, inputBuffer, 0, &noBytes, 0, NULL, &tperSample, NULL, NULL, 0, &noSamples, NULL ); if ( err != noErr ) { printf( "failed to fetch media sample data: %d\n", GetMoviesError() ); exit( -1 ); } else { printf( "media sample: %d (%d) bytes / %ld samples / %d per sample\n", noBytes, GetHandleSize( inputBuffer ), noSamples, tperSample ); } unsigned long niBytes = 0; SoundConverterGetBufferSizes( mySoundConverter, noBytes * noSamples, &niFrames, &niBytes, &noBytes ); printf( "buffer sizes: frames: %d\tibytes: %d\tobytes: %d\n", niFrames, niBytes, noBytes ); /** Convert into uncompressed audio */ Ptr outputBuffer = NewPtr( noBytes * 1.2 ); SoundConverterConvertBuffer( mySoundConverter, inputBuffer, noSamples /* niFrames */, outputBuffer, &noFrames, &noBytes ); printf( "converted: %d frames / %d bytes\n", noFrames, noBytes ); /** Shutdown the sound converter */ err = SoundConverterEndConversion( mySoundConverter, outputBuffer, &noFrames, &noBytes ); printf( "converted final: %d frames / %d bytes\n", noFrames, noBytes ); // HUnlock( inputBuffer ); /** We now should have decompressed audio for the input file */ /** * So, generate visuals using a sliding sample grid at the * given framerate */ /** Create a new movie clip with audio and video tracks */ /** PROJECTM CRAP HERE -- stuff frames into QuickTime */ /** Close movie file */ /** Shutdown MovieToolbox */ ExitMovies(); return 0; }
bool QTImportFileHandle::Import(TrackFactory *trackFactory, Track ***outTracks, int *outNumTracks) { OSErr err = noErr; // // Determine the file format. // // GetMediaSampleDescription takes a SampleDescriptionHandle, but apparently // if the media is a sound (which presumably we know it is) then it will treat // it as a SoundDescriptionHandle (which in addition to the format of single // samples, also tells you sample rate, number of channels, etc.) // Pretty messed up interface, if you ask me. SoundDescriptionHandle soundDescription = (SoundDescriptionHandle)NewHandle(0); GetMediaSampleDescription(mMedia, 1, (SampleDescriptionHandle)soundDescription); // If this is a compressed format, it may have out-of-stream compression // parameters that need to be passed to the sound converter. We retrieve // these in the form of an audio atom. To do this, however we have to // get the data by way of a handle, then copy it manually from the handle to // the atom. These interfaces get worse all the time! Handle decompressionParamsHandle = NewHandle(0); AudioFormatAtomPtr decompressionParamsAtom = NULL; err = GetSoundDescriptionExtension(soundDescription, &decompressionParamsHandle, siDecompressionParams); if(err == noErr) { // this stream has decompression parameters. copy from the handle to the atom. int paramsSize = GetHandleSize(decompressionParamsHandle); HLock(decompressionParamsHandle); decompressionParamsAtom = (AudioFormatAtomPtr)NewPtr(paramsSize); //err = MemError(); BlockMoveData(*decompressionParamsHandle, decompressionParamsAtom, paramsSize); HUnlock(decompressionParamsHandle); } if(decompressionParamsHandle) DisposeHandle(decompressionParamsHandle); // // Now we set up a sound converter to decompress the data if it is compressed. 
// SoundComponentData inputFormat; SoundComponentData outputFormat; SoundConverter soundConverter = NULL; inputFormat.flags = outputFormat.flags = 0; inputFormat.sampleCount = outputFormat.sampleCount = 0; inputFormat.reserved = outputFormat.reserved = 0; inputFormat.buffer = outputFormat.buffer = NULL; inputFormat.numChannels = outputFormat.numChannels = (*soundDescription)->numChannels; inputFormat.sampleSize = outputFormat.sampleSize = (*soundDescription)->sampleSize; inputFormat.sampleRate = outputFormat.sampleRate = (*soundDescription)->sampleRate; inputFormat.format = (*soundDescription)->dataFormat; outputFormat.format = kSoundNotCompressed; err = SoundConverterOpen(&inputFormat, &outputFormat, &soundConverter); // // Create the Audacity WaveTracks to house the new data // *outNumTracks = outputFormat.numChannels; WaveTrack **channels = new WaveTrack *[*outNumTracks]; // determine sample format sampleFormat format; int bytesPerSample; // TODO: do we know for sure that 24 and 32 bit samples are the same kind // of 24 and 32 bit samples we expect? switch(outputFormat.sampleSize) { case 16: format = int16Sample; bytesPerSample = 2; break; case 24: format = int24Sample; bytesPerSample = 3; break; case 32: format = floatSample; bytesPerSample = 4; break; default: printf("I can't import a %d-bit file!\n", outputFormat.sampleSize); return false; } int c; for (c = 0; c < *outNumTracks; c++) { channels[c] = trackFactory->NewWaveTrack(format); channels[c]->SetRate(outputFormat.sampleRate / 65536.0); if(*outNumTracks == 2) { if(c == 0) { channels[c]->SetChannel(Track::LeftChannel); channels[c]->SetLinked(true); } else if(c == 1) { channels[c]->SetChannel(Track::RightChannel); } } } // // Give the converter the decompression atom. 
// // (judging from the sample code, it's OK if the atom is NULL, which // it will be if there was no decompression information) err = SoundConverterSetInfo(soundConverter, siDecompressionParams, decompressionParamsAtom); if(err == siUnknownInfoType) { // the decompressor didn't need the decompression atom, but that's ok. err = noErr; } // Tell the converter we're cool with VBR audio SoundConverterSetInfo(soundConverter, siClientAcceptsVBR, Ptr(true)); // // Determine buffer sizes and allocate output buffer // int inputBufferSize = 655360; int outputBufferSize = 524288; char *outputBuffer = new char[outputBufferSize]; // // Populate the structure of data that is passed to the callback // CallbackData cbData; memset(&cbData.compData, 0, sizeof(ExtendedSoundComponentData)); cbData.isSourceVBR = ((*soundDescription)->compressionID == variableCompression); cbData.sourceMedia = mMedia; cbData.getMediaAtThisTime = 0; cbData.sourceDuration = GetMediaDuration(mMedia); cbData.isThereMoreSource = true; cbData.maxBufferSize = inputBufferSize; // allocate source media buffer cbData.hSource = NewHandle((long)cbData.maxBufferSize); MoveHHi(cbData.hSource); HLock(cbData.hSource); cbData.compData.desc = inputFormat; cbData.compData.desc.buffer = (BytePtr)*cbData.hSource; cbData.compData.desc.flags = kExtendedSoundData; cbData.compData.extendedFlags = kExtendedSoundBufferSizeValid | kExtendedSoundSampleCountNotValid; if(cbData.isSourceVBR) cbData.compData.extendedFlags |= kExtendedSoundCommonFrameSizeValid; cbData.compData.bufferSize = 0; // filled in during callback // this doesn't make sense to me, but it is taken from sample code cbData.compData.recordSize = sizeof(ExtendedSoundComponentData); // // Begin the Conversion // err = SoundConverterBeginConversion(soundConverter); SoundConverterFillBufferDataUPP fillBufferUPP; fillBufferUPP = NewSoundConverterFillBufferDataUPP(SoundConverterFillBufferCallback); bool done = false; bool cancelled = false; sampleCount 
samplesSinceLastCallback = 0; UInt32 outputFrames; UInt32 outputBytes; UInt32 outputFlags; #define SAMPLES_PER_CALLBACK 10000 while(!done && !cancelled) { err = SoundConverterFillBuffer(soundConverter, // a sound converter fillBufferUPP, // the callback &cbData, // refCon passed to FillDataProc outputBuffer, // the buffer to decompress into outputBufferSize, // size of that buffer &outputBytes, // number of bytes actually output &outputFrames, // number of frames actually output &outputFlags); // fillbuffer retured advisor flags if (err) break; if((outputFlags & kSoundConverterHasLeftOverData) == false) done = true; for(c = 0; c < *outNumTracks; c++) channels[c]->Append(outputBuffer + (c*bytesPerSample), format, outputFrames, *outNumTracks); samplesSinceLastCallback += outputFrames; if( samplesSinceLastCallback > SAMPLES_PER_CALLBACK ) { if( mProgressCallback ) cancelled = mProgressCallback(mUserData, (float)cbData.getMediaAtThisTime / cbData.sourceDuration); samplesSinceLastCallback -= SAMPLES_PER_CALLBACK; } } HUnlock(cbData.hSource); // Flush any remaining data to the output buffer. // It appears that we have no way of telling this routine how big the output // buffer is! We had better hope that there isn't more data left than // the buffer is big. SoundConverterEndConversion(soundConverter, outputBuffer, &outputFrames, &outputBytes); for(c = 0; c < *outNumTracks; c++) { channels[c]->Append(outputBuffer + (c*bytesPerSample), format, outputFrames, *outNumTracks); channels[c]->Flush(); } delete[] outputBuffer; DisposeHandle(cbData.hSource); SoundConverterClose(soundConverter); DisposeMovie(mMovie); if (cancelled || err != noErr) { for (c = 0; c < *outNumTracks; c++) delete channels[c]; delete[] channels; return false; } else { *outTracks = new Track *[*outNumTracks]; for(c = 0; c < *outNumTracks; c++) (*outTracks)[c] = channels[c]; delete[] channels; return true; } }