/*
 * QTDR_IsMovieSelfContained
 * Report whether every data reference of every track's media points back
 * into the movie file itself (dataRefSelfReference set). A single external
 * data reference makes the movie non-self-contained.
 * Media that cannot be queried (missing media, errors from the data-ref
 * calls) is skipped, exactly as in the original.
 */
Boolean QTDR_IsMovieSelfContained (Movie theMovie)
{
    long    trackTotal = GetMovieTrackCount(theMovie);
    long    trackIdx;

    for (trackIdx = 1; trackIdx <= trackTotal; trackIdx++) {
        Media   curMedia = GetTrackMedia(GetMovieIndTrack(theMovie, trackIdx));
        short   refTotal = 0;
        short   refIdx;

        if (curMedia == NULL)
            continue;                           /* track without media: nothing to inspect */

        if (GetMediaDataRefCount(curMedia, &refTotal) != noErr)
            continue;                           /* cannot query: skip, as before */

        for (refIdx = 1; refIdx <= refTotal; refIdx++) {
            long refAttrs = 0L;

            if (GetMediaDataRef(curMedia, refIdx, NULL, NULL, &refAttrs) != noErr)
                continue;                       /* unreadable reference is ignored */

            if (!(refAttrs & dataRefSelfReference))
                return(false);                  /* found an external data reference */
        }
    }

    return(true);
}
// Configure the movie for HAP playback and detect the HAP codec variant.
// 1) If the movie carries a playable HAP track, create a QT pixel-buffer
//    visual context (with HAP-specific pixel-buffer attributes) and attach
//    it to the movie.
// 2) Walk the video tracks and classify the codec from the first sample's
//    image description ('Hap1'/'Hap5'/'HapY').
// 3) Register the frame callback used for FPS tracking.
void MovieGlHap::allocateVisualContext()
{
	// Load HAP Movie
	if( HapQTQuickTimeMovieHasHapTrackPlayable( getObj()->mMovie ) ) {
		// QT Visual Context attributes
		OSStatus err = noErr;
		// Alias over the object's stored context so the created context lands there.
		QTVisualContextRef * visualContext = (QTVisualContextRef*)&getObj()->mVisualContext;
		// HAP-provided pixel-buffer attributes (caller owns the returned dictionary).
		CFDictionaryRef pixelBufferOptions = HapQTCreateCVPixelBufferOptionsDictionary();
		const CFStringRef keys[] = { kQTVisualContextPixelBufferAttributesKey };
		// One-entry dictionary: { kQTVisualContextPixelBufferAttributesKey : pixelBufferOptions }
		CFDictionaryRef visualContextOptions = ::CFDictionaryCreate(kCFAllocatorDefault, (const void**)&keys, (const void**)&pixelBufferOptions, sizeof(keys)/sizeof(keys[0]), &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
		err = QTPixelBufferContextCreate( kCFAllocatorDefault, visualContextOptions, visualContext );
		// The dictionaries are retained by the context as needed; release our references.
		::CFRelease( pixelBufferOptions );
		::CFRelease( visualContextOptions );
		if( err != noErr ) {
			CI_LOG_E( "HAP ERROR :: " << err << " couldnt create visual context." );
			return;
		}
		// Set the movie's visual context
		err = SetMovieVisualContext( getObj()->mMovie, *visualContext );
		if( err != noErr ) {
			CI_LOG_E( "HAP ERROR :: " << err << " SetMovieVisualContext."
			);
			return;
		}
	}
	// Get codec name
	// NOTE(review): if several video tracks exist, mCodec ends up reflecting
	// the LAST video track examined — confirm that is intended.
	for (long i = 1; i <= GetMovieTrackCount(getObj()->mMovie); i++) {
		Track track = GetMovieIndTrack(getObj()->mMovie, i);
		Media media = GetTrackMedia(track);
		OSType mediaType;
		GetMediaHandlerDescription(media, &mediaType, NULL, NULL);
		if (mediaType == VideoMediaType) {
			// Get the codec-type of this track
			ImageDescriptionHandle imageDescription = (ImageDescriptionHandle)NewHandle(0); // GetMediaSampleDescription will resize it
			// NOTE(review): GetMediaSampleDescription's error is not checked;
			// on failure cType would be read from an empty handle.
			GetMediaSampleDescription(media, 1, (SampleDescriptionHandle)imageDescription);
			OSType codecType = (*imageDescription)->cType;
			DisposeHandle((Handle)imageDescription);
			switch (codecType) {
				case 'Hap1': mCodec = Codec::HAP;         break;
				case 'Hap5': mCodec = Codec::HAP_A;       break;
				case 'HapY': mCodec = Codec::HAP_Q;       break;
				default:     mCodec = Codec::UNSUPPORTED; break;
			}
		}
	}
	// Set framerate callback
	this->setNewFrameCallback( updateMovieFPS, (void*)this );
}
// Open a 3gp/QuickTime level for reading.
// Resolves the path to an FSSpec, opens the movie file, grabs the first
// video track, and reads frame geometry (lx/ly/depth) from the first
// sample description. On failure m_IOError is set and construction stops
// early, leaving the reader in a "not usable" state.
TLevelReader3gp::TLevelReader3gp(const TFilePath &path)
	: TLevelReader(path), m_IOError(QTNoError), m_track(0), m_movie(0), m_depth(0)
// ,m_timeScale(0)
{
	FSSpec fspec;
	QDErr err;
	Boolean dataRefWasChanged;

	if (QuickTimeStuff::instance()->getStatus() != noErr) {
		m_IOError = QTNotInstalled;
		return;
	}

	// BUG FIX: the original called c_str() on the temporary returned by
	// toString(), so pStr dangled before FSMakeFSSpec/getFSSpecFromPosixPath
	// used it. Keep the string alive in a local for the pointer's lifetime.
	const std::string posixPath = toString(m_path.getWideString());
	const char *pStr = posixPath.c_str();
	FSMakeFSSpec(0, 0, (const unsigned char *)pStr, &fspec);
	getFSSpecFromPosixPath(pStr, &fspec, false);
	pStr = 0;

	if ((err = OpenMovieFile(&fspec, &m_refNum, fsRdPerm))) {
		m_IOError = QTUnableToOpenFile;
		return;
	}

	m_resId = 0;
	Str255 name;
	err = NewMovieFromFile(&m_movie, m_refNum, &m_resId, name, fsRdPerm, &dataRefWasChanged);
	if (err != noErr) {
		// Previously ignored: continuing with an invalid movie handle
		// would crash in the calls below.
		m_IOError = QTUnableToOpenFile;
		return;
	}

	int numTracks = GetMovieTrackCount(m_movie);
	assert(numTracks == 1 || numTracks == 2);

	// First video track (a second track, if present, is expected to be audio).
	m_track = GetMovieIndTrackType(m_movie, 1, VideoMediaType, movieTrackMediaType);
	//m_track=GetMovieTrack(m_movie,numTracks);

	// Read frame geometry from the first sample description.
	ImageDescriptionHandle imageH;
	imageH = (ImageDescriptionHandle)NewHandleClear(sizeof(ImageDescription));
	TINT32 index = 1;
	Media theMedia = GetTrackMedia(m_track);

	GetMediaSampleDescription(theMedia, index, (SampleDescriptionHandle)imageH);
	ImageDescriptionPtr imagePtr = *imageH;
	m_lx = imagePtr->width;
	m_ly = imagePtr->height;
	m_depth = imagePtr->depth;

	m_info = new TImageInfo();
	m_info->m_lx = m_lx;
	m_info->m_ly = m_ly;
	Tiio::MovWriterProperties *prop = new Tiio::MovWriterProperties();
	m_info->m_properties = prop;

	DisposeHandle((Handle)imageH);

	// NOTE(review): this stores the media TIME SCALE as the frame rate,
	// as the original did — confirm downstream users expect that.
	m_info->m_frameRate = GetMediaTimeScale(theMedia);
}
/* Return the media of the movie's first sound track, or NULL when the
   movie has no sound track. */
Media GetMediaFromMovie(Movie mov)
{
    Track soundTrack = GetMovieIndTrackType(mov, 1, SoundMediaType, movieTrackMediaType);

    return (soundTrack != NULL) ? GetTrackMedia(soundTrack) : NULL;
}
/* Build a PCM WAVEFORMATEX from the QuickTime sound description of the
 * given track and create the splitter's audio output pin.
 * Channel count, sample rate and sample size are read from the media's
 * first sample description and clamped to sane PCM ranges.
 */
static HRESULT QT_Process_Audio_Track(QTSplitter* filter, Track trk)
{
    AM_MEDIA_TYPE amt;
    WAVEFORMATEX* pvi;
    PIN_INFO piOutput;
    HRESULT hr = S_OK;
    static const WCHAR szwAudioOut[] = {'A','u','d','i','o',0};
    Media audioMedia;

    SoundDescriptionHandle aDesc = (SoundDescriptionHandle) NewHandle(sizeof(SoundDescription));

    audioMedia = GetTrackMedia(trk);
    GetMediaSampleDescription(audioMedia, 1, (SampleDescriptionHandle)aDesc);

    ZeroMemory(&amt, sizeof(amt));
    amt.formattype = FORMAT_WaveFormatEx;
    amt.majortype = MEDIATYPE_Audio;
    amt.subtype = MEDIASUBTYPE_PCM;
    amt.bTemporalCompression = 0;

    amt.cbFormat = sizeof(WAVEFORMATEX);
    amt.pbFormat = CoTaskMemAlloc(amt.cbFormat);
    if (!amt.pbFormat)
    {
        /* Previously unchecked: a failed allocation would crash in ZeroMemory. */
        DisposeHandle((Handle)aDesc);
        return E_OUTOFMEMORY;
    }
    ZeroMemory(amt.pbFormat, amt.cbFormat);
    pvi = (WAVEFORMATEX*)amt.pbFormat;

    pvi->cbSize = sizeof(WAVEFORMATEX);
    pvi->wFormatTag = WAVE_FORMAT_PCM;

    /* Mono or stereo only; anything else falls back to stereo. */
    pvi->nChannels = ((SoundDescription)**aDesc).numChannels;
    if (pvi->nChannels < 1 || pvi->nChannels > 2)
        pvi->nChannels = 2;

    /* sampleRate is a 16.16 fixed-point value; take the integer part.
     * BUG FIX: the upper-bound test previously checked nChannels > 48000
     * (copy-paste error), so absurd sample rates were never clamped. */
    pvi->nSamplesPerSec = (((SoundDescription)**aDesc).sampleRate/65536);
    if (pvi->nSamplesPerSec < 8000 || pvi->nSamplesPerSec > 48000)
        pvi->nSamplesPerSec = 44100;

    pvi->wBitsPerSample = ((SoundDescription)**aDesc).sampleSize;
    if (pvi->wBitsPerSample < 8 || pvi->wBitsPerSample > 32)
        pvi->wBitsPerSample = 16;

    pvi->nBlockAlign = (pvi->nChannels * pvi->wBitsPerSample) / 8;
    pvi->nAvgBytesPerSec = pvi->nSamplesPerSec * pvi->nBlockAlign;

    DisposeHandle((Handle)aDesc);

    piOutput.dir = PINDIR_OUTPUT;
    piOutput.pFilter = &filter->filter.IBaseFilter_iface;
    lstrcpyW(piOutput.achName,szwAudioOut);

    hr = QT_AddPin(filter, &piOutput, &amt, FALSE);
    if (FAILED(hr))
        ERR("Failed to add Audio Track\n");
    else
        TRACE("Audio Pin %p\n",filter->pAudio_Pin);

    return hr;
}
// Append the sound track of the movie file at audioPath to the movie
// being written. Opens the source movie, rescales it to the video time
// scale (30*100), and copies its first track into a new destination track.
// A no-op when audioPath is empty.
void ofQtVideoSaver::addAudioTrack(string audioPath)
{
    if(audioPath == "")
        return;

    OSErr err;
    FSSpec fileSpec;
    short audioMovieRefNum = 0;
    short audioMovieResId = 0;
    Movie audioMovie = NULL;
    Track audioCopyTrack = NULL;
    Media audioCopyMedia = NULL;
    Track destTrack = NULL;
    Media destMedia = NULL;

    destTrack = NewMovieTrack (movie, 0, 0, kFullVolume);
    destMedia = NewTrackMedia (destTrack, SoundMediaType,
                               30. * 100,   /* Video Time Scale */
                               nil, 0);
    err = BeginMediaEdits (destMedia);

    // BUG FIX: this buffer was allocated with new[] and never freed.
    char * p = new char[audioPath.length()+1];
    strcpy(p, audioPath.c_str());
    NativePathNameToFSSpec(p, &fileSpec, 0L);
    delete [] p;

    err = OpenMovieFile(&fileSpec, &audioMovieRefNum, fsRdPerm);
    err = NewMovieFromFile(&audioMovie, audioMovieRefNum, &audioMovieResId,
                           NULL, newMovieActive, NULL);
    err = CloseMovieFile(audioMovieRefNum);

    // Match the destination's time scale before copying segments.
    SetMovieTimeScale(audioMovie, 30.*100);

    audioCopyTrack = GetMovieTrack(audioMovie, 1);
    audioCopyMedia = GetTrackMedia(audioCopyTrack);
    long duration = GetMovieDuration(audioMovie);

    err = AddEmptyTrackToMovie (audioCopyTrack, movie, nil, nil, &destTrack);
    err = InsertTrackSegment(audioCopyTrack, destTrack, 0, duration, 0);
    err = EndMediaEdits(destMedia);
}
/* This function prepares the target Track to receive the movie data,
 * it is called if QT has asked an import operation which should just
 * load this track. After success, *outmap points to a valid stream mapping
 * Return values:
 *   0: ok
 * < 0: couldn't find a matching track
 */
int prepare_track(ff_global_ptr storage, Track targetTrack, Handle dataRef, OSType dataRefType)
{
	int j;
	AVStream *st = NULL;
	AVStream *outstr = NULL;
	Media media;
	NCStream *map = NULL;
	AVFormatContext *ic = storage->format_context;

	/* If the track has already a media, return an err */
	media = GetTrackMedia(targetTrack);
	if(media) goto err;

	/* Search the AVFormatContext for a video stream */
	/* Note: the loop exits as soon as outstr is set, so afterwards
	 * st == outstr whenever a stream was found. */
	for(j = 0; j < ic->nb_streams && !outstr; j++) {
		st = ic->streams[j];
		if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
			outstr = st;
	}
	/* Search the AVFormatContext for an audio stream (no video stream exists) */
	for(j = 0; j < ic->nb_streams && !outstr; j++) {
		st = ic->streams[j];
		if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
			outstr = st;
	}
	/* Still no stream, then err */
	if(!outstr) goto err;

	/* prepare the stream map & initialize */
	map = av_mallocz(sizeof(NCStream));
	map->index = st->index;
	map->str = outstr;

	/* Dispatch to the video or audio initializer; both fill map->media
	 * and map->sampleHdl on success. */
	if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		initialize_video_map(map, targetTrack, dataRef, dataRefType, storage->firstFrames + st->index);
	else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
		initialize_audio_map(map, targetTrack, dataRef, dataRefType, storage->firstFrames + st->index);

	/* The map is only valid if both the media and the sample handle exist. */
	map->valid = map->media && map->sampleHdl;

	/* return the map */
	storage->stream_map = map;

	return 0;

err:
	/* map is NULL on every path that reaches here before allocation. */
	if(map)
		av_free(map);
	return -1;
} /* prepare_track() */
void ofxQtVideoSaver::addAudioTrack(string audioPath) { OSErr err; Handle dataRef = NULL; FSSpec fileSpec; short audioMovieRefNum = 0; short audioMovieResId = 0; Movie audioMovie = NULL; Track audioCopyTrack = NULL; Media audioCopyMedia = NULL; Track destTrack = NULL; Media destMedia = NULL; destTrack = NewMovieTrack (movie, 0, 0, kFullVolume); destMedia = NewTrackMedia (destTrack, SoundMediaType, (TimeScale) (30.f * 100), /* Video Time Scale */ nil, 0); err = BeginMediaEdits (destMedia); char * p = new char[audioPath.length()+1]; strcpy(p, audioPath.c_str()); #ifdef TARGET_WIN32 NativePathNameToFSSpec(p, &fileSpec, 0L); #endif #ifdef TARGET_OSX Boolean isdir; FSPathMakeRef((const UInt8*)p, &fsref, &isdir); FSGetCatalogInfo(&fsref, kFSCatInfoNone, NULL, NULL, &fileSpec, NULL); #endif err = OpenMovieFile(&fileSpec, &audioMovieRefNum, fsRdPerm); err = NewMovieFromFile(&audioMovie, audioMovieRefNum, &audioMovieResId, NULL, newMovieActive, NULL); err = CloseMovieFile(audioMovieRefNum); SetMovieTimeScale(audioMovie, (TimeScale) (30.*100)); audioCopyTrack = GetMovieTrack(audioMovie, 1); audioCopyMedia = GetTrackMedia(audioCopyTrack); long duration = GetMovieDuration(audioMovie); err = AddEmptyTrackToMovie (audioCopyTrack, movie, 0, 0, &destTrack); err = InsertTrackSegment(audioCopyTrack, destTrack, 0, duration, 0); err = EndMediaEdits(destMedia); }
// Open a movie file and prepare it for BGRA offscreen rendering.
// Builds a QuickTime data reference from the path (backslash-normalized
// on Windows), instantiates the movie, grabs its first visual track and
// media, allocates a 32-bit BGRA pixel buffer sized to the movie box,
// wraps it in a GWorld, and attaches that GWorld to the movie.
// Returns NULL on any QuickTime error.
quicktime_player* quicktime_player::open( const char *path )
{
    impl *m = new impl;

    std::string nativePath = path;
#ifdef WIN32
    // QuickTime on Windows wants native separators.
    for (std::string::iterator it = nativePath.begin(); it != nativePath.end(); ++it) {
        if (*it == '/')
            *it = '\\';
    }
#endif

    CFStringRef cfPath = CFStringCreateWithCString(NULL, nativePath.c_str(),
                                                   kCFStringEncodingISOLatin1);
    Handle dataRef = NULL;
    OSType dataRefType;
    OSErr err = QTNewDataReferenceFromFullPathCFString(cfPath,
                    (QTPathStyle)kQTNativeDefaultPathStyle, 0,
                    &dataRef, &dataRefType);
    CFRelease(cfPath);
    if (err != noErr) {
        delete m;
        return NULL;
    }

    short resID = 0;
    err = NewMovieFromDataRef(&m->movie, newMovieActive, &resID, dataRef, dataRefType);
    DisposeHandle(dataRef);
    if (err != noErr) {
        delete m;
        return NULL;
    }

    // First track with visual content, and its media.
    m->track = GetMovieIndTrackType(m->movie, 1, VisualMediaCharacteristic,
                                    movieTrackCharacteristic);
    m->media = GetTrackMedia(m->track);

    // Size the pixel buffer from the movie box (4 bytes per BGRA pixel).
    Rect bounds;
    GetMovieBox(m->movie, &bounds);
    m->width  = bounds.right;
    m->height = bounds.bottom;
    m->buffer = (unsigned char*)malloc(4 * m->width * m->height);

    err = QTNewGWorldFromPtr(&m->gworld, k32BGRAPixelFormat, &bounds,
                             NULL, NULL, 0, m->buffer, 4 * m->width);
    if (err != noErr) {
        delete m;
        return NULL;
    }

    SetMovieGWorld(m->movie, m->gworld, NULL);
    return new quicktime_player(m);
}
/* Get the media identifier for the media that contains the first
   video track's sample data, and also get the media handler for
   this media. Both outputs are set to NULL first, so on any failure
   (no video track, no media) the caller sees NULLs. */
void MovieGetVideoMediaAndMediaHandler(Movie inMovie, Media *outMedia, MediaHandler *outMediaHandler)
{
    Track firstVideoTrack;

    *outMedia = NULL;
    *outMediaHandler = NULL;

    /* first enabled track with a video frame rate */
    firstVideoTrack = GetMovieIndTrackType(inMovie, 1, kCharacteristicHasVideoFrameRate,
                                           movieTrackCharacteristic | movieTrackEnabledOnly);
    if (firstVideoTrack == NULL)
        return;

    /* media ref. for the track's sample data */
    *outMedia = GetTrackMedia(firstVideoTrack);
    if (*outMedia == NULL)
        return;

    /* reference to the media handler component */
    *outMediaHandler = GetMediaHandler(*outMedia);
}
/* Convert the movie at inFile to an MP4 at outFile using the QuickTime
 * MP4 export component, passing the sound media through rather than
 * re-encoding it: the sound track is rebuilt at the media's own time
 * scale and the exporter's enable-sound setting is switched to 'past'
 * (pass-through). Returns 0 on success, -1 on any failure.
 */
int convertToMP4PathThrough(CFStringRef inFile, CFStringRef outFile)
{
	OSStatus error;
	MovieExportComponent movieExporter = NULL;
	Handle inDataRef=0, outDataRef=0;
	OSType inDataRefType, outDataRefType;
	short inResID = 0;
	Movie theMovie=0;
	int ret = -1;

	error = OpenADefaultComponent(MovieExportType, kQTFileTypeMP4, &movieExporter);
	if(error) {
		fprintf(stderr,"OpenADefaultComponent error: cannot find the QuickTime conponent\n");
		goto last;
	}
	error = QTNewDataReferenceFromFullPathCFString(inFile, kQTNativeDefaultPathStyle, 0, &inDataRef, &inDataRefType);
	if(error) {
		fprintf(stderr,"QTNewDataReferenceFromFullPathCFString error: input file path is invalid\n");
		goto last;
	}
	error = QTNewDataReferenceFromFullPathCFString(outFile, kQTNativeDefaultPathStyle, 0, &outDataRef, &outDataRefType);
	if(error) {
		fprintf(stderr,"QTNewDataReferenceFromFullPathCFString error: output file path is invalid\n");
		goto last;
	}
	error = NewMovieFromDataRef(&theMovie, newMovieActive, &inResID, inDataRef, inDataRefType);
	if(error) {
		fprintf(stderr,"NewMovieFromDataRef error: cannot open the input file\n");
		goto last;
	}

	/* Rebuild the sound track: drop its edit list, adopt the media's own
	 * time scale, and re-insert the full media so the exporter sees a
	 * clean 1:1 mapping.
	 * NOTE(review): getSoundTrack's result is not NULL-checked; a movie
	 * without a sound track would crash in GetTrackMedia — confirm callers
	 * only pass movies with audio. */
	Track theTrack = getSoundTrack(theMovie);
	Media theMedia = GetTrackMedia(theTrack);
	DeleteTrackSegment(theTrack, 0, GetTrackDuration(theTrack));
	SetMovieTimeScale(theMovie, GetMediaTimeScale(theMedia));
	InsertMediaIntoTrack(theTrack, 0, 0, GetMediaDuration(theMedia), fixed1);

	Boolean useHighResolutionAudio = true;
	QTSetComponentProperty(
		movieExporter,
		kQTPropertyClass_MovieExporter,
		kQTMovieExporterPropertyID_EnableHighResolutionAudioFeatures,
		sizeof(Boolean),
		&useHighResolutionAudio
	);

	/* Force the output file-type brand to 'mp42'. */
	UInt32 ftyp = 'mp42';
	QTSetComponentProperty(
		movieExporter,
		kQTPropertyClass_MovieExporter,
		'ftyp',
		4,
		&ftyp
	);

	/* Flip the exporter's enable-sound atom to 'past' (audio pass-through).
	 * NOTE(review): if MovieExportGetSettingsAsAtomContainer fails, 'ac' is
	 * used uninitialized by QTFindChildByID/DisposeHandle — confirm. */
	QTAtomContainer ac;
	MovieExportGetSettingsAsAtomContainer(movieExporter, &ac);
	QTAtom ensoAtom = QTFindChildByID(ac, kParentAtomIsContainer,
			kQTSettingsMovieExportEnableSound, 1, NULL);
	if(ensoAtom) {
		long size, *data;
		QTGetAtomDataPtr(ac,ensoAtom,&size,(Ptr *)&data);
		data[0] = EndianS32_NtoB('past');
		QTSetAtomData(ac, ensoAtom, size, data);
		MovieExportSetSettingsFromAtomContainer(movieExporter, ac);
	}
	DisposeHandle(ac);

	/*Boolean cancelled;
	error = MovieExportDoUserDialog(movieExporter, theMovie, NULL, 0, GetMovieDuration(theMovie), &cancelled);
	if(cancelled) goto last;
	if(error) {
		printf("MovieExportDoUserDialog error\n");
		goto last;
	}*/

	error = ConvertMovieToDataRef(theMovie, 0, outDataRef, outDataRefType, kQTFileTypeMP4,
			FOUR_CHAR_CODE('TVOD'), createMovieFileDeleteCurFile|createMovieFileDontCreateResFile, movieExporter);
	if(error) {
		fprintf(stderr,"ConvertMovieToDataRef error: cannot translate .mov into .m4a (%d)\n",error);
		goto last;
	}

	ret = 0;

last:
	/* Common cleanup for success and every failure path. */
	if(movieExporter) CloseComponent(movieExporter);
	if(theMovie) DisposeMovie(theMovie);
	if(inDataRef) DisposeHandle(inDataRef);
	if(outDataRef) DisposeHandle(outDataRef);
	return ret;
}
int main( int argc, char **argv ) { Movie movie; Track track; Media media; short refNum; short resID = 0; Boolean wasChanged; OSErr err = noErr; FSSpec fsspec; AudioFormatAtomPtr outAudioAtom; CmpSoundHeader outSoundInfo; SoundComponentData theInputFormat, theOutputFormat; SoundConverter mySoundConverter = NULL; // SCFillBufferData scFillBufferData = { NULL }; Ptr pDecomBuffer0 = NULL, pDecomBuffer1 = NULL; long kMaxOutputBuffer = 64 * 1024; long noFrames = 0, niFrames = 0, noBytes = 0, noSamples = 0; #define MAX_BUFFER_SIZE 256 * 1024 * 1024 /** Initialise MovieToolbox */ EnterMovies(); /** Open the movie file from the first argument */ printf( "opening audio file: '%s'\n", argv[1] ); path2fss( &fsspec, argv[1] ); err = OpenMovieFile( &fsspec, &refNum, fsRdPerm ); if ( err != noErr ) { printf( "failed to open audio: %d\n", GetMoviesError() ); exit( -1 ); } /** Instantiate the movie */ err = NewMovieFromFile( &movie, refNum, &resID, NULL, newMovieActive, &wasChanged ); if ( err ) { printf( "failed to instantiate movie\n" ); exit( -1 ); } CloseMovieFile( refNum ); refNum = 0; /** Get the first sound track */ track = GetMovieIndTrackType( movie, 1, SoundMediaType, movieTrackMediaType ); if ( track == NULL ) { printf( "failed to get sound track\n" ); exit( -1 ); } /** Get the sound track media */ media = GetTrackMedia( track ); if ( media == NULL ) { printf( "failed to get media from audio track\n" ); exit( -1 ); } Size size; Handle extension; SoundDescriptionHandle sourceSoundDescription; sourceSoundDescription = (SoundDescriptionHandle)NewHandle(0); /** Get the description of the sample data */ GetMediaSampleDescription( media, 1, (SampleDescriptionHandle)sourceSoundDescription ); err = GetMoviesError(); if ( err ) { printf( "failed to get description of sample data\n" ); exit( -1 ); } extension = NewHandle( 0 ); // get the "magic" decompression atom // This extension to the SoundDescription information stores // data specific to a given audio decompressor. 
Some audio // decompression algorithms require a set of out-of-stream // values to configure the decompressor. err = GetSoundDescriptionExtension( (SoundDescriptionHandle)sourceSoundDescription, &extension, siDecompressionParams ); if ( noErr == err ) { size = GetHandleSize( extension ); printf( "transferring data to audio buffer: %d bytes\n", size ); HLock( extension ); outAudioAtom = (AudioFormatAtom*)NewPtr( size ); err = MemError(); // copy the atom data to our buffer... BlockMoveData( *extension, outAudioAtom, size ); HUnlock( extension ); } else { // if it doesn't have an atom, that's ok outAudioAtom = NULL; err = noErr; } /** Setup our sound header */ outSoundInfo.format = (*sourceSoundDescription)->dataFormat; outSoundInfo.numChannels = (*sourceSoundDescription)->numChannels; outSoundInfo.sampleSize = (*sourceSoundDescription)->sampleSize; outSoundInfo.sampleRate = (*sourceSoundDescription)->sampleRate; outSoundInfo.compressionID = (*sourceSoundDescription)->compressionID; float db = ((float)outSoundInfo.sampleRate)/(1<<16); printf( "sample: %d\tchannels: %d\tsample size: %d\tsample rate: %f\tcompressionID: %d\n", outSoundInfo.format, outSoundInfo.numChannels, outSoundInfo.sampleSize, db, outSoundInfo.compressionID ); DisposeHandle( extension ); DisposeHandle( (Handle)sourceSoundDescription ); /** * Now that we've figured out what the audio file is, allocate buffers * and so on for conversion and playback */ printf( "initialising input/output conversion buffers\n" ); /** setup input/output format for sound converter */ theInputFormat.flags = 0; theInputFormat.format = outSoundInfo.format; theInputFormat.numChannels = outSoundInfo.numChannels; theInputFormat.sampleSize = outSoundInfo.sampleSize; theInputFormat.sampleRate = outSoundInfo. 
sampleRate; theInputFormat.sampleCount = 0; theInputFormat.buffer = NULL; theInputFormat.reserved = 0; theOutputFormat.flags = 0; theOutputFormat.format = kSoundNotCompressed; theOutputFormat.numChannels = theInputFormat.numChannels; theOutputFormat.sampleSize = theInputFormat.sampleSize; theOutputFormat.sampleRate = theInputFormat.sampleRate; theOutputFormat.sampleCount = 0; theOutputFormat.buffer = NULL; theOutputFormat.reserved = 0; // variableCompression means we're going to use the commonFrameSize field and the kExtendedSoundCommonFrameSizeValid flag // scFillBufferData.isSourceVBR = (outSoundInfo.compressionID == variableCompression ); err = SoundConverterOpen( &theInputFormat, &theOutputFormat, &mySoundConverter ); if ( err != noErr ) { printf( "failed to open sound converter\n" ); exit( -1 ); } else { printf( "opened sound converter ok\n" ); } // this isn't crucial or even required for decompression only, but it does tell // the sound converter that we're cool with VBR audio Ptr tptr = NewPtr( 1 ); tptr[0] = 1; SoundConverterSetInfo( mySoundConverter, siClientAcceptsVBR, tptr ); free( tptr ); /** * Set up the sound converters decompresson 'environment' by passing * in the 'magic' decompression atom */ err = SoundConverterSetInfo( mySoundConverter, siDecompressionParams, outAudioAtom ); if ( err != noErr ) { printf( "failed to set sound converter info\n" ); exit( -1 ); } else { printf( "set sound converter info ok\n" ); } if ( outAudioAtom ) { DisposePtr( (Ptr)outAudioAtom ); } if ( siUnknownInfoType == err ) { // clear this error, the decompressor didn't // need the decompression atom and that's OK err = noErr; } else { // BailErr(err); } /** * The input buffer has to be large enough so GetMediaSample isn't * going to fail, your mileage may vary */ Handle inputBuffer = NewHandle( MAX_BUFFER_SIZE ); // HLock( inputBuffer ); /** Start the sound conversion */ err = SoundConverterBeginConversion(mySoundConverter); // BailErr(err); /** Extract compressed audio 
from media track */ TimeValue tperSample = 0; err = GetMediaSample( media, inputBuffer, 0, &noBytes, 0, NULL, &tperSample, NULL, NULL, 0, &noSamples, NULL ); if ( err != noErr ) { printf( "failed to fetch media sample data: %d\n", GetMoviesError() ); exit( -1 ); } else { printf( "media sample: %d (%d) bytes / %ld samples / %d per sample\n", noBytes, GetHandleSize( inputBuffer ), noSamples, tperSample ); } unsigned long niBytes = 0; SoundConverterGetBufferSizes( mySoundConverter, noBytes * noSamples, &niFrames, &niBytes, &noBytes ); printf( "buffer sizes: frames: %d\tibytes: %d\tobytes: %d\n", niFrames, niBytes, noBytes ); /** Convert into uncompressed audio */ Ptr outputBuffer = NewPtr( noBytes * 1.2 ); SoundConverterConvertBuffer( mySoundConverter, inputBuffer, noSamples /* niFrames */, outputBuffer, &noFrames, &noBytes ); printf( "converted: %d frames / %d bytes\n", noFrames, noBytes ); /** Shutdown the sound converter */ err = SoundConverterEndConversion( mySoundConverter, outputBuffer, &noFrames, &noBytes ); printf( "converted final: %d frames / %d bytes\n", noFrames, noBytes ); // HUnlock( inputBuffer ); /** We now should have decompressed audio for the input file */ /** * So, generate visuals using a sliding sample grid at the * given framerate */ /** Create a new movie clip with audio and video tracks */ /** PROJECTM CRAP HERE -- stuff frames into QuickTime */ /** Close movie file */ /** Shutdown MovieToolbox */ ExitMovies(); return 0; }
/* Create the splitter's video output pin for the given QuickTime track.
 * Builds an RGB24 VIDEOINFOHEADER from the track dimensions, creates a
 * QT pixel-buffer visual context (ARGB32, 16-byte row alignment) and
 * attaches it to the movie, then derives the average frame time from
 * the media's sample count, time scale and display duration.
 */
static HRESULT QT_Process_Video_Track(QTSplitter* filter, Track trk)
{
    AM_MEDIA_TYPE amt;
    VIDEOINFOHEADER * pvi;
    PIN_INFO piOutput;
    HRESULT hr = S_OK;
    OSErr err;
    static const WCHAR szwVideoOut[] = {'V','i','d','e','o',0};
    CFMutableDictionaryRef pixelBufferOptions = NULL;
    CFMutableDictionaryRef visualContextOptions = NULL;
    CFNumberRef n = NULL;
    int t;
    DWORD outputWidth, outputHeight, outputDepth;
    Fixed trackWidth, trackHeight;
    Media videoMedia;
    long sampleCount;
    TimeValue64 duration;
    TimeScale timeScale;

    ZeroMemory(&amt, sizeof(amt));
    amt.formattype = FORMAT_VideoInfo;
    amt.majortype = MEDIATYPE_Video;
    amt.subtype = MEDIASUBTYPE_RGB24;

    GetTrackDimensions(trk, &trackWidth, &trackHeight);
    outputDepth = 3; /* RGB24: 3 bytes per pixel */
    outputWidth = Fix2Long(trackWidth);
    outputHeight = Fix2Long(trackHeight);
    TRACE("Width %i Height %i\n",outputWidth, outputHeight);

    amt.cbFormat = sizeof(VIDEOINFOHEADER);
    amt.pbFormat = CoTaskMemAlloc(amt.cbFormat);
    ZeroMemory(amt.pbFormat, amt.cbFormat);
    pvi = (VIDEOINFOHEADER *)amt.pbFormat;
    pvi->bmiHeader.biSize = sizeof (BITMAPINFOHEADER);
    pvi->bmiHeader.biWidth = outputWidth;
    pvi->bmiHeader.biHeight = outputHeight;
    pvi->bmiHeader.biPlanes = 1;
    pvi->bmiHeader.biBitCount = 24;
    pvi->bmiHeader.biCompression = BI_RGB;
    pvi->bmiHeader.biSizeImage = outputWidth * outputHeight * outputDepth;

    filter->outputSize = pvi->bmiHeader.biSizeImage;
    amt.lSampleSize = 0;

    /* Pixel-buffer attributes: ARGB32 at track size, rows aligned to 16. */
    pixelBufferOptions = CFDictionaryCreateMutable(NULL, 0,
        &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);

    t = k32ARGBPixelFormat;
    n = CFNumberCreate(NULL, kCFNumberIntType, &t);
    CFDictionaryAddValue(pixelBufferOptions, kCVPixelBufferPixelFormatTypeKey, n);
    CFRelease(n);

    n = CFNumberCreate(NULL, kCFNumberIntType, &outputWidth);
    CFDictionaryAddValue(pixelBufferOptions, kCVPixelBufferWidthKey, n);
    CFRelease(n);

    n = CFNumberCreate(NULL, kCFNumberIntType, &outputHeight);
    CFDictionaryAddValue(pixelBufferOptions, kCVPixelBufferHeightKey, n);
    CFRelease(n);

    t = 16;
    n = CFNumberCreate(NULL, kCFNumberIntType, &t);
    CFDictionaryAddValue(pixelBufferOptions, kCVPixelBufferBytesPerRowAlignmentKey, n);
    CFRelease(n);

    visualContextOptions = CFDictionaryCreateMutable(NULL, 0,
        &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
    CFDictionarySetValue(visualContextOptions, kQTVisualContextPixelBufferAttributesKey,
        pixelBufferOptions);

    /* NOTE(review): QTPixelBufferContextCreate returns an OSStatus stored
     * in an OSErr — confirm the narrowing is benign on this platform. */
    err = QTPixelBufferContextCreate(NULL, visualContextOptions,&filter->vContext);
    CFRelease(pixelBufferOptions);
    CFRelease(visualContextOptions);
    if (err != noErr)
    {
        ERR("Failed to create Visual Context\n");
        return E_FAIL;
    }

    err = SetMovieVisualContext(filter->pQTMovie, filter->vContext);
    if (err != noErr)
    {
        ERR("Failed to set Visual Context\n");
        return E_FAIL;
    }

    videoMedia = GetTrackMedia(trk);
    sampleCount = GetMediaSampleCount(videoMedia);
    timeScale = GetMediaTimeScale(videoMedia);
    duration = GetMediaDisplayDuration(videoMedia);
    /* Average frame time in 100ns units.
     * NOTE(review): divides by duration with no zero check — an empty
     * media would fault here; confirm callers guarantee duration > 0. */
    pvi->AvgTimePerFrame = (100000.0 * sampleCount * timeScale) / duration;

    piOutput.dir = PINDIR_OUTPUT;
    piOutput.pFilter = &filter->filter.IBaseFilter_iface;
    lstrcpyW(piOutput.achName,szwVideoOut);

    hr = QT_AddPin(filter, &piOutput, &amt, TRUE);
    if (FAILED(hr))
        ERR("Failed to add Video Track\n");
    else
        TRACE("Video Pin %p\n",filter->pVideo_Pin);

    return hr;
}
/* Create a "reference copy" of the first video track of theSrcMovie:
 * the destination movie atom goes into theDstMovieFile while the media
 * samples are written into a separate file, theDstMediaFile, that the
 * destination media references via a file data reference.
 * Two strategies are compiled in: AddEmptyTrackToMovie, or a manual
 * NewMovieTrack/NewTrackMedia pair (selected by USE_ADDEMPTYTRACKTOMOVIE).
 * Returns noErr on success, otherwise the first QuickTime error hit.
 */
OSErr QTDR_CreateReferenceCopy (Movie theSrcMovie, FSSpecPtr theDstMovieFile, FSSpecPtr theDstMediaFile)
{
	Track		mySrcTrack = NULL;
	Media		mySrcMedia = NULL;
	Movie		myDstMovie = NULL;
	Track		myDstTrack = NULL;
	Media		myDstMedia = NULL;
	Handle		myMediaRef = NULL;			// data reference for the media file
#if !USE_ADDEMPTYTRACKTOMOVIE
	Fixed		myWidth, myHeight;
	OSType		myType;
#endif
	long		myFlags = createMovieFileDeleteCurFile | createMovieFileDontCreateResFile;
	short		myResRefNum = 0;
	short		myResID = movieInDataForkResID;
	OSErr		myErr = paramErr;

	// get the first video track and media in the source movie
	mySrcTrack = GetMovieIndTrackType(theSrcMovie, 1, VideoMediaType, movieTrackMediaType);
	if (mySrcTrack == NULL)
		goto bail;

	mySrcMedia = GetTrackMedia(mySrcTrack);
	if (mySrcMedia == NULL)
		goto bail;

	// create a file data reference for the new media file
	myMediaRef = QTDR_MakeFileDataRef(theDstMediaFile);
	if (myMediaRef == NULL)
		goto bail;

	// create a file for the destination movie data
	myErr = FSpCreate(theDstMediaFile, sigMoviePlayer, MovieFileType, 0);
	if (myErr != noErr)
		goto bail;

	// create a file for the destination movie atom and create an empty movie
	myErr = CreateMovieFile(theDstMovieFile, sigMoviePlayer, smCurrentScript, myFlags, &myResRefNum, &myDstMovie);
	if (myErr != noErr)
		goto bail;

	// assign the default progress proc to the destination movie
	SetMovieProgressProc(myDstMovie, (MovieProgressUPP)-1, 0);

#if USE_ADDEMPTYTRACKTOMOVIE
	myErr = AddEmptyTrackToMovie(mySrcTrack, myDstMovie, myMediaRef, rAliasType, &myDstTrack);
	if (myErr != noErr)
		goto bail;

	myDstMedia = GetTrackMedia(myDstTrack);
	myErr = GetMoviesError();
	if (myErr != noErr)
		goto bail;
#else
	// get some information about the source track and media
	GetTrackDimensions(mySrcTrack, &myWidth, &myHeight);
	GetMediaHandlerDescription(mySrcMedia, &myType, 0, 0);

	// create the destination movie track and media
	myDstTrack = NewMovieTrack(myDstMovie, myWidth, myHeight, kNoVolume);
	myErr = GetMoviesError();
	if (myErr != noErr)
		goto bail;

	myDstMedia = NewTrackMedia(myDstTrack, myType, GetMediaTimeScale(mySrcMedia), myMediaRef, rAliasType);
	myErr = GetMoviesError();
	if (myErr != noErr)
		goto bail;

	CopyTrackSettings(mySrcTrack, myDstTrack);
#endif

	// copy the entire source track into the destination track; this copies the track's media
	// samples into the destination media file
	myErr = BeginMediaEdits(myDstMedia);
	if (myErr != noErr)
		goto bail;

	myErr = InsertTrackSegment(mySrcTrack, myDstTrack, 0, GetTrackDuration(mySrcTrack), 0);
	if (myErr != noErr)
		goto bail;

	myErr = EndMediaEdits(myDstMedia);
	if (myErr != noErr)
		goto bail;

	// add the movie atom to the data fork of the movie file
	myErr = AddMovieResource(myDstMovie, myResRefNum, &myResID, NULL);

bail:
	// BUG FIX: the original returned without releasing anything, leaking
	// the open movie-file reference, the destination movie, and the data
	// reference handle on both success and failure paths.
	if (myResRefNum != 0)
		CloseMovieFile(myResRefNum);

	if (myDstMovie != NULL)
		DisposeMovie(myDstMovie);

	if (myMediaRef != NULL)
		DisposeHandle(myMediaRef);

	return(myErr);
}
// Construct a QuickTime-backed audio reader over the given input stream.
// Opens the movie from the stream, locates the trackNum_-th sound track,
// then configures a MovieAudioExtraction session that delivers 16-bit
// signed, native-endian, packed PCM (at most 2 channels). 'ok' is only
// true once the whole setup chain has succeeded.
QTAudioReader (InputStream* const input_, const int trackNum_)
    : AudioFormatReader (input_, TRANS (quickTimeFormatName)),
      ok (false),
      movie (0),
      trackNum (trackNum_),
      lastSampleRead (0),
      lastThreadId (0),
      extractor (0),
      dataHandle (0)
{
    JUCE_AUTORELEASEPOOL
    bufferList.calloc (256, 1);

   #if JUCE_WINDOWS
    if (InitializeQTML (0) != noErr)
        return;
   #endif

    if (EnterMovies() != noErr)
        return;

    bool opened = juce_OpenQuickTimeMovieFromStream (input_, movie, dataHandle);

    if (! opened)
        return;

    {
        // Find the trackNum_-th SOUND track (trackCount counts only sound tracks).
        const int numTracks = GetMovieTrackCount (movie);
        int trackCount = 0;

        for (int i = 1; i <= numTracks; ++i)
        {
            track = GetMovieIndTrack (movie, i);
            media = GetTrackMedia (track);

            OSType mediaType;
            GetMediaHandlerDescription (media, &mediaType, 0, 0);

            if (mediaType == SoundMediaType
                 && trackCount++ == trackNum_)
            {
                ok = true;
                break;
            }
        }
    }

    if (! ok)
        return;

    // Reset and only re-set 'ok' at the very end, so any early return
    // below leaves the reader marked unusable.
    ok = false;

    lengthInSamples = GetMediaDecodeDuration (media);
    usesFloatingPointData = false;

    samplesPerFrame = (int) (GetMediaDecodeDuration (media) / GetMediaSampleCount (media));

    trackUnitsPerFrame = GetMovieTimeScale (movie) * samplesPerFrame
                            / GetMediaTimeScale (media);

    // NOTE(review): this err is overwritten by the next call without
    // being checked — confirm MovieAudioExtractionBegin failures are
    // acceptable to detect only indirectly.
    OSStatus err = MovieAudioExtractionBegin (movie, 0, &extractor);

    unsigned long output_layout_size;
    err = MovieAudioExtractionGetPropertyInfo (extractor,
                                               kQTPropertyClass_MovieAudioExtraction_Audio,
                                               kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                               0, &output_layout_size, 0);
    if (err != noErr)
        return;

    HeapBlock <AudioChannelLayout> qt_audio_channel_layout;
    qt_audio_channel_layout.calloc (output_layout_size, 1);

    err = MovieAudioExtractionGetProperty (extractor,
                                           kQTPropertyClass_MovieAudioExtraction_Audio,
                                           kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                           output_layout_size, qt_audio_channel_layout, 0);

    // Force a stereo channel layout for extraction, regardless of the
    // movie's own layout (err from the read above is not checked here).
    qt_audio_channel_layout[0].mChannelLayoutTag = kAudioChannelLayoutTag_Stereo;

    err = MovieAudioExtractionSetProperty (extractor,
                                           kQTPropertyClass_MovieAudioExtraction_Audio,
                                           kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                           output_layout_size,
                                           qt_audio_channel_layout);

    err = MovieAudioExtractionGetProperty (extractor,
                                           kQTPropertyClass_MovieAudioExtraction_Audio,
                                           kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                           sizeof (inputStreamDesc),
                                           &inputStreamDesc, 0);
    if (err != noErr)
        return;

    // Ask the extractor for packed signed 16-bit native-endian PCM,
    // clamped to at most two channels.
    inputStreamDesc.mFormatFlags = kAudioFormatFlagIsSignedInteger
                                    | kAudioFormatFlagIsPacked
                                    | kAudioFormatFlagsNativeEndian;
    inputStreamDesc.mBitsPerChannel = sizeof (SInt16) * 8;
    inputStreamDesc.mChannelsPerFrame = jmin ((UInt32) 2, inputStreamDesc.mChannelsPerFrame);
    inputStreamDesc.mBytesPerFrame = sizeof (SInt16) * inputStreamDesc.mChannelsPerFrame;
    inputStreamDesc.mBytesPerPacket = inputStreamDesc.mBytesPerFrame;

    err = MovieAudioExtractionSetProperty (extractor,
                                           kQTPropertyClass_MovieAudioExtraction_Audio,
                                           kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                           sizeof (inputStreamDesc),
                                           &inputStreamDesc);
    if (err != noErr)
        return;

    Boolean allChannelsDiscrete = false;
    err = MovieAudioExtractionSetProperty (extractor,
                                           kQTPropertyClass_MovieAudioExtraction_Movie,
                                           kQTMovieAudioExtractionMoviePropertyID_AllChannelsDiscrete,
                                           sizeof (allChannelsDiscrete),
                                           &allChannelsDiscrete);
    if (err != noErr)
        return;

    // Single interleaved output buffer, sized for one frame's worth of
    // samples (minimum 4 KB, plus a little slack).
    bufferList->mNumberBuffers = 1;
    bufferList->mBuffers[0].mNumberChannels = inputStreamDesc.mChannelsPerFrame;
    bufferList->mBuffers[0].mDataByteSize = jmax ((UInt32) 4096,
                                                  (UInt32) (samplesPerFrame * inputStreamDesc.mBytesPerFrame) + 16);

    dataBuffer.malloc (bufferList->mBuffers[0].mDataByteSize);
    bufferList->mBuffers[0].mData = dataBuffer;

    sampleRate = inputStreamDesc.mSampleRate;
    bitsPerSample = 16;
    numChannels = inputStreamDesc.mChannelsPerFrame;

    detachThread();
    ok = true;
}
// Handle application-specific (text-track related) menu item selections.
// Returns true if the item was handled here, false to let the caller
// (the framework menu dispatcher) process it instead.
//
// theMenuItem : menu item identifier (IDM_* constant)
Boolean QTApp_HandleMenu (UInt16 theMenuItem)
{
    WindowObject        myWindowObject = NULL;
    ApplicationDataHdl  myAppData = NULL;
    MovieController     myMC = NULL;
    Movie               myMovie = NULL;
    Boolean             myIsHandled = false;        // false => allow caller to process the menu item
    OSErr               myErr = noErr;

    // Pull the controller, movie and per-window app data out of the
    // frontmost movie window, if there is one.
    // NOTE(review): several cases below dereference myAppData/myWindowObject
    // without a NULL check - presumably those menu items are only enabled
    // when a movie window is frontmost; confirm against the menu-adjust code.
    myWindowObject = QTFrame_GetWindowObjectFromFrontWindow();
    if (myWindowObject != NULL) {
        myMC = (**myWindowObject).fController;
        myMovie = (**myWindowObject).fMovie;
        myAppData = (ApplicationDataHdl)QTFrame_GetAppDataFromWindowObject(myWindowObject);
    }

    switch (theMenuItem) {

        case IDM_SET_TEXT:
            // put up a dialog box to get a text string to search for
            QTText_SetSearchText();
            myIsHandled = true;
            break;

        case IDM_FIND_TEXT:
            // search the movie's text track for the current search string
            QTText_FindText(myWindowObject, gSearchText);
            myIsHandled = true;
            break;

        case IDM_EDIT_TEXT:
            QTText_EditText(myWindowObject);
            myIsHandled = true;
            break;

        case IDM_SEARCH_FORWARD:
            gSearchForward = true;
            myIsHandled = true;
            break;

        case IDM_SEARCH_BACKWARD:
            gSearchForward = false;
            myIsHandled = true;
            break;

        case IDM_WRAP_SEARCH:
            // toggle wrap-around searching
            gSearchWrap = !gSearchWrap;
            myIsHandled = true;
            break;

        case IDM_USE_CASE:
            // toggle case-sensitive searching
            gSearchWithCase = !gSearchWithCase;
            myIsHandled = true;
            break;

        case IDM_ADD_TEXT_TRACK: {
            // add a text track to the specified movie; for purposes of
            // illustration, we'll add 11 (count 'em!) text strings to the
            // movie, with each string occupying about one-tenth of the
            // movie duration
            Track       myTypeTrack = NULL;
            Track       myTextTrack = NULL;
            TimeValue   myMovieDuration = 0;
            TimeValue   mySampleDuration = 0;
            char        *myStrings[] = {"0%", "10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%"};
            short       myFrames[11];           // per-string durations, in frames
            Boolean     isChapter = true;       // make the new text track a chapter track

            myTypeTrack = GetMovieIndTrackType(myMovie, 1, VideoMediaType, movieTrackMediaType);
            if (myTypeTrack == NULL)
                break;

            // get the duration of the movie and the duration of a single frame;
            // this tells us how many frames fit into one-tenth of the movie
            myMovieDuration = GetMovieDuration(myMovie);
            mySampleDuration = QTUtils_GetFrameDuration(myTypeTrack);

            myFrames[0] = (myMovieDuration / mySampleDuration) / 10;
            myFrames[1] = (myMovieDuration / mySampleDuration) / 10;
            myFrames[2] = (myMovieDuration / mySampleDuration) / 10;
            myFrames[3] = (myMovieDuration / mySampleDuration) / 10;
            myFrames[4] = (myMovieDuration / mySampleDuration) / 10;
            myFrames[5] = (myMovieDuration / mySampleDuration) / 10;
            myFrames[6] = (myMovieDuration / mySampleDuration) / 10;
            myFrames[7] = (myMovieDuration / mySampleDuration) / 10;
            myFrames[8] = (myMovieDuration / mySampleDuration) / 10;
            // the penultimate span is one frame shorter so the final "100%"
            // string gets its own single frame at the very end
            myFrames[9] = ((myMovieDuration / mySampleDuration) / 10) - 1;
            myFrames[10] = 1;

            myTextTrack = QTText_AddTextTrack(myMovie, myStrings, myFrames, 11, VideoMediaType, isChapter);
            if (myTextTrack != NULL) {
                MCMovieChanged(myMC, myMovie);

                // stamp the movie as dirty and update our saved data
                (**myWindowObject).fIsDirty = true;
                (**myAppData).fMovieHasText = true;
                (**myAppData).fTextIsChapter = isChapter;
                (**myAppData).fTextTrack = myTextTrack;
                (**myAppData).fTextHandler = GetMediaHandler(GetTrackMedia(myTextTrack));
            }
        }
            myIsHandled = true;
            break;

        case IDM_CUT_TEXT_TRACK:
            // remove all existing text tracks from the specified movie
            myErr = QTText_RemoveIndTextTrack(myWindowObject, kAllTextTracks);
            if (myErr == noErr) {
                MCMovieChanged(myMC, myMovie);

                // stamp the movie as dirty and update our saved data
                (**myWindowObject).fIsDirty = true;
                (**myAppData).fMovieHasText = false;
                (**myAppData).fTextIsChapter = false;
                (**myAppData).fTextIsHREF = false;
                (**myAppData).fTextTrack = NULL;
                (**myAppData).fTextHandler = NULL;
            }
            myIsHandled = true;
            break;

        case IDM_CHAPTER_TRACK:
            // toggle whether the text track acts as a chapter track
            (**myAppData).fTextIsChapter = !(**myAppData).fTextIsChapter;
            QTText_SetTextTrackAsChapterTrack(myWindowObject, VideoMediaType, (**myAppData).fTextIsChapter);
            (**myWindowObject).fIsDirty = true;
            myIsHandled = true;
            break;

        case IDM_HREF_TRACK:
            // toggle whether the text track acts as an HREF track
            (**myAppData).fTextIsHREF = !(**myAppData).fTextIsHREF;
            QTText_SetTextTrackAsHREFTrack((**myAppData).fTextTrack, (**myAppData).fTextIsHREF);
            (**myWindowObject).fIsDirty = true;
            myIsHandled = true;
            break;

        default:
            break;

    } // switch (theMenuItem)

    return(myIsHandled);
}
void QTEffects_RespondToDialogSelection (OSErr theErr) { Boolean myDialogWasCancelled = false; short myResID = movieInDataForkResID; UInt16 myMovieIter; short mySrcMovieRefNum = 0; Movie myPrevSrcMovie = NULL; Track myPrevSrcTrack = NULL; Movie myNextSrcMovie = NULL; Track myNextSrcTrack = NULL; short myDestMovieRefNum = 0; FSSpec myFile; Boolean myIsSelected = false; Boolean myIsReplacing = false; StringPtr myPrompt = QTUtils_ConvertCToPascalString(kEffectsSaveMoviePrompt); StringPtr myFileName = QTUtils_ConvertCToPascalString(kEffectsSaveMovieFileName); Movie myDestMovie = NULL; Fixed myDestMovieWidth, myDestMovieHeight; ImageDescriptionHandle myDesc = NULL; Track videoTrackFX, videoTrackA, videoTrackB; Media videoMediaFX, videoMediaA, videoMediaB; TimeValue myCurrentDuration = 0; TimeValue myReturnedDuration; Boolean isFirstTransition = true; TimeValue myMediaTransitionDuration; TimeValue myMediaFXStartTime, myMediaFXDuration; OSType myEffectCode; long myFlags = createMovieFileDeleteCurFile | createMovieFileDontCreateResFile; long myLong; OSErr myErr = noErr; // standard parameter box has been dismissed, so remember that fact gEffectsDialog = 0L; myDialogWasCancelled = (theErr == userCanceledErr); // we're finished with the effect list and movie posters QTDisposeAtomContainer(gEffectList); if (gPosterA != NULL) KillPicture(gPosterA); if (gPosterB != NULL) KillPicture(gPosterB); // when the sign says stop, then stop if (myDialogWasCancelled) goto bail; // add atoms naming the sources to gEffectSample myLong = EndianU32_NtoB(kSourceOneName); QTInsertChild(gEffectSample, kParentAtomIsContainer, kEffectSourceName, 1, 0, sizeof(myLong), &myLong, NULL); myLong = EndianU32_NtoB(kSourceTwoName); QTInsertChild(gEffectSample, kParentAtomIsContainer, kEffectSourceName, 2, 0, sizeof(myLong), &myLong, NULL); // extract the 'what' atom to find out what kind of effect it is { QTAtom myEffectAtom; QTAtomID myEffectAtomID; long myEffectCodeSize; Ptr myEffectCodePtr; myEffectAtom 
= QTFindChildByIndex(gEffectSample, kParentAtomIsContainer, kParameterWhatName, kParameterWhatID, &myEffectAtomID); myErr = QTLockContainer(gEffectSample); BailError(myErr); myErr = QTGetAtomDataPtr(gEffectSample, myEffectAtom, &myEffectCodeSize, &myEffectCodePtr); BailError(myErr); if (myEffectCodeSize != sizeof(OSType)) { myErr = paramErr; goto bail; } myEffectCode = *(OSType *)myEffectCodePtr; // "tsk" myEffectCode = EndianU32_BtoN(myEffectCode); // because the data is read from an atom container myErr = QTUnlockContainer(gEffectSample); BailError(myErr); } // ask the user for the name of the new movie file QTFrame_PutFile(myPrompt, myFileName, &myFile, &myIsSelected, &myIsReplacing); if (!myIsSelected) goto bail; // deal with user cancelling // create a movie file for the destination movie myErr = CreateMovieFile(&myFile, FOUR_CHAR_CODE('TVOD'), 0, myFlags, &myDestMovieRefNum, &myDestMovie); BailError(myErr); // open the first file as a movie; call the first movie myPrevSrcMovie myErr = OpenMovieFile(&gSpecList[0], &mySrcMovieRefNum, fsRdPerm); BailError(myErr); myErr = NewMovieFromFile(&myPrevSrcMovie, mySrcMovieRefNum, NULL, NULL, 0, NULL); BailError(myErr); myErr = CloseMovieFile(mySrcMovieRefNum); BailError(myErr); // if the movie is shorter than kMinimumDuration, scale it to that length SetMovieTimeScale(myPrevSrcMovie, kTimeScale); myErr = QTEffects_GetFirstVideoTrackInMovie(myPrevSrcMovie, &myPrevSrcTrack); BailNil(myPrevSrcTrack); if (GetTrackDuration(myPrevSrcTrack) < kMinimumDuration) { myErr = ScaleTrackSegment(myPrevSrcTrack, 0, GetTrackDuration(myPrevSrcTrack), kMinimumDuration); BailError(myErr); } // find out how big the first movie is; we'll use it as the size of all our tracks GetTrackDimensions(myPrevSrcTrack, &myDestMovieWidth, &myDestMovieHeight); #if USES_MAKE_IMAGE_DESC_FOR_EFFECT // create a new sample description for the effect, // which is just an image description specifying the effect and its dimensions myErr = 
MakeImageDescriptionForEffect(myEffectCode, &myDesc); if (myErr != noErr) BailError(myErr); #else // create a new sample description for the effect, // which is just an image description specifying the effect and its dimensions myDesc = (ImageDescriptionHandle)NewHandleClear(sizeof(ImageDescription)); BailNil(myDesc); (**myDesc).idSize = sizeof(ImageDescription); (**myDesc).cType = myEffectCode; (**myDesc).hRes = 72L << 16; (**myDesc).vRes = 72L << 16; (**myDesc).dataSize = 0L; (**myDesc).frameCount = 1; (**myDesc).depth = 0; (**myDesc).clutID = -1; #endif // fill in the fields of the sample description (**myDesc).vendor = kAppleManufacturer; (**myDesc).temporalQuality = codecNormalQuality; (**myDesc).spatialQuality = codecNormalQuality; (**myDesc).width = FixRound(myDestMovieWidth); (**myDesc).height = FixRound(myDestMovieHeight); // add three video tracks to the destination movie: // - videoTrackFX is where the effects and stills live; it's user-visible. // - videoTrackA is where the "source A"s for effects live; it's hidden by the input map // - videoTrackB is where the "source B"s for effects live; it's hidden by the input map videoTrackFX = NewMovieTrack(myDestMovie, myDestMovieWidth, myDestMovieHeight, 0); BailNil(videoTrackFX); videoMediaFX = NewTrackMedia(videoTrackFX, VideoMediaType, kTimeScale, NULL, 0); BailNil(videoMediaFX); myErr = BeginMediaEdits(videoMediaFX); BailError(myErr); videoTrackA = NewMovieTrack(myDestMovie, myDestMovieWidth, myDestMovieHeight, 0); BailNil(videoTrackA); videoMediaA = NewTrackMedia(videoTrackA, VideoMediaType, kTimeScale, NULL, 0); BailNil(videoMediaA); videoTrackB = NewMovieTrack(myDestMovie, myDestMovieWidth, myDestMovieHeight, 0); BailNil(videoTrackB); videoMediaB = NewTrackMedia(videoTrackB, VideoMediaType, kTimeScale, NULL, 0); BailNil(videoMediaB); // create the input map { long myRefIndex1, myRefIndex2; QTAtomContainer myInputMap; QTAtom myInputAtom; OSType myInputType; QTNewAtomContainer(&myInputMap); // first input 
if (videoTrackA) { AddTrackReference(videoTrackFX, videoTrackA, kTrackModifierReference, &myRefIndex1); QTInsertChild(myInputMap, kParentAtomIsContainer, kTrackModifierInput, myRefIndex1, 0, 0, NULL, &myInputAtom); myInputType = EndianU32_NtoB(kTrackModifierTypeImage); QTInsertChild(myInputMap, myInputAtom, kTrackModifierType, 1, 0, sizeof(myInputType), &myInputType, NULL); myLong = EndianU32_NtoB(kSourceOneName); QTInsertChild(myInputMap, myInputAtom, kEffectDataSourceType, 1, 0, sizeof(myLong), &myLong, NULL); } // second input if (videoTrackB) { AddTrackReference(videoTrackFX, videoTrackB, kTrackModifierReference, &myRefIndex2); QTInsertChild(myInputMap, kParentAtomIsContainer, kTrackModifierInput, myRefIndex2, 0, 0, NULL, &myInputAtom); myInputType = EndianU32_NtoB(kTrackModifierTypeImage); QTInsertChild(myInputMap, myInputAtom, kTrackModifierType, 1, 0, sizeof(myInputType), &myInputType, NULL); myLong = EndianU32_NtoB(kSourceTwoName); QTInsertChild(myInputMap, myInputAtom, kEffectDataSourceType, 1, 0, sizeof(myLong), &myLong, NULL); } // set that map SetMediaInputMap(GetTrackMedia(videoTrackFX), myInputMap); QTDisposeAtomContainer(myInputMap); } myCurrentDuration = 0; #if MAKE_STILL_SECTIONS // copy the first sample of the first video track of the first movie to videoTrackFX, with duration kStillDuration. 
myErr = CopyPortionOfTrackToTrack(myPrevSrcTrack, eStartPortion + eMiddlePortion, videoTrackFX, myCurrentDuration, &myReturnedDuration); BailError(myErr); myCurrentDuration += myReturnedDuration; #endif // now process any remaining files myMovieIter = 1; while (myMovieIter < gSpecCount) { // open the next file as a movie; call it nextSourceMovie myErr = OpenMovieFile(&gSpecList[myMovieIter], &mySrcMovieRefNum, fsRdPerm); BailError(myErr); myErr = NewMovieFromFile(&myNextSrcMovie, mySrcMovieRefNum, NULL, NULL, 0, NULL); BailError(myErr); // we're done with the movie file, so close it myErr = CloseMovieFile(mySrcMovieRefNum); BailError(myErr); // if the movie is shorter than kMinimumDuration, scale it to that length SetMovieTimeScale(myNextSrcMovie, kTimeScale); myErr = QTEffects_GetFirstVideoTrackInMovie(myNextSrcMovie, &myNextSrcTrack); BailNil(myNextSrcTrack); if (GetTrackDuration(myNextSrcTrack) < kMinimumDuration) { myErr = ScaleTrackSegment(myNextSrcTrack, 0, GetTrackDuration(myNextSrcTrack), kMinimumDuration); BailError(myErr); } // create a transition effect from the previous source movie's first video sample to the next source movie's first video sample // (the effect should have duration kEffectDuration); // this involves adding one sample to each of the three video tracks: // sample from previous source movie -> videoTrackA myErr = QTEffects_CopyPortionOfTrackToTrack(myPrevSrcTrack, eFinishPortion, videoTrackA, myCurrentDuration, &myReturnedDuration); BailError(myErr); // sample from next source movie -> videoTrackB myErr = QTEffects_CopyPortionOfTrackToTrack(myNextSrcTrack, eStartPortion, videoTrackB, myCurrentDuration, &myReturnedDuration); BailError(myErr); // effect sample -> videoTrackFX if (isFirstTransition) { myMediaTransitionDuration = myReturnedDuration; myMediaFXStartTime = GetMediaDuration(videoMediaFX); myErr = AddMediaSample(videoMediaFX, gEffectSample, 0, GetHandleSize(gEffectSample), myMediaTransitionDuration, 
(SampleDescriptionHandle)myDesc, 1, 0, NULL); BailError(myErr); myMediaFXDuration = GetMediaDuration(videoMediaFX) - myMediaFXStartTime; isFirstTransition = false; } myErr = InsertMediaIntoTrack(videoTrackFX, myCurrentDuration, myMediaFXStartTime, myMediaFXDuration, FixRatio(myReturnedDuration, myMediaTransitionDuration)); BailError(myErr); myCurrentDuration += myReturnedDuration; #if MAKE_STILL_SECTIONS // copy the first video sample of myNextSrcMovie to videoTrackFX, with duration kStillDuration. myErr = QTEffects_CopyPortionOfTrackToTrack(myNextSrcTrack, eMiddlePortion + (myMovieIter + 1 == theSpecCount) ? eFinishPortion : 0, videoTrackFX, myCurrentDuration, &myReturnedDuration); BailError(myErr); myCurrentDuration += myReturnedDuration; #endif // MAKE_STILL_SECTIONS // dispose of previous source movie. DisposeMovie(myPrevSrcMovie); myPrevSrcMovie = myNextSrcMovie; myPrevSrcTrack = myNextSrcTrack; myNextSrcMovie = NULL; myNextSrcTrack = NULL; myMovieIter++; } // while myErr = EndMediaEdits(videoMediaFX); BailError(myErr); myErr = AddMovieResource(myDestMovie, myDestMovieRefNum, &myResID, "\pMovie 1"); BailError(myErr); CloseMovieFile(myDestMovieRefNum); if (myPrevSrcMovie != NULL) DisposeMovie(myPrevSrcMovie); DisposeMovie(myDestMovie); bail: free(myPrompt); free(myFileName); QTDisposeAtomContainer(gEffectSample); DisposeHandle((Handle)myDesc); return; }
// Construct a reader that decodes an audio file via QuickTime's movie audio
// extraction API. Sets m_error (and returns early) on any failure; on
// success either decodes the whole file immediately (DecodeAtOnce) or
// starts a background DecodeThread (DecodeThreaded).
//
// source     : the file to decode (must be available locally)
// decodeMode : DecodeAtOnce => decode synchronously here; otherwise threaded
// mode       : cache mode passed through to CodedAudioFileReader
// targetRate : target sample rate passed through to CodedAudioFileReader
// reporter   : optional progress reporter (may be null)
//
// Fixes: final debug line printed a doubled quote before m_error
// ("error is \"\"..."), and the sample rate (ASBD mSampleRate, which is in
// Hz) was mislabelled "kHz".
QuickTimeFileReader::QuickTimeFileReader(FileSource source,
                                         DecodeMode decodeMode,
                                         CacheMode mode,
                                         size_t targetRate,
                                         ProgressReporter *reporter) :
    CodedAudioFileReader(mode, targetRate),
    m_source(source),
    m_path(source.getLocalFilename()),
    m_d(new D),
    m_reporter(reporter),
    m_cancelled(false),
    m_completion(0),
    m_decodeThread(0)
{
    m_channelCount = 0;
    m_fileRate = 0;

    Profiler profiler("QuickTimeFileReader::QuickTimeFileReader", true);

    SVDEBUG << "QuickTimeFileReader: path is \"" << m_path << "\"" << endl;

    long QTversion;

#ifdef WIN32
    InitializeQTML(0); // FIXME should check QT version
#else
    // Require QuickTime 7 or newer (version is BCD-encoded: 0x07000000)
    m_d->err = Gestalt(gestaltQuickTime,&QTversion);
    if ((m_d->err != noErr) || (QTversion < 0x07000000)) {
        m_error = QString("Failed to find compatible version of QuickTime (version 7 or above required)");
        return;
    }
#endif

    EnterMovies();

    Handle dataRef;
    OSType dataRefType;

//    CFStringRef URLString = CFStringCreateWithCString
//        (0, m_path.toLocal8Bit().data(), 0);

    // Build a CFURL data reference for the local file path
    QByteArray ba = m_path.toLocal8Bit();

    CFURLRef url = CFURLCreateFromFileSystemRepresentation
        (kCFAllocatorDefault,
         (const UInt8 *)ba.data(),
         (CFIndex)ba.length(),
         false);

//    m_d->err = QTNewDataReferenceFromURLCFString
    m_d->err = QTNewDataReferenceFromCFURL
        (url, 0, &dataRef, &dataRefType);

    if (m_d->err) {
        m_error = QString("Error creating data reference for QuickTime decoder: code %1").arg(m_d->err);
        return;
    }

    short fileID = movieInDataForkResID;
    short flags = 0;

    m_d->err = NewMovieFromDataRef
        (&m_d->movie, flags, &fileID, dataRef, dataRefType);

    DisposeHandle(dataRef);

    if (m_d->err) {
        m_error = QString("Error creating new movie for QuickTime decoder: code %1").arg(m_d->err);
        return;
    }

    // Refuse to decode DRM-protected content: query the media handler of
    // the first enabled sound track for the kQTDRMPropertyID_IsProtected
    // property. err == 1 is used as a local "couldn't even query" marker.
    Boolean isProtected = 0;
    Track aTrack = GetMovieIndTrackType
        (m_d->movie, 1, SoundMediaType, movieTrackMediaType | movieTrackEnabledOnly);

    if (aTrack) {
        Media aMedia = GetTrackMedia(aTrack);	// get the track media
        if (aMedia) {
            MediaHandler mh = GetMediaHandler(aMedia);	// get the media handler we can query
            if (mh) {
                m_d->err = QTGetComponentProperty(mh,
                                                  kQTPropertyClass_DRM,
                                                  kQTDRMPropertyID_IsProtected,
                                                  sizeof(Boolean), &isProtected,nil);
            } else {
                m_d->err = 1;
            }
        } else {
            m_d->err = 1;
        }
    } else {
        m_d->err = 1;
    }

    if (m_d->err && m_d->err != kQTPropertyNotSupportedErr) {
        m_error = QString("Error checking for DRM in QuickTime decoder: code %1").arg(m_d->err);
        return;
    } else if (!m_d->err && isProtected) {
        m_error = QString("File is protected with DRM");
        return;
    } else if (m_d->err == kQTPropertyNotSupportedErr && !isProtected) {
        std::cerr << "QuickTime: File is not protected with DRM" << std::endl;
    }

    if (m_d->movie) {
        SetMovieActive(m_d->movie, TRUE);
        m_d->err = GetMoviesError();
        if (m_d->err) {
            m_error = QString("Error in QuickTime decoder activation: code %1").arg(m_d->err);
            return;
        }
    } else {
        m_error = QString("Error in QuickTime decoder: Movie object not valid");
        return;
    }

    m_d->err = MovieAudioExtractionBegin
        (m_d->movie, 0, &m_d->extractionSessionRef);
    if (m_d->err) {
        m_error = QString("Error in QuickTime decoder extraction init: code %1").arg(m_d->err);
        return;
    }

    m_d->err = MovieAudioExtractionGetProperty
        (m_d->extractionSessionRef,
         kQTPropertyClass_MovieAudioExtraction_Audio,
         kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
         sizeof(m_d->asbd),
         &m_d->asbd,
         nil);
    if (m_d->err) {
        m_error = QString("Error in QuickTime decoder property get: code %1").arg(m_d->err);
        return;
    }

    m_channelCount = m_d->asbd.mChannelsPerFrame;
    m_fileRate = m_d->asbd.mSampleRate;

    // mSampleRate is expressed in Hz (frames per second)
    std::cerr << "QuickTime: " << m_channelCount << " channels, " << m_fileRate << " Hz" << std::endl;

    // Ask the extraction session for packed native-endian floats instead of
    // whatever the source format is
    m_d->asbd.mFormatFlags = kAudioFormatFlagIsFloat |
        kAudioFormatFlagIsPacked |
        kAudioFormatFlagsNativeEndian;
    m_d->asbd.mBitsPerChannel = sizeof(float) * 8;
    m_d->asbd.mBytesPerFrame = sizeof(float) * m_d->asbd.mChannelsPerFrame;
    m_d->asbd.mBytesPerPacket = m_d->asbd.mBytesPerFrame;

    m_d->err = MovieAudioExtractionSetProperty
        (m_d->extractionSessionRef,
         kQTPropertyClass_MovieAudioExtraction_Audio,
         kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
         sizeof(m_d->asbd),
         &m_d->asbd);

    if (m_d->err) {
        m_error = QString("Error in QuickTime decoder property set: code %1").arg(m_d->err);
        m_channelCount = 0;
        return;
    }

    // Single interleaved buffer of blockSize frames for extraction
    m_d->buffer.mNumberBuffers = 1;
    m_d->buffer.mBuffers[0].mNumberChannels = m_channelCount;
    m_d->buffer.mBuffers[0].mDataByteSize =
        sizeof(float) * m_channelCount * m_d->blockSize;
    m_d->data = new float[m_channelCount * m_d->blockSize];
    m_d->buffer.mBuffers[0].mData = m_d->data;

    initialiseDecodeCache();

    if (decodeMode == DecodeAtOnce) {

        if (m_reporter) {
            connect(m_reporter, SIGNAL(cancelled()), this, SLOT(cancelled()));
            m_reporter->setMessage
                (tr("Decoding %1...").arg(QFileInfo(m_path).fileName()));
        }

        // Pull blocks until a short read signals end of stream
        while (1) {

            UInt32 framesRead = m_d->blockSize;
            UInt32 extractionFlags = 0;
            m_d->err = MovieAudioExtractionFillBuffer
                (m_d->extractionSessionRef, &framesRead, &m_d->buffer,
                 &extractionFlags);
            if (m_d->err) {
                m_error = QString("Error in QuickTime decoding: code %1")
                    .arg(m_d->err);
                break;
            }

            //!!! progress?

//    std::cerr << "Read " << framesRead << " frames (block size " << m_d->blockSize << ")" << std::endl;

            // QuickTime buffers are interleaved unless specified otherwise
            addSamplesToDecodeCache(m_d->data, framesRead);

            if (framesRead < m_d->blockSize) break;
        }

        finishDecodeCache();
        endSerialised();

        m_d->err = MovieAudioExtractionEnd(m_d->extractionSessionRef);
        if (m_d->err) {
            m_error = QString("Error ending QuickTime extraction session: code %1").arg(m_d->err);
        }

        m_completion = 100;

    } else {
        if (m_reporter) m_reporter->setProgress(100);

        if (m_channelCount > 0) {
            m_decodeThread = new DecodeThread(this);
            m_decodeThread->start();
        }
    }

    std::cerr << "QuickTimeFileReader::QuickTimeFileReader: frame count is now " << getFrameCount() << ", error is \"" << m_error << "\"" << std::endl;
}