/*
 *  PsychSetMovieTimeIndex()  -- Set current playback time of movie.
 */
double PsychSetMovieTimeIndex(int moviehandle, double timeindex)
{
    Movie   theMovie;
    double  oldtime;
    
    if (moviehandle < 0 || moviehandle >= PSYCH_MAX_MOVIES) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided!");
    }
    
    // Fetch references to objects we need:
    theMovie = movieRecordBANK[moviehandle].theMovie;    
    if (theMovie == NULL) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided. No movie associated with this handle !!!");
    }
    
    // Retrieve current timeindex:
    oldtime = (double) GetMovieTime(theMovie, NULL) / (double) GetMovieTimeScale(theMovie);
    
    // Set new timeindex:
    SetMovieTimeValue(theMovie, (TimeValue) (((timeindex * (double) GetMovieTimeScale(theMovie))) + 0.5f));

    // Check if end of movie is reached. Rewind, if so...
    if (IsMovieDone(theMovie) && movieRecordBANK[moviehandle].loopflag > 0) {
        if (GetMovieRate(theMovie)>0) {
            GoToBeginningOfMovie(theMovie);
        } else {
            GoToEndOfMovie(theMovie);
        }
    }
        
    MoviesTask(theMovie, 0);
    
    // Return old value:
    return(oldtime);
}
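A minimal usage sketch, assuming 'myhandle' is a valid handle to a previously opened movie (the name is hypothetical, not from the source above):

    double previous = PsychSetMovieTimeIndex(myhandle, 5.0); // seek to t = 5.0 s, keep the old position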
Example #2
ComponentResult create_placeholder_track(Movie movie, Track *placeholderTrack, TimeValue duration, Handle dataRef, OSType dataRefType) {
    SampleDescriptionHandle sdH = NULL;
    Media placeholderMedia;
    TimeScale movieTimeScale;
    ComponentResult result = noErr;

    movieTimeScale = GetMovieTimeScale(movie);

    sdH = (SampleDescriptionHandle)NewHandleClear(sizeof(SampleDescription));
    (*sdH)->descSize = sizeof(SampleDescription);

    *placeholderTrack = NewMovieTrack(movie, 0, 0, kNoVolume);
    placeholderMedia = NewTrackMedia(*placeholderTrack, BaseMediaType, movieTimeScale, dataRef, dataRefType);

    result = AddMediaSampleReference(placeholderMedia, 0, 1, duration, sdH, 1, 0, NULL);
    if(result != noErr)
        goto bail;

    result = InsertMediaIntoTrack(*placeholderTrack, -1, 0, duration, fixed1);

bail:
    if (sdH)
        DisposeHandle((Handle) sdH);
    return(result);
}
void platformSetSoundPlayPoint (long theSound, float theTime)
{
#ifdef QuickTimeInstalled
	TimeScale scale = GetMovieTimeScale((Movie)theSound);
	SetMovieTimeValue((Movie)theSound, (long)(scale * theTime));
#endif // QuickTimeInstalled
}
/** Internal helper function: Returns fps rate of movie and optionally
 *  the total number of video frames in the movie. Framecount is determined
 *  by stepping through the whole movie and counting frames. This can take
 *  significant time on big movie files.
 *
 *  Always returns fps as a double. Counts and returns the full framecount
 *  only if nrframes is non-NULL.
 */
double PsychDetermineMovieFramecountAndFps(Movie theMovie, int* nrframes)
{
    // Count total number of videoframes: This code is derived from Apple
    // example code.
    long		myCount = -1;
    short		myFlags;
    TimeValue           myTime = 0;
    TimeValue           myDuration = 0;
    OSType		myTypes[1];
    // We want video samples.
    myTypes[0] = VisualMediaCharacteristic;
    // We want to begin with the first frame in the movie:
    myFlags = nextTimeStep + nextTimeEdgeOK;
    
    // If nrframes==NULL, only fps is requested, so we sample just the first
    // three frames; otherwise we step through and count every frame.
    while (myTime >= 0 && (myCount<2 || nrframes!=NULL)) {
        myCount++;        
        // look for the next frame in the track; when there are no more frames,
        // myTime is set to -1, so we'll exit the while loop
        GetMovieNextInterestingTime(theMovie, myFlags, 1, myTypes, myTime, FloatToFixed(1), &myTime, &myDuration);        
        // after the first interesting time, don't include the time we're currently at
        myFlags = nextTimeStep;
    }    
    
    // Return optional count of frames:
    if (nrframes) *nrframes = (int) myCount;
    
    GoToBeginningOfMovie(theMovie);
    MoviesTask(theMovie, 0);
    
    // Compute and return frame rate in fps as (Ticks per second / Duration of single frame in ticks): 
    return((double) GetMovieTimeScale(theMovie) / (double) myDuration);    
}
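A hedged usage sketch of both modes, with the fps arithmetic worked through on assumed values:

    double fps;
    int nframes;

    fps = PsychDetermineMovieFramecountAndFps(theMovie, NULL);      // fast: samples only the first 3 frames
    fps = PsychDetermineMovieFramecountAndFps(theMovie, &nframes);  // slow: steps through the whole movie

    // Worked example of the return value: with a timescale of 600 ticks/s and
    // a single-frame duration of 20 ticks, fps = 600 / 20 = 30.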
/*----------------------------------------------------------------------
|   AP4_Track::Clone
+---------------------------------------------------------------------*/
AP4_Track* 
AP4_Track::Clone(AP4_Result* result)
{
    AP4_SyntheticSampleTable* sample_table = new AP4_SyntheticSampleTable();
    
    // default return value
    if (result) *result = AP4_SUCCESS;
    
    // add clones of the sample descriptions to the new sample table
    for (unsigned int i=0; ;i++) {
        AP4_SampleDescription* sample_description = GetSampleDescription(i);
        if (sample_description == NULL) break;
        sample_table->AddSampleDescription(sample_description->Clone());
    }

    AP4_Sample  sample;
    AP4_Ordinal index = 0;
    while (AP4_SUCCEEDED(GetSample(index, sample))) {
        AP4_ByteStream* data_stream;
        data_stream = sample.GetDataStream();
        sample_table->AddSample(*data_stream,
                                sample.GetOffset(),
                                sample.GetSize(),
                                sample.GetDuration(),
                                sample.GetDescriptionIndex(),
                                sample.GetDts(),
                                sample.GetCtsDelta(),
                                sample.IsSync());
        AP4_RELEASE(data_stream); // release our ref, the table has kept its own ref.
        index++;
    }    
    
    // create the cloned track
    AP4_Track* clone = new AP4_Track(GetType(),
                                     sample_table,
                                     GetId(),
                                     GetMovieTimeScale(),
                                     GetDuration(),
                                     GetMediaTimeScale(),
                                     GetMediaDuration(),
                                     GetTrackLanguage(),
                                     GetWidth(),
                                     GetHeight());
                                     
    return clone;
}
//---------------------------------------------------------------------------
float ofQuickTimePlayer::getDuration() const{
	if( !isLoaded() ){
		ofLogError("ofQuickTimePlayer") << "getDuration(): movie not loaded";
		return 0.0;
	}
	
	//--------------------------------------
	#ifdef OF_VIDEO_PLAYER_QUICKTIME
	//--------------------------------------

		return (float) (GetMovieDuration (moviePtr) / (double) GetMovieTimeScale (moviePtr));

	//--------------------------------------
	#endif
	//--------------------------------------

}
//---------------------------------------------------------------------------
float ofQuickTimePlayer::getDuration(){
	if( !isLoaded() ){
		ofLog(OF_LOG_ERROR, "ofQuickTimePlayer: movie not loaded!");
		return 0.0;
	}
	
	//--------------------------------------
	#ifdef OF_VIDEO_PLAYER_QUICKTIME
	//--------------------------------------

		return (float) (GetMovieDuration (moviePtr) / (double) GetMovieTimeScale (moviePtr));

	//--------------------------------------
	#endif
	//--------------------------------------

}
/*
 *  PsychQTGetMovieTimeIndex()  -- Return current playback time of movie.
 */
double PsychQTGetMovieTimeIndex(int moviehandle)
{
    Movie   theMovie;
    
    if (moviehandle < 0 || moviehandle >= PSYCH_MAX_MOVIES) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided!");
    }
    
    // Fetch references to objects we need:
    theMovie = movieRecordBANK[moviehandle].theMovie;    
    if (theMovie == NULL) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided. No movie associated with this handle !!!");
    }

    // Retrieve timeindex:
    return((double) GetMovieTime(theMovie, NULL) / (double) GetMovieTimeScale(theMovie));
}
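The matching one-line usage sketch ('myhandle' again hypothetical):

    double t = PsychQTGetMovieTimeIndex(myhandle); // current playback position in seconds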
Example #9
OSStatus MovieMaker::addFrame()
{
	OSStatus	error = noErr;
	Handle	compressedData;
	short   syncFlag;
	long	dataSize;
	UnsignedWide now;

	CGrafPtr oldPort;
	GDHandle oldGDeviceH;

	GetGWorld(&oldPort, &oldGDeviceH);
	SetGWorld(gworld, nil);
	
	// Compress the frame and add it to the movie

	error = SCCompressSequenceFrame(ci,GetPortPixMap(gworld),&rect,&compressedData,&dataSize,&syncFlag);
	
	Microseconds(&now);

	if (error == noErr)
	{
		double duration = (now.lo - lastFrameTime.lo);	// frame duration in microseconds
		duration *= GetMovieTimeScale(movie);			// scale to movie time units per second...
		duration *= 1.0 / 1000000.0;					// ...then divide by 1e6 microseconds per second

		error = AddMediaSample(
				media,
				compressedData,
				0,
				dataSize,
				(TimeValue)duration,
				(SampleDescriptionHandle)idh,
				1,
				syncFlag,
				nil);
		
	}

	lastFrameTime = now;

	SetGWorld(oldPort, oldGDeviceH);
	
	return error;
}
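The duration conversion above reduces to units = usecs * timescale / 1e6; a worked example on assumed values:

	// Assumed values: timescale of 600 units/s, 33333 us between frames:
	// 33333 * 600 / 1000000 ~= 20 movie time units per frame (i.e. ~30 fps).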
//---------------------------------------------------------------------------
float ofVideoPlayer::getDuration(){

	//--------------------------------------
	#ifdef OF_VIDEO_PLAYER_QUICKTIME
	//--------------------------------------

		return (float) (GetMovieDuration (moviePtr) / (double) GetMovieTimeScale (moviePtr));

	//--------------------------------------
	#else
	//--------------------------------------

		return gstUtils.getDuration();

	//--------------------------------------
	#endif
	//--------------------------------------

}
//---------------------------------------------------------------------------
void ofVideoPlayer::setFrame(int frame){

	//--------------------------------------
	#ifdef OF_VIDEO_PLAYER_QUICKTIME
	//--------------------------------------

	// frame 0 = first frame...

	// this is the simple way...
	//float durationPerFrame = getDuration() / getTotalNumFrames();

	// freezing the rate, seeking, and then unfreezing seems to work a lot
	// better than just calling SetMovieTimeValue();

	if (!bPaused) SetMovieRate(moviePtr, X2Fix(0));

	// this is better with mpeg, etc:
	double frameRate = 0;
	double movieTimeScale = 0;
	MovieGetStaticFrameRate(moviePtr, &frameRate);
	movieTimeScale = GetMovieTimeScale(moviePtr);

	if (frameRate > 0){
		double frameDuration = 1 / frameRate;
		TimeValue t = (TimeValue)(frame * frameDuration * movieTimeScale);
		SetMovieTimeValue(moviePtr, t);
		MoviesTask(moviePtr, 0);
	}

	if (!bPaused) SetMovieRate(moviePtr, X2Fix(speed));

	//--------------------------------------
	#else
	//--------------------------------------

		gstUtils.setFrame(frame);

	//--------------------------------------
	#endif
	//--------------------------------------

}
//---------------------------------------------------------------------------
float ofVideoPlayer::getDuration(){

	//--------------------------------------
	#ifdef OF_VIDEO_PLAYER_QUICKTIME
	//--------------------------------------

		return GetMovieDuration (moviePtr) / (double) GetMovieTimeScale (moviePtr);

	//--------------------------------------
	#else
	//--------------------------------------

		return fobsDecoder->getDurationSeconds();

	//--------------------------------------
	#endif
	//--------------------------------------

}
Example #13
ComponentResult FFAvi_MovieImportIdle(ff_global_ptr storage, long inFlags, long *outFlags) {
	ComponentResult err = noErr;
	TimeValue currentIdleTime = GetMovieTime(storage->movie, NULL);
	TimeScale movieTimeScale = GetMovieTimeScale(storage->movie);
	int addSamples = false;
	
	storage->idlesSinceLastAdd++;
	
	// Add samples if playback appears stalled (no time progress over >5 idles)
	// or if less than 5 seconds of media are loaded ahead of the playhead.
	if ((currentIdleTime == storage->lastIdleTime && storage->idlesSinceLastAdd > 5) ||
		storage->loadedTime < currentIdleTime + 5*movieTimeScale)
	{
		storage->idlesSinceLastAdd = 0;
		addSamples = true;
	}
	
	err = import_with_idle(storage, inFlags | movieImportWithIdle, outFlags, 0, 1000, addSamples);
	
	storage->lastIdleTime = currentIdleTime;
	return err;
}
Example #14
OSErr QTInfo_GoToPosterFrame (Movie theMovie, MovieController theMC)
{
    TimeRecord			myTimeRecord;
    ComponentResult		myErr = noErr;

    // stop the movie from playing
    myErr = MCDoAction(theMC, mcActionPlay, (void *)0L);
    if (myErr != noErr)
        goto bail;

    // set up a time record with the desired movie time, scale, and base
    myTimeRecord.value.hi = 0;
    myTimeRecord.value.lo = GetMoviePosterTime(theMovie);
    myTimeRecord.base = GetMovieTimeBase(theMovie);
    myTimeRecord.scale = GetMovieTimeScale(theMovie);

    myErr = MCDoAction(theMC, mcActionGoToTime, &myTimeRecord);

bail:
    return((OSErr)myErr);
}
////////////////////////////////////////////////////////////////////////////////
// virtual
bool LLMediaImplQuickTime::seek( double time )
{
	if ( mMovieController )
	{
		TimeRecord when;
		when.scale = GetMovieTimeScale( mMovieHandle );
		when.base = 0;

		// 'time' is in (floating point) seconds.  The timebase time will be in 'units', where
		// there are 'scale' units per second.
		SInt64 raw_time = ( SInt64 )( time * (double)( when.scale ) );

		when.value.hi = ( SInt32 )( raw_time >> 32 );
		when.value.lo = ( SInt32 )( ( raw_time & 0x00000000FFFFFFFF ) );

		MCDoAction( mMovieController, mcActionGoToTime, &when );

		return true;
	}

	return false;
}
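The 64-bit hi/lo TimeRecord packing recurs across these examples; here is a sketch of a helper that factors it out (the function name is hypothetical, not part of any of the sources above):

static void secondsToTimeRecord( Movie movie, double seconds, TimeRecord* out )
{
	// There are 'scale' movie time units per second; split the 64-bit unit
	// count into the hi/lo halves that TimeRecord stores.
	SInt64 units = ( SInt64 )( seconds * (double)GetMovieTimeScale( movie ) );

	out->scale = GetMovieTimeScale( movie );
	out->base = GetMovieTimeBase( movie );
	out->value.hi = ( SInt32 )( units >> 32 );
	out->value.lo = ( SInt32 )( units & 0x00000000FFFFFFFF );
}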
Example #16
    virtual bool Seek(double pos)
    {
        FLEXT_ASSERT(movie);
        FLEXT_ASSERT(extractionSessionRef != nil);

        QTThread qt(movie);

        TimeRecord timeRec;
        timeRec.scale	= GetMovieTimeScale(movie);
        timeRec.base	= NULL;
        unsigned long long fpos = (long long)(pos*timeRec.scale);
        timeRec.value.hi = int(fpos>>32);
        timeRec.value.lo = int(fpos&((1LL<<32)-1));

        // Set the extraction current time.  The duration will 
        // be determined by how much is pulled.
        OSStatus err = MovieAudioExtractionSetProperty(extractionSessionRef,
                    kQTPropertyClass_MovieAudioExtraction_Movie,
                    kQTMovieAudioExtractionMoviePropertyID_CurrentTime,
                    sizeof(TimeRecord), &timeRec);

        return err == 0;
    }
Example #17
/* Import function for movies that lack an index.
 * Supports progressive importing, but will not idle if maxFrames == 0.
 */
ComponentResult import_with_idle(ff_global_ptr storage, long inFlags, long *outFlags, int minFrames, int maxFrames, bool addSamples) {
    SampleReference64Record sampleRec;
    AVFormatContext *formatContext;
    AVCodecContext *codecContext;
    AVStream *stream;
    AVPacket packet;
    NCStream *ncstream;
    ComponentResult dataResult; //used for data handler operations that can fail.
    ComponentResult result;
    TimeValue minLoadedTime;
    TimeValue movieTimeScale = GetMovieTimeScale(storage->movie);
    int64_t availableSize, margin;
    long idling;
    int readResult, framesProcessed, i;
    int firstPts[storage->map_count];
    short flags;

    formatContext = storage->format_context;
    result = noErr;
    minLoadedTime = -1;
    availableSize = 0;
    idling = (inFlags & movieImportWithIdle);
    framesProcessed = 0;

    if(idling) {
        //get the size of immediately available data
        if(storage->dataHandlerSupportsWideOffsets) {
            wide wideSize;

            dataResult = DataHGetAvailableFileSize64(storage->dataHandler, &wideSize);
            if(dataResult == noErr) availableSize = ((int64_t)wideSize.hi << 32) + wideSize.lo;
        } else {
            long longSize;

            dataResult = DataHGetAvailableFileSize(storage->dataHandler, &longSize);
            if(dataResult == noErr) availableSize = longSize;
        }
    }

    for(i = 0; i < storage->map_count; i++) {
        ncstream = &storage->stream_map[i];
        Media media = ncstream->media;

        firstPts[i] = -1;
        if(media && ncstream->duration == -1)
            ncstream->duration = GetMediaDuration(media);
    }

    while((readResult = av_read_frame(formatContext, &packet)) == 0) {
        bool trustPacketDuration = true;
        int64_t dts = packet.dts;
        ncstream = &storage->stream_map[packet.stream_index];
        stream = ncstream->str;
        codecContext = stream->codec;
        flags = 0;

        if (!ncstream->valid)
            continue;

        if((packet.flags & AV_PKT_FLAG_KEY) == 0)
            flags |= mediaSampleNotSync;

        if(IS_NUV(storage->componentType) && codecContext->codec_id == CODEC_ID_MP3) trustPacketDuration = false;
        if(IS_FLV(storage->componentType)) trustPacketDuration = false;

        memset(&sampleRec, 0, sizeof(sampleRec));
        sampleRec.dataOffset.hi = packet.pos >> 32;
        sampleRec.dataOffset.lo = (uint32_t)packet.pos;
        sampleRec.dataSize = packet.size;
        sampleRec.sampleFlags = flags;

        if (packet.pos <= 0)
            continue;

        if(firstPts[packet.stream_index] < 0)
            firstPts[packet.stream_index] = packet.pts;

        if(packet.size > storage->largestPacketSize)
            storage->largestPacketSize = packet.size;

        if(sampleRec.dataSize <= 0)
            continue;

        if(codecContext->codec_type == AVMEDIA_TYPE_AUDIO && !ncstream->vbr)
            sampleRec.numberOfSamples = (packet.size * ncstream->asbd.mFramesPerPacket) / ncstream->asbd.mBytesPerPacket;
        else
            sampleRec.numberOfSamples = 1; //packet.duration;

        //add any samples waiting to be added
        if(ncstream->lastSample.numberOfSamples > 0) {
            //calculate the duration of the sample before adding it
            ncstream->lastSample.durationPerSample = (dts - ncstream->lastdts) * ncstream->base.num;

            AddMediaSampleReferences64(ncstream->media, ncstream->sampleHdl, 1, &ncstream->lastSample, NULL);
        }

#if 0
        if (0) {
            Codecprintf(NULL, "Stream:%d Pts:%lld Dts:%lld DtsUsed:%lld Pos:%lld Size:%d\n", packet.stream_index, packet.pts, packet.dts, dts, packet.pos, packet.size);
            Codecprintf(NULL, "Stream:%d Nsamples:%ld RealDuration:%d CalcDuration:%ld TimeDts:%lld TimeDurations:%lld FrameDts:%d FrameGuess:%lld\n",
                        packet.stream_index, sampleRec.numberOfSamples, packet.duration, ncstream->lastSample.durationPerSample,
                        packet.dts, ncstream->timeByDurations, (int)((packet.dts * stream->time_base.num * ncstream->asbd.mSampleRate) / stream->time_base.den),
                        ncstream->timeByFrames);

            ncstream->timeByDurations += packet.duration;
            ncstream->timeByFrames += ncstream->asbd.mFramesPerPacket;
        }
#endif

        ncstream->lastSample = sampleRec;
        ncstream->lastdts = packet.dts;

        // If this is a nuv file, then we want to set the duration to zero.
        // This is because the nuv container doesn't have the framesize info
        // for audio.
        if(packet.duration == 0 || !trustPacketDuration) {
            //no duration, we'll have to wait for the next packet to calculate it
            // keep the duration of the last sample, so we can use it if it's the last frame
            sampleRec.durationPerSample = ncstream->lastSample.durationPerSample;
        } else {
            ncstream->lastSample.numberOfSamples = 0;

            if(codecContext->codec_type == AVMEDIA_TYPE_AUDIO && !ncstream->vbr)
                sampleRec.durationPerSample = 1;
            else
                sampleRec.durationPerSample = ncstream->base.num * packet.duration;

            AddMediaSampleReferences64(ncstream->media, ncstream->sampleHdl, 1, &sampleRec, NULL);
        }

        framesProcessed++;

        //if we're idling, try really not to read past the end of available data
        //otherwise we will cause blocking i/o.
        if(idling && framesProcessed >= minFrames && availableSize > 0 && availableSize < storage->dataSize) {
            margin = availableSize - (packet.pos + packet.size);
            if(margin < (storage->largestPacketSize * 8)) { // 8x fudge factor for comfortable margin, could be tweaked.
                av_free_packet(&packet);
                break;
            }
        }

        av_free_packet(&packet);

        //stop processing if we've hit the max frame limit
        if(maxFrames > 0 && framesProcessed >= maxFrames)
            break;
    }

    if(readResult != 0) {
        //if readResult != 0, we've hit the end of the stream.
        //add any pending last frames.
        for(i = 0; i < formatContext->nb_streams; i++) {
            ncstream = &storage->stream_map[i];
            if(ncstream->lastSample.numberOfSamples > 0)
                AddMediaSampleReferences64(ncstream->media, ncstream->sampleHdl, 1, &ncstream->lastSample, NULL);
        }
    }

    for(i = 0; i < storage->map_count && result == noErr; i++) {
        ncstream = &storage->stream_map[i];
        Media media = ncstream->media;

        if(ncstream->valid && (addSamples || readResult != 0)) {
            Track track = GetMediaTrack(media);
            TimeScale mediaTimeScale = GetMediaTimeScale(media);
            TimeValue prevDuration = ncstream->duration;
            TimeValue mediaDuration = GetMediaDuration(media);
            TimeValue addedDuration = mediaDuration - prevDuration;
            TimeValue mediaLoadedTime = movieTimeScale * mediaDuration / mediaTimeScale;

            if(minLoadedTime == -1 || mediaLoadedTime < minLoadedTime)
                minLoadedTime = mediaLoadedTime;

            if(addedDuration > 0) {
                result = InsertMediaIntoTrack(track, -1, prevDuration, addedDuration, fixed1);
            }

            if (!prevDuration && firstPts[i] > 0) {
                TimeRecord startTimeRec;
                startTimeRec.value.hi = 0;
                startTimeRec.value.lo = firstPts[i] * formatContext->streams[i]->time_base.num;
                startTimeRec.scale = formatContext->streams[i]->time_base.den;
                startTimeRec.base = NULL;
                ConvertTimeScale(&startTimeRec, movieTimeScale);
                SetTrackOffset(track, startTimeRec.value.lo);
            }
            ncstream->duration = -1;
        }
    }

    //set the loaded time to the length of the shortest track.
    if(minLoadedTime > 0)
        storage->loadedTime = minLoadedTime;

    if(readResult != 0) {
        //remove the placeholder track
        if(storage->placeholderTrack != NULL) {
            DisposeMovieTrack(storage->placeholderTrack);
            storage->placeholderTrack = NULL;
        }

        //set the movie load state to complete, as well as mark the import output flag.
        storage->movieLoadState = kMovieLoadStateComplete;
        *outFlags |= movieImportResultComplete;
    } else {
        //if we're not yet done with the import, calculate the movie load state.
        int64_t timeToCompleteFile; //time until the file should be completely available, in terms of AV_TIME_BASE
        long dataRate = 0;

        dataResult = DataHGetDataRate(storage->dataHandler, 0, &dataRate);
        if(dataResult == noErr && dataRate > 0) {
            timeToCompleteFile = (AV_TIME_BASE * (storage->dataSize - availableSize)) / dataRate;

            if(storage->loadedTime > (10 * GetMovieTimeScale(storage->movie)) && timeToCompleteFile < (storage->format_context->duration * .85))
                storage->movieLoadState = kMovieLoadStatePlaythroughOK;
            else
                storage->movieLoadState = kMovieLoadStatePlayable;

        } else {
            storage->movieLoadState = kMovieLoadStatePlayable;
        }

        *outFlags |= movieImportResultNeedIdles;
    }

    send_movie_changed_notification(storage->movie);

    //tell the idle manager to idle us again in 500ms.
    if(idling && storage->idleManager && storage->isStreamed)
        QTIdleManagerSetNextIdleTimeDelta(storage->idleManager, 1, 2);

    return(result);
} /* import_with_idle() */
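For context, the two driving modes described in the header comment appear at the call sites later on this page; condensed:

    //idle-driven: decode a small batch now, QuickTime will idle us again later.
    import_with_idle(storage, inFlags | movieImportWithIdle, outFlags, 10, 300, true);

    //one-shot (maxFrames == 0, so no idling): import everything immediately.
    import_with_idle(storage, inFlags, outFlags, 0, 0, true);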
Example #18
static HRESULT QT_Process_Movie(QTSplitter* filter)
{
    HRESULT hr = S_OK;
    OSErr err;
    WineDataRefRecord ptrDataRefRec;
    Handle dataRef = NULL;
    Track trk;
    short id = 0;
    DWORD tid;
    LONGLONG time;

    TRACE("Trying movie connect\n");

    ptrDataRefRec.pReader = filter->pInputPin.pReader;
    ptrDataRefRec.streamSubtype = filter->pInputPin.subType;
    PtrToHand( &ptrDataRefRec, &dataRef, sizeof(WineDataRefRecord));

    err = NewMovieFromDataRef(&filter->pQTMovie, newMovieActive|newMovieDontInteractWithUser|newMovieDontAutoUpdateClock|newMovieDontAskUnresolvedDataRefs|newMovieAsyncOK, &id, dataRef, 'WINE');

    DisposeHandle(dataRef);

    if (err != noErr)
    {
        FIXME("QuickTime cannot handle media type(%i)\n",err);
        return VFW_E_TYPE_NOT_ACCEPTED;
    }

    PrePrerollMovie(filter->pQTMovie, 0, fixed1, NULL, NULL);
    PrerollMovie(filter->pQTMovie, 0, fixed1);
    GoToBeginningOfMovie(filter->pQTMovie);
    SetMovieActive(filter->pQTMovie,TRUE);

    if (GetMovieLoadState(filter->pQTMovie) < kMovieLoadStateLoaded)
        MoviesTask(filter->pQTMovie,100);

    trk = GetMovieIndTrackType(filter->pQTMovie, 1, VisualMediaCharacteristic, movieTrackCharacteristic | movieTrackEnabledOnly);
    TRACE("%p is a video track\n",trk);
    if (trk)
       hr = QT_Process_Video_Track(filter, trk);

    if (FAILED(hr))
        return hr;

    trk = GetMovieIndTrackType(filter->pQTMovie, 1, AudioMediaCharacteristic, movieTrackCharacteristic | movieTrackEnabledOnly);
    TRACE("%p is a audio track\n",trk);
    if (trk)
        hr = QT_Process_Audio_Track(filter, trk);

    time = GetMovieDuration(filter->pQTMovie);
    filter->movie_scale = GetMovieTimeScale(filter->pQTMovie);
    filter->sourceSeeking.llDuration = ((double)time / filter->movie_scale) * 10000000;
    filter->sourceSeeking.llStop = filter->sourceSeeking.llDuration;

    TRACE("Movie duration is %s\n",wine_dbgstr_longlong(filter->sourceSeeking.llDuration));

    filter->loaderThread = CreateThread(NULL, 0, QTSplitter_loading_thread, filter, 0, &tid);
    if (filter->loaderThread)
        TRACE("Created loading thread 0x%08x\n", tid);
    filter->splitterThread = CreateThread(NULL, 0, QTSplitter_thread, filter, 0, &tid);
    if (filter->splitterThread)
        TRACE("Created processing thread 0x%08x\n", tid);
    else
        hr = HRESULT_FROM_WIN32(GetLastError());

    return hr;
}
Example #19
ComponentResult process_stream_page__flac(OggImportGlobals *globals, StreamInfo *si, ogg_page *opg)
{
    ComponentResult ret = noErr;
    int ovret = 0;
    Boolean loop = true;
    Boolean movie_changed = false;

    TimeValue movieTS = GetMovieTimeScale(globals->theMovie);
    TimeValue mediaTS = 0;
    Float64 mediaTS_fl = 0.0;

    ogg_packet op;

    switch(si->si_flac.state) {
    case kFStateReadingComments:
    case kFStateReadingAdditionalMDBlocks:
        ogg_stream_pagein(&si->os, opg);
        break;
    default:
        break;
    }

    do {
        switch(si->si_flac.state) {
        case kFStateReadingComments:
            ovret = ogg_stream_packetout(&si->os, &op);
            if (ovret < 0) {
                loop = false;
                ret = invalidMedia;
            } else if (ovret < 1) {
                loop = false;
            } else {
                ret = CreateTrackAndMedia(globals, si, opg);
                if (ret != noErr) {
                    dbg_printf("??? -- CreateTrackAndMedia failed?: %ld\n", (long)ret);
                    loop = false;
                    break;
                }

                if (si->si_flac.metablocks == 0 && (*((unsigned char*) op.packet) == 0xff)) {
                    si->si_flac.metablocks = si->si_flac.skipped;
                    si->si_flac.state = kFStateReadingAdditionalMDBlocks;
                    break;
                }

                {
                    unsigned long atomhead[2] = { EndianU32_NtoB(op.bytes + sizeof(atomhead)), EndianU32_NtoB(kCookieTypeFLACMetadata) };

                    PtrAndHand(atomhead, si->soundDescExtension, sizeof(atomhead));
                    PtrAndHand(op.packet, si->soundDescExtension, op.bytes);
                }

                if (((* (char *) op.packet) & 0x7f) == 4) {
                    dbg_printf("!  > - flac_stream_page - mb: %ld, skipped: %ld, h: %02x\n", si->si_flac.metablocks, si->si_flac.skipped,
                               (*(char *) op.packet) & 0x7f);
                    unpack_vorbis_comments(&si->si_flac.vc, ((char *) op.packet) + 4, op.bytes - 4);
                    /*err =*/ DecodeCommentsQT(globals, si, &si->si_flac.vc);
                    //NotifyMovieChanged(globals);
                }

                si->si_flac.skipped += 1;
                si->si_flac.state = kFStateReadingAdditionalMDBlocks;
            }

            break;

        case kFStateReadingAdditionalMDBlocks:
            dbg_printf("! -- - flac_stream_page - mb: %ld, skipped: %ld\n", si->si_flac.metablocks, si->si_flac.skipped);
            if (si->si_flac.metablocks > 0 && si->si_flac.skipped >= si->si_flac.metablocks) {
                unsigned long endAtom[2] = { EndianU32_NtoB(sizeof(endAtom)), EndianU32_NtoB(kAudioTerminatorAtomType) };

                ret = PtrAndHand(endAtom, si->soundDescExtension, sizeof(endAtom));
                if (ret == noErr) {
                    ret = AddSoundDescriptionExtension((SoundDescriptionHandle) si->sampleDesc,
                                                       si->soundDescExtension, siDecompressionParams);
                    //dbg_printf("??? -- Adding extension: %ld\n", ret);
                } else {
                    //dbg_printf("??? -- Hmm, something went wrong: %ld\n", ret);
                }

                si->insertTime = 0;
                si->streamOffset = globals->currentGroupOffset;
                mediaTS = GetMediaTimeScale(si->theMedia);
                mediaTS_fl = (Float64) mediaTS;
                si->streamOffsetSamples = (TimeValue) (mediaTS_fl * globals->currentGroupOffsetSubSecond) -
                    ((globals->currentGroupOffset % movieTS) * mediaTS / movieTS);
                dbg_printf("---/  / streamOffset: [%ld, %ld], %lg\n", si->streamOffset, si->streamOffsetSamples, globals->currentGroupOffsetSubSecond);
                si->incompleteCompensation = 0;
                si->si_flac.state = kFStateReadingFirstPacket;

                loop = false; // the audio data is supposed to start on a fresh page
                break;
            }

            ovret = ogg_stream_packetout(&si->os, &op);
            dbg_printf("! -- - flac_stream_page - ovret: %d\n", ovret);
            if (ovret < 0) {
                loop = false;
                ret = invalidMedia;
            } else if (ovret < 1) {
                loop = false;
            } else {
                // not much here so far, basically just skip the extra header packet
                unsigned long atomhead[2] = { EndianU32_NtoB(op.bytes + sizeof(atomhead)), EndianU32_NtoB(kCookieTypeFLACMetadata) };

                if (si->si_flac.metablocks == 0 && (* (unsigned char*) op.packet) == 0xff) {
                    si->si_flac.metablocks = si->si_flac.skipped;
                    break;
                }

                PtrAndHand(atomhead, si->soundDescExtension, sizeof(atomhead));
                PtrAndHand(op.packet, si->soundDescExtension, op.bytes);

                if (((* (unsigned char *) op.packet) & 0x7f) == 4) {
                    dbg_printf("!  > - flac_stream_page - mb: %ld, skipped: %ld, h: %02x\n", si->si_flac.metablocks, si->si_flac.skipped,
                               (*(char *) op.packet) & 0x7f);
                    unpack_vorbis_comments(&si->si_flac.vc, ((char *) op.packet) + 4, op.bytes - 4);
                    /*err =*/ DecodeCommentsQT(globals, si, &si->si_flac.vc);
                    //NotifyMovieChanged(globals);
                }

                si->si_flac.skipped += 1;
            }

            break;

        case kFStateReadingFirstPacket:
            // what to do with this one? is it needed at all??
            if (ogg_page_pageno(opg) > 2 && false) {
                si->lastGranulePos = ogg_page_granulepos(opg);
                dbg_printf("----==< skipping: %llx, %lx\n", si->lastGranulePos, ogg_page_pageno(opg));
                loop = false;

                if (si->lastGranulePos < 0)
                    si->lastGranulePos = 0;
            }
            si->si_flac.state = kFStateReadingPackets;
            break;

        case kFStateReadingPackets:
            {
                ogg_int64_t pos       = ogg_page_granulepos(opg);
                int         len       = opg->header_len + opg->body_len;
                TimeValue   duration  = pos - si->lastGranulePos;
                short       smp_flags = 0;

                if (ogg_page_continued(opg) || si->incompleteCompensation != 0)
                    smp_flags |= mediaSampleNotSync;

                if (duration <= 0) {
                    duration = INCOMPLETE_PAGE_DURATION;
                    si->incompleteCompensation -= INCOMPLETE_PAGE_DURATION;
                } else if (si->incompleteCompensation != 0) {
                    duration += si->incompleteCompensation;
                    si->incompleteCompensation = 0;
                    if (duration <= 0) {
                        ret = badFileFormat;
                        loop = false;
                        break;
                    }
                }

                if (si->insertTime == 0 && si->streamOffsetSamples > 0) {
                    dbg_printf("   -   :++: increasing duration (%ld) by sampleOffset: %ld\n", duration, si->streamOffsetSamples);
                    duration += si->streamOffsetSamples;
                }

                ret = _store_sample_reference(si, &globals->dataOffset, len, duration, smp_flags);
                if (ret != noErr) {
                    loop = false;
                    break;
                }

                if (!globals->usingIdle) {
#if !defined(XIPHQT_FORCE_SINGLE_SAMPLE_REF)
                    if (si->sample_refs_count >= si->sample_refs_size)
                    //if (si->sample_refs_count >= kFSRefsInitial)
#endif
                    {
                        ret = _commit_srefs(globals, si, &movie_changed);
                    }
                }

                if (pos != -1)
                    si->lastGranulePos = pos;
            }
            loop = false;
            break;

        default:
            loop = false;
        }
    } while(loop);

    if (movie_changed)
        NotifyMovieChanged(globals, false);

    return ret;
};
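The incomplete-page compensation in kFStateReadingPackets is easiest to follow as a worked trace (granulepos values assumed for illustration):

    // page 1: pos = 4096, lastGranulePos = 0 -> duration = 4096
    // page 2: pos = -1 (packet continues)    -> duration = INCOMPLETE_PAGE_DURATION,
    //         incompleteCompensation = -INCOMPLETE_PAGE_DURATION; lastGranulePos unchanged
    // page 3: pos = 8192                     -> duration = (8192 - 4096) - INCOMPLETE_PAGE_DURATION,
    //         so pages 2 and 3 together still account for exactly 4096 units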
Example #20
ComponentResult process_stream_page__speex(OggImportGlobals *globals, StreamInfo *si, ogg_page *opg)
{
    ComponentResult ret = noErr;
    int ovret = 0;
    Boolean loop = true;
    Boolean movie_changed = false;

    TimeValue movieTS = GetMovieTimeScale(globals->theMovie);
    TimeValue mediaTS = 0;
    Float64 mediaTS_fl = 0.0;

    ogg_packet op;

    switch(si->si_speex.state) {
    case kSStateReadingComments:
    case kSStateReadingAdditionalHeaders:
        ogg_stream_pagein(&si->os, opg);
        break;
    default:
        break;
    }

    do {
        switch(si->si_speex.state) {
        case kSStateReadingComments:
            ovret = ogg_stream_packetout(&si->os, &op);
            if (ovret < 0) {
                loop = false;
                ret = invalidMedia;
            } else if (ovret < 1) {
                loop = false;
            } else {
                unsigned long atomhead[2] = { EndianU32_NtoB(op.bytes + sizeof(atomhead)), EndianU32_NtoB(kCookieTypeSpeexComments) };

                PtrAndHand(atomhead, si->soundDescExtension, sizeof(atomhead));
                PtrAndHand(op.packet, si->soundDescExtension, op.bytes);

                ret = CreateTrackAndMedia(globals, si, opg);
                if (ret != noErr) {
                    dbg_printf("??? -- CreateTrackAndMedia failed?: %ld\n", (long)ret);
                }

                unpack_vorbis_comments(&si->si_speex.vc, op.packet, op.bytes);
                /*err =*/ DecodeCommentsQT(globals, si, &si->si_speex.vc);
                //NotifyMovieChanged(globals);

                si->si_speex.state = kSStateReadingAdditionalHeaders;
            }

            break;

        case kSStateReadingAdditionalHeaders:
            if (si->si_speex.skipped_headers >= si->si_speex.header.extra_headers) {
                unsigned long endAtom[2] = { EndianU32_NtoB(sizeof(endAtom)), EndianU32_NtoB(kAudioTerminatorAtomType) };

                ret = PtrAndHand(endAtom, si->soundDescExtension, sizeof(endAtom));
                if (ret == noErr) {
                    ret = AddSoundDescriptionExtension((SoundDescriptionHandle) si->sampleDesc,
                                                       si->soundDescExtension, siDecompressionParams);
                    //dbg_printf("??? -- Adding extension: %ld\n", ret);
                } else {
                    //dbg_printf("??? -- Hmm, something went wrong: %ld\n", ret);
                }

                si->insertTime = 0;
                si->streamOffset = globals->currentGroupOffset;
                mediaTS = GetMediaTimeScale(si->theMedia);
                mediaTS_fl = (Float64) mediaTS;
                si->streamOffsetSamples = (TimeValue) (mediaTS_fl * globals->currentGroupOffsetSubSecond) -
                    ((globals->currentGroupOffset % movieTS) * mediaTS / movieTS);
                dbg_printf("---/  / streamOffset: [%ld, %ld], %lg\n", si->streamOffset, si->streamOffsetSamples, globals->currentGroupOffsetSubSecond);
                si->incompleteCompensation = 0;
                si->si_speex.state = kSStateReadingFirstPacket;

                loop = false; // ??!
                break;
            }

            ovret = ogg_stream_packetout(&si->os, &op);
            if (ovret < 0) {
                loop = false;
                ret = invalidMedia;
            } else if (ovret < 1) {
                loop = false;
            } else {
                // not much here so far, basically just skip the extra header packet
                unsigned long atomhead[2] = { EndianU32_NtoB(op.bytes + sizeof(atomhead)), EndianU32_NtoB(kCookieTypeSpeexExtraHeader) };
                PtrAndHand(atomhead, si->soundDescExtension, sizeof(atomhead));
                PtrAndHand(op.packet, si->soundDescExtension, op.bytes);

                si->si_speex.skipped_headers += 1;
            }

            break;

        case kSStateReadingFirstPacket:
            if (ogg_page_pageno(opg) > 2) {
                si->lastGranulePos = ogg_page_granulepos(opg);
                dbg_printf("----==< skipping: %llx, %lx\n", si->lastGranulePos, ogg_page_pageno(opg));
                loop = false;

                if (si->lastGranulePos < 0)
                    si->lastGranulePos = 0;
            }
            si->si_speex.state = kSStateReadingPackets;
            break;

        case kSStateReadingPackets:
            {
                ogg_int64_t pos       = ogg_page_granulepos(opg);
                int         len       = opg->header_len + opg->body_len;
                TimeValue   duration  = pos - si->lastGranulePos;
                short       smp_flags = 0;

                if (ogg_page_continued(opg) || si->incompleteCompensation != 0)
                    smp_flags |= mediaSampleNotSync;

                if (duration <= 0) {
                    duration = INCOMPLETE_PAGE_DURATION;
                    si->incompleteCompensation -= INCOMPLETE_PAGE_DURATION;
                } else if (si->incompleteCompensation != 0) {
                    duration += si->incompleteCompensation;
                    si->incompleteCompensation = 0;
                    if (duration <= 0) {
                        ret = badFileFormat;
                        loop = false;
                        break;
                    }
                }

                if (si->insertTime == 0 && si->streamOffsetSamples > 0) {
                    dbg_printf("   -   :++: increasing duration (%ld) by sampleOffset: %ld\n", duration, si->streamOffsetSamples);
                    duration += si->streamOffsetSamples;
                }

                ret = _store_sample_reference(si, &globals->dataOffset, len, duration, smp_flags);
                if (ret != noErr) {
                    loop = false;
                    break;
                }

                if (!globals->usingIdle) {
                    if (si->sample_refs_count >= kSSRefsInitial)
                        ret = _commit_srefs(globals, si, &movie_changed);
                }

                if (pos != -1)
                    si->lastGranulePos = pos;
            }
            loop = false;
            break;

        default:
            loop = false;
        }
    } while(loop);

    if (movie_changed)
        NotifyMovieChanged(globals, false);

    return ret;
};
    QTAudioReader (InputStream* const input_, const int trackNum_)
        : AudioFormatReader (input_, TRANS (quickTimeFormatName)),
          ok (false),
          movie (0),
          trackNum (trackNum_),
          lastSampleRead (0),
          lastThreadId (0),
          extractor (0),
          dataHandle (0)
    {
        JUCE_AUTORELEASEPOOL
        bufferList.calloc (256, 1);

       #if JUCE_WINDOWS
        if (InitializeQTML (0) != noErr)
            return;
       #endif

        if (EnterMovies() != noErr)
            return;

        bool opened = juce_OpenQuickTimeMovieFromStream (input_, movie, dataHandle);

        if (! opened)
            return;

        {
            const int numTracks = GetMovieTrackCount (movie);
            int trackCount = 0;

            for (int i = 1; i <= numTracks; ++i)
            {
                track = GetMovieIndTrack (movie, i);
                media = GetTrackMedia (track);

                OSType mediaType;
                GetMediaHandlerDescription (media, &mediaType, 0, 0);

                if (mediaType == SoundMediaType
                     && trackCount++ == trackNum_)
                {
                    ok = true;
                    break;
                }
            }
        }

        if (! ok)
            return;

        ok = false;

        lengthInSamples = GetMediaDecodeDuration (media);
        usesFloatingPointData = false;

        samplesPerFrame = (int) (GetMediaDecodeDuration (media) / GetMediaSampleCount (media));

        trackUnitsPerFrame = GetMovieTimeScale (movie) * samplesPerFrame
                                / GetMediaTimeScale (media);

        OSStatus err = MovieAudioExtractionBegin (movie, 0, &extractor);

        unsigned long output_layout_size;
        err = MovieAudioExtractionGetPropertyInfo (extractor,
                                                   kQTPropertyClass_MovieAudioExtraction_Audio,
                                                   kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                                   0, &output_layout_size, 0);
        if (err != noErr)
            return;

        HeapBlock <AudioChannelLayout> qt_audio_channel_layout;
        qt_audio_channel_layout.calloc (output_layout_size, 1);

        err = MovieAudioExtractionGetProperty (extractor,
                                               kQTPropertyClass_MovieAudioExtraction_Audio,
                                               kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                               output_layout_size, qt_audio_channel_layout, 0);

        qt_audio_channel_layout[0].mChannelLayoutTag = kAudioChannelLayoutTag_Stereo;

        err = MovieAudioExtractionSetProperty (extractor,
                                               kQTPropertyClass_MovieAudioExtraction_Audio,
                                               kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                               output_layout_size,
                                               qt_audio_channel_layout);

        err = MovieAudioExtractionGetProperty (extractor,
                                               kQTPropertyClass_MovieAudioExtraction_Audio,
                                               kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                               sizeof (inputStreamDesc),
                                               &inputStreamDesc, 0);
        if (err != noErr)
            return;

        inputStreamDesc.mFormatFlags = kAudioFormatFlagIsSignedInteger
                                        | kAudioFormatFlagIsPacked
                                        | kAudioFormatFlagsNativeEndian;
        inputStreamDesc.mBitsPerChannel = sizeof (SInt16) * 8;
        inputStreamDesc.mChannelsPerFrame = jmin ((UInt32) 2, inputStreamDesc.mChannelsPerFrame);
        inputStreamDesc.mBytesPerFrame = sizeof (SInt16) * inputStreamDesc.mChannelsPerFrame;
        inputStreamDesc.mBytesPerPacket = inputStreamDesc.mBytesPerFrame;

        err = MovieAudioExtractionSetProperty (extractor,
                                               kQTPropertyClass_MovieAudioExtraction_Audio,
                                               kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                               sizeof (inputStreamDesc),
                                               &inputStreamDesc);
        if (err != noErr)
            return;

        Boolean allChannelsDiscrete = false;
        err = MovieAudioExtractionSetProperty (extractor,
                                               kQTPropertyClass_MovieAudioExtraction_Movie,
                                               kQTMovieAudioExtractionMoviePropertyID_AllChannelsDiscrete,
                                               sizeof (allChannelsDiscrete),
                                               &allChannelsDiscrete);

        if (err != noErr)
            return;

        bufferList->mNumberBuffers = 1;
        bufferList->mBuffers[0].mNumberChannels = inputStreamDesc.mChannelsPerFrame;
        bufferList->mBuffers[0].mDataByteSize =  jmax ((UInt32) 4096, (UInt32) (samplesPerFrame * inputStreamDesc.mBytesPerFrame) + 16);

        dataBuffer.malloc (bufferList->mBuffers[0].mDataByteSize);
        bufferList->mBuffers[0].mData = dataBuffer;

        sampleRate = inputStreamDesc.mSampleRate;
        bitsPerSample = 16;
        numChannels = inputStreamDesc.mChannelsPerFrame;

        detachThread();
        ok = true;
    }
/*
 *  PsychQTSetMovieTimeIndex()  -- Set current playback time of movie.
 */
double PsychQTSetMovieTimeIndex(int moviehandle, double timeindex, psych_bool indexIsFrames)
{
    Movie		theMovie;
    double		oldtime;
	long		targetIndex, myIndex;
    short		myFlags;
    TimeValue	myTime;
    OSType		myTypes[1];
    
    if (moviehandle < 0 || moviehandle >= PSYCH_MAX_MOVIES) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided!");
    }
    
    // Fetch references to objects we need:
    theMovie = movieRecordBANK[moviehandle].theMovie;    
    if (theMovie == NULL) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided. No movie associated with this handle !!!");
    }
    
    // Retrieve current timeindex:
    oldtime = (double) GetMovieTime(theMovie, NULL) / (double) GetMovieTimeScale(theMovie);
    
	// Index based or target time based seeking?
	if (indexIsFrames) {
		// Index based seeking:
		
		// Seek to given targetIndex:
		targetIndex = (long) (timeindex + 0.5);

		// We want video samples.
		myTypes[0] = VisualMediaCharacteristic;
		
		// We want to begin with the first frame in the movie:
		myFlags = nextTimeStep + nextTimeEdgeOK;
		
		// Start with iteration at beginning:
		myTime = 0;
		myIndex = -1;
		
		// We iterate until end of movie (myTime < 0) or targetIndex reached:
		while ((myTime >= 0) && (myIndex < targetIndex)) {
			// Increment our index position:
			myIndex++;
			
			// Look for the next frame in the track; when there are no more frames,
			// myTime is set to -1, so we'll exit the while loop
			GetMovieNextInterestingTime(theMovie, myFlags, 1, myTypes, myTime, FloatToFixed(1), &myTime, NULL);

			// after the first interesting time, don't include the time we're currently at
			myFlags = nextTimeStep;
		}    
		
		// Valid time for existing target frame?
		if (myTime >= 0) {
			// Yes. Seek to it:
			SetMovieTimeValue(theMovie, myTime);
		}

		// Done with seek.
	}
	else {
		// Time based seeking:

		// Set new timeindex as time in seconds:
		SetMovieTimeValue(theMovie, (TimeValue) (((timeindex * (double) GetMovieTimeScale(theMovie))) + 0.5f));

		// Done with seek.
	}

    // Check if end of movie is reached. Rewind, if so...
    if (IsMovieDone(theMovie) && movieRecordBANK[moviehandle].loopflag > 0) {
        if (GetMovieRate(theMovie) > 0) {
            GoToBeginningOfMovie(theMovie);
        } else {
            GoToEndOfMovie(theMovie);
        }
    }

	// Yield some processing time to Quicktime to update properly:
    MoviesTask(theMovie, 0);
    
    // Return old time value of previous position:
    return(oldtime);
}
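A usage sketch of the two seek modes ('myhandle' hypothetical):

	PsychQTSetMovieTimeIndex(myhandle, 100.0, TRUE);	// index based: seek to video frame 100
	PsychQTSetMovieTimeIndex(myhandle, 2.5, FALSE);		// time based: seek to t = 2.5 seconds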
Example #23
ComponentResult FFAvi_MovieImportDataRef(ff_global_ptr storage, Handle dataRef, OSType dataRefType, Movie theMovie, Track targetTrack,
										 Track *usedTrack, TimeValue atTime, TimeValue *addedDuration, long inFlags, long *outFlags)
{
	ComponentResult result = noErr;
	ByteIOContext *byteContext;
	AVFormatContext *ic = NULL;
	AVFormatParameters params;
	OSType mediaType;
	Media media;
	int count, hadIndex, j;
		
	/* make sure that in case of error, the flag movieImportResultComplete is not set */
	*outFlags = 0;
	
	/* probe the format first */
	UInt8 valid = 0;
	FFAvi_MovieImportValidateDataRef(storage, dataRef, dataRefType, &valid);
	if(valid != 255)
		goto bail;
			
	/* Prepare the iocontext structure */
	result = url_open_dataref(&byteContext, dataRef, dataRefType, &storage->dataHandler, &storage->dataHandlerSupportsWideOffsets, &storage->dataSize);
	storage->isStreamed = dataRefType == URLDataHandlerSubType;
	require_noerr(result, bail);
	
	/* Open the Format Context */
	memset(&params, 0, sizeof(params));
	result = av_open_input_stream(&ic, byteContext, "", storage->format, &params);
	require_noerr(result,bail);
	storage->format_context = ic;
	
	// AVIs without an index currently add a few entries to the index so it can
	// determine codec parameters.  Check for index existence here before it
	// reads any packets.
	hadIndex = 1;
	for (j = 0; j < ic->nb_streams; j++) {
		if (ic->streams[j]->nb_index_entries <= 1)
		{
			hadIndex = 0;
			break;
		}
	}
	
	/* Get the Stream Infos if not already read */
	result = av_find_stream_info(ic);
	
	// -1 means it couldn't understand at least one stream
	// which might just mean we don't have its video decoder enabled
	if(result < 0 && result != -1)
		goto bail;
	
	// we couldn't find any streams, bail with an error.
	if(ic->nb_streams == 0) {
		result = -1; //is there a more appropriate error code?
		goto bail;
	}
	
	//determine a header offset (needed by index-based import).
	result = determine_header_offset(storage);
	if(result < 0)
		goto bail;
	
	/* Initialize the Movie */
	storage->movie = theMovie;
	if(inFlags & movieImportMustUseTrack) {
		storage->map_count = 1;
		prepare_track(storage, targetTrack, dataRef, dataRefType);
	} else {
		storage->map_count = ic->nb_streams;
		result = prepare_movie(storage, theMovie, dataRef, dataRefType);
		if (result != 0)
			goto bail;
	}
	
	/* replace the SampleDescription if user called MovieImportSetSampleDescription() */
	if(storage->imgHdl) {
		for(j = 0; j < storage->map_count; j++) {
			NCStream ncstream = storage->stream_map[j];
			GetMediaHandlerDescription(ncstream.media, &mediaType, NULL, NULL);
			if(mediaType == VideoMediaType && ncstream.sampleHdl) {
				DisposeHandle((Handle)ncstream.sampleHdl);
				ncstream.sampleHdl = (SampleDescriptionHandle)storage->imgHdl;
			}
		}
	}
	if(storage->sndHdl) {
		for(j = 0; j < storage->map_count; j++) {
			NCStream ncstream = storage->stream_map[j];
			GetMediaHandlerDescription(ncstream.media, &mediaType, NULL, NULL);
			if(mediaType == SoundMediaType && ncstream.sampleHdl) {
				DisposeHandle((Handle)ncstream.sampleHdl);
				ncstream.sampleHdl = (SampleDescriptionHandle)storage->sndHdl;
			}
		}
	}
	
	count = 0; media = NULL;
	for(j = 0; j < storage->map_count; j++) {
		media = storage->stream_map[j].media;
		if(media)
			count++;
	}
	
	if(count > 1)
		*outFlags |= movieImportResultUsedMultipleTracks;
	
	/* The usedTrack parameter. Count the number of Tracks and set usedTrack if we operated
		* on a single track. Note that this requires the media to be set by track counting above*/
	if(usedTrack && count == 1 && media)
		*usedTrack = GetMediaTrack(media);
	
	result = noErr;

	*addedDuration = 0;
	
	//attempt to import using indexes.
	result = import_using_index(storage, &hadIndex, addedDuration);
	require_noerr(result, bail);
	
	if(hadIndex) {
		//file had an index and was imported; we are done.
		*outFlags |= movieImportResultComplete;
		
	} else if(inFlags & movieImportWithIdle) {
		if(addedDuration && ic->duration > 0) {
			TimeScale movieTimeScale = GetMovieTimeScale(theMovie);
			*addedDuration = movieTimeScale * ic->duration / AV_TIME_BASE;
			
			//create a placeholder track so that progress displays correctly.
			create_placeholder_track(storage->movie, &storage->placeholderTrack, *addedDuration, dataRef, dataRefType);
			
			//give the data handler a hint as to how fast we need the data.
			//suggest a speed that's faster than the bare minimum.
			//if there's an error, the data handler probably doesn't support
			//this, so we can just ignore.
			DataHPlaybackHints(storage->dataHandler, 0, 0, -1, (storage->dataSize * 1.15) / ((double)ic->duration / AV_TIME_BASE));
		}
			
		//import with idle. Decode a little bit of data now.
		import_with_idle(storage, inFlags, outFlags, 10, 300, true);
	} else {
		//QuickTime didn't request import with idle, so do it all now.
		import_with_idle(storage, inFlags, outFlags, 0, 0, true);			
	}
	
	LoadExternalSubtitlesFromFileDataRef(dataRef, dataRefType, theMovie);

bail:
	if(result == noErr)
		storage->movieLoadState = kMovieLoadStateLoaded;
	else
		storage->movieLoadState = kMovieLoadStateError;
		
	if (result == -1)
		result = invalidMovie; // a bit better error message
	
	return result;
} /* FFAvi_MovieImportDataRef */
/*
 *  PsychQTGetTextureFromMovie() -- Create an OpenGL texture map from a specific videoframe from given movie object.
 *
 *  win = Window pointer of onscreen window for which a OpenGL texture should be created.
 *  moviehandle = Handle to the movie object.
 *  checkForImage = true == Just check if new image available, false == really retrieve the image, blocking if necessary.
 *  timeindex = When not in playback mode, this allows specification of a requested frame by presentation time.
 *              If set to -1, or if in realtime playback mode, this parameter is ignored and the next video frame is returned.
 *  out_texture = Pointer to the Psychtoolbox texture-record where the new texture should be stored.
 *  presentation_timestamp = A ptr to a double variable, where the presentation timestamp of the returned frame should be stored.
 *
 *  Returns true (1) on success, false (0) if no new image available, -1 if no new image available and there won't be any in future.
 */
int PsychQTGetTextureFromMovie(PsychWindowRecordType *win, int moviehandle, int checkForImage, double timeindex, PsychWindowRecordType *out_texture, double *presentation_timestamp)
{
	static TimeValue myNextTimeCached = -2;
	static TimeValue nextFramesTimeCached = -2;
    TimeValue		myCurrTime;
    TimeValue		myNextTime;
    TimeValue       nextFramesTime=0;
    short		myFlags;
    OSType		myTypes[1];
    OSErr		error = noErr;
    Movie               theMovie;
    CVOpenGLTextureRef newImage = NULL;
    QTVisualContextRef  theMoviecontext;
    unsigned int failcount=0;
    float lowerLeft[2];
    float lowerRight[2];    
    float upperRight[2];    
    float upperLeft[2];
    GLuint texid;
    Rect rect;
    float rate;
    double targetdelta, realdelta, frames;
	PsychRectType outRect;

    if (!PsychIsOnscreenWindow(win)) {
        PsychErrorExitMsg(PsychError_user, "Need onscreen window ptr!!!");
    }
    
    // Activate OpenGL context of target window:
    PsychSetGLContext(win);

    // Explicitly disable Apple's Client storage extensions. For now they are not really useful to us.
    glPixelStorei(GL_UNPACK_CLIENT_STORAGE_APPLE, GL_FALSE);
    
    if (moviehandle < 0 || moviehandle >= PSYCH_MAX_MOVIES) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided.");
    }
    
    if ((timeindex!=-1) && (timeindex < 0 || timeindex >= 10000.0)) {
        PsychErrorExitMsg(PsychError_user, "Invalid timeindex provided.");
    }
    
    if (NULL == out_texture && !checkForImage) {
        PsychErrorExitMsg(PsychError_internal, "NULL-Ptr instead of out_texture ptr passed!!!");
    }
    
    // Fetch references to objects we need:
    theMovie = movieRecordBANK[moviehandle].theMovie;
    theMoviecontext = movieRecordBANK[moviehandle].QTMovieContext;

    if (theMovie == NULL) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided. No movie associated with this handle.");
    }

    // Check if end of movie is reached. Rewind, if so...
    if (IsMovieDone(theMovie) && movieRecordBANK[moviehandle].loopflag > 0) {
        if (GetMovieRate(theMovie)>0) {
            GoToBeginningOfMovie(theMovie);
        } else {
            GoToEndOfMovie(theMovie);
        }
    }
    
    // Is movie actively playing (automatic async playback, possibly with synced sound)?
    // If so, then we ignore the 'timeindex' parameter, because the automatic playback
    // process determines which frames should be delivered to PTB when. This function will
    // simply wait or poll for arrival/presence of a new frame that hasn't been fetched
    // in previous calls.
    if (0 == GetMovieRate(theMovie)) {
        // Movie playback inactive. We are in "manual" mode: No automatic async playback,
        // no synced audio output. The user just wants to manually fetch movie frames into
        // textures for manual playback in a standard Matlab-loop.

		// First pass - checking for new image?
		if (checkForImage) {
			// Image for specific point in time requested?
			if (timeindex >= 0) {
				// Yes. We try to retrieve the next possible image for requested timeindex.
				myCurrTime = (TimeValue) ((timeindex * (double) GetMovieTimeScale(theMovie)) + 0.5f);
			}
			else {
				// No. We just retrieve the next frame, given the current movie time.
				myCurrTime = GetMovieTime(theMovie, NULL);
			}
            
			// Retrieve timeindex of the closest image sample after myCurrTime:
			myFlags = nextTimeStep + nextTimeEdgeOK;	// We want the next frame in the movie's media.
			myTypes[0] = VisualMediaCharacteristic;		// We want video samples.
			GetMovieNextInterestingTime(theMovie, myFlags, 1, myTypes, myCurrTime, FloatToFixed(1), &myNextTime, &nextFramesTime);
			error = GetMoviesError();
			if (error != noErr) {
				PsychErrorExitMsg(PsychError_internal, "Failed to fetch texture from movie for given timeindex!");
			}
			
			// Found useful event?
			if (myNextTime == -1) {
				if (PsychPrefStateGet_Verbosity() > 3) printf("PTB-WARNING: Bogus timevalue in movie track for movie %i. Trying to keep going.\n", moviehandle);
				
				// No. Just push timestamp to current time plus a little bit in the hope
				// this will get us unstuck:
				myNextTime = myCurrTime + (TimeValue) 1;
				nextFramesTime = (TimeValue) 0;
			}
			
			if (myNextTime != myNextTimeCached) {
				// Set movies current time to myNextTime, so the next frame will be fetched from there:
				SetMovieTimeValue(theMovie, myNextTime);
				
				// nextFramesTime is the timeindex to which we need to advance for retrieval of next frame: (see code below)
				nextFramesTime=myNextTime + nextFramesTime;
				
				if (PsychPrefStateGet_Verbosity() > 5) printf("PTB-DEBUG: Current timevalue in movie track for movie %i is %lf secs.\n", moviehandle, (double) myNextTime / (double) GetMovieTimeScale(theMovie));
				if (PsychPrefStateGet_Verbosity() > 5) printf("PTB-DEBUG: Next timevalue in movie track for movie %i is %lf secs.\n", moviehandle, (double) nextFramesTime / (double) GetMovieTimeScale(theMovie));
				
				// Cache values for 2nd pass:
				myNextTimeCached = myNextTime;
				nextFramesTimeCached = nextFramesTime;
			}
			else {
				// Somehow got stuck? Nudge the timeindex a bit forward in the hope that this helps:
				if (PsychPrefStateGet_Verbosity() > 5) printf("PTB-DEBUG: Seem to be a bit stuck at timevalue [for movie %i] of %lf secs. Nudging a bit forward...\n", moviehandle, (double) myNextTime / (double) GetMovieTimeScale(theMovie));
				SetMovieTimeValue(theMovie, GetMovieTime(theMovie, NULL) + 1);
			}
			}
		}
		else {
			// This is the 2nd pass: Image fetching. Use cached values from first pass:
			// Caching in a static works because we're always called immediately for 2nd
			// pass after successful return from 1st pass, and we're not multi-threaded,
			// i.e., don't need to be reentrant or thread-safe here:
			myNextTime = myNextTimeCached;
			nextFramesTime = nextFramesTimeCached;
			myNextTimeCached = -2;
		}
	}
    else {
        // myNextTime unavailable if in autoplayback-mode:
        myNextTime=-1;
    }
    
    // Presentation timestamp requested?
    if (presentation_timestamp) {
        // Already available?
        if (myNextTime==-1) {
            // Retrieve the exact presentation timestamp of the retrieved frame (in movietime):
            myFlags = nextTimeStep + nextTimeEdgeOK;    // We want the next frame in the movie's media.
            myTypes[0] = VisualMediaCharacteristic;     // We want video samples.
            // We search backward for the closest available image for the current time. Either we get the
            // current time, if we happen to fetch a frame exactly when it becomes ready, or we get a slightly
            // earlier timestamp, which is the optimal presentation timestamp for this frame:
            GetMovieNextInterestingTime(theMovie, myFlags, 1, myTypes, GetMovieTime(theMovie, NULL), FloatToFixed(-1), &myNextTime, NULL);
        }
        // Convert pts (in Quicktime ticks) to pts in seconds since start of movie and return it:
        *presentation_timestamp = (double) myNextTime / (double) GetMovieTimeScale(theMovie);
    }

    // Allow quicktime visual context task to do its internal bookkeeping and cleanup work:
    if (theMoviecontext) QTVisualContextTask(theMoviecontext);

    // Perform decompress-operation:
    if (checkForImage) MoviesTask(theMovie, 0);
    
    // Should we just check for new image? If so, just return availability status:
    if (checkForImage) {
        if (PSYCH_USE_QT_GWORLDS) {
            // We use GWorlds. In this case we either succeed immediately due to the
            // synchronous nature of GWorld rendering, or we fail completely at the end
            // of a non-looping movie:
            if (IsMovieDone(theMovie) && movieRecordBANK[moviehandle].loopflag == 0) {
                // No new frame available and there won't be any in the future, because this is a non-looping
                // movie that has reached its end.
                return(-1);
            }
            
            // Is this the special case of a movie without video, but only sound? In that case,
			// we always return 'false', because there is no image to return.
			if (movieRecordBANK[moviehandle].QTMovieGWorld == NULL) return(false);
			
			// Success!
            return(true);
        }
        
        // Code which uses QTVisualContextTasks...
        if (QTVisualContextIsNewImageAvailable(theMoviecontext, NULL)) {
            // New frame ready!
            return(true);
        }
        else if (IsMovieDone(theMovie) && movieRecordBANK[moviehandle].loopflag == 0) {
            // No new frame available and there won't be any in the future, because this is a non-looping
            // movie that has reached its end.
            return(-1);
        }
        else {
            // No new frame available yet:
            return(false);
        }
    }
    
    if (!PSYCH_USE_QT_GWORLDS) {
        // Blocking wait-code for non-GWorld mode:
        // Try up to 1000 iterations for arrival of requested image data in wait-mode:
        failcount=0;
        while ((failcount < 1000) && !QTVisualContextIsNewImageAvailable(theMoviecontext, NULL)) {
            PsychWaitIntervalSeconds(0.005);
            MoviesTask(theMovie, 0);
            failcount++;
        }
        
        // No new frame available and there won't be any in the future, because this is a non-looping
        // movie that has reached its end.
        if ((failcount>=1000) && IsMovieDone(theMovie) && (movieRecordBANK[moviehandle].loopflag == 0)) {
            return(-1);
        }
        
        // Fetch new OpenGL texture with the new movie image frame:
        error = QTVisualContextCopyImageForTime(theMoviecontext, kCFAllocatorDefault, NULL, &newImage);
        if ((error!=noErr) || newImage == NULL) {
            PsychErrorExitMsg(PsychError_internal, "OpenGL<->Quicktime texture fetch failed!!!");
        }
    
        // Disable client storage, if it was enabled:
        glPixelStorei(GL_UNPACK_CLIENT_STORAGE_APPLE, GL_FALSE);
        
        // Build a standard PTB texture record:    
        CVOpenGLTextureGetCleanTexCoords (newImage, lowerLeft, lowerRight, upperRight, upperLeft);
        texid = CVOpenGLTextureGetName(newImage);
        
        // Assign texture rectangle:
        PsychMakeRect(outRect, upperLeft[0], upperLeft[1], lowerRight[0], lowerRight[1]);    
        
        // Set texture orientation as if it were an inverted Offscreen window: Upside-down.
        out_texture->textureOrientation = (CVOpenGLTextureIsFlipped(newImage)) ? 3 : 4;

        // Assign OpenGL texture id:
        out_texture->textureNumber = texid;
        
        // Store special texture object as part of the PTB texture record:
        out_texture->targetSpecific.QuickTimeGLTexture = newImage;
    }
    else {
        // Synchronous texture fetch code for GWorld rendering mode:
        // At this point, the GWorld should contain the source image for creating a
        // standard OpenGL texture:
        
        // Disable client storage, if it was enabled:
        glPixelStorei(GL_UNPACK_CLIENT_STORAGE_APPLE, GL_FALSE);
        
        // Build a standard PTB texture record:    

        // Assign texture rectangle:
        GetMovieBox(theMovie, &rect);

        // Hack: Need to extend rect by 4 pixels, because GWorld image rows are
        // padded to 4-pixel alignment:
        rect.right = rect.right + 4;
        PsychMakeRect(out_texture->rect, rect.left, rect.top, rect.right, rect.bottom);    
        
        // Set NULL - special texture object as part of the PTB texture record:
        out_texture->targetSpecific.QuickTimeGLTexture = NULL;

        // Set texture orientation as if it were an inverted Offscreen window: Upside-down.
        out_texture->textureOrientation = 3;
        
        // Setup a pointer to our GWorld as texture data pointer:
        out_texture->textureMemorySizeBytes = 0;

		// Quicktime textures are aligned on 4 Byte boundaries:
		out_texture->textureByteAligned = 4;

        // Lock GWorld:
        if(!LockPixels(GetGWorldPixMap(movieRecordBANK[moviehandle].QTMovieGWorld))) {
            // Locking surface failed! We abort.
            PsychErrorExitMsg(PsychError_internal, "PsychQTGetTextureFromMovie(): Locking GWorld pixmap surface failed!!!");
        }
        
        // This will retrieve an OpenGL compatible pointer to the GWorld's pixel data and assign it to our texmemptr:
        out_texture->textureMemory = (GLuint*) GetPixBaseAddr(GetGWorldPixMap(movieRecordBANK[moviehandle].QTMovieGWorld));
            
        // Let PsychCreateTexture() do the rest of the job of creating, setting up and
        // filling an OpenGL texture with the GWorld's content:
        PsychCreateTexture(out_texture);
        
        // Undo hack from above after texture creation: Now we need the real width of the
        // texture for proper texture coordinate assignments in drawing code et al.
        rect.right = rect.right - 4;
        PsychMakeRect(outRect, rect.left, rect.top, rect.right, rect.bottom);    

        // Unlock GWorld surface. A glFinish() before unlocking would be the conservative
        // choice for safety, but is currently disabled:
        //glFinish();
        UnlockPixels(GetGWorldPixMap(movieRecordBANK[moviehandle].QTMovieGWorld));

        // Ready to use the texture... We're done.
    }
    
	// Normalize texture rectangle and assign it:
	PsychNormalizeRect(outRect, out_texture->rect);
	
    rate = FixedToFloat(GetMovieRate(theMovie));
    
    // Detection of dropped frames: This is a heuristic. We'll see how well it works out...
    if (rate && presentation_timestamp) {
        // Try to check for dropped frames in playback mode:

        // Expected delta between successive presentation timestamps:
        targetdelta = 1.0f / (movieRecordBANK[moviehandle].fps * rate);

        // Compute real delta, given rate and playback direction:
        if (rate>0) {
            realdelta = *presentation_timestamp - movieRecordBANK[moviehandle].last_pts;
            if (realdelta<0) realdelta = 0;
        }
        else {
            realdelta = -1.0 * (*presentation_timestamp - movieRecordBANK[moviehandle].last_pts);
            if (realdelta<0) realdelta = 0;
        }
        
        frames = realdelta / targetdelta;
        // Dropped frames?
        if (frames > 1 && movieRecordBANK[moviehandle].last_pts>=0) {
            movieRecordBANK[moviehandle].nr_droppedframes += (int) (frames - 1 + 0.5);
        }

        movieRecordBANK[moviehandle].last_pts = *presentation_timestamp;
    }
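    // Worked example of the heuristic above (illustrative numbers): with
    // fps = 30 and rate = 1, targetdelta = 1/30 s. If successive pts arrive
    // 0.1 s apart, frames = 0.1 / (1/30) = 3, so (int)(3 - 1 + 0.5) = 2
    // dropped frames are counted for this fetch.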
    
    // Manually advance movie time, if in fetch mode:
    if (0 == GetMovieRate(theMovie)) {
        // We are in manual fetch mode: Need to manually advance movie time to next
        // media sample:
		if (nextFramesTime == myNextTime) {
			// Invalid value? Try to hack something that gets us unstuck:
			myNextTime = GetMovieTime(theMovie, NULL);
			nextFramesTime = myNextTime + (TimeValue) 1;
		}

        SetMovieTimeValue(theMovie, nextFramesTime);        
    }
    
    // Check if end of movie is reached. Rewind, if so...
    if (IsMovieDone(theMovie) && movieRecordBANK[moviehandle].loopflag > 0) {
        if (GetMovieRate(theMovie)>0) {
            GoToBeginningOfMovie(theMovie);
        } else {
            GoToEndOfMovie(theMovie);
        }
    }

    return(TRUE);
}
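/* A minimal usage sketch (assumed caller code, not part of the original file)
 * of the two-pass protocol documented above: poll with checkForImage = true
 * until a frame is ready, then fetch it with checkForImage = false. 'win' and
 * 'moviehandle' are assumed to come from a prior PsychQTCreateMovie() call;
 * a stack texture record is used here for brevity, where real callers would
 * create a proper texture window record. */
void example_fetch_next_frame(PsychWindowRecordType *win, int moviehandle)
{
    PsychWindowRecordType texture;
    double pts = 0.0;
    int rc;

    // Pass 1: poll until the next frame is available (rc == 1), or give up
    // when a non-looping movie has ended for good (rc == -1):
    while ((rc = PsychQTGetTextureFromMovie(win, moviehandle, true, -1, NULL, NULL)) == 0) {
        PsychWaitIntervalSeconds(0.001);
    }

    // Pass 2: really fetch the frame into a texture record, with its presentation timestamp:
    if (rc == 1) PsychQTGetTextureFromMovie(win, moviehandle, false, -1, &texture, &pts);
}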
/*
 *      PsychQTCreateMovie() -- Create a movie object.
 *
 *      This function tries to open a Quicktime-Moviefile and create an
 *      associated movie object for it.
 *
 *      win = Pointer to window record of associated onscreen window.
 *      moviename = char* with the name of the moviefile.
 *      preloadSecs = How many seconds of the movie should be preloaded/prefetched into RAM at movie open time?
 *      moviehandle = handle to the new movie.
 */
void PsychQTCreateMovie(PsychWindowRecordType *win, const char* moviename, double preloadSecs, int* moviehandle)
{
    Movie theMovie = NULL;
    QTVisualContextRef QTMovieContext = NULL;
    QTAudioContextRef  QTAudioContext = NULL;
    int i, slotid;
    OSErr error;
    CFStringRef movieLocation;
	CFURLRef movieURLLocation;
    CFStringRef coreAudioDeviceUID;
    psych_bool trueValue = TRUE;
    QTNewMoviePropertyElement newMovieProperties[4] = {0};
    int propcount = 0;
    char msgerr[10000];
    char errdesc[1000];
    Rect movierect;
    psych_bool printErrors;

    // Suppress output of error-messages if moviehandle == -1000. That means we
    // run in our own Posix-Thread, not in the Matlab-Thread. Printing via Matlab's
    // printing facilities would likely cause a terrible crash.
    printErrors = (*moviehandle == -1000) ? FALSE : TRUE;
    
    // Set movie handle to "failed" initially:
    *moviehandle = -1;

    // We startup the Quicktime subsystem only on first invocation.
    if (firsttime) {
#if PSYCH_SYSTEM == PSYCH_WINDOWS
        // Initialize Quicktime for Windows compatibility layer: This will fail if
        // QT isn't installed on the Windows machine...
        error = InitializeQTML(0);
        if (error!=noErr) {
            if (printErrors) {
                PsychErrorExitMsg(PsychError_internal, "Quicktime Media Layer initialization failed: Quicktime not properly installed?!?");
            } else return;

        }
#endif

        // Initialize Quicktime-Subsystem:
        error = EnterMovies();
        if (error!=noErr) {
            if (printErrors) PsychErrorExitMsg(PsychError_internal, "Quicktime EnterMovies() failed!!!"); else return;
        }
        firsttime = FALSE;
    }
    
    if (!PsychIsOnscreenWindow(win)) {
        if (printErrors) PsychErrorExitMsg(PsychError_user, "Provided windowPtr is not an onscreen window."); else return;
    }

    if (NULL==moviename) {
        if (printErrors) PsychErrorExitMsg(PsychError_internal, "NULL-Ptr instead of moviename passed!"); else return;
    }

    if (numMovieRecords >= PSYCH_MAX_MOVIES) {
        *moviehandle = -2;
        if (printErrors) PsychErrorExitMsg(PsychError_user, "Allowed maximum number of simultaneously open movies exceeded!"); else return;
    }

    // Search first free slot in movieRecordBANK:
    for (i=0; (i < PSYCH_MAX_MOVIES) && (movieRecordBANK[i].theMovie); i++);
    if (i>=PSYCH_MAX_MOVIES) {
        *moviehandle = -2;
        if (printErrors) PsychErrorExitMsg(PsychError_user, "Allowed maximum number of simultaneously open movies exceeded!"); else return;
    }

    // Slot slotid will contain the movie record for our new movie object:
    slotid=i;
    
    // Create name-string for moviename:
    movieLocation = CFStringCreateWithCString (kCFAllocatorDefault, moviename, kCFStringEncodingASCII);

    // Zero-out new record in moviebank:
    movieRecordBANK[slotid].theMovie=NULL;    
    movieRecordBANK[slotid].QTMovieContext=NULL;    
    movieRecordBANK[slotid].QTAudioContext=NULL;
    movieRecordBANK[slotid].QTMovieGWorld=NULL;
    
    if (!PSYCH_USE_QT_GWORLDS) {
        // Create QTGLTextureContext:
#if PSYCH_SYSTEM != PSYCH_WINDOWS
        error = QTOpenGLTextureContextCreate (kCFAllocatorDefault,
                                                    win->targetSpecific.contextObject,
                                                    win->targetSpecific.pixelFormatObject,
                                                    NULL,
                                                    &QTMovieContext);
#endif
        if (error!=noErr) {
            if (printErrors) PsychErrorExitMsg(PsychError_internal, "OpenGL Quicktime visual context creation failed!!!"); else return;
        }        
    }

    // The Movie location 
    newMovieProperties[propcount].propClass = kQTPropertyClass_DataLocation;
	if (strstr(moviename, "http:") || strstr(moviename, "ftp:")) {
		// Open movie from URL, e.g., http- or ftp- server:
		movieURLLocation = CFURLCreateWithString(kCFAllocatorDefault, movieLocation, NULL);
		newMovieProperties[propcount].propID = kQTDataLocationPropertyID_CFURL;
		newMovieProperties[propcount].propValueSize = sizeof(movieURLLocation);
		newMovieProperties[propcount++].propValueAddress = (void*) &movieURLLocation;
	}
	else {
		// Open movie file from filesystem:
		newMovieProperties[propcount].propID = kQTDataLocationPropertyID_CFStringPosixPath;
		newMovieProperties[propcount].propValueSize = sizeof(CFStringRef);
		newMovieProperties[propcount++].propValueAddress = &movieLocation;
    }
	
    if (!PSYCH_USE_QT_GWORLDS) {
        // The Movie visual context
        newMovieProperties[propcount].propClass = kQTPropertyClass_Context;
        newMovieProperties[propcount].propID = kQTContextPropertyID_VisualContext;
        newMovieProperties[propcount].propValueSize = sizeof(QTVisualContextRef);
        newMovieProperties[propcount++].propValueAddress = &QTMovieContext;
    }
    
    if (TRUE) {
        // Create QTAudioContext for default CoreAudio device:
        coreAudioDeviceUID = NULL; // Use default audio-output device.
        error =QTAudioContextCreateForAudioDevice (kCFAllocatorDefault,
                                                   coreAudioDeviceUID,
                                                   NULL,
                                                   &QTAudioContext);
        if (error!=noErr) {
            if (printErrors) PsychErrorExitMsg(PsychError_internal, "Quicktime audio context creation failed!!!"); else return;
        }
        
        // The Movie audio context
        newMovieProperties[propcount].propClass = kQTPropertyClass_Context;
        newMovieProperties[propcount].propID = kQTContextPropertyID_AudioContext;
        newMovieProperties[propcount].propValueSize = sizeof(QTAudioContextRef);
        newMovieProperties[propcount++].propValueAddress = &QTAudioContext;
    }
    
    // The Movie active
    newMovieProperties[propcount].propClass = kQTPropertyClass_NewMovieProperty;
    newMovieProperties[propcount].propID = kQTNewMoviePropertyID_Active;
    newMovieProperties[propcount].propValueSize = sizeof(trueValue);
    newMovieProperties[propcount++].propValueAddress = &trueValue;
    
    // Instantiate the Movie
    error = NewMovieFromProperties(propcount, newMovieProperties, 0, NULL, &theMovie);
    if (error!=noErr) {
        QTVisualContextRelease(QTMovieContext);
        QTAudioContextRelease(QTAudioContext);
        switch(error) {
            case -2000:
            case -50:
            case -43:
                sprintf(errdesc, "File not found.");
            break;
            
            case -2048:
                sprintf(errdesc, "This is not a file that Quicktime understands.");
            break;
            
            case -2003:
                sprintf(errdesc, "Can't find media handler (codec) for this movie.");
            break;
            
            default:
                sprintf(errdesc, "Unknown: Check http://developer.apple.com/documentation/QuickTime/APIREF/ErrorCodes.htm#//apple_ref/doc/constant_group/Error_Codes");
        }
        
        sprintf(msgerr, "Couldn't load movie %s! Quicktime error code %i [%s]", moviename, (int) error, errdesc);
        *moviehandle = (int) error;
        if (printErrors) PsychErrorExitMsg(PsychError_user, msgerr); else return;
    }
    
    CFRelease(movieLocation);

    if (PSYCH_USE_QT_GWORLDS) {
        // Determine size of images in movie:
        GetMovieBox(theMovie, &movierect);
        
		// Only create a GWorld if movie frames contain at least 1 pixel. This way we skip GWorld
		// setup on "movies" which only consist of sound tracks.
		if ((movierect.right - movierect.left != 0) && (movierect.bottom - movierect.top != 0)) {
			// Create GWorld for this movie object:
			// error = QTNewGWorld(&movieRecordBANK[slotid].QTMovieGWorld, k32ABGRPixelFormat, &movierect,  NULL, NULL, 0);
			error = QTNewGWorld(&movieRecordBANK[slotid].QTMovieGWorld, 0, &movierect,  NULL, NULL, 0);
			if (error!=noErr) {
				QTAudioContextRelease(QTAudioContext);
				DisposeMovie(movieRecordBANK[slotid].theMovie);
				movieRecordBANK[slotid].theMovie=NULL;    
				if (printErrors) PsychErrorExitMsg(PsychError_internal, "Quicktime GWorld creation failed!!!"); else return;
			}
			
			// Attach this GWorld as rendering target for Quicktime:
			SetMovieGWorld(theMovie, movieRecordBANK[slotid].QTMovieGWorld, NULL);
		}
    }
    
    // Preload preloadSecs seconds of movie into system RAM for faster playback:
	if (preloadSecs > 0) LoadMovieIntoRam(theMovie, 0, (TimeValue) ((preloadSecs * (double) GetMovieTimeScale(theMovie)) + 0.5), keepInRam);
	// Special setting - 1 means: Load whole movie into RAM:
	if (preloadSecs == -1) LoadMovieIntoRam(theMovie, 0, GetMovieDuration(theMovie),  keepInRam);

    // We don't preroll: Didn't help for async playback, but leads to failure in
    // manual playback mode: PrerollMovie(theMovie, 0, FloatToFixed(1));

    // MoviesTask() it to make sure start of playback will be as stutter-free as possible:
    MoviesTask(theMovie, 10000);
    
    // Assign new record in moviebank:
    movieRecordBANK[slotid].theMovie=theMovie;    
    movieRecordBANK[slotid].QTMovieContext=QTMovieContext;    
    movieRecordBANK[slotid].QTAudioContext=QTAudioContext;
    movieRecordBANK[slotid].loopflag = 0;
    *moviehandle = slotid;

    // Increase counter:
    numMovieRecords++;

    // Compute basic movie properties - Duration and fps as well as image size:
    
    // Compute duration in seconds:
    movieRecordBANK[slotid].movieduration = (double) GetMovieDuration(theMovie) / (double) GetMovieTimeScale(theMovie);

    // Compute expected framerate, assuming a linear spacing between frames: It is derived as
    // the reciprocal of the duration of the first video frame in the movie:
    movieRecordBANK[slotid].fps = PsychDetermineMovieFramecountAndFps(theMovie, NULL);

    // Determine size of images in movie:
    GetMovieBox(theMovie, &movierect);
    movieRecordBANK[slotid].width = movierect.right - movierect.left;
    movieRecordBANK[slotid].height = movierect.bottom - movierect.top;
    
    // We set nrframes == -1 to indicate that this value is not yet available.
    // Will do counting on first query for this parameter as it is very time-consuming:
    movieRecordBANK[slotid].nrframes = -1;
    
    return;
}
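/* A minimal usage sketch (assumed caller code; the file path is purely
 * illustrative): open a movie with one second of preload and read back the
 * basic properties that PsychQTCreateMovie() stores in movieRecordBANK[]. */
void example_open_movie(PsychWindowRecordType *win)
{
    int moviehandle = -1;
    PsychQTCreateMovie(win, "/path/to/stimulus.mov", 1.0, &moviehandle);
    if (moviehandle >= 0) {
        printf("Movie %i: %f secs duration, %f fps, %i x %i pixels.\n", moviehandle,
               movieRecordBANK[moviehandle].movieduration,
               movieRecordBANK[moviehandle].fps,
               movieRecordBANK[moviehandle].width,
               movieRecordBANK[moviehandle].height);
    }
}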
Example #26
/* This function imports the AVI represented by the AVFormatContext into the movie media
 * described by the stream map. The aviheader_offset is used to calculate each packet's
 * offset from the beginning of the file. Via *hadIndex it reports whether the file had a usable index. */
int import_using_index(ff_global_ptr storage, int *hadIndex, TimeValue *addedDuration) {
    int j, k, l;
    NCStream *map;
    NCStream *ncstr;
    AVFormatContext *ic;
    AVStream *stream;
    AVCodecContext *codec;
    SampleReference64Ptr sampleRec;
    int64_t header_offset, offset, duration;
    short flags;
    int sampleNum;
    ComponentResult result = noErr;

    map = storage->stream_map;
    ic = storage->format_context;
    header_offset = storage->header_offset;

    if(*hadIndex == 0)
        goto bail;

    //FLVs have unusable indexes, so don't even bother.
    if(storage->componentType == 'FLV ')
        goto bail;

    /* process each stream in ic */
    for(j = 0; j < ic->nb_streams; j++) {
        ncstr = &map[j];
        stream = ncstr->str;
        codec = stream->codec;

        /* no stream we can read */
        if(!ncstr->valid)
            continue;

        /* no index, we might as well skip */
        if(stream->nb_index_entries == 0)
            continue;

        sampleNum = 0;
        ncstr->sampleTable = calloc(stream->nb_index_entries, sizeof(SampleReference64Record));

        /* now parse the index entries */
        for(k = 0; k < stream->nb_index_entries; k++) {

            /* file offset */
            offset = header_offset + stream->index_entries[k].pos;

            /* flags */
            flags = 0;
            if((stream->index_entries[k].flags & AVINDEX_KEYFRAME) == 0)
                flags |= mediaSampleNotSync;

            sampleRec = &ncstr->sampleTable[sampleNum++];

            /* set as many fields in sampleRec as possible */
            sampleRec->dataOffset.hi = offset >> 32;
            sampleRec->dataOffset.lo = (uint32_t)offset;
            sampleRec->dataSize = stream->index_entries[k].size;
            sampleRec->sampleFlags = flags;

            /* Some samples have a data_size of zero; if that's the case, ignore them.
             * They seem to be used to stretch the frame duration and are already
             * handled by the previous packet. */
            if(sampleRec->dataSize <= 0) {
                sampleNum--;
                continue;
            }

            /* switch for the remaining fields */
            if(codec->codec_type == AVMEDIA_TYPE_VIDEO) {

                /* Calculate the frame duration */
                duration = 1;
                for(l = k+1; l < stream->nb_index_entries; l++) {
                    if(stream->index_entries[l].size > 0)
                        break;
                    duration++;
                }

                sampleRec->durationPerSample = map->base.num * duration;
                sampleRec->numberOfSamples = 1;
            }
            else if(codec->codec_type == AVMEDIA_TYPE_AUDIO) {

                /* FIXME: check if that's really the right thing to do here */
                if(ncstr->vbr) {
                    sampleRec->numberOfSamples = 1;

                    if (k + 1 < stream->nb_index_entries)
                        sampleRec->durationPerSample = (stream->index_entries[k+1].timestamp - stream->index_entries[k].timestamp) * ncstr->base.num;
                    else if (sampleNum - 2 >= 0)
                        // if we're at the last index entry, use the duration of the previous sample
                        // FIXME: this probably could be better
                        sampleRec->durationPerSample = ncstr->sampleTable[sampleNum-2].durationPerSample;

                } else {
                    sampleRec->durationPerSample = 1;
                    sampleRec->numberOfSamples = (stream->index_entries[k].size * ncstr->asbd.mFramesPerPacket) / ncstr->asbd.mBytesPerPacket;
                }
            }
        }
        if(sampleNum != 0)
        {
            /* Add all of the samples to the media */
            AddMediaSampleReferences64(ncstr->media, ncstr->sampleHdl, sampleNum, ncstr->sampleTable, NULL);

            /* The index is both present and not empty */
            *hadIndex = 1;
        }
        free(ncstr->sampleTable);
    }

    if(*hadIndex == 0)
        //No index, the remainder of this function will fail.
        goto bail;

    // insert media into tracks and set addedDuration:
    for(j = 0; j < storage->map_count && result == noErr; j++) {
        ncstr = &map[j];
        if(ncstr->valid) {
            Media media = ncstr->media;
            Track track;
            TimeRecord time;
            TimeValue mediaDuration;
            TimeScale mediaTimeScale;
            TimeScale movieTimeScale;
            int startTime = map[j].str->index_entries[0].timestamp;

            mediaDuration = GetMediaDuration(media);
            mediaTimeScale = GetMediaTimeScale(media);
            movieTimeScale = GetMovieTimeScale(storage->movie);

            /* We could handle this stream.
             * Convert the atTime parameter to the media time scale.
             * FIXME: check if that's correct */
            time.value.hi = 0;
            time.value.lo = storage->atTime;
            time.scale = movieTimeScale;
            time.base = NULL;
            ConvertTimeScale(&time, mediaTimeScale);

            track = GetMediaTrack(media);
            result = InsertMediaIntoTrack(track, time.value.lo, 0, mediaDuration, fixed1);

            // set audio/video start delay
            // note str.start_time exists but is always 0 for AVI
            if (startTime) {
                TimeRecord startTimeRec;
                startTimeRec.value.hi = 0;
                startTimeRec.value.lo = startTime * map[j].str->time_base.num;
                startTimeRec.scale = map[j].str->time_base.den;
                startTimeRec.base = NULL;
                ConvertTimeScale(&startTimeRec, movieTimeScale);
                SetTrackOffset(track, startTimeRec.value.lo);
            }

            if(result != noErr)
                goto bail;

            time.value.hi = 0;
            time.value.lo = mediaDuration;
            time.scale = mediaTimeScale;
            time.base = NULL;
            ConvertTimeScale(&time, movieTimeScale);

            if(time.value.lo > *addedDuration)
                *addedDuration = time.value.lo;
        }
    }

    storage->loadedTime = *addedDuration;

bail:
    return result;
} /* import_using_index() */
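/* A minimal sketch (illustrative helper, not from the original source) of the
 * TimeRecord conversion pattern used twice above: load a value expressed in
 * one time scale into a TimeRecord, then let QuickTime rescale it in place. */
static TimeValue movie_time_to_media_time(TimeValue movieTime, Movie movie, Media media)
{
    TimeRecord tr;
    tr.value.hi = 0;
    tr.value.lo = movieTime;                          // value in movie time units
    tr.scale = GetMovieTimeScale(movie);              // source scale
    tr.base = NULL;
    ConvertTimeScale(&tr, GetMediaTimeScale(media));  // rescale in place to media units
    return (TimeValue) tr.value.lo;                   // assumes the result fits in 32 bits
}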
Example #27
int quicktime_player::get_time_scale() {
    return GetMovieTimeScale(m->movie);
}
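// A minimal usage sketch (hypothetical 'player' instance): with the time scale
// in hand, raw movie ticks convert to seconds by simple division, as the other
// examples in this collection do.
double ticks_to_seconds(quicktime_player *player, long ticks) {
    return (double) ticks / (double) player->get_time_scale();
}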
OSErr QTTarg_AddTextToggleButtonTrack (Movie theMovie)
{
	Track					myTrack = NULL;
	Media					myMedia = NULL;
	MatrixRecord			myMatrix;
	RGBColor				myKeyColor;
	Fixed					myWidth, myHeight;
	TimeValue				myDuration = 0L;
	TimeValue				myTimeScale = 0L;
	OSErr					myErr = noErr;

	//////////
	//
	// get some information about the target movie
	//
	//////////

	if (theMovie == NULL) {
		myErr = paramErr;
		goto bail;
	}

	myWidth = Long2Fix(2 * kButtonWidth);
	myHeight = Long2Fix(2 * kButtonHeight);
	myDuration = GetMovieDuration(theMovie);
	myTimeScale = GetMovieTimeScale(theMovie);
	
	//////////
	//
	// create a new sprite track in the target movie
	//
	//////////
	
	myTrack = NewMovieTrack(theMovie, myWidth, myHeight, kNoVolume);
	myMedia = NewTrackMedia(myTrack, SpriteMediaType, myTimeScale, NULL, 0);

	// set the track matrix to compensate for any existing movie matrix
	GetMovieMatrix(theMovie, &myMatrix);
	if (InverseMatrix(&myMatrix, &myMatrix))
		SetTrackMatrix(myTrack, &myMatrix);

	myErr = BeginMediaEdits(myMedia);
	if (myErr != noErr)
		goto bail;
	
	//////////
	//
	// add sprite images and sprites to the sprite track; add actions to the sprites
	//
	//////////
	
	QTTarg_AddTextButtonSamplesToMedia(myMedia, 2 * kButtonWidth, 2 * kButtonHeight, myDuration);
	
	//////////
	//
	// insert media into track
	//
	//////////
	
	myErr = EndMediaEdits(myMedia);
	if (myErr != noErr)
		goto bail;
	
	// add the media to the track
	InsertMediaIntoTrack(myTrack, 0, 0, GetMediaDuration(myMedia), fixed1);
		
	//////////
	//
	// set the sprite track properties
	//
	//////////
	
	QTTarg_SetTrackProperties(myMedia, kNoQTIdleEvents);				// no idle events
	
	myKeyColor.red = myKeyColor.green = myKeyColor.blue = 0xffff;		// white
	MediaSetGraphicsMode(GetMediaHandler(myMedia), transparent, &myKeyColor);
	
	// make sure that the sprite track is in the frontmost layer
	SetTrackLayer(myTrack, kMaxLayerNumber);
	SetTrackLayer(myTrack, QTTarg_GetLowestLayerInMovie(theMovie) - 1);
		
bail:
	return(myErr);
}
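/* A minimal usage sketch (assumed caller code, not part of the original
 * sample): add the toggle-button sprite track to an already-open movie and
 * check the OSErr result. */
void example_add_button_track(Movie theMovie)
{
    OSErr err = QTTarg_AddTextToggleButtonTrack(theMovie);
    if (err != noErr)
        printf("Could not add text toggle button track (OSErr %d).\n", (int) err);
}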
void QTCmpr_CompressSequence (WindowObject theWindowObject)
{
	ComponentInstance			myComponent = NULL;
	GWorldPtr					myImageWorld = NULL;		// the graphics world we draw the images in
	PixMapHandle				myPixMap = NULL;
	Movie						mySrcMovie = NULL;
	Track						mySrcTrack = NULL;
	Movie						myDstMovie = NULL;
	Track						myDstTrack = NULL;
	Media						myDstMedia = NULL;
	Rect						myRect;
	PicHandle					myPicture = NULL;
	CGrafPtr					mySavedPort = NULL;
	GDHandle					mySavedDevice = NULL;
	SCTemporalSettings			myTimeSettings;
	SCDataRateSettings			myRateSettings;
	FSSpec						myFile;
	Boolean						myIsSelected = false;
	Boolean						myIsReplacing = false;	
	short						myRefNum = -1;
	StringPtr 					myMoviePrompt = QTUtils_ConvertCToPascalString(kQTCSaveMoviePrompt);
	StringPtr 					myMovieFileName = QTUtils_ConvertCToPascalString(kQTCSaveMovieFileName);
	MatrixRecord				myMatrix;
	ImageDescriptionHandle		myImageDesc = NULL;
	TimeValue					myCurMovieTime = 0L;
	TimeValue					myOrigMovieTime = 0L;		// current movie time, when compression is begun
	short						myFrameNum;		
	long						myFlags = 0L;
	long						myNumFrames = 0L;
	long						mySrcMovieDuration = 0L;	// duration of source movie
	OSErr						myErr = noErr;
#if USE_ASYNC_COMPRESSION
	ICMCompletionProcRecord		myICMComplProcRec;
	ICMCompletionProcRecordPtr	myICMComplProcPtr = NULL;
	OSErr						myICMComplProcErr = noErr;

	myICMComplProcRec.completionProc = NULL;
	myICMComplProcRec.completionRefCon = 0L;
#endif

	if (theWindowObject == NULL)
		goto bail;

	//////////
	//
	// get the movie and the first video track in the movie
	//
	//////////
	
	mySrcMovie = (**theWindowObject).fMovie;
	if (mySrcMovie == NULL)
		goto bail;

	mySrcTrack = GetMovieIndTrackType(mySrcMovie, 1, VideoMediaType, movieTrackMediaType);
	if (mySrcTrack == NULL)
		goto bail;
	
	// stop the movie; we don't want it to be playing while we're (re)compressing it
	SetMovieRate(mySrcMovie, (Fixed)0L);

	// get the current movie time, when compression is begun; we'll restore this later
	myOrigMovieTime = GetMovieTime(mySrcMovie, NULL);

	//////////
	//
	// configure and display the Standard Image Compression dialog box
	//
	//////////
	
	// open an instance of the Standard Image Compression dialog component
	myComponent = OpenDefaultComponent(StandardCompressionType, StandardCompressionSubType);
	if (myComponent == NULL)
		goto bail;

	// turn off "best depth" option in the compression dialog, because all of our
	// buffering is done at 32-bits (regardless of the depth of the source data)
	//
	// a more ambitious approach would be to loop through each of the video sample
	// descriptions in each of the video tracks looking for the deepest depth, and
	// using that for the best depth; better yet, we could find out which compressors
	// were used and set one of those as the default in the compression dialog
	SCGetInfo(myComponent, scPreferenceFlagsType, &myFlags);
	myFlags &= ~scShowBestDepth;
	SCSetInfo(myComponent, scPreferenceFlagsType, &myFlags);

	// because we are recompressing a movie that may have a variable frame rate,
	// we want to allow the user to leave the frame rate text field blank (in which
	// case we can preserve the frame durations of the source movie); if the user
	// enters a number, we will resample the movie at a new frame rate; if we don't
	// clear this flag, the compression dialog will not allow zero in the frame rate field
	//
	// NOTE: we could have set this flag above when we cleared the scShowBestDepth flag;
	// it is done here for clarity.	
	SCGetInfo(myComponent, scPreferenceFlagsType, &myFlags);
	myFlags |= scAllowZeroFrameRate;
	SCSetInfo(myComponent, scPreferenceFlagsType, &myFlags);

	// get the number of video frames in the movie
	myNumFrames = QTUtils_GetFrameCount(mySrcTrack);

	// get the bounding rectangle of the movie, create a 32-bit GWorld with those
	// dimensions, and draw the movie poster picture into it; this GWorld will be
	// used for the test image in the compression dialog box and for rendering movie
	// frames
	myPicture = GetMoviePosterPict(mySrcMovie);
	if (myPicture == NULL)
		goto bail;
		
	GetMovieBox(mySrcMovie, &myRect);

	myErr = NewGWorld(&myImageWorld, 32, &myRect, NULL, NULL, 0L);
	if (myErr != noErr)
		goto bail;
		
	// get the pixmap of the GWorld; we'll lock the pixmap, just to be safe
	myPixMap = GetGWorldPixMap(myImageWorld);
	if (!LockPixels(myPixMap))
		goto bail;

	// draw the movie poster image into the GWorld
	GetGWorld(&mySavedPort, &mySavedDevice);
	SetGWorld(myImageWorld, NULL);
	EraseRect(&myRect);
	DrawPicture(myPicture, &myRect);
	KillPicture(myPicture);
	SetGWorld(mySavedPort, mySavedDevice);

	// set the picture to be displayed in the dialog box; passing NULL for the rect
	// means use the entire image; passing 0 for the flags means to use the default
	// system method of displaying the test image, which is currently a combination
	// of cropping and scaling; personally, I prefer scaling (your mileage may vary)
	SCSetTestImagePixMap(myComponent, myPixMap, NULL, scPreferScaling);

	// install the custom procs, if requested
	// we can install two kinds of custom procedures for use in connection with
	// the standard dialog box: (1) a modal-dialog filter function, and (2) a hook
	// function to handle the custom button in the dialog box
	if (gUseExtendedProcs)
		QTCmpr_InstallExtendedProcs(myComponent, (long)myPixMap);
	
	// set up some default settings for the compression dialog
	SCDefaultPixMapSettings(myComponent, myPixMap, true);
	
	// clear out the default frame rate chosen by Standard Compression (a frame rate
	// of 0 means to use the rate of the source movie)
	myErr = SCGetInfo(myComponent, scTemporalSettingsType, &myTimeSettings);
	if (myErr != noErr)
		goto bail;

	myTimeSettings.frameRate = 0;
	SCSetInfo(myComponent, scTemporalSettingsType, &myTimeSettings);

	// request image compression settings from the user; in other words, put up the dialog box
	myErr = SCRequestSequenceSettings(myComponent);
	if (myErr == scUserCancelled)
		goto bail;

	// get a copy of the temporal settings the user entered; we'll need them for some
	// of our calculations (in a simpler application, we'd never have to look at them)	
	SCGetInfo(myComponent, scTemporalSettingsType, &myTimeSettings);

	//////////
	//
	// adjust the data rate [to be supplied][relevant only for movies that have sound tracks]
	//
	//////////

	
	//////////
	//
	// adjust the sample count
	//
	// if the user wants to resample the frame rate of the movie (as indicated by a non-zero
	// value in the frame rate field), calculate the number of frames and duration for the new movie
	//
	//////////
	
	if (myTimeSettings.frameRate != 0) {
		long	myDuration = GetMovieDuration(mySrcMovie);
		long	myTimeScale = GetMovieTimeScale(mySrcMovie);
		float	myFloat = (float)myDuration * myTimeSettings.frameRate;
		
		myNumFrames = myFloat / myTimeScale / 65536;
		if (myNumFrames == 0)
			myNumFrames = 1;
	}

	//////////
	//
	// get the name and location of the new movie file
	//
	//////////

	// prompt the user for a file to put the compressed image into; in theory, the name
	// should have a file extension appropriate to the type of compressed data selected by the user;
	// this is left as an exercise for the reader
	QTFrame_PutFile(myMoviePrompt, myMovieFileName, &myFile, &myIsSelected, &myIsReplacing);
	if (!myIsSelected)
		goto bail;

	// delete any existing file of that name
	if (myIsReplacing) {
		myErr = DeleteMovieFile(&myFile);
		if (myErr != noErr)
			goto bail;
	}
		
	//////////
	//
	// create the target movie
	//
	//////////
	
	myErr = CreateMovieFile(&myFile, sigMoviePlayer, smSystemScript, 
								createMovieFileDeleteCurFile | createMovieFileDontCreateResFile, &myRefNum, &myDstMovie);
	if (myErr != noErr)
		goto bail;
	
	// create a new video movie track with the same dimensions as the entire source movie
	myDstTrack = NewMovieTrack(myDstMovie,
								(long)(myRect.right - myRect.left) << 16,
								(long)(myRect.bottom - myRect.top) << 16, kNoVolume);
	if (myDstTrack == NULL)
		goto bail;
	
	// create a media for the new track with the same time scale as the source movie;
	// because the time scales are the same, we don't have to do any time scale conversions.
	myDstMedia = NewTrackMedia(myDstTrack, VIDEO_TYPE, GetMovieTimeScale(mySrcMovie), 0, 0);
	if (myDstMedia == NULL)
		goto bail;
	
	// copy the user data and settings from the source to the dest movie
	CopyMovieSettings(mySrcMovie, myDstMovie);
	
	// set movie matrix to identity and clear the movie clip region (because the conversion
	// process transforms and composites all video tracks into one untransformed video track)
	SetIdentityMatrix(&myMatrix);
	SetMovieMatrix(myDstMovie, &myMatrix);
	SetMovieClipRgn(myDstMovie, NULL);
	
	// set the movie to highest quality imaging
	SetMoviePlayHints(mySrcMovie, hintsHighQuality, hintsHighQuality);

	myImageDesc = (ImageDescriptionHandle)NewHandleClear(sizeof(ImageDescription));
	if (myImageDesc == NULL)
		goto bail;

	// prepare for adding frames to the movie
	myErr = BeginMediaEdits(myDstMedia);
	if (myErr != noErr)
		goto bail;

	//////////
	//
	// compress the image sequence
	//
	// we are going to step through the source movie, compress each frame, and then add
	// the compressed frame to the destination movie
	//
	//////////
	
	myErr = SCCompressSequenceBegin(myComponent, myPixMap, NULL, &myImageDesc);
	if (myErr != noErr)
		goto bail;
	
#if USE_ASYNC_COMPRESSION
	myFlags = codecFlagUpdatePrevious + codecFlagUpdatePreviousComp + codecFlagLiveGrab;
	SCSetInfo(myComponent, scCodecFlagsType, &myFlags);
#endif

	// clear out our image GWorld and set movie to draw into it
	SetGWorld(myImageWorld, NULL);
	EraseRect(&myRect);
	SetMovieGWorld(mySrcMovie, myImageWorld, GetGWorldDevice(myImageWorld));

	// set current time value to beginning of the source movie
	myCurMovieTime = 0;

	// get a value we'll need inside the loop
	mySrcMovieDuration = GetMovieDuration(mySrcMovie);

	// loop through all of the interesting times we counted above
	for (myFrameNum = 0; myFrameNum < myNumFrames; myFrameNum++) {
		short			mySyncFlag;
		TimeValue		myDuration;
		long			myDataSize;
		Handle			myCompressedData;

		//////////
		//
		// get the next frame of the source movie
		//
		//////////
		
		// if we are resampling the movie, step to the next frame
		if (myTimeSettings.frameRate) {
			myCurMovieTime = myFrameNum * mySrcMovieDuration / (myNumFrames - 1);
			myDuration = mySrcMovieDuration / myNumFrames;
		} else {
			OSType		myMediaType = VIDEO_TYPE;
			
			myFlags = nextTimeMediaSample;

			// if this is the first frame, include the frame we are currently on		
			if (myFrameNum == 0)
				myFlags |= nextTimeEdgeOK;
			
			// if we are maintaining the frame durations of the source movie,
			// skip to the next interesting time and get the duration for that frame
			GetMovieNextInterestingTime(mySrcMovie, myFlags, 1, &myMediaType, myCurMovieTime, 0, &myCurMovieTime, &myDuration);
		}
		
		SetMovieTimeValue(mySrcMovie, myCurMovieTime);
		MoviesTask(mySrcMovie, 0);
		MoviesTask(mySrcMovie, 0);
		MoviesTask(mySrcMovie, 0);

		// if data rate constraining is being done, tell Standard Compression the
		// duration of the current frame in milliseconds; we only need to do this
		// if the frames have variable durations
		if (!SCGetInfo(myComponent, scDataRateSettingsType, &myRateSettings)) {
			myRateSettings.frameDuration = myDuration * 1000 / GetMovieTimeScale(mySrcMovie);
			SCSetInfo(myComponent, scDataRateSettingsType, &myRateSettings);
		}

		//////////
		//
		// compress the current frame of the source movie and add it to the destination movie
		//
		//////////
		
		// if SCCompressSequenceFrame completes successfully, myCompressedData will hold
		// a handle to the newly-compressed image data and myDataSize will be the size of
		// the compressed data (which will usually be different from the size of the handle);
		// also mySyncFlag will be a value that indicates whether or not the frame is a
		// key frame (and which we pass directly to AddMediaSample); note that we do not need
		// to dispose of myCompressedData, since SCCompressSequenceEnd will do that for us
#if !USE_ASYNC_COMPRESSION
		myErr = SCCompressSequenceFrame(myComponent, myPixMap, &myRect, &myCompressedData, &myDataSize, &mySyncFlag);
		if (myErr != noErr)
			goto bail;
#else
		if (myICMComplProcPtr == NULL) {
			myICMComplProcRec.completionProc = NewICMCompletionProc(QTCmpr_CompletionProc);
			myICMComplProcRec.completionRefCon = (long)&myICMComplProcErr;
			myICMComplProcPtr = &myICMComplProcRec;
		}
		
		myICMComplProcErr = kAsyncDefaultValue;
		
		myErr = SCCompressSequenceFrameAsync(myComponent, myPixMap, &myRect, &myCompressedData, &myDataSize, &mySyncFlag, myICMComplProcPtr);
		if (myErr != noErr)
			goto bail;

		// spin our wheels while we're waiting for the compress call to complete
		while (myICMComplProcErr == kAsyncDefaultValue) {
			EventRecord			myEvent;
			
			WaitNextEvent(0, &myEvent, 60, NULL);
			SCAsyncIdle(myComponent);
		}
		myErr = myICMComplProcErr;
#endif

		myErr = AddMediaSample(myDstMedia, myCompressedData, 0, myDataSize, myDuration, (SampleDescriptionHandle)myImageDesc, 1, mySyncFlag, NULL);
		if (myErr != noErr)
			goto bail;
	}
	
	// close the compression sequence; this will dispose of the image description
	// and compressed data handles allocated by SCCompressSequenceBegin
	SCCompressSequenceEnd(myComponent);

	//////////
	//
	// add the media data to the destination movie
	//
	//////////
	
	myErr = EndMediaEdits(myDstMedia);
	if (myErr != noErr)
		goto bail;
	
	InsertMediaIntoTrack(myDstTrack, 0, 0, GetMediaDuration(myDstMedia), fixed1);

	// add the movie resource to the dst movie file.
	myErr = AddMovieResource(myDstMovie, myRefNum, NULL, NULL);
	if (myErr != noErr)
		goto bail;

	// flatten the movie data [to be supplied]
	
	// close the movie file
	CloseMovieFile(myRefNum);
	
bail:
	// close the Standard Compression component
	if (myComponent != NULL)
		CloseComponent(myComponent);

	if (mySrcMovie != NULL) {
		// restore the source movie's original graphics port and device
		SetMovieGWorld(mySrcMovie, mySavedPort, mySavedDevice);

		// restore the source movie's original movie time
		SetMovieTimeValue(mySrcMovie, myOrigMovieTime);
	}
	
	// restore the original graphics port and device
	SetGWorld(mySavedPort, mySavedDevice);

	// delete the GWorld we were drawing frames into
	if (myImageWorld != NULL)
		DisposeGWorld(myImageWorld);
	
#if USE_ASYNC_COMPRESSION
	if (myICMComplProcRec.completionProc != NULL)
		DisposeICMCompletionUPP(myICMComplProcRec.completionProc);
#endif

	free(myMoviePrompt);
	free(myMovieFileName);
}
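/* A minimal skeleton (illustrative; it uses only the Standard Compression and
 * Movie Toolbox calls already shown above, with error handling and frame
 * rendering trimmed) of the begin/frame/end pattern that
 * QTCmpr_CompressSequence() is built around. */
static OSErr example_compress_frames(ComponentInstance sc, PixMapHandle pixmap, Rect *rect,
                                     Media dstMedia, long numFrames, TimeValue frameDuration)
{
    ImageDescriptionHandle desc = NULL;
    OSErr err;
    long i;

    // Begin the sequence; this allocates the image description and data handles:
    err = SCCompressSequenceBegin(sc, pixmap, NULL, &desc);

    for (i = 0; (err == noErr) && (i < numFrames); i++) {
        Handle data = NULL;
        long dataSize = 0;
        short syncFlag = 0;

        // ...render frame i into the pixmap here, e.g. via SetMovieTimeValue() + MoviesTask()...

        err = SCCompressSequenceFrame(sc, pixmap, rect, &data, &dataSize, &syncFlag);
        if (err == noErr)
            err = AddMediaSample(dstMedia, data, 0, dataSize, frameDuration,
                                 (SampleDescriptionHandle) desc, 1, syncFlag, NULL);
    }

    // End the sequence; this disposes the image description and compressed data handles:
    SCCompressSequenceEnd(sc);
    return err;
}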