Example #1
ComponentResult create_placeholder_track(Movie movie, Track *placeholderTrack, TimeValue duration, Handle dataRef, OSType dataRefType) {
    SampleDescriptionHandle sdH = NULL;
    Media placeholderMedia;
    TimeScale movieTimeScale;
    ComponentResult result = noErr;

    movieTimeScale = GetMovieTimeScale(movie);

    sdH = (SampleDescriptionHandle)NewHandleClear(sizeof(SampleDescription));
    if (sdH == NULL) {
        result = memFullErr;
        goto bail;
    }
    (*sdH)->descSize = sizeof(SampleDescription);

    *placeholderTrack = NewMovieTrack(movie, 0, 0, kNoVolume);
    placeholderMedia = NewTrackMedia(*placeholderTrack, BaseMediaType, movieTimeScale, dataRef, dataRefType);

    result = AddMediaSampleReference(placeholderMedia, 0, 1, duration, sdH, 1, 0, NULL);
    if(result != noErr)
        goto bail;

    result = InsertMediaIntoTrack(*placeholderTrack, -1, 0, duration, fixed1);

bail:
    if (sdH)
        DisposeHandle((Handle) sdH);
    return(result);
}
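A hypothetical caller for the routine above might look like the sketch below, assuming an existing Movie named theMovie; the handle-based data reference, the one-second duration, and the variable names are illustration-only assumptions, and the placeholder is removed with DisposeMovieTrack() once real content exists.

/* Hypothetical usage sketch (not part of the original example). */
Track placeholderTrack = NULL;
Handle mediaHandle = NewHandleClear(0);
Handle dataRef = NULL;

/* wrap the handle in a handle data reference, as later examples do */
PtrToHand(&mediaHandle, &dataRef, sizeof(Handle));

ComponentResult cr = create_placeholder_track(theMovie, &placeholderTrack,
                                              GetMovieTimeScale(theMovie), /* one second */
                                              dataRef, HandleDataHandlerSubType);
if (cr == noErr && placeholderTrack != NULL) {
    /* ... import the real samples here ... */
    DisposeMovieTrack(placeholderTrack);  /* drop the placeholder once real data exists */
}
if (dataRef != NULL)
    DisposeHandle(dataRef);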
Example #2
static OSStatus finish_video(void)
{
    video_ready = 0;
    
    // ----- PixelBuffer -----
    CVPixelBufferRelease(pixelBuffer);

    // ----- Codec -----
    
    OSStatus theError = ICMCompressionSessionCompleteFrames(videoCompressionSession, true, 0, 0);
    if (theError)
        log_debug("quicktime_video: error completing frames!");
        
    ICMCompressionSessionRelease(videoCompressionSession);
	
    // ----- Movie -----

    //End media editing
    theError = EndMediaEdits(videoMedia);
    if (theError)
        log_debug("quicktime_video: error ending media edits");

    theError = ExtendMediaDecodeDurationToDisplayEndTime(videoMedia, NULL);
    if (theError)
        log_debug("quicktime_video: error setting decode duration!");

    //Add media to track
    theError = InsertMediaIntoTrack(videoTrack, 0, 0, GetMediaDisplayDuration(videoMedia), fixed1);
    if (theError)
        log_debug("quicktime_video: error inserting media into track!");

    videoTrack=NULL;
    videoMedia=NULL;
    return theError;
}
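For context, frames would have been handed to the same compression session earlier via ICMCompressionSessionEncodeFrame. A minimal sketch of that step follows; the helper name and the timing parameters are assumptions, not part of the original source.

/* Hypothetical encode step that precedes finish_video() above (sketch only). */
static OSStatus encode_one_frame(CVPixelBufferRef frame,
                                 TimeValue64 displayTime, TimeValue64 frameDuration)
{
    return ICMCompressionSessionEncodeFrame(videoCompressionSession,
                                            frame,
                                            displayTime,         /* display timestamp */
                                            frameDuration,       /* display duration  */
                                            kICMValidTime_DisplayTimeStampIsValid |
                                                kICMValidTime_DisplayDurationIsValid,
                                            NULL,                /* no per-frame properties */
                                            NULL,                /* no sourceFrameRefCon    */
                                            NULL);               /* no tracking callback    */
}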
void ofxQtAudioRecorder::addAudioSamples( float * audioBuffer, int sampleCount){
	
	if(audioMedia != NULL)
	{
		
		printf("adding %i samples\n", sampleCount);
				
		OSErr err = BeginMediaEdits(audioMedia);
		checkErr(0x0201);
		
		int bytesPerSample = sizeof(Float32);
		
		//printf("bytes %i\n", bytesPerSample);
		
		AddMediaSample2(audioMedia,   // insert into audio media
						(UInt8*)audioBuffer, // pointer to the audio data
						sampleCount * bytesPerSample, // number of bytes of audio data
						kSoundSampleDuration,  // normal decode duration
						0,            // no display offset (won't work anyway)
						(SampleDescriptionHandle) soundDesc,
						sampleCount / numChannels, // number of samples 
						0,            // no flags
						NULL);        // not interested in decode time
		
		// end media editing and add media to the track
		EndMediaEdits(audioMedia);
		checkErr(0x0202);
		
		TimeValue trackStart = (TimeValue)(0); 
		InsertMediaIntoTrack(audioTrack, trackStart, 0, GetMediaDuration(audioMedia), fixed1);
		checkErr(0x0203);
		
		//printf("added audio media\n");
	}
}
Example #4
void finish_audio(void)
{
    OSStatus theError;
    
    // flush buffer
    if (audioBuffer.used > 0)
        encode_audio(&audioBuffer);
    
    //End media editing
    theError = EndMediaEdits(audioMedia);
    if (theError)
        log_debug("quicktime_audio: error ending media edits");

    theError = ExtendMediaDecodeDurationToDisplayEndTime(audioMedia, NULL);
    if (theError)
        log_debug("quicktime_audio: error setting decode duration!");

    //Add media to track
    theError = InsertMediaIntoTrack(audioTrack, 0, 0, GetMediaDisplayDuration(audioMedia), fixed1);
    if (theError)
        log_debug("quicktime_audio: error inserting media into track!");

    audioTrack=NULL;
    audioMedia=NULL;
    
    DisposeHandle((Handle)soundDescriptionHandle);

    // free buffer
    if (audioBuffer.buffer != NULL) {
        free(audioBuffer.buffer);
        audioBuffer.buffer = NULL;
    }

    audio_ready = 0;
}
Example #5
void QTVideo_CreateMyVideoTrack(Movie theMovie)
{
	Track theTrack;
	Media theMedia;
	OSErr err = noErr;
	Rect trackFrame = {0,0,100,320};

		theTrack = NewMovieTrack (theMovie, 
								FixRatio(trackFrame.right,1),
								FixRatio(trackFrame.bottom,1), 
								kNoVolume);
		CheckError( GetMoviesError(), "NewMovieTrack error" );

		theMedia = NewTrackMedia (theTrack, VideoMediaType,
								kVideoTimeScale, /* Video Time Scale */
								nil, 0);
		CheckError( GetMoviesError(), "NewTrackMedia error" );

		err = BeginMediaEdits (theMedia);
		CheckError( err, "BeginMediaEdits error" );

		QTVideo_AddVideoSamplesToMedia (theMedia, &trackFrame);

		err = EndMediaEdits (theMedia);
		CheckError( err, "EndMediaEdits error" );

		err = InsertMediaIntoTrack (theTrack, kTrackStart,/* track start time */
									kMediaStart, /* media start time */
									GetMediaDuration (theMedia),
									fixed1);
		CheckError( err, "InsertMediaIntoTrack error" );
} 
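QTVideo_AddVideoSamplesToMedia itself is not part of this excerpt. The sketch below shows the usual pattern such a routine follows (offscreen GWorld, CompressImage, AddMediaSample); the helper name, the Animation codec choice, and the sampleDuration parameter are assumptions, not the original Apple sample.

/* Sketch only: compress one offscreen frame and append it as a media sample. */
static void QTVideo_AddOneSampleSketch(Media theMedia, const Rect *trackFrame,
                                       TimeValue sampleDuration)
{
	GWorldPtr              gworld = NULL;
	PixMapHandle           pixMap = NULL;
	ImageDescriptionHandle desc = (ImageDescriptionHandle)NewHandle(4);
	Handle                 compressed = NULL;
	long                   maxSize = 0;
	OSErr                  err;

	err = NewGWorld(&gworld, 32, trackFrame, NULL, NULL, (GWorldFlags)0);
	if (err != noErr || desc == NULL)
		goto bail;

	pixMap = GetGWorldPixMap(gworld);
	LockPixels(pixMap);

	/* ... draw the frame into the GWorld with QuickDraw here ... */

	err = GetMaxCompressionSize(pixMap, trackFrame, 0, codecNormalQuality,
	                            kAnimationCodecType, (CompressorComponent)anyCodec, &maxSize);
	if (err != noErr)
		goto bail;

	compressed = NewHandle(maxSize);
	if (compressed == NULL)
		goto bail;
	HLock(compressed);

	err = CompressImage(pixMap, trackFrame, codecNormalQuality,
	                    kAnimationCodecType, desc, *compressed);
	if (err == noErr)
		err = AddMediaSample(theMedia, compressed, 0, (**desc).dataSize,
		                     sampleDuration, (SampleDescriptionHandle)desc,
		                     1, 0 /* sync sample */, NULL);

bail:
	if (compressed != NULL) DisposeHandle(compressed);
	if (desc != NULL)       DisposeHandle((Handle)desc);
	if (pixMap != NULL)     UnlockPixels(pixMap);
	if (gworld != NULL)     DisposeGWorld(gworld);
}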
Example #6
void MovieMaker::EndCapture()
{
	OSStatus	error = noErr;

	if (movie && movieResRef)
	{
		if (media && track)
		{
			// Errors adding the frame aren't too important here.
			(void)addFrame();
			
			error = EndMediaEdits(media);
			if (error == noErr)
			{
				error = SCCompressSequenceEnd(ci);
			}

			if (error == noErr)
			{
				error = InsertMediaIntoTrack(track, 0, 0, GetMediaDuration(media), fixed1);
			}
			media = NULL;
			track = NULL;
		}
		
		short resId = movieInDataForkResID;
		error = AddMovieResource(movie, movieResRef, &resId, "\pSecond Life");
		CloseMovieFile(movieResRef);
		movieResRef = 0;
		movie = NULL;
	}
	
	// NOTE:  idh is disposed by SCCompressSequenceEnd.
	idh = NULL;
	
	if(ci)
	{
		CloseComponent(ci);
		ci = NULL;
	}
	
	if(gworld)
	{
		DisposeGWorld(gworld);
		gworld = NULL;
	}

	if(buffer)
	{
		free(buffer);
		buffer = NULL;
	}

	if(invertedBuffer)
	{
		free(invertedBuffer);
		invertedBuffer = NULL;
	}
}
Example #7
TLevelWriter3gp::~TLevelWriter3gp()
{
#if 0
if (m_pixmap) 
  UnlockPixels(m_pixmap);
if (m_compressedData)
  DisposeHandle(m_compressedData);
if (m_gworld)
  DisposeGWorld(m_gworld);
#endif

	QDErr err;

	if (m_videoMedia)
		if ((err = EndMediaEdits(m_videoMedia)) != noErr) {
		} // throw TImageException(getFilePath(), "can't end edit media");

	if (m_videoTrack)
		if ((err = InsertMediaIntoTrack(m_videoTrack, 0, 0,
										GetMediaDuration(m_videoMedia), fixed1))) {
		} // throw TImageException(getFilePath(), "can't insert media into track");

	short resId = movieInDataForkResID;
	if (m_movie) {
		FSSpec fspec;
		long myFlags = 0L;
		OSErr myErr = noErr;
		//UCHAR myCancelled = FALSE;

		const std::string pathStr = toString(m_path.getWideString());
		getFSSpecFromPosixPath(pathStr.c_str(), &fspec, true);

		myFlags = createMovieFileDeleteCurFile; // |
												//movieFileSpecValid | movieToFileOnlyExport;

		myErr = ConvertMovieToFile(
			m_movie,				// the movie to convert
			NULL,					// all tracks in the movie
			&fspec,					// the output file
			'3gpp',					// the output file type
			FOUR_CHAR_CODE('TVOD'), // the output file creator
			smSystemScript,			// the script
			&resId,					// the resource ID to be returned
			myFlags,				// export flags
			m_myExporter);			// the movie export component to use
	}

	DisposeHandle(m_hMovieData);
	DisposeHandle(m_dataRef);
	if (m_hSoundMovieData)
		DisposeHandle(m_hSoundMovieData);

	if (m_refNum)
		CloseMovieFile(m_refNum);
	DisposeMovie(m_movie);
}
Example #8
void quicktime_recorder::finish() {
    EndMediaEdits( m->media );
    TimeValue duration = GetMediaDuration( m->media );
    InsertMediaIntoTrack( m->track, 0, 0, duration, fixed1 );

    OSErr err = UpdateMovieInStorage( m->movie, m->data_handler );
    err = CloseMovieStorage( m->data_handler );

    delete m;
}
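The finish() above assumes a movie, track, media, and data handler created elsewhere. One plausible setup is sketched below using the movie-storage API; the output path and variable names are assumptions, not part of the original recorder.

/* Hypothetical setup sketch matching the teardown in quicktime_recorder::finish(). */
Handle      dataRef = NULL;
OSType      dataRefType = 0;
DataHandler dataHandler = 0;
Movie       movie = NULL;

OSStatus err = QTNewDataReferenceFromFullPathCFString(CFSTR("/tmp/capture.mov"),
                                                      kQTNativeDefaultPathStyle, 0,
                                                      &dataRef, &dataRefType);
if (err == noErr)
    err = CreateMovieStorage(dataRef, dataRefType,
                             FOUR_CHAR_CODE('TVOD'), smCurrentScript,
                             createMovieFileDeleteCurFile,
                             &dataHandler, &movie);
/* ... NewMovieTrack / NewTrackMedia / BeginMediaEdits, then add samples ... */
/* finish() later calls UpdateMovieInStorage() and CloseMovieStorage() on these. */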
OSErr QTDR_CreateTrackInRAM (Movie theMovie)
{
	Track					myTrack = NULL;
	Media					myMedia = NULL;
	Handle					myDataRef = NULL;
	unsigned long			myAtomHeader[2];
	OSErr					myErr = noErr;

	if (theMovie == NULL)
		return(paramErr);
	
	myDataRef = NewHandleClear(sizeof(Handle) + sizeof(char));
	if (myDataRef == NULL)
		return(MemError());
	
	myAtomHeader[0] = EndianU32_NtoB(sizeof(myAtomHeader));
	myAtomHeader[1] = EndianU32_NtoB(kDataRefExtensionInitializationData);

	myErr = PtrAndHand(myAtomHeader, myDataRef, sizeof(myAtomHeader));
	if (myErr != noErr)
		goto bail;
		
	// create the movie track and media
	myTrack = NewMovieTrack(theMovie, FixRatio(kVideoTrackWidth, 1), FixRatio(kVideoTrackHeight, 1), kNoVolume);
	myErr = GetMoviesError();
	if (myErr != noErr)
		goto bail;
		
	myMedia = NewTrackMedia(myTrack, VideoMediaType, kVideoTimeScale, myDataRef, HandleDataHandlerSubType);
	myErr = GetMoviesError();
	if (myErr != noErr)
		goto bail;

	// create the media samples
	myErr = BeginMediaEdits(myMedia);
	if (myErr != noErr)
		goto bail;

	myErr = QTDR_AddVideoSamplesToMedia(myMedia, kVideoTrackWidth, kVideoTrackHeight);
	if (myErr != noErr)
		goto bail;

	myErr = EndMediaEdits(myMedia);
	if (myErr != noErr)
		goto bail;
	
	// add the media to the track
	myErr = InsertMediaIntoTrack(myTrack, 0, 0, GetMediaDuration(myMedia), fixed1);
	
bail:
	if (myDataRef != NULL)
		DisposeHandle(myDataRef);

	return(myErr);
}
void
qtCanvas::Impl::finishTrack()
{
	if (mMedia) {
		EndMediaEdits(mMedia);
		CheckMoviesError("EndMediaEdits");

		if (mTrack) {
			InsertMediaIntoTrack(mTrack,
						0, 0, GetMediaDuration(mMedia), fixed1);
			CheckMoviesError("InsertMediaIntoTrack" );
		}
	}
}
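CheckMoviesError() is a project-specific helper that is not included in this excerpt; a minimal sketch of such a helper, assuming it only logs, might look like this.

/* Hypothetical helper; the real qtCanvas implementation is not shown here. */
static void CheckMoviesError(const char *where)
{
	OSErr err = GetMoviesError();
	if (err != noErr)
		fprintf(stderr, "%s failed: error %d\n", where, (int)err);
}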
Example #11
static void QT_EndCreateMyVideoTrack(ReportList *reports)
{
	OSErr err = noErr;

	QT_EndAddVideoSamplesToMedia ();

	err = EndMediaEdits (qtexport->theMedia);
	CheckError( err, "EndMediaEdits error", reports );

	err = InsertMediaIntoTrack (qtexport->theTrack,
								kTrackStart,/* track start time */
								kMediaStart,/* media start time */
								GetMediaDuration (qtexport->theMedia),
								fixed1);
	CheckError( err, "InsertMediaIntoTrack error", reports );
} 
OSErr QTTarg_AddTextToggleButtonTrack (Movie theMovie)
{
	Track					myTrack = NULL;
	Media					myMedia = NULL;
	MatrixRecord			myMatrix;
	RGBColor				myKeyColor;
	Fixed					myWidth, myHeight;
	TimeValue				myDuration = 0L;
	TimeValue				myTimeScale = 0L;
	OSErr					myErr = noErr;

	//////////
	//
	// get some information about the target movie
	//
	//////////

	if (theMovie == NULL) {
		myErr = paramErr;
		goto bail;
	}

	myWidth = Long2Fix(2 * kButtonWidth);
	myHeight = Long2Fix(2 * kButtonHeight);
	myDuration = GetMovieDuration(theMovie);
	myTimeScale = GetMovieTimeScale(theMovie);
	
	//////////
	//
	// create a new sprite track in the target movie
	//
	//////////
	
	myTrack = NewMovieTrack(theMovie, myWidth, myHeight, kNoVolume);
	myMedia = NewTrackMedia(myTrack, SpriteMediaType, myTimeScale, NULL, 0);

	// set the track matrix to compensate for any existing movie matrix
	GetMovieMatrix(theMovie, &myMatrix);
	if (InverseMatrix(&myMatrix, &myMatrix))
		SetTrackMatrix(myTrack, &myMatrix);

	myErr = BeginMediaEdits(myMedia);
	if (myErr != noErr)
		goto bail;
	
	//////////
	//
	// add sprite images and sprites to the sprite track; add actions to the sprites
	//
	//////////
	
	QTTarg_AddTextButtonSamplesToMedia(myMedia, 2 * kButtonWidth, 2 * kButtonHeight, myDuration);
	
	//////////
	//
	// insert media into track
	//
	//////////
	
	myErr = EndMediaEdits(myMedia);
	if (myErr != noErr)
		goto bail;
	
	// add the media to the track
	InsertMediaIntoTrack(myTrack, 0, 0, GetMediaDuration(myMedia), fixed1);
		
	//////////
	//
	// set the sprite track properties
	//
	//////////
	
	QTTarg_SetTrackProperties(myMedia, kNoQTIdleEvents);				// no idle events
	
	myKeyColor.red = myKeyColor.green = myKeyColor.blue = 0xffff;		// white
	MediaSetGraphicsMode(GetMediaHandler(myMedia), transparent, &myKeyColor);
	
	// make sure that the sprite track is in the frontmost layer
	SetTrackLayer(myTrack, kMaxLayerNumber);
	SetTrackLayer(myTrack, QTTarg_GetLowestLayerInMovie(theMovie) - 1);
		
bail:
	return(myErr);
}
OSErr QTTarg_CreateTwinSpritesMovie (void)
{
	Movie					myMovie = NULL;
	Track					myTrack = NULL;
	Media					myMedia = NULL;
	FSSpec					myFile;
	Boolean					myIsSelected = false;
	Boolean					myIsReplacing = false;	
	Fixed					myHeight = 0;
	Fixed					myWidth = 0;
	StringPtr 				myPrompt = QTUtils_ConvertCToPascalString(kSpriteSavePrompt);
	StringPtr 				myFileName = QTUtils_ConvertCToPascalString(kSpriteSaveMovieFileName);
	long					myFlags = createMovieFileDeleteCurFile | createMovieFileDontCreateResFile;
	OSType					myType = FOUR_CHAR_CODE('none');
	short					myResRefNum = 0;
	short					myResID = movieInDataForkResID;
	OSErr					myErr = noErr;

	//////////
	//
	// create a new movie file
	//
	//////////

	// prompt the user for the destination file name
	QTFrame_PutFile(myPrompt, myFileName, &myFile, &myIsSelected, &myIsReplacing);
	myErr = myIsSelected ? noErr : userCanceledErr;
	if (!myIsSelected)
		goto bail;

	// create a movie file for the destination movie
	myErr = CreateMovieFile(&myFile, FOUR_CHAR_CODE('TVOD'), smSystemScript, myFlags, &myResRefNum, &myMovie);
	if (myErr != noErr)
		goto bail;
	
	// select the "no-interface" movie controller
	myType = EndianU32_NtoB(myType);
	SetUserDataItem(GetMovieUserData(myMovie), &myType, sizeof(myType), kUserDataMovieControllerType, 1);
	
	//////////
	//
	// create the sprite track and media
	//
	//////////
	
	myWidth = Long2Fix(kIconSpriteTrackWidth);
	myHeight = Long2Fix(kIconSpriteTrackHeight);

	myTrack = NewMovieTrack(myMovie, myWidth, myHeight, kNoVolume);
	myMedia = NewTrackMedia(myTrack, SpriteMediaType, kSpriteMediaTimeScale, NULL, 0);

	myErr = BeginMediaEdits(myMedia);
	if (myErr != noErr)
		goto bail;

	//////////
	//
	// add the appropriate samples to the sprite media
	//
	//////////
	
	myErr = QTTarg_AddIconMovieSamplesToMedia(myMedia);
	if (myErr != noErr)
		goto bail;
	
	myErr = EndMediaEdits(myMedia);
	if (myErr != noErr)
		goto bail;
	
	// add the media to the track
	InsertMediaIntoTrack(myTrack, 0, 0, GetMediaDuration(myMedia), fixed1);
		
	//////////
	//
	// set the sprite track properties
	//
	//////////
	
	QTTarg_SetTrackProperties(myMedia, 1);
	
	//////////
	//
	// add the movie resource to the movie file
	//
	//////////
	
	myErr = AddMovieResource(myMovie, myResRefNum, &myResID, myFile.name);
		
bail:
	if (myResRefNum != 0)
		CloseMovieFile(myResRefNum);

	if (myMovie != NULL)
		DisposeMovie(myMovie);
		
	free(myPrompt);
	free(myFileName);

	return(myErr);
}
OSErr QTTarg_MakeDualVRControllerMovie (void)
{
	Movie					myMovie = NULL;
	Track					myTrack = NULL;
	Media					myMedia = NULL;
	RGBColor				myKeyColor;
	Fixed					myWidth, myHeight;
	FSSpec					myFile;
	Boolean					myIsSelected = false;
	Boolean					myIsReplacing = false;	
	StringPtr 				myPrompt = QTUtils_ConvertCToPascalString(kSpriteSavePrompt);
	StringPtr 				myFileName = QTUtils_ConvertCToPascalString(kSpriteSaveMovieFileName);
	long					myFlags = createMovieFileDeleteCurFile | createMovieFileDontCreateResFile;
	short					myResRefNum = 0;
	short					myResID = movieInDataForkResID;
	OSType					myType = FOUR_CHAR_CODE('none');
	OSErr					myErr = noErr;

	//////////
	//
	// create a new movie file
	//
	//////////

	// prompt the user for the destination file name
	QTFrame_PutFile(myPrompt, myFileName, &myFile, &myIsSelected, &myIsReplacing);
	myErr = myIsSelected ? noErr : userCanceledErr;
	if (!myIsSelected)
		goto bail;

	// create a movie file for the destination movie
	myErr = CreateMovieFile(&myFile, FOUR_CHAR_CODE('TVOD'), smSystemScript, myFlags, &myResRefNum, &myMovie);
	if (myErr != noErr)
		goto bail;
	
	// select the "no-interface" movie controller
	myType = EndianU32_NtoB(myType);
	SetUserDataItem(GetMovieUserData(myMovie), &myType, sizeof(myType), kUserDataMovieControllerType, 1);

	//////////
	//
	// get some information about the target movie
	//
	//////////

	myWidth = Long2Fix(kVRControlMovieWidth);
	myHeight = Long2Fix(kVRControlMovieHeight);
	
	//////////
	//
	// create a new sprite track in the target movie
	//
	//////////
	
	myTrack = NewMovieTrack(myMovie, myWidth, myHeight, kNoVolume);
	myMedia = NewTrackMedia(myTrack, SpriteMediaType, kVRControlMovieDuration, NULL, 0);

	myErr = BeginMediaEdits(myMedia);
	if (myErr != noErr)
		goto bail;
	
	//////////
	//
	// add sprite images and sprites to the sprite track; add actions to the sprites
	//
	//////////
	
	QTTarg_AddVRControllerButtonSamplesToMedia(myMedia, kVRControlMovieWidth, kVRControlMovieHeight, kVRControlMovieDuration);
	
	//////////
	//
	// insert media into track
	//
	//////////
	
	myErr = EndMediaEdits(myMedia);
	if (myErr != noErr)
		goto bail;
	
	// add the media to the track
	InsertMediaIntoTrack(myTrack, 0, 0, GetMediaDuration(myMedia), fixed1);
		
	//////////
	//
	// set the sprite track properties
	//
	//////////
	
	QTTarg_SetTrackProperties(myMedia, 0);								// idle as fast as possible
	
	myKeyColor.red = myKeyColor.green = myKeyColor.blue = 0xffff;		// white
	MediaSetGraphicsMode(GetMediaHandler(myMedia), transparent, &myKeyColor);
	
	//////////
	//
	// add the movie resource to the movie file
	//
	//////////
	
	myErr = AddMovieResource(myMovie, myResRefNum, &myResID, myFile.name);
		
bail:
	if (myResRefNum != 0)
		CloseMovieFile(myResRefNum);

	if (myMovie != NULL)
		DisposeMovie(myMovie);
		
	free(myPrompt);
	free(myFileName);

	return(myErr);
}
Example #15
/* This function imports the AVI represented by the AVFormatContext into the movie media referenced
 * by the stream map. The header offset is used to calculate each packet's offset from the
 * beginning of the file. Via *hadIndex it reports whether the file actually had a usable index. */
int import_using_index(ff_global_ptr storage, int *hadIndex, TimeValue *addedDuration) {
    int j, k, l;
    NCStream *map;
    NCStream *ncstr;
    AVFormatContext *ic;
    AVStream *stream;
    AVCodecContext *codec;
    SampleReference64Ptr sampleRec;
    int64_t header_offset, offset, duration;
    short flags;
    int sampleNum;
    ComponentResult result = noErr;

    map = storage->stream_map;
    ic = storage->format_context;
    header_offset = storage->header_offset;

    if(*hadIndex == 0)
        goto bail;

    //FLVs have unusable indexes, so don't even bother.
    if(storage->componentType == 'FLV ')
        goto bail;

    /* process each stream in ic */
    for(j = 0; j < ic->nb_streams; j++) {
        ncstr = &map[j];
        stream = ncstr->str;
        codec = stream->codec;

        /* no stream we can read */
        if(!ncstr->valid)
            continue;

        /* no index, we might as well skip */
        if(stream->nb_index_entries == 0)
            continue;

        sampleNum = 0;
        ncstr->sampleTable = calloc(stream->nb_index_entries, sizeof(SampleReference64Record));

        /* now parse the index entries */
        for(k = 0; k < stream->nb_index_entries; k++) {

            /* file offset */
            offset = header_offset + stream->index_entries[k].pos;

            /* flags */
            flags = 0;
            if((stream->index_entries[k].flags & AVINDEX_KEYFRAME) == 0)
                flags |= mediaSampleNotSync;

            sampleRec = &ncstr->sampleTable[sampleNum++];

            /* set as many fields in sampleRec as possible */
            sampleRec->dataOffset.hi = offset >> 32;
            sampleRec->dataOffset.lo = (uint32_t)offset;
            sampleRec->dataSize = stream->index_entries[k].size;
            sampleRec->sampleFlags = flags;

            /* some samples have a data_size of zero. if that's the case, ignore them;
             * they seem to be used to stretch the frame duration & are already handled
             * by the previous pkt */
            if(sampleRec->dataSize <= 0) {
                sampleNum--;
                continue;
            }

            /* switch for the remaining fields */
            if(codec->codec_type == AVMEDIA_TYPE_VIDEO) {

                /* Calculate the frame duration */
                duration = 1;
                for(l = k+1; l < stream->nb_index_entries; l++) {
                    if(stream->index_entries[l].size > 0)
                        break;
                    duration++;
                }

                sampleRec->durationPerSample = map->base.num * duration;
                sampleRec->numberOfSamples = 1;
            }
            else if(codec->codec_type == AVMEDIA_TYPE_AUDIO) {

                /* FIXME: check if that's really the right thing to do here */
                if(ncstr->vbr) {
                    sampleRec->numberOfSamples = 1;

                    if (k + 1 < stream->nb_index_entries)
                        sampleRec->durationPerSample = (stream->index_entries[k+1].timestamp - stream->index_entries[k].timestamp) * ncstr->base.num;
                    else if (sampleNum - 2 >= 0)
                        // if we're at the last index entry, use the duration of the previous sample
                        // FIXME: this probably could be better
                        sampleRec->durationPerSample = ncstr->sampleTable[sampleNum-2].durationPerSample;

                } else {
                    sampleRec->durationPerSample = 1;
                    sampleRec->numberOfSamples = (stream->index_entries[k].size * ncstr->asbd.mFramesPerPacket) / ncstr->asbd.mBytesPerPacket;
                }
            }
        }
        if(sampleNum != 0)
        {
            /* Add all of the samples to the media */
            AddMediaSampleReferences64(ncstr->media, ncstr->sampleHdl, sampleNum, ncstr->sampleTable, NULL);

            /* The index is both present and not empty */
            *hadIndex = 1;
        }
        free(ncstr->sampleTable);
    }

    if(*hadIndex == 0)
        //No index, the remainder of this function will fail.
        goto bail;

    // insert media and set addedDuration;
    for(j = 0; j < storage->map_count && result == noErr; j++) {
        ncstr = &map[j];
        if(ncstr->valid) {
            Media media = ncstr->media;
            Track track;
            TimeRecord time;
            TimeValue mediaDuration;
            TimeScale mediaTimeScale;
            TimeScale movieTimeScale;
            int startTime = map[j].str->index_entries[0].timestamp;

            mediaDuration = GetMediaDuration(media);
            mediaTimeScale = GetMediaTimeScale(media);
            movieTimeScale = GetMovieTimeScale(storage->movie);

            /* we could handle this stream.
            * convert the atTime parameter to track scale.
            * FIXME: check if that's correct */
            time.value.hi = 0;
            time.value.lo = storage->atTime;
            time.scale = movieTimeScale;
            time.base = NULL;
            ConvertTimeScale(&time, mediaTimeScale);

            track = GetMediaTrack(media);
            result = InsertMediaIntoTrack(track, time.value.lo, 0, mediaDuration, fixed1);

            // set audio/video start delay
            // note str.start_time exists but is always 0 for AVI
            if (startTime) {
                TimeRecord startTimeRec;
                startTimeRec.value.hi = 0;
                startTimeRec.value.lo = startTime * map[j].str->time_base.num;
                startTimeRec.scale = map[j].str->time_base.den;
                startTimeRec.base = NULL;
                ConvertTimeScale(&startTimeRec, movieTimeScale);
                SetTrackOffset(track, startTimeRec.value.lo);
            }

            if(result != noErr)
                goto bail;

            time.value.hi = 0;
            time.value.lo = mediaDuration;
            time.scale = mediaTimeScale;
            time.base = NULL;
            ConvertTimeScale(&time, movieTimeScale);

            if(time.value.lo > *addedDuration)
                *addedDuration = time.value.lo;
        }
    }

    storage->loadedTime = *addedDuration;

bail:
    return result;
} /* import_using_index() */
Example #16
void QTSound_CreateMySoundTrack (Movie theMovie)
{
    Track theTrack;
    Media theMedia;
    Handle sndHandle = nil;
    SoundDescriptionHandle sndDesc = nil;
    long sndDataOffset;
    long sndDataSize;
    long numSamples;
    OSErr err = noErr;
#if TARGET_OS_WIN32

    char path[MAX_PATH+1];
    short resID;
    FSSpec fsspec;


    fsspec.vRefNum = 0;
    fsspec.parID = 0;
    GetModuleFileName( NULL, path, MAX_PATH+1);

    NativePathNameToFSSpec((char *)&path, &fsspec, 0);

    /* open our application resource file so we
    	can access the Macintosh 'snd ' resource */
    resID = FSpOpenResFile(&fsspec, fsRdPerm);
    CheckError (ResError(), "FSpOpenResFile error" );

#endif



    sndHandle = GetResource ('snd ', kOurSoundResourceID);
    CheckError (ResError(), "GetResource error" );
    if (sndHandle == nil)
    {
        return;
    }

    sndDesc = (SoundDescriptionHandle) NewHandle(4);
    CheckError (MemError(), "NewHandle error" );

    QTSound_CreateSoundDescription (sndHandle,
                                    sndDesc,
                                    &sndDataOffset,
                                    &numSamples,
                                    &sndDataSize );

    theTrack = NewMovieTrack (theMovie, 0, 0, kFullVolume);
    CheckError (GetMoviesError(), "NewMovieTrack error" );

    theMedia = NewTrackMedia (theTrack, SoundMediaType,
                              FixRound ((**sndDesc).sampleRate),
                              nil, 0);
    CheckError (GetMoviesError(), "NewTrackMedia error" );

    err = BeginMediaEdits (theMedia);
    CheckError( err, "BeginMediaEdits error" );

    err = AddMediaSample(theMedia,
                         sndHandle,
                         sndDataOffset,/* offset in data */
                         sndDataSize,
                         kSoundSampleDuration,/* duration of each sound sample */
                         (SampleDescriptionHandle) sndDesc,
                         numSamples,
                         kSyncSample,/* self-contained samples */
                         nil);
    CheckError( err, "AddMediaSample error" );

    err = EndMediaEdits (theMedia);
    CheckError( err, "EndMediaEdits error" );

    err = InsertMediaIntoTrack (theTrack,
                                kTrackStart,/* track start time */
                                kMediaStart,/* media start time */
                                GetMediaDuration (theMedia),
                                fixed1);
    CheckError( err, "InsertMediaIntoTrack error" );

    if (sndDesc != nil)
    {
        DisposeHandle( (Handle)sndDesc);
    }
}
Example #17
void MolDisplayWin::WriteQTMovie(wxString & filepath) {
	//Create a QuickTime movie using the standard animation codecs with normal quality, and 
	//temporal compression. The final file is flattened for cross platform compatibility.
	
	QTExport * QTOptions = new QTExport(this);
	//setup controls for the current data
	if (MainData->GetNumFrames() > 1) { //default to frame animation
		QTOptions->SetMovieChoice(0);
	} else {
		QTOptions->EnableFrameMovie(false);
	}
	if (MainData->cFrame->GetNumberNormalModes() <= 0) {
		QTOptions->EnableModeMovie(false);
	}
	
	if (QTOptions->ShowModal() != wxID_OK) {
		//user cancelled the operation
		QTOptions->Destroy();
		return;
	}
	//retrieve the value of each option
	int MovieType = QTOptions->GetMovieChoice();
	bool IncludeEPlot = QTOptions->AddEnergyPlot();
	int compressorChoice = QTOptions->GetCompressorChoice();
	int keyFrameRate = QTOptions->GetKeyFrameRate();
	if (keyFrameRate < 0) keyFrameRate = 0;
	
	QTOptions->Destroy();
	
	CodecType mCodec;
	switch (compressorChoice) {
		case 0:
			mCodec = kCinepakCodecType;
			break;
		case 1:
			mCodec = kGraphicsCodecType;
			break;
		case 2:
			mCodec = kAnimationCodecType;
			break;
		case 3:
		default:
			mCodec = kMPEG4VisualCodecType;
	}

	OSStatus s;
	OSErr myErr = noErr;
	FSSpec targetSpec;
	//ugh I need to get an FSSpec to hand to quicktime, but these calls only seem to work if
	//the file already exists...
	const wxCharBuffer t = filepath.mb_str(wxConvUTF8);
	FILE * temp = fopen(t, "wb");
	if (temp) fclose(temp);

#ifdef __WXOSX_COCOA__
	//This function is not available in the wxCocoa implementation; it is probably possible to work around it.
	//Otherwise the code appears to link and run. However, it would probably be better to redo the
	//code to use the Cocoa QTKit framework rather than the old Carbon QuickTime library.
	
	//This path is not tested as the current Cocoa code does not properly support the extended save dialog.
	//	void wxMacFilename2FSSpec( const wxString& path , FSSpec *spec )
	{
		OSStatus err = noErr;
		FSRef fsRef;
		wxMacPathToFSRef( filepath , &fsRef );
		err = FSGetCatalogInfo(&fsRef, kFSCatInfoNone, NULL, NULL, &targetSpec, NULL);
		verify_noerr( err );
	}
#else
	wxMacFilename2FSSpec(filepath, &targetSpec);
#endif
	
	Movie	theMovie = NULL;
		
	FSSpec tempSpec = targetSpec;
	strcpy((char *) &(tempSpec.name[1]), "MacMolPlt8933tempMovie");
	tempSpec.name[0] = 22;

	BeginOperation();
	ProgressInd->ChangeText("Creating movie...");
		
	myErr = EnterMovies();	//initialize the quicktime manager
	if (myErr != noErr) {
		FinishOperation();
		MessageAlert("Error initializing QuickTime!");
		return;
	}
		//Create the movie file and initialize file data
		//Use Quicktime creator code 'TVOD' instead of simpletext 'ttxt'
	short	resRefNum = 0;
	short	resId = 0;
	myErr = CreateMovieFile(&tempSpec, 'TVOD', smCurrentScript, createMovieFileDeleteCurFile,
							&resRefNum, &theMovie);
	if (myErr != noErr) {
		MessageAlert("Error creating movie file!");
	} else {
		bool KillEPlot = false;
		int width, height, savedEPlotWidth, savedEPlotHeight;
		glCanvas->GetClientSize(&width, &height);
		Rect lDisplayRect={0,0,0,0};
		lDisplayRect.right = width;
		lDisplayRect.bottom = height;
		Rect		gRect = lDisplayRect;

		Rect EPlotRect = lDisplayRect;
		//If we are including an energy plot add space for it here
		if (IncludeEPlot && (MovieType == 0)) {
			EPlotRect.left = EPlotRect.right;
			EPlotRect.right = EPlotRect.left + height;
			if (!energyPlotWindow) {
				energyPlotWindow = new EnergyPlotDialog(this);
				KillEPlot = true;
			} else {
				energyPlotWindow->GetSize(&savedEPlotWidth, &savedEPlotHeight);
			}
			gRect.right += height;
			width += height;
			energyPlotWindow->Show(false);
			energyPlotWindow->SetSize(height, height);
			energyPlotWindow->Update();	//This is needed to initialise the window if we just created it
		}

		LocalToGlobal ((Point *) &(gRect.top));
		LocalToGlobal ((Point *) &(gRect.bottom));
		WindowRef TempWindow;
		s = CreateNewWindow(kDocumentWindowClass, kWindowNoAttributes, &gRect, &TempWindow);
		if (s == noErr) {
													//Create the video track
			Track theTrack = NewMovieTrack (theMovie, FixRatio(width,1),
											FixRatio(height,1), kNoVolume);
			if ((noErr == GetMoviesError())&&theTrack) {
				Media theMedia = NewTrackMedia (theTrack, VideoMediaType,
												60, // Video Time Scale
												NULL, 0);
				if ((noErr == GetMoviesError())&&theMedia) {
					myErr = BeginMediaEdits (theMedia);
					if (myErr == noErr) {
						//create the actual movie frames
						GWorldPtr	lgWorld=NULL;
						
						if (! NewGWorld (&lgWorld, 0, &gRect, (CTabHandle) NULL, (GDHandle) NULL,
										 (GWorldFlags) (pixPurge + useTempMem))) {
							long MaxCompressedSize;
							ImageSequence seqID;
							ImageDescriptionHandle imageDesc = (ImageDescriptionHandle)NewHandle(4);
							PixMapHandle myPixMap = GetPortPixMap(lgWorld);
							LockPixels (myPixMap);
							myErr = CompressSequenceBegin(&seqID, myPixMap, NULL, &gRect, &gRect, 0,
														  mCodec, bestCompressionCodec, codecNormalQuality, codecNormalQuality, keyFrameRate, NULL,
														  codecFlagUpdatePreviousComp, imageDesc);
							GetMaxCompressionSize (myPixMap, &gRect,
												   0, codecNormalQuality, mCodec, (CompressorComponent) anyCodec, &MaxCompressedSize);
							Handle Buffer = TempNewHandle(MaxCompressedSize, &myErr);
							if (!Buffer)
								Buffer = NewHandle(MaxCompressedSize);
							if (Buffer) {
								qtData myqtData = {theMedia, imageDesc, seqID, lDisplayRect, gRect};
								if (MovieType == 0) {
									CreateFrameMovie(lgWorld, Buffer, myqtData, IncludeEPlot);
								} else {
									CreateModeMovie(lgWorld, Buffer, myqtData);
								}
								DisposeHandle(Buffer);
							}
							myErr = CDSequenceEnd(seqID);
							if (lgWorld != NULL) DisposeGWorld (lgWorld);
							if (imageDesc) DisposeHandle((Handle) imageDesc);
						}
						
						myErr = EndMediaEdits (theMedia);
					}
					myErr = InsertMediaIntoTrack (theTrack, 0,/* track start time */
						0,        /* media start time */
						GetMediaDuration (theMedia),
						FixRatio(1,1));
				}
				myErr = AddMovieResource (theMovie, resRefNum, &resId, NULL);
			}
			if (resRefNum) {
						//Create the actual file as a flat data fork so it can be placed on the www
				ProgressInd->ChangeText("Flattening movie...");
				FlattenMovie(theMovie, flattenAddMovieToDataFork, &targetSpec, 'TVOD', smCurrentScript, 
							 createMovieFileDeleteCurFile, &resId, NULL);
				CloseMovieFile (resRefNum);
			}
			DisposeWindow(TempWindow);
		}
		DisposeMovie (theMovie);
		DeleteMovieFile (&tempSpec);	//delete the temp file after disposing of the movie

		if (energyPlotWindow) {
			if (KillEPlot) {
				delete energyPlotWindow;
				energyPlotWindow = NULL;
			} else {
				energyPlotWindow->SetSize(savedEPlotWidth, savedEPlotHeight);
				energyPlotWindow->FrameChanged();
				energyPlotWindow->Show(true);
			}
		}
	}
	ExitMovies();	//Close out quicktime as we are done with it for now
	FinishOperation();
}
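CreateFrameMovie() and CreateModeMovie() are not shown above. The per-frame work they perform with the sequence set up in WriteQTMovie typically follows the pattern sketched below; the helper name and parameters are assumptions, not MacMolPlt code.

/* Sketch only (hypothetical helper): compress the current contents of an
 * offscreen GWorld with the image sequence begun above and append the result
 * as one video sample. */
static OSErr AddOneCompressedFrame(Media theMedia, ImageSequence seqID,
                                   ImageDescriptionHandle imageDesc,
                                   GWorldPtr gWorld, const Rect *srcRect,
                                   Handle buffer, TimeValue frameDuration)
{
	UInt8 similarity = 0;
	long  dataSize   = 0;
	OSErr err;

	/* ... draw the frame into gWorld with QuickDraw before calling this ... */

	HLock(buffer);
	err = CompressSequenceFrame(seqID, GetPortPixMap(gWorld), srcRect,
	                            0 /* no special codec flags */,
	                            *buffer, &dataSize, &similarity, NULL);
	if (err == noErr)
		err = AddMediaSample(theMedia, buffer, 0, dataSize, frameDuration,
		                     (SampleDescriptionHandle)imageDesc, 1,
		                     similarity ? mediaSampleNotSync : 0, NULL);
	HUnlock(buffer);
	return err;
}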
Example #18
/* Import function for movies that lack an index.
 * Supports progressive importing, but will not idle if maxFrames == 0.
 */
ComponentResult import_with_idle(ff_global_ptr storage, long inFlags, long *outFlags, int minFrames, int maxFrames, bool addSamples) {
    SampleReference64Record sampleRec;
    AVFormatContext *formatContext;
    AVCodecContext *codecContext;
    AVStream *stream;
    AVPacket packet;
    NCStream *ncstream;
    ComponentResult dataResult; //used for data handler operations that can fail.
    ComponentResult result;
    TimeValue minLoadedTime;
    TimeValue movieTimeScale = GetMovieTimeScale(storage->movie);
    int64_t availableSize, margin;
    long idling;
    int readResult, framesProcessed, i;
    int firstPts[storage->map_count];
    short flags;

    formatContext = storage->format_context;
    result = noErr;
    minLoadedTime = -1;
    availableSize = 0;
    idling = (inFlags & movieImportWithIdle);
    framesProcessed = 0;

    if(idling) {
        //get the size of immediately available data
        if(storage->dataHandlerSupportsWideOffsets) {
            wide wideSize;

            dataResult = DataHGetAvailableFileSize64(storage->dataHandler, &wideSize);
            if(dataResult == noErr) availableSize = ((int64_t)wideSize.hi << 32) + wideSize.lo;
        } else {
            long longSize;

            dataResult = DataHGetAvailableFileSize(storage->dataHandler, &longSize);
            if(dataResult == noErr) availableSize = longSize;
        }
    }

    for(i = 0; i < storage->map_count; i++) {
        ncstream = &storage->stream_map[i];
        Media media = ncstream->media;

        firstPts[i] = -1;
        if(media && ncstream->duration == -1)
            ncstream->duration = GetMediaDuration(media);
    }

    while((readResult = av_read_frame(formatContext, &packet)) == 0) {
        bool trustPacketDuration = true;
        int64_t dts = packet.dts;
        ncstream = &storage->stream_map[packet.stream_index];
        stream = ncstream->str;
        codecContext = stream->codec;
        flags = 0;

        if (!ncstream->valid) {
            av_free_packet(&packet);
            continue;
        }

        if((packet.flags & AV_PKT_FLAG_KEY) == 0)
            flags |= mediaSampleNotSync;

        if(IS_NUV(storage->componentType) && codecContext->codec_id == CODEC_ID_MP3) trustPacketDuration = false;
        if(IS_FLV(storage->componentType)) trustPacketDuration = false;

        memset(&sampleRec, 0, sizeof(sampleRec));
        sampleRec.dataOffset.hi = packet.pos >> 32;
        sampleRec.dataOffset.lo = (uint32_t)packet.pos;
        sampleRec.dataSize = packet.size;
        sampleRec.sampleFlags = flags;

        if (packet.pos <= 0) {
            av_free_packet(&packet);
            continue;
        }

        if(firstPts[packet.stream_index] < 0)
            firstPts[packet.stream_index] = packet.pts;

        if(packet.size > storage->largestPacketSize)
            storage->largestPacketSize = packet.size;

        if(sampleRec.dataSize <= 0) {
            av_free_packet(&packet);
            continue;
        }

        if(codecContext->codec_type == AVMEDIA_TYPE_AUDIO && !ncstream->vbr)
            sampleRec.numberOfSamples = (packet.size * ncstream->asbd.mFramesPerPacket) / ncstream->asbd.mBytesPerPacket;
        else
            sampleRec.numberOfSamples = 1; //packet.duration;

        //add any samples waiting to be added
        if(ncstream->lastSample.numberOfSamples > 0) {
            //calculate the duration of the sample before adding it
            ncstream->lastSample.durationPerSample = (dts - ncstream->lastdts) * ncstream->base.num;

            AddMediaSampleReferences64(ncstream->media, ncstream->sampleHdl, 1, &ncstream->lastSample, NULL);
        }

#if 0
        if (0) {
            Codecprintf(NULL, "Stream:%d Pts:%lld Dts:%lld DtsUsed:%lld Pos:%lld Size:%d\n", packet.stream_index, packet.pts, packet.dts, dts, packet.pos, packet.size);
            Codecprintf(NULL, "Stream:%d Nsamples:%ld RealDuration:%d CalcDuration:%ld TimeDts:%lld TimeDurations:%lld FrameDts:%d FrameGuess:%lld\n",
                        packet.stream_index, sampleRec.numberOfSamples, packet.duration, ncstream->lastSample.durationPerSample,
                        packet.dts, ncstream->timeByDurations, (int)((packet.dts * stream->time_base.num * ncstream->asbd.mSampleRate) / stream->time_base.den),
                        ncstream->timeByFrames);

            ncstream->timeByDurations += packet.duration;
            ncstream->timeByFrames += ncstream->asbd.mFramesPerPacket;
        }
#endif

        ncstream->lastSample = sampleRec;
        ncstream->lastdts = packet.dts;

        // If this is a nuv file, then we want to set the duration to zero.
        // This is because the nuv container doesn't have the framesize info
        // for audio.
        if(packet.duration == 0 || !trustPacketDuration) {
            //no duration, we'll have to wait for the next packet to calculate it
            // keep the duration of the last sample, so we can use it if it's the last frame
            sampleRec.durationPerSample = ncstream->lastSample.durationPerSample;
        } else {
            ncstream->lastSample.numberOfSamples = 0;

            if(codecContext->codec_type == AVMEDIA_TYPE_AUDIO && !ncstream->vbr)
                sampleRec.durationPerSample = 1;
            else
                sampleRec.durationPerSample = ncstream->base.num * packet.duration;

            AddMediaSampleReferences64(ncstream->media, ncstream->sampleHdl, 1, &sampleRec, NULL);
        }

        framesProcessed++;

        //if we're idling, try really not to read past the end of available data
        //otherwise we will cause blocking i/o.
        if(idling && framesProcessed >= minFrames && availableSize > 0 && availableSize < storage->dataSize) {
            margin = availableSize - (packet.pos + packet.size);
            if(margin < (storage->largestPacketSize * 8)) { // 8x fudge factor for comfortable margin, could be tweaked.
                av_free_packet(&packet);
                break;
            }
        }

        av_free_packet(&packet);

        //stop processing if we've hit the max frame limit
        if(maxFrames > 0 && framesProcessed >= maxFrames)
            break;
    }

    if(readResult != 0) {
        //if readResult != 0, we've hit the end of the stream.
        //add any pending last frames.
        for(i = 0; i < formatContext->nb_streams; i++) {
            ncstream = &storage->stream_map[i];
            if(ncstream->lastSample.numberOfSamples > 0)
                AddMediaSampleReferences64(ncstream->media, ncstream->sampleHdl, 1, &ncstream->lastSample, NULL);
        }
    }

    for(i = 0; i < storage->map_count && result == noErr; i++) {
        ncstream = &storage->stream_map[i];
        Media media = ncstream->media;

        if(ncstream->valid && (addSamples || readResult != 0)) {
            Track track = GetMediaTrack(media);
            TimeScale mediaTimeScale = GetMediaTimeScale(media);
            TimeValue prevDuration = ncstream->duration;
            TimeValue mediaDuration = GetMediaDuration(media);
            TimeValue addedDuration = mediaDuration - prevDuration;
            TimeValue mediaLoadedTime = movieTimeScale * mediaDuration / mediaTimeScale;

            if(minLoadedTime == -1 || mediaLoadedTime < minLoadedTime)
                minLoadedTime = mediaLoadedTime;

            if(addedDuration > 0) {
                result = InsertMediaIntoTrack(track, -1, prevDuration, addedDuration, fixed1);
            }

            if (!prevDuration && firstPts[i] > 0) {
                TimeRecord startTimeRec;
                startTimeRec.value.hi = 0;
                startTimeRec.value.lo = firstPts[i] * formatContext->streams[i]->time_base.num;
                startTimeRec.scale = formatContext->streams[i]->time_base.den;
                startTimeRec.base = NULL;
                ConvertTimeScale(&startTimeRec, movieTimeScale);
                SetTrackOffset(track, startTimeRec.value.lo);
            }
            ncstream->duration = -1;
        }
    }

    //set the loaded time to the length of the shortest track.
    if(minLoadedTime > 0)
        storage->loadedTime = minLoadedTime;

    if(readResult != 0) {
        //remove the placeholder track
        if(storage->placeholderTrack != NULL) {
            DisposeMovieTrack(storage->placeholderTrack);
            storage->placeholderTrack = NULL;
        }

        //set the movie load state to complete, as well as mark the import output flag.
        storage->movieLoadState = kMovieLoadStateComplete;
        *outFlags |= movieImportResultComplete;
    } else {
        //if we're not yet done with the import, calculate the movie load state.
        int64_t timeToCompleteFile; //time until the file should be completely available, in terms of AV_TIME_BASE
        long dataRate = 0;

        dataResult = DataHGetDataRate(storage->dataHandler, 0, &dataRate);
        if(dataResult == noErr && dataRate > 0) {
            timeToCompleteFile = (AV_TIME_BASE * (storage->dataSize - availableSize)) / dataRate;

            if(storage->loadedTime > (10 * GetMovieTimeScale(storage->movie)) && timeToCompleteFile < (storage->format_context->duration * .85))
                storage->movieLoadState = kMovieLoadStatePlaythroughOK;
            else
                storage->movieLoadState = kMovieLoadStatePlayable;

        } else {
            storage->movieLoadState = kMovieLoadStatePlayable;
        }

        *outFlags |= movieImportResultNeedIdles;
    }

    send_movie_changed_notification(storage->movie);

    //tell the idle manager to idle us again in 500ms.
    if(idling && storage->idleManager && storage->isStreamed)
        QTIdleManagerSetNextIdleTimeDelta(storage->idleManager, 1, 2);

    return(result);
} /* import_with_idle() */
OSErr QTDR_CreateMovieInRAM (void)
{
	Movie					myMovie = NULL;
	Track					myTrack = NULL;
	Media					myMedia = NULL;
	short					myResRefNum = 0;
	short					myResID = 0;
	Handle					myDataRef = NULL;
	Handle					myHandle = NULL;
	FSSpec					myFSSpec;
	OSErr					myErr = noErr;
	
	// create a new handle to hold the media data
	myHandle = NewHandleClear(0);
	if (myHandle == NULL)
		goto bail;
	
	// create a data reference to that handle
	myDataRef = QTDR_MakeHandleDataRef(myHandle);
	if (myDataRef == NULL)
		goto bail;
		
	myMovie = NewMovie(newMovieActive);
	if (myMovie == NULL)
		goto bail;
	
	myErr = SetMovieDefaultDataRef(myMovie, myDataRef, HandleDataHandlerSubType);
	if (myErr != noErr)
		goto bail;

	// create the movie track and media
	myTrack = NewMovieTrack(myMovie, FixRatio(kVideoTrackWidth, 1), FixRatio(kVideoTrackHeight, 1), kNoVolume);
	myErr = GetMoviesError();
	if (myErr != noErr)
		goto bail;
		
	myMedia = NewTrackMedia(myTrack, VideoMediaType, kVideoTimeScale, NULL, 0);
	myErr = GetMoviesError();
	if (myErr != noErr)
		goto bail;

	// create the media samples
	myErr = BeginMediaEdits(myMedia);
	if (myErr != noErr)
		goto bail;

	myErr = QTDR_AddVideoSamplesToMedia(myMedia, kVideoTrackWidth, kVideoTrackHeight);
	if (myErr != noErr)
		goto bail;

	myErr = EndMediaEdits(myMedia);
	if (myErr != noErr)
		goto bail;
	
	// add the media to the track
	myErr = InsertMediaIntoTrack(myTrack, 0, 0, GetMediaDuration(myMedia), fixed1);
	if (myErr != noErr)
		goto bail;
	
	// add the movie atom to the movie file
	myErr = AddMovieResource(myMovie, myResRefNum, &myResID, NULL);

	myFSSpec.name[0] = (unsigned char)0;
	myFSSpec.parID = 0;
	myFSSpec.vRefNum = 0;
	
	QTFrame_OpenMovieInWindow(myMovie, &myFSSpec);

bail:
	if (myDataRef != NULL)
		DisposeHandle(myDataRef);
		
	return(myErr);
}
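QTDR_MakeHandleDataRef() is not included in this excerpt. In its minimal form such a helper just wraps the address of the media-data handle in another handle, the same PtrToHand pattern used in Example #21 below; a sketch, omitting any data-reference extensions, follows.

/* Minimal sketch of a handle data reference builder; Apple's full sample also
 * appends name and extension atoms, which are omitted here. */
static Handle QTDR_MakeHandleDataRefSketch(Handle theHandle)
{
	Handle dataRef = NULL;

	/* the data reference for the handle data handler is a handle
	   containing the address of the media-data handle */
	if (PtrToHand(&theHandle, &dataRef, sizeof(Handle)) != noErr)
		return NULL;

	return dataRef;
}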
void QTEffects_RespondToDialogSelection (OSErr theErr)
{
	Boolean					myDialogWasCancelled = false;
	short					myResID = movieInDataForkResID;
	UInt16					myMovieIter;
	short					mySrcMovieRefNum = 0;
	Movie					myPrevSrcMovie = NULL;
	Track					myPrevSrcTrack = NULL;
	Movie					myNextSrcMovie = NULL;
	Track					myNextSrcTrack = NULL;
	short					myDestMovieRefNum = 0;
	FSSpec					myFile;
	Boolean					myIsSelected = false;
	Boolean					myIsReplacing = false;	
	StringPtr 				myPrompt = QTUtils_ConvertCToPascalString(kEffectsSaveMoviePrompt);
	StringPtr 				myFileName = QTUtils_ConvertCToPascalString(kEffectsSaveMovieFileName);
	Movie					myDestMovie = NULL;
	Fixed					myDestMovieWidth, myDestMovieHeight;
	ImageDescriptionHandle	myDesc = NULL;
	Track					videoTrackFX, videoTrackA, videoTrackB;
	Media					videoMediaFX, videoMediaA, videoMediaB;
	TimeValue				myCurrentDuration = 0;
	TimeValue				myReturnedDuration;
	Boolean					isFirstTransition = true;
	TimeValue				myMediaTransitionDuration;
	TimeValue				myMediaFXStartTime, myMediaFXDuration;
	OSType					myEffectCode;
	long					myFlags = createMovieFileDeleteCurFile | createMovieFileDontCreateResFile;
	long					myLong;
	OSErr					myErr = noErr;

	// standard parameter box has been dismissed, so remember that fact
	gEffectsDialog = 0L;
	
	myDialogWasCancelled = (theErr == userCanceledErr);
	
	// we're finished with the effect list and movie posters	
	QTDisposeAtomContainer(gEffectList);
	
	if (gPosterA != NULL)
		KillPicture(gPosterA);
		
	if (gPosterB != NULL)
		KillPicture(gPosterB);
	
	// when the sign says stop, then stop
	if (myDialogWasCancelled)
		goto bail;

	// add atoms naming the sources to gEffectSample
	myLong = EndianU32_NtoB(kSourceOneName);
	QTInsertChild(gEffectSample, kParentAtomIsContainer, kEffectSourceName, 1, 0, sizeof(myLong), &myLong, NULL);

	myLong = EndianU32_NtoB(kSourceTwoName);
	QTInsertChild(gEffectSample, kParentAtomIsContainer, kEffectSourceName, 2, 0, sizeof(myLong), &myLong, NULL);
	
	// extract the 'what' atom to find out what kind of effect it is
	{
		QTAtom			myEffectAtom;
		QTAtomID		myEffectAtomID;
		long			myEffectCodeSize;
		Ptr				myEffectCodePtr;

		myEffectAtom = QTFindChildByIndex(gEffectSample, kParentAtomIsContainer, kParameterWhatName, kParameterWhatID, &myEffectAtomID);
		
		myErr = QTLockContainer(gEffectSample);
		BailError(myErr);

		myErr = QTGetAtomDataPtr(gEffectSample, myEffectAtom, &myEffectCodeSize, &myEffectCodePtr);
		BailError(myErr);

		if (myEffectCodeSize != sizeof(OSType)) {
			myErr = paramErr;
			goto bail;
		}
		
		myEffectCode = *(OSType *)myEffectCodePtr;		// "tsk"
		myEffectCode = EndianU32_BtoN(myEffectCode);	// because the data is read from an atom container
		
		myErr = QTUnlockContainer(gEffectSample);
		BailError(myErr);
	}

	// ask the user for the name of the new movie file
	QTFrame_PutFile(myPrompt, myFileName, &myFile, &myIsSelected, &myIsReplacing);
	if (!myIsSelected)
		goto bail;				// deal with user cancelling

	// create a movie file for the destination movie
	myErr = CreateMovieFile(&myFile, FOUR_CHAR_CODE('TVOD'), 0, myFlags, &myDestMovieRefNum, &myDestMovie);
	BailError(myErr);
	
	// open the first file as a movie; call the first movie myPrevSrcMovie
	myErr = OpenMovieFile(&gSpecList[0], &mySrcMovieRefNum, fsRdPerm);
	BailError(myErr);
	
	myErr = NewMovieFromFile(&myPrevSrcMovie, mySrcMovieRefNum, NULL, NULL, 0, NULL);
	BailError(myErr);
	
	myErr = CloseMovieFile(mySrcMovieRefNum);
	BailError(myErr);
	
	// if the movie is shorter than kMinimumDuration, scale it to that length
	SetMovieTimeScale(myPrevSrcMovie, kTimeScale);
	myErr = QTEffects_GetFirstVideoTrackInMovie(myPrevSrcMovie, &myPrevSrcTrack);
	BailNil(myPrevSrcTrack);
	
	if (GetTrackDuration(myPrevSrcTrack) < kMinimumDuration) {
		myErr = ScaleTrackSegment(myPrevSrcTrack, 0, GetTrackDuration(myPrevSrcTrack), kMinimumDuration);
		BailError(myErr);
	}
	
	// find out how big the first movie is; we'll use it as the size of all our tracks
	GetTrackDimensions(myPrevSrcTrack, &myDestMovieWidth, &myDestMovieHeight);
	
#if USES_MAKE_IMAGE_DESC_FOR_EFFECT
	// create a new sample description for the effect,
	// which is just an image description specifying the effect and its dimensions
	myErr = MakeImageDescriptionForEffect(myEffectCode, &myDesc);
	if (myErr != noErr)
		BailError(myErr);
#else
	// create a new sample description for the effect,
	// which is just an image description specifying the effect and its dimensions
	myDesc = (ImageDescriptionHandle)NewHandleClear(sizeof(ImageDescription));
	BailNil(myDesc);
	
	(**myDesc).idSize = sizeof(ImageDescription);
	(**myDesc).cType = myEffectCode;
	(**myDesc).hRes = 72L << 16;
	(**myDesc).vRes = 72L << 16;
	(**myDesc).dataSize = 0L;
	(**myDesc).frameCount = 1;
	(**myDesc).depth = 0;
	(**myDesc).clutID = -1;
#endif
	
	// fill in the fields of the sample description
	(**myDesc).vendor = kAppleManufacturer;
	(**myDesc).temporalQuality = codecNormalQuality;
	(**myDesc).spatialQuality = codecNormalQuality;
	(**myDesc).width = FixRound(myDestMovieWidth);
	(**myDesc).height = FixRound(myDestMovieHeight);

	// add three video tracks to the destination movie:
	// 	- videoTrackFX is where the effects and stills live; it's user-visible.
	//	- videoTrackA is where the "source A"s for effects live; it's hidden by the input map
	//	- videoTrackB is where the "source B"s for effects live; it's hidden by the input map
	videoTrackFX = NewMovieTrack(myDestMovie, myDestMovieWidth, myDestMovieHeight, 0);
	BailNil(videoTrackFX);
	videoMediaFX = NewTrackMedia(videoTrackFX, VideoMediaType, kTimeScale, NULL, 0);
	BailNil(videoMediaFX);
	myErr = BeginMediaEdits(videoMediaFX);
	BailError(myErr);
	
	videoTrackA = NewMovieTrack(myDestMovie, myDestMovieWidth, myDestMovieHeight, 0);
	BailNil(videoTrackA);
	videoMediaA = NewTrackMedia(videoTrackA, VideoMediaType, kTimeScale, NULL, 0);
	BailNil(videoMediaA);

	videoTrackB = NewMovieTrack(myDestMovie, myDestMovieWidth, myDestMovieHeight, 0);
	BailNil(videoTrackB);
	videoMediaB = NewTrackMedia(videoTrackB, VideoMediaType, kTimeScale, NULL, 0);
	BailNil(videoMediaB);

	// create the input map
	{
		long				myRefIndex1, myRefIndex2;
		QTAtomContainer		myInputMap;
		QTAtom				myInputAtom;
		OSType				myInputType;

		QTNewAtomContainer(&myInputMap);

		// first input
		if (videoTrackA) {
		
			AddTrackReference(videoTrackFX, videoTrackA, kTrackModifierReference, &myRefIndex1);
			QTInsertChild(myInputMap, kParentAtomIsContainer, kTrackModifierInput, myRefIndex1, 0, 0, NULL, &myInputAtom);
	
			myInputType = EndianU32_NtoB(kTrackModifierTypeImage);
			QTInsertChild(myInputMap, myInputAtom, kTrackModifierType, 1, 0, sizeof(myInputType), &myInputType, NULL);
	
			myLong = EndianU32_NtoB(kSourceOneName);
			QTInsertChild(myInputMap, myInputAtom, kEffectDataSourceType, 1, 0, sizeof(myLong), &myLong, NULL);
		}

		// second input
		if (videoTrackB) {
		
			AddTrackReference(videoTrackFX, videoTrackB, kTrackModifierReference, &myRefIndex2);
			QTInsertChild(myInputMap, kParentAtomIsContainer, kTrackModifierInput, myRefIndex2, 0, 0, NULL, &myInputAtom);
	
			myInputType = EndianU32_NtoB(kTrackModifierTypeImage);
			QTInsertChild(myInputMap, myInputAtom, kTrackModifierType, 1, 0, sizeof(myInputType), &myInputType, NULL);
	
			myLong = EndianU32_NtoB(kSourceTwoName);
			QTInsertChild(myInputMap, myInputAtom, kEffectDataSourceType, 1, 0, sizeof(myLong), &myLong, NULL);
		}

		// set that map
		SetMediaInputMap(GetTrackMedia(videoTrackFX), myInputMap);
		
		QTDisposeAtomContainer(myInputMap);
	}

	myCurrentDuration = 0;

#if MAKE_STILL_SECTIONS
	// copy the first sample of the first video track of the first movie to videoTrackFX, with duration kStillDuration.
	myErr = CopyPortionOfTrackToTrack(myPrevSrcTrack, eStartPortion + eMiddlePortion, videoTrackFX, myCurrentDuration, &myReturnedDuration);
	BailError(myErr);
	
	myCurrentDuration += myReturnedDuration;
#endif 

	// now process any remaining files
	myMovieIter = 1;
	while (myMovieIter < gSpecCount) {
		
		// open the next file as a movie; call it nextSourceMovie
		myErr = OpenMovieFile(&gSpecList[myMovieIter], &mySrcMovieRefNum, fsRdPerm);
		BailError(myErr);
		
		myErr = NewMovieFromFile(&myNextSrcMovie, mySrcMovieRefNum, NULL, NULL, 0, NULL);
		BailError(myErr);
		
		// we're done with the movie file, so close it
		myErr = CloseMovieFile(mySrcMovieRefNum);
		BailError(myErr);
		
		// if the movie is shorter than kMinimumDuration, scale it to that length
		SetMovieTimeScale(myNextSrcMovie, kTimeScale);
		myErr = QTEffects_GetFirstVideoTrackInMovie(myNextSrcMovie, &myNextSrcTrack);
		BailNil(myNextSrcTrack);
		
		if (GetTrackDuration(myNextSrcTrack) < kMinimumDuration) {
			myErr = ScaleTrackSegment(myNextSrcTrack, 0, GetTrackDuration(myNextSrcTrack), kMinimumDuration);
			BailError(myErr);
		}

		// create a transition effect from the previous source movie's first video sample to the next source movie's first video sample
		// (the effect should have duration kEffectDuration);
		// this involves adding one sample to each of the three video tracks:
		
		//    sample from previous source movie	 -> videoTrackA
		myErr = QTEffects_CopyPortionOfTrackToTrack(myPrevSrcTrack, eFinishPortion, videoTrackA, myCurrentDuration, &myReturnedDuration);
		BailError(myErr);
		
		//    sample from next source movie    	 -> videoTrackB
		myErr = QTEffects_CopyPortionOfTrackToTrack(myNextSrcTrack, eStartPortion, videoTrackB, myCurrentDuration, &myReturnedDuration);
		BailError(myErr);
		
		//    effect sample                 	  -> videoTrackFX
		if (isFirstTransition) {
			myMediaTransitionDuration = myReturnedDuration;
			myMediaFXStartTime = GetMediaDuration(videoMediaFX);
			myErr = AddMediaSample(videoMediaFX, gEffectSample, 0, GetHandleSize(gEffectSample), myMediaTransitionDuration, (SampleDescriptionHandle)myDesc, 1, 0, NULL);
			BailError(myErr);
			
			myMediaFXDuration = GetMediaDuration(videoMediaFX) - myMediaFXStartTime;
			isFirstTransition = false;
		}
		
		myErr = InsertMediaIntoTrack(videoTrackFX, myCurrentDuration, myMediaFXStartTime, myMediaFXDuration, FixRatio(myReturnedDuration, myMediaTransitionDuration));
		BailError(myErr);
		
		myCurrentDuration += myReturnedDuration;
		
#if MAKE_STILL_SECTIONS
		// copy the first video sample of myNextSrcMovie to videoTrackFX, with duration kStillDuration.
		myErr = QTEffects_CopyPortionOfTrackToTrack(myNextSrcTrack, eMiddlePortion + (myMovieIter + 1 == theSpecCount) ? eFinishPortion : 0, videoTrackFX, myCurrentDuration, &myReturnedDuration);
		BailError(myErr);
		
		myCurrentDuration += myReturnedDuration;
#endif // MAKE_STILL_SECTIONS
		
		// dispose of previous source movie.  
		DisposeMovie(myPrevSrcMovie);
		
		myPrevSrcMovie = myNextSrcMovie;
		myPrevSrcTrack = myNextSrcTrack;
		myNextSrcMovie = NULL;
		myNextSrcTrack = NULL;
		
		myMovieIter++;
	} // while
	
	myErr = EndMediaEdits(videoMediaFX);
	BailError(myErr);

	myErr = AddMovieResource(myDestMovie, myDestMovieRefNum, &myResID, "\pMovie 1");
	BailError(myErr);
	
	CloseMovieFile(myDestMovieRefNum);
	
	if (myPrevSrcMovie != NULL)
		DisposeMovie(myPrevSrcMovie);
		
	DisposeMovie(myDestMovie);
	
bail:
	free(myPrompt);
	free(myFileName);

	QTDisposeAtomContainer(gEffectSample);
	DisposeHandle((Handle)myDesc);

	return;
}
Example #21
void TLevelWriter3gp::saveSoundTrack(TSoundTrack *st)
{
	Track theTrack;
	OSErr myErr = noErr;
	SoundDescriptionV1Handle mySampleDesc;
	Media myMedia;
	Handle myDestHandle;
	SoundComponentData sourceInfo;
	SoundComponentData destInfo;
	SoundConverter converter;
	CompressionInfo compressionInfo;
	int err;

	if (!st)
		throw TException("null reference to soundtrack");

	if (st->getBitPerSample() != 16) {
		throw TImageException(m_path, "Only 16 bits per sample is supported");
	}

	theTrack = NewMovieTrack(m_movie, 0, 0, kFullVolume);
	myErr = GetMoviesError();
	if (myErr != noErr)
		throw TImageException(m_path, "error creating audio track");

	FailIf(myErr != noErr, CompressErr);

	myDestHandle = NewHandle(0);

	FailWithAction(myDestHandle == NULL, myErr = MemError(), NoDest);

	*myDestHandle = (char *)st->getRawData();
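	// Note: the assignment above swaps the zero-length handle's master pointer
	// for the track's raw PCM buffer, so the AddMediaSample call below reads the
	// sound data in place rather than from a copy.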

	//////////
	//
	// create a media for the track passed in
	//
	//////////

	// set new track to be a sound track
	m_soundDataRef = nil;
	m_hSoundMovieData = NewHandle(0);

	// Construct the Handle data reference
	err = PtrToHand(&m_hSoundMovieData, &m_soundDataRef, sizeof(Handle));

	if ((err = GetMoviesError()) != noErr)
		throw TImageException(getFilePath(), "can't create Data Ref");

	myMedia = NewTrackMedia(theTrack, SoundMediaType, st->getSampleRate(), m_soundDataRef, HandleDataHandlerSubType); //track->rate >> 16

	myErr = GetMoviesError();
	if (myErr != noErr)
		throw TImageException(m_path, "error setting audio track");
	FailIf(myErr != noErr, Exit);

	// start a media editing session
	myErr = BeginMediaEdits(myMedia);
	if (myErr != noErr)
		throw TImageException(m_path, "error beginning edit audio track");

	FailIf(myErr != noErr, Exit);

	sourceInfo.flags = 0x0;
	sourceInfo.format = kSoundNotCompressed;
	sourceInfo.numChannels = st->getChannelCount();
	sourceInfo.sampleSize = st->getBitPerSample();
	sourceInfo.sampleRate = st->getSampleRate();
	sourceInfo.sampleCount = st->getSampleCount();
	sourceInfo.buffer = (unsigned char *)st->getRawData();
	sourceInfo.reserved = 0x0;

	destInfo.flags = kNoSampleRateConversion | kNoSampleSizeConversion |
					 kNoSampleFormatConversion | kNoChannelConversion |
					 kNoDecompression | kNoVolumeConversion |
					 kNoRealtimeProcessing;

	destInfo.format = k16BitNativeEndianFormat;

	destInfo.numChannels = st->getChannelCount();
	destInfo.sampleSize = st->getBitPerSample();
	destInfo.sampleRate = st->getSampleRate();
	destInfo.sampleCount = st->getSampleCount();
	destInfo.buffer = (unsigned char *)st->getRawData();
	destInfo.reserved = 0x0;

	SoundConverterOpen(&sourceInfo, &destInfo, &converter);

	myErr = SoundConverterGetInfo(converter, siCompressionFactor, &compressionInfo);
	if (myErr != noErr)
		throw TImageException(m_path, "error getting audio converter info");

	myErr = GetCompressionInfo(fixedCompression, sourceInfo.format, sourceInfo.numChannels, sourceInfo.sampleSize, &compressionInfo);
	if (myErr != noErr)
		throw TImageException(m_path, "error getting audio compression info");
	FailIf(myErr != noErr, ConverterErr);

	compressionInfo.bytesPerFrame = compressionInfo.bytesPerPacket * destInfo.numChannels;

	//////////
	//
	// create a sound sample description
	//
	//////////

	// use the SoundDescription format 1 because it adds fields for data size information
	// and is required by AddSoundDescriptionExtension if an extension is required for the compression format

	mySampleDesc = (SoundDescriptionV1Handle)NewHandleClear(sizeof(SoundDescriptionV1));
	FailWithAction(mySampleDesc == NULL, myErr = MemError(), Exit);

	(**mySampleDesc).desc.descSize = sizeof(SoundDescriptionV1);
	(**mySampleDesc).desc.dataFormat = destInfo.format;
	(**mySampleDesc).desc.resvd1 = 0;
	(**mySampleDesc).desc.resvd2 = 0;
	(**mySampleDesc).desc.dataRefIndex = 1;
	(**mySampleDesc).desc.version = 1;
	(**mySampleDesc).desc.revlevel = 0;
	(**mySampleDesc).desc.vendor = 0;
	(**mySampleDesc).desc.numChannels = destInfo.numChannels;
	(**mySampleDesc).desc.sampleSize = destInfo.sampleSize;
	(**mySampleDesc).desc.compressionID = 0;
	(**mySampleDesc).desc.packetSize = 0;
	(**mySampleDesc).desc.sampleRate = st->getSampleRate() << 16;
	(**mySampleDesc).samplesPerPacket = compressionInfo.samplesPerPacket;
	(**mySampleDesc).bytesPerPacket = compressionInfo.bytesPerPacket;
	(**mySampleDesc).bytesPerFrame = compressionInfo.bytesPerFrame;
	(**mySampleDesc).bytesPerSample = compressionInfo.bytesPerSample;

	//////////
	//
	// add samples to the media
	//
	//////////

	myErr = AddMediaSample(myMedia, myDestHandle,
						   0,
						   destInfo.sampleCount * compressionInfo.bytesPerFrame,
						   1,
						   (SampleDescriptionHandle)mySampleDesc,
						   destInfo.sampleCount * compressionInfo.samplesPerPacket,
						   0,
						   NULL);

	if (myErr != noErr)
		throw TImageException(m_path, "error adding audio samples");

	FailIf(myErr != noErr, MediaErr);

	myErr = EndMediaEdits(myMedia);
	if (myErr != noErr)
		throw TImageException(m_path, "error ending audio edit");

	FailIf(myErr != noErr, MediaErr);

	//////////
	//
	// insert the media into the track
	//
	//////////

	myErr = InsertMediaIntoTrack(theTrack, 0, 0, GetMediaDuration(myMedia), fixed1);
	if (myErr != noErr)
		throw TImageException(m_path, "error inserting audio track");

	FailIf(myErr != noErr, MediaErr);
	goto Done;

ConverterErr:
NoDest:
CompressErr:
Exit:

Done:

MediaErr:
	if (mySampleDesc != NULL)
		DisposeHandle((Handle)mySampleDesc);

	if (converter)
		SoundConverterClose(converter);

	if (myErr != noErr)
		throw TImageException(m_path, "error saving audio track");
}
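/*
 * Sketch (assumption, not part of the original sources): the in-memory data
 * reference pattern used by saveSoundTrack above. A zero-length Handle is
 * wrapped in a second Handle with PtrToHand and handed to NewTrackMedia as a
 * HandleDataHandlerSubType data reference, so the media keeps its sample data
 * in RAM instead of in a file.
 */
static Media createInMemorySoundMedia(Track track, TimeScale sampleRate)
{
	Handle storage = NewHandle(0);		/* will receive the media data */
	Handle dataRef = NULL;

	if (PtrToHand(&storage, &dataRef, sizeof(Handle)) != noErr) {
		if (storage != NULL)
			DisposeHandle(storage);
		return NULL;
	}

	/* the data reference contains the storage Handle itself */
	return NewTrackMedia(track, SoundMediaType, sampleRate, dataRef, HandleDataHandlerSubType);
}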
OSErr QTWired_CreateWiredSpritesMovie (void)
{
	short					myResRefNum = 0;
	short					myResID = movieInDataForkResID;
	Movie					myMovie = NULL;
	Track					myTrack;
	Media					myMedia;
	FSSpec					myFile;
	Boolean					myIsSelected = false;
	Boolean					myIsReplacing = false;	
	StringPtr 				myPrompt = QTUtils_ConvertCToPascalString(kWiredSavePrompt);
	StringPtr 				myFileName = QTUtils_ConvertCToPascalString(kWiredSaveFileName);
	QTAtomContainer			mySample = NULL;
	QTAtomContainer			myActions = NULL;
	QTAtomContainer			myBeginButton = NULL, myPrevButton = NULL, myNextButton = NULL, myEndButton = NULL;
	QTAtomContainer			myPenguinOne = NULL, myPenguinTwo = NULL, myPenguinOneOverride = NULL;
	QTAtomContainer			myBeginActionButton = NULL, myPrevActionButton = NULL, myNextActionButton = NULL, myEndActionButton = NULL;
	QTAtomContainer			myPenguinOneAction = NULL, myPenguinTwoAction = NULL;
	RGBColor				myKeyColor;
	Point					myLocation;
	short					isVisible, myLayer, myIndex, myID, i, myDelta;
	Boolean					hasActions;
	long					myFlags = createMovieFileDeleteCurFile | createMovieFileDontCreateResFile;
	OSType					myType = FOUR_CHAR_CODE('none');
	UInt32					myFrequency;
	QTAtom					myEventAtom;
	long					myLoopingFlags;
	ModifierTrackGraphicsModeRecord		myGraphicsMode;
	OSErr					myErr = noErr;

	//////////
	//
	// create a new movie file and set its controller type
	//
	//////////

	// ask the user for the name of the new movie file
	QTFrame_PutFile(myPrompt, myFileName, &myFile, &myIsSelected, &myIsReplacing);
	if (!myIsSelected)
		goto bail;

	// create a movie file for the destination movie
	myErr = CreateMovieFile(&myFile, FOUR_CHAR_CODE('TVOD'), smSystemScript, myFlags, &myResRefNum, &myMovie);
	if (myErr != noErr)
		goto bail;
	
	// select the "no controller" movie controller
	myType = EndianU32_NtoB(myType);
	SetUserDataItem(GetMovieUserData(myMovie), &myType, sizeof(myType), kUserDataMovieControllerType, 1);
	
	//////////
	//
	// create the sprite track and media
	//
	//////////
	
	myTrack = NewMovieTrack(myMovie, ((long)kSpriteTrackWidth << 16), ((long)kSpriteTrackHeight << 16), kNoVolume);
	myMedia = NewTrackMedia(myTrack, SpriteMediaType, kSpriteMediaTimeScale, NULL, 0);

	//////////
	//
	// create a key frame sample containing six sprites and all of their shared images
	//
	//////////

	// create a new, empty key frame sample
	myErr = QTNewAtomContainer(&mySample);
	if (myErr != noErr)
		goto bail;

	myKeyColor.red = 0xffff;						// white
	myKeyColor.green = 0xffff;
	myKeyColor.blue = 0xffff;

	// add images to the key frame sample
	SpriteUtils_AddPICTImageToKeyFrameSample(mySample, kGoToBeginningButtonUp, &myKeyColor, kGoToBeginningButtonUpIndex, NULL, NULL);
	SpriteUtils_AddPICTImageToKeyFrameSample(mySample, kGoToBeginningButtonDown, &myKeyColor, kGoToBeginningButtonDownIndex, NULL, NULL);
	SpriteUtils_AddPICTImageToKeyFrameSample(mySample, kGoToEndButtonUp, &myKeyColor, kGoToEndButtonUpIndex, NULL, NULL);
	SpriteUtils_AddPICTImageToKeyFrameSample(mySample, kGoToEndButtonDown, &myKeyColor, kGoToEndButtonDownIndex, NULL, NULL);
	SpriteUtils_AddPICTImageToKeyFrameSample(mySample, kGoToPrevButtonUp, &myKeyColor, kGoToPrevButtonUpIndex, NULL, NULL);
	SpriteUtils_AddPICTImageToKeyFrameSample(mySample, kGoToPrevButtonDown, &myKeyColor, kGoToPrevButtonDownIndex, NULL, NULL);
	SpriteUtils_AddPICTImageToKeyFrameSample(mySample, kGoToNextButtonUp, &myKeyColor, kGoToNextButtonUpIndex, NULL, NULL);
	SpriteUtils_AddPICTImageToKeyFrameSample(mySample, kGoToNextButtonDown, &myKeyColor, kGoToNextButtonDownIndex, NULL, NULL);
	SpriteUtils_AddPICTImageToKeyFrameSample(mySample, kPenguinForward, &myKeyColor, kPenguinForwardIndex, NULL, NULL);
	SpriteUtils_AddPICTImageToKeyFrameSample(mySample, kPenguinLeft, &myKeyColor, kPenguinLeftIndex, NULL, NULL);
	SpriteUtils_AddPICTImageToKeyFrameSample(mySample, kPenguinRight, &myKeyColor, kPenguinRightIndex, NULL, NULL);
	SpriteUtils_AddPICTImageToKeyFrameSample(mySample, kPenguinClosed, &myKeyColor, kPenguinClosedIndex, NULL, NULL);

	for (myIndex = kPenguinDownRightCycleStartIndex, myID = kWalkDownRightCycleStart; myIndex <= kPenguinDownRightCycleEndIndex; myIndex++, myID++)
		SpriteUtils_AddPICTImageToKeyFrameSample(mySample, myID, &myKeyColor, myIndex, NULL, NULL);
	
	// assign group IDs to the images
	SpriteUtils_AssignImageGroupIDsToKeyFrame(mySample);
	
	//////////
	//
	// add samples to the sprite track's media
	//
	//////////
	
	BeginMediaEdits(myMedia);

	// go to beginning button with no actions
	myErr = QTNewAtomContainer(&myBeginButton);
	if (myErr != noErr)
		goto bail;
	myLocation.h	= (1 * kSpriteTrackWidth / 8) - (kStartEndButtonWidth / 2);
	myLocation.v	= (4 * kSpriteTrackHeight / 5) - (kStartEndButtonHeight / 2);
	isVisible		= false;
	myLayer			= 1;
	myIndex			= kGoToBeginningButtonUpIndex;
	myErr = SpriteUtils_SetSpriteData(myBeginButton, &myLocation, &isVisible, &myLayer, &myIndex, NULL, NULL, myActions);
	if (myErr != noErr)
		goto bail;

	// go to previous button with no actions
	myErr = QTNewAtomContainer(&myPrevButton);
	if (myErr != noErr)
		goto bail;
	myLocation.h 	= (3 * kSpriteTrackWidth / 8) - (kNextPrevButtonWidth / 2);
	myLocation.v	= (4 * kSpriteTrackHeight / 5) - (kStartEndButtonHeight / 2);
	isVisible		= false;
	myLayer			= 1;
	myIndex			= kGoToPrevButtonUpIndex;
	myErr = SpriteUtils_SetSpriteData(myPrevButton, &myLocation, &isVisible, &myLayer, &myIndex, NULL, NULL, myActions);
	if (myErr != noErr)
		goto bail;

	// go to next button with no actions
	myErr = QTNewAtomContainer(&myNextButton);
	if (myErr != noErr)
		goto bail;
	myLocation.h 	= (5 * kSpriteTrackWidth / 8) - (kNextPrevButtonWidth / 2);
	myLocation.v	= (4 * kSpriteTrackHeight / 5) - (kStartEndButtonHeight / 2);
	isVisible		= false;
	myLayer			= 1;
	myIndex			= kGoToNextButtonUpIndex;
	myErr = SpriteUtils_SetSpriteData(myNextButton, &myLocation, &isVisible, &myLayer, &myIndex, NULL, NULL, myActions);
	if (myErr != noErr)
		goto bail;

	// go to end button with no actions
	myErr = QTNewAtomContainer(&myEndButton);
	if (myErr != noErr)
		goto bail;
	myLocation.h 	= (7 * kSpriteTrackWidth / 8) - (kStartEndButtonWidth / 2);
	myLocation.v	= (4 * kSpriteTrackHeight / 5) - (kStartEndButtonHeight / 2);
	isVisible		= false;
	myLayer			= 1;
	myIndex			= kGoToEndButtonUpIndex;
	myErr = SpriteUtils_SetSpriteData(myEndButton, &myLocation, &isVisible, &myLayer, &myIndex, NULL, NULL, myActions);
	if (myErr != noErr)
		goto bail;

	// first penguin sprite with no actions
	myErr = QTNewAtomContainer(&myPenguinOne);
	if (myErr != noErr)
		goto bail;
	myLocation.h 	= (3 * kSpriteTrackWidth / 8) - (kPenguinWidth / 2);
	myLocation.v 	= (kSpriteTrackHeight / 4) - (kPenguinHeight / 2);
	isVisible		= true;
	myLayer			= 2;
	myIndex			= kPenguinDownRightCycleStartIndex;
	myGraphicsMode.graphicsMode = blend;
	myGraphicsMode.opColor.red = myGraphicsMode.opColor.green = myGraphicsMode.opColor.blue = 0x8FFF;	// grey
	myErr = SpriteUtils_SetSpriteData(myPenguinOne, &myLocation, &isVisible, &myLayer, &myIndex, &myGraphicsMode, NULL, myActions);
	if (myErr != noErr)
		goto bail;
		
	// second penguin sprite with no actions
	myErr = QTNewAtomContainer(&myPenguinTwo);
	if (myErr != noErr)
		goto bail;
	myLocation.h 	= (5 * kSpriteTrackWidth / 8) - (kPenguinWidth / 2);
	myLocation.v 	= (kSpriteTrackHeight / 4) - (kPenguinHeight / 2);
	isVisible		= true;
	myLayer			= 3;
	myIndex			= kPenguinForwardIndex;
	myErr = SpriteUtils_SetSpriteData(myPenguinTwo, &myLocation, &isVisible, &myLayer, &myIndex, NULL, NULL, myActions);
	if (myErr != noErr)
		goto bail;

	//////////
	//
	// add actions to the six sprites
	//
	//////////

	// add go to beginning button
	myErr = QTCopyAtom(myBeginButton, kParentAtomIsContainer, &myBeginActionButton);
	if (myErr != noErr)
		goto bail;

	WiredUtils_AddSpriteSetImageIndexAction(myBeginActionButton, kParentAtomIsContainer, kQTEventMouseClick, 0, NULL, 0, 0, NULL, kGoToBeginningButtonDownIndex, NULL);
	WiredUtils_AddSpriteSetImageIndexAction(myBeginActionButton, kParentAtomIsContainer, kQTEventMouseClickEnd, 0, NULL, 0, 0, NULL, kGoToBeginningButtonUpIndex, NULL);
	WiredUtils_AddMovieGoToBeginningAction(myBeginActionButton, kParentAtomIsContainer, kQTEventMouseClickEndTriggerButton);
	WiredUtils_AddSpriteSetVisibleAction(myBeginActionButton, kParentAtomIsContainer, kQTEventMouseEnter, 0, NULL, 0, 0, NULL, true, NULL);
	WiredUtils_AddSpriteSetVisibleAction(myBeginActionButton, kParentAtomIsContainer, kQTEventMouseExit, 0, NULL, 0, 0, NULL, false, NULL);
	SpriteUtils_AddSpriteToSample(mySample, myBeginActionButton, kGoToBeginningSpriteID);	
	QTDisposeAtomContainer(myBeginActionButton);

	// add go to prev button
	myErr = QTCopyAtom(myPrevButton, kParentAtomIsContainer, &myPrevActionButton);
	if (myErr != noErr)
		goto bail;

	WiredUtils_AddSpriteSetImageIndexAction(myPrevActionButton, kParentAtomIsContainer, kQTEventMouseClick, 0, NULL, 0, 0, NULL, kGoToPrevButtonDownIndex, NULL);
	WiredUtils_AddSpriteSetImageIndexAction(myPrevActionButton, kParentAtomIsContainer, kQTEventMouseClickEnd, 0, NULL, 0, 0, NULL, kGoToPrevButtonUpIndex, NULL);
	WiredUtils_AddMovieStepBackwardAction(myPrevActionButton, kParentAtomIsContainer, kQTEventMouseClickEndTriggerButton);
	WiredUtils_AddSpriteSetVisibleAction(myPrevActionButton, kParentAtomIsContainer, kQTEventMouseEnter, 0, NULL, 0, 0, NULL, true, NULL);
	WiredUtils_AddSpriteSetVisibleAction(myPrevActionButton, kParentAtomIsContainer, kQTEventMouseExit, 0, NULL, 0, 0, NULL, false, NULL);
	SpriteUtils_AddSpriteToSample(mySample, myPrevActionButton, kGoToPrevSpriteID);
	QTDisposeAtomContainer(myPrevActionButton);

	// add go to next button
	myErr = QTCopyAtom(myNextButton, kParentAtomIsContainer, &myNextActionButton);
	if (myErr != noErr)
		goto bail;

	WiredUtils_AddSpriteSetImageIndexAction(myNextActionButton, kParentAtomIsContainer, kQTEventMouseClick, 0, NULL, 0, 0, NULL, kGoToNextButtonDownIndex, NULL);
	WiredUtils_AddSpriteSetImageIndexAction(myNextActionButton, kParentAtomIsContainer, kQTEventMouseClickEnd, 0, NULL, 0, 0, NULL, kGoToNextButtonUpIndex, NULL);
	WiredUtils_AddMovieStepForwardAction(myNextActionButton, kParentAtomIsContainer, kQTEventMouseClickEndTriggerButton);
	WiredUtils_AddSpriteSetVisibleAction(myNextActionButton, kParentAtomIsContainer, kQTEventMouseEnter, 0, NULL, 0, 0, NULL, true, NULL);
	WiredUtils_AddSpriteSetVisibleAction(myNextActionButton, kParentAtomIsContainer, kQTEventMouseExit, 0, NULL, 0, 0, NULL, false, NULL);
	SpriteUtils_AddSpriteToSample(mySample, myNextActionButton, kGoToNextSpriteID);
	QTDisposeAtomContainer(myNextActionButton);

	// add go to end button
	myErr = QTCopyAtom(myEndButton, kParentAtomIsContainer, &myEndActionButton);
	if (myErr != noErr)
		goto bail;

	WiredUtils_AddSpriteSetImageIndexAction(myEndActionButton, kParentAtomIsContainer, kQTEventMouseClick, 0, NULL, 0, 0, NULL, kGoToEndButtonDownIndex, NULL);
	WiredUtils_AddSpriteSetImageIndexAction(myEndActionButton, kParentAtomIsContainer, kQTEventMouseClickEnd, 0, NULL, 0, 0, NULL, kGoToEndButtonUpIndex, NULL);
	WiredUtils_AddMovieGoToEndAction(myEndActionButton, kParentAtomIsContainer, kQTEventMouseClickEndTriggerButton);
	WiredUtils_AddSpriteSetVisibleAction(myEndActionButton, kParentAtomIsContainer, kQTEventMouseEnter, 0, NULL, 0, 0, NULL, true, NULL);
	WiredUtils_AddSpriteSetVisibleAction(myEndActionButton, kParentAtomIsContainer, kQTEventMouseExit, 0, NULL, 0, 0, NULL, false, NULL);
	SpriteUtils_AddSpriteToSample(mySample, myEndActionButton, kGoToEndSpriteID);
	QTDisposeAtomContainer(myEndActionButton);

	// add penguin one
	myErr = QTCopyAtom(myPenguinOne, kParentAtomIsContainer, &myPenguinOneAction);
	if (myErr != noErr)
		goto bail;

	// show the buttons on mouse enter and hide them on mouse exit
	WiredUtils_AddSpriteSetVisibleAction(myPenguinOneAction, kParentAtomIsContainer, kQTEventMouseEnter, 0, NULL, 0, kTargetSpriteID, (void *)kGoToBeginningSpriteID, true, NULL);
	WiredUtils_AddSpriteSetVisibleAction(myPenguinOneAction, kParentAtomIsContainer, kQTEventMouseExit, 0, NULL, 0, kTargetSpriteID, (void *)kGoToBeginningSpriteID, false, NULL);
	WiredUtils_AddSpriteSetVisibleAction(myPenguinOneAction, kParentAtomIsContainer, kQTEventMouseEnter, 0, NULL, 0, kTargetSpriteID, (void *)kGoToPrevSpriteID, true, NULL);
	WiredUtils_AddSpriteSetVisibleAction(myPenguinOneAction, kParentAtomIsContainer, kQTEventMouseExit, 0, NULL, 0, kTargetSpriteID, (void *)kGoToPrevSpriteID, false, NULL);
	WiredUtils_AddSpriteSetVisibleAction(myPenguinOneAction, kParentAtomIsContainer, kQTEventMouseEnter, 0, NULL, 0, kTargetSpriteID, (void *)kGoToNextSpriteID, true, NULL);
	WiredUtils_AddSpriteSetVisibleAction(myPenguinOneAction, kParentAtomIsContainer, kQTEventMouseExit, 0, NULL, 0, kTargetSpriteID, (void *)kGoToNextSpriteID, false, NULL);
	WiredUtils_AddSpriteSetVisibleAction(myPenguinOneAction, kParentAtomIsContainer, kQTEventMouseEnter, 0, NULL, 0, kTargetSpriteID, (void *)kGoToEndSpriteID, true, NULL);
	WiredUtils_AddSpriteSetVisibleAction(myPenguinOneAction, kParentAtomIsContainer, kQTEventMouseExit, 0, NULL, 0, kTargetSpriteID, (void *)kGoToEndSpriteID, false, NULL);
	SpriteUtils_AddSpriteToSample(mySample, myPenguinOneAction, kPenguinOneSpriteID);
	QTWired_AddCursorChangeOnMouseOver(mySample, kPenguinOneSpriteID);
	QTDisposeAtomContainer(myPenguinOneAction);

	// add penguin two
	myErr = QTCopyAtom(myPenguinTwo, kParentAtomIsContainer, &myPenguinTwoAction);
	if (myErr != noErr)
		goto bail;

	// blink when clicked on
	WiredUtils_AddSpriteSetImageIndexAction(myPenguinTwoAction, kParentAtomIsContainer, kQTEventMouseClick, 0, NULL, 0, 0, NULL, kPenguinClosedIndex, NULL);
	WiredUtils_AddSpriteSetImageIndexAction(myPenguinTwoAction, kParentAtomIsContainer, kQTEventMouseClickEnd, 0, NULL, 0, 0, NULL, kPenguinForwardIndex, NULL);

	WiredUtils_AddQTEventAtom(myPenguinTwoAction, kParentAtomIsContainer, kQTEventMouseClickEndTriggerButton, &myEventAtom);

	// toggle the movie rate and both of the birds' graphics modes
	QTWired_AddPenguinTwoConditionalActions(myPenguinTwoAction, myEventAtom);

	QTWired_AddWraparoundMatrixOnIdle(myPenguinTwoAction);

	SpriteUtils_AddSpriteToSample(mySample, myPenguinTwoAction, kPenguinTwoSpriteID);
	QTDisposeAtomContainer(myPenguinTwoAction);
	
	// add an action for when the key frame is loaded, to set the movie's looping mode to palindrome;
	// note that this will actually be triggered every time the key frame is reloaded,
	// so if the operation was expensive we could use a conditional to test if we've already done it
	myLoopingFlags = loopTimeBase | palindromeLoopTimeBase;
	WiredUtils_AddMovieSetLoopingFlagsAction(mySample, kParentAtomIsContainer, kQTEventFrameLoaded, myLoopingFlags);

	// add the key frame sample to the sprite track media
	//
	// to add the sample data in a compressed form, you would use a QuickTime DataCodec to perform the
	// compression; replace the call to the utility AddSpriteSampleToMedia with a call to the utility
	// AddCompressedSpriteSampleToMedia to do this
	
	SpriteUtils_AddSpriteSampleToMedia(myMedia, mySample, kSpriteMediaFrameDuration, true, NULL);	
	//SpriteUtils_AddCompressedSpriteSampleToMedia(myMedia, mySample, kSpriteMediaFrameDuration, true, zlibDataCompressorSubType, NULL);

	//////////
	//
	// add a few override samples to move penguin one and change its image index
	//
	//////////

	// original penguin one location
	myLocation.h 	= (3 * kSpriteTrackWidth / 8) - (kPenguinWidth / 2);
	myLocation.v 	= (kSpriteTrackHeight / 4) - (kPenguinHeight / 2);

	myDelta = (kSpriteTrackHeight / 2) / kNumOverrideSamples;
	myIndex = kPenguinDownRightCycleStartIndex;
	
	for (i = 1; i <= kNumOverrideSamples; i++) {
		QTRemoveChildren(mySample, kParentAtomIsContainer);
		QTNewAtomContainer(&myPenguinOneOverride);

		myLocation.h += myDelta;
		myLocation.v += myDelta;
		myIndex++;
		if (myIndex > kPenguinDownRightCycleEndIndex)
			myIndex = kPenguinDownRightCycleStartIndex;
			
		SpriteUtils_SetSpriteData(myPenguinOneOverride, &myLocation, NULL, NULL, &myIndex, NULL, NULL, NULL);
		SpriteUtils_AddSpriteToSample(mySample, myPenguinOneOverride, kPenguinOneSpriteID);
		SpriteUtils_AddSpriteSampleToMedia(myMedia, mySample, kSpriteMediaFrameDuration, false, NULL);	
		QTDisposeAtomContainer(myPenguinOneOverride);
	}

	EndMediaEdits(myMedia);
	
	// add the media to the track
	InsertMediaIntoTrack(myTrack, 0, 0, GetMediaDuration(myMedia), fixed1);
	
	//////////
	//
	// set the sprite track properties
	//
	//////////
	{
		QTAtomContainer		myTrackProperties;
		RGBColor			myBackgroundColor;
		
		// add a background color to the sprite track
		myBackgroundColor.red = EndianU16_NtoB(0x8000);
		myBackgroundColor.green = EndianU16_NtoB(0);
		myBackgroundColor.blue = EndianU16_NtoB(0xffff);
		
		QTNewAtomContainer(&myTrackProperties);
		QTInsertChild(myTrackProperties, 0, kSpriteTrackPropertyBackgroundColor, 1, 1, sizeof(RGBColor), &myBackgroundColor, NULL);

		// tell the movie controller that this sprite track has actions
		hasActions = true;
		QTInsertChild(myTrackProperties, 0, kSpriteTrackPropertyHasActions, 1, 1, sizeof(hasActions), &hasActions, NULL);
	
		// tell the sprite track to generate QTIdleEvents
		myFrequency = EndianU32_NtoB(2);
		QTInsertChild(myTrackProperties, 0, kSpriteTrackPropertyQTIdleEventsFrequency, 1, 1, sizeof(myFrequency), &myFrequency, NULL);
		myErr = SetMediaPropertyAtom(myMedia, myTrackProperties);
		if (myErr != noErr)
			goto bail;

		QTDisposeAtomContainer(myTrackProperties);
	}
	
	//////////
	//
	// finish up
	//
	//////////
	
	// add the movie resource to the movie file
	myErr = AddMovieResource(myMovie, myResRefNum, &myResID, myFile.name);
	
bail:
	free(myPrompt);
	free(myFileName);

	if (mySample != NULL)
		QTDisposeAtomContainer(mySample);

	if (myBeginButton != NULL)
		QTDisposeAtomContainer(myBeginButton);	
			
	if (myPrevButton != NULL)
		QTDisposeAtomContainer(myPrevButton);
				
	if (myNextButton != NULL)
		QTDisposeAtomContainer(myNextButton);
				
	if (myEndButton != NULL)
		QTDisposeAtomContainer(myEndButton);		
		
	if (myResRefNum != 0)
		CloseMovieFile(myResRefNum);

	if (myMovie != NULL)
		DisposeMovie(myMovie);
		
	return(myErr);
}
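/*
 * Sketch (assumption): scalar values stored in a QT atom container, such as
 * the sprite track properties above, must be big-endian on every platform,
 * which is why they are wrapped in the Endian*_NtoB macros before QTInsertChild.
 * A minimal helper for one such property might look like this.
 */
static OSErr addIdleEventFrequency(QTAtomContainer trackProperties, UInt32 frequency)
{
	UInt32 bigEndianFrequency = EndianU32_NtoB(frequency);

	return QTInsertChild(trackProperties, kParentAtomIsContainer,
						 kSpriteTrackPropertyQTIdleEventsFrequency, 1, 1,
						 sizeof(bigEndianFrequency), &bigEndianFrequency, NULL);
}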
int convertToMP4PathThrough(CFStringRef inFile, CFStringRef outFile)
{
	OSStatus error;
	MovieExportComponent movieExporter = NULL;
	Handle inDataRef=0, outDataRef=0;
	OSType inDataRefType, outDataRefType;
	short inResID = 0;
	Movie theMovie=0;
	int ret = -1;

	error = OpenADefaultComponent(MovieExportType, kQTFileTypeMP4, &movieExporter);
	if(error) {
		fprintf(stderr,"OpenADefaultComponent error: cannot find the QuickTime component\n");
		goto last;
	}
	error = QTNewDataReferenceFromFullPathCFString(inFile, kQTNativeDefaultPathStyle, 0, &inDataRef, &inDataRefType);
	if(error) {
		fprintf(stderr,"QTNewDataReferenceFromFullPathCFString error: input file path is invalid\n");
		goto last;
	}
	error = QTNewDataReferenceFromFullPathCFString(outFile, kQTNativeDefaultPathStyle, 0, &outDataRef, &outDataRefType);
	if(error) {
		fprintf(stderr,"QTNewDataReferenceFromFullPathCFString error: output file path is invalid\n");
		goto last;
	}
	error = NewMovieFromDataRef(&theMovie, newMovieActive, &inResID, inDataRef, inDataRefType);
	if(error) {
		fprintf(stderr,"NewMovieFromDataRef error: cannot open the input file\n");
		goto last;
	}

	Track theTrack = getSoundTrack(theMovie);
	Media theMedia = GetTrackMedia(theTrack);
	DeleteTrackSegment(theTrack, 0, GetTrackDuration(theTrack));
	SetMovieTimeScale(theMovie, GetMediaTimeScale(theMedia));
	InsertMediaIntoTrack(theTrack, 0, 0, GetMediaDuration(theMedia), fixed1);

	Boolean useHighResolutionAudio = true;
	QTSetComponentProperty(
		movieExporter,
		kQTPropertyClass_MovieExporter,
		kQTMovieExporterPropertyID_EnableHighResolutionAudioFeatures,
		sizeof(Boolean),
		&useHighResolutionAudio
	);

	UInt32 ftyp = 'mp42';
	QTSetComponentProperty(
		movieExporter,
		kQTPropertyClass_MovieExporter,
		'ftyp',
		4,
		&ftyp
	);

	QTAtomContainer ac = NULL;
	MovieExportGetSettingsAsAtomContainer(movieExporter, &ac);
	QTAtom ensoAtom = QTFindChildByID(ac, kParentAtomIsContainer, kQTSettingsMovieExportEnableSound, 1, NULL);
	if(ensoAtom) {
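		// Replacing the "enable sound" setting's data with 'past' asks the
		// exporter to pass the sound track through without re-encoding (hence
		// "PathThrough" in this function's name); like all atom-container data,
		// the value is converted to big-endian before being written back.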
		long size, *data;
		QTGetAtomDataPtr(ac,ensoAtom,&size,(Ptr *)&data);
		data[0] = EndianS32_NtoB('past');
		QTSetAtomData(ac, ensoAtom, size, data);
		MovieExportSetSettingsFromAtomContainer(movieExporter, ac);
	}
	DisposeHandle(ac);
	
	/*Boolean cancelled;
	error = MovieExportDoUserDialog(movieExporter, theMovie, NULL, 0, GetMovieDuration(theMovie), &cancelled);
	if(cancelled) goto last;
	if(error) {
		printf("MovieExportDoUserDialog error\n");
		goto last;
	}*/
	
	error = ConvertMovieToDataRef(theMovie, 0, outDataRef, outDataRefType, kQTFileTypeMP4, FOUR_CHAR_CODE('TVOD'), createMovieFileDeleteCurFile|createMovieFileDontCreateResFile, movieExporter);
	if(error) {
        fprintf(stderr,"ConvertMovieToDataRef error: cannot translate .mov into .m4a (%d)\n",error);
		goto last;
	}

	ret = 0;
	
last:
	if(movieExporter) CloseComponent(movieExporter);
	if(theMovie) DisposeMovie(theMovie);
	if(inDataRef) DisposeHandle(inDataRef);
	if(outDataRef) DisposeHandle(outDataRef);

	return ret;
}
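/*
 * Sketch (assumption): getSoundTrack is not shown in this listing; a minimal
 * implementation returning the movie's first sound track could simply wrap
 * GetMovieIndTrackType.
 */
static Track getSoundTrack(Movie movie)
{
	return GetMovieIndTrackType(movie, 1, SoundMediaType, movieTrackMediaType);
}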
//-----------------------------------------------------------------------------
void ofQtVideoSaver::finishMovie(){

	if (!bSetupForRecordingMovie) return;
	
	bSetupForRecordingMovie = false;
	
    
    EndMediaEdits (media);             /* Inform the Movie Toolbox that it   */
                                       /*   can close the media container.   */


	/*  Step 5:  Insert a reference into the track that specifies which of the
    media samples to play and when to start playing them. 
    ======================================================================  */
    
    InsertMediaIntoTrack 
      (
      track,                           /* the track to update.               */
      0,                               /* time in track where the specified  */
                                       /*   media samples should start playing */
                                       /*   using movie time scale.          */
      0,                               /* time in media samples of the first */
                                       /*   sample to play using media time  */
                                       /*   scale.                           */
      GetMediaDuration (media),        /* duration of media samples to play  */
                                       /*   using media time scale.          */
      1L<<16 //fixed1                  /* rate at which to play the samples. */
      );


/*  Step 6:  Append the movie atom to the movie file (AddMovieResource).
    ====================================================================  */
    
    sResId = movieInDataForkResID;
    osErr = AddMovieResource
      (
      movie,                           /* movie to create moov atom from     */
      sResRefNum,                      /* file to receive the moov atom      */
      &sResId,                         /* id num of movie resource (res fork)*/
      (unsigned char *) fileName.c_str()                      /* name of movie resource (res fork)  */
      );
    if (osErr) 
      { 
      printf ("AddMovieResource failed %d\n", osErr); 
      goto bail; 
      }

    if (sResRefNum != 0) 
      {
      CloseMovieFile (sResRefNum);     /* close file CreateMovieFile opened  */
      sResRefNum = 0;
      }


	/*  Step 7 (optional):  Place the movie atom as the first atom in a new 
    movie file, and interleave the media data (FlattenMovieData).        
    ===================================================================  */
    
    // no flattening necessary I think .....  
	//if (true) flatten_my_movie (movie, pszFlatFilename);


	/*  Step 8:  Close the movie file that CreateMovieFile opened (if necessary) 
    and dispose of the movie memory structures (DisposeMovie). 
    ========================================================================  */
    


	SetGWorld (pSavedPort, hSavedDevice);
    DisposeMovie (movie);
    movie = NULL;                      /* already disposed; skip it at bail   */
	if (hImageDescription != NULL) DisposeHandle ((Handle) hImageDescription);
    if (hCompressedData   != NULL) DisposeHandle (hCompressedData);
    if (pMovieGWorld      != NULL) DisposeGWorld (pMovieGWorld);

  bail:    

    if (sResRefNum != 0) CloseMovieFile (sResRefNum);
    if (movie     != NULL) DisposeMovie (movie);

}
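/*
 * Note: the 1L<<16 passed to InsertMediaIntoTrack above is simply fixed1
 * (0x00010000) written out by hand, i.e. a playback rate of 1.0 in QuickTime's
 * 16.16 fixed-point format; it is the same constant the other examples pass
 * directly.
 */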
Example #25
void QTVectors_CreateVectorMovie (UInt32 theBuildAtomMethod)
{
	Handle						myHandle = NULL;
	ImageDescriptionHandle		mySampleDesc = NULL;
	short						myResRefNum = 0;
	short						myResID = movieInDataForkResID;
	Movie						myMovie = NULL;
	Track						myTrack;
	Media						myMedia;
	FSSpec						myFile;
	Boolean						myIsSelected = false;
	Boolean						myIsReplacing = false;	
	StringPtr 					myPrompt = QTUtils_ConvertCToPascalString(kVectorSavePrompt);
	StringPtr 					myFileName = QTUtils_ConvertCToPascalString(kVectorSaveMovieFileName);
	ComponentInstance			myComponent = NULL;
	ComponentResult				myResult;
	long						myFlags = createMovieFileDeleteCurFile | createMovieFileDontCreateResFile;
	OSErr						myErr = noErr;
	
	// METHOD ONE: use a raw data stream
	
	if (theBuildAtomMethod == kUseRawDataStream) {
	
		// kUseRawDataStream: build the vector data using a stream of hard-coded raw data
		// NOTE: the data in the stream *must* be big-endian, since it's stored in a QuickTime atom container.

		long					myPath[] = {	
			
		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(long)), EndianU32_NtoB(kCurveAntialiasControlAtom),
			EndianU32_NtoB(kCurveAntialiasOn),

		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(long)), EndianU32_NtoB(kCurveFillTypeAtom),
			EndianU32_NtoB(gxEvenOddFill),

		// a big white enclosing rectangle (600 x 600)
		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(ARGBColor)), EndianU32_NtoB(kCurveARGBColorAtom),
			EndianU32_NtoB(0xffffffff),	// alpha, red
			EndianU32_NtoB(0xffffffff),	// green, blue
										// it's white!

		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(long)*11), EndianU32_NtoB(kCurvePathAtom),
			EndianU32_NtoB(1),			// one contour in path
			EndianU32_NtoB(4),			// four points in path
			EndianU32_NtoB(0x00000000),	// all points are on the curve: it's a rectangle! 
			EndianU32_NtoB(0x00000000), EndianU32_NtoB(0x00000000), 	// top left
			EndianU32_NtoB(0x02580000), EndianU32_NtoB(0x00000000),		// top right
			EndianU32_NtoB(0x02580000), EndianU32_NtoB(0x02580000),		// bottom right 
			EndianU32_NtoB(0x00000000), EndianU32_NtoB(0x02580000),		// bottom left

		// a black rounded square, centered at 150,150
		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(ARGBColor)), EndianU32_NtoB(kCurveARGBColorAtom),
			EndianU32_NtoB(0x00000000),	// alpha, red
			EndianU32_NtoB(0x00000000),	// green, blue
										// it's black!

		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(long)*11), EndianU32_NtoB(kCurvePathAtom),
			EndianU32_NtoB(1),			// one contour in path
			EndianU32_NtoB(4),			// four points in path
			EndianU32_NtoB(0xffffffff), // all points are off the curve: it's a rounded square! 
			EndianU32_NtoB(0x00640000), EndianU32_NtoB(0x00640000),
			EndianU32_NtoB(0x00C80000), EndianU32_NtoB(0x00640000),
			EndianU32_NtoB(0x00C80000), EndianU32_NtoB(0x00C80000), 
			EndianU32_NtoB(0x00640000), EndianU32_NtoB(0x00C80000),

		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(long)), EndianU32_NtoB(kCurveFillTypeAtom),
			EndianU32_NtoB(gxEvenOddFill),

		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(long)), EndianU32_NtoB(kCurvePenThicknessAtom),
			EndianU32_NtoB(0x100000),
											
		// enable linear gradient for all following atoms
		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(long)), EndianU32_NtoB(kCurveGradientTypeAtom),
			EndianU32_NtoB(kLinearGradient),
		
		// define the gradient: red -> green -> red -> blue									
		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(GradientColorRecord)*4), EndianU32_NtoB(kCurveGradientRecordAtom),
										
			EndianU32_NtoB(0xffffffff),	// gradient color record 1:
			EndianU32_NtoB(0x00000000),	// red
			EndianU32_NtoB(0x00000000),	// beginning of gradient
										
			EndianU32_NtoB(0x77770000),	// gradient color record 2:
			EndianU32_NtoB(0xffff0000),	// green
			EndianU32_NtoB(0x00004000),
										
			EndianU32_NtoB(0x3333ffff),	// gradient color record 3:
			EndianU32_NtoB(0x00000000),	// red
			EndianU32_NtoB(0x0000C000),
										
			EndianU32_NtoB(0xffff0000),	// gradient color record 4:
			EndianU32_NtoB(0x0000ffff),	// blue
			EndianU32_NtoB(0x00010000),	// end of gradient

		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(long)), EndianU32_NtoB(kCurveGradientAngleAtom),
			EndianU32_NtoB(0x00450000),	// gradient at a 45° angle
		
		// a green rectangle, centered at 40,40, painted with a linear gradient									
		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(ARGBColor)), EndianU32_NtoB(kCurveARGBColorAtom),
			EndianU32_NtoB(0x00000000),	// alpha, red
			EndianU32_NtoB(0xffff0000),	// green, blue
										// it's green!

		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(long)*11), EndianU32_NtoB(kCurvePathAtom),
			EndianU32_NtoB(1),			// one contour in path
			EndianU32_NtoB(4),			// four points in path
			EndianU32_NtoB(0x00000000),	// all points are on the curve: it's a rectangle! 
			EndianU32_NtoB(0x00100000), EndianU32_NtoB(0x00100000),
			EndianU32_NtoB(0x00400000), EndianU32_NtoB(0x00100000),
			EndianU32_NtoB(0x00400000), EndianU32_NtoB(0x00400000),
			EndianU32_NtoB(0x00100000), EndianU32_NtoB(0x00400000),

		// disable gradient for all following atoms (since no atom data)
		EndianU32_NtoB(kSizeOfSizeAndTagFields), EndianU32_NtoB(kCurveGradientRecordAtom),
									
		// a red rounded square, centered at 50,50
		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(ARGBColor)), EndianU32_NtoB(kCurveARGBColorAtom),
			EndianU32_NtoB(0x3333ffff),	// alpha, red
			EndianU32_NtoB(0x00000000),	// green, blue
										// it's red!

		EndianU32_NtoB(kSizeOfSizeAndTagFields + sizeof(long)*11), EndianU32_NtoB(kCurvePathAtom),
			EndianU32_NtoB(1L),			// one contour in path
			EndianU32_NtoB(4L),			// four points in path
			EndianU32_NtoB(0xffffffff), // all points are off the curve: it's a rounded square! 
			EndianU32_NtoB(0x001e0000), EndianU32_NtoB(0x001e0000),
			EndianU32_NtoB(0x00460000), EndianU32_NtoB(0x001e0000),
			EndianU32_NtoB(0x00460000), EndianU32_NtoB(0x00460000),
			EndianU32_NtoB(0x001e0000), EndianU32_NtoB(0x00460000),

		EndianU32_NtoB(kSizeOfZeroAtomHeader), EndianU32_NtoB(kCurveEndAtom),
	};
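		/*
		 * Layout note: every entry in the raw stream above is
		 *     <total size, header included> <atom type> <big-endian payload ...>
		 * where the size is kSizeOfSizeAndTagFields plus the payload size, and
		 * the stream ends with a kCurveEndAtom carrying no payload at all
		 * (kSizeOfZeroAtomHeader); this is why every value is wrapped in
		 * EndianU32_NtoB.
		 */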
			
		myHandle = NewHandle(sizeof(myPath));
		if (myHandle == NULL)
			goto bail;
			
		BlockMove(myPath, *myHandle, sizeof(myPath));
	
	}	// end of kUseRawDataStream

	
	// METHOD TWO: use the Curve Utilities API
	
	if (theBuildAtomMethod == kUseCurveUtilities) {
	
		// kUseCurveUtilities: build the vector data using the Curve Utilities API		
		Handle						myPath;
		gxPoint						myPoint;
		long						myAtomData[14];
		ARGBColor					myColor;
		GradientColorRecord			myGradients[4];
	
		// open the vector codec; we'll need it for some subsequent calls
		myComponent = OpenDefaultComponent(decompressorComponentType, kVectorCodecType);
		if (myComponent == NULL)
			goto bail;

		// create a new, empty vector data stream
		myResult = CurveCreateVectorStream(myComponent, &myHandle);
		if (myResult != noErr)
			goto bail;
		
		// now start adding atoms holding the vector data
		
		// set antialiasing on
		myAtomData[0] = EndianU32_NtoB(kCurveAntialiasOn);
		CurveAddAtomToVectorStream(myComponent, kCurveAntialiasControlAtom, sizeof(long), myAtomData, myHandle);

		// set fill type
		myAtomData[0] = EndianU32_NtoB(gxEvenOddFill);
		CurveAddAtomToVectorStream(myComponent, kCurveFillTypeAtom, sizeof(long), myAtomData, myHandle);

		// a big white enclosing rectangle (600 x 600)
		myColor.alpha = EndianU16_NtoB(0xffff);
		myColor.red = EndianU16_NtoB(0xffff);
		myColor.green = EndianU16_NtoB(0xffff);
		myColor.blue = EndianU16_NtoB(0xffff);
		CurveAddAtomToVectorStream(myComponent, kCurveARGBColorAtom, sizeof(ARGBColor), &myColor, myHandle);

#if USE_CURVE_INSERT_POINT_INTO_PATH
		// create a new, empty path
		CurveNewPath(myComponent, &myPath);

		myPoint.x = 0x00000000;
		myPoint.y = 0x00000000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 0, true);
		
		myPoint.x = 0x02580000;
		myPoint.y = 0x00000000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 1, true);
		
		myPoint.x = 0x02580000;
		myPoint.y = 0x02580000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 2, true);
		
		myPoint.x = 0x00000000;
		myPoint.y = 0x02580000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 3, true);

		// add the 'path' atom to the vector data stream
		CurveAddPathAtomToVectorStream(myComponent, myPath, myHandle);
		DisposeHandle(myPath);
#else
		myAtomData[0] = EndianU32_NtoB(1L);
		myAtomData[1] = EndianU32_NtoB(4L);
		myAtomData[2] = EndianU32_NtoB(0x00000000);
		myAtomData[3] = EndianU32_NtoB(0x00000000);
		myAtomData[4] = EndianU32_NtoB(0x00000000);
		myAtomData[5] = EndianU32_NtoB(0x02580000);
		myAtomData[6] = EndianU32_NtoB(0x00000000);
		myAtomData[7] = EndianU32_NtoB(0x02580000);
		myAtomData[8] = EndianU32_NtoB(0x02580000);
		myAtomData[9] = EndianU32_NtoB(0x00000000);
		myAtomData[10] = EndianU32_NtoB(0x02580000);
		CurveAddAtomToVectorStream(myComponent, kCurvePathAtom, sizeof(long)*11, myAtomData, myHandle);
#endif
		
		// a black rounded square, centered at 150,150
		myColor.alpha = EndianU16_NtoB(0x0000);
		myColor.red = EndianU16_NtoB(0x0000);
		myColor.green = EndianU16_NtoB(0x0000);
		myColor.blue = EndianU16_NtoB(0x0000);
		CurveAddAtomToVectorStream(myComponent, kCurveARGBColorAtom, sizeof(ARGBColor), &myColor, myHandle);

#if USE_CURVE_INSERT_POINT_INTO_PATH
		// create a new, empty path
		CurveNewPath(myComponent, &myPath);

		myPoint.x = 0x00640000;
		myPoint.y = 0x00640000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 0, false);
		
		myPoint.x = 0x00C80000;
		myPoint.y = 0x00640000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 1, false);
		
		myPoint.x = 0x00C80000;
		myPoint.y = 0x00C80000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 2, false);
		
		myPoint.x = 0x00640000;
		myPoint.y = 0x00C80000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 3, false);

		// add the 'path' atom to the vector data stream
		CurveAddPathAtomToVectorStream(myComponent, myPath, myHandle);
		DisposeHandle(myPath);
#else
		myAtomData[0] = EndianU32_NtoB(1L);
		myAtomData[1] = EndianU32_NtoB(4L);
		myAtomData[2] = EndianU32_NtoB(0xffffffff);
		myAtomData[3] = EndianU32_NtoB(0x00640000);
		myAtomData[4] = EndianU32_NtoB(0x00640000);
		myAtomData[5] = EndianU32_NtoB(0x00C80000);
		myAtomData[6] = EndianU32_NtoB(0x00640000);
		myAtomData[7] = EndianU32_NtoB(0x00C80000);
		myAtomData[8] = EndianU32_NtoB(0x00C80000);
		myAtomData[9] = EndianU32_NtoB(0x00640000);
		myAtomData[10] = EndianU32_NtoB(0x00C80000);
		CurveAddAtomToVectorStream(myComponent, kCurvePathAtom, sizeof(long)*11, myAtomData, myHandle);
#endif

		// set fill type
		myAtomData[0] = EndianU32_NtoB(gxEvenOddFill);
		CurveAddAtomToVectorStream(myComponent, kCurveFillTypeAtom, sizeof(long), myAtomData, myHandle);

		// set pen thickness
		myAtomData[0] = EndianU32_NtoB(0x100000);
		CurveAddAtomToVectorStream(myComponent, kCurvePenThicknessAtom, sizeof(long), myAtomData, myHandle);

		// enable linear gradient for all following atoms
		myAtomData[0] = EndianU32_NtoB(kLinearGradient);
		CurveAddAtomToVectorStream(myComponent, kCurveGradientTypeAtom, sizeof(long), myAtomData, myHandle);

		// define the gradient: red -> green -> red -> blue									
		myGradients[0].thisColor.alpha = EndianU16_NtoB(0xffff);
		myGradients[0].thisColor.red = EndianU16_NtoB(0xffff);
		myGradients[0].thisColor.green = EndianU16_NtoB(0x0000);
		myGradients[0].thisColor.blue = EndianU16_NtoB(0x0000);
		myGradients[0].endingPercentage = EndianU32_NtoB(0x00000000);
		myGradients[1].thisColor.alpha = EndianU16_NtoB(0x7777);
		myGradients[1].thisColor.red = EndianU16_NtoB(0x0000);
		myGradients[1].thisColor.green = EndianU16_NtoB(0xffff);
		myGradients[1].thisColor.blue = EndianU16_NtoB(0x0000);
		myGradients[1].endingPercentage = EndianU32_NtoB(0x00004000);
		myGradients[2].thisColor.alpha = EndianU16_NtoB(0x3333);
		myGradients[2].thisColor.red = EndianU16_NtoB(0xffff);
		myGradients[2].thisColor.green = EndianU16_NtoB(0x0000);
		myGradients[2].thisColor.blue = EndianU16_NtoB(0x0000);
		myGradients[2].endingPercentage = EndianU32_NtoB(0x0000C000);
		myGradients[3].thisColor.alpha = EndianU16_NtoB(0xffff);
		myGradients[3].thisColor.red = EndianU16_NtoB(0x0000);
		myGradients[3].thisColor.green = EndianU16_NtoB(0x0000);
		myGradients[3].thisColor.blue = EndianU16_NtoB(0xffff);
		myGradients[3].endingPercentage = EndianU32_NtoB(0x00010000);
		CurveAddAtomToVectorStream(myComponent, kCurveGradientRecordAtom, sizeof(GradientColorRecord)*4, myGradients, myHandle);

		// set gradient angle
		myAtomData[0] = EndianU32_NtoB(0x00450000);
		CurveAddAtomToVectorStream(myComponent, kCurveGradientAngleAtom, sizeof(long), myAtomData, myHandle);

		// a green rectangle, centered at 40,40, painted with a linear gradient									
		myColor.alpha = EndianU16_NtoB(0x0000);
		myColor.red = EndianU16_NtoB(0x0000);
		myColor.green = EndianU16_NtoB(0xffff);
		myColor.blue = EndianU16_NtoB(0x0000);
		CurveAddAtomToVectorStream(myComponent, kCurveARGBColorAtom, sizeof(ARGBColor), &myColor, myHandle);

#if USE_CURVE_INSERT_POINT_INTO_PATH
		// create a new, empty path
		CurveNewPath(myComponent, &myPath);

		myPoint.x = 0x00100000;
		myPoint.y = 0x00100000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 0, true);
		
		myPoint.x = 0x00400000;
		myPoint.y = 0x00100000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 1, true);
		
		myPoint.x = 0x00400000;
		myPoint.y = 0x00400000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 2, true);
		
		myPoint.x = 0x00100000;
		myPoint.y = 0x00400000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 3, true);

		// add the 'path' atom to the vector data stream
		CurveAddPathAtomToVectorStream(myComponent, myPath, myHandle);
		DisposeHandle(myPath);
#else
		myAtomData[0] = EndianU32_NtoB(1L);
		myAtomData[1] = EndianU32_NtoB(4L);
		myAtomData[2] = EndianU32_NtoB(0x00000000);
		myAtomData[3] = EndianU32_NtoB(0x00100000);
		myAtomData[4] = EndianU32_NtoB(0x00100000);
		myAtomData[5] = EndianU32_NtoB(0x00400000);
		myAtomData[6] = EndianU32_NtoB(0x00100000);
		myAtomData[7] = EndianU32_NtoB(0x00400000);
		myAtomData[8] = EndianU32_NtoB(0x00400000);
		myAtomData[9] = EndianU32_NtoB(0x00100000);
		myAtomData[10] = EndianU32_NtoB(0x00400000);
		CurveAddAtomToVectorStream(myComponent, kCurvePathAtom, sizeof(long)*11, myAtomData, myHandle);
#endif

		// disable gradient for all following atoms (since no atom data)
		CurveAddAtomToVectorStream(myComponent, kCurveGradientTypeAtom, 0, NULL, myHandle);
		
		// a red rounded square, centered at 50,50
		myColor.alpha = EndianU16_NtoB(0x3333);
		myColor.red = EndianU16_NtoB(0xffff);
		myColor.green = EndianU16_NtoB(0x0000);
		myColor.blue = EndianU16_NtoB(0x0000);
		CurveAddAtomToVectorStream(myComponent, kCurveARGBColorAtom, sizeof(ARGBColor), &myColor, myHandle);

#if USE_CURVE_INSERT_POINT_INTO_PATH
		// create a new, empty path
		CurveNewPath(myComponent, &myPath);

		myPoint.x = 0x001e0000;
		myPoint.y = 0x001e0000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 0, false);
		
		myPoint.x = 0x00460000;
		myPoint.y = 0x001e0000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 1, false);
		
		myPoint.x = 0x00460000;
		myPoint.y = 0x00460000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 2, false);
		
		myPoint.x = 0x001e0000;
		myPoint.y = 0x00460000;
		CurveInsertPointIntoPath(myComponent, &myPoint, myPath, 0, 3, false);

		// add the 'path' atom to the vector data stream
		CurveAddPathAtomToVectorStream(myComponent, myPath, myHandle);
		DisposeHandle(myPath);
#else
		myAtomData[0] = EndianU32_NtoB(1L);
		myAtomData[1] = EndianU32_NtoB(4L);
		myAtomData[2] = EndianU32_NtoB(0xffffffff);
		myAtomData[3] = EndianU32_NtoB(0x001e0000);
		myAtomData[4] = EndianU32_NtoB(0x001e0000);
		myAtomData[5] = EndianU32_NtoB(0x00460000);
		myAtomData[6] = EndianU32_NtoB(0x001e0000);
		myAtomData[7] = EndianU32_NtoB(0x00460000);
		myAtomData[8] = EndianU32_NtoB(0x00460000);
		myAtomData[9] = EndianU32_NtoB(0x001e0000);
		myAtomData[10] = EndianU32_NtoB(0x00460000);
		CurveAddAtomToVectorStream(myComponent, kCurvePathAtom, sizeof(long)*11, myAtomData, myHandle);
#endif

		// add the 'zero' atom to the vector data stream
		CurveAddZeroAtomToVectorStream(myComponent, myHandle);
		
	}	// end of kUseCurveUtilities
	
	// create the image description
	mySampleDesc = (ImageDescriptionHandle)NewHandleClear(sizeof(ImageDescription));
	if (mySampleDesc == NULL)
		goto bail;
	
	// fill in the fields of the image description
	(**mySampleDesc).idSize = sizeof(ImageDescription);
	(**mySampleDesc).cType = kVectorCodecType;
	(**mySampleDesc).vendor = kAppleManufacturer;
	(**mySampleDesc).temporalQuality = codecNormalQuality;
	(**mySampleDesc).spatialQuality = codecNormalQuality;
	(**mySampleDesc).width = 300;
	(**mySampleDesc).height = 300;
	(**mySampleDesc).hRes = 72L << 16;
	(**mySampleDesc).vRes = 72L << 16;
	(**mySampleDesc).dataSize = 0L;
	(**mySampleDesc).frameCount = 1;
	(**mySampleDesc).depth = 0;
	(**mySampleDesc).clutID = -1;
		
	// prompt user for new file name
	QTFrame_PutFile(myPrompt, myFileName, &myFile, &myIsSelected, &myIsReplacing);
	if (!myIsSelected)
		goto bail;
	
	// create a movie file for the destination movie
	myErr = CreateMovieFile(&myFile, FOUR_CHAR_CODE('TVOD'), smCurrentScript, myFlags, &myResRefNum, &myMovie);
	if (myErr != noErr)
		goto bail;
	
	// create the vector track and media
	myTrack = NewMovieTrack(myMovie, FixDiv(300, 1), FixDiv(300, 1), kNoVolume);
	myMedia = NewTrackMedia(myTrack, VideoMediaType, 600, NULL, 0);
	
	// create the vector media sample
	BeginMediaEdits(myMedia);
		
	myErr = AddMediaSample(myMedia, myHandle, 0, GetHandleSize(myHandle), 600, (SampleDescriptionHandle)mySampleDesc, 1, 0, NULL);
	if (myErr != noErr)
		goto bail;
		
	EndMediaEdits(myMedia);
	
	// add the media to the track
	InsertMediaIntoTrack(myTrack, 0, 0, GetMediaDuration(myMedia), fixed1);
	AddMovieResource(myMovie, myResRefNum, &myResID, NULL);

bail:
	free(myPrompt);
	free(myFileName);

	if (mySampleDesc != NULL)
		DisposeHandle((Handle)mySampleDesc);
	
	if (myResRefNum != 0)
		CloseMovieFile(myResRefNum);

	if (myHandle != NULL)
		DisposeHandle(myHandle);

	if (myMovie != NULL)
		DisposeMovie(myMovie);

	if (myComponent != NULL)
		CloseComponent(myComponent);
}
OSErr makeMovieFromVideoFramesFile(char *inDestMovieFile)
{
    Handle					dataRef = NULL;
    OSType					dataRefType;
    ImageDescriptionHandle	videoDescH = NULL;
	OSErr					err;

#if TARGET_OS_WIN32
	err = InitializeQTML(0);
	if ((err = GetMoviesError()) != noErr) goto bail;
#endif
	err = EnterMovies();
	if ((err = GetMoviesError()) != noErr) goto bail;

	// create a data reference from the full path to the video frames file
	makeDataRefFromFullPath(inDestMovieFile, &dataRef, &dataRefType);
	if (dataRef == NULL) goto bail;

    Movie m = NewMovie(0);
	if ((err = GetMoviesError()) != noErr) goto bail;

	// create the video track for the movie
    Track videoT = NewMovieTrack( m, Long2Fix(kFrameWidth), Long2Fix(kFrameHeight), kNoVolume);
	if ((err = GetMoviesError()) != noErr) goto bail;
	
	// create the video track media
    Media videoM = NewTrackMedia( videoT, VideoMediaType, kMediaTimeScale, dataRef, dataRefType);
	if ((err = GetMoviesError()) != noErr) goto bail;
	
	videoDescH = (ImageDescriptionHandle)NewHandleClear(sizeof(ImageDescription));
	if (videoDescH == NULL) goto bail;
	
	// create the ImageDescription that will describe our video track media samples
    videoDescH[0]->idSize = sizeof(ImageDescription);
    videoDescH[0]->cType = kCodecType; // the codec type for your data 
    videoDescH[0]->temporalQuality = codecNormalQuality;
    videoDescH[0]->spatialQuality = codecNormalQuality;
    videoDescH[0]->width = kFrameWidth;
    videoDescH[0]->height = kFrameHeight;
    videoDescH[0]->hRes = 72L << 16;
    videoDescH[0]->vRes = 72L << 16;
    videoDescH[0]->depth = 32;
    videoDescH[0]->clutID = -1;

    SampleReference64Record videoRef;
    videoRef.dataOffset.hi = 0; videoRef.dataOffset.lo = 0;
    videoRef.dataSize = (kFrameWidth*kFrameHeight*4) * kNumberOfSamples; 
    videoRef.durationPerSample = kDurationPerSample;
    videoRef.numberOfSamples = kNumberOfSamples;
    videoRef.sampleFlags = 0;

	// now add all of our media samples to the movie data file.
    err = AddMediaSampleReferences64(videoM, (SampleDescriptionHandle)videoDescH,
                            1, &videoRef, 0);
	if (err != noErr) goto bail;

    TimeValue mediaDuration = kNumberOfSamples * kDurationPerSample;
	// inserts a reference to our media samples into the track.
    err = InsertMediaIntoTrack(videoT, 0, 0, mediaDuration, 
				fixed1);	// media's rate (1.0 = media's natural playback rate).
	if (err != noErr) goto bail;

    DataHandler outDataHandler;
	// opens a data handler for our movie storage (the video frames file)
    err = OpenMovieStorage (dataRef, dataRefType, kDataHCanWrite, &outDataHandler );
	if (err != noErr) goto bail;
	
	// add a movie to our movie storage container
    err = AddMovieToStorage (m, outDataHandler );
	if (err != noErr) goto bail;

    err = CloseMovieStorage (outDataHandler);
	outDataHandler = NULL;
	
bail:
	if (videoDescH)
	{
		DisposeHandle((Handle)videoDescH);
	}
	
	return err;
}
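/*
 * Sketch (assumption): makeDataRefFromFullPath is not shown in this listing.
 * A minimal implementation could reuse QTNewDataReferenceFromFullPathCFString,
 * the same call used by convertToMP4PathThrough earlier; the names and error
 * handling here are illustrative only.
 */
static void makeDataRefFromFullPath(const char *path, Handle *dataRef, OSType *dataRefType)
{
	CFStringRef cfPath = CFStringCreateWithCString(kCFAllocatorDefault, path,
												   kCFStringEncodingUTF8);
	*dataRef = NULL;
	if (cfPath != NULL) {
		QTNewDataReferenceFromFullPathCFString(cfPath, kQTNativeDefaultPathStyle,
											   0, dataRef, dataRefType);
		CFRelease(cfPath);
	}
}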
void QTCmpr_CompressSequence (WindowObject theWindowObject)
{
	ComponentInstance			myComponent = NULL;
	GWorldPtr					myImageWorld = NULL;		// the graphics world we draw the images in
	PixMapHandle				myPixMap = NULL;
	Movie						mySrcMovie = NULL;
	Track						mySrcTrack = NULL;
	Movie						myDstMovie = NULL;
	Track						myDstTrack = NULL;
	Media						myDstMedia = NULL;
	Rect						myRect;
	PicHandle					myPicture = NULL;
	CGrafPtr					mySavedPort = NULL;
	GDHandle					mySavedDevice = NULL;
	SCTemporalSettings			myTimeSettings;
	SCDataRateSettings			myRateSettings;
	FSSpec						myFile;
	Boolean						myIsSelected = false;
	Boolean						myIsReplacing = false;	
	short						myRefNum = -1;
	StringPtr 					myMoviePrompt = QTUtils_ConvertCToPascalString(kQTCSaveMoviePrompt);
	StringPtr 					myMovieFileName = QTUtils_ConvertCToPascalString(kQTCSaveMovieFileName);
	MatrixRecord				myMatrix;
	ImageDescriptionHandle		myImageDesc = NULL;
	TimeValue					myCurMovieTime = 0L;
	TimeValue					myOrigMovieTime = 0L;		// current movie time, when compression is begun
	short						myFrameNum;		
	long						myFlags = 0L;
	long						myNumFrames = 0L;
	long						mySrcMovieDuration = 0L;	// duration of source movie
	OSErr						myErr = noErr;
#if USE_ASYNC_COMPRESSION
	ICMCompletionProcRecord		myICMComplProcRec;
	ICMCompletionProcRecordPtr	myICMComplProcPtr = NULL;
	OSErr						myICMComplProcErr = noErr;

	myICMComplProcRec.completionProc = NULL;
	myICMComplProcRec.completionRefCon = 0L;
#endif

	if (theWindowObject == NULL)
		goto bail;

	//////////
	//
	// get the movie and the first video track in the movie
	//
	//////////
	
	mySrcMovie = (**theWindowObject).fMovie;
	if (mySrcMovie == NULL)
		goto bail;

	mySrcTrack = GetMovieIndTrackType(mySrcMovie, 1, VideoMediaType, movieTrackMediaType);
	if (mySrcTrack == NULL)
		goto bail;
	
	// stop the movie; we don't want it to be playing while we're (re)compressing it
	SetMovieRate(mySrcMovie, (Fixed)0L);

	// get the current movie time, when compression is begun; we'll restore this later
	myOrigMovieTime = GetMovieTime(mySrcMovie, NULL);

	//////////
	//
	// configure and display the Standard Image Compression dialog box
	//
	//////////
	
	// open an instance of the Standard Image Compression dialog component
	myComponent = OpenDefaultComponent(StandardCompressionType, StandardCompressionSubType);
	if (myComponent == NULL)
		goto bail;

	// turn off "best depth" option in the compression dialog, because all of our
	// buffering is done at 32-bits (regardless of the depth of the source data)
	//
	// a more ambitious approach would be to loop through each of the video sample
	// descriptions in each of the video tracks looking for the deepest depth, and
	// using that for the best depth; better yet, we could find out which compressors
	// were used and set one of those as the default in the compression dialog
	SCGetInfo(myComponent, scPreferenceFlagsType, &myFlags);
	myFlags &= ~scShowBestDepth;
	SCSetInfo(myComponent, scPreferenceFlagsType, &myFlags);

	// because we are recompressing a movie that may have a variable frame rate,
	// we want to allow the user to leave the frame rate text field blank (in which
	// case we can preserve the frame durations of the source movie); if the user
	// enters a number, we will resample the movie at a new frame rate; if we don't
	// clear this flag, the compression dialog will not allow zero in the frame rate field
	//
	// NOTE: we could have set this flag above when we cleared the scShowBestDepth flag;
	// it is done here for clarity.	
	SCGetInfo(myComponent, scPreferenceFlagsType, &myFlags);
	myFlags |= scAllowZeroFrameRate;
	SCSetInfo(myComponent, scPreferenceFlagsType, &myFlags);

	// get the number of video frames in the movie
	myNumFrames = QTUtils_GetFrameCount(mySrcTrack);

	// get the bounding rectangle of the movie, create a 32-bit GWorld with those
	// dimensions, and draw the movie poster picture into it; this GWorld will be
	// used for the test image in the compression dialog box and for rendering movie
	// frames
	myPicture = GetMoviePosterPict(mySrcMovie);
	if (myPicture == NULL)
		goto bail;
		
	GetMovieBox(mySrcMovie, &myRect);

	myErr = NewGWorld(&myImageWorld, 32, &myRect, NULL, NULL, 0L);
	if (myErr != noErr)
		goto bail;
		
	// get the pixmap of the GWorld; we'll lock the pixmap, just to be safe
	myPixMap = GetGWorldPixMap(myImageWorld);
	if (!LockPixels(myPixMap))
		goto bail;

	// draw the movie poster image into the GWorld
	GetGWorld(&mySavedPort, &mySavedDevice);
	SetGWorld(myImageWorld, NULL);
	EraseRect(&myRect);
	DrawPicture(myPicture, &myRect);
	KillPicture(myPicture);
	SetGWorld(mySavedPort, mySavedDevice);

	// set the picture to be displayed in the dialog box; passing NULL for the rect
	// means use the entire image; passing 0 for the flags means to use the default
	// system method of displaying the test image, which is currently a combination
	// of cropping and scaling; personally, I prefer scaling (your mileage may vary)
	SCSetTestImagePixMap(myComponent, myPixMap, NULL, scPreferScaling);

	// install the custom procs, if requested
	// we can install two kinds of custom procedures for use in connection with
	// the standard dialog box: (1) a modal-dialog filter function, and (2) a hook
	// function to handle the custom button in the dialog box
	if (gUseExtendedProcs)
		QTCmpr_InstallExtendedProcs(myComponent, (long)myPixMap);
	
	// set up some default settings for the compression dialog
	SCDefaultPixMapSettings(myComponent, myPixMap, true);
	
	// clear out the default frame rate chosen by Standard Compression (a frame rate
	// of 0 means to use the rate of the source movie)
	myErr = SCGetInfo(myComponent, scTemporalSettingsType, &myTimeSettings);
	if (myErr != noErr)
		goto bail;

	myTimeSettings.frameRate = 0;
	SCSetInfo(myComponent, scTemporalSettingsType, &myTimeSettings);

	// request image compression settings from the user; in other words, put up the dialog box
	myErr = SCRequestSequenceSettings(myComponent);
	if (myErr == scUserCancelled)
		goto bail;

	// get a copy of the temporal settings the user entered; we'll need them for some
	// of our calculations (in a simpler application, we'd never have to look at them)	
	SCGetInfo(myComponent, scTemporalSettingsType, &myTimeSettings);

	//////////
	//
	// adjust the data rate [to be supplied][relevant only for movies that have sound tracks]
	//
	//////////

	
	//////////
	//
	// adjust the sample count
	//
	// if the user wants to resample the frame rate of the movie (as indicated by a non-zero
	// value in the frame rate field), calculate the number of frames and the duration for the new movie
	//
	//////////
	
	if (myTimeSettings.frameRate != 0) {
		long	myDuration = GetMovieDuration(mySrcMovie);
		long	myTimeScale = GetMovieTimeScale(mySrcMovie);
		float	myFloat = (float)myDuration * myTimeSettings.frameRate;
		
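		// the frameRate field is a Fixed (16.16) value, so the extra division by 65536
		// converts it to whole frames per second; in effect,
		// frames = (duration / timescale) * fps -- e.g. 10 seconds at 15 fps yields 150 frames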
		myNumFrames = myFloat / myTimeScale / 65536;
		if (myNumFrames == 0)
			myNumFrames = 1;
	}

	//////////
	//
	// get the name and location of the new movie file
	//
	//////////

	// prompt the user for a file to put the compressed image into; in theory, the name
	// should have a file extension appropriate to the type of compressed data selected by the user;
	// this is left as an exercise for the reader
	QTFrame_PutFile(myMoviePrompt, myMovieFileName, &myFile, &myIsSelected, &myIsReplacing);
	if (!myIsSelected)
		goto bail;

	// delete any existing file of that name
	if (myIsReplacing) {
		myErr = DeleteMovieFile(&myFile);
		if (myErr != noErr)
			goto bail;
	}
		
	//////////
	//
	// create the target movie
	//
	//////////
	
	myErr = CreateMovieFile(&myFile, sigMoviePlayer, smSystemScript, 
								createMovieFileDeleteCurFile | createMovieFileDontCreateResFile, &myRefNum, &myDstMovie);
	if (myErr != noErr)
		goto bail;
	
	// create a new video movie track with the same dimensions as the entire source movie
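	// (NewMovieTrack expects Fixed (16.16) width and height values, hence the << 16 shifts below)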
	myDstTrack = NewMovieTrack(myDstMovie,
								(long)(myRect.right - myRect.left) << 16,
								(long)(myRect.bottom - myRect.top) << 16, kNoVolume);
	if (myDstTrack == NULL)
		goto bail;
	
	// create a media for the new track with the same time scale as the source movie;
	// because the time scales are the same, we don't have to do any time scale conversions.
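	// passing 0 for the data reference and type means the media data is stored in the
	// movie's default data reference -- the file created above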
	myDstMedia = NewTrackMedia(myDstTrack, VIDEO_TYPE, GetMovieTimeScale(mySrcMovie), 0, 0);
	if (myDstMedia == NULL)
		goto bail;
	
	// copy the user data and settings from the source to the dest movie
	CopyMovieSettings(mySrcMovie, myDstMovie);
	
	// set movie matrix to identity and clear the movie clip region (because the conversion
	// process transforms and composites all video tracks into one untransformed video track)
	SetIdentityMatrix(&myMatrix);
	SetMovieMatrix(myDstMovie, &myMatrix);
	SetMovieClipRgn(myDstMovie, NULL);
	
	// set the source movie to highest-quality imaging, so frames are rendered as accurately as possible into our GWorld
	SetMoviePlayHints(mySrcMovie, hintsHighQuality, hintsHighQuality);

	myImageDesc = (ImageDescriptionHandle)NewHandleClear(sizeof(ImageDescription));
	if (myImageDesc == NULL)
		goto bail;

	// prepare for adding frames to the movie
	myErr = BeginMediaEdits(myDstMedia);
	if (myErr != noErr)
		goto bail;

	//////////
	//
	// compress the image sequence
	//
	// we are going to step through the source movie, compress each frame, and then add
	// the compressed frame to the destination movie
	//
	//////////
	
	myErr = SCCompressSequenceBegin(myComponent, myPixMap, NULL, &myImageDesc);
	if (myErr != noErr)
		goto bail;
	
#if USE_ASYNC_COMPRESSION
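	// ask the compressor for live-grab style behavior during asynchronous compression,
	// i.e. keep updating its previous-frame buffers so temporal compression still works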
	myFlags = codecFlagUpdatePrevious | codecFlagUpdatePreviousComp | codecFlagLiveGrab;
	SCSetInfo(myComponent, scCodecFlagsType, &myFlags);
#endif

	// clear out our image GWorld and set movie to draw into it
	SetGWorld(myImageWorld, NULL);
	EraseRect(&myRect);
	SetMovieGWorld(mySrcMovie, myImageWorld, GetGWorldDevice(myImageWorld));

	// set current time value to beginning of the source movie
	myCurMovieTime = 0;

	// get a value we'll need inside the loop
	mySrcMovieDuration = GetMovieDuration(mySrcMovie);

	// loop through all of the interesting times we counted above
	for (myFrameNum = 0; myFrameNum < myNumFrames; myFrameNum++) {
		short			mySyncFlag;
		TimeValue		myDuration;
		long			myDataSize;
		Handle			myCompressedData;

		//////////
		//
		// get the next frame of the source movie
		//
		//////////
		
		// if we are resampling the movie, step to the next frame
		if (myTimeSettings.frameRate) {
			// guard against dividing by zero when only a single frame was requested
			myCurMovieTime = (myNumFrames > 1) ? (myFrameNum * mySrcMovieDuration / (myNumFrames - 1)) : 0;
			myDuration = mySrcMovieDuration / myNumFrames;
		} else {
			OSType		myMediaType = VIDEO_TYPE;
			
			myFlags = nextTimeMediaSample;

			// if this is the first frame, include the frame we are currently on		
			if (myFrameNum == 0)
				myFlags |= nextTimeEdgeOK;
			
			// if we are maintaining the frame durations of the source movie,
			// skip to the next interesting time and get the duration for that frame
			GetMovieNextInterestingTime(mySrcMovie, myFlags, 1, &myMediaType, myCurMovieTime, 0, &myCurMovieTime, &myDuration);
		}
		
		SetMovieTimeValue(mySrcMovie, myCurMovieTime);
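		// a few MoviesTask calls give the movie toolbox time to finish rendering the
		// current frame into our offscreen GWorld before we compress it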
		MoviesTask(mySrcMovie, 0);
		MoviesTask(mySrcMovie, 0);
		MoviesTask(mySrcMovie, 0);

		// if data rate constraining is being done, tell Standard Compression the
		// duration of the current frame in milliseconds; we only need to do this
		// if the frames have variable durations
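		// (SCGetInfo returns noErr, i.e. 0, when it can supply the data rate settings,
		// so the negated test below runs the block only in that case)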
		if (!SCGetInfo(myComponent, scDataRateSettingsType, &myRateSettings)) {
			myRateSettings.frameDuration = myDuration * 1000 / GetMovieTimeScale(mySrcMovie);
			SCSetInfo(myComponent, scDataRateSettingsType, &myRateSettings);
		}

		//////////
		//
		// compress the current frame of the source movie and add it to the destination movie
		//
		//////////
		
		// if SCCompressSequenceFrame completes successfully, myCompressedData will hold
		// a handle to the newly-compressed image data and myDataSize will be the size of
		// the compressed data (which will usually be different from the size of the handle);
		// also mySyncFlag will be a value that indicates whether or not the frame is a
		// key frame (and which we pass directly to AddMediaSample); note that we do not need
		// to dispose of myCompressedData, since SCCompressSequenceEnd will do that for us
#if !USE_ASYNC_COMPRESSION
		myErr = SCCompressSequenceFrame(myComponent, myPixMap, &myRect, &myCompressedData, &myDataSize, &mySyncFlag);
		if (myErr != noErr)
			goto bail;
#else
		if (myICMComplProcPtr == NULL) {
			myICMComplProcRec.completionProc = NewICMCompletionProc(QTCmpr_CompletionProc);
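			// store the address of myICMComplProcErr in the refCon, so the completion
			// routine can report its result back to the wait loop below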
			myICMComplProcRec.completionRefCon = (long)&myICMComplProcErr;
			myICMComplProcPtr = &myICMComplProcRec;
		}
		
		myICMComplProcErr = kAsyncDefaultValue;
		
		myErr = SCCompressSequenceFrameAsync(myComponent, myPixMap, &myRect, &myCompressedData, &myDataSize, &mySyncFlag, myICMComplProcPtr);
		if (myErr != noErr)
			goto bail;

		// spin our wheels while we're waiting for the compress call to complete
		while (myICMComplProcErr == kAsyncDefaultValue) {
			EventRecord			myEvent;
			
			WaitNextEvent(0, &myEvent, 60, NULL);
			SCAsyncIdle(myComponent);
		}
		myErr = myICMComplProcErr;
#endif

		myErr = AddMediaSample(myDstMedia, myCompressedData, 0, myDataSize, myDuration, (SampleDescriptionHandle)myImageDesc, 1, mySyncFlag, NULL);
		if (myErr != noErr)
			goto bail;
	}
	
	// close the compression sequence; this will dispose of the image description
	// and compressed data handles allocated by SCCompressSequenceBegin
	SCCompressSequenceEnd(myComponent);

	//////////
	//
	// add the media data to the destination movie
	//
	//////////
	
	myErr = EndMediaEdits(myDstMedia);
	if (myErr != noErr)
		goto bail;
	
	InsertMediaIntoTrack(myDstTrack, 0, 0, GetMediaDuration(myDstMedia), fixed1);

	// add the movie resource to the dst movie file.
	myErr = AddMovieResource(myDstMovie, myRefNum, NULL, NULL);
	if (myErr != noErr)
		goto bail;

	// flatten the movie data [to be supplied]
	
	// close the movie file
	CloseMovieFile(myRefNum);
	
bail:
	// close the Standard Compression component
	if (myComponent != NULL)
		CloseComponent(myComponent);

	if (mySrcMovie != NULL) {
		// restore the source movie's original graphics port and device
		SetMovieGWorld(mySrcMovie, mySavedPort, mySavedDevice);

		// restore the source movie's original movie time
		SetMovieTimeValue(mySrcMovie, myOrigMovieTime);
	}
	
	// restore the original graphics port and device
	SetGWorld(mySavedPort, mySavedDevice);

	// delete the GWorld we were drawing frames into
	if (myImageWorld != NULL)
		DisposeGWorld(myImageWorld);
	
#if USE_ASYNC_COMPRESSION
	if (myICMComplProcRec.completionProc != NULL)
		DisposeICMCompletionUPP(myICMComplProcRec.completionProc);
#endif

	free(myMoviePrompt);
	free(myMovieFileName);
}