Code example #1
File: quartzWindow.cpp  Project: ardeujho/self
OSStatus QuartzWindow::AddHandledEvent_wrap( uint32* eclass, uint ec_len, uint ekind, void* FH) {
  if (ec_len != 1)  { failure( FH, "class needs to have four bytes"); return NULL; }
  EventTypeSpec es;
  es.eventClass = EndianU32_BtoN(*eclass);
  es.eventKind = ekind;
  return AddEventTypesToHandler(_my_event_handler, 1, &es);
}
Code example #2
static OSErr SpriteUtils_GetImageGroupID (QTAtomContainer theKeySample, QTAtom theImagesContainerAtom, short theImageIndex, long *theGroupID)
{
	QTAtom			myImageAtom, myImageGroupAtom;
	OSErr			myErr = noErr;

	myImageAtom = QTFindChildByIndex(theKeySample, theImagesContainerAtom, kSpriteImageAtomType, theImageIndex, NULL);
	if (myImageAtom == 0)	{ 
		myErr = cannotFindAtomErr;
		goto bail;
	}

	myImageGroupAtom = QTFindChildByIndex(theKeySample, myImageAtom, kSpriteImageGroupIDAtomType, 1, NULL);
	
	if (myImageGroupAtom == 0)
		*theGroupID = 0;
	else {
		myErr = QTCopyAtomDataToPtr(theKeySample, myImageGroupAtom, false, sizeof(*theGroupID), (Ptr)theGroupID, NULL);
		if (myErr != noErr)
			goto bail;

		*theGroupID = EndianU32_BtoN(*theGroupID);		// return native endian long
	}
	
bail:
	return(myErr);
}
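
A hedged call sketch, assuming theKeySample and theImagesContainerAtom were obtained earlier from a sprite key sample via the usual sprite utilities (the variable names are illustrative, not part of the original source):

	long myGroupID = 0;
	OSErr myErr = SpriteUtils_GetImageGroupID(theKeySample, theImagesContainerAtom, 1, &myGroupID);
	if (myErr == noErr) {
		// myGroupID is now in native byte order; 0 means the image carries no group ID atom
	}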
Code example #3
File: stream_flac.c  Project: mecke/xiph-qt
int process_first_packet__flac(StreamInfo *si, ogg_page *op, ogg_packet *opckt)
{
    unsigned long serialnoatom[3] = { EndianU32_NtoB(sizeof(serialnoatom)), EndianU32_NtoB(kCookieTypeOggSerialNo),
                                      EndianS32_NtoB(ogg_page_serialno(op)) };
    unsigned long atomhead[2] = { EndianU32_NtoB(opckt->bytes + sizeof(atomhead) - 13), EndianU32_NtoB(kCookieTypeFLACStreaminfo) };

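    /* First Ogg FLAC packet layout: bytes 0..12 are the Ogg mapping header
       (0x7F, "FLAC", version, the 16-bit header-packet count at offset 7, "fLaC");
       byte 13 onward is the STREAMINFO metadata block, whose 32 bits starting
       at packet offset 27 pack the sample rate, channel count and bit depth. */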
    UInt32 sib = EndianU32_BtoN(* (UInt32 *) (((char *)opckt->packet) + 27));
    si->si_flac.metablocks =  (SInt32) EndianU16_BtoN(* (UInt16 *) (((char *)opckt->packet) + 7));

    sib >>= 4;
    si->si_flac.bps = (sib & 0x1f) + 1;
    sib >>= 5;
    si->numChannels = (sib & 0x07) + 1;
    si->rate = (sib >> 3) & 0xfffff;

    //si->lastMediaInserted = 0;
    si->mediaLength = 0;

    dbg_printf("! -- - flac_first_packet: ch: %d, rate: %ld, bps: %ld\n", si->numChannels, si->rate, si->si_flac.bps);

    PtrAndHand(serialnoatom, si->soundDescExtension, sizeof(serialnoatom)); //check errors?
    PtrAndHand(atomhead, si->soundDescExtension, sizeof(atomhead)); //check errors?
    PtrAndHand((((char *)opckt->packet) + 13), si->soundDescExtension, opckt->bytes - 13); //check errors?

    si->si_flac.state = kFStateReadingComments;

    return 0;
}
Code example #4
File: quartzPrims.cpp  Project: AaronNGray/self
oop GetEventParam_CGSize(EventRef evt, uint32* name, uint32 name_len, void* FH) {
  if (name_len != 1)  { failure(FH, "name length is not 1 32-bit int"); return NULL; }
  HISize p;
  OSStatus e = GetEventParameter( evt, EndianU32_BtoN(*name), typeHISize,
                                  NULL, sizeof(p), NULL, &p);
  if (e != noErr)  return (oop)reportOSError(e, "GetEventParam_CGSize", FH);
  objVectorOop r = Memory->objVectorObj->cloneSize(2);
  r->obj_at_put(0, as_floatOop(p.width), false);
  r->obj_at_put(1, as_floatOop(p.height), false);
  return r;
}
Code example #5
static void ASBD_BtoN(const AudioStreamBasicDescription *infmt, AudioStreamBasicDescription *outfmt)
{
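	// mSampleRate is a Float64, so its bytes are swapped through a UInt64 alias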
	*(UInt64 *)&outfmt->mSampleRate = EndianU64_BtoN(*(UInt64 *)&infmt->mSampleRate);
	outfmt->mFormatID = EndianU32_BtoN(infmt->mFormatID);
	outfmt->mFormatFlags = EndianU32_BtoN(infmt->mFormatFlags);
	outfmt->mBytesPerPacket = EndianU32_BtoN(infmt->mBytesPerPacket);
	outfmt->mFramesPerPacket = EndianU32_BtoN(infmt->mFramesPerPacket);
	outfmt->mBytesPerFrame = EndianU32_BtoN(infmt->mBytesPerFrame);
	outfmt->mChannelsPerFrame = EndianU32_BtoN(infmt->mChannelsPerFrame);
	outfmt->mBitsPerChannel = EndianU32_BtoN(infmt->mBitsPerChannel);
}
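
The sample-rate swap above reinterprets a Float64 through a UInt64 pointer cast, which technically violates strict aliasing. A minimal aliasing-safe sketch of the same swap, assuming the usual CoreServices Endian macros (the helper name is an assumption, not part of the original source):

static Float64 Float64_BtoN(Float64 bigEndianValue)
{
	// Reinterpret the Float64 bits as a UInt64, swap, then reinterpret back.
	union { Float64 f; UInt64 i; } u;
	u.f = bigEndianValue;
	u.i = EndianU64_BtoN(u.i);
	return u.f;
}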
Code example #6
void	CAAudioFileConverter::ReadCAFInfo()
{
	FSRef fsref;
	AudioFileID afid = 0;
	CAFSourceInfo info;
	UInt32 size;
	OSStatus err;
	
	try {
		XThrowIfError(FSPathMakeRef((UInt8 *)mParams.input.filePath, &fsref, NULL), "couldn't locate input file");
		XThrowIfError(AudioFileOpen(&fsref, fsRdPerm, 0, &afid), "couldn't open input file");
		size = sizeof(AudioFileTypeID);
		XThrowIfError(AudioFileGetProperty(afid, kAudioFilePropertyFileFormat, &size, &info.filetype), "couldn't get input file's format");
		if (info.filetype == kAudioFileCAFType) {
			size = sizeof(info);
			err = AudioFileGetUserData(afid, 'srcI', 0, &size, &info);
			if (!err) {
				// restore the following from the original file info:
				//	filetype
				//	data format
				//	filename
				AudioStreamBasicDescription destfmt;
				ASBD_BtoN((AudioStreamBasicDescription *)info.asbd, &destfmt);
				mParams.output.dataFormat = destfmt;
				mParams.output.fileType = EndianU32_BtoN(info.filetype);
				if (mParams.output.filePath == NULL) {
					int len = strlen(mParams.input.filePath) + strlen(info.filename) + 2;
					char *newname = (char *)malloc(len);	// $$$ leaked
					
					const char *dir = dirname(mParams.input.filePath);
					if (dir && (dir[0] !='.' && dir[1] != '/'))
						sprintf(newname, "%s/%s", dir, info.filename);
					else
						strcpy(newname, info.filename);
					mParams.output.filePath = newname;
					mParams.flags = (mParams.flags & ~kOpt_OverwriteOutputFile) | kOpt_NoSanitizeOutputFormat;
				}
			}
		}
		AudioFileClose(afid);
	}
	catch (...) {
		if (afid)
			AudioFileClose(afid);
		throw;
	}
}
Code example #7
void ACFLACCodec::ParseMagicCookie(const void* inMagicCookieData, UInt32 inMagicCookieDataByteSize, FLAC__StreamMetadata_StreamInfo * theStreamInfo) const
{
	FLAC__StreamMetadata_StreamInfo * tempConfig;
	UInt32 cookieOffset = 0;
	
	// We might get a cookie with atoms -- strip them off
	if (inMagicCookieDataByteSize > sizeof(FLAC__StreamMetadata_StreamInfo))
	{
		if(EndianU32_BtoN(((AudioFormatAtom *)inMagicCookieData)->atomType) == 'frma')
		{
			cookieOffset = (sizeof(AudioFormatAtom) + sizeof(FullAtomHeader));
		}
	} 
	// Finally, parse the cookie for the bits we care about
	tempConfig = (FLAC__StreamMetadata_StreamInfo *)(&((Byte *)(inMagicCookieData))[cookieOffset]);
	theStreamInfo->min_blocksize	= EndianU32_BtoN( tempConfig->min_blocksize );
	theStreamInfo->max_blocksize	= EndianU32_BtoN( tempConfig->max_blocksize );
	theStreamInfo->min_framesize	= EndianU32_BtoN( tempConfig->min_framesize );
	theStreamInfo->max_framesize	= EndianU32_BtoN( tempConfig->max_framesize );
	theStreamInfo->sample_rate		= EndianU32_BtoN( tempConfig->sample_rate );
	theStreamInfo->channels		= EndianU32_BtoN( tempConfig->channels );
	theStreamInfo->bits_per_sample	= EndianU32_BtoN( tempConfig->bits_per_sample );
	theStreamInfo->total_samples	= EndianU64_BtoN( tempConfig->total_samples );
	theStreamInfo->md5sum[0]		= tempConfig->md5sum[0];
	theStreamInfo->md5sum[1]		= tempConfig->md5sum[1];
	theStreamInfo->md5sum[2]		= tempConfig->md5sum[2];
	theStreamInfo->md5sum[3]		= tempConfig->md5sum[3];
	theStreamInfo->md5sum[4]		= tempConfig->md5sum[4];
	theStreamInfo->md5sum[5]		= tempConfig->md5sum[5];
	theStreamInfo->md5sum[6]		= tempConfig->md5sum[6];
	theStreamInfo->md5sum[7]		= tempConfig->md5sum[7];
	theStreamInfo->md5sum[8]		= tempConfig->md5sum[8];
	theStreamInfo->md5sum[9]		= tempConfig->md5sum[9];
	theStreamInfo->md5sum[10]		= tempConfig->md5sum[10];
	theStreamInfo->md5sum[11]		= tempConfig->md5sum[11];
	theStreamInfo->md5sum[12]		= tempConfig->md5sum[12];
	theStreamInfo->md5sum[13]		= tempConfig->md5sum[13];
	theStreamInfo->md5sum[14]		= tempConfig->md5sum[14];
	theStreamInfo->md5sum[15]		= tempConfig->md5sum[15];
}
Code example #8
static OSErr SpriteUtils_GetImageDescription (QTAtomContainer theKeySample, QTAtom theImagesContainerAtom, short theImageIndex, ImageDescriptionHandle theImageDesc)
{
	QTAtom						myImageAtom, myImageDataAtom;
	UInt8						mySaveState;
	UInt32						mySize;
	OSErr						myErr = noErr;

	myImageAtom = QTFindChildByIndex(theKeySample, theImagesContainerAtom, kSpriteImageAtomType, theImageIndex, NULL);
	if (myImageAtom == 0)	{ 
		myErr = cannotFindAtomErr; 
		goto bail;
	}

	myImageDataAtom = QTFindChildByIndex(theKeySample, myImageAtom, kSpriteImageDataAtomType, 1, NULL);
	if (myImageDataAtom == 0)	{ 
		myErr = cannotFindAtomErr; 
		goto bail;
	}

	mySaveState = HGetState((Handle)theImageDesc);
	HUnlock((Handle)theImageDesc);

	// copy the data (ImageDescription followed by image data) to a handle
	myErr = QTCopyAtomDataToHandle(theKeySample, myImageDataAtom, (Handle)theImageDesc);
	if (myErr != noErr)
		goto bail;

	mySize = EndianU32_BtoN((**theImageDesc).idSize);

	// pull off anything following the image description (& its color table, if any, and any image description extensions)
	SetHandleSize((Handle)theImageDesc, mySize);

#if TARGET_RT_LITTLE_ENDIAN
	EndianUtils_ImageDescription_BtoN(theImageDesc);
#endif

	HSetState((Handle)theImageDesc, mySaveState);
	myErr = MemError();
	
bail:
	return(myErr);
}
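
A hedged call sketch; the handle must already exist because the routine copies the atom data into it and then trims it to the image description's idSize (variable names are assumptions):

	ImageDescriptionHandle myDesc = (ImageDescriptionHandle)NewHandleClear(sizeof(ImageDescription));
	if (myDesc != NULL) {
		OSErr myErr = SpriteUtils_GetImageDescription(theKeySample, theImagesContainerAtom, 1, myDesc);
		if (myErr == noErr) {
			// (**myDesc).width and (**myDesc).height are now usable in native byte order
		}
		DisposeHandle((Handle)myDesc);
	}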
Code example #9
File: CASpeexDecoder.cpp  Project: JanX2/XiphQT
void CASpeexDecoder::InitializeCompressionSettings()
{
    if (mCookie == NULL)
        return;

    if (mCompressionInitialized) {
        memset(&mSpeexHeader, 0, sizeof(mSpeexHeader));

        mSpeexStereoState.balance = 1.0;
        mSpeexStereoState.e_ratio = 0.5;
        mSpeexStereoState.smooth_left = 1.0;
        mSpeexStereoState.smooth_right = 1.0;

        if (mSpeexDecoderState != NULL) {
            speex_decoder_destroy(mSpeexDecoderState);
            mSpeexDecoderState = NULL;
        }
    }

    mCompressionInitialized = false;

    OggSerialNoAtom *atom = reinterpret_cast<OggSerialNoAtom*> (mCookie);
    Byte *ptrheader = mCookie + EndianU32_BtoN(atom->size);
    CookieAtomHeader *aheader = reinterpret_cast<CookieAtomHeader*> (ptrheader);

    // scan quickly through the cookie, check types and packet sizes
    if (EndianS32_BtoN(atom->type) != kCookieTypeOggSerialNo || static_cast<UInt32> (ptrheader - mCookie) > mCookieSize)
        return;
    ptrheader += EndianU32_BtoN(aheader->size);
    if (EndianS32_BtoN(aheader->type) != kCookieTypeSpeexHeader || static_cast<UInt32> (ptrheader - mCookie) > mCookieSize)
        return;
    // we ignore the rest: comments and extra headers

    // all OK, back to the first speex packet
    aheader = reinterpret_cast<CookieAtomHeader*> (mCookie + EndianU32_BtoN(atom->size));
    SpeexHeader *inheader = reinterpret_cast<SpeexHeader *> (&aheader->data[0]);

    // TODO: convert, at some point, mSpeexHeader to a pointer?
    mSpeexHeader.bitrate =                 EndianS32_LtoN(inheader->bitrate);
    mSpeexHeader.extra_headers =           EndianS32_LtoN(inheader->extra_headers);
    mSpeexHeader.frame_size =              EndianS32_LtoN(inheader->frame_size);
    mSpeexHeader.frames_per_packet =       EndianS32_LtoN(inheader->frames_per_packet);
    mSpeexHeader.header_size =             EndianS32_LtoN(inheader->header_size);
    mSpeexHeader.mode =                    EndianS32_LtoN(inheader->mode);
    mSpeexHeader.mode_bitstream_version =  EndianS32_LtoN(inheader->mode_bitstream_version);
    mSpeexHeader.nb_channels =             EndianS32_LtoN(inheader->nb_channels);
    mSpeexHeader.rate =                    EndianS32_LtoN(inheader->rate);
    mSpeexHeader.reserved1 =               EndianS32_LtoN(inheader->reserved1);
    mSpeexHeader.reserved2 =               EndianS32_LtoN(inheader->reserved2);
    mSpeexHeader.speex_version_id =        EndianS32_LtoN(inheader->speex_version_id);
    mSpeexHeader.vbr =                     EndianS32_LtoN(inheader->vbr);

    if (mSpeexHeader.mode >= SPEEX_NB_MODES)
        CODEC_THROW(kAudioCodecUnsupportedFormatError);

    //TODO: check bitstream version here

    mSpeexDecoderState = speex_decoder_init(speex_lib_get_mode(mSpeexHeader.mode));

    if (!mSpeexDecoderState)
        CODEC_THROW(kAudioCodecUnsupportedFormatError);

    //TODO: fix some of the header fields here

    int enhzero = 0;
    speex_decoder_ctl(mSpeexDecoderState, SPEEX_SET_ENH, &enhzero);

    if (mSpeexHeader.nb_channels == 2)
    {
        SpeexCallback callback;
        callback.callback_id = SPEEX_INBAND_STEREO;
        callback.func = speex_std_stereo_request_handler;
        callback.data = &mSpeexStereoState;
        speex_decoder_ctl(mSpeexDecoderState, SPEEX_SET_HANDLER, &callback);
    }

    mCompressionInitialized = true;
}
Code example #10
void QTEffects_RespondToDialogSelection (OSErr theErr)
{
	Boolean					myDialogWasCancelled = false;
	short					myResID = movieInDataForkResID;
	UInt16					myMovieIter;
	short					mySrcMovieRefNum = 0;
	Movie					myPrevSrcMovie = NULL;
	Track					myPrevSrcTrack = NULL;
	Movie					myNextSrcMovie = NULL;
	Track					myNextSrcTrack = NULL;
	short					myDestMovieRefNum = 0;
	FSSpec					myFile;
	Boolean					myIsSelected = false;
	Boolean					myIsReplacing = false;	
	StringPtr 				myPrompt = QTUtils_ConvertCToPascalString(kEffectsSaveMoviePrompt);
	StringPtr 				myFileName = QTUtils_ConvertCToPascalString(kEffectsSaveMovieFileName);
	Movie					myDestMovie = NULL;
	Fixed					myDestMovieWidth, myDestMovieHeight;
	ImageDescriptionHandle	myDesc = NULL;
	Track					videoTrackFX, videoTrackA, videoTrackB;
	Media					videoMediaFX, videoMediaA, videoMediaB;
	TimeValue				myCurrentDuration = 0;
	TimeValue				myReturnedDuration;
	Boolean					isFirstTransition = true;
	TimeValue				myMediaTransitionDuration;
	TimeValue				myMediaFXStartTime, myMediaFXDuration;
	OSType					myEffectCode;
	long					myFlags = createMovieFileDeleteCurFile | createMovieFileDontCreateResFile;
	long					myLong;
	OSErr					myErr = noErr;

	// standard parameter box has been dismissed, so remember that fact
	gEffectsDialog = 0L;
	
	myDialogWasCancelled = (theErr == userCanceledErr);
	
	// we're finished with the effect list and movie posters	
	QTDisposeAtomContainer(gEffectList);
	
	if (gPosterA != NULL)
		KillPicture(gPosterA);
		
	if (gPosterB != NULL)
		KillPicture(gPosterB);
	
	// when the sign says stop, then stop
	if (myDialogWasCancelled)
		goto bail;

	// add atoms naming the sources to gEffectSample
	myLong = EndianU32_NtoB(kSourceOneName);
	QTInsertChild(gEffectSample, kParentAtomIsContainer, kEffectSourceName, 1, 0, sizeof(myLong), &myLong, NULL);

	myLong = EndianU32_NtoB(kSourceTwoName);
	QTInsertChild(gEffectSample, kParentAtomIsContainer, kEffectSourceName, 2, 0, sizeof(myLong), &myLong, NULL);
	
	// extract the 'what' atom to find out what kind of effect it is
	{
		QTAtom			myEffectAtom;
		QTAtomID		myEffectAtomID;
		long			myEffectCodeSize;
		Ptr				myEffectCodePtr;

		myEffectAtom = QTFindChildByIndex(gEffectSample, kParentAtomIsContainer, kParameterWhatName, kParameterWhatID, &myEffectAtomID);
		
		myErr = QTLockContainer(gEffectSample);
		BailError(myErr);

		myErr = QTGetAtomDataPtr(gEffectSample, myEffectAtom, &myEffectCodeSize, &myEffectCodePtr);
		BailError(myErr);

		if (myEffectCodeSize != sizeof(OSType)) {
			myErr = paramErr;
			goto bail;
		}
		
		myEffectCode = *(OSType *)myEffectCodePtr;		// "tsk"
		myEffectCode = EndianU32_BtoN(myEffectCode);	// because the data is read from an atom container
		
		myErr = QTUnlockContainer(gEffectSample);
		BailError(myErr);
	}

	// ask the user for the name of the new movie file
	QTFrame_PutFile(myPrompt, myFileName, &myFile, &myIsSelected, &myIsReplacing);
	if (!myIsSelected)
		goto bail;				// deal with user cancelling

	// create a movie file for the destination movie
	myErr = CreateMovieFile(&myFile, FOUR_CHAR_CODE('TVOD'), 0, myFlags, &myDestMovieRefNum, &myDestMovie);
	BailError(myErr);
	
	// open the first file as a movie; call the first movie myPrevSrcMovie
	myErr = OpenMovieFile(&gSpecList[0], &mySrcMovieRefNum, fsRdPerm);
	BailError(myErr);
	
	myErr = NewMovieFromFile(&myPrevSrcMovie, mySrcMovieRefNum, NULL, NULL, 0, NULL);
	BailError(myErr);
	
	myErr = CloseMovieFile(mySrcMovieRefNum);
	BailError(myErr);
	
	// if the movie is shorter than kMinimumDuration, scale it to that length
	SetMovieTimeScale(myPrevSrcMovie, kTimeScale);
	myErr = QTEffects_GetFirstVideoTrackInMovie(myPrevSrcMovie, &myPrevSrcTrack);
	BailNil(myPrevSrcTrack);
	
	if (GetTrackDuration(myPrevSrcTrack) < kMinimumDuration) {
		myErr = ScaleTrackSegment(myPrevSrcTrack, 0, GetTrackDuration(myPrevSrcTrack), kMinimumDuration);
		BailError(myErr);
	}
	
	// find out how big the first movie is; we'll use it as the size of all our tracks
	GetTrackDimensions(myPrevSrcTrack, &myDestMovieWidth, &myDestMovieHeight);
	
#if USES_MAKE_IMAGE_DESC_FOR_EFFECT
	// create a new sample description for the effect,
	// which is just an image description specifying the effect and its dimensions
	myErr = MakeImageDescriptionForEffect(myEffectCode, &myDesc);
	if (myErr != noErr)
		BailError(myErr);
#else
	// create a new sample description for the effect,
	// which is just an image description specifying the effect and its dimensions
	myDesc = (ImageDescriptionHandle)NewHandleClear(sizeof(ImageDescription));
	BailNil(myDesc);
	
	(**myDesc).idSize = sizeof(ImageDescription);
	(**myDesc).cType = myEffectCode;
	(**myDesc).hRes = 72L << 16;
	(**myDesc).vRes = 72L << 16;
	(**myDesc).dataSize = 0L;
	(**myDesc).frameCount = 1;
	(**myDesc).depth = 0;
	(**myDesc).clutID = -1;
#endif
	
	// fill in the fields of the sample description
	(**myDesc).vendor = kAppleManufacturer;
	(**myDesc).temporalQuality = codecNormalQuality;
	(**myDesc).spatialQuality = codecNormalQuality;
	(**myDesc).width = FixRound(myDestMovieWidth);
	(**myDesc).height = FixRound(myDestMovieHeight);

	// add three video tracks to the destination movie:
	// 	- videoTrackFX is where the effects and stills live; it's user-visible.
	//	- videoTrackA is where the "source A"s for effects live; it's hidden by the input map
	//	- videoTrackB is where the "source B"s for effects live; it's hidden by the input map
	videoTrackFX = NewMovieTrack(myDestMovie, myDestMovieWidth, myDestMovieHeight, 0);
	BailNil(videoTrackFX);
	videoMediaFX = NewTrackMedia(videoTrackFX, VideoMediaType, kTimeScale, NULL, 0);
	BailNil(videoMediaFX);
	myErr = BeginMediaEdits(videoMediaFX);
	BailError(myErr);
	
	videoTrackA = NewMovieTrack(myDestMovie, myDestMovieWidth, myDestMovieHeight, 0);
	BailNil(videoTrackA);
	videoMediaA = NewTrackMedia(videoTrackA, VideoMediaType, kTimeScale, NULL, 0);
	BailNil(videoMediaA);

	videoTrackB = NewMovieTrack(myDestMovie, myDestMovieWidth, myDestMovieHeight, 0);
	BailNil(videoTrackB);
	videoMediaB = NewTrackMedia(videoTrackB, VideoMediaType, kTimeScale, NULL, 0);
	BailNil(videoMediaB);

	// create the input map
	{
		long				myRefIndex1, myRefIndex2;
		QTAtomContainer		myInputMap;
		QTAtom				myInputAtom;
		OSType				myInputType;

		QTNewAtomContainer(&myInputMap);

		// first input
		if (videoTrackA) {
		
			AddTrackReference(videoTrackFX, videoTrackA, kTrackModifierReference, &myRefIndex1);
			QTInsertChild(myInputMap, kParentAtomIsContainer, kTrackModifierInput, myRefIndex1, 0, 0, NULL, &myInputAtom);
	
			myInputType = EndianU32_NtoB(kTrackModifierTypeImage);
			QTInsertChild(myInputMap, myInputAtom, kTrackModifierType, 1, 0, sizeof(myInputType), &myInputType, NULL);
	
			myLong = EndianU32_NtoB(kSourceOneName);
			QTInsertChild(myInputMap, myInputAtom, kEffectDataSourceType, 1, 0, sizeof(myLong), &myLong, NULL);
		}

		// second input
		if (videoTrackB) {
		
			AddTrackReference(videoTrackFX, videoTrackB, kTrackModifierReference, &myRefIndex2);
			QTInsertChild(myInputMap, kParentAtomIsContainer, kTrackModifierInput, myRefIndex2, 0, 0, NULL, &myInputAtom);
	
			myInputType = EndianU32_NtoB(kTrackModifierTypeImage);
			QTInsertChild(myInputMap, myInputAtom, kTrackModifierType, 1, 0, sizeof(myInputType), &myInputType, NULL);
	
			myLong = EndianU32_NtoB(kSourceTwoName);
			QTInsertChild(myInputMap, myInputAtom, kEffectDataSourceType, 1, 0, sizeof(myLong), &myLong, NULL);
		}

		// set that map
		SetMediaInputMap(GetTrackMedia(videoTrackFX), myInputMap);
		
		QTDisposeAtomContainer(myInputMap);
	}

	myCurrentDuration = 0;

#if MAKE_STILL_SECTIONS
	// copy the first sample of the first video track of the first movie to videoTrackFX, with duration kStillDuration.
	myErr = CopyPortionOfTrackToTrack(myPrevSrcTrack, eStartPortion + eMiddlePortion, videoTrackFX, myCurrentDuration, &myReturnedDuration);
	BailError(myErr);
	
	myCurrentDuration += myReturnedDuration;
#endif 

	// now process any remaining files
	myMovieIter = 1;
	while (myMovieIter < gSpecCount) {
		
		// open the next file as a movie; call it nextSourceMovie
		myErr = OpenMovieFile(&gSpecList[myMovieIter], &mySrcMovieRefNum, fsRdPerm);
		BailError(myErr);
		
		myErr = NewMovieFromFile(&myNextSrcMovie, mySrcMovieRefNum, NULL, NULL, 0, NULL);
		BailError(myErr);
		
		// we're done with the movie file, so close it
		myErr = CloseMovieFile(mySrcMovieRefNum);
		BailError(myErr);
		
		// if the movie is shorter than kMinimumDuration, scale it to that length
		SetMovieTimeScale(myNextSrcMovie, kTimeScale);
		myErr = QTEffects_GetFirstVideoTrackInMovie(myNextSrcMovie, &myNextSrcTrack);
		BailNil(myNextSrcTrack);
		
		if (GetTrackDuration(myNextSrcTrack) < kMinimumDuration) {
			myErr = ScaleTrackSegment(myNextSrcTrack, 0, GetTrackDuration(myNextSrcTrack), kMinimumDuration);
			BailError(myErr);
		}

		// create a transition effect from the previous source movie's first video sample to the next source movie's first video sample
		// (the effect should have duration kEffectDuration);
		// this involves adding one sample to each of the three video tracks:
		
		//    sample from previous source movie	 -> videoTrackA
		myErr = QTEffects_CopyPortionOfTrackToTrack(myPrevSrcTrack, eFinishPortion, videoTrackA, myCurrentDuration, &myReturnedDuration);
		BailError(myErr);
		
		//    sample from next source movie    	 -> videoTrackB
		myErr = QTEffects_CopyPortionOfTrackToTrack(myNextSrcTrack, eStartPortion, videoTrackB, myCurrentDuration, &myReturnedDuration);
		BailError(myErr);
		
		//    effect sample                 	  -> videoTrackFX
		if (isFirstTransition) {
			myMediaTransitionDuration = myReturnedDuration;
			myMediaFXStartTime = GetMediaDuration(videoMediaFX);
			myErr = AddMediaSample(videoMediaFX, gEffectSample, 0, GetHandleSize(gEffectSample), myMediaTransitionDuration, (SampleDescriptionHandle)myDesc, 1, 0, NULL);
			BailError(myErr);
			
			myMediaFXDuration = GetMediaDuration(videoMediaFX) - myMediaFXStartTime;
			isFirstTransition = false;
		}
		
		myErr = InsertMediaIntoTrack(videoTrackFX, myCurrentDuration, myMediaFXStartTime, myMediaFXDuration, FixRatio(myReturnedDuration, myMediaTransitionDuration));
		BailError(myErr);
		
		myCurrentDuration += myReturnedDuration;
		
#if MAKE_STILL_SECTIONS
		// copy the first video sample of myNextSrcMovie to videoTrackFX, with duration kStillDuration.
		myErr = QTEffects_CopyPortionOfTrackToTrack(myNextSrcTrack, eMiddlePortion + (myMovieIter + 1 == theSpecCount) ? eFinishPortion : 0, videoTrackFX, myCurrentDuration, &myReturnedDuration);
		BailError(myErr);
		
		myCurrentDuration += myReturnedDuration;
#endif // MAKE_STILL_SECTIONS
		
		// dispose of previous source movie.  
		DisposeMovie(myPrevSrcMovie);
		
		myPrevSrcMovie = myNextSrcMovie;
		myPrevSrcTrack = myNextSrcTrack;
		myNextSrcMovie = NULL;
		myNextSrcTrack = NULL;
		
		myMovieIter++;
	} // while
	
	myErr = EndMediaEdits(videoMediaFX);
	BailError(myErr);

	myErr = AddMovieResource(myDestMovie, myDestMovieRefNum, &myResID, "\pMovie 1");
	BailError(myErr);
	
	CloseMovieFile(myDestMovieRefNum);
	
	if (myPrevSrcMovie != NULL)
		DisposeMovie(myPrevSrcMovie);
		
	DisposeMovie(myDestMovie);
	
bail:
	free(myPrompt);
	free(myFileName);

	QTDisposeAtomContainer(gEffectSample);
	DisposeHandle((Handle)myDesc);

	return;
}
Code example #11
File: PRDocument.cpp  Project: MaddTheSane/tntbasic
OSErr
IsFlattenedResourceFile(
	ConstFSSpecPtr	inFile,
	Boolean*		outIsFlat)
{
	OSErr		err;
	CInfoPBRec	pb;
	
	if (not inFile)
	{
		// This can occur when we create a new project document (Cmd-N)
		*outIsFlat = false;
		return noErr;
	}

	pb.hFileInfo.ioNamePtr = (StringPtr)inFile->name;
	pb.hFileInfo.ioVRefNum = inFile->vRefNum;
	pb.hFileInfo.ioDirID = inFile->parID;
	pb.hFileInfo.ioFDirIndex = 0;

	err = PBGetCatInfoSync(&pb);
	if (err == noErr)
	{
		if (pb.hFileInfo.ioFlAttrib & kioFlAttribDirMask)
		{
			// This is a directory
			*outIsFlat = false;
			return paramErr;
		}
		else
		{
			UInt32	dfSize;
			UInt32	rfSize;
			SInt16	dfRefNum;
			SInt32	filePos;
			
			dfSize = pb.hFileInfo.ioFlLgLen;
			rfSize = pb.hFileInfo.ioFlRLgLen;
			
			if (rfSize > 0)
			{
				*outIsFlat = false;
			}
			else if (dfSize == 0)
			{
				// This file has no data or resource fork.
				*outIsFlat = false;
			}
			else
			{
				// Only the data fork is non-empty.
				// Now we need to determine if it contains resources or not.
				UInt32	firstFourWords[4];
				SInt32	byteCount;
				
				err = FSpOpenDF(inFile, fsRdPerm, &dfRefNum);
				if (err)	return err;
				
				err = GetFPos(dfRefNum, &filePos);
				
				byteCount = sizeof(firstFourWords);

				err = FSRead(dfRefNum, &byteCount, &firstFourWords);
				if (err == noErr)
				{
					// Test is based on resource file format as described in IM: More Mac Toolbox
					// <http://developer.apple.com/techpubs/mac/MoreToolbox/MoreToolbox-99.html#HEADING99-0>
					//
					// First four words of the file represent the resource header
					// Word1:  Offset from beginning of resource fork to resource data
					// Word2:  Offset from beginning of resource fork to resource map
					// Word3:  Length of resource data
					// Word4:  Length of resource map
					//
					// So...
					// (Word1 + Word3 + Word4) == (Word2 + Word4) == size of resource fork

					if ((byteCount == sizeof(firstFourWords)) and
						(EndianU32_BtoN(firstFourWords[0]) + EndianU32_BtoN(firstFourWords[2]) +
						 EndianU32_BtoN(firstFourWords[3]) == dfSize) and
						(EndianU32_BtoN(firstFourWords[1]) + EndianU32_BtoN(firstFourWords[3]) == dfSize))
					{
						*outIsFlat = true;
					}
				}
				
				err = SetFPos(dfRefNum, fsFromStart, filePos);

				err = FSClose(dfRefNum);
			}
		}
	}
	
	return err;
}
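
For instance (illustrative numbers only): a header with data offset 0x100, map offset 0x120, data length 0x20 and map length 0x32 passes the check when the data fork is exactly 0x152 bytes, since 0x100 + 0x20 + 0x32 and 0x120 + 0x32 both equal 0x152.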
Code example #12
ComponentResult write_vorbisPrivateData(GenericStreamPtr as, UInt8 **buf, UInt32 *bufSize)
{
  ComponentResult err = noErr;
  void *magicCookie = NULL;
  UInt32 cookieSize = 0;
  dbg_printf("[WebM] Get Vorbis Private Data\n");

  err = QTGetComponentPropertyInfo(as->aud.vorbisComponentInstance,
                                   kQTPropertyClass_SCAudio,
                                   kQTSCAudioPropertyID_MagicCookie,
                                   NULL, &cookieSize, NULL);

  if (err) return err;

  dbg_printf("[WebM] Cookie Size %d\n", cookieSize);

  magicCookie = calloc(1, cookieSize);
  err = QTGetComponentProperty(as->aud.vorbisComponentInstance,
                               kQTPropertyClass_SCAudio,
                               kQTSCAudioPropertyID_MagicCookie,
                               cookieSize, magicCookie, NULL);

  if (err) goto bail;

  UInt8 *ptrheader = (UInt8 *) magicCookie;
  UInt8 *cend = ptrheader + cookieSize;
  CookieAtomHeader *aheader = (CookieAtomHeader *) ptrheader;
  WebMBuffer header, header_vc, header_cb;
  header.size = header_vc.size = header_cb.size = 0;

  while (ptrheader < cend)
  {
    aheader = (CookieAtomHeader *) ptrheader;
    ptrheader += EndianU32_BtoN(aheader->size);

    if (ptrheader > cend || EndianU32_BtoN(aheader->size) <= 0)
      break;

    switch (EndianS32_BtoN(aheader->type))
    {
      case kCookieTypeVorbisHeader:
        header.size = EndianS32_BtoN(aheader->size) - 2 * sizeof(long);
        header.data = aheader->data;
        break;

      case kCookieTypeVorbisComments:
        header_vc.size = EndianS32_BtoN(aheader->size) - 2 * sizeof(long);
        header_vc.data = aheader->data;
        break;

      case kCookieTypeVorbisCodebooks:
        header_cb.size = EndianS32_BtoN(aheader->size) - 2 * sizeof(long);
        header_cb.data = aheader->data;
        break;

      default:
        break;
    }
  }

  if (header.size == 0 || header_vc.size == 0 || header_cb.size == 0)
  {
    err = paramErr;
    goto bail;
  }

  //1 + header1 /255 + header2 /255 + idheader.len +
  *bufSize = 1;  //the first byte which is always 0x02
  *bufSize += (header.size - 1) / 255 + 1; //the header size lacing
  *bufSize += (header_vc.size - 1) / 255 + 1; //the comment size lacing
  *bufSize += header.size + header_vc.size + header_cb.size; //the packets
  dbg_printf("[WebM]Packet headers  %d %d %d -- total buffer %d\n",
             header.size, header_vc.size , header_cb.size, *bufSize);
  *buf = malloc(*bufSize);
  UInt8 *ptr = *buf;

  *ptr = 0x02;
  ptr ++;
  //using ogg lacing write out the size of the first two packets
  _oggLacing(&ptr, header.size);
  _oggLacing(&ptr, header_vc.size);

  _dbg_printVorbisHeader(header.data);

  memcpy(ptr, header.data, header.size);
  ptr += header.size;
  memcpy(ptr, header_vc.data, header_vc.size);
  ptr += header_vc.size;
  memcpy(ptr, header_cb.data, header_cb.size);

bail:

  if (magicCookie != NULL)
  {
    free(magicCookie);
    magicCookie = NULL;
  }

  return err;
}
Code example #13
File: TheoraDecoder.c  Project: JanX2/XiphQT
OSErr init_theora_decoder(Theora_Globals glob, CodecDecompressParams *p)
{
    OSErr err = noErr;
    Handle ext;
    //OggSerialNoAtom *atom;
    Byte *ptrheader, *mCookie, *cend;
    UInt32 mCookieSize;
    CookieAtomHeader *aheader;
    th_comment tc;
    ogg_packet header, header_tc, header_cb;

    if (glob->info_initialised) {
        dbg_printf("--:Theora:- Decoder already initialised, skipping...\n");
        return err;
    }

    err = GetImageDescriptionExtension(p->imageDescription, &ext, kSampleDescriptionExtensionTheora, 1);
    if (err != noErr) {
        dbg_printf("XXX GetImageDescriptionExtension() failed! ('%4.4s')\n", &(*p->imageDescription)->cType);
        err = codecBadDataErr;
        return err;
    }

    mCookie = (UInt8 *) *ext;
    mCookieSize = GetHandleSize(ext);

    ptrheader = mCookie;
    cend = mCookie + mCookieSize;

    aheader = (CookieAtomHeader*)ptrheader;


    header.bytes = header_tc.bytes = header_cb.bytes = 0;

    while (ptrheader < cend) {
        aheader = (CookieAtomHeader *) ptrheader;
        ptrheader += EndianU32_BtoN(aheader->size);
        if (ptrheader > cend || EndianU32_BtoN(aheader->size) <= 0)
            break;

        switch(EndianS32_BtoN(aheader->type)) {
        case kCookieTypeTheoraHeader:
            header.b_o_s = 1;
            header.e_o_s = 0;
            header.granulepos = 0;
            header.packetno = 0;
            header.bytes = EndianS32_BtoN(aheader->size) - 2 * sizeof(long);
            header.packet = aheader->data;
            break;

        case kCookieTypeTheoraComments:
            header_tc.b_o_s = 0;
            header_tc.e_o_s = 0;
            header_tc.granulepos = 0;
            header_tc.packetno = 1;
            header_tc.bytes = EndianS32_BtoN(aheader->size) - 2 * sizeof(long);
            header_tc.packet = aheader->data;
            break;

        case kCookieTypeTheoraCodebooks:
            header_cb.b_o_s = 0;
            header_cb.e_o_s = 0;
            header_cb.granulepos = 0;
            header_cb.packetno = 2;
            header_cb.bytes = EndianS32_BtoN(aheader->size) - 2 * sizeof(long);
            header_cb.packet = aheader->data;
            break;

        default:
            break;
        }
    }

    err = codecBadDataErr;

    if (header.bytes == 0 || header_tc.bytes == 0 || header_cb.bytes == 0)
        return err;

    th_info_init(&glob->ti);
    th_comment_init(&tc);
    glob->ts = NULL;

    if (th_decode_headerin(&glob->ti, &tc, &glob->ts, &header) < 0) {

        if (glob->ts != NULL)
            th_setup_free (glob->ts);
        th_comment_clear(&tc);
        th_info_clear(&glob->ti);

        return err;
    }

    th_decode_headerin(&glob->ti, &tc, &glob->ts, &header_tc);
    th_decode_headerin(&glob->ti, &tc, &glob->ts, &header_cb);

    err = noErr;

    th_comment_clear(&tc);

    dbg_printf("--:Theora:- OK, managed to initialize the decoder somehow...\n");
    glob->info_initialised = true;

    return err;
}
Code example #14
File: auprocess.cpp  Project: arnelh/Examples
static OSStatus InputCallback (void 			*inRefCon, 
					AudioUnitRenderActionFlags 	*ioActionFlags, 
					const AudioTimeStamp 		*inTimeStamp, 
					UInt32 						inBusNumber, 
					UInt32 						inNumberFrames, 
					AudioBufferList 			*ioData)
{
													#if CA_AU_PROFILE_TIME 
														UInt64 now = CAHostTimeBase::GetTheCurrentTime(); 
													#endif

	CAAudioFile &readFile = *(static_cast<CAAudioFile*>(inRefCon));

#if !CAAF_USE_EXTAUDIOFILE
	if (SInt64(inTimeStamp->mSampleTime) > readFile.GetNumberPackets()) {
#else
	if (SInt64(inTimeStamp->mSampleTime) > readFile.GetNumberFrames()) {
#endif
#if DEBUG
	printf ("reading past end of input\n");
#endif
		return -1;
	}

	readFile.Seek (SInt64(inTimeStamp->mSampleTime));
	readFile.Read (inNumberFrames, ioData);

													#if CA_AU_PROFILE_TIME 
														sReadTime += (CAHostTimeBase::GetTheCurrentTime() - now); 
													#endif

	return noErr;
}

static OSStatus FConvInputCallback (void 			*inRefCon, 
					AudioUnitRenderActionFlags 	*ioActionFlags, 
					const AudioTimeStamp 		*inTimeStamp, 
					UInt32 						inBusNumber, 
					UInt32 						inNumberFrames, 
					AudioBufferList 			*ioData)
{
												#if CA_AU_PROFILE_TIME 
													UInt64 now = CAHostTimeBase::GetTheCurrentTime(); 
												#endif

	CAAudioFile &readFile = *(static_cast<CAAudioFile*>(inRefCon));

		// this test is ONLY needed in case of processing with a Format Converter type of AU
		// in all other cases, the CAAUProcessor class will NEVER call you for input
		// beyond the end of the file....

#if !CAAF_USE_EXTAUDIOFILE
	if (SInt64(inTimeStamp->mSampleTime) >= readFile.GetNumberPackets()) {
#else
	if (SInt64(inTimeStamp->mSampleTime) >= readFile.GetNumberFrames()) {
#endif
		return -1;
	}
	
	readFile.Seek (SInt64(inTimeStamp->mSampleTime));
	UInt32 readPackets = inNumberFrames;
		
		// also, have to do this for a format converter AU - otherwise we'd just read what we're told
#if !CAAF_USE_EXTAUDIOFILE
	if (SInt64(inTimeStamp->mSampleTime + inNumberFrames) > readFile.GetNumberPackets()) {
#else
	if (SInt64(inTimeStamp->mSampleTime + inNumberFrames) > readFile.GetNumberFrames()) {
#endif
		// first set this to zero as we're only going to read a partial number of frames
		AudioBuffer *buf = ioData->mBuffers;
		for (UInt32 i = ioData->mNumberBuffers; i--; ++buf)
			memset((Byte *)buf->mData, 0, buf->mDataByteSize);
#if !CAAF_USE_EXTAUDIOFILE
		readPackets = UInt32 (readFile.GetNumberPackets() - SInt64(inTimeStamp->mSampleTime));
#else
		readPackets = UInt32 (readFile.GetNumberFrames() - SInt64(inTimeStamp->mSampleTime));
#endif
	}
	
	readFile.Read (readPackets, ioData);

													#if CA_AU_PROFILE_TIME 
														sReadTime += (CAHostTimeBase::GetTheCurrentTime() - now); 
													#endif

	return noErr;
}

struct ReadBuffer {
	AUOutputBL *readData;
	UInt32 readFrames;
};

static OSStatus MemoryInputCallback (void		*inRefCon, 
					AudioUnitRenderActionFlags 	*ioActionFlags, 
					const AudioTimeStamp 		*inTimeStamp, 
					UInt32 						inBusNumber, 
					UInt32 						inNumberFrames, 
					AudioBufferList 			*ioData)
{
													#if CA_AU_PROFILE_TIME 
														UInt64 now = CAHostTimeBase::GetTheCurrentTime(); 
													#endif

	ReadBuffer *readBuffer = (ReadBuffer*)inRefCon;
	
	if (((readBuffer->readFrames + inNumberFrames) * sizeof(Float32)) > (readBuffer->readData->ABL()->mBuffers[0].mDataByteSize)) 
	{
		// going past read size
		AudioBuffer *buf = ioData->mBuffers;
		for (UInt32 i = ioData->mNumberBuffers; i--; ++buf)
			memset((Byte *)buf->mData, 0, buf->mDataByteSize);
	}
	else
	{
		AudioBuffer *buf = ioData->mBuffers;
		AudioBuffer *rBuf = readBuffer->readData->ABL()->mBuffers;
		for (UInt32 i = ioData->mNumberBuffers; i--; ++buf, ++rBuf) {
			AudioBuffer readB = *rBuf;
			readB.mData = static_cast<Float32*>(rBuf->mData) + readBuffer->readFrames;
			memcpy (buf->mData, readB.mData, buf->mDataByteSize);
		}
		readBuffer->readFrames += inNumberFrames;
	}

													#if CA_AU_PROFILE_TIME 
														sReadTime += (CAHostTimeBase::GetTheCurrentTime() - now); 
													#endif

	return noErr;
}

#pragma mark __Utility Helpers

CFPropertyListRef	 ReadPresetFromPresetFile (char* filePath)
{	
	if (!filePath)
		return NULL;
	
	FSRef ref;
	if (FSPathMakeRef((UInt8 *)filePath, &ref, NULL))
		return NULL;
		
	CFDataRef			resourceData = NULL;
	CFPropertyListRef   theData = NULL;
	CFStringRef			errString = NULL;
	CFURLRef			fileURL = CFURLCreateFromFSRef (kCFAllocatorDefault, &ref);
		if (fileURL == NULL) {
			goto home;
		}
		
	SInt32				result;
    
   // Read the XML file.
   Boolean status; status = CFURLCreateDataAndPropertiesFromResource (kCFAllocatorDefault, fileURL,
                                                                &resourceData,	// place to put file data
                                                                NULL, NULL, &result);
        if (status == false || result) {
            goto home;
        }
    
	theData = CFPropertyListCreateFromXMLData (kCFAllocatorDefault, resourceData,  
													kCFPropertyListImmutable, &errString);
        if (theData == NULL || errString) {
            if (theData)
				CFRelease (theData);
			theData = NULL;
			goto home;
       }
	
home:
	if (fileURL)
		CFRelease (fileURL);
	if (resourceData)
		CFRelease (resourceData);
    if (errString)
		CFRelease (errString);
		
	return theData;
}

#pragma mark __the setup code

#define OFFLINE_AU_CMD 		"[-au TYPE SUBTYPE MANU] The Audio Unit component description\n\t"
#define INPUT_FILE	 		"[-i /Path/To/File] The file that is to be processed.\n\t"
#define OUTPUT_FILE			"[-o /Path/To/File/To/Create] This will be in the same format as the input file\n\t"
#define AU_PRESET_CMD		"[-p /Path/To/AUPreset/File] Specify an AU Preset File to establish the state of the AU\n\t"
#define SHORT_MEM_CMD		"[-m] Just reads and processes the first half second of the input file\n\t"
#define USE_MAX_FRAMES		"[-f max_frames] default is 32768 (512 for aufc units)"
 
static char* usageStr = "Usage: AU Process\n\t" 
				OFFLINE_AU_CMD 
				INPUT_FILE
				OUTPUT_FILE
				AU_PRESET_CMD
				SHORT_MEM_CMD
				USE_MAX_FRAMES;

static int		StrToOSType(const char *str, OSType &t)
{
	char buf[4];
	const char *p = str;
	int x;
	for (int i = 0; i < 4; ++i) {
		if (*p != '\\') {
			if ((buf[i] = *p++) == '\0')
				goto fail;
		} else {
			if (*++p != 'x') goto fail;
			if (sscanf(++p, "%02X", &x) != 1) goto fail;
			buf[i] = x;
			p += 2;
		}
	}
	t = EndianU32_BtoN(*(UInt32 *)buf);
	return p - str;
fail:
	return 0;
}
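
A hedged usage sketch (the four-character code is illustrative; non-printing bytes would be written with the \xNN escape form that the backslash branch above parses):

	OSType componentSubType;
	if (StrToOSType("aufc", componentSubType) == 0) {
		// parse failed
	}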