bool QTImportFileHandle::Import(TrackFactory *trackFactory, Track ***outTracks, int *outNumTracks) { OSErr err = noErr; // // Determine the file format. // // GetMediaSampleDescription takes a SampleDescriptionHandle, but apparently // if the media is a sound (which presumably we know it is) then it will treat // it as a SoundDescriptionHandle (which in addition to the format of single // samples, also tells you sample rate, number of channels, etc.) // Pretty messed up interface, if you ask me. SoundDescriptionHandle soundDescription = (SoundDescriptionHandle)NewHandle(0); GetMediaSampleDescription(mMedia, 1, (SampleDescriptionHandle)soundDescription); // If this is a compressed format, it may have out-of-stream compression // parameters that need to be passed to the sound converter. We retrieve // these in the form of an audio atom. To do this, however we have to // get the data by way of a handle, then copy it manually from the handle to // the atom. These interfaces get worse all the time! Handle decompressionParamsHandle = NewHandle(0); AudioFormatAtomPtr decompressionParamsAtom = NULL; err = GetSoundDescriptionExtension(soundDescription, &decompressionParamsHandle, siDecompressionParams); if(err == noErr) { // this stream has decompression parameters. copy from the handle to the atom. int paramsSize = GetHandleSize(decompressionParamsHandle); HLock(decompressionParamsHandle); decompressionParamsAtom = (AudioFormatAtomPtr)NewPtr(paramsSize); //err = MemError(); BlockMoveData(*decompressionParamsHandle, decompressionParamsAtom, paramsSize); HUnlock(decompressionParamsHandle); } if(decompressionParamsHandle) DisposeHandle(decompressionParamsHandle); // // Now we set up a sound converter to decompress the data if it is compressed. 
// SoundComponentData inputFormat; SoundComponentData outputFormat; SoundConverter soundConverter = NULL; inputFormat.flags = outputFormat.flags = 0; inputFormat.sampleCount = outputFormat.sampleCount = 0; inputFormat.reserved = outputFormat.reserved = 0; inputFormat.buffer = outputFormat.buffer = NULL; inputFormat.numChannels = outputFormat.numChannels = (*soundDescription)->numChannels; inputFormat.sampleSize = outputFormat.sampleSize = (*soundDescription)->sampleSize; inputFormat.sampleRate = outputFormat.sampleRate = (*soundDescription)->sampleRate; inputFormat.format = (*soundDescription)->dataFormat; outputFormat.format = kSoundNotCompressed; err = SoundConverterOpen(&inputFormat, &outputFormat, &soundConverter); // // Create the Audacity WaveTracks to house the new data // *outNumTracks = outputFormat.numChannels; WaveTrack **channels = new WaveTrack *[*outNumTracks]; // determine sample format sampleFormat format; int bytesPerSample; // TODO: do we know for sure that 24 and 32 bit samples are the same kind // of 24 and 32 bit samples we expect? switch(outputFormat.sampleSize) { case 16: format = int16Sample; bytesPerSample = 2; break; case 24: format = int24Sample; bytesPerSample = 3; break; case 32: format = floatSample; bytesPerSample = 4; break; default: printf("I can't import a %d-bit file!\n", outputFormat.sampleSize); return false; } int c; for (c = 0; c < *outNumTracks; c++) { channels[c] = trackFactory->NewWaveTrack(format); channels[c]->SetRate(outputFormat.sampleRate / 65536.0); if(*outNumTracks == 2) { if(c == 0) { channels[c]->SetChannel(Track::LeftChannel); channels[c]->SetLinked(true); } else if(c == 1) { channels[c]->SetChannel(Track::RightChannel); } } } // // Give the converter the decompression atom. 
// // (judging from the sample code, it's OK if the atom is NULL, which // it will be if there was no decompression information) err = SoundConverterSetInfo(soundConverter, siDecompressionParams, decompressionParamsAtom); if(err == siUnknownInfoType) { // the decompressor didn't need the decompression atom, but that's ok. err = noErr; } // Tell the converter we're cool with VBR audio SoundConverterSetInfo(soundConverter, siClientAcceptsVBR, Ptr(true)); // // Determine buffer sizes and allocate output buffer // int inputBufferSize = 655360; int outputBufferSize = 524288; char *outputBuffer = new char[outputBufferSize]; // // Populate the structure of data that is passed to the callback // CallbackData cbData; memset(&cbData.compData, 0, sizeof(ExtendedSoundComponentData)); cbData.isSourceVBR = ((*soundDescription)->compressionID == variableCompression); cbData.sourceMedia = mMedia; cbData.getMediaAtThisTime = 0; cbData.sourceDuration = GetMediaDuration(mMedia); cbData.isThereMoreSource = true; cbData.maxBufferSize = inputBufferSize; // allocate source media buffer cbData.hSource = NewHandle((long)cbData.maxBufferSize); MoveHHi(cbData.hSource); HLock(cbData.hSource); cbData.compData.desc = inputFormat; cbData.compData.desc.buffer = (BytePtr)*cbData.hSource; cbData.compData.desc.flags = kExtendedSoundData; cbData.compData.extendedFlags = kExtendedSoundBufferSizeValid | kExtendedSoundSampleCountNotValid; if(cbData.isSourceVBR) cbData.compData.extendedFlags |= kExtendedSoundCommonFrameSizeValid; cbData.compData.bufferSize = 0; // filled in during callback // this doesn't make sense to me, but it is taken from sample code cbData.compData.recordSize = sizeof(ExtendedSoundComponentData); // // Begin the Conversion // err = SoundConverterBeginConversion(soundConverter); SoundConverterFillBufferDataUPP fillBufferUPP; fillBufferUPP = NewSoundConverterFillBufferDataUPP(SoundConverterFillBufferCallback); bool done = false; bool cancelled = false; sampleCount 
samplesSinceLastCallback = 0; UInt32 outputFrames; UInt32 outputBytes; UInt32 outputFlags; #define SAMPLES_PER_CALLBACK 10000 while(!done && !cancelled) { err = SoundConverterFillBuffer(soundConverter, // a sound converter fillBufferUPP, // the callback &cbData, // refCon passed to FillDataProc outputBuffer, // the buffer to decompress into outputBufferSize, // size of that buffer &outputBytes, // number of bytes actually output &outputFrames, // number of frames actually output &outputFlags); // fillbuffer retured advisor flags if (err) break; if((outputFlags & kSoundConverterHasLeftOverData) == false) done = true; for(c = 0; c < *outNumTracks; c++) channels[c]->Append(outputBuffer + (c*bytesPerSample), format, outputFrames, *outNumTracks); samplesSinceLastCallback += outputFrames; if( samplesSinceLastCallback > SAMPLES_PER_CALLBACK ) { if( mProgressCallback ) cancelled = mProgressCallback(mUserData, (float)cbData.getMediaAtThisTime / cbData.sourceDuration); samplesSinceLastCallback -= SAMPLES_PER_CALLBACK; } } HUnlock(cbData.hSource); // Flush any remaining data to the output buffer. // It appears that we have no way of telling this routine how big the output // buffer is! We had better hope that there isn't more data left than // the buffer is big. SoundConverterEndConversion(soundConverter, outputBuffer, &outputFrames, &outputBytes); for(c = 0; c < *outNumTracks; c++) { channels[c]->Append(outputBuffer + (c*bytesPerSample), format, outputFrames, *outNumTracks); channels[c]->Flush(); } delete[] outputBuffer; DisposeHandle(cbData.hSource); SoundConverterClose(soundConverter); DisposeMovie(mMovie); if (cancelled || err != noErr) { for (c = 0; c < *outNumTracks; c++) delete channels[c]; delete[] channels; return false; } else { *outTracks = new Track *[*outNumTracks]; for(c = 0; c < *outNumTracks; c++) (*outTracks)[c] = channels[c]; delete[] channels; return true; } }
// Append the given sound track to the 3gp movie being written.
//
// st - source sound track; must be non-null and 16 bits per sample.
//
// Throws TException on a null track and TImageException on an unsupported
// sample size or on any QuickTime error along the way.
void TLevelWriter3gp::saveSoundTrack(TSoundTrack *st)
{
   Track theTrack;
   OSErr myErr = noErr;
   // BUGFIX: mySampleDesc and converter are tested in the cleanup epilogue
   // below; they used to be read uninitialized if a goto label was reached
   // before they were assigned.
   SoundDescriptionV1Handle mySampleDesc = NULL;
   Media myMedia;
   Handle myDestHandle;
   SoundComponentData sourceInfo;
   SoundComponentData destInfo;
   SoundConverter converter = NULL;
   CompressionInfo compressionInfo;
   int err;

   if (!st)
      throw TException("null reference to soundtrack");

   if (st->getBitPerSample() != 16) {
      throw TImageException(m_path, "Only 16 bits per sample is supported");
   }

   theTrack = NewMovieTrack(m_movie, 0, 0, kFullVolume);
   myErr = GetMoviesError();
   if (myErr != noErr)
      throw TImageException(m_path, "error creating audio track");
   FailIf(myErr != noErr, CompressErr);

   myDestHandle = NewHandle(0);
   FailWithAction(myDestHandle == NULL, myErr = MemError(), NoDest);

   // NOTE(review): this stores the raw-data POINTER into the zero-sized
   // handle's block rather than copying the sample bytes into it; it matches
   // the original code, but looks fragile -- confirm against the QuickTime
   // AddMediaSample contract before changing.
   *myDestHandle = (char *)st->getRawData();

   //////////
   //
   // create a media for the track passed in
   //
   //////////

   // set new track to be a sound track
   m_soundDataRef = nil;
   m_hSoundMovieData = NewHandle(0);

   // Construct the Handle data reference
   err = PtrToHand(&m_hSoundMovieData, &m_soundDataRef, sizeof(Handle));

   // BUGFIX: was "(err = GetMoviesError() != noErr)" which, due to operator
   // precedence, stored the boolean comparison in err instead of the error
   // code. The truth value of the condition is unchanged.
   if ((err = GetMoviesError()) != noErr)
      throw TImageException(getFilePath(), "can't create Data Ref");

   myMedia = NewTrackMedia(theTrack, SoundMediaType, st->getSampleRate(),
                           m_soundDataRef, HandleDataHandlerSubType); //track->rate >> 16

   myErr = GetMoviesError();
   if (myErr != noErr)
      throw TImageException(m_path, "error setting audio track");
   FailIf(myErr != noErr, Exit);

   // start a media editing session
   myErr = BeginMediaEdits(myMedia);
   if (myErr != noErr)
      throw TImageException(m_path, "error beginning edit audio track");
   FailIf(myErr != noErr, Exit);

   sourceInfo.flags       = 0x0;
   sourceInfo.format      = kSoundNotCompressed;
   sourceInfo.numChannels = st->getChannelCount();
   sourceInfo.sampleSize  = st->getBitPerSample();
   sourceInfo.sampleRate  = st->getSampleRate();
   sourceInfo.sampleCount = st->getSampleCount();
   sourceInfo.buffer      = (unsigned char *)st->getRawData();
   sourceInfo.reserved    = 0x0;

   destInfo.flags = kNoSampleRateConversion | kNoSampleSizeConversion |
                    kNoSampleFormatConversion | kNoChannelConversion |
                    kNoDecompression | kNoVolumeConversion |
                    kNoRealtimeProcessing;
   destInfo.format      = k16BitNativeEndianFormat;
   destInfo.numChannels = st->getChannelCount();
   destInfo.sampleSize  = st->getBitPerSample();
   destInfo.sampleRate  = st->getSampleRate();
   destInfo.sampleCount = st->getSampleCount();
   destInfo.buffer      = (unsigned char *)st->getRawData();
   destInfo.reserved    = 0x0;

   SoundConverterOpen(&sourceInfo, &destInfo, &converter);

   myErr = SoundConverterGetInfo(converter, siCompressionFactor, &compressionInfo);
   if (myErr != noErr)
      throw TImageException(m_path, "error getting audio converter info");

   myErr = GetCompressionInfo(fixedCompression, sourceInfo.format,
                              sourceInfo.numChannels, sourceInfo.sampleSize,
                              &compressionInfo);
   if (myErr != noErr)
      throw TImageException(m_path, "error getting audio compression info");
   FailIf(myErr != noErr, ConverterErr);

   compressionInfo.bytesPerFrame = compressionInfo.bytesPerPacket * destInfo.numChannels;

   //////////
   //
   // create a sound sample description
   //
   //////////

   // use the SoundDescription format 1 because it adds fields for data size information
   // and is required by AddSoundDescriptionExtension if an extension is required for the compression format
   mySampleDesc = (SoundDescriptionV1Handle)NewHandleClear(sizeof(SoundDescriptionV1));
   // BUGFIX: the failure test used to re-check the stale myErr (still noErr
   // here); what can fail is the allocation itself, so test the handle.
   FailWithAction(mySampleDesc == NULL, myErr = MemError(), Exit);

   (**mySampleDesc).desc.descSize     = sizeof(SoundDescriptionV1);
   (**mySampleDesc).desc.dataFormat   = destInfo.format;
   (**mySampleDesc).desc.resvd1       = 0;
   (**mySampleDesc).desc.resvd2       = 0;
   (**mySampleDesc).desc.dataRefIndex = 1;
   (**mySampleDesc).desc.version      = 1;
   (**mySampleDesc).desc.revlevel     = 0;
   (**mySampleDesc).desc.vendor       = 0;
   (**mySampleDesc).desc.numChannels  = destInfo.numChannels;
   (**mySampleDesc).desc.sampleSize   = destInfo.sampleSize;
   (**mySampleDesc).desc.compressionID = 0;
   (**mySampleDesc).desc.packetSize   = 0;
   // sampleRate field is 16.16 fixed-point
   (**mySampleDesc).desc.sampleRate   = st->getSampleRate() << 16;
   (**mySampleDesc).samplesPerPacket  = compressionInfo.samplesPerPacket;
   (**mySampleDesc).bytesPerPacket    = compressionInfo.bytesPerPacket;
   (**mySampleDesc).bytesPerFrame     = compressionInfo.bytesPerFrame;
   (**mySampleDesc).bytesPerSample    = compressionInfo.bytesPerSample;

   //////////
   //
   // add samples to the media
   //
   //////////

   myErr = AddMediaSample(myMedia, myDestHandle, 0,
                          destInfo.sampleCount * compressionInfo.bytesPerFrame,
                          1, (SampleDescriptionHandle)mySampleDesc,
                          destInfo.sampleCount * compressionInfo.samplesPerPacket,
                          0, NULL);
   if (myErr != noErr)
      throw TImageException(m_path, "error adding audio samples");
   FailIf(myErr != noErr, MediaErr);

   myErr = EndMediaEdits(myMedia);
   if (myErr != noErr)
      throw TImageException(m_path, "error ending audio edit");
   FailIf(myErr != noErr, MediaErr);

   //////////
   //
   // insert the media into the track
   //
   //////////

   myErr = InsertMediaIntoTrack(theTrack, 0, 0, GetMediaDuration(myMedia), fixed1);
   if (myErr != noErr)
      throw TImageException(m_path, "error inserting audio track");
   FailIf(myErr != noErr, MediaErr);

   goto Done;

// NOTE(review): each FailIf/FailWithAction above is preceded by a throw on
// the same condition, so these labels are only reachable on the success
// path or from the allocation-failure actions.
ConverterErr:
NoDest:
CompressErr:
Exit:
Done:
MediaErr:
   if (mySampleDesc != NULL)
      DisposeHandle((Handle)mySampleDesc);

   if (converter)
      SoundConverterClose(converter);

   if (myErr != noErr)
      throw TImageException(m_path, "error saving audio track");
}