Example #1
static AudioChannelLayout *ffat_convert_layout(AudioChannelLayout *layout, UInt32* size)
{
    AudioChannelLayoutTag tag = layout->mChannelLayoutTag;
    AudioChannelLayout *new_layout;
    if (tag == kAudioChannelLayoutTag_UseChannelDescriptions)
        return layout;
    else if (tag == kAudioChannelLayoutTag_UseChannelBitmap)
        AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelLayoutForBitmap,
                                   sizeof(UInt32), &layout->mChannelBitmap, size);
    else
        AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelLayoutForTag,
                                   sizeof(AudioChannelLayoutTag), &tag, size);
    new_layout = av_malloc(*size);
    if (!new_layout) {
        av_free(layout);
        return NULL;
    }
    if (tag == kAudioChannelLayoutTag_UseChannelBitmap)
        AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutForBitmap,
                               sizeof(UInt32), &layout->mChannelBitmap, size, new_layout);
    else
        AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutForTag,
                               sizeof(AudioChannelLayoutTag), &tag, size, new_layout);
    new_layout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
    av_free(layout);
    return new_layout;
}
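
Every example on this page follows the same two-step AudioFormat pattern: size the result with AudioFormatGetPropertyInfo, allocate, then fetch it with AudioFormatGetProperty. A minimal standalone sketch of that pattern (not taken from FFmpeg; it adds the error checks the snippet above omits and uses plain malloc/free):

#include <AudioToolbox/AudioToolbox.h>
#include <stdlib.h>

/* Expand a channel layout tag into a layout with explicit channel
   descriptions. The caller frees the result; returns NULL on any error. */
static AudioChannelLayout *layout_for_tag(AudioChannelLayoutTag tag)
{
    UInt32 size = 0;
    if (AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelLayoutForTag,
                                   sizeof(tag), &tag, &size) != noErr || size == 0)
        return NULL;

    AudioChannelLayout *layout = malloc(size);
    if (!layout)
        return NULL;

    if (AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutForTag,
                               sizeof(tag), &tag, &size, layout) != noErr) {
        free(layout);
        return NULL;
    }
    return layout;
}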
Example #2
OSStatus			AUPannerBase::UpdateBypassMatrix()
{
	OSStatus err = SetDefaultChannelLayoutsIfNone();
	if (err) return err;
	
	UInt32 inChannels = GetNumberOfInputChannels();
	UInt32 outChannels = GetNumberOfOutputChannels();	
	
	const AudioChannelLayout* inoutACL[2];
	
	inoutACL[0] = &GetInputLayout();
	inoutACL[1] = &GetOutputLayout();

	mBypassMatrix.alloc(inChannels * outChannels, true);
	
	UInt32 propSize = inChannels * outChannels * sizeof(Float32);
	
	err = AudioFormatGetProperty(kAudioFormatProperty_MatrixMixMap, sizeof(inoutACL), inoutACL, &propSize, mBypassMatrix());
	if (err) {
		// if there is an error, use a diagonal matrix.
		Float32* bypass = mBypassMatrix();
		for (UInt32 chan = 0; chan < std::min(inChannels, outChannels); ++chan)
		{
			float *amp = bypass + (chan * outChannels + chan);
			*amp = 1.;
		}
	}

	return noErr;
}
Example #3
void CCoreAudioMixMap::Rebuild(AudioChannelLayout& inLayout, AudioChannelLayout& outLayout)
{
  // map[in][out] = mix-level of input_channel[in] into output_channel[out]

  free(m_pMap);
  m_pMap = NULL;

  m_inChannels  = CCoreAudioChannelLayout::GetChannelCountForLayout(inLayout);
  m_outChannels = CCoreAudioChannelLayout::GetChannelCountForLayout(outLayout);

  // Try to find a 'well-known' matrix
  const AudioChannelLayout* layouts[] = {&inLayout, &outLayout};
  UInt32 propSize = 0;
  AudioFormatGetPropertyInfo(kAudioFormatProperty_MatrixMixMap,
    sizeof(layouts), layouts, &propSize);
  m_pMap = (Float32*)calloc(1, propSize);

  // Try and get a predefined mixmap
  OSStatus ret = AudioFormatGetProperty(kAudioFormatProperty_MatrixMixMap,
    sizeof(layouts), layouts, &propSize, m_pMap);
  if (!ret)
  {
    // Nothing else to do...a map already exists
    m_isValid = true;
    return;
  }

  // No predefined mixmap was available. Going to have to build it manually
  CLog::Log(LOGDEBUG, "CCoreAudioMixMap::CreateMap: Unable to locate pre-defined mixing matrix");

  m_isValid = false;
}
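
Examples 2 and 3 both query kAudioFormatProperty_MatrixMixMap, whose specifier is a two-element array of AudioChannelLayout pointers and whose result is an inChannels x outChannels array of Float32 gains. A hedged standalone sketch of the same query with the size and allocation checks spelled out (copy_mix_map is an illustrative name, not from either project):

#include <AudioToolbox/AudioToolbox.h>
#include <stdlib.h>

/* Returns a calloc'd [inChannels][outChannels] gain matrix, or NULL. */
static Float32 *copy_mix_map(const AudioChannelLayout *in,
                             const AudioChannelLayout *out)
{
    const AudioChannelLayout *layouts[2] = { in, out };
    UInt32 size = 0;

    if (AudioFormatGetPropertyInfo(kAudioFormatProperty_MatrixMixMap,
                                   sizeof(layouts), layouts, &size) != noErr
        || size == 0)
        return NULL;

    Float32 *map = calloc(1, size);
    if (!map)
        return NULL;

    if (AudioFormatGetProperty(kAudioFormatProperty_MatrixMixMap,
                               sizeof(layouts), layouts, &size, map) != noErr) {
        free(map);
        return NULL;
    }
    return map;  /* map[i * outChannels + o] = gain of input i into output o */
}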
Example #4
void	PrintCompatibleChannelLayouts()
{
	int plen = strlen(prefix);
	for (LayoutTag *tag1 = gLayoutTags; tag1->name != NULL; ++tag1) {
		AudioChannelLayout layout1 = { tag1->constant, 0, 0 };
		printf("\t'%s' : (", tag1->name + plen);
		int printed = 0;
		for (LayoutTag *tag2 = gLayoutTags; tag2->name != NULL; ++tag2) {
			AudioChannelLayout layout2 = { tag2->constant, 0, 0 };
			AudioChannelLayout *layouts[] = { &layout1, &layout2 };

			UInt32 propertySize;
			OSStatus err = AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelMap, sizeof(layouts), layouts, &propertySize);
			if (err == noErr) {
				SInt32 *map = (SInt32 *)malloc(propertySize);
				err = AudioFormatGetProperty(kAudioFormatProperty_ChannelMap, sizeof(layouts), layouts, &propertySize, map);
				if (err == noErr) {
					if (printed++) printf(", ");
					printf("'%s'", tag2->name + plen);
				}
				free(map);	// the map is only probed for success; don't leak it on every iteration
			}
		}
		printf("),\n");
	}
}
Example #5
bool SFB::Audio::ChannelLayout::MapToLayout(const ChannelLayout& outputLayout, std::vector<SInt32>& channelMap) const
{
	// No valid map exists for empty/unknown layouts
	if(!mChannelLayout || !outputLayout.mChannelLayout)
		return false;

	const AudioChannelLayout *layouts [] = {
		GetACL(),
		outputLayout.GetACL()
	};

	auto outputChannelCount = outputLayout.GetChannelCount();
	if(0 == outputChannelCount)
		return false;

	SInt32 rawChannelMap [outputChannelCount];
	UInt32 propertySize = (UInt32)sizeof(rawChannelMap);
	OSStatus result = AudioFormatGetProperty(kAudioFormatProperty_ChannelMap, sizeof(layouts), (void *)layouts, &propertySize, &rawChannelMap);

	if(noErr != result)
		//LOGGER_ERR("org.sbooth.AudioEngine.ChannelLayout", "AudioFormatGetProperty (kAudioFormatProperty_ChannelMap) failed: " << result);
		return false;

	auto start = (SInt32 *)rawChannelMap;
	channelMap.assign(start, start + outputChannelCount);

	return true;
}
Example #6
bool CCoreAudioChannelLayout::CopyLayout(AudioChannelLayout& layout)
{
  enum {
    kVariableLengthArray_deprecated = 1
  };

  free(m_pLayout);
  m_pLayout = NULL;

  // This method always produces a layout with a ChannelDescriptions structure

  OSStatus ret = 0;
  UInt32 channels = GetChannelCountForLayout(layout);
  UInt32 size = sizeof(AudioChannelLayout) + (channels - kVariableLengthArray_deprecated) * sizeof(AudioChannelDescription);

  if (layout.mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions)
  {
    // We can copy the whole layout
    m_pLayout = (AudioChannelLayout*)malloc(size);
    memcpy(m_pLayout, &layout, size);
  }
  else if (layout.mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap)
  {
    // Deconstruct the bitmap to get the layout
    UInt32 propSize = 0;
    AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelLayoutForBitmap, sizeof(layout.mChannelBitmap), &layout.mChannelBitmap, &propSize);
    m_pLayout = (AudioChannelLayout*)malloc(propSize);
    ret = AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutForBitmap, sizeof(layout.mChannelBitmap), &layout.mChannelBitmap, &propSize, m_pLayout);
    m_pLayout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
  }
  else
  {
    // Convert the known layout to a custom layout
    UInt32 propSize = 0;
    AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelLayoutForTag,
      sizeof(layout.mChannelLayoutTag), &layout.mChannelLayoutTag, &propSize);
    m_pLayout = (AudioChannelLayout*)malloc(propSize);
    ret = AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutForTag,
      sizeof(layout.mChannelLayoutTag), &layout.mChannelLayoutTag, &propSize, m_pLayout);
    m_pLayout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
  }

  return (ret == noErr);
}
Example #7
static AudioChannelLayout *ca_layout_to_custom_layout(struct ao *ao,
        void *talloc_ctx,
        AudioChannelLayout *l)
{
    AudioChannelLayoutTag tag = l->mChannelLayoutTag;
    AudioChannelLayout *r;
    OSStatus err;

    if (tag == kAudioChannelLayoutTag_UseChannelDescriptions)
        return l;

    if (tag == kAudioChannelLayoutTag_UseChannelBitmap) {
        uint32_t psize;
        err = AudioFormatGetPropertyInfo(
                  kAudioFormatProperty_ChannelLayoutForBitmap,
                  sizeof(uint32_t), &l->mChannelBitmap, &psize);
        CHECK_CA_ERROR("failed to convert channel bitmap to descriptions (info)");
        r = talloc_size(talloc_ctx, psize);
        err = AudioFormatGetProperty(
                  kAudioFormatProperty_ChannelLayoutForBitmap,
                  sizeof(uint32_t), &l->mChannelBitmap, &psize, r);
        CHECK_CA_ERROR("failed to convert channel bitmap to descriptions (get)");
    } else {
        uint32_t psize;
        err = AudioFormatGetPropertyInfo(
                  kAudioFormatProperty_ChannelLayoutForTag,
                  sizeof(AudioChannelLayoutTag), &l->mChannelLayoutTag, &psize);
        CHECK_CA_ERROR("failed to convert channel tag to descriptions (info)");
        r = talloc_size(talloc_ctx, psize);
        err = AudioFormatGetProperty(
                  kAudioFormatProperty_ChannelLayoutForTag,
                  sizeof(AudioChannelLayoutTag), &l->mChannelLayoutTag, &psize, r);
        CHECK_CA_ERROR("failed to convert channel tag to descriptions (get)");
    }

    MP_VERBOSE(ao, "converted input channel layout:\n");
    ca_log_layout(ao, MSGL_V, l);

    return r;
coreaudio_error:
    return NULL;
}
Example #8
bool SFB::Audio::Converter::Open(CFErrorRef *error)
{
	if(!mDecoder)
		return false;

	// Open the decoder if necessary
	if(!mDecoder->IsOpen() && !mDecoder->Open(error)) {
		if(error)
			LOGGER_ERR("org.sbooth.AudioEngine.AudioConverter", "Error opening decoder: " << error);

		return false;
	}

	AudioStreamBasicDescription inputFormat = mDecoder->GetFormat();
	OSStatus result = AudioConverterNew(&inputFormat, &mFormat, &mConverter);
	if(noErr != result) {
		LOGGER_ERR("org.sbooth.AudioEngine.AudioConverter", "AudioConverterNewfailed: " << result << "'" << SFB::StringForOSType((OSType)result) << "'");

		if(error)
			*error = CFErrorCreate(kCFAllocatorDefault, kCFErrorDomainOSStatus, result, nullptr);

		return false;
	}

	// TODO: Set kAudioConverterPropertyCalculateInputBufferSize

	mConverterState = std::unique_ptr<ConverterStateData>(new ConverterStateData(*mDecoder));
	mConverterState->AllocateBufferList(BUFFER_SIZE_FRAMES);

	// Create the channel map
	if(mChannelLayout) {
		SInt32 channelMap [ mFormat.mChannelsPerFrame ];
		UInt32 dataSize = (UInt32)sizeof(channelMap);

		const AudioChannelLayout *channelLayouts [] = {
			mDecoder->GetChannelLayout(),
			mChannelLayout
		};

		result = AudioFormatGetProperty(kAudioFormatProperty_ChannelMap, sizeof(channelLayouts), channelLayouts, &dataSize, channelMap);
		if(noErr != result) {
			LOGGER_ERR("org.sbooth.AudioEngine.AudioConverter", "AudioFormatGetProperty (kAudioFormatProperty_ChannelMap) failed: " << result);

			if(error)
				*error = CFErrorCreate(kCFAllocatorDefault, kCFErrorDomainOSStatus, result, nullptr);

			return false;
		}
	}

	mIsOpen = true;
	return true;
}
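
The channel map computed above is validated but never handed to the converter (note the earlier TODO). If the goal is to actually remap channels, a map in this SInt32-per-output-channel form can be applied through the converter's kAudioConverterChannelMap property; a hedged sketch, not part of the SFB sources:

#include <AudioToolbox/AudioToolbox.h>

/* Apply a map produced by kAudioFormatProperty_ChannelMap (one SInt32 per
   output channel; -1 marks an output fed by no input) to a converter. */
static OSStatus apply_channel_map(AudioConverterRef converter,
                                  const SInt32 *map, UInt32 outChannels)
{
    return AudioConverterSetProperty(converter, kAudioConverterChannelMap,
                                     outChannels * (UInt32)sizeof(SInt32), map);
}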
Example #9
void	GetFormatFromInputFile (AudioFileID inputFile, CAStreamBasicDescription & inputFormat)
{
	bool doPrint = true;
	UInt32 size;
	XThrowIfError(AudioFileGetPropertyInfo(inputFile,
                                           kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info");
	UInt32 numFormats = size / sizeof(AudioFormatListItem);
	AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];
    
	XThrowIfError(AudioFileGetProperty(inputFile,
                                       kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format");
	numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it
	if (numFormats == 1) {
        // this is the common case
		inputFormat = formatList[0].mASBD;
	} else {
		if (doPrint) {
			printf ("File has a %d layered data format:\n", (int)numFormats);
			for (unsigned int i = 0; i < numFormats; ++i)
				CAStreamBasicDescription(formatList[i].mASBD).Print();
			printf("\n");
		}
		// now we should look to see which decoders we have on the system
		XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder id's");
		UInt32 numDecoders = size / sizeof(OSType);
		OSType *decoderIDs = new OSType [ numDecoders ];
		XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder id's");
		unsigned int i = 0;
		for (; i < numFormats; ++i) {
			OSType decoderID = formatList[i].mASBD.mFormatID;
			bool found = false;
			for (unsigned int j = 0; j < numDecoders; ++j) {
				if (decoderID == decoderIDs[j]) {
					found = true;
					break;
				}
			}
			if (found) break;
		}
		delete [] decoderIDs;
		
		if (i >= numFormats) {
			fprintf (stderr, "Cannot play any of the formats in this file\n");
			throw kAudioFileUnsupportedDataFormatError;
		}
		inputFormat = formatList[i].mASBD;
	}
	delete [] formatList;
}
Example #10
SFB::CFString SFB::Audio::CoreAudioDecoder::_GetSourceFormatDescription() const
{
	CFStringRef		sourceFormatDescription		= nullptr;
	UInt32			sourceFormatNameSize		= sizeof(sourceFormatDescription);
	OSStatus		result						= AudioFormatGetProperty(kAudioFormatProperty_FormatName,
																		 sizeof(mSourceFormat),
																		 &mSourceFormat,
																		 &sourceFormatNameSize,
																		 &sourceFormatDescription);

	if(noErr != result)
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder", "AudioFormatGetProperty (kAudioFormatProperty_FormatName) failed: " << result << "'" << SFB::StringForOSType((OSType)result) << "'");

	return sourceFormatDescription;
}
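
kAudioFormatProperty_FormatName takes the AudioStreamBasicDescription itself as the specifier and hands back a CFStringRef that the caller must release. A minimal sketch of the same call outside any wrapper class (print_format_name is an illustrative name):

#include <AudioToolbox/AudioToolbox.h>

/* Print the human-readable name of a format. */
static void print_format_name(const AudioStreamBasicDescription *asbd)
{
    CFStringRef name = NULL;
    UInt32 size = sizeof(name);
    if (AudioFormatGetProperty(kAudioFormatProperty_FormatName,
                               sizeof(*asbd), asbd, &size, &name) == noErr) {
        CFShow(name);       /* or CFStringGetCString() for a C string */
        CFRelease(name);    /* the caller owns the returned string */
    }
}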
Example #11
size_t SFB::Audio::ChannelLayout::GetChannelCount() const
{
	if(!mChannelLayout)
		return 0;

	UInt32 channelCount = 0;
	UInt32 propertySize = sizeof(channelCount);
	OSStatus result = AudioFormatGetProperty(kAudioFormatProperty_NumberOfChannelsForLayout, (UInt32)GetACLSize(), (void *)GetACL(), &propertySize, &channelCount);

	if(noErr != result)
		//LOGGER_ERR("org.sbooth.AudioEngine.ChannelLayout", "AudioFormatGetProperty (kAudioFormatProperty_NumberOfChannelsForLayout) failed: " << result);
		return 0;

	return channelCount;
}
Example #12
CFStringRef SFB::Audio::Converter::CreateChannelLayoutDescription() const
{
	if(!IsOpen())
		return nullptr;

	CFStringRef		channelLayoutDescription	= nullptr;
	UInt32			specifierSize				= sizeof(channelLayoutDescription);
	OSStatus		result						= AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutName,
																		 sizeof(mChannelLayout),
																		 mChannelLayout,
																		 &specifierSize,
																		 &channelLayoutDescription);

	if(noErr != result)
		LOGGER_WARNING("org.sbooth.AudioEngine.AudioConverter", "AudioFormatGetProperty (kAudioFormatProperty_ChannelLayoutName) failed: " << result << "'" << SFB::StringForOSType((OSType)result) << "'");

	return channelLayoutDescription;
}
Example #13
CFStringRef SFB::Audio::Decoder::CreateChannelLayoutDescription() const
{
	if(!IsOpen()) {
		LOGGER_INFO("org.sbooth.AudioEngine.Decoder", "CreateChannelLayoutDescription() called on a Decoder that hasn't been opened");
		return nullptr;
	}

	CFStringRef		channelLayoutDescription	= nullptr;
	UInt32			specifierSize				= sizeof(channelLayoutDescription);
	OSStatus		result						= AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutName, 
																		 sizeof(mChannelLayout.GetACL()),
																		 mChannelLayout.GetACL(),
																		 &specifierSize, 
																		 &channelLayoutDescription);

	if(noErr != result)
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder", "AudioFormatGetProperty (kAudioFormatProperty_ChannelLayoutName) failed: " << result << "'" << SFB::StringForOSType((OSType)result) << "'");

	return channelLayoutDescription;
}
Example #14
CFStringRef AudioDecoder::CreateChannelLayoutDescription() const
{
	if(!IsOpen())
		return nullptr;

	CFStringRef		channelLayoutDescription	= nullptr;
	UInt32			specifierSize				= sizeof(channelLayoutDescription);
	OSStatus		result						= AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutName, 
																		 sizeof(mChannelLayout), 
																		 mChannelLayout, 
																		 &specifierSize, 
																		 &channelLayoutDescription);

	if(noErr != result) {
		CFStringRef osType = CreateStringForOSType(result);
		LOGGER_WARNING("org.sbooth.AudioEngine.AudioDecoder", "AudioFormatGetProperty (kAudioFormatProperty_ChannelLayoutName) failed: " << result << osType);
		CFRelease(osType), osType = nullptr;
	}
	
	return channelLayoutDescription;
}
Example #15
CFStringRef AudioDecoder::CreateSourceFormatDescription() const
{
	if(!IsOpen())
		return nullptr;

	CFStringRef		sourceFormatDescription		= nullptr;
	UInt32			sourceFormatNameSize		= sizeof(sourceFormatDescription);
	OSStatus		result						= AudioFormatGetProperty(kAudioFormatProperty_FormatName, 
																		 sizeof(mSourceFormat), 
																		 &mSourceFormat, 
																		 &sourceFormatNameSize, 
																		 &sourceFormatDescription);

	if(noErr != result) {
		CFStringRef osType = CreateStringForOSType(result);
		LOGGER_WARNING("org.sbooth.AudioEngine.AudioDecoder", "AudioFormatGetProperty (kAudioFormatProperty_FormatName) failed: " << result << osType);
		CFRelease(osType), osType = nullptr;
	}
	
	return sourceFormatDescription;
}
Example #16
// _______________________________________________________________________________________
//	Allocates: mIOBufferList, mIOBufferSizePackets, mPacketDescs
//	Dependent on: mFileMaxPacketSize, mIOBufferSizeBytes
void	CAAudioFile::AllocateBuffers(bool okToFail)
{
	LOG_FUNCTION("CAAudioFile::AllocateBuffers", "%p", this);
	if (mFileMaxPacketSize == 0) {
		if (okToFail)
			return;
		XThrowIf(true, kExtAudioFileError_MaxPacketSizeUnknown, "file's maximum packet size is 0");
	}
	UInt32 bufferSizeBytes = mIOBufferSizeBytes = std::max(mIOBufferSizeBytes, mFileMaxPacketSize);
		// must be big enough for at least one maximum size packet

	if (mIOBufferList.mBuffers[0].mDataByteSize != bufferSizeBytes) {
		mIOBufferList.mNumberBuffers = 1;
		mIOBufferList.mBuffers[0].mNumberChannels = mFileDataFormat.mChannelsPerFrame;
		if (!mClientOwnsIOBuffer) {
			//printf("reallocating I/O buffer\n");
			delete[] (Byte *)mIOBufferList.mBuffers[0].mData;
			mIOBufferList.mBuffers[0].mData = new Byte[bufferSizeBytes];
		}
		mIOBufferList.mBuffers[0].mDataByteSize = bufferSizeBytes;
		mIOBufferSizePackets = bufferSizeBytes / mFileMaxPacketSize;
	}

	UInt32 propertySize = sizeof(UInt32);
	UInt32 externallyFramed;
	XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatIsExternallyFramed,
			sizeof(AudioStreamBasicDescription), &mFileDataFormat, &propertySize, &externallyFramed),
			"is format externally framed");
	if (mNumPacketDescs != (externallyFramed ? mIOBufferSizePackets : 0)) {
		delete[] mPacketDescs;
		mPacketDescs = NULL;
		mNumPacketDescs = 0;

		if (externallyFramed) {
			//printf("reallocating packet descs\n");
			mPacketDescs = new AudioStreamPacketDescription[mIOBufferSizePackets];
			mNumPacketDescs = mIOBufferSizePackets;
		}
	}
}
Example #17
CFStringRef AudioDecoder::CreateChannelLayoutDescription() const
{
	if(!IsOpen())
		return NULL;

	CFStringRef		channelLayoutDescription	= NULL;
	UInt32			specifierSize				= sizeof(channelLayoutDescription);
	OSStatus		result						= AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutName, 
																		 sizeof(mChannelLayout), 
																		 mChannelLayout, 
																		 &specifierSize, 
																		 &channelLayoutDescription);

	if(noErr != result) {
		log4cxx::LoggerPtr logger = log4cxx::Logger::getLogger("org.sbooth.AudioEngine.AudioDecoder");
		CFStringRef osType = CreateStringForOSType(result);
		LOG4CXX_WARN(logger, "AudioFormatGetProperty (kAudioFormatProperty_ChannelLayoutName) failed: " << result << osType);
		CFRelease(osType), osType = NULL;
	}
	
	return channelLayoutDescription;
}
Example #18
CoreAudioEncoder::CoreAudioEncoder(AudioConverterX &converter)
    : m_converter(converter),
      m_variable_packet_len(false)
{
    m_converter.getInputStreamDescription(&m_input_desc);
    m_converter.getOutputStreamDescription(&m_output_desc);
    m_stat.setBasicDescription(m_output_desc);
    {
        UInt32 res;
        UInt32 size = sizeof res;
        CHECKCA(AudioFormatGetProperty(
                kAudioFormatProperty_FormatIsExternallyFramed,
                sizeof m_output_desc, &m_output_desc, &size, &res));
        m_requires_packet_desc = !!res;
    }
    {
        AudioBufferList *abl;
        size_t size = offsetof(AudioBufferList, mBuffers[1]);
        abl = static_cast<AudioBufferList*>(std::calloc(1, size));
        abl->mBuffers[0].mNumberChannels = m_output_desc.mChannelsPerFrame;
        abl->mNumberBuffers = 1;
        m_output_abl = std::shared_ptr<AudioBufferList>(abl, std::free);
    }
}
Example #19
bool SFB::Audio::ChannelLayout::operator==(const ChannelLayout& rhs) const
{
	// Two empty channel layouts are considered equivalent
	if(!mChannelLayout && !rhs.mChannelLayout)
		return true;

	if(!mChannelLayout || !rhs.mChannelLayout)
		return false;

	const AudioChannelLayout *layouts [] = {
		rhs.GetACL(),
		GetACL()
	};

	UInt32 layoutsEqual = false;
	UInt32 propertySize = sizeof(layoutsEqual);
	OSStatus result = AudioFormatGetProperty(kAudioFormatProperty_AreChannelLayoutsEquivalent, sizeof(layouts), (void *)layouts, &propertySize, &layoutsEqual);

	if(noErr != result)
		//LOGGER_ERR("org.sbooth.AudioEngine.ChannelLayout", "AudioFormatGetProperty (kAudioFormatProperty_AreChannelLayoutsEquivalent) failed: " << result);
		return false;

	return layoutsEqual;
}
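
The same two-pointer specifier works for the equivalence test on its own; the result is a UInt32 flag rather than an allocated buffer, so no GetPropertyInfo call is needed. A hedged standalone version (layouts_equivalent is an illustrative name):

#include <AudioToolbox/AudioToolbox.h>
#include <stdbool.h>

/* True if Core Audio considers the two layouts interchangeable. */
static bool layouts_equivalent(const AudioChannelLayout *a,
                               const AudioChannelLayout *b)
{
    const AudioChannelLayout *layouts[2] = { a, b };
    UInt32 equal = 0;
    UInt32 size = sizeof(equal);
    return AudioFormatGetProperty(kAudioFormatProperty_AreChannelLayoutsEquivalent,
                                  sizeof(layouts), layouts, &size, &equal) == noErr
           && equal != 0;
}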
Example #20
CAAudioFileFormats::CAAudioFileFormats() : 
	mNumFileFormats(0), mFileFormats(NULL)
{
	OSStatus err;
	UInt32 size;
	UInt32 *fileTypes = NULL, *writableFormats = NULL, *readableFormats = NULL;
	int nWritableFormats, nReadableFormats;
	
	// get all file types
	err = AudioFileGetGlobalInfoSize(kAudioFileGlobalInfo_WritableTypes, 0, NULL, &size);
	if (err != noErr) goto bail;
	mNumFileFormats = size / sizeof(UInt32);
	mFileFormats = new FileFormatInfo[mNumFileFormats];
	fileTypes = new UInt32[mNumFileFormats];
	err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_WritableTypes, 0, NULL, &size, fileTypes);
	if (err != noErr) goto bail;
	
	// get all writable formats
	err = AudioFormatGetPropertyInfo(kAudioFormatProperty_EncodeFormatIDs, 0, NULL, &size);
	if (err != noErr) goto bail;
	nWritableFormats = size / sizeof(UInt32);
	writableFormats = new UInt32[nWritableFormats];
	err = AudioFormatGetProperty(kAudioFormatProperty_EncodeFormatIDs, 0, NULL, &size, writableFormats);
	if (err != noErr) goto bail;
	
	// get all readable formats
	err = AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size);
	if (err != noErr) goto bail;
	nReadableFormats = size / sizeof(UInt32);
	readableFormats = new UInt32[nReadableFormats];
	err = AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, readableFormats);
	if (err != noErr) goto bail;
	
	// get info for each file type
	for (int i = 0; i < mNumFileFormats; ++i) {
		FileFormatInfo *ffi = &mFileFormats[i];
		OSType filetype = fileTypes[i];

		ffi->mFileTypeID = filetype;
		
		// file type name
		ffi->mFileTypeName = NULL;
		size = sizeof(CFStringRef);
		err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_FileTypeName, sizeof(UInt32), &filetype, &size, &ffi->mFileTypeName);
		if (ffi->mFileTypeName)
			CFRetain(ffi->mFileTypeName);
		
		// file extensions
		size = sizeof(CFArrayRef);
		err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_ExtensionsForType,
			sizeof(OSType), &filetype, &size, &ffi->mExtensions);
		if (err)
			ffi->mExtensions = NULL;
		
		// file data formats
		ffi->mNumDataFormats = 0;
		ffi->mDataFormats = NULL;

		err = AudioFileGetGlobalInfoSize(kAudioFileGlobalInfo_AvailableFormatIDs, 
				sizeof(UInt32), &filetype, &size);
		if (err == noErr) {
			ffi->mNumDataFormats = size / sizeof(OSType);
			OSType *formatIDs = new OSType[ffi->mNumDataFormats];
			err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_AvailableFormatIDs,
				sizeof(UInt32), &filetype, &size, formatIDs);
			if (err == noErr) {
				ffi->mDataFormats = new DataFormatInfo[ffi->mNumDataFormats];
				for (int j = 0; j < ffi->mNumDataFormats; ++j) {
					int k;
					bool anyBigEndian = false, anyLittleEndian = false;
					DataFormatInfo *dfi = &ffi->mDataFormats[j];
					dfi->mFormatID = formatIDs[j];
					dfi->mReadable = (dfi->mFormatID == kAudioFormatLinearPCM);
					dfi->mWritable = (dfi->mFormatID == kAudioFormatLinearPCM);
					for (k = 0; k < nReadableFormats; ++k)
						if (readableFormats[k] == dfi->mFormatID) {
							dfi->mReadable = true;
							break;
						}
					for (k = 0; k < nWritableFormats; ++k)
						if (writableFormats[k] == dfi->mFormatID) {
							dfi->mWritable = true;
							break;
						}
					
					dfi->mNumVariants = 0;
					AudioFileTypeAndFormatID tf = { filetype, dfi->mFormatID };
					err = AudioFileGetGlobalInfoSize(kAudioFileGlobalInfo_AvailableStreamDescriptionsForFormat,
						sizeof(AudioFileTypeAndFormatID), &tf, &size);
					if (err == noErr) {
						dfi->mNumVariants = size / sizeof(AudioStreamBasicDescription);
						dfi->mVariants = new AudioStreamBasicDescription[dfi->mNumVariants];
						err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_AvailableStreamDescriptionsForFormat,
							sizeof(AudioFileTypeAndFormatID), &tf, &size, dfi->mVariants);
						if (err) {
							dfi->mNumVariants = 0;
							delete[] dfi->mVariants;
							dfi->mVariants = NULL;
						} else {
							for (k = 0; k < dfi->mNumVariants; ++k) {
								AudioStreamBasicDescription *desc = &dfi->mVariants[k];
								if (desc->mBitsPerChannel > 8) {
									if (desc->mFormatFlags & kAudioFormatFlagIsBigEndian)
										anyBigEndian = true;
									else
										anyLittleEndian = true;
								}
							}
						}
					}
					
					dfi->mEitherEndianPCM = (anyBigEndian && anyLittleEndian);
				}
			}
			delete[] formatIDs;
		}
	}

	// sort file formats by name
	qsort(mFileFormats, mNumFileFormats, sizeof(FileFormatInfo), CompareFileFormatNames);
bail:
	delete[] fileTypes;
	delete[] readableFormats;
	delete[] writableFormats;
}
Example #21
AudioChannelLayout *
gst_core_audio_audio_device_get_channel_layout (AudioDeviceID device_id,
    gboolean output)
{
  OSStatus status = noErr;
  UInt32 propertySize = 0;
  AudioChannelLayout *layout = NULL;
  AudioObjectPropertyScope prop_scope;

  prop_scope = output ? kAudioDevicePropertyScopeOutput :
      kAudioDevicePropertyScopeInput;

  AudioObjectPropertyAddress channelLayoutAddress = {
    kAudioDevicePropertyPreferredChannelLayout,
    prop_scope,
    kAudioObjectPropertyElementMaster
  };

  /* Get the length of the default channel layout structure */
  status = AudioObjectGetPropertyDataSize (device_id,
      &channelLayoutAddress, 0, NULL, &propertySize);
  if (status != noErr) {
    GST_ERROR ("failed to get preferred layout: %d", (int) status);
    goto beach;
  }

  /* Get the default channel layout of the device */
  layout = (AudioChannelLayout *) g_malloc (propertySize);
  status = AudioObjectGetPropertyData (device_id,
      &channelLayoutAddress, 0, NULL, &propertySize, layout);
  if (status != noErr) {
    GST_ERROR ("failed to get preferred layout: %d", (int) status);
    goto failed;
  }

  if (layout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) {
    /* bitmap defined channellayout */
    status =
        AudioFormatGetProperty (kAudioFormatProperty_ChannelLayoutForBitmap,
        sizeof (UInt32), &layout->mChannelBitmap, &propertySize, layout);
    if (status != noErr) {
      GST_ERROR ("failed to get layout for bitmap: %d", (int) status);
      goto failed;
    }
  } else if (layout->mChannelLayoutTag !=
      kAudioChannelLayoutTag_UseChannelDescriptions) {
    /* layouttags defined channellayout */
    status = AudioFormatGetProperty (kAudioFormatProperty_ChannelLayoutForTag,
        sizeof (AudioChannelLayoutTag), &layout->mChannelLayoutTag,
        &propertySize, layout);
    if (status != noErr) {
      GST_ERROR ("failed to get layout for tag: %d", (int) status);
      goto failed;
    }
  }

  gst_core_audio_dump_channel_layout (layout);

beach:
  return layout;

failed:
  g_free (layout);
  return NULL;
}
Example #22
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate, UInt32 outputBitRate)
{
	AudioFileID         sourceFileID = 0;
    AudioFileID         destinationFileID = 0;
    AudioConverterRef   converter = NULL;
    Boolean             canResumeFromInterruption = true; // we can continue unless told otherwise
    
    CAStreamBasicDescription srcFormat, dstFormat;
    AudioFileIO afio = {};
    
    char                         *outputBuffer = NULL;
    AudioStreamPacketDescription *outputPacketDescriptions = NULL;
    
    OSStatus error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing (elided in this excerpt)
    
    printf("\nDoConvertFile\n");
    
    try {
        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, kAudioFileReadPermission, 0, &sourceFileID), "AudioFileOpenURL failed");
	
        // get the source data format
        UInt32 size = sizeof(srcFormat);
        XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyDataFormat, &size, &srcFormat), "couldn't get source data format");
        
        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if the output format is PCM, create a 16-bit int PCM file format description as an example
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("Source File format: "); srcFormat.Print();
        printf("Destination format: "); dstFormat.Print();
	
        // create the AudioConverter
        
        XThrowIfError(AudioConverterNew(&srcFormat, &dstFormat, &converter), "AudioConverterNew failed!");
    
        // if the source has a cookie, get it and set it on the Audio Converter
        ReadCookie(sourceFileID, converter);

        // get the actual formats back from the Audio Converter
        size = sizeof(srcFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentInputStreamDescription, &size, &srcFormat), "AudioConverterGetProperty kAudioConverterCurrentInputStreamDescription failed!");

        size = sizeof(dstFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentOutputStreamDescription, &size, &dstFormat), "AudioConverterGetProperty kAudioConverterCurrentOutputStreamDescription failed!");

        printf("Formats returned from AudioConverter:\n");
        printf("              Source format: "); srcFormat.Print();
        printf("    Destination File format: "); dstFormat.Print();
        
        // if encoding to AAC set the bitrate
        // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
        // when you explicitly set the bit rate and the sample rate, this tells the encoder to stick with both bit rate and sample rate
        //     but there are combinations (also depending on the number of channels) which will not be allowed
        // if you do not explicitly set a bit rate the encoder will pick the correct value for you depending on samplerate and number of channels
        // bit rate also scales with the number of channels, therefore one bit rate per sample rate can be used for mono cases
        //    and if you have stereo or more, you can multiply that number by the number of channels.
        
        if (outputBitRate == 0) {
            outputBitRate = 192000; // 192kbs
        }
        
        if (dstFormat.mFormatID == kAudioFormatMPEG4AAC) {
            
            UInt32 propSize = sizeof(outputBitRate);
            
            // set the bit rate depending on the samplerate chosen
            XThrowIfError(AudioConverterSetProperty(converter, kAudioConverterEncodeBitRate, propSize, &outputBitRate),
                           "AudioConverterSetProperty kAudioConverterEncodeBitRate failed!");
            
            // get it back and print it out
            AudioConverterGetProperty(converter, kAudioConverterEncodeBitRate, &propSize, &outputBitRate);
            printf ("AAC Encode Bitrate: %u\n", (unsigned int)outputBitRate);
        }

        // can the Audio Converter resume conversion after an interruption?
        // this property may be queried at any time after construction of the Audio Converter after setting its output format
        // there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(converter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported - see comments in source for more info.\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld, paramErr is OK if PCM\n", error);
            }
            
            error = noErr;
        }
        
        // create the destination file 
        XThrowIfError(AudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, kAudioFileFlags_EraseFile, &destinationFileID), "AudioFileCreateWithURL failed!");

        // set up source buffers and data proc info struct
        afio.srcFileID = sourceFileID;
        afio.srcBufferSize = 32768;
        afio.srcBuffer = new char [afio.srcBufferSize];
        afio.srcFilePos = 0;
        afio.srcFormat = srcFormat;
		
        if (srcFormat.mBytesPerPacket == 0) {
            // if the source format is VBR, we need to get the maximum packet size
            // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
            // in the file (without actually scanning the whole file to find the largest packet,
            // as may happen with kAudioFilePropertyMaximumPacketSize)
            size = sizeof(afio.srcSizePerPacket);
            XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyPacketSizeUpperBound, &size, &afio.srcSizePerPacket), "AudioFileGetProperty kAudioFilePropertyPacketSizeUpperBound failed!");
            
            // how many packets can we read for our buffer size?
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            afio.packetDescriptions = new AudioStreamPacketDescription [afio.numPacketsPerRead];
        } else {
            // CBR source format
            afio.srcSizePerPacket = srcFormat.mBytesPerPacket;
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            afio.packetDescriptions = NULL;
        }

        // set up output buffers
        UInt32 outputSizePerPacket = dstFormat.mBytesPerPacket; // this will be non-zero if the format is CBR
        UInt32 theOutputBufSize = 32768;
        outputBuffer = new char[theOutputBufSize];
        
        if (outputSizePerPacket == 0) {
            // if the destination format is VBR, we need to get max size per packet from the converter
            size = sizeof(outputSizePerPacket);
            XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterPropertyMaximumOutputPacketSize, &size, &outputSizePerPacket), "AudioConverterGetProperty kAudioConverterPropertyMaximumOutputPacketSize failed!");
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
        }
        UInt32 numOutputPackets = theOutputBufSize / outputSizePerPacket;

        // if the destination format has a cookie, get it and set it on the output file
        WriteCookie(converter, destinationFileID);

        // write destination channel layout
        if (srcFormat.mChannelsPerFrame > 2) {
            WriteDestinationChannelLayout(converter, sourceFileID, destinationFileID);
        }

        UInt64 totalOutputFrames = 0; // used for debugging printf
        SInt64 outputFilePos = 0;
        
        // loop to convert data
        printf("Converting...\n");
        while (1) {

            // set up output buffer list
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = dstFormat.mChannelsPerFrame;
            fillBufList.mBuffers[0].mDataByteSize = theOutputBufSize;
            fillBufList.mBuffers[0].mData = outputBuffer;
            
            // this would block if we were interrupted; interruption handling is stubbed out in this excerpt
            Boolean wasInterrupted = NO;
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occurred but the Audio Converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            // convert data
            UInt32 ioOutputDataPackets = numOutputPackets;
            printf("AudioConverterFillComplexBuffer...\n");
            error = AudioConverterFillComplexBuffer(converter, EncoderDataProc, &afio, &ioOutputDataPackets, &fillBufList, outputPacketDescriptions);
            // if interrupted in the process of the conversion call, we must handle the error appropriately
            if (error) {
                if (kAudioConverterErr_HardwareInUse == error) {
                     printf("Audio Converter returned kAudioConverterErr_HardwareInUse!\n");
                } else {
                    XThrowIfError(error, "AudioConverterFillComplexBuffer error!");
                }
            } else {
                if (ioOutputDataPackets == 0) {
                    // this is the EOF condition
                    error = noErr;
                    break;
                }
            }
            
            if (noErr == error) {
                // write to output file
                UInt32 inNumBytes = fillBufList.mBuffers[0].mDataByteSize;
                XThrowIfError(AudioFileWritePackets(destinationFileID, false, inNumBytes, outputPacketDescriptions, outputFilePos, &ioOutputDataPackets, outputBuffer), "AudioFileWritePackets failed!");
            
                printf("Convert Output: Write %lu packets at position %lld, size: %ld\n", ioOutputDataPackets, outputFilePos, inNumBytes);
                
                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.mFramesPerPacket) { 
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.mFramesPerPacket);
                } else if (outputPacketDescriptions != NULL) {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (UInt32 i = 0; i < ioOutputDataPackets; ++i)
                        totalOutputFrames += outputPacketDescriptions[i].mVariableFramesInPacket;
                }
            }
        } // while

        if (noErr == error) {
            // write out any of the leading and trailing frames for compressed formats only
            if (dstFormat.mBitsPerChannel == 0) {
                // our output frame count should jive with
                printf("Total number of output frames counted: %lld\n", totalOutputFrames); 
                WritePacketTableInfo(converter, destinationFileID);
            }
        
            // write the cookie again - sometimes codecs will update cookies at the end of a conversion
            WriteCookie(converter, destinationFileID);
        }
    }
    catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // cleanup
    if (converter) AudioConverterDispose(converter);
    if (destinationFileID) AudioFileClose(destinationFileID);
	if (sourceFileID) AudioFileClose(sourceFileID);
    
    if (afio.srcBuffer) delete [] afio.srcBuffer;
    if (afio.packetDescriptions) delete [] afio.packetDescriptions;
    if (outputBuffer) delete [] outputBuffer;
    if (outputPacketDescriptions) delete [] outputPacketDescriptions;
    
    
    return error;
}
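
The conversion examples from here on all lean on kAudioFormatProperty_FormatInfo to complete a partially filled ASBD: set at least mFormatID, mSampleRate and mChannelsPerFrame, and the property fills in the remaining fields for a compressed format. A hedged standalone sketch (make_aac_format is an illustrative name):

#include <AudioToolbox/AudioToolbox.h>

/* Fill in the unset fields of a compressed-format description, e.g. AAC. */
static OSStatus make_aac_format(Float64 sampleRate, UInt32 channels,
                                AudioStreamBasicDescription *outDesc)
{
    AudioStreamBasicDescription desc = {0};
    desc.mFormatID         = kAudioFormatMPEG4AAC;
    desc.mSampleRate       = sampleRate;
    desc.mChannelsPerFrame = channels;

    UInt32 size = sizeof(desc);
    OSStatus err = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                          0, NULL, &size, &desc);
    if (err == noErr)
        *outDesc = desc;
    return err;
}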
Example #23
void WriteOutputFile (const char*	outputFilePath, 
					OSType			dataFormat, 
					Float64			srate, 
					MusicTimeStamp	sequenceLength, 
					bool			shouldPrint,
					AUGraph			inGraph,
					UInt32			numFrames,
					MusicPlayer		player)
{
		// delete existing output file
	TestFile (outputFilePath, true);
	OSStatus result = 0;
	UInt32 size;

	CAStreamBasicDescription outputFormat;
	outputFormat.mChannelsPerFrame = 2;
	outputFormat.mSampleRate = srate;
	outputFormat.mFormatID = dataFormat;
	
	AudioFileTypeID destFileType;
	CAAudioFileFormats::Instance()->InferFileFormatFromFilename (outputFilePath, destFileType);
	
	if (dataFormat == kAudioFormatLinearPCM) {
		outputFormat.mBytesPerPacket = outputFormat.mChannelsPerFrame * 2;
		outputFormat.mFramesPerPacket = 1;
		outputFormat.mBytesPerFrame = outputFormat.mBytesPerPacket;
		outputFormat.mBitsPerChannel = 16;
		
		if (destFileType == kAudioFileWAVEType)
			outputFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
								| kLinearPCMFormatFlagIsPacked;
		else
			outputFormat.mFormatFlags = kLinearPCMFormatFlagIsBigEndian
								| kLinearPCMFormatFlagIsSignedInteger
								| kLinearPCMFormatFlagIsPacked;
	} else {
		// use AudioFormat API to fill out the rest.
		size = sizeof(outputFormat);
		require_noerr (result = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat), fail);
	}

	if (shouldPrint) {
		printf ("Writing to file: %s with format:\n* ", outputFilePath);
		outputFormat.Print();
	}
	
	FSRef parentDir;
	CFStringRef destFileName;
	require_noerr (result = PosixPathToParentFSRefAndName(outputFilePath, parentDir, destFileName), fail);

	ExtAudioFileRef outfile;
	result = ExtAudioFileCreateNew (&parentDir, destFileName, destFileType, &outputFormat, NULL, &outfile);
	CFRelease (destFileName);
	require_noerr (result, fail);

	AudioUnit outputUnit;	
	UInt32 nodeCount;
	require_noerr (result = AUGraphGetNodeCount (inGraph, &nodeCount), fail);
	
	for (UInt32 i = 0; i < nodeCount; ++i) 
	{
		AUNode node;
		require_noerr (result = AUGraphGetIndNode(inGraph, i, &node), fail);

		ComponentDescription desc;
		require_noerr (result = AUGraphNodeInfo(inGraph, node, &desc, NULL), fail);
		
		if (desc.componentType == kAudioUnitType_Output) 
		{
			require_noerr (result = AUGraphNodeInfo(inGraph, node, 0, &outputUnit), fail);
			break;
		}
	}

	{
		CAStreamBasicDescription clientFormat;
		size = sizeof(clientFormat);
		require_noerr (result = AudioUnitGetProperty (outputUnit,
													kAudioUnitProperty_StreamFormat,
													kAudioUnitScope_Output, 0,
													&clientFormat, &size), fail);
		size = sizeof(clientFormat);
		require_noerr (result = ExtAudioFileSetProperty(outfile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), fail);
		
		{
			MusicTimeStamp currentTime;
			AUOutputBL outputBuffer (clientFormat, numFrames);
			AudioTimeStamp tStamp;
			memset (&tStamp, 0, sizeof(AudioTimeStamp));
			tStamp.mFlags = kAudioTimeStampSampleTimeValid;
			int i = 0;
			int numTimesFor10Secs = (int)(10. / (numFrames / srate));
			do {
				outputBuffer.Prepare();
				AudioUnitRenderActionFlags actionFlags = 0;
				require_noerr (result = AudioUnitRender (outputUnit, &actionFlags, &tStamp, 0, numFrames, outputBuffer.ABL()), fail);

				tStamp.mSampleTime += numFrames;
				
				require_noerr (result = ExtAudioFileWrite(outfile, numFrames, outputBuffer.ABL()), fail);	

				require_noerr (result = MusicPlayerGetTime (player, &currentTime), fail);
				if (shouldPrint && (++i % numTimesFor10Secs == 0))
					printf ("current time: %6.2f beats\n", currentTime);
			} while (currentTime < sequenceLength);
		}
	}
	
// close
	ExtAudioFileDispose(outfile);

	return;

fail:
	printf ("Problem: %ld\n", result); 
	exit(1);
}
Example #24
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate) 
{
    ExtAudioFileRef sourceFile = 0;
    ExtAudioFileRef destinationFile = 0;
    Boolean         canResumeFromInterruption = true; // we can continue unless told otherwise
    OSStatus        error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    ThreadStateSetRunning();
    
    printf("DoConvertFile\n");
    
	try {
        CAStreamBasicDescription srcFormat, dstFormat;

        // open the source file
        XThrowIfError(ExtAudioFileOpenURL(sourceURL, &sourceFile), "ExtAudioFileOpenURL failed");
			
        // get the source data format
		UInt32 size = sizeof(srcFormat);
		XThrowIfError(ExtAudioFileGetProperty(sourceFile, kExtAudioFileProperty_FileDataFormat, &size, &srcFormat), "couldn't get source data format");
		
		printf("\nSource file format: "); srcFormat.Print();

        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if PCM was selected as the destination format, create a 16-bit int PCM file format description
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("\nDestination file format: "); dstFormat.Print();
        
        // create the destination file 
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, NULL, kAudioFileFlags_EraseFile, &destinationFile), "ExtAudioFileCreateWithURL failed!");

        // set the client format - The format must be linear PCM (kAudioFormatLinearPCM)
        // You must set this in order to encode or decode a non-PCM file data format
        // You may set this on PCM files to specify the data format used in your calls to read/write
        CAStreamBasicDescription clientFormat;
        if (outputFormat == kAudioFormatLinearPCM) {
            clientFormat = dstFormat;
        } else {
            clientFormat.SetCanonical(srcFormat.NumberChannels(), true);
            clientFormat.mSampleRate = srcFormat.mSampleRate;
        }
        
        printf("\nClient data format: "); clientFormat.Print();
        printf("\n");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(sourceFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set source client format");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(destinationFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set destination client format");

        // can the audio converter (which in this case is owned by an ExtAudioFile object) resume conversion after an interruption?
        AudioConverterRef audioConverter;
                    
        size = sizeof(audioConverter);
        XThrowIfError(ExtAudioFileGetProperty(destinationFile, kExtAudioFileProperty_AudioConverter, &size, &audioConverter), "Couldn't get Audio Converter!");
        
        // this property may be queried at any time after construction of the audio converter (which in this case is owned by an ExtAudioFile object)
        // after setting the output format -- there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(audioConverter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported!\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld\n", error);
            }
            
            error = noErr;
        }
        
        // set up buffers
        UInt32 bufferByteSize = 32768;
        char srcBuffer[bufferByteSize];
        
        // keep track of the source file offset so we know where to reset the source for
        // reading if interrupted and input was not consumed by the audio converter
        SInt64 sourceFrameOffset = 0;
        
        //***** do the read and write - the conversion is done on and by the write call *****//
        printf("Converting...\n");
        while (1) {
        
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = clientFormat.NumberChannels();
            fillBufList.mBuffers[0].mDataByteSize = bufferByteSize;
            fillBufList.mBuffers[0].mData = srcBuffer;
                
            // client format is always linear PCM - so here we determine how many frames of lpcm
            // we can read/write given our buffer size
            UInt32 numFrames;
            if (clientFormat.mBytesPerFrame > 0) // rids bogus analyzer div by zero warning mBytesPerFrame can't be 0 and is protected by an Assert
                numFrames = clientFormat.BytesToFrames(bufferByteSize); // (bufferByteSize / clientFormat.mBytesPerFrame);

            XThrowIfError(ExtAudioFileRead(sourceFile, &numFrames, &fillBufList), "ExtAudioFileRead failed!");	
            if (!numFrames) {
                // this is our termination condition
                error = noErr;
                break;
            }
            sourceFrameOffset += numFrames;
            
            // this will block if we're interrupted
            Boolean wasInterrupted = ThreadStatePausedCheck();
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occurred but the audio converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            error = ExtAudioFileWrite(destinationFile, numFrames, &fillBufList);
            // if interrupted in the process of the write call, we must handle the errors appropriately
            if (error) {
                if (kExtAudioFileError_CodecUnavailableInputConsumed == error) {
                
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputConsumed error %ld\n", error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was successfully
                        consumed and you may proceed to the next buffer
                    */
                    
                } else if (kExtAudioFileError_CodecUnavailableInputNotConsumed == error) {
                
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputNotConsumed error %ld\n", error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was not
                        successfully consumed and you must try to write it again
                    */
                    
                    // seek back to last offset before last read so we can try again after the interruption
                    sourceFrameOffset -= numFrames;
                    XThrowIfError(ExtAudioFileSeek(sourceFile, sourceFrameOffset), "ExtAudioFileSeek failed!");
                    
                } else {
                    XThrowIfError(error, "ExtAudioFileWrite error!");
                }
            } // if
        } // while
	}
    catch (CAXException &e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // close
    if (destinationFile) ExtAudioFileDispose(destinationFile);
    if (sourceFile) ExtAudioFileDispose(sourceFile);

    // transition thread state to kStateDone before continuing
    ThreadStateSetDone();
    
    return error;
}
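A note on the canResumeFromInterruption flag tested above: it is typically set by asking the converter underneath the ExtAudioFile whether it supports resuming. A minimal sketch of that query, assuming destinationFile has already been created (the two property calls are standard AudioToolbox API; the surrounding wiring is illustrative):

// ask the converter behind an ExtAudioFile whether it can resume
// after an audio session interruption
AudioConverterRef converter = NULL;
UInt32 size = sizeof(converter);
if (ExtAudioFileGetProperty(destinationFile, kExtAudioFileProperty_AudioConverter,
                            &size, &converter) == noErr && converter) {
    UInt32 canResume = 0;
    size = sizeof(canResume);
    // the property is optional -- codecs that don't implement it
    // should be treated as unable to resume
    if (AudioConverterGetProperty(converter, kAudioConverterPropertyCanResumeFromInterruption,
                                  &size, &canResume) == noErr)
        canResumeFromInterruption = (canResume != 0);
}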
Example #25
void	CAAudioFileConverter::ConvertFile(const ConversionParameters &_params)
{
	FSRef destFSRef;
	UInt32 propertySize;
	CAStreamBasicDescription destFormat;
	CAAudioChannelLayout origSrcFileLayout, srcFileLayout, destFileLayout;
	bool openedSourceFile = false, createdOutputFile = false;
	
	mParams = _params;
	mReadBuffer = NULL;
	mReadPtrs = NULL;
	CABufferList *writeBuffer = NULL;
	CABufferList *writePtrs = NULL;
	
	PrepareConversion();

	try {
		if (TaggedDecodingFromCAF())
			ReadCAFInfo();
		OpenInputFile();
		openedSourceFile = true;
		
		// get input file's format
		const CAStreamBasicDescription &srcFormat = mSrcFile.GetFileDataFormat();
		if (mParams.flags & kOpt_Verbose) {
			printf("Input file: %s, %qd frames\n", mParams.input.filePath ? basename(mParams.input.filePath) : "?", 
				mSrcFile.GetNumberFrames());
		}
		mSrcFormat = srcFormat;
		
		// prepare output file's format
		destFormat = mParams.output.dataFormat;

		bool encoding = !destFormat.IsPCM();
		bool decoding = !srcFormat.IsPCM();

		if (!encoding && destFormat.mSampleRate == 0.)
			// on encode, it's OK to have a 0 sample rate; ExtAudioFile will get the SR from the converter and set it on the file.
			// on decode or PCM->PCM, a sample rate of 0 is interpreted as using the source sample rate
			destFormat.mSampleRate = srcFormat.mSampleRate;
		
		// source channel layout
		srcFileLayout = mSrcFile.GetFileChannelLayout();
		origSrcFileLayout = srcFileLayout;
		if (mParams.input.channelLayoutTag != 0) {
			XThrowIf(AudioChannelLayoutTag_GetNumberOfChannels(mParams.input.channelLayoutTag)
				!= srcFormat.mChannelsPerFrame, -1, "input channel layout has wrong number of channels for file");
			srcFileLayout = CAAudioChannelLayout(mParams.input.channelLayoutTag);
			mSrcFile.SetFileChannelLayout(srcFileLayout);
		}
		
		// destination channel layout
		int outChannels = mParams.output.channels;
		if (mParams.output.channelLayoutTag != 0) {
			// use the one specified by caller, if any
			destFileLayout = CAAudioChannelLayout(mParams.output.channelLayoutTag);
		} else if (srcFileLayout.IsValid()) {
			// otherwise, assume the same as the source, if any
			destFileLayout = srcFileLayout;
		}
		if (destFileLayout.IsValid()) {
			// the output channel layout specifies the number of output channels
			if (outChannels != -1)
				XThrowIf((unsigned)outChannels != destFileLayout.NumberChannels(), -1,
					"output channel layout has wrong number of channels");
			else
				outChannels = destFileLayout.NumberChannels();
		}

		if (!(mParams.flags & kOpt_NoSanitizeOutputFormat)) {
			// adjust the output format's channel count; output.channels, if set, overrides it
			if (outChannels == -1)
				outChannels = srcFormat.mChannelsPerFrame;
			if (outChannels > 0) {
				destFormat.mChannelsPerFrame = outChannels;
				destFormat.mBytesPerPacket *= outChannels;
				destFormat.mBytesPerFrame *= outChannels;
			}
		
			// use AudioFormat API to clean up the output format
			propertySize = sizeof(AudioStreamBasicDescription);
			XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &propertySize, &destFormat),
					"get destination format info");
		}
		OpenOutputFile(srcFormat, destFormat, destFSRef, destFileLayout);
		createdOutputFile = true;
		mDestFormat = destFormat;
		
		// set up client formats
		CAStreamBasicDescription srcClientFormat, destClientFormat;
		{
			CAAudioChannelLayout srcClientLayout, destClientLayout;
			
			if (encoding) {
				if (decoding) {
					// transcoding
//					XThrowIf(encoding && decoding, -1, "transcoding not currently supported");
					
					if (srcFormat.mChannelsPerFrame > 2 || destFormat.mChannelsPerFrame > 2)
						CAXException::Warning("Transcoding multichannel audio may not handle channel layouts correctly", 0);
					srcClientFormat.SetCanonical(std::min(srcFormat.mChannelsPerFrame, destFormat.mChannelsPerFrame), true);
					srcClientFormat.mSampleRate = std::max(srcFormat.mSampleRate, destFormat.mSampleRate);
					mSrcFile.SetClientFormat(srcClientFormat, NULL);
					
					destClientFormat = srcClientFormat;
				} else {
					// encoding
					srcClientFormat = srcFormat;
					destClientFormat = srcFormat;
				}
				// by here, destClientFormat will have a valid sample rate
				destClientLayout = srcFileLayout.IsValid() ? srcFileLayout : destFileLayout;

				mDestFile.SetClientFormat(destClientFormat, &destClientLayout);
			} else {
				// decoding or PCM->PCM
				if (destFormat.mSampleRate == 0.)
					destFormat.mSampleRate = srcFormat.mSampleRate;
		
				destClientFormat = destFormat;
				srcClientFormat = destFormat;
				srcClientLayout = destFileLayout;
				
				mSrcFile.SetClientFormat(srcClientFormat, &srcClientLayout);
			}
		}
		
		XThrowIf(srcClientFormat.mBytesPerPacket == 0, -1, "source client format not PCM"); 
		XThrowIf(destClientFormat.mBytesPerPacket == 0, -1, "dest client format not PCM"); 		
		if (encoding) {
			// set the bitrate
			if (mParams.output.bitRate != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("bitrate = %ld\n", mParams.output.bitRate);
				mDestFile.SetConverterProperty(kAudioConverterEncodeBitRate, sizeof(UInt32), &mParams.output.bitRate);
			}

			// set the codec quality
			if (mParams.output.codecQuality != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("codec quality = %ld\n", mParams.output.codecQuality);
				mDestFile.SetConverterProperty(kAudioConverterCodecQuality, sizeof(UInt32), &mParams.output.codecQuality);
			}

			// set the bitrate strategy -- the codec property is still called "bit rate format"
			// because that API had already shipped when the "strategy" terminology arrived
			if (mParams.output.strategy != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("strategy = %ld\n", mParams.output.strategy);
				mDestFile.SetConverterProperty(kAudioCodecBitRateFormat, sizeof(UInt32), &mParams.output.strategy);
			}
		}
		// set the SRC quality
		if (mParams.output.srcQuality != -1) {
			if (srcFormat.mSampleRate != 0. && destFormat.mSampleRate != 0. && srcFormat.mSampleRate != destFormat.mSampleRate) {
				if (mParams.flags & kOpt_Verbose)
					printf("SRC quality = %ld\n", mParams.output.srcQuality);
				if (encoding)
					mDestFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
				else
					mSrcFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
			}
		}
		if (decoding) {
			if (mParams.output.primeMethod != -1)
				mSrcFile.SetConverterProperty(kAudioConverterPrimeMethod, sizeof(UInt32), &mParams.output.primeMethod);
		}

		PrintFormats(&origSrcFileLayout);

		// prepare I/O buffers
		UInt32 bytesToRead = 0x10000;
		UInt32 framesToRead = bytesToRead;	// OK, ReadPackets will limit as appropriate
		ComputeReadSize(srcFormat, destFormat, bytesToRead, framesToRead);

//		const SInt64 totalFrames = mSrcFile.GetNumberFrames();
//#warning "GetNumberFrames() can be prohibitively slow for some formats"
		
		mReadBuffer = CABufferList::New("readbuf", srcClientFormat);
		mReadBuffer->AllocateBuffers(bytesToRead);
		mReadPtrs = CABufferList::New("readptrs", srcClientFormat);
		
		BeginConversion();
		
		while (true) {
			//XThrowIf(Progress(mSrcFile.Tell(), totalFrames), userCanceledErr, "user stopped");
				// this was commented out for a while -- performance? make it optional?
			UInt32 nFrames = framesToRead;
			mReadPtrs->SetFrom(mReadBuffer);
			AudioBufferList *readbuf = &mReadPtrs->GetModifiableBufferList();
			
			mSrcFile.Read(nFrames, readbuf);
			//printf("read %ld of %ld frames\n", nFrames, framesToRead);
			if (nFrames == 0)
				break;

			mDestFile.Write(nFrames, readbuf);
			if (ShouldTerminateConversion())
				break;
		}
		
		if (decoding) {
			// fix up the destination file's length if necessary and possible
			SInt64 nframes = mSrcFile.GetNumberFrames();
			if (nframes != 0) {
				// only shorten, don't try to lengthen
				nframes = SInt64(ceil(nframes * destFormat.mSampleRate / srcFormat.mSampleRate));
				if (nframes < mDestFile.GetNumberFrames()) {
					mDestFile.SetNumberFrames(nframes);
				}
			}
		}
		EndConversion();
	}
	catch (...) {
		delete mReadBuffer;
		delete mReadPtrs;
		delete writeBuffer;
		delete writePtrs;
		if (!createdOutputFile)
			PrintFormats(&origSrcFileLayout);
		try { mSrcFile.Close(); } catch (...) { }
		try { mDestFile.Close(); } catch (...) { }
		if (createdOutputFile)
			unlink(mOutName);
		throw;
	}
	delete mReadBuffer;
	delete mReadPtrs;
	delete writeBuffer;
	delete writePtrs;
	mSrcFile.Close();
	mDestFile.Close();
	if (TaggedEncodingToCAF())
		WriteCAFInfo();
	
	if (mParams.flags & kOpt_Verbose) {
		// the file must be closed to flush the encoder, and GetNumberFrames() is not
		// necessarily valid until afterwards -- so reopen the output file just to report its length
		CAAudioFile temp;
		FSRef destFSRef;
		if (FSPathMakeRef((UInt8 *)mOutName, &destFSRef, NULL) == noErr) {
			temp.Open(destFSRef);
			printf("Output file: %s, %qd frames\n", basename(mOutName), temp.GetNumberFrames());
		}
	}
}
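The kAudioFormatProperty_FormatInfo call used in the sanitize step above is the central idiom of these examples: fill in the ASBD fields you know and let CoreAudio complete the rest. A standalone sketch in plain C, without the CA* helper classes (the function name is illustrative):

#include <AudioToolbox/AudioToolbox.h>

// complete a partially filled ASBD for a compressed format
static OSStatus SanitizeASBD(AudioStreamBasicDescription *asbd)
{
    UInt32 size = sizeof(*asbd);
    // given mFormatID (and optionally mSampleRate / mChannelsPerFrame),
    // this fills in mFramesPerPacket, mBytesPerPacket and friends
    return AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                  0, NULL, &size, asbd);
}

Typical use: zero the struct, set mFormatID to kAudioFormatMPEG4AAC plus a sample rate and channel count, then call the helper; the returned description comes back with the packet layout filled in.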
Example #26
int main (int argc, const char * argv[]) 
{
#if TARGET_OS_WIN32
	InitializeQTML(0L);
#endif
	const char *fpath = NULL;
	Float32 volume = 1;
	Float32 duration = -1;
	Float32 currentTime = 0.0;
	Float32 rate = 0;
	int rQuality = 0;
	
	bool doPrint = false;
	for (int i = 1; i < argc; ++i) {
		const char *arg = argv[i];
		if (arg[0] != '-') {
			if (fpath != NULL) {
				fprintf(stderr, "may only specify one file to play\n");
				usage();
			}
			fpath = arg;
		} else {
			arg += 1;
			if (arg[0] == 'v' || !strcmp(arg, "-volume")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];
				sscanf(arg, "%f", &volume);
			} else if (arg[0] == 't' || !strcmp(arg, "-time")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%f", &duration);
			} else if (arg[0] == 'r' || !strcmp(arg, "-rate")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%f", &rate);
			} else if (arg[0] == 'q' || !strcmp(arg, "-rQuality")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%d", &rQuality);
			} else if (arg[0] == 'h' || !strcmp(arg, "-help")) {
				usage();
			} else if (arg[0] == 'd' || !strcmp(arg, "-debug")) {
				doPrint = true;
			} else {
				fprintf(stderr, "unknown argument: %s\n\n", arg - 1);
				usage();
			}
		}
	}

	if (fpath == NULL)
		usage();
	
	if (doPrint)
		printf ("Playing file: %s\n", fpath);
	
	try {
		AQTestInfo myInfo;
		
		CFURLRef sndFile = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)fpath, strlen(fpath), false);
		XThrowIfError (!sndFile, "can't parse file path");
			
		OSStatus result = AudioFileOpenURL (sndFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile);
		CFRelease (sndFile);
						
		XThrowIfError(result, "AudioFileOpen failed");
		
		UInt32 size;
		XThrowIfError(AudioFileGetPropertyInfo(myInfo.mAudioFile, 
									kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info");
		UInt32 numFormats = size / sizeof(AudioFormatListItem);
		AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];

		XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, 
									kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format");
		numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it
		if (numFormats == 1) {
				// this is the common case
			myInfo.mDataFormat = formatList[0].mASBD;
			
				// see if there is a channel layout (multichannel file)
			result = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, NULL);
			if (result == noErr && myInfo.mChannelLayoutSize > 0) {
				myInfo.mChannelLayout = (AudioChannelLayout *)new char [myInfo.mChannelLayoutSize];
				XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, myInfo.mChannelLayout), "get audio file's channel layout");
			}
		} else {
			if (doPrint) {
				printf ("File has a %d layered data format:\n", (int)numFormats);
				for (unsigned int i = 0; i < numFormats; ++i)
					CAStreamBasicDescription(formatList[i].mASBD).Print();
			}
			// now we should look to see which decoders we have on the system
			XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder id's");
			UInt32 numDecoders = size / sizeof(OSType);
			OSType *decoderIDs = new OSType [ numDecoders ];
			XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder id's");			
			unsigned int i = 0;
			for (; i < numFormats; ++i) {
				OSType decoderID = formatList[i].mASBD.mFormatID;
				bool found = false;
				for (unsigned int j = 0; j < numDecoders; ++j) {
					if (decoderID == decoderIDs[j]) {
						found = true;
						break;
					}
				}
				if (found) break;
			}
			delete [] decoderIDs;
			
			if (i >= numFormats) {
				fprintf (stderr, "Cannot play any of the formats in this file\n");
				throw kAudioFileUnsupportedDataFormatError;
			}
			myInfo.mDataFormat = formatList[i].mASBD;
			myInfo.mChannelLayoutSize = sizeof(AudioChannelLayout);
			myInfo.mChannelLayout = (AudioChannelLayout*)new char [myInfo.mChannelLayoutSize];
			myInfo.mChannelLayout->mChannelLayoutTag = formatList[i].mChannelLayoutTag;
			myInfo.mChannelLayout->mChannelBitmap = 0;
			myInfo.mChannelLayout->mNumberChannelDescriptions = 0;
		}
		delete [] formatList;
		
		if (doPrint) {
			printf ("Playing format: "); 
			myInfo.mDataFormat.Print();
		}
		
		XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat, AQTestBufferCallback, &myInfo, 
									CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &myInfo.mQueue), "AudioQueueNew failed");

		UInt32 bufferByteSize;		
		// we need to calculate how many packets we read at a time, and how big a buffer we need
		// we base this on the size of the packets in the file and an approximate duration for each buffer
		{
			bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);
			
			// first check to see what the max size of a packet is - if it is bigger
			// than our allocation default size, that needs to become larger
			UInt32 maxPacketSize;
			size = sizeof(maxPacketSize);
			XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, 
									kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");
			
			// adjust buffer size to represent about a half second of audio based on this format
			CalculateBytesForTime (myInfo.mDataFormat, maxPacketSize, 0.5/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);
			
			if (isFormatVBR)
				myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
			else
				myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
				
			if (doPrint)
				printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
		}

		// if the file has a cookie, we should get it and set it on the AQ
		size = sizeof(UInt32);
		result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

		if (!result && size) {
			char* cookie = new char [size];		
			XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
			XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
			delete [] cookie;
		}

		// set ACL if there is one
		if (myInfo.mChannelLayout)
			XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, myInfo.mChannelLayout, myInfo.mChannelLayoutSize), "set channel layout on queue");

		// prime the queue with some data before starting
		myInfo.mDone = false;
		myInfo.mCurrentPacket = 0;
		for (int i = 0; i < kNumberBuffers; ++i) {
			XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffers[i]), "AudioQueueAllocateBuffer failed");

			AQTestBufferCallback (&myInfo, myInfo.mQueue, myInfo.mBuffers[i]);
			
			if (myInfo.mDone) break;
		}	
			// set the volume of the queue
		XThrowIfError (AudioQueueSetParameter(myInfo.mQueue, kAudioQueueParam_Volume, volume), "set queue volume");
		
		XThrowIfError (AudioQueueAddPropertyListener (myInfo.mQueue, kAudioQueueProperty_IsRunning, MyAudioQueuePropertyListenerProc, NULL), "add listener");
		
#if !TARGET_OS_IPHONE
		if (rate > 0) {
			UInt32 propValue = 1;
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_EnableTimePitch, &propValue, sizeof(propValue)), "enable time pitch");
			
			propValue = rQuality ? kAudioQueueTimePitchAlgorithm_Spectral : kAudioQueueTimePitchAlgorithm_TimeDomain;
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchAlgorithm, &propValue, sizeof(propValue)), "time pitch algorithm");
			
			propValue = (rate == 1.0f ? 1 : 0); // bypass rate if 1.0
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchBypass, &propValue, sizeof(propValue)), "bypass time pitch");
			if (rate != 1) {
				XThrowIfError (AudioQueueSetParameter (myInfo.mQueue, kAudioQueueParam_PlayRate, rate), "set playback rate");
			}
			
			if (doPrint) {
				printf ("Enable rate-scaled playback (rate = %.2f) using %s algorithm\n", rate, (rQuality ? "Spectral": "Time Domain"));
			}
		}
#endif
			// let's start playing now; AudioQueueStop is called in AQTestBufferCallback when there's
			// nothing more to read from the file
		XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed");

		do {
			CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
			currentTime += .25;
			if (duration > 0 && currentTime >= duration)
				break;
			
		} while (gIsRunning);
			
		CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

		XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed");
		XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioFileClose failed");
	}
	catch (CAXException &e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
	}
	catch (...) {
		fprintf(stderr, "Unspecified exception\n");
	}
	
    return 0;
}
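CalculateBytesForTime is referenced above but not part of this listing. A sketch of the heuristic it implements -- derive a buffer size from the packet duration, clamped to sane bounds -- with the caveat that the exact limits in the sample's real helper may differ:

static void CalculateBytesForTime(const AudioStreamBasicDescription *inDesc,
                                  UInt32 inMaxPacketSize, Float64 inSeconds,
                                  UInt32 *outBufferSize, UInt32 *outNumPackets)
{
    static const UInt32 maxBufferSize = 0x10000; // cap buffers at 64 KB
    static const UInt32 minBufferSize = 0x4000;  // but keep at least 16 KB

    if (inDesc->mFramesPerPacket) {
        // packets have a known duration: size the buffer for inSeconds of audio
        Float64 numPacketsForTime =
            inDesc->mSampleRate / inDesc->mFramesPerPacket * inSeconds;
        *outBufferSize = (UInt32)(numPacketsForTime * inMaxPacketSize);
    } else {
        // no reliable packet duration: fall back to one generous buffer
        *outBufferSize = maxBufferSize > inMaxPacketSize ? maxBufferSize : inMaxPacketSize;
    }

    // clamp to [minBufferSize, maxBufferSize], but never below one whole packet
    if (*outBufferSize > maxBufferSize && *outBufferSize > inMaxPacketSize)
        *outBufferSize = maxBufferSize;
    else if (*outBufferSize < minBufferSize)
        *outBufferSize = minBufferSize;

    *outNumPackets = *outBufferSize / inMaxPacketSize;
}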
Example #27
static av_cold int ffat_create_decoder(AVCodecContext *avctx, AVPacket *pkt)
{
    ATDecodeContext *at = avctx->priv_data;
    OSStatus status;
    int i;

    enum AVSampleFormat sample_fmt = (avctx->bits_per_raw_sample == 32) ?
                                     AV_SAMPLE_FMT_S32 : AV_SAMPLE_FMT_S16;

    AudioStreamBasicDescription in_format = {
        .mFormatID = ffat_get_format_id(avctx->codec_id, avctx->profile),
        .mBytesPerPacket = (avctx->codec_id == AV_CODEC_ID_ILBC) ? avctx->block_align : 0,
    };
    AudioStreamBasicDescription out_format = {
        .mFormatID = kAudioFormatLinearPCM,
        .mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked,
        .mFramesPerPacket = 1,
        .mBitsPerChannel = av_get_bytes_per_sample(sample_fmt) * 8,
    };

    avctx->sample_fmt = sample_fmt;

    if (ffat_usable_extradata(avctx)) {
        UInt32 format_size = sizeof(in_format);
        UInt32 cookie_size;
        uint8_t *cookie = ffat_get_magic_cookie(avctx, &cookie_size);
        if (!cookie)
            return AVERROR(ENOMEM);
        status = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                        cookie_size, cookie, &format_size, &in_format);
        if (cookie != at->extradata)
            av_free(cookie);
        if (status != 0) {
            av_log(avctx, AV_LOG_ERROR, "AudioToolbox header-parse error: %i\n", (int)status);
            return AVERROR_UNKNOWN;
        }
#if CONFIG_MP1_AT_DECODER || CONFIG_MP2_AT_DECODER || CONFIG_MP3_AT_DECODER
    } else if (pkt && pkt->size >= 4 &&
               (avctx->codec_id == AV_CODEC_ID_MP1 ||
                avctx->codec_id == AV_CODEC_ID_MP2 ||
                avctx->codec_id == AV_CODEC_ID_MP3)) {
        enum AVCodecID codec_id;
        int bit_rate;
        if (ff_mpa_decode_header(AV_RB32(pkt->data), &avctx->sample_rate,
                                 &in_format.mChannelsPerFrame, &avctx->frame_size,
                                 &bit_rate, &codec_id) < 0)
            return AVERROR_INVALIDDATA;
        avctx->bit_rate = bit_rate;
        in_format.mSampleRate = avctx->sample_rate;
#endif
#if CONFIG_AC3_AT_DECODER || CONFIG_EAC3_AT_DECODER
    } else if (pkt && pkt->size >= 7 &&
               (avctx->codec_id == AV_CODEC_ID_AC3 ||
                avctx->codec_id == AV_CODEC_ID_EAC3)) {
        AC3HeaderInfo hdr, *phdr = &hdr;
        GetBitContext gbc;
        init_get_bits(&gbc, pkt->data, pkt->size);
        if (avpriv_ac3_parse_header(&gbc, &phdr) < 0)
            return AVERROR_INVALIDDATA;
        in_format.mSampleRate = hdr.sample_rate;
        in_format.mChannelsPerFrame = hdr.channels;
        avctx->frame_size = hdr.num_blocks * 256;
        avctx->bit_rate = hdr.bit_rate;
#endif
    } else {
        in_format.mSampleRate = avctx->sample_rate ? avctx->sample_rate : 44100;
        in_format.mChannelsPerFrame = avctx->channels ? avctx->channels : 1;
    }

    avctx->sample_rate = out_format.mSampleRate = in_format.mSampleRate;
    avctx->channels = out_format.mChannelsPerFrame = in_format.mChannelsPerFrame;

    if (avctx->codec_id == AV_CODEC_ID_ADPCM_IMA_QT)
        in_format.mFramesPerPacket = 64;

    status = AudioConverterNew(&in_format, &out_format, &at->converter);

    if (status != 0) {
        av_log(avctx, AV_LOG_ERROR, "AudioToolbox init error: %i\n", (int)status);
        return AVERROR_UNKNOWN;
    }

    if ((status = ffat_set_extradata(avctx)) < 0)
        return status;

    for (i = 0; i < (sizeof(at->channel_map) / sizeof(at->channel_map[0])); i++)
        at->channel_map[i] = i;

    ffat_update_ctx(avctx);

    if (!(at->decoded_data = av_malloc(av_get_bytes_per_sample(avctx->sample_fmt)
                                      * avctx->frame_size * avctx->channels)))
        return AVERROR(ENOMEM);

    at->last_pts = AV_NOPTS_VALUE;

    return 0;
}

static av_cold int ffat_init_decoder(AVCodecContext *avctx)
{
    ATDecodeContext *at = avctx->priv_data;
    at->extradata = avctx->extradata;
    at->extradata_size = avctx->extradata_size;

    if ((avctx->channels && avctx->sample_rate) || ffat_usable_extradata(avctx))
        return ffat_create_decoder(avctx, NULL);
    else
        return 0;
}

static OSStatus ffat_decode_callback(AudioConverterRef converter, UInt32 *nb_packets,
                                     AudioBufferList *data,
                                     AudioStreamPacketDescription **packets,
                                     void *inctx)
{
    AVCodecContext *avctx = inctx;
    ATDecodeContext *at = avctx->priv_data;

    if (at->eof) {
        *nb_packets = 0;
        if (packets) {
            *packets = &at->pkt_desc;
            at->pkt_desc.mDataByteSize = 0;
        }
        return 0;
    }

    av_packet_unref(&at->in_pkt);
    av_packet_move_ref(&at->in_pkt, &at->new_in_pkt);

    if (!at->in_pkt.data) {
        *nb_packets = 0;
        return 1;
    }

    data->mNumberBuffers              = 1;
    data->mBuffers[0].mNumberChannels = 0;
    data->mBuffers[0].mDataByteSize   = at->in_pkt.size;
    data->mBuffers[0].mData           = at->in_pkt.data;
    *nb_packets = 1;

    if (packets) {
        *packets = &at->pkt_desc;
        at->pkt_desc.mDataByteSize = at->in_pkt.size;
    }

    return 0;
}
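For context, a pull-model callback like ffat_decode_callback is driven by AudioConverterFillComplexBuffer. A hedged sketch of the draining side, with illustrative names (this is not verbatim FFmpeg code):

static int drain_one_frame(AVCodecContext *avctx)
{
    ATDecodeContext *at = avctx->priv_data;
    UInt32 nb_frames = avctx->frame_size;
    AudioBufferList out = { .mNumberBuffers = 1 };

    out.mBuffers[0].mNumberChannels = avctx->channels;
    out.mBuffers[0].mDataByteSize   = nb_frames * avctx->channels *
                                      av_get_bytes_per_sample(avctx->sample_fmt);
    out.mBuffers[0].mData           = at->decoded_data;

    // the converter pulls compressed packets through ffat_decode_callback
    OSStatus ret = AudioConverterFillComplexBuffer(at->converter,
                                                   ffat_decode_callback, avctx,
                                                   &nb_frames, &out, NULL);
    if (ret && ret != 1) // 1 is the callback's "no more input" signal
        return AVERROR_UNKNOWN;
    return (int)nb_frames; // frames now sitting in at->decoded_data
}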
Example #28
void WriteOutputFile (const char*	outputFilePath, 
					OSType			dataFormat, 
					Float64			srate, 
					MusicTimeStamp	sequenceLength, 
					bool			shouldPrint,
					AUGraph			inGraph,
					UInt32			numFrames,
					MusicPlayer		player)
{
	OSStatus result = 0;
	UInt32 size;

	CAStreamBasicDescription outputFormat;
	outputFormat.mChannelsPerFrame = 2;
	outputFormat.mSampleRate = srate;
	outputFormat.mFormatID = dataFormat;
	
	AudioFileTypeID destFileType;
	CAAudioFileFormats::Instance()->InferFileFormatFromFilename (outputFilePath, destFileType);
	
	if (dataFormat == kAudioFormatLinearPCM) {
		outputFormat.mBytesPerPacket = outputFormat.mChannelsPerFrame * 2;
		outputFormat.mFramesPerPacket = 1;
		outputFormat.mBytesPerFrame = outputFormat.mBytesPerPacket;
		outputFormat.mBitsPerChannel = 16;
		
		if (destFileType == kAudioFileWAVEType)
			outputFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
								| kLinearPCMFormatFlagIsPacked;
		else
			outputFormat.mFormatFlags = kLinearPCMFormatFlagIsBigEndian
								| kLinearPCMFormatFlagIsSignedInteger
								| kLinearPCMFormatFlagIsPacked;
	} else {
		// use AudioFormat API to fill out the rest.
		size = sizeof(outputFormat);
		FailIf ((result = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat)), fail, "");
	}

	if (shouldPrint) {
		printf ("Writing to file: %s with format:\n* ", outputFilePath);
		outputFormat.Print();
	}
	
	CFURLRef url; url = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8*)outputFilePath, strlen(outputFilePath), false);
    
    // create output file, delete existing file
	ExtAudioFileRef outfile;
	result = ExtAudioFileCreateWithURL(url, destFileType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &outfile);
	if (url) CFRelease (url);	
	FailIf (result, fail, "ExtAudioFileCreateWithURL");

	AudioUnit outputUnit; outputUnit = NULL;
	UInt32 nodeCount;
	FailIf ((result = AUGraphGetNodeCount (inGraph, &nodeCount)), fail, "AUGraphGetNodeCount");
	
	for (UInt32 i = 0; i < nodeCount; ++i) 
	{
		AUNode node;
		FailIf ((result = AUGraphGetIndNode(inGraph, i, &node)), fail, "AUGraphGetIndNode");

		AudioComponentDescription desc;
		FailIf ((result = AUGraphNodeInfo(inGraph, node, &desc, NULL)), fail, "AUGraphNodeInfo");
		
		if (desc.componentType == kAudioUnitType_Output) 
		{
			FailIf ((result = AUGraphNodeInfo(inGraph, node, 0, &outputUnit)), fail, "AUGraphNodeInfo");
			break;
		}
	}
    
    FailIf ((result = (outputUnit == NULL)), fail, "outputUnit == NULL");
	{
		CAStreamBasicDescription clientFormat = CAStreamBasicDescription();
		size = sizeof(clientFormat);
		FailIf ((result = AudioUnitGetProperty (outputUnit,
													kAudioUnitProperty_StreamFormat,
													kAudioUnitScope_Output, 0,
													&clientFormat, &size)), fail, "AudioUnitGetProperty: kAudioUnitProperty_StreamFormat");
		size = sizeof(clientFormat);
		FailIf ((result = ExtAudioFileSetProperty(outfile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat)), fail, "ExtAudioFileSetProperty: kExtAudioFileProperty_ClientDataFormat");
		
		{
			MusicTimeStamp currentTime;
			AUOutputBL outputBuffer (clientFormat, numFrames);
			AudioTimeStamp tStamp;
			memset (&tStamp, 0, sizeof(AudioTimeStamp));
			tStamp.mFlags = kAudioTimeStampSampleTimeValid;
			int i = 0;
			int numTimesFor10Secs = (int)(10. / (numFrames / srate));
			do {
				outputBuffer.Prepare();
				AudioUnitRenderActionFlags actionFlags = 0;
				FailIf ((result = AudioUnitRender (outputUnit, &actionFlags, &tStamp, 0, numFrames, outputBuffer.ABL())), fail, "AudioUnitRender");

				tStamp.mSampleTime += numFrames;
				
				FailIf ((result = ExtAudioFileWrite(outfile, numFrames, outputBuffer.ABL())), fail, "ExtAudioFileWrite");	

				FailIf ((result = MusicPlayerGetTime (player, &currentTime)), fail, "MusicPlayerGetTime");
				if (shouldPrint && (++i % numTimesFor10Secs == 0))
					printf ("current time: %6.2f beats\n", currentTime);
			} while (currentTime < sequenceLength);
		}
	}
	
// close
	ExtAudioFileDispose(outfile);

	return;

fail:
	printf ("Problem: %ld\n", (long)result); 
	exit(1);
}
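CAAudioFileFormats::InferFileFormatFromFilename above comes from the Core Audio utility classes. A plain-C approximation using the AudioFile global-info API, under the assumption that the first type registered for the extension is acceptable (the function name is illustrative):

#include <AudioToolbox/AudioToolbox.h>
#include <stdlib.h>

// map a file extension (e.g. CFSTR("wav")) to an AudioFileTypeID
static OSStatus InferTypeFromExtension(CFStringRef inExtension, AudioFileTypeID *outType)
{
    UInt32 size = 0;
    OSStatus err = AudioFileGetGlobalInfoSize(kAudioFileGlobalInfo_TypesForExtension,
                                              sizeof(inExtension), &inExtension, &size);
    if (err) return err;
    AudioFileTypeID *types = (AudioFileTypeID *)malloc(size);
    if (!types) return -1;
    err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_TypesForExtension,
                                 sizeof(inExtension), &inExtension, &size, types);
    if (!err && size >= sizeof(AudioFileTypeID))
        *outType = types[0]; // take the first (preferred) registered type
    free(types);
    return err;
}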
Example #29
/*
 * Open codec.
 */
static pj_status_t ilbc_codec_open(pjmedia_codec *codec, 
				   pjmedia_codec_param *attr )
{
    struct ilbc_codec *ilbc_codec = (struct ilbc_codec*)codec;
    pj_status_t status;
    unsigned i;
    pj_uint16_t dec_fmtp_mode = DEFAULT_MODE, 
		enc_fmtp_mode = DEFAULT_MODE;

#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
    AudioStreamBasicDescription srcFormat, dstFormat;
    UInt32 size;

    srcFormat.mSampleRate       = attr->info.clock_rate;
    srcFormat.mFormatID         = kAudioFormatLinearPCM;
    srcFormat.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger
				  | kLinearPCMFormatFlagIsPacked;
    srcFormat.mBitsPerChannel   = attr->info.pcm_bits_per_sample;
    srcFormat.mChannelsPerFrame = attr->info.channel_cnt;
    srcFormat.mBytesPerFrame    = srcFormat.mChannelsPerFrame
	                          * srcFormat.mBitsPerChannel >> 3;
    srcFormat.mFramesPerPacket  = 1;
    srcFormat.mBytesPerPacket   = srcFormat.mBytesPerFrame *
				  srcFormat.mFramesPerPacket;

    memset(&dstFormat, 0, sizeof(dstFormat));
    dstFormat.mSampleRate 	= attr->info.clock_rate;
    dstFormat.mFormatID 	= kAudioFormatiLBC;
    dstFormat.mChannelsPerFrame = attr->info.channel_cnt;
#endif

    pj_assert(ilbc_codec != NULL);
    pj_assert(ilbc_codec->enc_ready == PJ_FALSE && 
	      ilbc_codec->dec_ready == PJ_FALSE);

    /* Get decoder mode */
    for (i = 0; i < attr->setting.dec_fmtp.cnt; ++i) {
	if (pj_stricmp(&attr->setting.dec_fmtp.param[i].name, &STR_MODE) == 0)
	{
	    dec_fmtp_mode = (pj_uint16_t)
			    pj_strtoul(&attr->setting.dec_fmtp.param[i].val);
	    break;
	}
    }

    /* Decoder mode must be set */
    PJ_ASSERT_RETURN(dec_fmtp_mode == 20 || dec_fmtp_mode == 30, 
		     PJMEDIA_CODEC_EINMODE);

    /* Get encoder mode */
    for (i = 0; i < attr->setting.enc_fmtp.cnt; ++i) {
	if (pj_stricmp(&attr->setting.enc_fmtp.param[i].name, &STR_MODE) == 0)
	{
	    enc_fmtp_mode = (pj_uint16_t)
			    pj_strtoul(&attr->setting.enc_fmtp.param[i].val);
	    break;
	}
    }

    PJ_ASSERT_RETURN(enc_fmtp_mode==20 || enc_fmtp_mode==30, 
		     PJMEDIA_CODEC_EINMODE);

    /* Both sides of a bi-directional session MUST use the same "mode" value.
     * At this point the only possible values are 20 or 30, so when the encoder
     * and decoder modes differ, just fall back to the default mode, which is 30.
     */
    if (enc_fmtp_mode != dec_fmtp_mode) {
	enc_fmtp_mode = dec_fmtp_mode = DEFAULT_MODE;
	PJ_LOG(4,(ilbc_codec->obj_name, 
		  "Normalized iLBC encoder and decoder modes to %d", 
		  DEFAULT_MODE));
    }

    /* Update some attributes based on negotiated mode. */
    attr->info.avg_bps = (dec_fmtp_mode == 30? 13333 : 15200);
    attr->info.frm_ptime = dec_fmtp_mode;
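    /* These bitrates follow from the iLBC frame sizes (RFC 3951):
     * 30 ms mode -> 50 bytes/frame -> 50 * 8 / 0.030 = 13333 bps,
     * 20 ms mode -> 38 bytes/frame -> 38 * 8 / 0.020 = 15200 bps. */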

    /* Create encoder */
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
    dstFormat.mFramesPerPacket  = CLOCK_RATE * enc_fmtp_mode / 1000;
    dstFormat.mBytesPerPacket   = (enc_fmtp_mode == 20? 38 : 50);

    /* Use AudioFormat API to fill out the rest of the description */
    size = sizeof(dstFormat);
    AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
 	                   0, NULL, &size, &dstFormat);

    if (AudioConverterNew(&srcFormat, &dstFormat, &ilbc_codec->enc) != noErr)
	return PJMEDIA_CODEC_EFAILED;
    ilbc_codec->enc_frame_size = (enc_fmtp_mode == 20? 38 : 50);
#else
    ilbc_codec->enc_frame_size = initEncode(&ilbc_codec->enc, enc_fmtp_mode);
#endif
    ilbc_codec->enc_samples_per_frame = CLOCK_RATE * enc_fmtp_mode / 1000;
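    /* e.g. with the usual CLOCK_RATE of 8000 Hz: 8000 * 20 / 1000 = 160
     * samples per frame, or 8000 * 30 / 1000 = 240 for 30 ms mode */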
    ilbc_codec->enc_ready = PJ_TRUE;

    /* Create decoder */
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
    if (AudioConverterNew(&dstFormat, &srcFormat, &ilbc_codec->dec) != noErr)
	return PJMEDIA_CODEC_EFAILED;
    ilbc_codec->dec_samples_per_frame = CLOCK_RATE * dec_fmtp_mode / 1000;
#else
    ilbc_codec->dec_samples_per_frame = initDecode(&ilbc_codec->dec,
						   dec_fmtp_mode,
						   attr->setting.penh);
#endif
    ilbc_codec->dec_frame_size = (dec_fmtp_mode == 20? 38 : 50);
    ilbc_codec->dec_ready = PJ_TRUE;

    /* Save plc flags */
    ilbc_codec->plc_enabled = (attr->setting.plc != 0);

    /* Create silence detector. */
    ilbc_codec->vad_enabled = (attr->setting.vad != 0);
    status = pjmedia_silence_det_create(ilbc_codec->pool, CLOCK_RATE,
					ilbc_codec->enc_samples_per_frame,
					&ilbc_codec->vad);
    if (status != PJ_SUCCESS)
	return status;

    /* Init last_tx (not strictly necessary because of zalloc, but better
     * to be safe in case someone removes the zalloc later).
     */
    pj_set_timestamp32(&ilbc_codec->last_tx, 0, 0);

    PJ_LOG(4,(ilbc_codec->obj_name, 
	      "iLBC codec opened, mode=%d", dec_fmtp_mode));

    return PJ_SUCCESS;
}
Example #30
nsresult
AppleATDecoder::GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
                                         const nsTArray<uint8_t>& aExtraData)
{
  // Request the properties from CoreAudio using the codec magic cookie
  AudioFormatInfo formatInfo;
  PodZero(&formatInfo.mASBD);
  formatInfo.mASBD.mFormatID = mFormatID;
  if (mFormatID == kAudioFormatMPEG4AAC) {
    formatInfo.mASBD.mFormatFlags = mConfig.extended_profile;
  }
  formatInfo.mMagicCookieSize = aExtraData.Length();
  formatInfo.mMagicCookie = aExtraData.Elements();

  UInt32 formatListSize;
  // Attempt to retrieve the default format using
  // kAudioFormatProperty_FormatInfo method.
  // This method only retrieves the FramesPerPacket information required
  // by the decoder, which depends on the codec type and profile.
  aDesc.mFormatID = mFormatID;
  aDesc.mChannelsPerFrame = mConfig.channel_count;
  aDesc.mSampleRate = mConfig.samples_per_second;
  UInt32 inputFormatSize = sizeof(aDesc);
  OSStatus rv = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                       0,
                                       NULL,
                                       &inputFormatSize,
                                       &aDesc);
  if (NS_WARN_IF(rv)) {
    return NS_ERROR_FAILURE;
  }

  // If any of the methods below fail, we will return the default format as
  // created using kAudioFormatProperty_FormatInfo above.
  rv = AudioFormatGetPropertyInfo(kAudioFormatProperty_FormatList,
                                  sizeof(formatInfo),
                                  &formatInfo,
                                  &formatListSize);
  if (rv || (formatListSize % sizeof(AudioFormatListItem))) {
    return NS_OK;
  }
  size_t listCount = formatListSize / sizeof(AudioFormatListItem);
  nsAutoArrayPtr<AudioFormatListItem> formatList(
    new AudioFormatListItem[listCount]);

  rv = AudioFormatGetProperty(kAudioFormatProperty_FormatList,
                              sizeof(formatInfo),
                              &formatInfo,
                              &formatListSize,
                              formatList);
  if (rv) {
    return NS_OK;
  }
  LOG("found %u available audio stream(s)",
      formatListSize / sizeof(AudioFormatListItem));
  // Get the index number of the first playable format.
  // This index number will be for the highest quality layer the platform
  // is capable of playing.
  UInt32 itemIndex;
  UInt32 indexSize = sizeof(itemIndex);
  rv = AudioFormatGetProperty(kAudioFormatProperty_FirstPlayableFormatFromList,
                              formatListSize,
                              formatList,
                              &indexSize,
                              &itemIndex);
  if (rv) {
    return NS_OK;
  }

  aDesc = formatList[itemIndex].mASBD;

  return NS_OK;
}
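Once GetInputAudioDescription has produced a usable input ASBD, the usual next step is to pair it with an LPCM output description and open an AudioConverter. A sketch assuming 32-bit float output (this paraphrases the decoder's follow-up step; it is not verbatim Mozilla code):

  AudioStreamBasicDescription outDesc;
  memset(&outDesc, 0, sizeof(outDesc));
  outDesc.mSampleRate       = aDesc.mSampleRate;
  outDesc.mChannelsPerFrame = aDesc.mChannelsPerFrame;
  outDesc.mFormatID         = kAudioFormatLinearPCM;
  outDesc.mFormatFlags      = kLinearPCMFormatFlagIsFloat | kLinearPCMFormatFlagIsPacked;
  outDesc.mBitsPerChannel   = 32;
  outDesc.mFramesPerPacket  = 1;
  outDesc.mBytesPerFrame    = outDesc.mChannelsPerFrame * outDesc.mBitsPerChannel / 8;
  outDesc.mBytesPerPacket   = outDesc.mBytesPerFrame;

  AudioConverterRef converter = nullptr;
  OSStatus status = AudioConverterNew(&aDesc, &outDesc, &converter);
  // on success, pull decoded frames with AudioConverterFillComplexBuffer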