bool CCoreAudioUnit::GetSupportedChannelLayouts(AudioChannelLayoutList* pLayouts)
{
  if (!m_audioUnit || !pLayouts)
    return false;

  UInt32 propSize = 0;
  Boolean writable = false;
  OSStatus ret = AudioUnitGetPropertyInfo(m_audioUnit,
    kAudioUnitProperty_SupportedChannelLayoutTags, kAudioUnitScope_Input, 0, &propSize, &writable);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioUnit::GetSupportedChannelLayouts: "
      "Unable to retrieve supported channel layout property info. Error = %s", GetError(ret).c_str());
    return false;
  }
  UInt32 layoutCount = propSize / sizeof(AudioChannelLayoutTag);
  AudioChannelLayoutTag* pSuppLayouts = new AudioChannelLayoutTag[layoutCount];
  // Fetch from the same scope/element that was sized above, so propSize matches.
  ret = AudioUnitGetProperty(m_audioUnit,
    kAudioUnitProperty_SupportedChannelLayoutTags, kAudioUnitScope_Input, 0, pSuppLayouts, &propSize);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioUnit::GetSupportedChannelLayouts: "
      "Unable to retrieve supported channel layouts. Error = %s", GetError(ret).c_str());
    delete[] pSuppLayouts;
    return false;
  }
  for (UInt32 layout = 0; layout < layoutCount; layout++)
    pLayouts->push_back(pSuppLayouts[layout]);
  delete[] pSuppLayouts;
  return true;
}
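The size-then-fetch pattern above recurs in every example on this page. A minimal sketch of the same query using std::vector, so the buffer is released on every path (hypothetical free-standing helper, not part of CCoreAudioUnit):

#include <AudioUnit/AudioUnit.h>
#include <vector>

// Hedged sketch: query the tag-list size, then fetch it into a vector that
// cleans itself up even on the early error returns.
static bool GetSupportedLayoutTags(AudioUnit au,
                                   std::vector<AudioChannelLayoutTag>& tags)
{
  UInt32 size = 0;
  Boolean writable = false;
  if (AudioUnitGetPropertyInfo(au, kAudioUnitProperty_SupportedChannelLayoutTags,
                               kAudioUnitScope_Output, 0, &size, &writable) != noErr)
    return false;

  tags.resize(size / sizeof(AudioChannelLayoutTag));
  if (AudioUnitGetProperty(au, kAudioUnitProperty_SupportedChannelLayoutTags,
                           kAudioUnitScope_Output, 0, tags.data(), &size) != noErr)
    return false;

  tags.resize(size / sizeof(AudioChannelLayoutTag)); // the fetch may shrink size
  return true;
}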
UInt32 AudioUnitNode::numberOfParameters(UInt32 scope) const
{
    UInt32 size = 0;
    checkError(AudioUnitGetPropertyInfo(mUnit, kAudioUnitProperty_ParameterList, scope, 0, &size, NULL), "AudioUnitGetPropertyInfo");
    return size / sizeof(AudioUnitParameterID);
}
Example #3
void auLoader::printInfo() const
{
  UInt32 size;
  Boolean write;
  ComponentResult err = noErr;
  
  debug(LOG_INFO, "Plugin Properties:");
  for(int i = 0; i < sizeof(_AUCODES) / sizeof(UInt32); ++i) {
    size = 0;
    err = AudioUnitGetPropertyInfo(m_plugin, _AUCODES[i], kAudioUnitScope_Global, 0, &size, &write);
    if(size && err == noErr) {
      if(write) {
        debug(LOG_INFO, "  %s: %d bytes (+ writeable)", AUPropertyStr(_AUCODES[i]), size);
      }
      else {
        debug(LOG_INFO, "  %s: %d bytes", AUPropertyStr(_AUCODES[i]), size);
      }
    }
  }
  
  // Get parameter information
  AudioUnitParameterInfo auinfo;
  UInt32 *plist = NULL;
  int num_params = 0;
  err = AudioUnitGetPropertyInfo(m_plugin, kAudioUnitProperty_ParameterList, kAudioUnitScope_Global, 0, &size, &write);
  if(err == noErr && size > 0) {
    num_params = size / sizeof(UInt32);
    plist = new UInt32[num_params];
    err = AudioUnitGetProperty(m_plugin, kAudioUnitProperty_ParameterList, kAudioUnitScope_Global, 0, plist, &size);
    if(err != noErr) {
      num_params = 0;
    }
  }
  
  debug(LOG_INFO, "Parameters (%d total):", num_params);
  for(int i = 0; i < num_params; ++i) {
    err = AudioUnitGetPropertyInfo(m_plugin, kAudioUnitProperty_ParameterInfo, kAudioUnitScope_Global, plist[i], &size, &write);
    if(size == sizeof(AudioUnitParameterInfo) && err == noErr) {
      err = AudioUnitGetProperty(m_plugin, kAudioUnitProperty_ParameterInfo, kAudioUnitScope_Global, plist[i], &auinfo, &size);
      if(err == noErr) {
        debug(LOG_INFO, "  %d: %s, type %d, min %f, max %f", plist[i], auinfo.name, auinfo.unit, auinfo.minValue, auinfo.maxValue);
      }
    }
  }
  delete[] plist;
}
Example #4
bool		CAAudioUnit::CanBypass () const
{
	Boolean outWritable;
	OSStatus result = AudioUnitGetPropertyInfo (AU(), kAudioUnitProperty_BypassEffect,
									kAudioUnitScope_Global, 0,
									NULL, &outWritable);
	return (!result && outWritable);
}
Example #5
bool		CAAudioUnit::HasChannelLayouts (AudioUnitScope 		inScope,
										AudioUnitElement 		inEl) const
{
	OSStatus result = AudioUnitGetPropertyInfo (AU(),
									kAudioUnitProperty_SupportedChannelLayoutTags,
									inScope, inEl,
									NULL, NULL);
	return !result;
}
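Passing NULL for both out-parameters, as HasChannelLayouts does above, is the cheapest way to test whether a unit implements a property at all. The same trick generalizes (hypothetical helper):

// Hedged sketch: AudioUnitGetPropertyInfo tolerates NULL outDataSize and
// outWritable, so noErr alone tells us the property exists on this unit.
static bool UnitHasProperty(AudioUnit au, AudioUnitPropertyID prop,
                            AudioUnitScope scope, AudioUnitElement elem)
{
	return AudioUnitGetPropertyInfo(au, prop, scope, elem, NULL, NULL) == noErr;
}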
Example #6
AUEditWindow::AUEditWindow(XController *owner, IBNibRef nibRef, CFStringRef name, AudioUnit editUnit, bool forceGeneric) :
	XWindow(owner, nibRef, name),
	mEditUnit(editUnit)
{
	OSStatus err;
	ComponentDescription editorComponentDesc;
	
	// set up to use generic UI component
	editorComponentDesc.componentType = kAudioUnitCarbonViewComponentType;
	editorComponentDesc.componentSubType = 'gnrc';
	editorComponentDesc.componentManufacturer = 'appl';
	editorComponentDesc.componentFlags = 0;
	editorComponentDesc.componentFlagsMask = 0;
	
	if (!forceGeneric) {
		// ask the AU for its first editor component
		UInt32 propertySize;
		err = AudioUnitGetPropertyInfo(editUnit, kAudioUnitProperty_GetUIComponentList,
			kAudioUnitScope_Global, 0, &propertySize, NULL);
		if (!err) {
			int nEditors = propertySize / sizeof(ComponentDescription);
			ComponentDescription *editors = new ComponentDescription[nEditors];
			err = AudioUnitGetProperty(editUnit, kAudioUnitProperty_GetUIComponentList,
				kAudioUnitScope_Global, 0, editors, &propertySize);
			if (!err && nEditors > 0)
				// just pick the first one for now
				editorComponentDesc = editors[0];
			delete[] editors;
		}
	}
	Component editComp = FindNextComponent(NULL, &editorComponentDesc);
	
	verify_noerr(OpenAComponent(editComp, &mEditView));
	
	ControlRef rootControl;
	verify_noerr(GetRootControl(mWindow, &rootControl));

	Rect r;
	ControlRef viewPane;
	GetControlBounds(rootControl, &r);
	Float32Point location = { 0., 0. };
	Float32Point size = { Float32(r.right), Float32(r.bottom) };
	verify_noerr(AudioUnitCarbonViewCreate(mEditView, mEditUnit, mWindow, rootControl, &location, &size, &viewPane));
	
	AudioUnitCarbonViewSetEventListener(mEditView, EventListener, this);

	GetControlBounds(viewPane, &r);
	size.x = r.right-r.left; size.y = r.bottom-r.top;
	SetSize(size);
	Show();

/*	EventLoopTimerRef timer;
	RequireNoErr(
		InstallEventLoopTimer(
			GetMainEventLoop(), 5., 0., TimerProc, this, &timer));*/
}
Example #7
	int initoutput(){
		AudioComponentDescription desc;  
		AudioComponent comp;
		OSStatus err;
		UInt32 size;
		Boolean canwrite;
		
		AudioStreamBasicDescription 	inputdesc,outputdesc;

		desc.componentType=kAudioUnitType_Output;
		desc.componentSubType=kAudioUnitSubType_DefaultOutput;
		desc.componentManufacturer=kAudioUnitManufacturer_Apple;
		desc.componentFlags=0;
		desc.componentFlagsMask=0;

		comp=AudioComponentFindNext(NULL,&desc);
		if (comp==NULL) return -1;

		err= AudioComponentInstanceNew(comp,&out);
		if (err) return err;				

		err=AudioUnitInitialize(out);
		if (err) return err;

		// Size and fetch the same scope: the input-scope stream format is
		// what the converter below must produce.
		err=AudioUnitGetPropertyInfo(out,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input,0,&size,&canwrite);
		if (err) return err;

		err=AudioUnitGetProperty(out,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input,0,&outputdesc,&size);
		if (err) return err;
		
//		dumpdesc(&outputdesc);
		
		inputdesc.mSampleRate=44100.0;
		inputdesc.mFormatID=kAudioFormatLinearPCM;
#if __BIG_ENDIAN__
		inputdesc.mFormatFlags=kAudioFormatFlagIsSignedInteger|kAudioFormatFlagIsBigEndian|kAudioFormatFlagIsPacked; // 0x0e
#else
		inputdesc.mFormatFlags=kAudioFormatFlagIsSignedInteger|kAudioFormatFlagIsPacked; // 0x0c
#endif
		inputdesc.mBytesPerPacket=4;
		inputdesc.mFramesPerPacket=1;
		inputdesc.mBytesPerFrame=4;
		inputdesc.mChannelsPerFrame=2;
		inputdesc.mBitsPerChannel=16;
		inputdesc.mReserved=0;

//		dumpdesc(&inputdesc);
		
		err=AudioConverterNew(&inputdesc,&outputdesc,&conv);
		if (err) {
//			printf("AudioConvertNew failed %.*s\n",4,(char*)&err);
			return err;
		}

		return err;
	}
Example #8
bool		CAAudioUnit::SupportsNumChannels () const
{
	// this is the default assumption of an audio effect unit
	Boolean* isWritable = 0;
	UInt32	dataSize = 0;
		// lets see if the unit has any channel restrictions
	OSStatus result = AudioUnitGetPropertyInfo (AU(),
									kAudioUnitProperty_SupportedNumChannels,
									kAudioUnitScope_Global, 0,
									&dataSize, isWritable); //don't care if this is writable

		// if this property is NOT implemented an FX unit
		// is expected to deal with the same channel valence in and out
	if (result) {
		if (Comp().Desc().IsEffect() || Comp().Desc().IsOffline())
			return true;
	}
	return result == noErr;
}
Example #9
bool CCoreAudioUnit::GetInputChannelMap(CoreAudioChannelList* pChannelMap)
{
  if (!m_Component)
    return false;
  
  UInt32 size = 0;
  Boolean writable = false;
  OSStatus ret = AudioUnitGetPropertyInfo(m_Component, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Input, 0, &size, &writable);
  if (ret || !size)
    return false;
  UInt32 channels = size/sizeof(SInt32);
  SInt32* pMap = new SInt32[channels];
  ret = AudioUnitGetProperty(m_Component, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Input, 0, pMap, &size);
  if (ret)
    CLog::Log(LOGERROR, "CCoreAudioUnit::GetInputChannelMap: Unable to retrieve AudioUnit input channel map. Error = 0x%08x (%4.4s)", ret, CONVERT_OSSTATUS(ret));
  else
    for (UInt32 i = 0; i < channels; i++)
      pChannelMap->push_back(pMap[i]);  
  delete[] pMap;
  return (!ret);
}
Example #10
OSStatus	CAAudioUnit::GetChannelLayout (AudioUnitScope 		inScope,
										AudioUnitElement 		inEl,
										CAAudioChannelLayout	&outLayout) const
{
	UInt32 size;
	OSStatus result = AudioUnitGetPropertyInfo (AU(), kAudioUnitProperty_AudioChannelLayout,
									inScope, inEl, &size, NULL);
	if (result) return result;

	AudioChannelLayout *layout = (AudioChannelLayout*)malloc (size);

	require_noerr (result = AudioUnitGetProperty (AU(), kAudioUnitProperty_AudioChannelLayout,
									inScope, inEl, layout, &size), home);

	outLayout = CAAudioChannelLayout (layout);

home:
	free (layout);
	return result;
}
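AudioChannelLayout is a variable-length struct, which is why GetChannelLayout sizes it with AudioUnitGetPropertyInfo before allocating. A sketch of the same fetch with automatic cleanup, assuming only the layout tag is needed (hypothetical helper):

#include <AudioUnit/AudioUnit.h>
#include <cstdlib>
#include <memory>

// Hedged sketch: fetch the variable-length layout into a malloc'd buffer
// that a unique_ptr frees on every return path.
static OSStatus GetLayoutTag (AudioUnit au, AudioUnitScope scope,
								AudioUnitElement elem, AudioChannelLayoutTag &outTag)
{
	UInt32 size = 0;
	OSStatus result = AudioUnitGetPropertyInfo (au, kAudioUnitProperty_AudioChannelLayout,
									scope, elem, &size, NULL);
	if (result) return result;

	std::unique_ptr<AudioChannelLayout, void(*)(void*)>
		layout ((AudioChannelLayout*)malloc (size), free);
	result = AudioUnitGetProperty (au, kAudioUnitProperty_AudioChannelLayout,
									scope, elem, layout.get(), &size);
	if (result == noErr)
		outTag = layout->mChannelLayoutTag;
	return result;
}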
Example #11
bool auLoader::setSampleRate(float srate) {
  UInt32 size;
  Boolean write;
  Float64 data = srate;
  ComponentResult err = AudioUnitGetPropertyInfo(m_plugin, kAudioUnitProperty_SampleRate, kAudioUnitScope_Global, 0, &size, &write);
  if(err == noErr && size == sizeof(Float64)) {
    err = AudioUnitSetProperty(m_plugin, kAudioUnitProperty_SampleRate, kAudioUnitScope_Global, 0, &data, size);
    if(err != noErr) {
      debug(LOG_ERROR, "Could not set sample rate to %f", srate);
      return false;
    }
    else {
      debug(LOG_VERBOSE, "Set sample rate to %f", srate);
    }
  }
  
  if(m_desc.componentType == kAudioUnitType_Effect ||
     m_desc.componentType == kAudioUnitType_MusicEffect) {
    AudioStreamBasicDescription stream_format;
    stream_format.mSampleRate = srate;
    stream_format.mFormatID = kAudioFormatLinearPCM;
    stream_format.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
    stream_format.mBytesPerPacket = 4;
    stream_format.mFramesPerPacket = 1;
    stream_format.mBytesPerFrame = 4;
    stream_format.mChannelsPerFrame = MAX_CHANNELS;
    stream_format.mBitsPerChannel = sizeof(Float32) * 8;
    
    err = AudioUnitSetProperty(m_plugin, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
                               0, &stream_format, sizeof(stream_format));
    if(err != noErr) {
      debug(LOG_ERROR, "Could not set stream format");
      return false;
    }
    else {
      debug(LOG_VERBOSE, "Set stream format");
    }
  }
  
  return true;
}
Example #12
bool		CAAudioUnit::GetChannelLayouts (AudioUnitScope 			inScope,
										AudioUnitElement 			inEl,
										ChannelTagVector			&outChannelVector) const
{
	if (HasChannelLayouts (inScope, inEl) == false) return false;

	UInt32 dataSize;
	OSStatus result = AudioUnitGetPropertyInfo (AU(),
								kAudioUnitProperty_SupportedChannelLayoutTags,
								inScope, inEl,
								&dataSize, NULL);

	if (result == kAudioUnitErr_InvalidProperty) {
		// if we get here we can do layouts but we've got the speaker config property
		outChannelVector.erase (outChannelVector.begin(), outChannelVector.end());
		outChannelVector.push_back (kAudioChannelLayoutTag_Stereo);
		outChannelVector.push_back (kAudioChannelLayoutTag_StereoHeadphones);
		outChannelVector.push_back (kAudioChannelLayoutTag_Quadraphonic);
		outChannelVector.push_back (kAudioChannelLayoutTag_AudioUnit_5_0);
		return true;
	}

	if (result) return false;

	bool canDo = false;
		// OK lets get our channel layouts
	AudioChannelLayoutTag* info = (AudioChannelLayoutTag*)malloc (dataSize);
	result = AudioUnitGetProperty (AU(),
							kAudioUnitProperty_SupportedChannelLayoutTags,
							inScope, inEl,
							info, &dataSize);
	if (result) goto home;

	outChannelVector.erase (outChannelVector.begin(), outChannelVector.end());
	for (unsigned int i = 0; i < (dataSize / sizeof (AudioChannelLayoutTag)); ++i)
		outChannelVector.push_back (info[i]);
	canDo = true;

home:
	free (info);
	return canDo;
}
Example #13
bool		CAAudioUnit::CanDo (	int 				inChannelsIn,
									int 				inChannelsOut) const
{
	// this is the default assumption of an audio effect unit
	Boolean* isWritable = 0;
	UInt32	dataSize = 0;
		// lets see if the unit has any channel restrictions
	OSStatus result = AudioUnitGetPropertyInfo (AU(),
									kAudioUnitProperty_SupportedNumChannels,
									kAudioUnitScope_Global, 0,
									&dataSize, isWritable); //don't care if this is writable

		// if this property is NOT implemented an FX unit
		// is expected to deal with the same channel valence in and out
	if (result)
	{
		if ((Comp().Desc().IsEffect() && (inChannelsIn == inChannelsOut))
			|| (Comp().Desc().IsOffline() && (inChannelsIn == inChannelsOut)))
		{
			return true;
		}
		else
		{
			// the au should either really tell us about this
			// or we will assume the worst
			return false;
		}
	}

	StackAUChannelInfo info (dataSize);

	result = GetProperty (kAudioUnitProperty_SupportedNumChannels,
							kAudioUnitScope_Global, 0,
							info.mChanInfo, &dataSize);
	if (result) { return false; }

	return ValidateChannelPair (inChannelsIn, inChannelsOut, info.mChanInfo, (dataSize / sizeof (AUChannelInfo)));
}
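A brief usage sketch of CanDo from client code (hypothetical; assumes au wraps an already-opened unit):

#include <cstdio>

static void ProbeChannelConfigs (const CAAudioUnit &au)
{
	// probe a few common in/out channel configurations
	const int configs[][2] = { {1, 1}, {2, 2}, {1, 2}, {6, 2} };
	for (const auto &c : configs)
		printf ("%d in -> %d out: %s\n", c[0], c[1],
				au.CanDo (c[0], c[1]) ? "supported" : "unsupported");
}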
Example #14
bool CAUOutputDevice::GetChannelMap(CoreAudioChannelList* pChannelMap)
{
  if (!m_audioUnit)
    return false;

  UInt32 size = 0;
  Boolean writable = false;
  OSStatus ret = AudioUnitGetPropertyInfo(m_audioUnit,
    kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Input, 0, &size, &writable);
  if (ret || !size)
    return false;

  UInt32 channels = size/sizeof(SInt32);
  SInt32* pMap = new SInt32[channels];
  ret = AudioUnitGetProperty(m_audioUnit,
    kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Input, 0, pMap, &size);
  if (ret)
    CLog::Log(LOGERROR, "CCoreAudioUnit::GetInputChannelMap: "
      "Unable to retrieve AudioUnit input channel map. Error = %s", GetError(ret).c_str());
  else
    for (UInt32 i = 0; i < channels; i++)
      pChannelMap->push_back(pMap[i]);
  delete[] pMap;
  return (!ret);
}
Example #15
void AUEditWindow::SetUnitToDisplay (AudioUnit editUnit, bool forceGeneric)
{
	if (mEditView) {
		verify_noerr(CloseComponent(mEditView));
		mEditView = 0;
	}

	ComponentDescription editorComponentDesc;
	
	// set up to use generic UI component
	editorComponentDesc.componentType = kAudioUnitCarbonViewComponentType;
	editorComponentDesc.componentSubType = 'gnrc';
	editorComponentDesc.componentManufacturer = 'appl';
	editorComponentDesc.componentFlags = 0;
	editorComponentDesc.componentFlagsMask = 0;
	
	OSStatus err;

	if (!forceGeneric) {
		// ask the AU for its first editor component
		UInt32 propertySize;
		err = AudioUnitGetPropertyInfo(editUnit, kAudioUnitProperty_GetUIComponentList,
			kAudioUnitScope_Global, 0, &propertySize, NULL);
		if (!err) {
			int nEditors = propertySize / sizeof(ComponentDescription);
			ComponentDescription *editors = new ComponentDescription[nEditors];
			err = AudioUnitGetProperty(editUnit, kAudioUnitProperty_GetUIComponentList,
				kAudioUnitScope_Global, 0, editors, &propertySize);
			if (!err && nEditors > 0)
				// just pick the first one for now
				editorComponentDesc = editors[0];
			delete[] editors;
		}
	}
	
	SetUnitToDisplay (editUnit, editorComponentDesc);
}
Example #16
OSStatus setupOutput (AudioUnit *outputUnit, AFfilehandle file)
{
	OSStatus	status = noErr;
	UInt32		size;
	Boolean		outWritable;

	AudioStreamBasicDescription	fileASBD, inputASBD, outputASBD;
	AURenderCallbackStruct		renderCallback;

	/* Set virtual sample format to single-precision floating-point. */
	afSetVirtualSampleFormat(file, AF_DEFAULT_TRACK, AF_SAMPFMT_FLOAT, 32);

	/* Get ASBD for virtual sample format. */ 
	getASBDForFile(file, AF_DEFAULT_TRACK, &fileASBD);

	status = AudioUnitGetPropertyInfo(*outputUnit,
		kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
		0, &size, &outWritable);
	if (status != noErr)
		return status;

	status = AudioUnitGetProperty(*outputUnit,
		kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
		0, &outputASBD, &size);
	if (status != noErr)
		return status;

	if (outWritable)
	{
		outputASBD = fileASBD;

		status = AudioUnitSetProperty(*outputUnit,
			kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
			0, &outputASBD, size);
	}

	inputASBD = fileASBD;

	status = AudioUnitSetProperty(*outputUnit,
		kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
		0, &inputASBD, size);
	if (status != noErr)
	{
		fprintf(stderr, "Could not set input stream format.\n");
		exit(EXIT_FAILURE);
	}

	/*
		Set the render callback to a procedure which will
		read from the file.
	*/
	renderCallback.inputProc = fileRenderProc;
	renderCallback.inputProcRefCon = file;

	status = AudioUnitSetProperty(*outputUnit,
		kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0,
		&renderCallback, sizeof (AURenderCallbackStruct));
	if (status != noErr)
	{
		fprintf(stderr, "Could not set render callback.\n");
		exit(EXIT_FAILURE);
	}

	return status;
}
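setupOutput assumes the unit accepts the file's format at input scope. Since a unit may reject or adjust a requested ASBD, a defensive variant sets the format and reads it back (hedged sketch; hypothetical helper, not part of the program above):

/* Hedged sketch: set the input-scope stream format, then re-fetch it and
   compare the fields we care about; a mismatch means the unit adjusted it. */
static OSStatus setAndVerifyInputFormat (AudioUnit unit,
	const AudioStreamBasicDescription *want)
{
	OSStatus status = AudioUnitSetProperty(unit,
		kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
		0, want, sizeof (*want));
	if (status != noErr)
		return status;

	AudioStreamBasicDescription got = { 0 };
	UInt32 size = sizeof (got);
	status = AudioUnitGetProperty(unit,
		kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
		0, &got, &size);
	if (status != noErr)
		return status;

	return (got.mSampleRate == want->mSampleRate &&
		got.mChannelsPerFrame == want->mChannelsPerFrame)
		? noErr : kAudioUnitErr_FormatNotSupported;
}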
Example #17
int macosx_audio_open(audio_desc_t ad, audio_format* ifmt, audio_format *ofmt)
{
	OSStatus err = noErr;
	UInt32   propertySize;
	Boolean  writable;
	obtained_ = false;
	add = ad;
	//dev[0] = devices[ad];
	UNUSED(ofmt);

	// Get the default input device ID. 
	err = AudioHardwareGetPropertyInfo(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &writable);              
	if (err != noErr) {
		return 0;
	}
	err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &(devices[ad].inputDeviceID_));
	if (err != noErr) {
		debug_msg("error kAudioHardwarePropertyDefaultInputDevice");
		return 0;
	}
	if (devices[ad].inputDeviceID_ == kAudioDeviceUnknown) {
		debug_msg("error kAudioDeviceUnknown");
		return 0;
	}
	// Get the input stream description.
	err = AudioDeviceGetPropertyInfo(devices[ad].inputDeviceID_, 0, true, kAudioDevicePropertyStreamFormat, &propertySize, &writable);
	if (err != noErr) {
		debug_msg("error AudioDeviceGetPropertyInfo");
		return 0;
	}
	err = AudioDeviceGetProperty(devices[ad].inputDeviceID_, 0, true, kAudioDevicePropertyStreamFormat, &propertySize, &(devices[ad].inputStreamBasicDescription_));
	//printf("inputStreamBasicDescription_.mBytesPerFrame %d\n", devices[add].inputStreamBasicDescription_);
	if (err != noErr) {
		debug_msg("error AudioDeviceGetProperty");
		return 0;
	}

	// set little endian: clear only the big-endian flag
	devices[ad].inputStreamBasicDescription_.mFormatFlags &= ~kAudioFormatFlagIsBigEndian;

	if (writable) {
	        err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, 0, true, kAudioDevicePropertyStreamFormat, sizeof(AudioStreamBasicDescription), &(devices[ad].inputStreamBasicDescription_));
	        if (err != noErr) printf("err: AudioDeviceSetProperty: kAudioDevicePropertyStreamFormat\n");
	}
	
	/* set the buffer size of the device */
	
	/*
	int bufferByteSize = 8192;
	propertySize = sizeof(bufferByteSize);
	err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, 0, true, kAudioDevicePropertyBufferSize, propertySize, &bufferByteSize);
	if (err != noErr) debug_msg("err: Set kAudioDevicePropertyBufferSize to %d\n", bufferByteSize);
	else debug_msg("sucessfully set kAudioDevicePropertyBufferSize to %d\n", bufferByteSize);
	*/

        // Set the device sample rate -- a temporary fix for the G5's
        //   built-in audio and possibly other audio devices.
	Boolean IsInput = 0;
	int inChannel = 0;
         
	Float64 theAnswer = 44100;
	UInt32 theSize = sizeof(theAnswer);
	err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, inChannel, IsInput,
                                kAudioDevicePropertyNominalSampleRate, theSize, &theAnswer);

	if (err != noErr) {
		debug_msg("error AudioDeviceSetProperty\n");
		return 0;
	}
	debug_msg("Sample rate, %f\n", theAnswer);
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	err = AudioDeviceCreateIOProcID(devices[ad].inputDeviceID_, audioIOProc, (void*)NULL, &devices[ad].inputDeviceProcID_);
	if (err != noErr) {
		debug_msg("error AudioDeviceCreateIOProcID, %s\n", GetMacOSStatusCommentString(err));
		return 0;
	}
	err = OpenADefaultComponent(kAudioUnitType_Output, kAudioUnitSubType_DefaultOutput, &(devices[ad].outputUnit_));
	// The HAL AU maybe a better way to in the future...
	//err = OpenADefaultComponent(kAudioUnitType_Output, kAudioUnitSubType_HALOutput, &(devices[ad].outputUnit_));
	if (err != noErr) {
		debug_msg("error OpenADefaultComponent\n");
		return 0;
	}
#else
	// Register the AudioDeviceIOProc.
	err = AudioDeviceAddIOProc(devices[ad].inputDeviceID_, audioIOProc, NULL);
	if (err != noErr) {
		debug_msg("error AudioDeviceAddIOProc\n");
		return 0;
	}
	err = OpenDefaultAudioOutput(&(devices[ad].outputUnit_));
	if (err != noErr) {
		debug_msg("error OpenDefaultAudioOutput\n");
		return 0;
	}
#endif
	// Register a callback function to provide output data to the unit.
	devices[ad].input.inputProc = outputRenderer;
	devices[ad].input.inputProcRefCon = 0;
	/* These would be needed if HAL used
	 * UInt32 enableIO =1; 
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, (const void*)&enableIO, sizeof(UInt32));
	enableIO=0;
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, (const void*)&enableIO, sizeof(UInt32));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty EnableIO with error %ld: %s\n", err, GetMacOSStatusErrorString(err));
		return 0;
	}*/
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &(devices[ad].input), sizeof(AURenderCallbackStruct));
#else
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &(devices[ad].input), sizeof(AURenderCallbackStruct));
#endif

	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty1 with error %ld: %s\n", err, GetMacOSStatusErrorString(err));
		return 0;
	}
	// Define the Mash stream description. Mash puts 20ms of data into each read
	// and write call. 20ms at 8000Hz equals 160 samples. Each sample is a u_char,
	// so that's 160 bytes. Mash uses 8-bit mu-law internally, so we need to convert
	// to 16-bit linear before using the audio data.
	devices[ad].mashStreamBasicDescription_.mSampleRate = 8000.0;
	//devices[ad].mashStreamBasicDescription_.mSampleRate = ifmt->sample_rate;
	devices[ad].mashStreamBasicDescription_.mFormatID = kAudioFormatLinearPCM;
#ifdef WORDS_BIGENDIAN
	devices[ad].mashStreamBasicDescription_.mFormatFlags =kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian |kLinearPCMFormatFlagIsPacked;
#else
	devices[ad].mashStreamBasicDescription_.mFormatFlags =kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#endif
	devices[ad].mashStreamBasicDescription_.mBytesPerPacket = 2;
	devices[ad].mashStreamBasicDescription_.mFramesPerPacket = 1;
	devices[ad].mashStreamBasicDescription_.mBytesPerFrame = 2;
	devices[ad].mashStreamBasicDescription_.mChannelsPerFrame = 1;
	devices[ad].mashStreamBasicDescription_.mBitsPerChannel = 16;

	// Inform the default output unit of our source format.
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &(devices[ad].mashStreamBasicDescription_), sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty2");
		printf("error setting output unit source format\n");
		return 0;
	}

	// check the stream format
	err = AudioUnitGetPropertyInfo(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &propertySize, &writable);
	if (err != noErr) debug_msg("err getting propert info for kAudioUnitProperty_StreamFormat\n");

	err = AudioUnitGetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamdesc_, &propertySize);
	if (err != noErr) debug_msg("err getting values for kAudioUnitProperty_StreamFormat\n");
	
	char name[128];
	audio_format_name(ifmt, name, 128);
	debug_msg("Requested ifmt %s\n",name);
	debug_msg("ifmt bytes pre block: %d\n",ifmt->bytes_per_block);

	// handle the requested format
	if (ifmt->encoding != DEV_S16) {
		audio_format_change_encoding(ifmt, DEV_S16);
		audio_format_name(ifmt, name, 128);
		debug_msg("Requested ifmt changed to %s\n",name);
		debug_msg("ifmt bytes per block: %d\n",ifmt->bytes_per_block);
	}

	audio_format_name(ofmt, name, 128);
	debug_msg("Requested ofmt %s\n",name);
	debug_msg("ofmt bytes pre block: %d\n",ofmt->bytes_per_block);
	
	// Allocate the read buffer and Z delay line.
	//readBufferSize_ = 8192;
	readBufferSize_ = ifmt->bytes_per_block * ringBufferFactor_;
	//readBufferSize_ = 320;
	//printf("readBufferSize_ %d\n", readBufferSize_);
	readBuffer_ = malloc(sizeof(u_char)*readBufferSize_);
	bzero(readBuffer_, readBufferSize_ * sizeof(u_char));
	//memset(readBuffer_, PCMU_AUDIO_ZERO, readBufferSize_);
	//inputReadIndex_ = -1; 
	inputReadIndex_ = 0; inputWriteIndex_ = 0;
	zLine_ = malloc(sizeof(double)*DECIM441_LENGTH / 80);
	availableInput_ = 0;

	// Allocate the write buffer.
	//writeBufferSize_ = 8000;
	writeBufferSize_ = ofmt->bytes_per_block * ringBufferFactor_;
	writeBuffer_ = malloc(sizeof(SInt16)*writeBufferSize_);
	bzero(writeBuffer_, writeBufferSize_ * sizeof(SInt16));
	outputReadIndex_ = 0; outputWriteIndex_ = 0;
	//outputWriteIndex_ = -1;
    	// Start audio processing.
	err = AudioUnitInitialize(devices[ad].outputUnit_);
	if (err != noErr) {
		debug_msg("error AudioUnitInitialize\n");
		return 0;
	}
	err = AudioDeviceStart(devices[ad].inputDeviceID_, audioIOProc);
	if (err != noErr) {
		fprintf(stderr, "Input device error: AudioDeviceStart\n");
		return 0;
	}
	err = AudioOutputUnitStart(devices[ad].outputUnit_);
	if (err != noErr) {
		fprintf(stderr, "Output device error: AudioOutputUnitStart\n");
		return 0;
	}
	// Inform the default output unit of our source format.
	/*
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &(devices[ad].mashStreamBasicDescription_), sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty3");
		return 0;
	}
	*/
	return 1;
}
Example #18
JNIEXPORT jint JNICALL Java_com_apple_audio_units_AudioUnit_AudioUnitGetPropertyInfo
  (JNIEnv *, jclass, jint ci, jint inID, jint inScope, jint inElement, jint outDataSize, jint outWritable)
{
	return (jint)AudioUnitGetPropertyInfo((AudioUnit)ci, (AudioUnitPropertyID)inID, (AudioUnitScope)inScope, (AudioUnitElement)inElement, (UInt32 *)outDataSize, (Boolean *)outWritable);
}
Example #19
static int open_coreaudio(audio_output_t *ao)
{
	mpg123_coreaudio_t* ca = (mpg123_coreaudio_t*)ao->userptr;
	UInt32 size;
	ComponentDescription desc;
	Component comp;
	AudioStreamBasicDescription inFormat;
	AudioStreamBasicDescription outFormat;
	AURenderCallbackStruct  renderCallback;
	Boolean outWritable;
	
	/* Initialize our environment */
	ca->play = 0;
	ca->buffer = NULL;
	ca->buffer_size = 0;
	ca->last_buffer = 0;
	ca->play_done = 0;
	ca->decode_done = 0;

	
	/* Get the default audio output unit */
	desc.componentType = kAudioUnitType_Output; 
	desc.componentSubType = kAudioUnitSubType_DefaultOutput;
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;
	comp = FindNextComponent(NULL, &desc);
	if(comp == NULL) {
		error("FindNextComponent failed");
		return(-1);
	}
	
	if(OpenAComponent(comp, &(ca->outputUnit)))  {
		error("OpenAComponent failed");
		return (-1);
	}
	
	if(AudioUnitInitialize(ca->outputUnit)) {
		error("AudioUnitInitialize failed");
		return (-1);
	}
	
	/* Specify the output PCM format */
	AudioUnitGetPropertyInfo(ca->outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &size, &outWritable);
	if(AudioUnitGetProperty(ca->outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &outFormat, &size)) {
		error("AudioUnitGetProperty(kAudioUnitProperty_StreamFormat) failed");
		return (-1);
	}
	
	if(AudioUnitSetProperty(ca->outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &outFormat, size)) {
		error("AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed");
		return (-1);
	}
	
	/* Specify the input PCM format */
	ca->channels = ao->channels;
	inFormat.mSampleRate = ao->rate;
	inFormat.mChannelsPerFrame = ao->channels;
	inFormat.mFormatID = kAudioFormatLinearPCM;
#ifdef _BIG_ENDIAN
	inFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsBigEndian;
#else
	inFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked;
#endif
	
	switch(ao->format)
	{
		case MPG123_ENC_SIGNED_16:
			inFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
			ca->bps = 2;
			break;
		case MPG123_ENC_SIGNED_8:
			inFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
			ca->bps = 1;
			break;
		case MPG123_ENC_UNSIGNED_8:
			ca->bps = 1;
			break;
		case MPG123_ENC_SIGNED_32:
			inFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
			ca->bps = 4;
			break;
		case MPG123_ENC_FLOAT_32:
			inFormat.mFormatFlags |= kLinearPCMFormatFlagIsFloat;
			ca->bps = 4;
			break;
		case MPG123_ENC_FLOAT_64:
			inFormat.mFormatFlags |= kLinearPCMFormatFlagIsFloat;
			ca->bps = 8;	/* 64-bit float is 8 bytes per sample */
			break;
		default:
			error("Unsupported audio format");
			return(-1);
	}
	
	inFormat.mBitsPerChannel = ca->bps << 3;
	inFormat.mBytesPerPacket = ca->bps*inFormat.mChannelsPerFrame;
	inFormat.mFramesPerPacket = 1;
	inFormat.mBytesPerFrame = ca->bps*inFormat.mChannelsPerFrame;
	
	/* Add our callback - but don't start it yet */
	memset(&renderCallback, 0, sizeof(AURenderCallbackStruct));
	renderCallback.inputProc = convertProc;
	renderCallback.inputProcRefCon = ao->userptr;
	if(AudioUnitSetProperty(ca->outputUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &renderCallback, sizeof(AURenderCallbackStruct))) {
		error("AudioUnitSetProperty(kAudioUnitProperty_SetRenderCallback) failed");
		return(-1);
	}
	
	
	/* Open an audio I/O stream and create converter */
	if (ao->rate > 0 && ao->channels > 0) {
		int ringbuffer_len;

		if(AudioConverterNew(&inFormat, &outFormat, &(ca->converter))) {
			error("AudioConverterNew failed");
			return(-1);
		}
		if(ao->channels == 1) {
			SInt32 channelMap[2] = { 0, 0 };
			if(AudioConverterSetProperty(ca->converter, kAudioConverterChannelMap, sizeof(channelMap), channelMap)) {
				error("AudioConverterSetProperty(kAudioConverterChannelMap) failed");
				return(-1);
			}
		}
		
		/* Initialise FIFO */
		ringbuffer_len = ao->rate * FIFO_DURATION * ca->bps * ao->channels;
		debug2( "Allocating %d byte ring-buffer (%f seconds)", ringbuffer_len, (float)FIFO_DURATION);
		sfifo_init( &ca->fifo, ringbuffer_len );
	}
	
	return(0);
}
Example #20
void	CAChannelMappingPlayer::SetupChannelMapping()
{
	delete mMapper;
	mMapper = NULL;
	
	const CAStreamBasicDescription &fileFormat = GetFile().GetClientDataFormat();
	CAStreamBasicDescription deviceFormat;
	UInt32 propertySize = sizeof(AudioStreamBasicDescription);
	
	XThrowIfError(AudioUnitGetProperty(
							GetOutputUnit(),
							kAudioUnitProperty_StreamFormat,
							kAudioUnitScope_Output,
							0,
							(void *)&deviceFormat,
							&propertySize), "get output device's format");

#if VERBOSE
	printf("CAChannelMappingPlayer::SetupChannelMapping: %ld-ch file, %ld-ch device\n",
		fileFormat.mChannelsPerFrame, deviceFormat.mChannelsPerFrame);
#endif

	if (fileFormat.mChannelsPerFrame <= deviceFormat.mChannelsPerFrame) {
		// no mapping needed, use output unit's default behavior 
		// (default stereo pair and speaker config from AMS)
#if VERBOSE
		printf("  using output unit's channel mapping\n");
#endif
		CAAudioFilePlayer::SetupChannelMapping();
	} else {
		// fewer device than file channels, mapping needed
		CAAudioChannelLayout fileLayout, deviceLayout;
		
#if VERBOSE
		printf("  using our own channel mapping\n");
#endif
		deviceFormat.mSampleRate = fileFormat.mSampleRate;
		deviceFormat.SetCanonical(deviceFormat.mChannelsPerFrame, false);	// force deinterleaved
		
		fileLayout = GetFile().GetFileChannelLayout();

		UInt32 layoutSize;
		Boolean writable;
		OSStatus err = AudioUnitGetPropertyInfo(
								GetOutputUnit(),
								kAudioUnitProperty_AudioChannelLayout,
								kAudioUnitScope_Input,
								0,
								&layoutSize,
								&writable);
		if (!err) {
			char *buf = (char *)malloc(layoutSize);
			err = AudioUnitGetProperty(
								GetOutputUnit(),
								kAudioUnitProperty_AudioChannelLayout,
								kAudioUnitScope_Input,
								0,
								buf,
								&layoutSize);
			if (!err)	// only adopt the layout if the fetch succeeded
				deviceLayout = CAAudioChannelLayout(reinterpret_cast<AudioChannelLayout *>(buf));
			free(buf);
		}
		mMapper = new CAChannelMapper(fileFormat, deviceFormat, &fileLayout, &deviceLayout);

		// give the output unit the same number of channels as in the device, 
		// since we'll be doing the mapping ourselves
		XThrowIfError(AudioUnitSetProperty(
								GetOutputUnit(),
								kAudioUnitProperty_StreamFormat,
								kAudioUnitScope_Input,
								0,
								(void *)&deviceFormat,
								sizeof(AudioStreamBasicDescription)), "set audio output format");
		
		XThrowIfError(mMapper->OpenMixer(fileFormat.mSampleRate), "open mixer");
		XThrowIfError(mMapper->ConfigureDownmix(), "configure downmix");
		
		AudioUnitConnection conn;
		conn.sourceAudioUnit = mMapper->GetMixer();
		conn.sourceOutputNumber = 0;
		conn.destInputNumber = 0;

		XThrowIfError(AudioUnitSetProperty(
								GetOutputUnit(),
								kAudioUnitProperty_MakeConnection,
								kAudioUnitScope_Global,
								0,
								(void *)&conn,
								sizeof(AudioUnitConnection)), "connect mixer to output unit");
		
		AURenderCallbackStruct input;
		input.inputProc = InputProc;
		input.inputProcRefCon = this;
		XThrowIfError(AudioUnitSetProperty(
									conn.sourceAudioUnit, 
									kAudioUnitProperty_SetRenderCallback, 
									kAudioUnitScope_Global,
									0,
									&input, 
									sizeof(input)), "connect input proc to mixer");
		// provide NO channel layout
//		mReadBuf = CABufferList::New("", fileFormat);
//		mReadBuf->AllocateBuffers(
	}
}
Example #21
int    CAAudioUnit::GetChannelInfo (AUChannelInfo** chaninfo, UInt32& cnt)
{
	// this is the default assumption of an audio effect unit
	Boolean* isWritable = 0;
	UInt32	dataSize = 0;
		// lets see if the unit has any channel restrictions
	OSStatus result = AudioUnitGetPropertyInfo (AU(),
						    kAudioUnitProperty_SupportedNumChannels,
						    kAudioUnitScope_Global, 0,
						    &dataSize, isWritable); //don't care if this is writable

	// if this property is NOT implemented an FX unit
	// is expected to deal with the same channel valence in and out

	if (result)
	{
		if (Comp().Desc().IsEffect())
		{
			return 1;
		}
		else if (Comp().Desc().IsGenerator() || Comp().Desc().IsMusicDevice()) {
			// directly query Bus Formats
			// Note that that these may refer to different subBusses
			// (eg. Kick, Snare,.. on a Drummachine)
			// eventually the Bus-Name for each configuration should be exposed
			// for the User to select..

			UInt32 elCountIn, elCountOut;

			if (GetElementCount (kAudioUnitScope_Input, elCountIn)) return -1;
			if (GetElementCount (kAudioUnitScope_Output, elCountOut)) return -1;

			cnt = std::max(elCountIn, elCountOut);

			*chaninfo = (AUChannelInfo*) malloc (sizeof (AUChannelInfo) * cnt);

			for (unsigned int i = 0; i < elCountIn; ++i) {
				UInt32 numChans;
				if (NumberChannels (kAudioUnitScope_Input, i, numChans)) {
					free (*chaninfo);
					*chaninfo = NULL;
					return -1;
				}
				(*chaninfo)[i].inChannels = numChans;
			}
			for (unsigned int i = elCountIn; i < cnt; ++i) {
				(*chaninfo)[i].inChannels = 0;
			}

			for (unsigned int i = 0; i < elCountOut; ++i) {
				UInt32 numChans;
				if (NumberChannels (kAudioUnitScope_Output, i, numChans)) {
					free (*chaninfo);
					*chaninfo = NULL;
					return -1;
				}
				(*chaninfo)[i].outChannels = numChans;
			}
			for (unsigned int i = elCountOut; i < cnt; ++i) {
				(*chaninfo)[i].outChannels = 0;
			}
			return 0;
		}
		else
		{
			// the au should either really tell us about this
			// or we will assume the worst
			return -1;
		}
	}

	*chaninfo = (AUChannelInfo*) malloc (dataSize);
	cnt = dataSize / sizeof (AUChannelInfo);

	result = GetProperty (kAudioUnitProperty_SupportedNumChannels,
			      kAudioUnitScope_Global, 0,
			      *chaninfo, &dataSize);

	if (result) { free (*chaninfo); *chaninfo = NULL; return -1; }
	return 0;
}
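Callers of GetChannelInfo own the array it malloc's; a return of 1 means "effect with no restrictions" and no array is allocated. A hedged usage sketch:

#include <cstdio>
#include <cstdlib>

static void DumpChannelInfo (CAAudioUnit &au)
{
	AUChannelInfo *info = NULL;
	UInt32 cnt = 0;
	if (au.GetChannelInfo (&info, cnt) == 0) {
		for (UInt32 i = 0; i < cnt; ++i)
			printf ("in %d / out %d\n", info[i].inChannels, info[i].outChannels);
		free (info);	// caller frees what GetChannelInfo allocated
	}
}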
Example #22
static int open_coreaudio(audio_output_t *ao)
{
    mpg123_coreaudio_t* ca = (mpg123_coreaudio_t*)ao->userptr;
    UInt32 size;
    AudioComponentDescription desc;
    AudioComponent comp;
    AudioStreamBasicDescription inFormat;
    AudioStreamBasicDescription outFormat;
    AURenderCallbackStruct  renderCallback;
    Boolean outWritable;

    /* Initialize our environment */
    ca->play = 0;
    ca->buffer = NULL;
    ca->buffer_size = 0;
    ca->last_buffer = 0;
    ca->play_done = 0;
    ca->decode_done = 0;


    /* Get the default audio output unit */
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    comp = AudioComponentFindNext(NULL, &desc);
    if(comp == NULL) {
        return -1;
    }

    if(AudioComponentInstanceNew(comp, &(ca->outputUnit)))  {
        return -1;
    }

    if(AudioUnitInitialize(ca->outputUnit)) {
        return -1;
    }

    /* Specify the output PCM format */
    AudioUnitGetPropertyInfo(ca->outputUnit,
                             kAudioUnitProperty_StreamFormat,
                             kAudioUnitScope_Output,
                             0,
                             &size,
                             &outWritable);
    if(AudioUnitGetProperty(ca->outputUnit,
                            kAudioUnitProperty_StreamFormat,
                            kAudioUnitScope_Output,
                            0,
                            &outFormat,
                            &size)) {
        return -1;
    }

    if(AudioUnitSetProperty(ca->outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &outFormat, size)) {
        return -1;
    }

    /* Specify the input PCM format */
    ca->channels = ao->channels;
    inFormat.mSampleRate = ao->rate;
    inFormat.mChannelsPerFrame = ao->channels;
    inFormat.mFormatID = kAudioFormatLinearPCM;
#ifdef _BIG_ENDIAN
    inFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsBigEndian;
#else
    inFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked;
#endif

    if (ao->signed_samples) {
        inFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
    }

    ca->bps = ao->bytes_per_sample;

    inFormat.mBitsPerChannel = ca->bps << 3;
    inFormat.mBytesPerPacket = ca->bps*inFormat.mChannelsPerFrame;
    inFormat.mFramesPerPacket = 1;
    inFormat.mBytesPerFrame = ca->bps*inFormat.mChannelsPerFrame;

    /* Add our callback - but don't start it yet */
    memset(&renderCallback, 0, sizeof(AURenderCallbackStruct));
    renderCallback.inputProc = convertProc;
    renderCallback.inputProcRefCon = ao->userptr;
    if(AudioUnitSetProperty(ca->outputUnit,
                            kAudioUnitProperty_SetRenderCallback,
                            kAudioUnitScope_Input,
                            0,
                            &renderCallback,
                            sizeof(AURenderCallbackStruct))) {
        return -1;
    }


    /* Open an audio I/O stream and create converter */
    if (ao->rate > 0 && ao->channels > 0) {
        int ringbuffer_len;

        if(AudioConverterNew(&inFormat, &outFormat, &(ca->converter))) {
            return -1;
        }
        if(ao->channels == 1) {
            SInt32 channelMap[2] = { 0, 0 };
            if(AudioConverterSetProperty(ca->converter, kAudioConverterChannelMap, sizeof(channelMap), channelMap)) {
                return -1;
            }
        }

        /* Initialise FIFO */
        ringbuffer_len = ((int)ao->rate *
                          FIFO_DURATION *
                          ca->bps *
                          ao->channels);
        sfifo_init( &ca->fifo, ringbuffer_len );
    }

    return(0);
}
Example #23
AUParamInfo::AUParamInfo (AudioUnit				inAU, 
							bool				inIncludeExpert, 
							bool				inIncludeReadOnly,
							AudioUnitScope		inScope,
							AudioUnitElement	inElement)
	: mAU (inAU),
	  mNumParams (0),
	  mParamListID(NULL),
	  mScope (inScope),
	  mElement (inElement)
{
	UInt32 size = 0;
	OSStatus result = AudioUnitGetPropertyInfo(mAU, kAudioUnitProperty_ParameterList, inScope, mElement, &size, NULL);
	if (size == 0 || result) return;
	
	int nparams = size / sizeof(AudioUnitParameterID);
	mParamListID = new AudioUnitParameterID[nparams];

	memset (mParamListID, 0xFF, size);

	AudioUnitParameterID *paramList = new AudioUnitParameterID[nparams];
	
	result = AudioUnitGetProperty(mAU, kAudioUnitProperty_ParameterList, mScope, mElement, paramList, &size);
	if (result) {
		delete [] mParamListID;
		delete [] paramList;
		mParamListID = NULL;
		return;
	}
	
	ParameterMap params;
	for (int i = 0; i < nparams; ++i) 
	{
		CAAUParameter auvp (mAU, paramList[i], mScope, mElement); // took out only using global scope in CAAUParameter creation
		const AudioUnitParameterInfo &paramInfo = auvp.ParamInfo();
			
		//	don't include if parameter can't be read or written
		if (!(paramInfo.flags & kAudioUnitParameterFlag_IsWritable) 
			&& !(paramInfo.flags & kAudioUnitParameterFlag_IsReadable))
			continue;

		// only include if expert params wanted
		if (!inIncludeExpert && auvp.IsExpert())
			continue;
		
		// skip read-only params unless they are wanted
		if (!(paramInfo.flags & kAudioUnitParameterFlag_IsWritable) 
			&& (paramInfo.flags & kAudioUnitParameterFlag_IsReadable))
		{	
			if (!inIncludeReadOnly)
				continue;
		}
		
		mParamListID[mNumParams] = paramList[i];
		mNumParams++;
		
		// ok - if we're here, then we have a parameter we are going to display.
		UInt32 clump = 0;
		auvp.GetClumpID (clump);
		mParams[clump].push_back (auvp);
	}

	delete [] paramList;
}