/**
 * @brief Adjust the audio output by a certain ratio and return the new
 *        value (as a percentage), or -1 when the device cannot be
 *        queried or updated.
 */
static int
audio_output_volume_adjust(Float32 n)
{
	Float32 chanvol[2], newvol;
	UInt32 size = sizeof chanvol[0];
	unsigned i;

	/* Fetch the current volume of both channels. */
	for (i = 0; i < 2; i++) {
		if (AudioDeviceGetProperty(adid, achans[i], false,
		    kAudioDevicePropertyVolumeScalar, &size,
		    &chanvol[i]) != 0)
			return (-1);
	}

	/*
	 * Average the two channels before applying the delta. Keeping the
	 * channels merged works around an OS X quirk where the built-in
	 * volume applet slowly unbalances the sound card over time.
	 */
	newvol = CLAMP((chanvol[0] + chanvol[1]) / 2.0 + n, 0.0, 1.0);

	/* Write the same merged value back to both channels. */
	for (i = 0; i < 2; i++) {
		if (AudioDeviceSetProperty(adid, 0, achans[i], false,
		    kAudioDevicePropertyVolumeScalar, size, &newvol) != 0)
			return (-1);
	}

	return (newvol * 100.0);
}
static int setVolume(int dir, double dleft, double dright) { Float32 left= (Float32)dleft; Float32 right= (Float32)dright; UInt32 sz; AudioDeviceID id; if (!getDefaultDevice(&id, dir)) return 0; sz= sizeof(left); if (checkError(AudioDeviceSetProperty(id, 0, 1, // left dir, kAudioDevicePropertyVolumeScalar, sz, &left), "SetProperty", "VolumeScalar")) return 0; sz= sizeof(right); if (checkError(AudioDeviceSetProperty(id, 0, 2, // right dir, kAudioDevicePropertyVolumeScalar, sz, &right), "SetProperty", "VolumeScalar")) return 0; return 1; }
/*
 * Set the output volume of a device. The master virtual volume is tried
 * first; when the device does not expose it, the per-channel scalar
 * volume of the preferred stereo pair is used instead.
 */
OSStatus AudioOutputSetVolume(AudioDeviceID device, Float32 left, Float32 right)
{
	OSStatus status;

	/* Preferred path: one master volume for the whole device. */
	status = AudioObjectSetPropertyData(device, &kAudioOutputVolumeProperty,
	                                    0, NULL, (UInt32)sizeof(left), &left);
	if (kAudioHardwareUnknownPropertyError != status)
		return status;

	/* Fallback: set left and right on the preferred stereo channels. */
	UInt32 stereo[2];
	status = AudioOutputGetStereoChannels(device, &stereo[0], &stereo[1]);
	if (noErr == status)
		status = AudioDeviceSetProperty(device, NULL, stereo[0], FALSE,
		                                kAudioDevicePropertyVolumeScalar,
		                                (UInt32)sizeof(Float32), &left);
	if (noErr == status)
		status = AudioDeviceSetProperty(device, NULL, stereo[1], FALSE,
		                                kAudioDevicePropertyVolumeScalar,
		                                (UInt32)sizeof(Float32), &right);
	return status;
}
bool CCoreAudioDevice::SetHogStatus(bool hog) { if (!m_DeviceId) return false; pid_t holder = GetHogStatus(); pid_t me = getpid(); if (hog) { if (holder != me) { CLog::Log(LOGDEBUG, "CCoreAudioDevice::SetHogStatus: Setting 'hog' status on device 0x%04x", m_DeviceId); OSStatus ret = AudioDeviceSetProperty(m_DeviceId, NULL, 0, false, kAudioDevicePropertyHogMode, sizeof(me), &me); if (ret) { CLog::Log(LOGERROR, "CCoreAudioDevice::SetHogStatus: Unable to set 'hog' status. Error = 0x%08x (%4.4s)", ret, CONVERT_OSSTATUS(ret)); return false; } pid_t holder = GetHogStatus(); if (holder != getpid()) { CLog::Log(LOGERROR, "CCoreAudioDevice::SetHogStatus: Unable to set 'hog' status. another process (%x) has it.", holder); return false; } CLog::Log(LOGDEBUG, "CCoreAudioDevice::SetHogStatus: Successfully set 'hog' status on device 0x%04x", m_DeviceId); } } else { if (holder == me) // Currently Set { CLog::Log(LOGDEBUG, "CCoreAudioDevice::SetHogStatus: Releasing 'hog' status on device 0x%04x", m_DeviceId); pid_t hogPid = -1; OSStatus ret = AudioDeviceSetProperty(m_DeviceId, NULL, 0, false, kAudioDevicePropertyHogMode, sizeof(hogPid), &hogPid); if (ret) { CLog::Log(LOGERROR, "CCoreAudioDevice::SetHogStatus: Unable to release 'hog' status. Error = 0x%08x (%4.4s)", ret, CONVERT_OSSTATUS(ret)); return false; } pid_t holder = GetHogStatus(); if (holder == getpid()) { CLog::Log(LOGERROR, "CCoreAudioDevice::SetHogStatus: failed to release. still hogging!"); return false; } } } return true; }
/*
 * Sets the value of system sound volume.
 *
 * @param [Float] volume
 *   The value that set volume to system.
 *   range of volume is 0.0 .. 1.0.
 * @example
 *   System::Sound.set_volume(0.75)
 */
static VALUE rb_sys_set_volume(VALUE obj, VALUE volume)
{
    AudioDeviceID device;
    OSStatus err;
    UInt32 size;
    Boolean canset = false;
    UInt32 channels[2];
    float involume;

    /* Validate the argument: numeric and within [0.0, 1.0]. */
    if (!FIXFLOAT_P(volume)) {
        rb_raise(rb_eTypeError, "wrong type of argument");
    }
    involume = NUM2DBL(volume);
    if (involume < 0.0 || involume > 1.0) {
        rb_raise(rb_eRangeError, "out of range");
    }

    // get device
    device = get_audio_device_id();

    /* Preferred path: write the master channel (0) when the device exposes
     * a writable VolumeScalar there. */
    size = sizeof(canset);
    err = AudioDeviceGetPropertyInfo(device, 0, false,
                                     kAudioDevicePropertyVolumeScalar,
                                     &size, &canset);
    if (err == noErr && canset == true) {
        size = sizeof involume;
        err = AudioDeviceSetProperty(device, NULL, 0, false,
                                     kAudioDevicePropertyVolumeScalar,
                                     size, &involume);
        /* BUG FIX: Qnil used to be returned unconditionally here, silently
         * swallowing a failed master-channel set. Only return on success;
         * otherwise fall through to the per-channel path below. */
        if (err == noErr) {
            return Qnil;
        }
    }

    // else, try separate channels
    // get the preferred stereo channel numbers
    size = sizeof(channels);
    err = AudioDeviceGetProperty(device, 0, false,
                                 kAudioDevicePropertyPreferredChannelsForStereo,
                                 &size, &channels);
    if (err != noErr) {
        rb_raise(rb_eRuntimeError, "Failed to get channel numbers");
    }

    // set the same volume on both stereo channels
    size = sizeof(float);
    err = AudioDeviceSetProperty(device, 0, channels[0], false,
                                 kAudioDevicePropertyVolumeScalar,
                                 size, &involume);
    if (err != noErr) {
        rb_raise(rb_eRuntimeError, "Failed to set volume of channel");
    }
    err = AudioDeviceSetProperty(device, 0, channels[1], false,
                                 kAudioDevicePropertyVolumeScalar,
                                 size, &involume);
    if (err != noErr) {
        rb_raise(rb_eRuntimeError, "Failed to set volume of channel");
    }
    return Qnil;
}
/*
 * Try to apply the requested stream format to the device. If the device
 * rejects it, fall back to restoring the saved default format; only a
 * failure of that fallback is reported as an error (-1).
 */
static int ca_apply_format (void)
{
    UInt32 bytes;

    /* Nothing requested yet -- nothing to do. */
    if (!(req_format.mSampleRate > 0))
        return 0;

    bytes = sizeof (req_format);
    if (AudioDeviceSetProperty(device_id, NULL, 0, 0,
                               kAudioDevicePropertyStreamFormat,
                               bytes, &req_format) == 0)
        return 0;

    /* Requested format rejected -- restore the default one. */
    if (AudioDeviceSetProperty(device_id, NULL, 0, 0,
                               kAudioDevicePropertyStreamFormat,
                               bytes, &default_format))
        return -1;

    return 0;
}
/*
 * Request a specific I/O buffer size (in frames) on the device, then read
 * back the size actually granted. Returns paNoError when the device honored
 * the request, paBufferTooSmall/paBufferTooBig when the device clamped the
 * value, or the converted error from the read-back query.
 */
static PaError SetFramesPerBuffer(AudioDeviceID device, unsigned long framesPerBuffer, int isInput)
{
    PaError result = paNoError;
    UInt32 preferredFramesPerBuffer = framesPerBuffer;
    /* FIX: removed a dead commented-out loop that halved the value while it
     * exceeded UINT32_MAX -- a UInt32 can never exceed UINT32_MAX. */
    UInt32 actualFramesPerBuffer;
    UInt32 propSize = sizeof(UInt32);

    /* Ask for the preferred size; the device is free to clamp it. The Set
     * result is superseded below: what matters is the size the device
     * actually reports afterwards. */
    result = conv_err(AudioDeviceSetProperty(device, NULL, 0, isInput,
                                             kAudioDevicePropertyBufferFrameSize,
                                             propSize, &preferredFramesPerBuffer));
    result = conv_err(AudioDeviceGetProperty(device, 0, isInput,
                                             kAudioDevicePropertyBufferFrameSize,
                                             &propSize, &actualFramesPerBuffer));
    if (result != paNoError) {
        /* Could not even query the granted size; report the query error. */
    }
    else if (actualFramesPerBuffer > framesPerBuffer) {
        result = paBufferTooSmall;
    }
    else if (actualFramesPerBuffer < framesPerBuffer) {
        result = paBufferTooBig;
    }
    return result;
}
// Request a new I/O buffer size (in frames) from the HAL, then read back
// the size it actually granted into mBufferSizeFrames.
void AudioDevice::SetBufferSize(UInt32 size)
{
    UInt32 bytes = sizeof(UInt32);
    verify_noerr(AudioDeviceSetProperty(mID, NULL, 0, mIsInput,
                                        kAudioDevicePropertyBufferFrameSize,
                                        bytes, &size));

    // The device may have clamped the request; cache the effective value.
    bytes = sizeof(UInt32);
    verify_noerr(AudioDeviceGetProperty(mID, 0, mIsInput,
                                        kAudioDevicePropertyBufferFrameSize,
                                        &bytes, &mBufferSizeFrames));
}
/*
 * Mute or unmute the output device via the master channel's mute toggle.
 * Returns the raw OSStatus from the HAL.
 */
OSStatus AudioOutputSetMuted(AudioDeviceID device, Boolean mute)
{
    UInt32 flag;

    flag = mute ? 1 : 0;
    return AudioDeviceSetProperty(device,
                                  NULL,   /* time stamp not needed */
                                  0,      /* channel 0 is master channel */
                                  FALSE,  /* for an output device */
                                  kAudioDevicePropertyMute,
                                  (UInt32)sizeof(UInt32),
                                  &flag);
}
/*
 * Write the scalar volume to the master channel (0) plus both stereo
 * channels (1 and 2), which covers mono as well as stereo devices.
 * Errors from individual channels are deliberately ignored.
 */
static void gviSetDeviceVolume(GVDevice device, GVBool isInput, GVScalar volume)
{
	Float32 scalar = volume;
	int ch = 0;

	while(ch < 3)
	{
		AudioDeviceSetProperty(device->m_deviceID, NULL, ch, isInput,
		                       kAudioDevicePropertyVolumeScalar,
		                       sizeof(Float32), &scalar);
		ch++;
	}
}
/*
 * Select the given data source (e.g. internal speaker vs. line out) on
 * the master channel of the device. Failures are only logged.
 */
static void set_data_source(AudioDeviceID audioDeviceId, int isInput, UInt32 dataSourceId)
{
    OSStatus result;

    result = AudioDeviceSetProperty(audioDeviceId, NULL, 0, isInput,
                                    kAudioDevicePropertyDataSource,
                                    (UInt32)sizeof(UInt32), &dataSourceId);
    if (result != noErr)
    {
        DBG_DYNA_AUDIO_DRV("!!CoreAudio: can't set data source\n");
    }
}
// Acquire or release exclusive ("hog") access to the audio device.
// Returns true when the requested state was reached (or already held),
// false on failure or when no device is attached. m_Hog caches the pid
// that owns the device (-1 when not hogged by us).
bool CCoreAudioDevice::SetHogStatus(bool hog)
{
  // According to Jeff Moore (Core Audio, Apple), Setting kAudioDevicePropertyHogMode
  // is a toggle and the only way to tell if you do get hog mode is to compare
  // the returned pid against getpid, if the match, you have hog mode, if not you don't.
  if (!m_DeviceId)
    return false;

  if (hog)
  {
    if (m_Hog == -1) // Not already set
    {
      CLog::Log(LOGDEBUG, "CCoreAudioDevice::SetHogStatus: Setting 'hog' status on device 0x%04x", m_DeviceId);
      // m_Hog appears to be read back by the HAL: the check below compares it
      // against getpid() after the call -- see the Jeff Moore note above.
      OSStatus ret = AudioDeviceSetProperty(m_DeviceId, NULL, 0, false, kAudioDevicePropertyHogMode, sizeof(m_Hog), &m_Hog);
      if (ret || m_Hog != getpid())
      {
        CLog::Log(LOGERROR, "CCoreAudioDevice::SetHogStatus: Unable to set 'hog' status. Error = 0x%08x (%4.4s)", ret, CONVERT_OSSTATUS(ret));
        return false;
      }
      CLog::Log(LOGDEBUG, "CCoreAudioDevice::SetHogStatus: Successfully set 'hog' status on device 0x%04x", m_DeviceId);
    }
  }
  else
  {
    if (m_Hog > -1) // Currently Set
    {
      CLog::Log(LOGDEBUG, "CCoreAudioDevice::SetHogStatus: Releasing 'hog' status on device 0x%04x", m_DeviceId);
      // Writing -1 requests release; hogPid is then compared against our pid
      // to detect a release that did not take effect.
      pid_t hogPid = -1;
      OSStatus ret = AudioDeviceSetProperty(m_DeviceId, NULL, 0, false, kAudioDevicePropertyHogMode, sizeof(hogPid), &hogPid);
      if (ret || hogPid == getpid())
      {
        CLog::Log(LOGERROR, "CCoreAudioDevice::SetHogStatus: Unable to release 'hog' status. Error = 0x%08x (%4.4s)", ret, CONVERT_OSSTATUS(ret));
        return false;
      }
      m_Hog = hogPid; // Reset internal state
    }
  }
  return true;
}
/*
 * Ask the HAL to use *framesize frames per I/O buffer on the device's
 * master channel. Returns the raw OSStatus from the HAL.
 */
static OSStatus coreaudio_set_framesize(AudioDeviceID id, UInt32 *framesize)
{
    UInt32 bytes = sizeof(*framesize);

    return AudioDeviceSetProperty(id,
                                  NULL,   /* apply immediately */
                                  0,      /* master channel */
                                  false,  /* not the input side */
                                  kAudioDevicePropertyBufferFrameSize,
                                  bytes,
                                  framesize);
}
// Configure the device's I/O buffer size (frames) and nominal sample rate,
// cache both locally, and enable/disable the input/output directions based
// on the channel counts the device reports. Returns true only when both HAL
// calls succeeded AND at least one direction is usable.
tbool CDeviceCoreAudio::Initialize(tint32 iSampleRate, tint iHWBufferSize)
{
	// NOTE(review): a zeroed AudioTimeStamp is passed as the "when" argument;
	// callers usually pass NULL for "apply immediately" -- confirm intended.
	struct AudioTimeStamp timeStamp;
	timeStamp.mFlags = 0;
	Float64 fSampleRate = iSampleRate;
	// NOTE(review): &iHWBufferSize is handed to the HAL as a UInt32-sized
	// buffer while iHWBufferSize is declared as tint -- verify the two types
	// have the same size on all supported platforms.
	OSErr err1 = AudioDeviceSetProperty(mAudioDeviceID, &timeStamp, 0, FALSE, kAudioDevicePropertyBufferFrameSize, sizeof(UInt32), &iHWBufferSize);
	OSErr err2 = AudioDeviceSetProperty(mAudioDeviceID, &timeStamp, 0, FALSE, kAudioDevicePropertyNominalSampleRate, sizeof(Float64), &fSampleRate);
	// Cache the requested values regardless of whether the HAL accepted them.
	miHWBufferSize = iHWBufferSize;
	mfSampleRate = iSampleRate;
	tbool bIsRateAndBuffOK = ((err1 == 0) && (err2 == 0));
	// Disable directions that expose no channels.
	if (GetInputChannels() <= 0)
		mbEnableInput = false;
	if (GetOutputChannels() <= 0)
		mbEnableOutput = false;
	return (bIsRateAndBuffOK && (mbEnableInput || mbEnableOutput));
} // Initialize
/*
 * Push the given stream format onto the device's master channel.
 * Returns the raw OSStatus from the HAL.
 */
static OSStatus coreaudio_set_streamformat(AudioDeviceID id, AudioStreamBasicDescription *d)
{
    UInt32 bytes = sizeof(*d);

    return AudioDeviceSetProperty(id, 0, 0, 0,
                                  kAudioDevicePropertyStreamFormat,
                                  bytes, d);
}
/*
 * Set the device's nominal sample rate, then read the rate back to verify
 * it stuck. Returns paNoError on success, paInvalidSampleRate when the
 * device reports a different rate afterwards, or the converted error from
 * the read-back query.
 */
static PaError SetSampleRate(AudioDeviceID device, double sampleRate, int isInput)
{
    PaError result = paNoError;
    double actualSampleRate;
    UInt32 propSize = sizeof(double);
    /* Request the new nominal rate. */
    result = conv_err(AudioDeviceSetProperty(device, NULL, 0, isInput, kAudioDevicePropertyNominalSampleRate, propSize, &sampleRate));
    /* NOTE(review): the Set result above is overwritten here, so a failed Set
     * is only detected indirectly through the rate comparison below --
     * confirm this is intentional. */
    result = conv_err(AudioDeviceGetProperty(device, 0, isInput, kAudioDevicePropertyNominalSampleRate, &propSize, &actualSampleRate));
    /* Exact floating-point comparison: assumes the HAL echoes back the
     * precise nominal rate it accepted -- TODO confirm for all devices. */
    if (result == paNoError && actualSampleRate != sampleRate)
    {
        result = paInvalidSampleRate;
    }
    return result;
}
/*
 * Feed the next chunk of the audio file to the output device, adapting the
 * device's stream format when the file's rate/channel count changed.
 * Blocks until the previously submitted buffer has drained. Returns 0 on
 * success, -1 on EOF or when the device rejects the new format.
 */
int audio_output_play(struct audio_file *fd)
{
	UInt32 len, size;
	int16_t *tmp;

	/* Read data in our temporary buffer */
	if ((len = audio_file_read(fd, abufnew, abuflen)) == 0)
		return (-1);

	if (fd->srate != afmt.mSampleRate ||
	    fd->channels != afmt.mChannelsPerFrame) {
		/* Sample rate or the amount of channels has changed */
		afmt.mSampleRate = fd->srate;
		afmt.mChannelsPerFrame = fd->channels;

		if (AudioDeviceSetProperty(adid, 0, 0, 0,
		    kAudioDevicePropertyStreamFormat,
		    sizeof afmt, &afmt) != 0) {
			/* Get current settings back */
			size = sizeof afmt;
			AudioDeviceGetProperty(adid, 0, false,
			    kAudioDevicePropertyStreamFormat, &size, &afmt);

			gui_msgbar_warn(_("Sample rate or amount of channels not supported."));
			return (-1);
		}
	}

	/* XXX: Mutex not actually needed - only for the condvar */
	g_mutex_lock(abuflock);
	/* Wait until the I/O callback has fully consumed the previous buffer. */
	while (g_atomic_int_get(&abufulen) != 0)
		g_cond_wait(abufdrained, abuflock);
	g_mutex_unlock(abuflock);

	/* Toggle the buffers */
	tmp = abufcur;
	abufcur = abufnew;
	abufnew = tmp;

	/* Atomically set the usage length */
	g_atomic_int_set(&abufulen, len);

	/* Start processing of the data */
	AudioDeviceStart(adid, aprocid);

	return (0);
}
/*
 * One-time CoreAudio output initialisation: resolve the default output
 * device, remember its current stream format (used later as a fallback),
 * request a 4096-frame I/O buffer, apply the requested format, and install
 * the I/O callback plus a format-change listener.
 * Returns 0 on success, -1 on failure.
 */
static int ca_init (void)
{
	UInt32 sz;
	char device_name[128];

	/* Default output device. */
	sz = sizeof(device_id);
	if (AudioHardwareGetProperty (kAudioHardwarePropertyDefaultOutputDevice, &sz, &device_id)) {
		return -1;
	}

	/* The name is fetched only to confirm the device is usable. */
	sz = sizeof (device_name);
	if (AudioDeviceGetProperty (device_id, 1, 0, kAudioDevicePropertyDeviceName, &sz, device_name)) {
		return -1;
	}

	/* Save the current format so ca_apply_format() can restore it. */
	sz = sizeof (default_format);
	if (AudioDeviceGetProperty (device_id, 0, 0, kAudioDevicePropertyStreamFormat, &sz, &default_format)) {
		return -1;
	}

	/* Best effort: failing to resize the buffer is not fatal. */
	UInt32 bufsize = 4096;
	sz = sizeof (bufsize);
	if (AudioDeviceSetProperty(device_id, NULL, 0, 0, kAudioDevicePropertyBufferFrameSize, sz, &bufsize)) {
		fprintf (stderr, "Failed to set buffer size\n");
	}

	if (ca_apply_format ()) {
		return -1;
	}

	if (AudioDeviceAddIOProc (device_id, ca_buffer_callback, NULL)) {
		return -1;
	}

	if (AudioDeviceAddPropertyListener (device_id, 0, 0, kAudioDevicePropertyStreamFormat, ca_fmtchanged, NULL)) {
		return -1;
	}

	/* Prime the cached format state once by hand. */
	ca_fmtchanged(0, 0, 0, kAudioDevicePropertyStreamFormat, NULL);

	state = OUTPUT_STATE_STOPPED;
	return 0;
}
bool CCoreAudioDevice::SetMixingSupport(bool mix) { if (!m_DeviceId) return false; int restore = -1; if (m_MixerRestore == -1) // This is our first change to this setting. Store the original setting for restore restore = (GetMixingSupport() ? 1 : 0); UInt32 mixEnable = mix ? 1 : 0; CLog::Log(LOGDEBUG, "CCoreAudioDevice::SetMixingSupport: %sabling mixing for device 0x%04x",mix ? "En" : "Dis", m_DeviceId); OSStatus ret = AudioDeviceSetProperty(m_DeviceId, NULL, 0, false, kAudioDevicePropertySupportsMixing, sizeof(mixEnable), &mixEnable); if (ret) { CLog::Log(LOGERROR, "CCoreAudioDevice::SetMixingSupport: Unable to set MixingSupport to %s. Error = 0x%08x (%4.4s)", mix ? "'On'" : "'Off'", ret, CONVERT_OSSTATUS(ret)); return false; } if (m_MixerRestore == -1) m_MixerRestore = restore; return true; }
// Change the device's nominal sample rate, remembering the original rate
// the first time so it can later be restored. Returns true when the rate
// is already correct or when the change succeeded.
bool CCoreAudioDevice::SetNominalSampleRate(Float64 sampleRate)
{
  if (!m_DeviceId || sampleRate == 0.0f)
    return false;

  Float64 previousRate = GetNominalSampleRate();
  if (previousRate == sampleRate)
    return true; // Already running at the requested rate.

  UInt32 bytes = sizeof(Float64);
  OSStatus status = AudioDeviceSetProperty(m_DeviceId, NULL, 0, false, kAudioDevicePropertyNominalSampleRate, bytes, &sampleRate);
  if (status)
  {
    CLog::Log(LOGERROR, "CCoreAudioUnit::SetNominalSampleRate: Unable to set current device sample rate to %0.0f. Error = 0x%08x (%4.4s)", (float)sampleRate, status, CONVERT_OSSTATUS(status));
    return false;
  }
  CLog::Log(LOGDEBUG, "CCoreAudioUnit::SetNominalSampleRate: Changed device sample rate from %0.0f to %0.0f.", (float)previousRate, (float)sampleRate);

  // Only remember the very first original rate.
  if (m_SampleRateRestore == 0.0f)
    m_SampleRateRestore = previousRate;

  return true;
}
void macosx_volume_write(int left, int right) { UInt32 size; AudioDeviceID od; OSStatus e; UInt32 ch[2]; Float32 fl[2]; int i; size=sizeof(od); e = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice, &size, &od); if (e != 0) return; size=sizeof(ch); e = AudioDeviceGetProperty(od, 0, /* QA1016 says "0" is master channel */ false, kAudioDevicePropertyPreferredChannelsForStereo, &size, &ch); if (e != 0) return; fl[0] = ((float)left) / 65536.0f; fl[1] = ((float)right) / 65536.0f; for (i = 0; i < 2; i++) { e = AudioDeviceSetProperty(od, /* device */ NULL, /* no timestamp */ ch[i], /* preferred stereo channel */ false, /* output device */ kAudioDevicePropertyVolumeScalar, sizeof(Float32), &fl[i]); if (e != 0) return; } }
/* Play each file named on the command line through the default CoreAudio
 * output device. Only mono and stereo files are accepted; mono files are
 * played as "fake stereo" (the single channel duplicated into two). */
static void
macosx_play (int argc, char *argv [])
{	MacOSXAudioData 	audio_data ;
	OSStatus	err ;
	UInt32 		count, buffer_size ;
	int 		k ;

	audio_data.fake_stereo = 0 ;
	audio_data.device = kAudioDeviceUnknown ;

	/*  get the default output device for the HAL */
	count = sizeof (AudioDeviceID) ;
	if ((err = AudioHardwareGetProperty (kAudioHardwarePropertyDefaultOutputDevice, &count, (void *) &(audio_data.device))) != noErr)
	{	printf ("AudioHardwareGetProperty (kAudioDevicePropertyDefaultOutputDevice) failed.\n") ;
		return ;
		} ;

	/*  get the buffersize that the default device uses for IO */
	count = sizeof (UInt32) ;
	if ((err = AudioDeviceGetProperty (audio_data.device, 0, false, kAudioDevicePropertyBufferSize, &count, &buffer_size)) != noErr)
	{	printf ("AudioDeviceGetProperty (kAudioDevicePropertyBufferSize) failed.\n") ;
		return ;
		} ;

	/*  get a description of the data format used by the default device */
	count = sizeof (AudioStreamBasicDescription) ;
	if ((err = AudioDeviceGetProperty (audio_data.device, 0, false, kAudioDevicePropertyStreamFormat, &count, &(audio_data.format))) != noErr)
	{	printf ("AudioDeviceGetProperty (kAudioDevicePropertyStreamFormat) failed.\n") ;
		return ;
		} ;

	/*	Base setup completed. Now play files. */
	for (k = 1 ; k < argc ; k++)
	{	printf ("Playing %s\n", argv [k]) ;
		if (! (audio_data.sndfile = sf_open (argv [k], SFM_READ, &(audio_data.sfinfo))))
		{	puts (sf_strerror (NULL)) ;
			continue ;
			} ;

		/* Only mono and stereo material is supported. */
		if (audio_data.sfinfo.channels < 1 || audio_data.sfinfo.channels > 2)
		{	printf ("Error : channels = %d.\n", audio_data.sfinfo.channels) ;
			continue ;
			} ;

		audio_data.format.mSampleRate = audio_data.sfinfo.samplerate ;

		/* Mono is duplicated into both stereo channels by the callback. */
		if (audio_data.sfinfo.channels == 1)
		{	audio_data.format.mChannelsPerFrame = 2 ;
			audio_data.fake_stereo = 1 ;
			}
		else
			audio_data.format.mChannelsPerFrame = audio_data.sfinfo.channels ;

		if ((err = AudioDeviceSetProperty (audio_data.device, NULL, 0, false, kAudioDevicePropertyStreamFormat, sizeof (AudioStreamBasicDescription), &(audio_data.format))) != noErr)
		{	printf ("AudioDeviceSetProperty (kAudioDevicePropertyStreamFormat) failed.\n") ;
			return ;
			} ;

		/*  we want linear pcm */
		if (audio_data.format.mFormatID != kAudioFormatLinearPCM)
			return ;

		/* Fire off the device. */
		if ((err = AudioDeviceAddIOProc (audio_data.device, macosx_audio_out_callback,
				(void *) &audio_data)) != noErr)
		{	printf ("AudioDeviceAddIOProc failed.\n") ;
			return ;
			} ;

		err = AudioDeviceStart (audio_data.device, macosx_audio_out_callback) ;
		if (err != noErr)
			return ;

		audio_data.done_playing = SF_FALSE ;

		/* Poll until the I/O callback flags completion. */
		while (audio_data.done_playing == SF_FALSE)
			usleep (10 * 1000) ; /* 10 ms (10,000 microseconds). */

		if ((err = AudioDeviceStop (audio_data.device, macosx_audio_out_callback)) != noErr)
		{	printf ("AudioDeviceStop failed.\n") ;
			return ;
			} ;

		err = AudioDeviceRemoveIOProc (audio_data.device, macosx_audio_out_callback) ;
		if (err != noErr)
		{	printf ("AudioDeviceRemoveIOProc failed.\n") ;
			return ;
			} ;

		sf_close (audio_data.sndfile) ;
		} ;

	return ;
} /* macosx_play */
// Construct a JACK insert for a VST or AU host: connect to the JACK
// client, register the return (input) ports, obtain the host-side output
// port buffers from the JACK CoreAudio driver, and -- when the JACK buffer
// is larger than the host buffer -- set up buffer-size aligners. On any
// failure c_error / early return leaves the object unable to process.
JARInsert::JARInsert(long host_buffer_size, int hostType)
    : c_error(kNoErr), c_client(NULL), c_isRunning(false), c_rBufOn(false),
      c_needsDeactivate(false), c_hBufferSize(host_buffer_size), c_hostType(hostType)
{
    ReadPrefs();

    UInt32 outSize;
    Boolean isWritable;

    if (!OpenAudioClient()) {
        JARILog("Cannot find jack client.\n");
        SHOWALERT("Cannot find jack client for this application, check if Jack server is running.");
        return;
    }

    // Deactivate Jack callback
    //AudioDeviceGetPropertyInfo(c_jackDevID, 0, true, kAudioDevicePropertyDeactivateJack, &outSize, &isWritable);
    //AudioDeviceSetProperty(c_jackDevID, NULL, 0, true, kAudioDevicePropertyDeactivateJack, 0, NULL);

    // Fixed stereo layout: two return ports in, two driver ports out.
    int nPorts = 2;
    c_inPorts = (jack_port_t**)malloc(sizeof(jack_port_t*) * nPorts);
    c_outPorts = (float**)malloc(sizeof(float*) * nPorts);
    c_nInPorts = c_nOutPorts = nPorts;
    c_jBufferSize = jack_get_buffer_size(c_client);

    // Register the return ports; names are numbered per plugin instance.
    char name[256];
    for (int i = 0;i < c_nInPorts;i++) {
        if (hostType == 'vst ')
            sprintf(name, "VSTreturn%d", JARInsert::c_instances + i + 1);
        else
            sprintf(name, "AUreturn%d", JARInsert::c_instances + i + 1);
        c_inPorts[i] = jack_port_register(c_client, name, JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0);
        JARILog("Port: %s created\n", name);
    }

    c_instance = JARInsert::c_instances;

    // Allocate and then fetch the driver-side output port buffers through
    // the JACK CoreAudio driver's custom device properties. portNum is used
    // as input (port index) for the Allocate call and is overwritten by the
    // Get call, which returns the buffer pointer into c_outPorts[i].
    for (int i = 0; i < c_nOutPorts; i++) {
        UInt32 portNum = c_instance + i;
        if (hostType == 'vst ') {
            AudioDeviceGetPropertyInfo(c_jackDevID, 0, true, kAudioDevicePropertyAllocateJackPortVST, &outSize, &isWritable);
            AudioDeviceSetProperty(c_jackDevID, NULL, 0, true, kAudioDevicePropertyAllocateJackPortVST, portNum, NULL);
            AudioDeviceGetPropertyInfo(c_jackDevID, 0, true, kAudioDevicePropertyGetJackPortVST, &outSize, &isWritable);
            AudioDeviceGetProperty(c_jackDevID, 0, true, kAudioDevicePropertyGetJackPortVST, &portNum, &c_outPorts[i]);
        } else {
            AudioDeviceGetPropertyInfo(c_jackDevID, 0, true, kAudioDevicePropertyAllocateJackPortAU, &outSize, &isWritable);
            AudioDeviceSetProperty(c_jackDevID, NULL, 0, true, kAudioDevicePropertyAllocateJackPortAU, portNum, NULL);
            AudioDeviceGetPropertyInfo(c_jackDevID, 0, true, kAudioDevicePropertyGetJackPortAU, &outSize, &isWritable);
            AudioDeviceGetProperty(c_jackDevID, 0, true, kAudioDevicePropertyGetJackPortAU, &portNum, &c_outPorts[i]);
        }
        // NOTE(review): 'name' still holds the last registered input port
        // name here, so this log line repeats it -- confirm intended.
        JARILog("Port: %s created\n", name);
    }

#if 0
    if (!c_isRunning) {
        JARILog("Jack client activated\n");
        jack_activate(c_client);
        c_needsDeactivate = true;
    } else
        c_needsDeactivate = false;
#endif

    // When JACK runs with a larger buffer than the host, interpose
    // aligners in both directions (two per direction, one per channel).
    if (c_jBufferSize > c_hBufferSize) {
        c_bsAI1 = new BSizeAlign(c_hBufferSize, c_jBufferSize);
        c_bsAI2 = new BSizeAlign(c_hBufferSize, c_jBufferSize);
        c_bsAO1 = new BSizeAlign(c_jBufferSize, c_hBufferSize);
        c_bsAO2 = new BSizeAlign(c_jBufferSize, c_hBufferSize);
        if (c_bsAI1->Ready() && c_bsAI2->Ready() && c_bsAO1->Ready() && c_bsAO2->Ready()) {
            c_rBufOn = true;
        } else {
            c_error = kErrInvalidBSize;
            Flush();
            return ;
        }
    }

    // Each insert consumes two port slots (stereo pair).
    JARInsert::c_instances += 2;
    JARInsert::c_instances_count++;
    c_canProcess = true;

    // (Possible) reactivate Jack callback
    //AudioDeviceGetPropertyInfo(c_jackDevID, 0, true, kAudioDevicePropertyActivateJack, &outSize, &isWritable);
    //AudioDeviceSetProperty(c_jackDevID, NULL, 0, true, kAudioDevicePropertyActivateJack, 0, NULL);
}
/*
 * Initialise one CoreAudio voice (input or output): create its mutex,
 * resolve the default device for the requested direction, clamp and apply
 * the I/O buffer frame size, set the sample rate on the device's stream
 * format, install the I/O callback and -- for output voices -- start
 * playback. Returns 0 on success, -1 on failure (deviceID is reset to
 * kAudioDeviceUnknown on partial failures so the voice is not used).
 */
static int coreaudio_voice_init (coreaudioVoice* core, struct audsettings* as, int frameSize, AudioDeviceIOProc ioproc, void* hw, int input)
{
    OSStatus status;
    UInt32 propertySize;
    int err;
    /* NOTE(review): 'bits' is computed below but not used in this function
     * (the audio_pcm_init_info call that would consume it is still TODO). */
    int bits = 8;
    AudioValueRange frameRange;
    const char* typ = input ? "input" : "playback";

    core->isInput = input ? true : false;

    /* create mutex */
    err = pthread_mutex_init(&core->mutex, NULL);
    if (err) {
        dolog("Could not create mutex\nReason: %s\n", strerror (err));
        return -1;
    }

    if (as->fmt == AUD_FMT_S16 || as->fmt == AUD_FMT_U16) {
        bits = 16;
    }

    // TODO: audio_pcm_init_info (&hw->info, as);

    /* open default output device */
    /* note: we use DefaultSystemOutputDevice because DefaultOutputDevice seems to
     * always link to the internal speakers, and not the ones selected through system properties
     * go figure... */
    propertySize = sizeof(core->deviceID);
    status = AudioHardwareGetProperty(
        input ? kAudioHardwarePropertyDefaultInputDevice : kAudioHardwarePropertyDefaultSystemOutputDevice,
        &propertySize,
        &core->deviceID);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not get default %s device\n", typ);
        return -1;
    }
    if (core->deviceID == kAudioDeviceUnknown) {
        dolog ("Could not initialize %s - Unknown Audiodevice\n", typ);
        return -1;
    }

    /* get minimum and maximum buffer frame sizes */
    propertySize = sizeof(frameRange);
    status = AudioDeviceGetProperty(
        core->deviceID,
        0,
        core->isInput,
        kAudioDevicePropertyBufferFrameSizeRange,
        &propertySize,
        &frameRange);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not get device buffer frame range\n");
        return -1;
    }

    /* Clamp the requested frame size into the device's supported range. */
    if (frameRange.mMinimum > frameSize) {
        core->bufferFrameSize = (UInt32) frameRange.mMinimum;
        dolog ("warning: Upsizing Output Buffer Frames to %f\n", frameRange.mMinimum);
    }
    else if (frameRange.mMaximum < frameSize) {
        core->bufferFrameSize = (UInt32) frameRange.mMaximum;
        dolog ("warning: Downsizing Output Buffer Frames to %f\n", frameRange.mMaximum);
    }
    else {
        core->bufferFrameSize = frameSize;
    }

    /* set Buffer Frame Size */
    propertySize = sizeof(core->bufferFrameSize);
    status = AudioDeviceSetProperty(
        core->deviceID,
        NULL,
        0,
        core->isInput,
        kAudioDevicePropertyBufferFrameSize,
        propertySize,
        &core->bufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set device buffer frame size %ld\n", core->bufferFrameSize);
        return -1;
    }

    /* get Buffer Frame Size -- the device may have adjusted our request */
    propertySize = sizeof(core->bufferFrameSize);
    status = AudioDeviceGetProperty(
        core->deviceID,
        0,
        core->isInput,
        kAudioDevicePropertyBufferFrameSize,
        &propertySize,
        &core->bufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not get device buffer frame size\n");
        return -1;
    }
    // TODO: hw->samples = *pNBuffers * core->bufferFrameSize;

    /* get StreamFormat */
    propertySize = sizeof(core->streamBasicDescription);
    status = AudioDeviceGetProperty(
        core->deviceID,
        0,
        core->isInput,
        kAudioDevicePropertyStreamFormat,
        &propertySize,
        &core->streamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not get Device Stream properties\n");
        core->deviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Samplerate (only the rate field of the fetched format is changed) */
    core->streamBasicDescription.mSampleRate = (Float64) as->freq;
    propertySize = sizeof(core->streamBasicDescription);
    status = AudioDeviceSetProperty(
        core->deviceID,
        0,
        0,
        core->isInput,
        kAudioDevicePropertyStreamFormat,
        propertySize,
        &core->streamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set samplerate %d\n", as->freq);
        core->deviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Callback */
    core->ioproc = ioproc;
    status = AudioDeviceAddIOProc(core->deviceID, ioproc, hw);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set IOProc\n");
        core->deviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* start Playback (output voices only; input voices are started later) */
    if (!input && !coreaudio_voice_isPlaying(core)) {
        status = AudioDeviceStart(core->deviceID, core->ioproc);
        if (status != kAudioHardwareNoError) {
            coreaudio_logerr2 (status, typ, "Could not start playback\n");
            AudioDeviceRemoveIOProc(core->deviceID, core->ioproc);
            core->deviceID = kAudioDeviceUnknown;
            return -1;
        }
    }

    return 0;
}
/*
 * Open the Mac OS X audio path for the given device slot: resolve the
 * default input device, tweak its stream format and sample rate, install
 * the input IOProc and default-output AudioUnit render callback, describe
 * the internal 8 kHz / 16-bit mono "Mash" stream, allocate the ring
 * buffers, and start both input and output. Returns 1 on success, 0 on
 * any failure.
 */
int macosx_audio_open(audio_desc_t ad, audio_format* ifmt, audio_format *ofmt)
{
	OSStatus err = noErr;
	UInt32   propertySize;
	Boolean  writable;
	obtained_ = false;
	add = ad;
	//dev[0] = devices[ad];
	UNUSED(ofmt);

	// Get the default input device ID.
	err = AudioHardwareGetPropertyInfo(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &writable);
	if (err != noErr) {
		return 0;
	}
	err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &(devices[ad].inputDeviceID_));
	if (err != noErr) {
		debug_msg("error kAudioHardwarePropertyDefaultInputDevice");
		return 0;
	}
	if (devices[ad].inputDeviceID_ == kAudioDeviceUnknown) {
		debug_msg("error kAudioDeviceUnknown");
		return 0;
	}

	// Get the input stream description.
	err = AudioDeviceGetPropertyInfo(devices[ad].inputDeviceID_, 0, true, kAudioDevicePropertyStreamFormat, &propertySize, &writable);
	if (err != noErr) {
		debug_msg("error AudioDeviceGetPropertyInfo");
		return 0;
	}
	err = AudioDeviceGetProperty(devices[ad].inputDeviceID_, 0, true, kAudioDevicePropertyStreamFormat, &propertySize, &(devices[ad].inputStreamBasicDescription_));
	//printf("inputStreamBasicDescription_.mBytesPerFrame %d\n", devices[add].inputStreamBasicDescription_);
	if (err != noErr) {
		debug_msg("error AudioDeviceGetProperty");
		return 0;
	}

	// Switch to little-endian samples.
	// NOTE(review): '& 0' makes the mask zero, so this clears ALL format
	// flags, not just the big-endian bit -- confirm this is what was meant.
	devices[ad].inputStreamBasicDescription_.mFormatFlags &= (kAudioFormatFlagIsBigEndian & 0);
	if (writable) {
		err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, 0, true, kAudioDevicePropertyStreamFormat, sizeof(AudioStreamBasicDescription), &(devices[ad].inputStreamBasicDescription_));
		if (err != noErr) printf("err: AudioDeviceSetProperty: kAudioDevicePropertyStreamFormat\n");
	}

	/* set the buffer size of the device */
	/*
	int bufferByteSize = 8192;
	propertySize = sizeof(bufferByteSize);
	err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, 0, true, kAudioDevicePropertyBufferSize, propertySize, &bufferByteSize);
	if (err != noErr) debug_msg("err: Set kAudioDevicePropertyBufferSize to %d\n", bufferByteSize);
	else debug_msg("sucessfully set kAudioDevicePropertyBufferSize to %d\n", bufferByteSize);
	*/

	// Set the device sample rate -- a temporary fix for the G5's
	// built-in audio and possibly other audio devices.
	Boolean IsInput = 0;
	int inChannel = 0;
	Float64 theAnswer = 44100;
	UInt32 theSize = sizeof(theAnswer);
	err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, inChannel, IsInput, kAudioDevicePropertyNominalSampleRate, theSize, &theAnswer);
	if (err != noErr) {
		debug_msg("error AudioDeviceSetProperty\n");
		return 0;
	}
	debug_msg("Sample rate, %f\n", theAnswer);

#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	// 10.5+ path: IOProc IDs and the component-based default output unit.
	err = AudioDeviceCreateIOProcID(devices[ad].inputDeviceID_, audioIOProc, (void*)NULL, &devices[ad].inputDeviceProcID_);
	if (err != noErr) {
		debug_msg("error AudioDeviceCreateIOProcID, %s\n", GetMacOSStatusCommentString(err));
		return 0;
	}
	err = OpenADefaultComponent(kAudioUnitType_Output, kAudioUnitSubType_DefaultOutput, &(devices[ad].outputUnit_));
	// The HAL AU maybe a better way to in the future...
	//err = OpenADefaultComponent(kAudioUnitType_Output, kAudioUnitSubType_HALOutput, &(devices[ad].outputUnit_));
	if (err != noErr) {
		debug_msg("error OpenADefaultComponent\n");
		return 0;
	}
#else
	// Register the AudioDeviceIOProc.
	err = AudioDeviceAddIOProc(devices[ad].inputDeviceID_, audioIOProc, NULL);
	if (err != noErr) {
		debug_msg("error AudioDeviceAddIOProc\n");
		return 0;
	}
	err = OpenDefaultAudioOutput(&(devices[ad].outputUnit_));
	if (err != noErr) {
		debug_msg("error OpenDefaultAudioOutput\n");
		return 0;
	}
#endif

	// Register a callback function to provide output data to the unit.
	devices[ad].input.inputProc = outputRenderer;
	devices[ad].input.inputProcRefCon = 0;
	/* These would be needed if HAL used
	* UInt32 enableIO =1;
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, (const void*)&enableIO, sizeof(UInt32));
	enableIO=0;
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, (const void*)&enableIO, sizeof(UInt32));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty EnableIO with error %ld: %s\n", err, GetMacOSStatusErrorString(err));
		return 0;
	}*/
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &(devices[ad].input), sizeof(AURenderCallbackStruct));
#else
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &(devices[ad].input), sizeof(AURenderCallbackStruct));
#endif
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty1 with error %ld: %s\n", err, GetMacOSStatusErrorString(err));
		return 0;
	}

	// Define the Mash stream description. Mash puts 20ms of data into each read
	// and write call. 20ms at 8000Hz equals 160 samples. Each sample is a u_char,
	// so that's 160 bytes. Mash uses 8-bit mu-law internally, so we need to convert
	// to 16-bit linear before using the audio data.
	devices[ad].mashStreamBasicDescription_.mSampleRate = 8000.0;
	//devices[ad].mashStreamBasicDescription_.mSampleRate = ifmt->sample_rate;
	devices[ad].mashStreamBasicDescription_.mFormatID = kAudioFormatLinearPCM;
#ifdef WORDS_BIGENDIAN
	devices[ad].mashStreamBasicDescription_.mFormatFlags =kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian |kLinearPCMFormatFlagIsPacked;
#else
	devices[ad].mashStreamBasicDescription_.mFormatFlags =kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#endif
	devices[ad].mashStreamBasicDescription_.mBytesPerPacket = 2;
	devices[ad].mashStreamBasicDescription_.mFramesPerPacket = 1;
	devices[ad].mashStreamBasicDescription_.mBytesPerFrame = 2;
	devices[ad].mashStreamBasicDescription_.mChannelsPerFrame = 1;
	devices[ad].mashStreamBasicDescription_.mBitsPerChannel = 16;

	// Inform the default output unit of our source format.
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &(devices[ad].mashStreamBasicDescription_), sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty2");
		printf("error setting output unit source format\n");
		return 0;
	}

	// check the stream format
	err = AudioUnitGetPropertyInfo(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &propertySize, &writable);
	if (err != noErr) debug_msg("err getting propert info for kAudioUnitProperty_StreamFormat\n");
	err = AudioUnitGetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamdesc_, &propertySize);
	if (err != noErr) debug_msg("err getting values for kAudioUnitProperty_StreamFormat\n");

	char name[128];
	audio_format_name(ifmt, name, 128);
	debug_msg("Requested ifmt %s\n",name);
	debug_msg("ifmt bytes pre block: %d\n",ifmt->bytes_per_block);
	// handle the requested format: internally only 16-bit signed is used
	if (ifmt->encoding != DEV_S16) {
		audio_format_change_encoding(ifmt, DEV_S16);
		debug_msg("Requested ifmt changed to %s\n",name);
		debug_msg("ifmt bytes pre block: %d\n",ifmt->bytes_per_block);
	}
	audio_format_name(ofmt, name, 128);
	debug_msg("Requested ofmt %s\n",name);
	debug_msg("ofmt bytes pre block: %d\n",ofmt->bytes_per_block);

	// Allocate the read buffer and Z delay line.
	//readBufferSize_ = 8192;
	readBufferSize_ = ifmt->bytes_per_block * ringBufferFactor_;
	//readBufferSize_ = 320;
	//printf("readBufferSize_ %d\n", readBufferSize_);
	readBuffer_ = malloc(sizeof(u_char)*readBufferSize_);
	bzero(readBuffer_, readBufferSize_ * sizeof(u_char));
	//memset(readBuffer_, PCMU_AUDIO_ZERO, readBufferSize_);
	//inputReadIndex_ = -1;
	inputReadIndex_ = 0; inputWriteIndex_ = 0;
	zLine_ = malloc(sizeof(double)*DECIM441_LENGTH / 80);
	availableInput_ = 0;

	// Allocate the write buffer.
	//writeBufferSize_ = 8000;
	writeBufferSize_ = ofmt->bytes_per_block * ringBufferFactor_;
	writeBuffer_ = malloc(sizeof(SInt16)*writeBufferSize_);
	bzero(writeBuffer_, writeBufferSize_ * sizeof(SInt16));
	outputReadIndex_ = 0; outputWriteIndex_ = 0;
	//outputWriteIndex_ = -1;

	// Start audio processing.
	err = AudioUnitInitialize(devices[ad].outputUnit_);
	if (err != noErr) {
		debug_msg("error AudioUnitInitialize\n");
		return 0;
	}
	err = AudioDeviceStart(devices[ad].inputDeviceID_, audioIOProc);
	if (err != noErr) {
		fprintf(stderr, "Input device error: AudioDeviceStart\n");
		return 0;
	}
	err = AudioOutputUnitStart(devices[ad].outputUnit_);
	if (err != noErr) {
		fprintf(stderr, "Output device error: AudioOutputUnitStart\n");
		return 0;
	}

	// Inform the default output unit of our source format.
	/*
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &(devices[ad].mashStreamBasicDescription_), sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty3");
		return 0;
	}
	*/
	return 1;
};
/*
 * Open and configure the default CoreAudio output device for playback.
 *
 * Sequence: create the voice mutex, fill in the generic PCM info, look up
 * the default output device, clamp the configured buffer frame count into
 * the device-supported range, push the frame size and sample rate to the
 * device, install the IO proc and start playback (if not already running).
 *
 * @param hw          generic output voice; cast to coreaudioVoiceOut below
 * @param as          requested audio settings (only as->freq is pushed to HW)
 * @param drv_opaque  CoreaudioConf* with buffer_frames / nbuffers
 * @return 0 on success, -1 on any failure
 *
 * NOTE(review): on error paths after the mutex is created, the mutex is not
 * destroyed, and core->outputDeviceID is reset to kAudioDeviceUnknown only on
 * the later failures — presumably callers treat -1 as fatal; verify cleanup
 * expectations against the caller.
 */
static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
                              void *drv_opaque)
{
    OSStatus status;
    coreaudioVoiceOut *core = (coreaudioVoiceOut *) hw;
    UInt32 propertySize;
    int err;
    const char *typ = "playback";
    AudioValueRange frameRange;
    CoreaudioConf *conf = drv_opaque;

    /* create mutex (shared with the IO proc thread) */
    err = pthread_mutex_init(&core->mutex, NULL);
    if (err) {
        dolog("Could not create mutex\nReason: %s\n", strerror (err));
        return -1;
    }

    audio_pcm_init_info (&hw->info, as);

    /* open default output device */
    propertySize = sizeof(core->outputDeviceID);
    status = AudioHardwareGetProperty(
        kAudioHardwarePropertyDefaultOutputDevice,
        &propertySize,
        &core->outputDeviceID);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get default output Device\n");
        return -1;
    }
    if (core->outputDeviceID == kAudioDeviceUnknown) {
        dolog ("Could not initialize %s - Unknown Audiodevice\n", typ);
        return -1;
    }

    /* get minimum and maximum buffer frame sizes */
    propertySize = sizeof(frameRange);
    status = AudioDeviceGetProperty(
        core->outputDeviceID,
        0,
        0,
        kAudioDevicePropertyBufferFrameSizeRange,
        &propertySize,
        &frameRange);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get device buffer frame range\n");
        return -1;
    }

    /* clamp the configured frame count into the device-supported range */
    if (frameRange.mMinimum > conf->buffer_frames) {
        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMinimum;
        dolog ("warning: Upsizing Buffer Frames to %f\n", frameRange.mMinimum);
    }
    else if (frameRange.mMaximum < conf->buffer_frames) {
        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMaximum;
        dolog ("warning: Downsizing Buffer Frames to %f\n", frameRange.mMaximum);
    }
    else {
        core->audioDevicePropertyBufferFrameSize = conf->buffer_frames;
    }

    /* set Buffer Frame Size */
    propertySize = sizeof(core->audioDevicePropertyBufferFrameSize);
    status = AudioDeviceSetProperty(
        core->outputDeviceID,
        NULL,
        0,
        false,
        kAudioDevicePropertyBufferFrameSize,
        propertySize,
        &core->audioDevicePropertyBufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not set device buffer frame size %" PRIu32 "\n",
                           (uint32_t)core->audioDevicePropertyBufferFrameSize);
        return -1;
    }

    /* get Buffer Frame Size — re-read because the device may have chosen
       a different value than the one we asked for */
    propertySize = sizeof(core->audioDevicePropertyBufferFrameSize);
    status = AudioDeviceGetProperty(
        core->outputDeviceID,
        0,
        false,
        kAudioDevicePropertyBufferFrameSize,
        &propertySize,
        &core->audioDevicePropertyBufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get device buffer frame size\n");
        return -1;
    }
    /* size the mixing buffer from the actual per-callback frame count */
    hw->samples = conf->nbuffers * core->audioDevicePropertyBufferFrameSize;

    /* get StreamFormat */
    propertySize = sizeof(core->outputStreamBasicDescription);
    status = AudioDeviceGetProperty(
        core->outputDeviceID,
        0,
        false,
        kAudioDevicePropertyStreamFormat,
        &propertySize,
        &core->outputStreamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get Device Stream properties\n");
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Samplerate (only the rate is changed; the rest of the fetched
       description is written back unmodified) */
    core->outputStreamBasicDescription.mSampleRate = (Float64) as->freq;
    propertySize = sizeof(core->outputStreamBasicDescription);
    status = AudioDeviceSetProperty(
        core->outputDeviceID,
        0,
        0,
        0,
        kAudioDevicePropertyStreamFormat,
        propertySize,
        &core->outputStreamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set samplerate %d\n",
                           as->freq);
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Callback */
    status = AudioDeviceAddIOProc(core->outputDeviceID, audioDeviceIOProc, hw);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set IOProc\n");
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* start Playback (skip if the device is already running) */
    if (!isPlaying(core->outputDeviceID)) {
        status = AudioDeviceStart(core->outputDeviceID, audioDeviceIOProc);
        if (status != kAudioHardwareNoError) {
            coreaudio_logerr2 (status, typ, "Could not start playback\n");
            AudioDeviceRemoveIOProc(core->outputDeviceID, audioDeviceIOProc);
            core->outputDeviceID = kAudioDeviceUnknown;
            return -1;
        }
    }

    return 0;
}
//_______________________________________________
//
// Open the default output AudioUnit (legacy Component Manager / AU v1 API),
// install the render callback, declare the PCM source format, and size the
// device buffer. Returns 1 on success, 0 on failure.
//
// channels : number of interleaved channels of the source
// fq       : sample rate in Hz
//_______________________________________________
uint8_t coreAudioDevice::init(uint8_t channels, uint32_t fq)
{
    _channels = channels;
    OSStatus err;
    ComponentDescription desc;
    AudioUnitInputCallback input;
    AudioStreamBasicDescription streamFormat;
    AudioDeviceID theDevice;
    UInt32 sz=0;
    UInt32 kFramesPerSlice=512;  // requested frames per IO cycle; re-read below

    // Locate the default output AudioUnit component.
    // NOTE(review): kAudioUnitID_DefaultOutput is placed in
    // componentManufacturer while componentSubType is kAudioUnitSubType_Output
    // — this matches some pre-10.3 sample code, but verify against the
    // legacy AudioUnit headers in use.
    desc.componentType = 'aunt';
    desc.componentSubType = kAudioUnitSubType_Output;
    desc.componentManufacturer = kAudioUnitID_DefaultOutput;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    comp= FindNextComponent(NULL, &desc);
    if (comp == NULL)
    {
        printf("coreAudio: Cannot find component\n");
        return 0;
    }
    err = OpenAComponent(comp, &theOutputUnit);
    if(err)
    {
        printf("coreAudio: Cannot open component\n");
        return 0;
    }

    // Initialize it (errors only logged via verify_noerr, not propagated)
    verify_noerr(AudioUnitInitialize(theOutputUnit));

    // Set up a callback function to generate output to the output unit
#if 1
    input.inputProc = MyRenderer;
    input.inputProcRefCon = NULL;

    verify_noerr(AudioUnitSetProperty(theOutputUnit,
                                      kAudioUnitProperty_SetInputCallback,
                                      kAudioUnitScope_Global,
                                      0,
                                      &input,
                                      sizeof(input)));
#endif

    // Describe our source data: interleaved signed 16-bit big-endian PCM.
    streamFormat.mSampleRate = fq;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
                                | kLinearPCMFormatFlagIsBigEndian
                                | kLinearPCMFormatFlagIsPacked;
    streamFormat.mBytesPerPacket = channels * sizeof (UInt16);
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mBytesPerFrame = channels * sizeof (UInt16);
    streamFormat.mChannelsPerFrame = channels;
    streamFormat.mBitsPerChannel = sizeof (UInt16) * 8;

    verify_noerr(AudioUnitSetProperty( theOutputUnit,
                                       kAudioUnitProperty_StreamFormat,
                                       kAudioUnitScope_Input,
                                       0,
                                       &streamFormat,
                                       sizeof(AudioStreamBasicDescription)));

    printf("Rendering source:\n\t");
    printf ("SampleRate=%f,", streamFormat.mSampleRate);
    printf ("BytesPerPacket=%ld,", streamFormat.mBytesPerPacket);
    printf ("FramesPerPacket=%ld,", streamFormat.mFramesPerPacket);
    printf ("BytesPerFrame=%ld,", streamFormat.mBytesPerFrame);
    printf ("BitsPerChannel=%ld,", streamFormat.mBitsPerChannel);
    printf ("ChannelsPerFrame=%ld\n", streamFormat.mChannelsPerFrame);

    // Find the hardware device the output unit is bound to, then ask it for
    // 512 frames per slice and read back what it actually granted.
    sz=sizeof (theDevice);
    verify_noerr(AudioUnitGetProperty (theOutputUnit,
                                       kAudioOutputUnitProperty_CurrentDevice,
                                       0,
                                       0,
                                       &theDevice,
                                       &sz));
    sz = sizeof (kFramesPerSlice);
    verify_noerr(AudioDeviceSetProperty(theDevice, 0, 0, false,
                                        kAudioDevicePropertyBufferFrameSize,
                                        sz, &kFramesPerSlice));
    sz = sizeof (kFramesPerSlice);
    verify_noerr(AudioDeviceGetProperty(theDevice, 0, false,
                                        kAudioDevicePropertyBufferFrameSize,
                                        &sz, &kFramesPerSlice));
    // Get notified if the IO thread overloads.
    verify_noerr (AudioDeviceAddPropertyListener(theDevice, 0, false,
                                                 kAudioDeviceProcessorOverload,
                                                 OverloadListenerProc, 0));
    printf ("size of the device's buffer = %ld frames\n", kFramesPerSlice);

    frameCount=0;
    audioBuffer=new int16_t[BUFFER_SIZE]; // between half a sec and a sec should be enough :)
    return 1;
}
/*
 * Read the current mixer volume.
 * OSS build (non-Darwin): returns the raw MIXER_READ value, -1 on error.
 * Darwin build: returns the louder of the two stereo channels' scalar
 * volumes (0.0..1.0), -1 on error.
 * NOTE(review): on Darwin the error message uses strerror(errno), but
 * CoreAudio calls report OSStatus and do not set errno — the printed
 * reason is likely meaningless there.
 */
static int getvol(void)
{
	int vol;

	if (ioctl(mixfd, MIXER_READ(mixchan), &vol)) {
#else
static float getvol(void)
{
	float volumeL, volumeR, vol;
	OSStatus err;
	AudioDeviceID device;
	UInt32 size;
	UInt32 channels[2];

	/* default output device, then its preferred stereo channel pair */
	size = sizeof(device);
	err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
	    &size, &device);
	size = sizeof(channels);
	if (!err)
		err = AudioDeviceGetProperty(device, 0, false,
		    kAudioDevicePropertyPreferredChannelsForStereo,
		    &size, &channels);
	/* per-channel scalar volume, 0.0..1.0 */
	size = sizeof(vol);
	if (!err)
		err = AudioDeviceGetProperty(device, channels[0], false,
		    kAudioDevicePropertyVolumeScalar, &size, &volumeL);
	if (!err)
		err = AudioDeviceGetProperty(device, channels[1], false,
		    kAudioDevicePropertyVolumeScalar, &size, &volumeR);
	if (!err)
		vol = (volumeL < volumeR) ? volumeR : volumeL;
	else {
#endif
		fprintf(stderr, "Unable to read mixer volume: %s\n",
		    strerror(errno));
		return -1;
	}
	return vol;
}

/*
 * Write the mixer volume; both stereo channels get the same value.
 * Returns 0 on success, -1 on error.
 * NOTE(review): unlike getvol above, the Darwin branch does not check err
 * from AudioHardwareGetProperty before overwriting it with the channels
 * query — on failure an uninitialized device ID is passed on.
 */
#ifndef __Darwin__
static int setvol(int vol)
#else
static int setvol(float vol)
#endif
{
#ifndef __Darwin__
	if (ioctl(mixfd, MIXER_WRITE(mixchan), &vol)) {
#else
	float volumeL = vol;
	float volumeR = vol;
	OSStatus err;
	AudioDeviceID device;
	UInt32 size;
	UInt32 channels[2];

	size = sizeof(device);
	err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
	    &size, &device);
	size = sizeof(channels);
	err = AudioDeviceGetProperty(device, 0, false,
	    kAudioDevicePropertyPreferredChannelsForStereo,
	    &size, &channels);
	size = sizeof(vol);
	if (!err)
		err = AudioDeviceSetProperty(device, 0, channels[0], false,
		    kAudioDevicePropertyVolumeScalar, size, &volumeL);
	if (!err)
		err = AudioDeviceSetProperty(device, 0, channels[1], false,
		    kAudioDevicePropertyVolumeScalar, size, &volumeR);
	if (err) {
#endif
		fprintf(stderr, "Unable to write mixer volume: %s\n",
		    strerror(errno));
		return -1;
	}
	return 0;
}

/* volume saved before muting, and the level while muted */
#ifndef __Darwin__
static int oldvol = 0;
static int mutevol = 0;
#else
static float oldvol = 0;
static float mutevol = 0;
#endif

/*
 * Scale a volume down to mutelevel percent.
 * OSS: the left/right bytes packed in orig are scaled independently.
 * Darwin: the single scalar volume is scaled.
 */
#ifndef __Darwin__
static int mutedlevel(int orig, int mutelevel)
{
	int l = orig >> 8;
	int r = orig & 0xff;

	l = (float)(mutelevel) * (float)(l) / 100.0;
	r = (float)(mutelevel) * (float)(r) / 100.0;

	return (l << 8) | r;
#else
static float mutedlevel(float orig, float mutelevel)
{
	float master = orig;
	master = mutelevel * master / 100.0;
	return master;
#endif
}

/* Fade (or snap) the volume down to the mute level, saving the old volume. */
static void mute(void)
{
#ifndef __Darwin__
	int vol;
	int start;
	int x;
#else
	float vol;
	float start = 1.0;
	float x;
#endif

	vol = getvol();
	oldvol = vol;
	if (smoothfade)
#ifdef __Darwin__
		start = mutelevel;
#else
		start = 100;
	else
/*
 * Open and configure the CoreAudio output device for the sound driver.
 *
 * wantdevname: device to open — either a name prefix or the decimal string
 *              of a device ID; NULL selects the default output device.
 * ratewanted:  preferred sample rate in Hz, or 0 to keep the device's rate.
 * verbose:     print progress/diagnostics to stdout.
 * extra:       key/value options: "buffersize" (bytes), "buffercount",
 *              "listdevices" (print all output-capable devices).
 *
 * Returns TRUE on success, FALSE on any failure. On success the module
 * globals (audevice, sound_rate, sound_channels, sound_buffersize,
 * framesperbuf, samplesperbuf, valbuffer, rawbuffer, ...) are initialized
 * and an IOProc is registered (but not yet started).
 *
 * Fixes vs. previous revision: buflist is no longer leaked on the error
 * return inside the enumeration loop, and malloc failures for buflist and
 * rawbuffer are now detected instead of dereferencing NULL.
 */
int audev_init_device(char *wantdevname, long ratewanted, int verbose, extraopt_t *extra)
{
  int bx, res;
  OSStatus status;
  int channels;
  long rate;
  long fragsize;
  int listdevices = FALSE;
  AudioDeviceID wantdevid;
  AudioDeviceID wantedaudev;
  extraopt_t *opt;
  UInt32 propsize;
  UInt32 bytecount;
  struct AudioStreamBasicDescription streamdesc;
#define LEN_DEVICE_NAME 128
  char devicename[LEN_DEVICE_NAME];

  if (verbose) {
    printf("Boodler: OSX CoreAudio sound driver.\n");
  }

  /* Defaults, overridable via the extra options. */
  fragsize = 32768;
  bufcount = 6;

  for (opt=extra; opt->key; opt++) {
    if (!strcmp(opt->key, "buffersize") && opt->val) {
      fragsize = atol(opt->val);
    }
    else if (!strcmp(opt->key, "buffercount") && opt->val) {
      bufcount = atoi(opt->val);
    }
    else if (!strcmp(opt->key, "listdevices")) {
      listdevices = TRUE;
    }
  }

  if (bufcount < 2)
    bufcount = 2;

  if (audevice != kAudioDeviceUnknown) {
    fprintf(stderr, "Sound device is already open.\n");
    return FALSE;
  }

  wantedaudev = kAudioDeviceUnknown;

  /* If the given device name is a string representation of an integer,
     work out the integer. */
  wantdevid = kAudioDeviceUnknown;
  if (wantdevname) {
    char *endptr = NULL;
    wantdevid = strtol(wantdevname, &endptr, 10);
    if (!endptr || endptr == wantdevname || (*endptr != '\0'))
      wantdevid = kAudioDeviceUnknown;
  }

  if (listdevices || wantdevname) {
    int ix, jx;
    int device_count;
#define LEN_DEVICE_LIST 16
    AudioDeviceID devicelist[LEN_DEVICE_LIST];

    propsize = LEN_DEVICE_LIST * sizeof(AudioDeviceID);
    status = AudioHardwareGetProperty(kAudioHardwarePropertyDevices,
      &propsize, devicelist);
    if (status) {
      fprintf(stderr, "Could not get list of audio devices.\n");
      return FALSE;
    }
    device_count = propsize / sizeof(AudioDeviceID);

    for (ix=0; ix<device_count; ix++) {
      AudioDeviceID tmpaudev = devicelist[ix];

      /* Determine if this is an output device. */
      status = AudioDeviceGetPropertyInfo(tmpaudev, 0, 0,
        kAudioDevicePropertyStreamConfiguration, &propsize, NULL);
      if (status) {
        fprintf(stderr, "Could not get audio property info.\n");
        return FALSE;
      }

      AudioBufferList *buflist = (AudioBufferList *)malloc(propsize);
      if (!buflist) {
        fprintf(stderr, "Unable to allocate sound buffer.\n");
        return FALSE;
      }
      status = AudioDeviceGetProperty(tmpaudev, 0, 0,
        kAudioDevicePropertyStreamConfiguration, &propsize, buflist);
      if (status) {
        fprintf(stderr, "Could not get audio property info.\n");
        free(buflist); /* was leaked here before */
        return FALSE;
      }

      int hasoutput = FALSE;
      for (jx=0; jx<buflist->mNumberBuffers; jx++) {
        if (buflist->mBuffers[jx].mNumberChannels > 0) {
          hasoutput = TRUE;
        }
      }

      free(buflist);
      buflist = NULL;

      if (!hasoutput) {
        /* skip this device. */
        continue;
      }

      /* Determine the device name. */
      propsize = LEN_DEVICE_NAME * sizeof(char);
      status = AudioDeviceGetProperty(tmpaudev, 1, 0,
        kAudioDevicePropertyDeviceName, &propsize, devicename);
      if (status) {
        fprintf(stderr, "Could not get audio device name.\n");
        return FALSE;
      }

      if (listdevices)
        printf("Found device ID %d: \"%s\".\n", (int)tmpaudev, devicename);

      /* Check if the desired name matches (a prefix of) the device name. */
      if (wantdevname && !strncmp(wantdevname, devicename,
            strlen(wantdevname))) {
        wantedaudev = tmpaudev;
      }

      /* Check if the int version of the desired name matches the
         device ID. */
      if (wantdevid != kAudioDeviceUnknown && wantdevid == tmpaudev) {
        wantedaudev = tmpaudev;
      }
    }
  }

  if (wantdevname) {
    audevice = wantedaudev;
  }
  else {
    propsize = sizeof(audevice);
    status = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
      &propsize, &audevice);
    if (status) {
      fprintf(stderr, "Could not get audio default device.\n");
      return FALSE;
    }
  }

  if (audevice == kAudioDeviceUnknown) {
    fprintf(stderr, "Audio default device is unknown.\n");
    return FALSE;
  }

  propsize = LEN_DEVICE_NAME * sizeof(char);
  status = AudioDeviceGetProperty(audevice, 1, 0,
    kAudioDevicePropertyDeviceName, &propsize, devicename);
  if (status) {
    fprintf(stderr, "Could not get audio device name.\n");
    return FALSE;
  }

  if (verbose) {
    printf("Got device ID %d: \"%s\".\n", (int)audevice, devicename);
  }

  if (ratewanted) {
    /* Best-effort: ask for the nearest supported rate; failure is not
       fatal, the actual rate is read back below. */
    memset(&streamdesc, 0, sizeof(streamdesc));
    streamdesc.mSampleRate = ratewanted;

    propsize = sizeof(streamdesc);
    status = AudioDeviceSetProperty(audevice, NULL, 0, 0,
      kAudioDevicePropertyStreamFormatMatch, propsize, &streamdesc);
    if (status) {
      fprintf(stderr, "Could not set sample rate; continuing.\n");
    }
  }

  {
    /* Best-effort buffer-size request; the granted size is read back. */
    bytecount = fragsize;

    propsize = sizeof(bytecount);
    status = AudioDeviceSetProperty(audevice, NULL, 0, 0,
      kAudioDevicePropertyBufferSize, propsize, &bytecount);
    if (status) {
      fprintf(stderr, "Could not set buffer size; continuing.\n");
    }
  }

  propsize = sizeof(struct AudioStreamBasicDescription);
  status = AudioDeviceGetProperty(audevice, 1, 0,
    kAudioDevicePropertyStreamFormat, &propsize, &streamdesc);
  if (status) {
    fprintf(stderr, "Could not get audio device description.\n");
    return FALSE;
  }

  rate = streamdesc.mSampleRate;

  /* The driver only handles stereo float LinearPCM devices. */
  if (streamdesc.mFormatID != kAudioFormatLinearPCM) {
    fprintf(stderr, "Audio device format is not LinearPCM; exiting.\n");
    return FALSE;
  }

  if (streamdesc.mChannelsPerFrame != 2) {
    fprintf(stderr, "Audio device is not stereo; exiting.\n");
    return FALSE;
  }
  channels = 2;

  if (!(streamdesc.mFormatFlags & kLinearPCMFormatFlagIsFloat)) {
    fprintf(stderr, "Audio device is not floating-point; exiting.\n");
    return FALSE;
  }

  propsize = sizeof(bytecount);
  status = AudioDeviceGetProperty(audevice, 1, 0,
    kAudioDevicePropertyBufferSize, &propsize, &bytecount);
  if (status) {
    fprintf(stderr, "Could not get audio device buffer size.\n");
    return FALSE;
  }

  fragsize = bytecount;
  if (verbose) {
    printf("%ld bytes per buffer.\n", fragsize);
  }

  if (verbose) {
    printf("%d buffers in queue.\n", bufcount);
  }

  /* Everything's figured out. */
  sound_rate = rate;
  sound_channels = channels;
  sound_buffersize = fragsize;
  framesperbuf = sound_buffersize / (sizeof(float) * sound_channels);
  samplesperbuf = framesperbuf * sound_channels;
  if (verbose) {
    printf("%ld frames (%ld samples) per buffer.\n",
      framesperbuf, samplesperbuf);
    printf("%ld frames per second.\n", rate);
  }

  emptying = 0;
  filling = 0;
  bailing = FALSE;

  valbuffer = (long *)malloc(sizeof(long) * samplesperbuf);
  if (!valbuffer) {
    fprintf(stderr, "Unable to allocate sound buffer.\n");
    return FALSE;
  }
  memset(valbuffer, 0, sizeof(long) * samplesperbuf);

  rawbuffer = (buffer_t *)malloc(sizeof(buffer_t) * bufcount);
  if (!rawbuffer) { /* was dereferenced unchecked before */
    fprintf(stderr, "Unable to allocate sound buffer.\n");
    return FALSE;
  }
  memset(rawbuffer, 0, sizeof(buffer_t) * bufcount);

  for (bx=0; bx<bufcount; bx++) {
    buffer_t *buffer = &rawbuffer[bx];
    buffer->full = FALSE;
    buffer->buf = (float *)malloc(sound_buffersize);
    if (!buffer->buf) {
      fprintf(stderr, "Unable to allocate sound buffer.\n");
      /* free stuff */
      return FALSE;
    }
    memset(buffer->buf, 0, sound_buffersize);
    res = pthread_mutex_init(&buffer->mutex, NULL);
    if (res) {
      fprintf(stderr, "Unable to init mutex.\n");
      /* free stuff */
      return FALSE;
    }
    res = pthread_cond_init(&buffer->cond, NULL);
    if (res) {
      fprintf(stderr, "Unable to init cond.\n");
      /* free stuff */
      return FALSE;
    }
  }

  /* AudioDeviceAddIOProc is deprecated as of OSX 10.5. Use the osxaq
     driver instead. */
  status = AudioDeviceAddIOProc(audevice, PlaybackIOProc, (void *)1);
  if (status) {
    fprintf(stderr, "Could not add IOProc to device.\n");
    return FALSE;
  }

  started = FALSE;
  return TRUE;
}
/*return value == 0 sucess == -1 fails */ static int open_output(void) { OSStatus err = 0; //no err UInt32 count, bufferSize; AudioDeviceID device = kAudioDeviceUnknown; AudioStreamBasicDescription format; // get the default output device for the HAL count = sizeof(globals.device); // it is required to pass the size of the data to be returned err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice, &count, (void *) &device); if (err != 0) goto Bail; // get the buffersize that the default device uses for IO count = sizeof(globals.deviceBufferSize); // it is required to pass the size of the data to be returned err = AudioDeviceGetProperty(device, 0, 0, kAudioDevicePropertyBufferSize, &count, &bufferSize); if (err != 0) goto Bail; if( globals.deviceBufferSize>BUFLEN ){ fprintf(stderr, "globals.deviceBufferSize NG: %ld\n", globals.deviceBufferSize); exit(1); } // get a description of the data format used by the default device count = sizeof(globals.deviceFormat); // it is required to pass the size of the data to be returned err = AudioDeviceGetProperty(device, 0, 0, kAudioDevicePropertyStreamFormat, &count, &format); if (err != 0) goto Bail; FailWithAction(format.mFormatID != kAudioFormatLinearPCM, err = -1, Bail); // bail if the format is not linear pcm // everything is ok so fill in these globals globals.device = device; globals.deviceBufferSize = bufferSize; globals.deviceFormat = format; init_variable(); err = AudioDeviceAddIOProc(globals.device, appIOProc, 0 ); // setup our device with an IO proc if (err != 0) goto Bail; globals.deviceFormat.mSampleRate = dpm.rate; #if 0 globals.deviceFormat.mFormatFlags = kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; globals.deviceFormat.mBytesPerPacket = 4; globals.deviceFormat.mBytesPerFrame = 4; globals.deviceFormat.mBitsPerChannel = 0x10; err = AudioDeviceSetProperty(device, &inWhen, 0, 0, kAudioDevicePropertyStreamFormat, count, &globals.deviceFormat); 
if (err != 0) goto Bail; #endif #if 0 fprintf(stderr, "deviceBufferSize = %d\n", globals.deviceBufferSize); fprintf(stderr, "mSampleRate = %g\n", globals.deviceFormat.mSampleRate); fprintf(stderr, "mFormatID = 0x%08x\n", globals.deviceFormat.mFormatID); fprintf(stderr, "mFormatFlags = 0x%08x\n", globals.deviceFormat.mFormatFlags); fprintf(stderr, "mBytesPerPacket = 0x%08x\n", globals.deviceFormat.mBytesPerPacket); fprintf(stderr, "mBytesPerFrame = 0x%08x\n", globals.deviceFormat.mBytesPerFrame); fprintf(stderr, "mBitsPerChannel = 0x%08x\n", globals.deviceFormat.mBitsPerChannel); #endif Bail: return (err); }