static void
gpcap_devices_add (GtkWidget *widget, gpointer user_data)
{
    GnomePilotCapplet *gpcap = GNOME_PILOT_CAPPLET (user_data);
    GnomePilotCappletPrivate *priv;
    GObject *dlg;
    GPilotDevice *device;
    gboolean res;

    priv = gpcap->priv;

    device = get_default_device (priv->state);

    dlg = gnome_pilot_ddialog_new (device);
    res = gnome_pilot_ddialog_run_and_close (GNOME_PILOT_DDIALOG (dlg),
                                             GTK_WINDOW (gpcap));
    if (!res) {
        g_free (device);
    } else {
        GtkTreeIter iter;

        priv->state->devices = g_list_append (priv->state->devices, device);
        append_devices_treeview (gpcap, device, &iter);
        gtk_tree_selection_select_iter (
            gtk_tree_view_get_selection (GTK_TREE_VIEW (priv->devices_treeview)),
            &iter);

        gpcap_save_state (gpcap);
    }
}
LoopbackCapture::LoopbackCapture(AudioBuffer *pBuffer) {
    //memset(this,0,sizeof(this));
    this->pBuffer = pBuffer;
    //hr = CoInitialize(NULL);

    // create a "loopback capture has started" event
    hStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (NULL == hStartedEvent) {
        printf("CreateEvent failed: last error is %u\n", GetLastError());
        //return -__LINE__;
    }

    // create a "stop capturing now" event
    hStopEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (NULL == hStopEvent) {
        printf("CreateEvent failed: last error is %u\n", GetLastError());
        CloseHandle(hStartedEvent);
        //return -__LINE__;
    }

    hDeviceEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (NULL == hDeviceEvent) {
        printf("CreateEvent failed: last error is %u\n", GetLastError());
        CloseHandle(hStartedEvent);
        CloseHandle(hStopEvent);
        //return -__LINE__;
    }

    hGlobalCloseEvent = CreateEvent(NULL, FALSE, FALSE, L"sysAudioSpectrogram_GlobalCloseEvent");
    if (NULL == hGlobalCloseEvent) {
        printf("CreateEvent failed: last error is %u\n", GetLastError());
        CloseHandle(hStartedEvent);
        CloseHandle(hStopEvent);
        CloseHandle(hDeviceEvent);
        //return -__LINE__;
    }

    hr = E_UNEXPECTED; // thread will overwrite this

    get_default_device(&pMMDevice);
    //pMMDevice = prefs.m_pMMDevice;
    bInt16 = false; //prefs.m_bInt16;
    hFile = NULL;   //prefs.m_hFile;
    nFrames = 0;
    //CoUninitialize();
}
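// The snippets above and below call get_default_device(&pMMDevice) without showing its body.
// This is a minimal sketch of what such a helper typically looks like on Windows (WASAPI),
// assuming <mmdeviceapi.h> is included and COM is already initialized; the eRender/eConsole
// choice mirrors the loopback setup code further down. It is an assumed implementation, not
// the original one.
HRESULT get_default_device(IMMDevice **ppMMDevice) {
    IMMDeviceEnumerator *pEnumerator = NULL;

    // create the device enumerator
    HRESULT hr = CoCreateInstance(
        __uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
        __uuidof(IMMDeviceEnumerator), (void**)&pEnumerator);
    if (FAILED(hr)) {
        printf("CoCreateInstance(IMMDeviceEnumerator) failed: hr = 0x%08x\n", hr);
        return hr;
    }

    // loopback capture records whatever the default *render* endpoint is playing
    hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, ppMMDevice);
    if (FAILED(hr)) {
        printf("IMMDeviceEnumerator::GetDefaultAudioEndpoint failed: hr = 0x%08x\n", hr);
    }

    pEnumerator->Release();
    return hr;
}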
void propagateWithRawCurrentFormat(WAVEFORMATEX *toThis) {
    WAVEFORMATEX *pwfx;
    IMMDevice *pMMDevice;
    IAudioClient *pAudioClient;
    HANDLE hTask;
    DWORD nTaskIndex = 0;

    hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);

    HRESULT hr = get_default_device(&pMMDevice);
    if (FAILED(hr)) {
        assert(false);
    }

    // activate an (the default, for us, since we want loopback) IAudioClient
    hr = pMMDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**)&pAudioClient);
    if (FAILED(hr)) {
        ShowOutput("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        assert(false);
    }

    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        assert(false);
    }

    pAudioClient->Stop();
    AvRevertMmThreadCharacteristics(hTask);
    pAudioClient->Release();
    pMMDevice->Release();

    memcpy(toThis, pwfx, sizeof(WAVEFORMATEX));
    CoTaskMemFree(pwfx);
}
CPrefs::CPrefs(int argc, LPCWSTR argv[], HRESULT &hr)
    : m_pMMDevice(NULL)
    , m_bInt16(true)
    , m_bMono(false)
    , m_iSampleRateDivisor(1)
    , m_pwfx(NULL)
{
    switch (argc) {
    case 2:
        if (0 == _wcsicmp(argv[1], L"-?") || 0 == _wcsicmp(argv[1], L"/?")) {
            // print usage but don't actually capture
            hr = S_FALSE;
            usage(argv[0]);
            return;
        } else if (0 == _wcsicmp(argv[1], L"--list-devices")) {
            // list the devices but don't actually capture
            hr = list_devices();

            // don't actually play
            if (S_OK == hr) {
                hr = S_FALSE;
                return;
            }
        }
        // intentional fallthrough

    default:
        // loop through arguments and parse them
        for (int i = 1; i < argc; i++) {
            // --device
            if (0 == _wcsicmp(argv[i], L"--device")) {
                if (NULL != m_pMMDevice) {
                    printf("Only one --device switch is allowed\n");
                    hr = E_INVALIDARG;
                    return;
                }

                if (++i == argc) { // was "i++ == argc", which can never be true inside this loop
                    printf("--device switch requires an argument\n");
                    hr = E_INVALIDARG;
                    return;
                }

                hr = get_specific_device(argv[i], &m_pMMDevice);
                if (FAILED(hr)) {
                    return;
                }
                continue;
            }

            // --int-16
            if (0 == _wcsicmp(argv[i], L"--int-16")) {
                if (m_bInt16) {
                    printf("Only one --int-16 switch is allowed\n");
                    hr = E_INVALIDARG;
                    return;
                }

                m_bInt16 = true;
                continue;
            }

            // --mono
            if (0 == _wcsicmp(argv[i], L"--mono")) {
                m_bMono = true;
                continue;
            }

            // --div divisor
            if (0 == _wcsicmp(argv[i], L"--div")) {
                if (++i == argc) { // was "i++ == argc"; see above
                    printf("--div switch requires an argument\n");
                    hr = E_INVALIDARG;
                    return;
                }

                m_iSampleRateDivisor = _wtoi(argv[i]);
                continue;
            }

            printf("Invalid argument %ls\n", argv[i]);
            hr = E_INVALIDARG;
            return;
        }

        // open default device if not specified
        if (NULL == m_pMMDevice) {
            hr = get_default_device(&m_pMMDevice);
            if (FAILED(hr)) {
                return;
            }
        }
    }
}
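// How a caller presumably consumes CPrefs: the constructor reports S_FALSE when it has already
// handled the request (usage text or --list-devices) and a failure HRESULT on bad arguments.
// The wmain wrapper below is a hypothetical usage sketch, not the original entry point; the
// actual capture start is assumed to happen elsewhere once prefs.m_pMMDevice is valid.
int wmain(int argc, LPCWSTR argv[]) {
    HRESULT hr = CoInitialize(NULL);
    if (FAILED(hr)) {
        printf("CoInitialize failed: hr = 0x%08x\n", hr);
        return -__LINE__;
    }

    {
        // parse the command line; this may open the default render device
        CPrefs prefs(argc, argv, hr);
        if (FAILED(hr)) {
            printf("CPrefs::CPrefs constructor failed: hr = 0x%08x\n", hr);
        } else if (S_FALSE == hr) {
            // nothing to do (usage or --list-devices already handled); not an error
            hr = S_OK;
        } else {
            // prefs.m_pMMDevice, m_bInt16, m_bMono, m_iSampleRateDivisor are ready to use here
        }
    } // prefs goes out of scope before CoUninitialize (assuming ~CPrefs releases m_pMMDevice)

    CoUninitialize();
    return SUCCEEDED(hr) ? 0 : -__LINE__;
}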
// we only call this once...per hit of the play button :)
HRESULT LoopbackCaptureSetup()
{
    assert(shouldStop); // duplicate starts would be odd...
    shouldStop = false; // allow graphs to restart, if they so desire...
    pnFrames = 0;
    bool bInt16 = true; // makes it actually work, for some reason...LODO

    HRESULT hr;
    hr = get_default_device(&m_pMMDevice); // so it can re-place our pointer...
    if (FAILED(hr)) {
        return hr;
    }

    // tell it to not overflow one buffer's worth <sigh> not sure if this is right or not,
    // and thus we don't "cache" or "buffer" more than that much currently...
    // but a buffer size is a buffer size...hmm...as long as we keep it small though...
    assert(expectedMaxBufferSize <= pBufOriginalSize);

    // activate an (the default, for us, since we want loopback) IAudioClient
    hr = m_pMMDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**)&pAudioClient);
    if (FAILED(hr)) {
        ShowOutput("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        return hr;
    }

    // get the default device periodicity, why? I don't know...
    REFERENCE_TIME hnsDefaultDevicePeriod;
    hr = pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, NULL);
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }

    // get the default device format (incoming...)
    WAVEFORMATEX *pwfx; // incoming wave...
    // apparently propagated by GetMixFormat...
    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        return hr;
    }

    if (true /*bInt16*/) {
        // coerce int-16 wave format
        // can do this in-place since we're not changing the size of the format
        // also, the engine will auto-convert from float to int for us
        switch (pwfx->wFormatTag) {
        case WAVE_FORMAT_IEEE_FLOAT:
            assert(false); // we never get here...I hope...
            pwfx->wFormatTag = WAVE_FORMAT_PCM;
            pwfx->wBitsPerSample = 16;
            pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
            pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
            break;

        case WAVE_FORMAT_EXTENSIBLE:
        {
            // naked scope for case-local variable
            PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
            if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) {
                // WE GET HERE!
                pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
                pEx->Samples.wValidBitsPerSample = 16;
                pwfx->wBitsPerSample = 16;
                pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
                /* scawah lodo...
                if (ifNotNullThenJustSetTypeOnly) {
                    PWAVEFORMATEXTENSIBLE pEx2 = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(ifNotNullThenJustSetTypeOnly);
                    pEx2->SubFormat = pEx->SubFormat;
                    pEx2->Samples.wValidBitsPerSample = pEx->Samples.wValidBitsPerSample;
                }
                */
            } else {
                ShowOutput("Don't know how to coerce mix format to int-16\n");
                CoTaskMemFree(pwfx);
                pAudioClient->Release();
                return E_UNEXPECTED;
            }
        }
        break;

        default:
            ShowOutput("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", pwfx->wFormatTag);
            CoTaskMemFree(pwfx);
            pAudioClient->Release();
            return E_UNEXPECTED;
        }
    }

    /* scawah setting stream types up to match...didn't seem to work well...
    if (ifNotNullThenJustSetTypeOnly) {
        // pwfx is set at this point...
        WAVEFORMATEX* pwfex = ifNotNullThenJustSetTypeOnly;
        // copy them all out as the possible format...hmm...
        pwfx->wFormatTag = WAVE_FORMAT_PCM;
        pwfx->wBitsPerSample = 16;
        pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
        pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;

        pwfex->wFormatTag = pwfx->wFormatTag;
        pwfex->nChannels = pwfx->nChannels;
        pwfex->nSamplesPerSec = pwfx->nSamplesPerSec;
        pwfex->wBitsPerSample = pwfx->wBitsPerSample;
        pwfex->nBlockAlign = pwfx->nBlockAlign;
        pwfex->nAvgBytesPerSec = pwfx->nAvgBytesPerSec;
        pwfex->cbSize = pwfx->cbSize;

        //FILE *fp = fopen("/normal2", "w"); // fails on me? maybe just a VLC thing...
        //fShowOutput(fp, "hello world %d %d %d %d %d %d %d", pwfex->wFormatTag, pwfex->nChannels,
        //    pwfex->nSamplesPerSec, pwfex->wBitsPerSample, pwfex->nBlockAlign, pwfex->nAvgBytesPerSec, pwfex->cbSize);
        //fclose(fp);

        // cleanup
        // I might be leaking here...
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        //m_pMMDevice->Release();
        return hr;
    }*/

    MMCKINFO ckRIFF = {0};
    MMCKINFO ckData = {0};
    nBlockAlign = pwfx->nBlockAlign;

    // avoid stuttering on close
    // http://social.msdn.microsoft.com/forums/en-US/windowspro-audiodevelopment/thread/c7ba0a04-46ce-43ff-ad15-ce8932c00171/
    //IAudioClient *pAudioClient = NULL;
    //IAudioCaptureClient *pCaptureClient = NULL;

    IMMDeviceEnumerator *pEnumerator = NULL;
    IMMDevice *pDevice = NULL;
    IAudioRenderClient *pRenderClient = NULL;
    WAVEFORMATEXTENSIBLE *captureDataFormat = NULL;
    BYTE *captureData;
    REFERENCE_TIME hnsRequestedDuration = REFTIMES_PER_SEC;

    hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL,
                          IID_IMMDeviceEnumerator, (void**)&pEnumerator);
    EXIT_ON_ERROR(hr)

    hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pDevice);
    EXIT_ON_ERROR(hr)

    hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient);
    EXIT_ON_ERROR(hr)

    hr = pAudioClient->GetMixFormat((WAVEFORMATEX **)&captureDataFormat);
    EXIT_ON_ERROR(hr)

    // Silence: initialise in shared mode [this is the "silence" bug overwriter, so buffer doesn't matter as much...]
    hr = pAudioClient->Initialize(
        AUDCLNT_SHAREMODE_SHARED,
        0,
        REFTIMES_PER_SEC, // buffer size a full 1.0s, though prolly doesn't matter here.
        0,
        pwfx,
        NULL);
    EXIT_ON_ERROR(hr)

    // get the frame count
    UINT32 bufferFrameCount;
    hr = pAudioClient->GetBufferSize(&bufferFrameCount);
    EXIT_ON_ERROR(hr)

    // create a render client
    hr = pAudioClient->GetService(IID_IAudioRenderClient, (void**)&pRenderClient);
    EXIT_ON_ERROR(hr)

    // get the buffer
    hr = pRenderClient->GetBuffer(bufferFrameCount, &captureData);
    EXIT_ON_ERROR(hr)

    // release it
    hr = pRenderClient->ReleaseBuffer(bufferFrameCount, AUDCLNT_BUFFERFLAGS_SILENT);
    EXIT_ON_ERROR(hr)

    // release the audio client
    pAudioClient->Release();
    EXIT_ON_ERROR(hr)

    // create a new IAudioClient
    hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient);
    EXIT_ON_ERROR(hr)

    // -============================ now the sniffing code initialization stuff, direct from mauritius... ===================================

    // call IAudioClient::Initialize
    // note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK
    // do not work together...
    // the "data ready" event never gets set
    // so we're going to have to do this in a timer-driven loop...
    hr = pAudioClient->Initialize(
        AUDCLNT_SHAREMODE_SHARED,
        AUDCLNT_STREAMFLAGS_LOOPBACK,
        REFTIMES_PER_SEC, // buffer size a full 1.0s, seems ok VLC
        0,
        pwfx,
        0);
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }
    CoTaskMemFree(pwfx);

    // activate an IAudioCaptureClient
    hr = pAudioClient->GetService(__uuidof(IAudioCaptureClient), (void**)&pAudioCaptureClient); // CARE INSTANTIATION
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetService(IAudioCaptureClient) failed: hr 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }

    // register with MMCSS
    DWORD nTaskIndex = 0;
    hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);
    if (NULL == hTask) {
        DWORD dwErr = GetLastError();
        ShowOutput("AvSetMmThreadCharacteristics failed: last error = %u\n", dwErr);
        pAudioCaptureClient->Release();
        pAudioClient->Release();
        return HRESULT_FROM_WIN32(dwErr);
    }

    // call IAudioClient::Start
    hr = pAudioClient->Start();
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::Start failed: hr = 0x%08x\n", hr);
        AvRevertMmThreadCharacteristics(hTask);
        pAudioCaptureClient->Release();
        pAudioClient->Release();
        return hr;
    }

    bFirstPacket = true;

    // start the forever grabbing thread...
    DWORD dwThreadID;
    m_hThread = CreateThread(NULL, 0, propagateBufferForever, 0, 0, &dwThreadID);

    if (!m_hThread) {
        DWORD dwErr = GetLastError();
        return HRESULT_FROM_WIN32(dwErr);
    } else {
        // we...shouldn't need this...maybe?
        // seems to make no difference...
        // SetThreadPriority returns a BOOL, not an HRESULT, so check it directly
        if (!SetThreadPriority(m_hThread, THREAD_PRIORITY_TIME_CRITICAL)) {
            // of course we always want to be a high prio thread, right? [we don't use much cpu...]
            return HRESULT_FROM_WIN32(GetLastError());
        }
    }

    return hr;
} // end LoopbackCaptureSetup
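// propagateBufferForever is started above but not shown in this excerpt. The comments note that
// AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK don't work together, so the
// thread has to poll. Below is a minimal sketch of such a timer-driven loop, assuming the globals
// used above (pAudioCaptureClient, shouldStop, pnFrames, nBlockAlign) and a hypothetical
// copyIntoBuffer() helper standing in for whatever hands data to the AudioBuffer; it is not the
// project's actual implementation.
DWORD WINAPI propagateBufferForever(LPVOID pv) {
    while (!shouldStop) {
        UINT32 nNextPacketSize = 0;
        HRESULT hr = pAudioCaptureClient->GetNextPacketSize(&nNextPacketSize);
        if (FAILED(hr)) {
            return 0;
        }

        if (0 == nNextPacketSize) {
            // nothing queued yet; sleep a few milliseconds and poll again
            Sleep(5);
            continue;
        }

        BYTE *pData = NULL;
        UINT32 nNumFramesToRead = 0;
        DWORD dwFlags = 0;
        hr = pAudioCaptureClient->GetBuffer(&pData, &nNumFramesToRead, &dwFlags, NULL, NULL);
        if (FAILED(hr)) {
            return 0;
        }

        if (0 == (dwFlags & AUDCLNT_BUFFERFLAGS_SILENT)) {
            // hypothetical hand-off to the ring buffer / graph code
            copyIntoBuffer(pData, nNumFramesToRead * nBlockAlign);
        }
        pnFrames += nNumFramesToRead;

        pAudioCaptureClient->ReleaseBuffer(nNumFramesToRead);
    }
    return 0;
}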
/** create a new driver instance */
static jack_driver_t *coreaudio_driver_new(char* name,
                                           jack_client_t* client,
                                           jack_nframes_t nframes,
                                           jack_nframes_t samplerate,
                                           int capturing,
                                           int playing,
                                           int inchannels,
                                           int outchannels,
                                           char* capture_driver_uid,
                                           char* playback_driver_uid,
                                           jack_nframes_t capture_latency,
                                           jack_nframes_t playback_latency)
{
    coreaudio_driver_t *driver;
    OSStatus err = noErr;
    ComponentResult err1;
    UInt32 outSize;
    UInt32 enableIO;
    AudioStreamBasicDescription srcFormat, dstFormat;
    Float64 sampleRate;
    int in_nChannels = 0;
    int out_nChannels = 0;
    int i;

    driver = (coreaudio_driver_t *) calloc(1, sizeof(coreaudio_driver_t));
    jack_driver_init((jack_driver_t *) driver);

    if (!jack_power_of_two(nframes)) {
        jack_error("CA: -p must be a power of two.");
        goto error;
    }

    driver->state = 0;
    driver->frames_per_cycle = nframes;
    driver->frame_rate = samplerate;
    driver->capturing = capturing;
    driver->playing = playing;
    driver->xrun_detected = 0;
    driver->null_cycle = 0;

    driver->attach = (JackDriverAttachFunction) coreaudio_driver_attach;
    driver->detach = (JackDriverDetachFunction) coreaudio_driver_detach;
    driver->read = (JackDriverReadFunction) coreaudio_driver_read;
    driver->write = (JackDriverReadFunction) coreaudio_driver_write;
    driver->null_cycle = (JackDriverNullCycleFunction) coreaudio_driver_null_cycle;
    driver->bufsize = (JackDriverBufSizeFunction) coreaudio_driver_bufsize;
    driver->start = (JackDriverStartFunction) coreaudio_driver_audio_start;
    driver->stop = (JackDriverStopFunction) coreaudio_driver_audio_stop;
    driver->capture_frame_latency = capture_latency;
    driver->playback_frame_latency = playback_latency;

    // Duplex
    if (strcmp(capture_driver_uid, "") != 0 && strcmp(playback_driver_uid, "") != 0) {
        JCALog("Open duplex \n");
        if (get_device_id_from_uid(playback_driver_uid, &driver->device_id) != noErr) {
            if (get_default_device(&driver->device_id) != noErr) {
                jack_error("Cannot open default device");
                goto error;
            }
        }
        if (get_device_name_from_id(driver->device_id, driver->capture_driver_name) != noErr
            || get_device_name_from_id(driver->device_id, driver->playback_driver_name) != noErr) {
            jack_error("Cannot get device name from device ID");
            goto error;
        }

    // Capture only
    } else if (strcmp(capture_driver_uid, "") != 0) {
        JCALog("Open capture only \n");
        if (get_device_id_from_uid(capture_driver_uid, &driver->device_id) != noErr) {
            if (get_default_input_device(&driver->device_id) != noErr) {
                jack_error("Cannot open default device");
                goto error;
            }
        }
        if (get_device_name_from_id(driver->device_id, driver->capture_driver_name) != noErr) {
            jack_error("Cannot get device name from device ID");
            goto error;
        }

    // Playback only
    } else if (playback_driver_uid != NULL) {
        JCALog("Open playback only \n");
        if (get_device_id_from_uid(playback_driver_uid, &driver->device_id) != noErr) {
            if (get_default_output_device(&driver->device_id) != noErr) {
                jack_error("Cannot open default device");
                goto error;
            }
        }
        if (get_device_name_from_id(driver->device_id, driver->playback_driver_name) != noErr) {
            jack_error("Cannot get device name from device ID");
            goto error;
        }

    // Use default driver in duplex mode
    } else {
        JCALog("Open default driver \n");
        if (get_default_device(&driver->device_id) != noErr) {
            jack_error("Cannot open default device");
            goto error;
        }
        if (get_device_name_from_id(driver->device_id, driver->capture_driver_name) != noErr
            || get_device_name_from_id(driver->device_id, driver->playback_driver_name) != noErr) {
            jack_error("Cannot get device name from device ID");
            goto error;
        }
    }

    driver->client = client;
    driver->period_usecs =
        (((float) driver->frames_per_cycle) / driver->frame_rate) * 1000000.0f;

    if (capturing) {
        err = get_total_channels(driver->device_id, &in_nChannels, true);
        if (err != noErr) {
            jack_error("Cannot get input channel number");
            printError(err);
            goto error;
        }
    }

    if (playing) {
        err = get_total_channels(driver->device_id, &out_nChannels, false);
        if (err != noErr) {
            jack_error("Cannot get output channel number");
            printError(err);
            goto error;
        }
    }

    if (inchannels > in_nChannels) {
        jack_error("This device hasn't required input channels inchannels = %ld in_nChannels = %ld",
                   inchannels, in_nChannels);
        goto error;
    }

    if (outchannels > out_nChannels) {
        jack_error("This device hasn't required output channels outchannels = %ld out_nChannels = %ld",
                   outchannels, out_nChannels);
        goto error;
    }

    if (inchannels == 0) {
        JCALog("Setup max in channels = %ld\n", in_nChannels);
        inchannels = in_nChannels;
    }

    if (outchannels == 0) {
        JCALog("Setup max out channels = %ld\n", out_nChannels);
        outchannels = out_nChannels;
    }

    // Setting buffer size
    outSize = sizeof(UInt32);
    err = AudioDeviceSetProperty(driver->device_id, NULL, 0, false,
                                 kAudioDevicePropertyBufferFrameSize, outSize, &nframes);
    if (err != noErr) {
        jack_error("Cannot set buffer size %ld", nframes);
        printError(err);
        goto error;
    }

    // Set sample rate
    outSize = sizeof(Float64);
    err = AudioDeviceGetProperty(driver->device_id, 0, kAudioDeviceSectionGlobal,
                                 kAudioDevicePropertyNominalSampleRate, &outSize, &sampleRate);
    if (err != noErr) {
        jack_error("Cannot get current sample rate");
        printError(err);
        goto error;
    }

    if (samplerate != (jack_nframes_t)sampleRate) {
        sampleRate = (Float64)samplerate;

        // To get SR change notification
        err = AudioDeviceAddPropertyListener(driver->device_id, 0, true,
                                             kAudioDevicePropertyNominalSampleRate,
                                             sr_notification, driver);
        if (err != noErr) {
            jack_error("Error calling AudioDeviceAddPropertyListener with kAudioDevicePropertyNominalSampleRate");
            printError(err);
            goto error; // was "return -1", but this function returns a pointer
        }
        err = AudioDeviceSetProperty(driver->device_id, NULL, 0, kAudioDeviceSectionGlobal,
                                     kAudioDevicePropertyNominalSampleRate, outSize, &sampleRate);
        if (err != noErr) {
            jack_error("Cannot set sample rate = %ld", samplerate);
            printError(err);
            goto error; // was "return -1"
        }

        // Waiting for SR change notification
        int count = 0;
        while (!driver->state && count++ < 100) {
            usleep(100000);
            JCALog("Wait count = %ld\n", count);
        }

        // Remove SR change notification
        AudioDeviceRemovePropertyListener(driver->device_id, 0, true,
                                          kAudioDevicePropertyNominalSampleRate, sr_notification);
    }

    // AUHAL
    ComponentDescription cd = {kAudioUnitType_Output, kAudioUnitSubType_HALOutput, kAudioUnitManufacturer_Apple, 0, 0};
    Component HALOutput = FindNextComponent(NULL, &cd);

    err1 = OpenAComponent(HALOutput, &driver->au_hal);
    if (err1 != noErr) {
        jack_error("Error calling OpenAComponent");
        printError(err1);
        goto error;
    }

    err1 = AudioUnitInitialize(driver->au_hal);
    if (err1 != noErr) {
        jack_error("Cannot initialize AUHAL unit");
        printError(err1);
        goto error;
    }

    // Start I/O
    enableIO = 1;
    if (capturing && inchannels > 0) {
        JCALog("Setup AUHAL input\n");
        err1 = AudioUnitSetProperty(driver->au_hal, kAudioOutputUnitProperty_EnableIO,
                                    kAudioUnitScope_Input, 1, &enableIO, sizeof(enableIO));
        if (err1 != noErr) {
            jack_error("Error calling AudioUnitSetProperty - kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input");
            printError(err1);
            goto error;
        }
    }

    if (playing && outchannels > 0) {
        JCALog("Setup AUHAL output\n");
        err1 = AudioUnitSetProperty(driver->au_hal, kAudioOutputUnitProperty_EnableIO,
                                    kAudioUnitScope_Output, 0, &enableIO, sizeof(enableIO));
        if (err1 != noErr) {
            jack_error("Error calling AudioUnitSetProperty - kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output");
            printError(err1);
            goto error;
        }
    }

    // Set up chosen device, in both input and output cases
    err1 = AudioUnitSetProperty(driver->au_hal, kAudioOutputUnitProperty_CurrentDevice,
                                kAudioUnitScope_Global, 0, &driver->device_id, sizeof(AudioDeviceID));
    if (err1 != noErr) {
        jack_error("Error calling AudioUnitSetProperty - kAudioOutputUnitProperty_CurrentDevice");
        printError(err1);
        goto error;
    }

    // Set buffer size
    if (capturing && inchannels > 0) {
        err1 = AudioUnitSetProperty(driver->au_hal, kAudioUnitProperty_MaximumFramesPerSlice,
                                    kAudioUnitScope_Global, 1, (UInt32*)&nframes, sizeof(UInt32));
        if (err1 != noErr) {
            jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_MaximumFramesPerSlice");
            printError(err1);
            goto error;
        }
    }

    if (playing && outchannels > 0) {
        err1 = AudioUnitSetProperty(driver->au_hal, kAudioUnitProperty_MaximumFramesPerSlice,
                                    kAudioUnitScope_Global, 0, (UInt32*)&nframes, sizeof(UInt32));
        if (err1 != noErr) {
            jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_MaximumFramesPerSlice");
            printError(err1);
            goto error;
        }
    }

    // Setup channel map
    if (capturing && inchannels > 0 && inchannels < in_nChannels) {
        SInt32 chanArr[in_nChannels];
        for (i = 0; i < in_nChannels; i++) {
            chanArr[i] = -1;
        }
        for (i = 0; i < inchannels; i++) {
            chanArr[i] = i;
        }
        err1 = AudioUnitSetProperty(driver->au_hal, kAudioOutputUnitProperty_ChannelMap,
                                    kAudioUnitScope_Input, 1, chanArr, sizeof(SInt32) * in_nChannels); // was missing the "err1 =" assignment
        if (err1 != noErr) {
            jack_error("Error calling AudioUnitSetProperty - kAudioOutputUnitProperty_ChannelMap 1");
            printError(err1);
        }
    }

    if (playing && outchannels > 0 && outchannels < out_nChannels) {
        SInt32 chanArr[out_nChannels];
        for (i = 0; i < out_nChannels; i++) {
            chanArr[i] = -1;
        }
        for (i = 0; i < outchannels; i++) {
            chanArr[i] = i;
        }
        err1 = AudioUnitSetProperty(driver->au_hal, kAudioOutputUnitProperty_ChannelMap,
                                    kAudioUnitScope_Output, 0, chanArr, sizeof(SInt32) * out_nChannels);
        if (err1 != noErr) {
            jack_error("Error calling AudioUnitSetProperty - kAudioOutputUnitProperty_ChannelMap 0");
            printError(err1);
        }
    }

    // Setup stream converters
    srcFormat.mSampleRate = samplerate;
    srcFormat.mFormatID = kAudioFormatLinearPCM;
    srcFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved;
    srcFormat.mBytesPerPacket = sizeof(float);
    srcFormat.mFramesPerPacket = 1;
    srcFormat.mBytesPerFrame = sizeof(float);
    srcFormat.mChannelsPerFrame = outchannels;
    srcFormat.mBitsPerChannel = 32;

    err1 = AudioUnitSetProperty(driver->au_hal, kAudioUnitProperty_StreamFormat,
                                kAudioUnitScope_Input, 0, &srcFormat, sizeof(AudioStreamBasicDescription));
    if (err1 != noErr) {
        jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_StreamFormat kAudioUnitScope_Input");
        printError(err1);
    }

    dstFormat.mSampleRate = samplerate;
    dstFormat.mFormatID = kAudioFormatLinearPCM;
    dstFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved;
    dstFormat.mBytesPerPacket = sizeof(float);
    dstFormat.mFramesPerPacket = 1;
    dstFormat.mBytesPerFrame = sizeof(float);
    dstFormat.mChannelsPerFrame = inchannels;
    dstFormat.mBitsPerChannel = 32;

    err1 = AudioUnitSetProperty(driver->au_hal, kAudioUnitProperty_StreamFormat,
                                kAudioUnitScope_Output, 1, &dstFormat, sizeof(AudioStreamBasicDescription));
    if (err1 != noErr) {
        jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_StreamFormat kAudioUnitScope_Output");
        printError(err1);
    }

    // Setup callbacks
    if (inchannels > 0 && outchannels == 0) {
        AURenderCallbackStruct output;
        output.inputProc = render_input;
        output.inputProcRefCon = driver;
        err1 = AudioUnitSetProperty(driver->au_hal, kAudioOutputUnitProperty_SetInputCallback,
                                    kAudioUnitScope_Global, 0, &output, sizeof(output));
        if (err1 != noErr) {
            jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_SetRenderCallback 1");
            printError(err1);
            goto error;
        }
    } else {
        AURenderCallbackStruct output;
        output.inputProc = render;
        output.inputProcRefCon = driver;
        err1 = AudioUnitSetProperty(driver->au_hal, kAudioUnitProperty_SetRenderCallback,
                                    kAudioUnitScope_Input, 0, &output, sizeof(output));
        if (err1 != noErr) {
            jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_SetRenderCallback 0");
            printError(err1);
            goto error;
        }
    }

    if (capturing && inchannels > 0) {
        driver->input_list = (AudioBufferList*)malloc(sizeof(UInt32) + inchannels * sizeof(AudioBuffer));
        if (driver->input_list == 0)
            goto error;
        driver->input_list->mNumberBuffers = inchannels;

        // Prepare buffers (driver->capture_nchannels is only assigned further down, so use inchannels here)
        for (i = 0; i < inchannels; i++) {
            driver->input_list->mBuffers[i].mNumberChannels = 1;
            driver->input_list->mBuffers[i].mDataByteSize = nframes * sizeof(float);
        }
    }

    err = AudioDeviceAddPropertyListener(driver->device_id, 0, true,
                                         kAudioDeviceProcessorOverload, notification, driver);
    if (err != noErr) {
        jack_error("Error calling AudioDeviceAddPropertyListener with kAudioDeviceProcessorOverload");
        goto error;
    }

    err = AudioDeviceAddPropertyListener(driver->device_id, 0, true,
                                         kAudioDevicePropertyNominalSampleRate, notification, driver);
    if (err != noErr) {
        jack_error("Error calling AudioDeviceAddPropertyListener with kAudioDevicePropertyNominalSampleRate");
        goto error;
    }

    driver->playback_nchannels = outchannels;
    driver->capture_nchannels = inchannels;
    return ((jack_driver_t *) driver);

error:
    AudioUnitUninitialize(driver->au_hal);
    CloseComponent(driver->au_hal);
    jack_error("Cannot open the coreaudio driver");
    free(driver);
    return NULL;
}
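/* get_default_device() is referenced above but not defined in this excerpt. Below is a minimal
 * sketch of what it presumably does, using the same (deprecated) CoreAudio property API the
 * driver already relies on; treating the system default *output* device as "the" default device
 * is an assumption, mirroring how the duplex path above falls back when no UID matches. */
static OSStatus get_default_device(AudioDeviceID *id)
{
    OSStatus res;
    UInt32 theSize = sizeof(AudioDeviceID);

    /* ask the HAL for the system default output device */
    res = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
                                   &theSize, id);
    if (res != noErr) {
        JCALog("Cannot get default output device\n");
        return res;
    }

    JCALog("get_default_device: device id = %ld\n", (long)*id);
    return noErr;
}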