Example #1
bool AudioCapture::StartCaptureInternal(UINT AudioDeviceIndex)
{
    PersistentAssert(_CaptureDevice == NULL && _Capturer == NULL, "StartCapture called without StopCapture");

    const int TargetLatency = 20;
    const int TargetDurationInSec = 10;

    //
    //  A GUI application should use COINIT_APARTMENTTHREADED instead of COINIT_MULTITHREADED.
    //
    HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
    //PersistentAssert(SUCCEEDED(hr), "CoInitializeEx failed");
    
    //
    //  Pick the device to capture from.
    //
    bool Success = PickDevice(&_CaptureDevice, AudioDeviceIndex);
    PersistentAssert(Success, "PickDevice failed");
    
    //
    //  Instantiate a capturer for the selected device.
    //
    _Capturer = new (std::nothrow) CWASAPICapture(_CaptureDevice, _Compressor);
    PersistentAssert(_Capturer != NULL, "Allocate CWASAPICapture failed");
        
    if (_Capturer->Initialize(TargetLatency))
    {
        //
        //  We've initialized the capturer.  Once we've done that, we know some information about the
        //  mix format and we can allocate the buffer that we're going to capture.
        //
        //
        //  The buffer is going to contain "TargetDuration" seconds worth of PCM data.  That means 
        //  we're going to have TargetDuration*samples/second frames multiplied by the frame size.
        //
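        //
        //  For example (illustrative numbers only, not read from the device): at 48000
        //  samples/second, a 10 second duration and a 4 byte frame size, that works out to
        //  48000 * 10 * 4 = 1,920,000 bytes.
        //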
        size_t captureBufferSize = _Capturer->SamplesPerSecond() * TargetDurationInSec * _Capturer->FrameSize();
        _CaptureBuffer.Allocate(captureBufferSize);
        Success = _Capturer->Start(_CaptureBuffer.CArray(), captureBufferSize);
        PersistentAssert(Success, "_Capturer->Start failed");
    }
    else
    {
        PersistentAssert(false, "_Capturer->Initialize failed");
        return false;
    }

    return Success;
}
int wmain(int argc, wchar_t* argv[])
{
    int result = 0;
    IMMDevice *device = NULL;
    bool isDefaultDevice;
    ERole role;

    printf("WASAPI Render Exclusive Timer Driven Sample\n");
    printf("Copyright (c) Microsoft.  All Rights Reserved\n");
    printf("\n");

    if (!ParseCommandLine(argc, argv, CmdLineArgs, CmdLineArgLength))
    {
        result = -1;
        goto Exit;
    }
    //
    //  Now that we've parsed our command line, do some semantic checks.
    //

    //
    //  First off, show the help for the app if the user asked for it.
    //
    if (ShowHelp)
    {
        Help(argv[0]);
        goto Exit;
    }

    //
    //  The user can only specify one of -console, -communications or -multimedia or a specific endpoint.
    //
    if (((UseConsoleDevice != 0) + (UseCommunicationsDevice != 0) + (UseMultimediaDevice != 0) + (OutputEndpoint != NULL)) > 1)
    {
        printf("Can only specify one of -Console, -Communications or -Multimedia\n");
        result = -1;
        goto Exit;
    }


    //
    //  A GUI application should use COINIT_APARTMENTTHREADED instead of COINIT_MULTITHREADED.
    //
    HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
    if (FAILED(hr))
    {
        printf("Unable to initialize COM: %x\n", hr);
        result = hr;
        goto Exit;
    }

    //
    //  Now that we've parsed our command line, pick the device to render.
    //
    if (!PickDevice(&device, &isDefaultDevice, &role))
    {
        result = -1;
        goto Exit;
    }

    printf("Render a %d hz Sine wave for %d seconds\n", TargetFrequency, TargetDurationInSec);

    //
    //  Instantiate a renderer and play a sound for TargetDurationInSec seconds.
    //
    {
        CWASAPIRenderer *renderer = new (std::nothrow) CWASAPIRenderer(device);
        if (renderer == NULL)
        {
            printf("Unable to allocate renderer\n");
            result = -1;
            goto Exit;
        }

        if (renderer->Initialize(TargetLatency))
        {
            //
            //  We've initialized the renderer.  Once we've done that, we know some information about the
            //  mix format and we can allocate the buffer that we're going to render.
            //
            //
            //  The buffer is going to contain "TargetDuration" seconds worth of PCM data.  That means 
            //  we're going to have TargetDuration*samples/second frames multiplied by the frame size.
            //
            UINT32 renderBufferSizeInBytes = (renderer->BufferSizePerPeriod()  * renderer->FrameSize());
            size_t renderDataLength = (renderer->SamplesPerSecond() * TargetDurationInSec * renderer->FrameSize()) + (renderBufferSizeInBytes-1);
            size_t renderBufferCount = renderDataLength / (renderBufferSizeInBytes);
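            //
            //  Adding (renderBufferSizeInBytes - 1) before dividing rounds the total data length
            //  up to a whole number of period-sized buffers (ceiling division), so the tail of the
            //  sine data still gets a buffer of its own.
            //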
            //
            //  Render buffer queue. Because we need to insert each buffer at the end of the linked list instead of at the head, 
            //  we keep a pointer to a pointer to the variable which holds the tail of the current list in currentBufferTail.
            //
            RenderBuffer *renderQueue = NULL;
            RenderBuffer **currentBufferTail = &renderQueue;
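            //
            //  (Initially *currentBufferTail is renderQueue itself; after each append below it is
            //  the newest buffer's _Next field, so each new buffer is linked onto the tail in O(1).)
            //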

            double theta = 0;

            for (size_t i = 0 ; i < renderBufferCount ; i += 1)
            {
                RenderBuffer *renderBuffer = new (std::nothrow) RenderBuffer();
                if (renderBuffer == NULL)
                {
                    printf("Unable to allocate render buffer\n");
                    result = -1;
                    goto Exit;
                }
                renderBuffer->_BufferLength = renderBufferSizeInBytes;
                renderBuffer->_Buffer = new (std::nothrow) BYTE[renderBufferSizeInBytes];
                if (renderBuffer->_Buffer == NULL)
                {
                    printf("Unable to allocate render buffer\n");
                    delete renderBuffer;
                    result = -1;
                    goto Exit;
                }
                //
                //  Generate tone data in the buffer.
                //
                switch (renderer->SampleType())
                {
                case CWASAPIRenderer::SampleTypeFloat:
                    GenerateSineSamples<float>(renderBuffer->_Buffer, renderBuffer->_BufferLength, TargetFrequency,
                                                renderer->ChannelCount(), renderer->SamplesPerSecond(), &theta);
                    break;
                case CWASAPIRenderer::SampleType16BitPCM:
                    GenerateSineSamples<short>(renderBuffer->_Buffer, renderBuffer->_BufferLength, TargetFrequency,
                                                renderer->ChannelCount(), renderer->SamplesPerSecond(), &theta);
                    break;
                }
                //
                //  Link the newly allocated and filled buffer into the queue.  
                //
                *currentBufferTail = renderBuffer;
                currentBufferTail = &renderBuffer->_Next;
            }

            //
            //  The renderer takes ownership of the render queue - it will free the items in the queue when it renders them.
            //
            if (renderer->Start(renderQueue))
            {
                do
                {
                    printf(".");
                    Sleep(1000);
                } while (--TargetDurationInSec);
                printf("\n");

                renderer->Stop();
                renderer->Shutdown();
                SafeRelease(&renderer);
            }
            else
            {
                renderer->Shutdown();
                SafeRelease(&renderer);
            }
        }
        else
        {
            renderer->Shutdown();
            SafeRelease(&renderer);
        }
    }

Exit:
    SafeRelease(&device);
    CoUninitialize();
    return result;
}
//
//  The core of the sample.
//
//  Parse the command line, interpret the input parameters, pick an audio device then capture data from that device.
//  When done, write the data to a file.
//
int wmain(int argc, wchar_t* argv[])
{
    int result = 0;
    IMMDevice *device = NULL;
    bool isDefaultDevice;
    ERole role;

    printf("WASAPI Capture Shared Event Driven Sample\n");
    printf("Copyright (c) Microsoft.  All Rights Reserved\n");
    printf("\n");

    if (!ParseCommandLine(argc, argv, CmdLineArgs, CmdLineArgLength))
    {
        result = -1;
        goto Exit;
    }
    //
    //  Now that we've parsed our command line, do some semantic checks.
    //

    //
    //  First off, show the help for the app if the user asked for it.
    //
    if (ShowHelp)
    {
        Help(argv[0]);
        goto Exit;
    }

    //
    //  The user can only specify one of -console, -communications or -multimedia or a specific endpoint.
    //
    if (((UseConsoleDevice != 0) + (UseCommunicationsDevice != 0) + (UseMultimediaDevice != 0) + (OutputEndpoint != NULL)) > 1)
    {
        printf("Can only specify one of -Console, -Communications or -Multimedia\n");
        result = -1;
        goto Exit;
    }


    //
    //  A GUI application should use COINIT_APARTMENTTHREADED instead of COINIT_MULTITHREADED.
    //
    HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
    if (FAILED(hr))
    {
        printf("Unable to initialize COM: %x\n", hr);
        result = hr;
        goto Exit;
    }

    //
    //  Now that we've parsed our command line, pick the device to capture.
    //
    if (!PickDevice(&device, &isDefaultDevice, &role))
    {
        result = -1;
        goto Exit;
    }

    printf("Capture audio data for %d seconds\n", TargetDurationInSec);

    //
    //  Instantiate a capturer and capture sounds for TargetDuration seconds
    //
    //  Configure the capturer to enable stream switching on the specified role if the user specified one of the default devices.
    //
    {
        CWASAPICapture *capturer = new (std::nothrow) CWASAPICapture(device, isDefaultDevice, role);
        if (capturer == NULL)
        {
            printf("Unable to allocate capturer\n");
            result = -1;
            goto Exit;
        }

        if (capturer->Initialize(TargetLatency))
        {
            //
            //  We've initialized the capturer.  Once we've done that, we know some information about the
            //  mix format and we can allocate the buffer that we're going to capture.
            //
            //
            //  The buffer is going to contain "TargetDuration" seconds worth of PCM data.  That means 
            //  we're going to have TargetDuration*samples/second frames multiplied by the frame size.
            //
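            //
            //  For example (illustrative numbers only, not read from the mix format):
            //  48000 samples/second * 10 seconds * 4 bytes/frame = 1,920,000 bytes.
            //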
            size_t captureBufferSize = capturer->SamplesPerSecond() * TargetDurationInSec * capturer->FrameSize();
            BYTE *captureBuffer = new (std::nothrow) BYTE[captureBufferSize];

            if (captureBuffer == NULL)
            {
                printf("Unable to allocate capture buffer\n");
                result = -1;
                goto Exit;
            }

            if (capturer->Start(captureBuffer, captureBufferSize))
            {
                do
                {
                    printf(".");
                    Sleep(1000);
                } while (--TargetDurationInSec);
                printf("\n");

                capturer->Stop();

                //
                //  We've now captured our wave data.  Now write it out in a wave file.
                //
                SaveWaveData(captureBuffer, capturer->BytesCaptured(), capturer->MixFormat());


                //
                //  Now shut down the capturer and release it; we're done.
                //
                capturer->Shutdown();
                SafeRelease(&capturer);
            }
            else
            {
                capturer->Shutdown();
                SafeRelease(&capturer);
            }

            delete []captureBuffer;
        }
    }

Exit:
    SafeRelease(&device);
    CoUninitialize();
    return result;
}