Example #1: collect all affinity GPU handles into a std::vector
    std::vector<HGPUNV> RenderContext::enumGpusNV() const
    {
      std::vector<HGPUNV> gpus;

      if(WGLEW_NV_gpu_affinity)
      {
        HGPUNV gpu;
        // wglEnumGpusNV returns FALSE once gpuIndex passes the last GPU
        for (UINT gpuIndex = 0; wglEnumGpusNV(gpuIndex, &gpu); ++gpuIndex)
        {
          gpus.push_back(gpu);
        }
      }

      return gpus;
    }
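None of the examples show the typical OpenGL use of the enumerated handle: passing it, in a NULL-terminated list, to wglCreateAffinityDCNV to obtain a device context restricted to that GPU. Below is a minimal sketch, assuming WGLEW is already initialized and the extension is available; the helper name createAffinityDC is illustrative, not taken from any example.

    #include <windows.h>
    #include <GL/glew.h>
    #include <GL/wglew.h>

    // Restrict a device context to a single GPU. The list handed to
    // wglCreateAffinityDCNV must be terminated by a NULL handle.
    HDC createAffinityDC( HGPUNV gpu )
    {
        const HGPUNV gpuList[2] = { gpu, 0 };
        return wglCreateAffinityDCNV( gpuList ); // returns 0 on failure
    }

    // An affinity DC is released with wglDeleteDCNV( dc ), not DeleteDC().

Example #4 below takes the other route and hands the same kind of handle to cudaWGLGetDevice to resolve the matching CUDA device.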
Example #2: list every GPU with its device string and, if attached to the desktop, its virtual-screen rectangle
int main( const int argc, char** argv )
{
    if( !initWGLEW( ))
        std::cerr << "WGL extension query failed" << std::endl;
    else if( !WGLEW_NV_gpu_affinity )
        std::cerr << "WGL_NV_gpu_affinity unsupported" << std::endl;
    else for( UINT gpu = 0; true; ++gpu )
    {
        HGPUNV hGPU = 0;
        if( !wglEnumGpusNV( gpu, &hGPU ))
            break;

        GPU_DEVICE gpuDevice;
        gpuDevice.cb = sizeof( gpuDevice );
        const bool found = wglEnumGpuDevicesNV( hGPU, 0, &gpuDevice );
        assert( found );

        std::cout << "GPU " << gpu << ": " << gpuDevice.DeviceString;
 
        if( gpuDevice.Flags & DISPLAY_DEVICE_ATTACHED_TO_DESKTOP )
        {
            const RECT& rect = gpuDevice.rcVirtualScreen;
            std::cout << " used on [" << rect.left << ' ' << rect.top << ' '
                      << rect.right  - rect.left << ' ' 
                      << rect.bottom - rect.top << ']';
        }
        else
            std::cout << " offline";

        std::cout << std::endl;
    }

    std::cout << "Press Enter to exit..." << std::endl;
    
    char foo[256];
    std::cin.getline( foo, 256 );
    return EXIT_SUCCESS;
}
Example #3: translate a configured pipe device index into an affinity GPU handle
bool Pipe::_getGPUHandle( HGPUNV& handle )
{
    handle = 0;

    const uint32_t device = getPipe()->getDevice();
    if( device == EQ_UNDEFINED_UINT32 )
        return true;

    if( !WGLEW_NV_gpu_affinity )
    {
        EQWARN <<"WGL_NV_gpu_affinity unsupported, ignoring pipe device setting"
               << std::endl;
        return true;
    }

    if( !wglEnumGpusNV( device, &handle ))
    {
        setError( ERROR_WGLPIPE_ENUMGPUS_FAILED );
        EQWARN << getError() << ": " << co::base::sysError << std::endl;
        return false;
    }

    return true;
}
Example #4: CUDA context initialization, mapping the affinity GPU handle to a CUDA device on Windows
    //--------------------------------------------------------------------------
    // CUDA init
    //--------------------------------------------------------------------------
    bool CUDAContext::configInit( )
    {
#ifdef EQUALIZER_USE_CUDA
        cudaDeviceProp props;
        uint32_t device = getPipe()->getDevice();

        // Setup the CUDA device
        if( device == LB_UNDEFINED_UINT32 )
        {
            device = _getFastestDeviceID();
            LBWARN << "No CUDA device, using the fastest device: " << device
                   << std::endl;
        }

        int device_count = 0;
        cudaGetDeviceCount( &device_count );
        LBINFO << "CUDA devices found: " << device_count << std::endl;
        LBASSERT( static_cast< uint32_t >( device_count ) > device );
        if( static_cast< uint32_t >( device_count ) <= device )
        {
            LBWARN << "Not enough cuda devices, requested device " << device
                   << " of " << device_count << std::endl;
            setError( ERROR_CUDACONTEXT_DEVICE_NOTFOUND );
            return false;
        }

        // We assume GL interop here, otherwise use cudaSetDevice( device );
        // Attention: this call requires a valid GL context!
        cudaGLSetGLDevice( device );

        int usedDevice = static_cast< int >( device );
#ifdef _WIN32

        HGPUNV handle = 0;

        if( !WGLEW_NV_gpu_affinity )
        {
            LBWARN <<"WGL_NV_gpu_affinity unsupported, ignoring device setting"
                   << std::endl;
            return true;
        }

        if( !wglEnumGpusNV( device, &handle ))
        {
           LBWARN << "wglEnumGpusNV failed : " << lunchbox::sysError << std::endl;
            return false;
        }

        cudaWGLGetDevice( &usedDevice, handle );
#else
        cudaGetDevice( &usedDevice );
#endif
        LBASSERT( device == static_cast< uint32_t >( usedDevice ));
        cudaGetDeviceProperties( &props, usedDevice );

        cudaError_t err = cudaGetLastError();
        if( cudaSuccess != err )
        {
            LBWARN << "CUDA initialization error: "
                   << cudaGetErrorString( err ) << std::endl;
            setError( ERROR_CUDACONTEXT_INIT_FAILED );
            return false;
        }                         

        LBINFO << "Using CUDA device: " << device << std::endl;
        return true;
#else
        setError( ERROR_CUDACONTEXT_MISSING_SUPPORT );
        return false;
#endif
    }