Esempio n. 1
0
// Application shutdown: deactivate the main loop flag, release the timer,
// quit the game object and Winsock, then post WM_QUIT to end the message loop.
void OnDestroy()
{
	// Stop the main loop before tearing anything else down.
	G_pGame->m_bIsProgramActive = FALSE;

	_StopTimer(G_mmTimer);
	G_pGame->Quit();

	WSACleanup();       // release Winsock resources
	PostQuitMessage(0); // request termination of the message loop
}
Esempio n. 2
0
//----------------------------------------------------------------------------//
// Launches every registered kernel in order and blocks until the GPU has
// finished. Returns the elapsed time measured by _StartTimer()/_StopTimer(),
// or GPUIP_ERROR with *err filled in on failure.
double CUDAImpl::Run(std::string * err)
{
    _StartTimer();
    for(size_t i = 0; i < _kernels.size(); ++i) {
        if (!_LaunchKernel(*_kernels[i].get(), _cudaKernels[i], err)) {
            return GPUIP_ERROR;
        }
    }
    // Kernel launches are asynchronous: execution errors only surface at the
    // next synchronizing call, so the result of the sync must be checked too.
    cudaError_t c_err = cudaDeviceSynchronize();
    if (c_err != cudaSuccess) {
        (*err) = "Cuda error: kernel execution failed:\n";
        (*err) += cudaGetErrorString(c_err);
        return GPUIP_ERROR;
    }
    return _StopTimer();
}
Esempio n. 3
0
//=============================================================================
// Final cleanup on window destruction: releases the sockets, stops the timer,
// shuts Winsock down and ends the message loop.
void OnDestroy()
{
	// delete on a null pointer is a no-op, so no NULL guard is needed.
	// Reset the globals afterwards so any late access fails cleanly
	// instead of touching a dangling pointer.
	delete G_pListenSock;
	G_pListenSock = NULL;
	delete G_pLogSock;
	G_pLogSock = NULL;

	/*if (g_gameCopy != NULL) {
		g_gameCopy->Quit();
		delete g_gameCopy;
	}*/

	if (G_mmTimer != NULL) _StopTimer(G_mmTimer);
	_TermWinsock();

	PostQuitMessage(0);
}
/*********************************************************************
*
*       _Send
*
*  Function description
*    Sends data back to host if embOSView is ready to receive data.
*/
static void _Send(void) {
  if (TX_CNT != 0) {
    return;                     /* Cannot send yet */
  }
  _StopTimer();
  if (_TxIsPending) {
    /* Flush the byte that was queued while sending was not possible. */
    _FillTxBuf(_TxPendingData);
    _TxIsPending = 0;
    return;
  }
  /* Nothing pending: let the registered callback fill the Tx buffer,
     but only if the buffer lock can be taken. */
  if ((_pfOnTx != NULL) && _LockTxBuf()) {
    _pfOnTx();
    _UnlockTxBuf();
  }
}
Esempio n. 5
0
// Application teardown: releases sockets, the game object, the timer,
// Winsock and the log file, then ends the message loop.
void OnDestroy() {
    // delete handles a null pointer safely, so the sockets need no guard.
    delete G_pListenSock;
    delete G_pLogSock;

    if (G_pGame) {
        G_pGame->Quit();
        delete G_pGame;
    }

    if (G_mmTimer) _StopTimer(G_mmTimer);
    _TermWinsock();

    if (pLogFile) fclose(pLogFile);

    PostQuitMessage(0);
}
Esempio n. 6
0
//----------------------------------------------------------------------------//
// Frees any previously allocated device buffers, then allocates a fresh CUDA
// buffer for every entry in _buffers. Returns the elapsed time, or
// GPUIP_ERROR (with *err set) if a cudaMalloc fails.
double CUDAImpl::Allocate(std::string * err)
{
    _StartTimer();

    if (!_FreeBuffers(err)) {
        return GPUIP_ERROR;
    }

    std::map<std::string,Buffer::Ptr>::const_iterator it;
    for (it = _buffers.begin(); it != _buffers.end(); ++it) {
        const std::string & bufName = it->second->name;
        _cudaBuffers[bufName] = NULL;   // operator[] inserts the slot first
        const cudaError_t status = cudaMalloc(&_cudaBuffers[bufName],
                                              _BufferSize(it->second));
        if (_cudaErrorMalloc(status, err)) {
            return GPUIP_ERROR;
        }
    }
    return _StopTimer();
}
Esempio n. 7
0
//----------------------------------------------------------------------------//
// Copies the buffer's worth of bytes between host memory (`data`) and the
// device allocation registered under buffer->name; `op` selects the
// direction. Returns the elapsed time, or GPUIP_ERROR (with *err set) if
// the copy fails.
double CUDAImpl::Copy(Buffer::Ptr buffer,
                      Buffer::CopyOperation op,
                      void * data,
                      std::string * err)
{
    _StartTimer();
    const size_t nbytes = _BufferSize(buffer);
    cudaError_t status = cudaSuccess;
    switch (op) {
      case Buffer::COPY_FROM_GPU:
        status = cudaMemcpy(data, _cudaBuffers[buffer->name],
                            nbytes, cudaMemcpyDeviceToHost);
        break;
      case Buffer::COPY_TO_GPU:
        status = cudaMemcpy(_cudaBuffers[buffer->name], data,
                            nbytes, cudaMemcpyHostToDevice);
        break;
      default:
        // Any other operation falls through with cudaSuccess, as before.
        break;
    }
    if (_cudaErrorCopy(status, err, buffer->name, op)) {
        return GPUIP_ERROR;
    }
    return _StopTimer();
}
Esempio n. 8
0
//----------------------------------------------------------------------------//
// Compiles all registered kernels with nvcc into a PTX module, loads the
// module and resolves one CUfunction per kernel.
//
// Steps:
//   1. Write the helper-math header and a temporary .cu file wrapping the
//      kernel code in extern "C" (avoids C++ name mangling).
//   2. Invoke nvcc (honoring CUDA_BIN_PATH and, on Windows, CL_BIN_PATH),
//      capturing stdout+stderr through _execPipe.
//   3. Load the resulting PTX with cuModuleLoad and look up each kernel
//      with cuModuleGetFunction.
//
// Returns the elapsed build time, or GPUIP_ERROR with *err describing the
// failure.
double CUDAImpl::Build(std::string * err)
{
    _StartTimer();

    if(!_UnloadModule(err)) {
        return GPUIP_ERROR;
    }

    const char * file_helper_math_h = ".helper_math.h";
    const char * file_temp_cu = ".temp.cu";
    const char * file_temp_ptx = ".temp.ptx";

    // Includes vector float operations such as mult, add etc
    std::ofstream out_helper(file_helper_math_h);
    out_helper << get_cuda_helper_math();
    out_helper.close();

    // Create temporary file to compile
    std::ofstream out(file_temp_cu);
    out << "#include \"" << file_helper_math_h << "\"\n";
    out << "extern \"C\" { \n"; // To avoid function name mangling
    for(size_t i = 0; i < _kernels.size(); ++i) {
        out << _kernels[i]->code << "\n";
    }
    out << "}"; // End the extern C bracket
    out.close();

    // Build the nvcc command line.
    std::stringstream ss;
    const char * cuda_bin_path = getenv("CUDA_BIN_PATH");
    if (cuda_bin_path  != NULL) {
        ss << cuda_bin_path << "/nvcc";
    } else {
        ss << "nvcc";
    }
    ss << " -ptx " << file_temp_cu << " -o " << file_temp_ptx
       << " --Wno-deprecated-gpu-targets"
       << " -include " << file_helper_math_h;
    // Match the pointer width of the host process.
    if(sizeof(void *) == 4) {
        ss << " -m32";
    } else {
        ss << " -m64";
    }
#ifdef _WIN32
    const char * cl_bin_path = getenv("CL_BIN_PATH");
    if (cl_bin_path != NULL) {
        ss << " -ccbin \"" << cl_bin_path << "\"";
    }
#endif
    ss << " 2>&1" << std::endl; // get both standard output and error
    std::string pipe_err;
    int nvcc_exit_status = _execPipe(ss.str().c_str(), &pipe_err);

    // Cleanup temp text file
    _removeFile(file_helper_math_h);
    _removeFile(file_temp_cu);

    if (nvcc_exit_status) {
        (*err) = "Cuda error: Could not compile kernels:\n";
        (*err) += pipe_err;
        return GPUIP_ERROR;
    }

    // Load cuda ptx from file. Use the same constant the compile step wrote
    // to, instead of repeating the ".temp.ptx" literal.
    CUresult c_err = cuModuleLoad(&_cudaModule, file_temp_ptx);
    _removeFile(file_temp_ptx);
    if (_cudaErrorLoadModule(c_err, err)) {
        return GPUIP_ERROR;
    }

    _cudaKernels.resize(_kernels.size());
    for(size_t i = 0; i < _kernels.size(); ++i) {
        c_err = cuModuleGetFunction(&_cudaKernels[i], _cudaModule,
                                    _kernels[i]->name.c_str());
        if (_cudaErrorGetFunction(c_err, err, _kernels[i]->name)) {
            return GPUIP_ERROR;
        }
    }

    _cudaBuild = true;

    return _StopTimer();
}