static int vmdinfo_tcl(ClientData, Tcl_Interp *interp, int argc, const char *argv[]) { VMDApp *app = (VMDApp *)Tcl_GetAssocData(interp, "VMDApp", NULL); if (argc == 2) { SIMPLE_TCL_OPT("version", VMDVERSION); SIMPLE_TCL_OPT("versionmsg", VERSION_MSG); SIMPLE_TCL_OPT("authors", VMD_AUTHORS); SIMPLE_TCL_OPT("arch", VMD_ARCH); SIMPLE_TCL_OPT("options", VMD_OPTIONS); SIMPLE_TCL_OPT("www", VMD_HOMEPAGE); SIMPLE_TCL_OPT("wwwhelp", VMD_HELPPAGE); // return the estimated amount of available physical memory if (!strcmp(argv[1], "freemem")) { long vmdcorefree = vmd_get_avail_physmem_mb(); Tcl_Obj *tcl_result = Tcl_NewListObj(0, NULL); Tcl_ListObjAppendElement(interp, tcl_result, Tcl_NewIntObj(vmdcorefree)); Tcl_SetObjResult(interp, tcl_result); return TCL_OK; } // return the number of available CPU cores if (!strcmp(argv[1], "numcpus")) { #if defined(VMDTHREADS) int numcpus = wkf_thread_numprocessors(); #else int numcpus = 1; #endif Tcl_Obj *tcl_result = Tcl_NewListObj(0, NULL); Tcl_ListObjAppendElement(interp, tcl_result, Tcl_NewIntObj(numcpus)); Tcl_SetObjResult(interp, tcl_result); return TCL_OK; } // return the CPU affinity list for the VMD process if (!strcmp(argv[1], "cpuaffinity")) { int numcpus = -1; int *cpuaffinitylist = NULL; #if defined(VMDTHREADS) cpuaffinitylist = wkf_cpu_affinitylist(&numcpus); #endif if (numcpus > 0 && cpuaffinitylist != NULL) { int i; Tcl_Obj *tcl_result = Tcl_NewListObj(0, NULL); for (i=0; i<numcpus; i++) Tcl_ListObjAppendElement(interp, tcl_result, Tcl_NewIntObj(cpuaffinitylist[i])); Tcl_SetObjResult(interp, tcl_result); return TCL_OK; } if (cpuaffinitylist != NULL) free(cpuaffinitylist); Tcl_AppendResult(interp, "CPU affinity query unavailable on this platform", NULL); return TCL_ERROR; } // return the number of available CUDA devices if (!strcmp(argv[1], "numcudadevices")) { int numdevices; #if defined(VMDCUDA) vmd_cuda_num_devices(&numdevices); #else numdevices = 0; #endif Tcl_Obj *tcl_result = Tcl_NewListObj(0, NULL); 
Tcl_ListObjAppendElement(interp, tcl_result, Tcl_NewIntObj(numdevices)); Tcl_SetObjResult(interp, tcl_result); return TCL_OK; } // return the active display device (e.g. "text", "win", "cave", ...) if (!strcmp(argv[1], "dispdev")) { const char *disp = VMDgetDisplayTypeName(); Tcl_AppendResult(interp, disp, NULL); return TCL_OK; } // return the MPI node name if (!strcmp(argv[1], "nodename")) { Tcl_Obj *tcl_result = Tcl_NewListObj(0, NULL); Tcl_ListObjAppendElement(interp, tcl_result, Tcl_NewStringObj(app->par_name(), strlen(app->par_name()))); Tcl_SetObjResult(interp, tcl_result); return TCL_OK; } // return the MPI node rank if (!strcmp(argv[1], "noderank")) { Tcl_Obj *tcl_result = Tcl_NewListObj(0, NULL); Tcl_ListObjAppendElement(interp, tcl_result, Tcl_NewIntObj(app->par_rank())); Tcl_SetObjResult(interp, tcl_result); return TCL_OK; } // return the MPI node count if (!strcmp(argv[1], "nodecount")) { Tcl_Obj *tcl_result = Tcl_NewListObj(0, NULL); Tcl_ListObjAppendElement(interp, tcl_result, Tcl_NewIntObj(app->par_size())); Tcl_SetObjResult(interp, tcl_result); return TCL_OK; } } Tcl_AppendResult(interp, "vmdinfo: version | versionmsg | authors | arch | \n" "freemem | numcpus | cpuaffinity | numcudadevices | \n" "dispdev | nodename | noderank | nodecount | \n" "options | www | wwwhelp", NULL); return TCL_ERROR; }
// CUDAAccel constructor: probe the machine for CUDA accelerator devices,
// honoring the VMDNOCUDA, VMDCUDADEVICEMASK, and VMDCUDANODISPLAYGPUS
// environment variables, and record the properties of every usable device
// in devprops.  In builds without VMDCUDA defined, only the bookkeeping
// fields are initialized.
CUDAAccel::CUDAAccel(void) {
  cudaavail = 0;
  numdevices = 0;
  int usabledevices = 0;   // count of devices that pass the exclusion filters
  cudapool=NULL;

  // allow the user to disable CUDA entirely at runtime
  if (getenv("VMDNOCUDA") != NULL) {
    msgInfo << "VMDNOCUDA environment variable is set, CUDA support disabled."
            << sendmsg;
    return;
  }

  // Optional hex bitmask selecting which GPU device indices may be used;
  // bit i enables device index i.  Default is all devices enabled.
  unsigned int gpumask = 0xffffffff;
  const char *gpumaskstr = getenv("VMDCUDADEVICEMASK");
  if (gpumaskstr != NULL) {
    unsigned int tmp;
    // parse the mask as hexadecimal; on failure keep the all-ones default
    if (sscanf(gpumaskstr, "%x", &tmp) == 1) {
      gpumask = tmp;
      msgInfo << "Using GPU device mask '" << gpumaskstr << "'" << sendmsg;
    } else {
      msgInfo << "Failed to parse CUDA GPU device mask string '"
              << gpumaskstr << "'" << sendmsg;
    }
  }

#if defined(VMDCUDA)
  int rc = 0;
  if ((rc=vmd_cuda_num_devices(&numdevices)) != VMDCUDA_ERR_NONE) {
    numdevices = 0;
    // Only emit error messages when there are CUDA GPUs on the machine
    // but that they can't be used for some reason
    // XXX turning this off for the time being, as some people have
    //     NVIDIA drivers installed on machines with no NVIDIA GPU, as can
    //     happen with some distros that package the drivers by default.
    switch (rc) {
      case VMDCUDA_ERR_NODEVICES:
      case VMDCUDA_ERR_SOMEDEVICES:
        // msgInfo << "No CUDA accelerator devices available." << sendmsg;
        break;
#if 0
      case VMDCUDA_ERR_SOMEDEVICES:
        msgWarn << "One or more CUDA accelerators may exist but are not usable." << sendmsg;
        msgWarn << "Check to make sure that GPU drivers are up to date." << sendmsg;
        break;
#endif
      case VMDCUDA_ERR_DRVMISMATCH:
        // runtime/driver version skew is worth warning about even when
        // the other probe failures stay silent
        msgWarn << "Detected a mismatch between CUDA runtime and GPU driver" << sendmsg;
        msgWarn << "Check to make sure that GPU drivers are up to date." << sendmsg;
        // msgInfo << "No CUDA accelerator devices available." << sendmsg;
        break;
    }
    return;
  }

  if (numdevices > 0) {
    cudaavail = 1;

    int i;
    for (i=0; i<numdevices; i++) {
      cudadevprops dp;
      memset(&dp, 0, sizeof(dp));
      // query per-device properties; nonzero return indicates failure
      if (!vmd_cuda_device_props(i, dp.name, sizeof(dp.name),
                                 &dp.major, &dp.minor,
                                 &dp.membytes, &dp.clockratekhz,
                                 &dp.smcount, &dp.overlap,
                                 &dp.kernelexectimeoutenabled,
                                 &dp.canmaphostmem, &dp.computemode)) {
        dp.deviceid=i; // save the device index

        // Check that each GPU device has not been excluded by virtue of
        // being used for display, by a GPU device mask, or by the CUDA
        // device mode being set to a "prohibited" status.
        // NOTE(review): the mask test shifts by the device index, so
        // device indices >= 32 are effectively unmaskable — confirm if
        // that ever matters in practice.
        if (!(dp.kernelexectimeoutenabled && getenv("VMDCUDANODISPLAYGPUS")) &&
            (gpumask & (1 << i)) &&
            (dp.computemode != computeModeProhibited)) {
          devprops.append(dp);
          usabledevices++;
        }
      } else {
        msgWarn << "  Failed to retrieve properties for CUDA accelerator "
                << i << sendmsg;
      }
    }
  }

  // expose only the devices that survived the exclusion filters, then
  // set up the per-device worker thread pool
  numdevices=usabledevices;

  devpool_init();
#endif
}