/*
 * Dump the contents of the device memory range [dptr, dptr + size) to a file.
 * Looks up the GMM region containing %dptr, validates that the requested
 * range lies within the region, and delegates the actual dump to
 * gmm_dump_region(). Logs and returns silently on any validation failure.
 */
GMM_EXPORT void gmm_dump_dptr(const char *filepath, const void *dptr, const size_t size)
{
	struct region *r;

	if (!filepath) {
		gprint(DEBUG, "bad filepath for gmm_dump\n");
		return;
	}
	/* size_t is unsigned, so the old `size <= 0` test only ever caught zero. */
	if (size == 0) {
		gprint(DEBUG, "bad size for gmm_dump\n");
		return;
	}
	r = region_lookup(pcontext, dptr);
	if (!r) {
		gprint(DEBUG, "region lookup failed for %p in gmm_dump\n", dptr);
		return;
	}
	/* Bug fix (was flagged "potential bug"): the end of the region's swap
	 * buffer is r->swp_addr + r->size. The old code compared against
	 * &(r->swp_addr) + r->size, i.e. an address inside the region struct
	 * itself, so the range check was meaningless. Cast to char* to avoid
	 * arithmetic on void* (a GNU extension). */
	if ((const char *)dptr + size > (const char *)r->swp_addr + r->size) {
		gprint(DEBUG, "bad dump range for gmm_dump\n");
		return;
	}
	gmm_dump_region(filepath, r, dptr, size);
}
/*
 * Interposed cudaMemcpy: route host<->device and device<->device copies
 * through the GMM runtime when it is initialized; otherwise (or for copy
 * kinds GMM does not manage, e.g. host-to-host) fall through to the
 * native CUDA runtime entry point.
 */
GMM_EXPORT cudaError_t cudaMemcpy(
		void *dst,
		const void *src,
		size_t count,
		enum cudaMemcpyKind kind)
{
	cudaError_t ret;

	if (!initialized) {
		gprint(WARN, "cudaMemcpy called outside GMM\n");
		return nv_cudaMemcpy(dst, src, count, kind);
	}

	switch (kind) {
	case cudaMemcpyHostToDevice:
		ret = gmm_cudaMemcpyHtoD(dst, src, count);
		break;
	case cudaMemcpyDeviceToHost:
		ret = gmm_cudaMemcpyDtoH(dst, src, count);
		break;
	case cudaMemcpyDeviceToDevice:
		ret = gmm_cudaMemcpyDtoD(dst, src, count);
		break;
	default:
		/* Host-to-host (and any other kind) is not managed by GMM. */
		gprint(WARN, "HtoH memory copy not supported by GMM\n");
		ret = nv_cudaMemcpy(dst, src, count, kind);
		break;
	}

	return ret;
}
/**
 * Make @program the active GLSL shader program.
 *
 * Sanity-checks the GL error state and verifies that the program GL reports
 * as current matches our cached _currentShader before switching. No-ops if
 * @program is already current. On a successful glUseProgram the cache is
 * updated and, if a camera is mid-render, its uniforms are relinked and
 * its view matrix re-sent (the new program has its own uniform locations).
 *
 * @param program Handle of the shader program to activate.
 */
void Engine::switchShader( GLint program ) {
  if (glGetError()) {
    gprint( PRINT_ERROR, "Error, glGetError() true prior to switchShader()\n" );
  }

  // Cross-check GL's notion of the bound program against our cache.
  GLint boundProgram;
  glGetIntegerv( GL_CURRENT_PROGRAM, &boundProgram );
  if (boundProgram != _currentShader) {
    gprint( PRINT_ERROR, "ERROR: Shader in-use (%d) was not _currentShader (%d)!\n",
            boundProgram, _currentShader );
  }

  // Nothing to do if the requested program is already active.
  if (program == _currentShader) return;

  glUseProgram( program );

  if (glGetError()) {
    gprint( PRINT_ERROR, "Could not switch from [%d] to [%d]\n", boundProgram, program );
    return;
  }

  _currentShader = program;
  gprint( PRINT_VERBOSE, "Switched from [%d] to [%d]\n", boundProgram, program );
  if (_renderingCamera) {
    // Since we're on a new shader, have the camera re-send its CTM.
    _renderingCamera->relinkUniforms();
    _renderingCamera->view();
  }
}
/*
 * Handle an operator during expression parsing using the classic
 * precedence-stack algorithm: emit (via pcode.addInt) every stacked
 * operator whose priority is >= p, then push operator i with priority p.
 *
 * stk[]  - operator stack (codes), 1-based; stkp[] - parallel priorities.
 * *nstk  - current stack depth, updated in place.
 */
void stack_op(GLEPcode& pcode, int stk[], int stkp[], int *nstk, int i, int p) {
	int& top = *nstk;
	dbg gprint("Stack oper %d priority %d \n",i,p);
	// Pop-and-emit while the stacked operator binds at least as tightly.
	while (top > 0 && p <= stkp[top]) {
		dbg gprint("ADDING oper stack = %d oper=%d \n",top,stk[top]);
		pcode.addInt(stk[top]);
		top--;
	}
	// Push the incoming operator and its priority.
	top++;
	stk[top] = i;
	stkp[top] = p;
}
/*
 * Parse a "begin config ... end config" block from the GLE source.
 *
 * @param block  Name of the config section (e.g. as written after "begin").
 * @param pln    In/out: current source line number; advanced as lines are consumed.
 * @param pcode  Pcode buffer pointer handed to the tokenizer.
 * @param cp     In/out: tokenizer cursor/position state.
 *
 * @throws ParserError on an unknown section name or when config blocks are
 *         disallowed (safe mode).
 */
void begin_config(const std::string& block, int *pln, int *pcode, int *cp) throw(ParserError) {
	string block_name(block);
	ConfigSection* section = g_Config.getSection(block_name);
	if (section == NULL) {
		// g_throw_parser_error throws, so 'section' is non-NULL below.
		g_throw_parser_error("unrecognized config section '", block_name.c_str(), "'");
	}
	// Don't do config blocks in safe mode (except in RC file)
	GLEInterface* iface = GLEGetInterfacePointer();
	if (iface->getCmdLine()->hasOption(GLE_OPT_SAFEMODE)) {
		GLEGlobalConfig* config = iface->getConfig();
		if (!config->allowConfigBlocks()) {
			g_throw_parser_error("safe mode - config blocks not allowed");
		}
	}
	// Start with pcode from the next line
	(*pln)++;
	begin_init();
	// Consume one tokenized source line per iteration until the tokenizer
	// signals the end of the block.
	while (true) {
		int st = begin_token(&pcode,cp,pln,srclin,tk,&ntk,outbuff);
		if (!st) {
			/* exit loop */
			break;
		}
		// Per-line parse state: each line is expected to be of the form
		//   <option> = <value>  or  <option> += <value>
		// 'mode' tracks which of the three positions we are at; it is
		// advanced for every token, so malformed lines only log and skip.
		int ct = 1;
		int mode = 0;
		bool plus_is = false;
		CmdLineOption* option = NULL;
		while (ct <= ntk) {
			skipspace;
			if (section != NULL) {
				if (mode == 0) {
					// First token: the option name.
					option = section->getOption(tk[ct]);
					if (option == NULL) {
						gprint("Not a valid setting for section '%s': {%s}\n", block_name.c_str(), tk[ct]);
					}
				} else if (mode == 1) {
					// Second token: assignment operator; '+=' appends.
					if (strcmp(tk[ct], "=") == 0) {
						plus_is = false;
					} else if (strcmp(tk[ct], "+=") == 0) {
						plus_is = true;
					} else {
						gprint("Expected '=' or '+=', not {%s}\n", tk[ct]);
					}
				} else if (option != NULL) {
					// Remaining tokens: value(s) appended to the option's
					// first argument; '=' resets it first.
					CmdLineOptionArg* arg = option->getArg(0);
					if (!plus_is) arg->reset();
					arg->appendValue(tk[ct]);
				}
				mode++;
			}
			ct++;
		}
	}
}
// For internal use only void gmm_print_region(void *rgn) { struct region *r = (struct region *)rgn; gprint(DEBUG, "printing dptr %p (%p)\n", r->swp_addr, r); gprint(DEBUG, "\tsize: %ld\t\tstate: %d\t\tflags: %d\n", \ r->size, r->state, r->flags); gprint(DEBUG, "\tdev_addr: %p\t\tswp_addr: %p\t\tpta_addr: %p\n", \ r->dev_addr, r->swp_addr, r->pta_addr); gprint(DEBUG, "\tpinned: %d\t\twriting: %d\t\treading: %d\n", \ atomic_read(&r->pinned), atomic_read(&r->writing), \ atomic_read(&r->reading)); }
/**
 * An animation callback that tests a variety of transformations
 * in the scene graph: an in-place rotation on all three axes, a fixed
 * offset from the parent, and an orbit about the origin, all scaled by
 * the frame-time factor so the motion is framerate-independent.
 * @param obj The object to animate.
 */
void animationTest( TransCache &obj ) {
  double dt = tick.scale();
  double spinAngle  = dt * 0.1;
  double orbitAngle = dt * 0.2;

  gprint( PRINT_VERBOSE, "Timescale: %f\n", dt );

  /* Scaling is multiplicative, unlike rotation/translation, so a
     time-scaled grow would need pow( scaleFactor, dt ) — inefficient,
     hence left disabled. */
  // obj._scale.Adjust( pow( 1.001, dt ) );

  // Object rotates in-place.
  obj._rotation.rotateX( spinAngle );
  obj._rotation.rotateY( spinAngle );
  obj._rotation.rotateZ( spinAngle );

  obj._offset.set( 5, 0, 0 );

  // Object increasingly moves away from origin, x += 0.01
  //obj._offset.delta( dt * 0.01, 0, 0 );

  // Object orbits about the origin.
  obj._orbit.rotateX( orbitAngle );
  obj._orbit.rotateY( orbitAngle );
  obj._orbit.rotateZ( orbitAngle );

  // Object moves its focal _orbit-point, x = 5.
  //obj.displacement.set( 5, 0, 0 );
}
/**
 * Turns a binary-relative relative path into a working directory relative
 * relative path by prefixing it with the shader/asset base directory.
 *
 * @param path The binary-relative path to resolve.
 *
 * @return The resolved path.
 */
std::string getRelativePath( const char *path ) {
  std::string resolved( dondeestanlosshaders );
  resolved += path;
  gprint( PRINT_DEBUG, "Looking for \"%s\"\tExecutable is in \"%s\"\tResolving to \"%s\"\n",
          path, dondeestanlosshaders.c_str(), resolved.c_str() );
  return resolved;
}
/*
 * Convert an infix expression string to polish (pcode) form: load the
 * expression into the tokenizer, then delegate to the token-stream
 * overload of internalPolish.
 *
 * @param expr   NUL-terminated expression text.
 * @param pcode  Output pcode buffer.
 * @param rtype  Out: result type of the expression.
 *
 * @throws ParserError on a malformed expression (from the delegate).
 */
void GLEPolish::internalPolish(const char *expr, GLEPcode& pcode, int *rtype) throw(ParserError) {
#ifdef DEBUG_POLISH
	gprint("==== Start of expression {%s} \n",expr);
#endif
	m_tokens.set_string(expr);
	internalPolish(pcode, rtype);
}
/** * Idle function that is called from the GLUT mainloop. * Applies animations, camera motion, and Wii input. */ void terrain_idle( void ) { Scene &theScene = (*Engine::instance()->rootScene()); Object &Terrain = *(theScene["terrain"]); Object &Pyramid = *(theScene["pyramid"]); Pyramid.animation( animationTest ); Pyramid["moon"]->animation( simpleRotateAnim ); Object &Heavy = *(theScene["heavy"]); Object &Medic = *(Heavy["medic"]); Object &Spy = *(Medic["spy"]); Heavy.animation( simpleRotateAnim ); Medic.animation( simpleRotateY ); Spy.animation( simpleRotateY ); if ( Engine::instance()->opt( "terrain_regen" ) ) { gprint( PRINT_INFO, "terrain_regen on, turning on switchingTerrain bool\n" ); switchingTerrain = true; Engine::instance()->opt( "terrain_regen", false ); } Terrain.animation( TerrainGenerationAnimation ); }
/*
 * Initialize the GMM runtime for this process.
 *
 * Order matters here:
 *  1. Resolve the native CUDA runtime entry points we interpose on.
 *  2. Set up logging.
 *  3. Create the GMM local context, then attach to the global arena
 *     (tearing the context down again if the attach fails).
 *  4. Poke the CUDA runtime once (cudaMemGetInfo) BEFORE setting
 *     'initialized', so any memory the CUDA runtime implicitly allocates
 *     is created while GMM is still pass-through and is therefore never
 *     managed by GMM.
 */
void gmm_init(void) {
	/* Resolve the real CUDA runtime symbols we wrap. */
	INTERCEPT_CUDA("cudaMalloc", nv_cudaMalloc);
	INTERCEPT_CUDA("cudaFree", nv_cudaFree);
	INTERCEPT_CUDA("cudaMemcpy", nv_cudaMemcpy);
	INTERCEPT_CUDA("cudaMemcpyAsync", nv_cudaMemcpyAsync);
	INTERCEPT_CUDA("cudaStreamCreate", nv_cudaStreamCreate);
	INTERCEPT_CUDA("cudaStreamDestroy", nv_cudaStreamDestroy);
	INTERCEPT_CUDA("cudaStreamSynchronize", nv_cudaStreamSynchronize);
	INTERCEPT_CUDA("cudaMemGetInfo", nv_cudaMemGetInfo);
	INTERCEPT_CUDA("cudaSetupArgument", nv_cudaSetupArgument);
	INTERCEPT_CUDA("cudaConfigureCall", nv_cudaConfigureCall);
	INTERCEPT_CUDA("cudaMemset", nv_cudaMemset);
	//INTERCEPT_CUDA2("cudaMemsetAsync", nv_cudaMemsetAsync);
	//INTERCEPT_CUDA2("cudaDeviceSynchronize", nv_cudaDeviceSynchronize);
	INTERCEPT_CUDA("cudaLaunch", nv_cudaLaunch);
	INTERCEPT_CUDA("cudaStreamAddCallback", nv_cudaStreamAddCallback);

	gprint_init();

	if (gmm_context_init() == -1) {
		gprint(FATAL, "failed to initialize GMM local context\n");
		return;
	}
	if (client_attach() == -1) {
		gprint(FATAL, "failed to attach to the GMM global arena\n");
		/* Undo the context created above before bailing out. */
		gmm_context_fini();
		return;
	}

	// Before marking GMM context initialized, invoke an NV function
	// to initialize CUDA runtime and let whatever memory regions
	// implicitly required by CUDA runtime be allocated now. Those
	// regions should be always attached and not managed by GMM runtime.
	do {
		size_t dummy;
		nv_cudaMemGetInfo(&dummy, &dummy);
	} while (0);

	initialized = 1;
	gprint(DEBUG, "gmm initialized\n");
}
// GMM-specific: allowing passing dptr array hints. GMM_EXPORT cudaError_t cudaMallocEx(void **devPtr, size_t size, int flags) { if (initialized) return gmm_cudaMalloc(devPtr, size, flags); else { gprint(WARN, "cudaMallocEx called outside GMM\n"); return nv_cudaMalloc(devPtr, size); } }
void Engine::run( void ) { if (glGetError()) { gprint( PRINT_ERROR, "glGetError() returning true prior to Engine::run().\n" ); } glutMainLoop(); delete Engine::instance(); }
/**
 * Attempt to initialize the Wii controller and record the outcome in the
 * "wii" engine option. Compiles to a no-op unless built with WII support.
 */
void Engine::wiiInit( void ) {
#ifdef WII
  const bool wiiReady = initWii( _wii );
  if ( !wiiReady ) {
    gprint( PRINT_WARNING, "Not using Wii controls for this runthrough.\n" );
  }
  opt( "wii", wiiReady );
#endif
}
// Print info of the region containing %dptr GMM_EXPORT void gmm_print_dptr(const void *dptr) { struct region *r; r = region_lookup(pcontext, dptr); if (!r) { gprint(DEBUG, "failed to look up region containing %p\n", dptr); return; } gmm_print_region(r); }
/*
 * Emit one interpolated frame between two "spun" parent genomes.
 *
 * @param frame   Frame number; becomes the genome's time (and its name).
 * @param blend   Interpolation fraction in [0,1] between the parents.
 * @param seqflag Sequence flag forwarded to sheep_edge.
 * @param parents Array of the two parent genomes.
 * @param templ   Optional template genome applied to the result (may be NULL).
 *
 * Prints the resulting genome as XML (flam3's gprint) and frees everything
 * it allocated before returning.
 */
void spin_inter(int frame, double blend, int seqflag, flam3_genome *parents, flam3_genome *templ)
{
   flam3_genome *result;
   char action[50];
   xmlDocPtr doc;
   /* NOTE(review): 'ai' looks unused here but is presumably referenced by
      the argf macro below (flam3 convention) — confirm before removing. */
   char *ai;
   double stagger = argf("stagger", 0.0);

   /* Interpolate between spun parents */
   result = sheep_edge(parents, blend, seqflag, stagger);

   /* Unsure why we check for random palettes on both ends... */
   if ((parents[0].palette_index != flam3_palette_random) &&
       (parents[1].palette_index != flam3_palette_random)) {
      /* Record both endpoint palettes so the renderer can blend them. */
      result->palette_index = flam3_palette_interpolated;
      result->palette_index0 = parents[0].palette_index;
      result->hue_rotation0 = parents[0].hue_rotation;
      result->palette_index1 = parents[1].palette_index;
      result->hue_rotation1 = parents[1].hue_rotation;
      result->palette_blend = blend;
   }

   /* Apply template if necessary */
   if (templ)
      flam3_apply_template(result, templ);

   /* Set genome attributes */
   result->time = (double)frame;
   // result->interpolation_type = flam3_inttype_linear;

   /* Create the edit doc xml */
   sprintf(action,"interpolate %g",blend*360.0);
   doc = create_new_editdoc(action, &parents[0], &parents[1]);
   result->edits = doc;

   /* Subpixel jitter */
   offset(result);

   /* Make the name of the flame the time */
   sprintf(result->flame_name,"%f",result->time);

   /* Print the genome (flam3's genome-printing gprint, not a logger) */
   gprint(result, 1);

   /* Clean up */
   xmlFreeDoc(result->edits);

   /* Free genome storage */
   clear_cp(result,flam3_defaults_on);
   free(result);
}
// GMM-specific: pass reference hints. // %which_arg tells which argument (starting with 0) in the following // cudaSetupArgument calls is a device memory pointer. %flags is the // read-write flag. // The GMM runtime should expect to see call sequence similar to below: // cudaReference, ..., cudaReference, cudaConfigureCall, // cudaSetupArgument, ..., cudaSetupArgument, cudaLaunch // GMM_EXPORT cudaError_t cudaReference(int which_arg, int flags) { int i; gprint(DEBUG, "cudaReference: %d %x\n", which_arg, flags); if (!initialized) return cudaErrorInitializationError; if (which_arg < NREFS) { for (i = 0; i < nrefs; i++) { if (refs[i] == which_arg) break; } if (i == nrefs) { refs[nrefs] = which_arg; #ifdef GMM_CONFIG_RW rwflags[nrefs++] = flags; #else rwflags[nrefs++] = HINT_DEFAULT | (flags & HINT_PTARRAY) | HINT_PTADEFAULT; #endif } else { #ifdef GMM_CONFIG_RW rwflags[i] |= flags; #endif } } else { gprint(ERROR, "bad cudaReference argument %d (max %d)\n", \ which_arg, NREFS-1); return cudaErrorInvalidValue; } return cudaSuccess; }
/*
 * Interposed cudaFree: release through the GMM runtime when initialized,
 * otherwise pass straight through to the native CUDA runtime.
 */
GMM_EXPORT cudaError_t cudaFree(void *devPtr)
{
	if (!initialized) {
		gprint(WARN, "cudaFree called outside GMM\n");
		return nv_cudaFree(devPtr);
	}
	return gmm_cudaFree(devPtr);
}
/**
 * Display/re-render a viewport.
 *
 * NOTE(review): theScene/camList are function-local statics, so they are
 * captured on the FIRST call and never refreshed — if the root scene or
 * camera list is ever replaced at runtime, this renders stale pointers;
 * confirm that they are fixed for the process lifetime.
 */
void Engine::displayViewport( void ) {
  // Cached once on first invocation (see note above).
  static Scene *theScene = Engine::instance()->rootScene();
  static Cameras *camList = Engine::instance()->cams();

  if (glGetError()) {
    gprint( PRINT_ERROR, "true in displayViewport\n" );
  }

  // Per-application hook that runs before the scene is drawn.
  instance()->_displayExtension();

  //ensure all objects are lit the same
  // RAII lock: held for the remainder of this function, covering both
  // draw calls below.
  boost::mutex::scoped_lock stopFlashingItsAFellony(instance()->LifeLock);

  theScene->draw();
  camList->draw();
}
/*
 * Interposed cudaMemGetInfo: report free/total device memory as seen by
 * the GMM runtime when initialized, otherwise query the native runtime.
 */
GMM_EXPORT cudaError_t cudaMemGetInfo(size_t *free, size_t *total)
{
	if (!initialized) {
		gprint(WARN, "cudaMemGetInfo called outside GMM\n");
		return nv_cudaMemGetInfo(free, total);
	}
	return gmm_cudaMemGetInfo(free, total);
}
/*
 * Interposed cudaLaunch: launch through the GMM runtime (which performs
 * region attachment/pinning first) when initialized, otherwise pass
 * straight through to the native runtime.
 */
GMM_EXPORT cudaError_t cudaLaunch(const void *entry)
{
	if (!initialized) {
		gprint(WARN, "cudaLaunch called outside GMM\n");
		return nv_cudaLaunch(entry);
	}
	return gmm_cudaLaunch(entry);
}
/*
 * Interposed cudaMemset: fill device memory through the GMM runtime when
 * initialized, otherwise pass through to the native runtime.
 */
GMM_EXPORT cudaError_t cudaMemset(void * devPtr, int value, size_t count)
{
	if (!initialized) {
		gprint(WARN, "cudaMemset called outside GMM\n");
		return nv_cudaMemset(devPtr, value, count);
	}
	return gmm_cudaMemset(devPtr, value, count);
}
/*
 * Validate and normalize a variable index.
 *
 * On entry *j may carry the GLE_VAR_LOCAL_BIT flag. For a local variable,
 * the bit is stripped and *j is rewritten to the bare local index;
 * returns true only when that local index is valid. For a global (or any
 * invalid index) returns false; invalid indexes are clamped to 0 after
 * printing an error.
 *
 * NOTE(review): the "%d is not in 0-%d" messages print size() itself as
 * the upper bound (should arguably be size()-1), and size() is unsigned
 * while var is int — confirm gprint's handling before changing.
 */
bool GLEVars::check(int *j) {
	int var = *j;
	/* convert var index and return true if var is local */
	if (GLE_VAR_IS_LOCAL(var)) {
		// Strip the local-variable flag to get the raw index.
		var &= ~GLE_VAR_LOCAL_BIT;
		if (m_LocalMap == NULL) {
			gprint("No local variables assigned");
			*j = 0;
		} else if (var < 0 || var >= m_LocalMap->size() || var >= NUM_LOCAL) {
			gprint("Local variable index out of range: %d is not in 0-%d", var, m_LocalMap->size());
			*j = 0;
		} else {
			// Valid local: publish the bare index and report "local".
			*j = var;
			return true;
		}
	} else {
		if (var < 0 || var >= m_GlobalMap.size()) {
			gprint("Global variable index out of range: %d is not in 0-%d", var, m_GlobalMap.size());
			*j = 0;
		}
	}
	return false;
}
void gmm_dump_region( const char *filepath, struct region *r, const void *addr, size_t size) { void *temp; FILE *f; temp = malloc(size); if (!temp) { gprint(FATAL, "malloc failed for temp dump buffer: %s\n", \ strerror(errno)); return; } /*if (gmm_dtoh(r, temp, addr, size) < 0) { gprint(ERROR, "dtoh failed for region dump\n"); goto finish; }*/ f = fopen(filepath, "wb"); if (!f) { gprint(ERROR, "failed to open file (%s) for region dump\n", filepath); goto finish; } if (fwrite(temp, size, 1, f) == 0) { gprint(ERROR, "write error for region dump\n"); goto finish; } fclose(f); finish: free(temp); }
/**
 * Check whether the path formed by concatenating @where and @dir exists
 * and is a directory.
 *
 * @param where Base path (prefix).
 * @param dir   Directory name appended to @where.
 * @return true iff stat() succeeds on the combined path and it is a
 *         directory; false on stat failure or a non-directory entry.
 */
bool dirExists(const std::string& where, const std::string& dir) {
  // Bug fix: the stat buffer was 'static', making concurrent calls race
  // on shared state for no benefit; a plain local is correct.
  struct stat s;
  std::stringstream str;
  str << where << dir;
  gprint( PRINT_DEBUG, "Checking for existence of %s\n", str.str().c_str() );
  if (stat( str.str().c_str(), &s ) == -1) {
    return false;
  }
  // S_ISDIR yields nonzero for directories; collapse the old if/else chain.
  return S_ISDIR( s.st_mode ) != 0;
}
/*
 * Interposed cudaSetupArgument: stage a kernel argument through the GMM
 * runtime (so pointer hints from cudaReference can be applied) when
 * initialized, otherwise pass through to the native runtime.
 */
GMM_EXPORT cudaError_t cudaSetupArgument(
		const void *arg,
		size_t size,
		size_t offset)
{
	if (!initialized) {
		gprint(WARN, "cudaSetupArgument called outside GMM\n");
		return nv_cudaSetupArgument(arg, size, offset);
	}
	return gmm_cudaSetupArgument(arg, size, offset);
}
/*
 * Emit one frame of a genome "spun" (rotated) by blend*360 degrees.
 *
 * @param frame  Frame number; becomes the genome's time (and its name).
 * @param blend  Rotation fraction in [0,1] of a full turn.
 * @param parent Genome to rotate.
 * @param templ  Optional template genome applied to the result (may be NULL).
 *
 * Prints the resulting genome as XML (flam3's gprint) and frees everything
 * it allocated before returning.
 */
void spin(int frame, double blend, flam3_genome *parent, flam3_genome *templ)
{
   flam3_genome *result;
   char action[50];
   xmlDocPtr doc;

   /* Spin the parent blend*360 degrees */
   result = sheep_loop(parent,blend);

   /* Apply the template if necessary */
   if (templ)
      flam3_apply_template(result, templ);

   /* Set genome parameters accordingly */
   result->time = (double)frame;
   result->interpolation = flam3_interpolation_linear;
   result->palette_interpolation = flam3_palette_interpolation_hsv;

   /* Force linear interpolation - unsure if this is still necessary */
   /* I believe we put this in so that older clients could render frames */
   // result->interpolation_type = flam3_inttype_linear;

   /* Create the edit doc xml */
   sprintf(action,"rotate %g",blend*360.0);
   doc = create_new_editdoc(action, parent, (flam3_genome *)NULL);
   result->edits = doc;

   /* Subpixel jitter */
   offset(result);

   /* Make the name of the flame the time */
   sprintf(result->flame_name,"%f",result->time);

   /* Print the resulting xml (flam3's genome-printing gprint) */
   gprint(result, 1);

   /* Clear out the xml doc */
   xmlFreeDoc(result->edits);

   /* Clear the result cp */
   clear_cp(result,flam3_defaults_on);

   /* Free the cp allocated in flam3_sheep_loop */
   free(result);
}
/*
 * Interposed cudaMalloc: allocate through the GMM runtime (no hint flags)
 * when initialized, otherwise fall back to the native allocator.
 *
 * TODO: We may need to remember those device memory allocated before GMM
 * was initialized, so that later when they are used in cudaMemcpy or
 * other functions we can treat them specially.
 */
GMM_EXPORT cudaError_t cudaMalloc(void **devPtr, size_t size)
{
	if (!initialized) {
		gprint(WARN, "cudaMalloc called outside GMM\n");
		return nv_cudaMalloc(devPtr, size);
	}
	return gmm_cudaMalloc(devPtr, size, 0);
}
/*
 * Interposed cudaConfigureCall: record the pending kernel launch
 * configuration in the GMM runtime when initialized, otherwise pass
 * through to the native runtime.
 */
GMM_EXPORT cudaError_t cudaConfigureCall(
		dim3 gridDim,
		dim3 blockDim,
		size_t sharedMem,
		cudaStream_t stream)
{
	if (!initialized) {
		gprint(WARN, "cudaConfigureCall called outside GMM\n");
		return nv_cudaConfigureCall(gridDim, blockDim, sharedMem, stream);
	}
	return gmm_cudaConfigureCall(gridDim, blockDim, sharedMem, stream);
}
/** * Initialization: load and compile shaders, initialize camera(s), load models. */ void init() { Scene *rootScene = Engine::instance()->rootScene(); Object *model = rootScene->addObject( "Arbitrary Model" ); ObjLoader::loadModelFromFile( model, modelname ); Angel::vec4 min = model->getMin(); Angel::vec4 max = model->getMax(); gprint( PRINT_DEBUG, "Min: (%f,%f,%f)\nMax: (%f,%f,%f)\n", min.x, min.y, min.z, max.x, max.y, max.z ); model->_trans._offset.set( 0, -min.y, 0 ); model->propagateOLD(); model->buffer(); Object *floor = rootScene->addObject( "floor" ); quad( floor, Angel::vec4( -10, 0, 10, 1.0 ), Angel::vec4( -10, 0, -10, 1.0 ), Angel::vec4( 10, 0, -10, 1.0 ), Angel::vec4( 10, 0, 10, 1.0 ), Angel::vec4( 0.4, 0.4, 0.4, 0.9 ) ); floor->buffer(); }