struct pipe_screen *
rbug_screen_create(struct pipe_screen *screen)
{
   struct rbug_screen *rb_screen;

   if (!debug_get_option_rbug())
      return screen;

   rb_screen = CALLOC_STRUCT(rbug_screen);
   if (!rb_screen)
      return screen;

   pipe_mutex_init(rb_screen->list_mutex);
   make_empty_list(&rb_screen->contexts);
   make_empty_list(&rb_screen->resources);
   make_empty_list(&rb_screen->surfaces);
   make_empty_list(&rb_screen->transfers);

   rb_screen->base.winsys = NULL;
   rb_screen->base.destroy = rbug_screen_destroy;
   rb_screen->base.get_name = rbug_screen_get_name;
   rb_screen->base.get_vendor = rbug_screen_get_vendor;
   rb_screen->base.get_param = rbug_screen_get_param;
   rb_screen->base.get_shader_param = rbug_screen_get_shader_param;
   rb_screen->base.get_paramf = rbug_screen_get_paramf;
   rb_screen->base.is_format_supported = rbug_screen_is_format_supported;
   rb_screen->base.context_create = rbug_screen_context_create;
   rb_screen->base.resource_create = rbug_screen_resource_create;
   rb_screen->base.resource_from_handle = rbug_screen_resource_from_handle;
   rb_screen->base.resource_get_handle = rbug_screen_resource_get_handle;
   rb_screen->base.resource_destroy = rbug_screen_resource_destroy;
   rb_screen->base.user_buffer_create = rbug_screen_user_buffer_create;
   rb_screen->base.flush_frontbuffer = rbug_screen_flush_frontbuffer;
   rb_screen->base.fence_reference = rbug_screen_fence_reference;
   rb_screen->base.fence_signalled = rbug_screen_fence_signalled;
   rb_screen->base.fence_finish = rbug_screen_fence_finish;

   rb_screen->screen = screen;

   rb_screen->private_context = screen->context_create(screen, NULL);
   if (!rb_screen->private_context)
      goto err_free;

   rb_screen->rbug = rbug_start(rb_screen);
   if (!rb_screen->rbug)
      goto err_context;

   return &rb_screen->base;

err_context:
   rb_screen->private_context->destroy(rb_screen->private_context);
err_free:
   FREE(rb_screen);
   return screen;
}
struct util_cache *
util_cache_create(uint32_t (*hash)(const void *key),
                  int (*compare)(const void *key1, const void *key2),
                  void (*destroy)(void *key, void *value),
                  uint32_t size)
{
   struct util_cache *cache;

   cache = CALLOC_STRUCT(util_cache);
   if (!cache)
      return NULL;

   cache->hash = hash;
   cache->compare = compare;
   cache->destroy = destroy;

   make_empty_list(&cache->lru);

   size *= CACHE_DEFAULT_ALPHA;
   cache->size = size;

   cache->entries = CALLOC(size, sizeof(struct util_cache_entry));
   if (!cache->entries) {
      FREE(cache);
      return NULL;
   }

   ensure_sanity(cache);

   return cache;
}
static r128TexObjPtr r128AllocTexObj( struct gl_texture_object *texObj )
{
   r128TexObjPtr t;

   if ( R128_DEBUG & DEBUG_VERBOSE_API ) {
      fprintf( stderr, "%s( %p )\n", __FUNCTION__, (void *) texObj );
   }

   t = (r128TexObjPtr) CALLOC_STRUCT( r128_tex_obj );
   texObj->DriverData = t;
   if ( t != NULL ) {
      /* Initialize non-image-dependent parts of the state:
       */
      t->base.tObj = texObj;

      /* FIXME Something here to set initial values for other parts of
       * FIXME t->setup?
       */

      make_empty_list( (driTextureObject *) t );

      r128SetTexWrap( t, texObj->WrapS, texObj->WrapT );
      r128SetTexFilter( t, texObj->MinFilter, texObj->MagFilter );
      r128SetTexBorderColor( t, texObj->BorderColor.f );
   }

   return t;
}
int
main(int argc, char **argv) {
    list_t *mylist;
    int i, find, *temp;

    mylist = make_empty_list();
    for (i = 0; i < 100; i++) {
        temp = malloc(sizeof(int));
        *temp = i;
        insert_at_foot(mylist, temp);
    }

    printf("Find what value?: ");
    scanf("%d", &find);

    if (is_list_element(mylist, &find)) {
        printf("Found.\n");
    } else {
        printf("Not Found.\n");
    }

    printlist(mylist);
    return 0;
}
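The helpers called above (make_empty_list, insert_at_foot, is_list_element, printlist) are defined elsewhere and not shown. As a purely illustrative aid, here is a minimal sketch of one way such a singly linked list with head and foot pointers could be written; the node layout, the void * payload convention, and the int comparison in is_list_element are assumptions made for this sketch, not the exercise's actual definitions.

/* Hypothetical sketch of the list helpers the program above assumes;
 * the real list_t and its functions may differ. */
#include <stdio.h>
#include <stdlib.h>

typedef struct node node_t;
struct node {
    void   *data;
    node_t *next;
};

typedef struct {
    node_t *head;   /* first element */
    node_t *foot;   /* last element, so insert_at_foot is O(1) */
} list_t;

list_t *make_empty_list(void) {
    list_t *list = malloc(sizeof(*list));
    list->head = list->foot = NULL;
    return list;
}

void insert_at_foot(list_t *list, void *data) {
    node_t *newnode = malloc(sizeof(*newnode));
    newnode->data = data;
    newnode->next = NULL;
    if (list->foot == NULL) {
        list->head = list->foot = newnode;
    } else {
        list->foot->next = newnode;
        list->foot = newnode;
    }
}

/* Assumes the stored payloads are int*, as in the program above. */
int is_list_element(list_t *list, int *value) {
    node_t *n;
    for (n = list->head; n != NULL; n = n->next)
        if (*(int *)n->data == *value)
            return 1;
    return 0;
}

void printlist(list_t *list) {
    node_t *n;
    for (n = list->head; n != NULL; n = n->next)
        printf("%d ", *(int *)n->data);
    printf("\n");
}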
static mgaTextureObjectPtr
mgaAllocTexObj( struct gl_texture_object *tObj )
{
   mgaTextureObjectPtr t;

   t = CALLOC( sizeof( *t ) );
   tObj->DriverData = t;
   if ( t != NULL ) {
      /* Initialize non-image-dependent parts of the state:
       */
      t->base.tObj = tObj;

      t->setup.texctl = TMC_takey_1 | TMC_tamask_0;
      t->setup.texctl2 = TMC_ckstransdis_enable;
      t->setup.texfilter = TF_filteralpha_enable | TF_uvoffset_OGL;

      t->border_fallback = GL_FALSE;
      t->texenv_fallback = GL_FALSE;

      make_empty_list( & t->base );

      mgaSetTexWrapping( t, tObj->WrapS, tObj->WrapT );
      mgaSetTexFilter( t, tObj->MinFilter, tObj->MagFilter );
      mgaSetTexBorderColor( t, tObj->_BorderChan );
   }

   return( t );
}
static r300TexObjPtr r300AllocTexObj(struct gl_texture_object *texObj)
{
   r300TexObjPtr t;

   t = CALLOC_STRUCT(r300_tex_obj);
   texObj->DriverData = t;
   if (t != NULL) {
      if (RADEON_DEBUG & DEBUG_TEXTURE) {
         fprintf(stderr, "%s( %p, %p )\n", __FUNCTION__,
                 (void *)texObj, (void *)t);
      }

      /* Initialize non-image-dependent parts of the state:
       */
      t->base.tObj = texObj;
      t->border_fallback = GL_FALSE;

      make_empty_list(&t->base);

      r300SetTexWrap(t, texObj->WrapS, texObj->WrapT, texObj->WrapR);
      r300SetTexMaxAnisotropy(t, texObj->MaxAnisotropy);
      r300SetTexFilter(t, texObj->MinFilter, texObj->MagFilter);
      r300SetTexBorderColor(t, texObj->_BorderChan);
   }

   return t;
}
static r128TexObjPtr r128AllocTexObj( struct gl_texture_object *texObj )
{
   r128TexObjPtr t;

   if ( R128_DEBUG & DEBUG_VERBOSE_API ) {
      fprintf( stderr, "%s( %p )\n", __FUNCTION__, texObj );
   }

   t = (r128TexObjPtr) CALLOC_STRUCT( r128_tex_obj );
   if (!t)
      return NULL;

   /* Initialize non-image-dependent parts of the state:
    */
   t->tObj = texObj;
   t->dirty_images = ~0;

   make_empty_list( t );

   r128SetTexWrap( t, texObj->WrapS, texObj->WrapT );
   /*r128SetTexMaxAnisotropy( t, texObj->MaxAnisotropy );*/
   r128SetTexFilter( t, texObj->MinFilter, texObj->MagFilter );
   r128SetTexBorderColor( t, texObj->BorderColor );

   return t;
}
static radeonTexObjPtr radeonAllocTexObj( struct gl_texture_object *texObj )
{
   radeonTexObjPtr t;

   t = CALLOC_STRUCT( radeon_tex_obj );
   texObj->DriverData = t;
   if ( t != NULL ) {
      if ( RADEON_DEBUG & DEBUG_TEXTURE ) {
         fprintf( stderr, "%s( %p, %p )\n", __FUNCTION__,
                  (void *)texObj, (void *)t );
      }

      /* Initialize non-image-dependent parts of the state:
       */
      t->base.tObj = texObj;
      t->border_fallback = GL_FALSE;

      t->pp_txfilter = RADEON_BORDER_MODE_OGL;
      t->pp_txformat = (RADEON_TXFORMAT_ENDIAN_NO_SWAP |
                        RADEON_TXFORMAT_PERSPECTIVE_ENABLE);

      make_empty_list( & t->base );

      radeonSetTexWrap( t, texObj->WrapS, texObj->WrapT );
      radeonSetTexMaxAnisotropy( t, texObj->MaxAnisotropy );
      radeonSetTexFilter( t, texObj->MinFilter, texObj->MagFilter );
      radeonSetTexBorderColor( t, texObj->BorderColor );
   }

   return t;
}
/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
void radeonSetUpAtomList( r100ContextPtr rmesa )
{
   int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->radeon.hw.atomlist);
   rmesa->radeon.hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.tex[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.txr[i]);
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.stp);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->radeon.hw.atomlist, &rmesa->hw.glt);
}
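radeonSetUpAtomList and the similar setup functions below rely on sentinel-based intrusive list macros in the style of Mesa's simple_list.h: make_empty_list() points a sentinel node at itself, insert_at_tail() links an element in just before the sentinel, and iteration walks next pointers starting from the sentinel, so traversal (and hence state emission) follows insertion order exactly. A simplified sketch of such macros, paraphrased rather than quoted from the actual header, is shown here.

/* Sketch only: simplified versions of the simple_list.h-style macros used
 * by the radeon/r200 code above.  Each element embeds its own next/prev
 * pointers, and the list head is a sentinel of the same shape. */
struct simple_node {
   struct simple_node *next;
   struct simple_node *prev;
};

/* Point the sentinel at itself: the list is empty. */
#define make_empty_list(sentinel)        \
do {                                     \
   (sentinel)->next = (sentinel);        \
   (sentinel)->prev = (sentinel);        \
} while (0)

/* Link elem in just before the sentinel, i.e. at the tail. */
#define insert_at_tail(list, elem)       \
do {                                     \
   (elem)->next = (list);                \
   (elem)->prev = (list)->prev;          \
   (list)->prev->next = (elem);          \
   (list)->prev = (elem);                \
} while (0)

/* Walk the list in insertion order; this is why the atoms are emitted in
 * exactly the order they were inserted above. */
#define foreach(ptr, list) \
   for ((ptr) = (list)->next; (ptr) != (list); (ptr) = (ptr)->next)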
intelTextureObjectPtr i830AllocTexObj( struct gl_texture_object *texObj )
{
   i830TextureObjectPtr t = CALLOC_STRUCT( i830_texture_object );
   if ( !t )
      return NULL;

   texObj->DriverData = t;
   t->intel.base.tObj = texObj;
   t->intel.dirty = I830_UPLOAD_TEX_ALL;
   make_empty_list( &t->intel.base );

   t->Setup[I830_TEXREG_TM0LI] = 0; /* not used */
   t->Setup[I830_TEXREG_TM0S0] = 0;
   t->Setup[I830_TEXREG_TM0S1] = 0;
   t->Setup[I830_TEXREG_TM0S2] = 0;
   t->Setup[I830_TEXREG_TM0S3] = 0;
   t->Setup[I830_TEXREG_MCS] = (_3DSTATE_MAP_COORD_SET_CMD |
                                MAP_UNIT(0) |
                                ENABLE_TEXCOORD_PARAMS |
                                TEXCOORDS_ARE_NORMAL |
                                TEXCOORDTYPE_CARTESIAN |
                                ENABLE_ADDR_V_CNTL |
                                TEXCOORD_ADDR_V_MODE(TEXCOORDMODE_WRAP) |
                                ENABLE_ADDR_U_CNTL |
                                TEXCOORD_ADDR_U_MODE(TEXCOORDMODE_WRAP));

   i830SetTexWrapping( t, texObj->WrapS, texObj->WrapT );
   i830SetTexFilter( t, texObj->MinFilter, texObj->MagFilter,
                     texObj->MaxAnisotropy );
   i830SetTexBorderColor( t, texObj->_BorderChan );

   return &t->intel;
}
static mach64TexObjPtr mach64AllocTexObj( struct gl_texture_object *texObj )
{
   mach64TexObjPtr t;

   if ( MACH64_DEBUG & DEBUG_VERBOSE_API )
      fprintf( stderr, "%s( %p )\n", __FUNCTION__, texObj );

   t = (mach64TexObjPtr) CALLOC_STRUCT( mach64_texture_object );
   texObj->DriverData = t;
   if ( !t )
      return NULL;

   /* Initialize non-image-dependent parts of the state:
    */
   t->base.tObj = texObj;
   t->base.dirty_images[0] = (1 << 0);

   t->bufAddr = 0;

   make_empty_list( (driTextureObject *) t );

   mach64SetTexWrap( t, texObj->WrapS, texObj->WrapT );
   mach64SetTexFilter( t, texObj->MinFilter, texObj->MagFilter );
   mach64SetTexBorderColor( t, texObj->BorderColor.f );

   return t;
}
struct pipe_screen *
vc4_screen_create(int fd)
{
        struct vc4_screen *screen = rzalloc(NULL, struct vc4_screen);
        struct pipe_screen *pscreen;

        pscreen = &screen->base;

        pscreen->destroy = vc4_screen_destroy;
        pscreen->get_param = vc4_screen_get_param;
        pscreen->get_paramf = vc4_screen_get_paramf;
        pscreen->get_shader_param = vc4_screen_get_shader_param;
        pscreen->context_create = vc4_context_create;
        pscreen->is_format_supported = vc4_screen_is_format_supported;

        screen->fd = fd;
        make_empty_list(&screen->bo_cache.time_list);

        vc4_fence_init(screen);

        vc4_debug = debug_get_option_vc4_debug();
        if (vc4_debug & VC4_DEBUG_SHADERDB)
                vc4_debug |= VC4_DEBUG_NORAST;

#if USE_VC4_SIMULATOR
        vc4_simulator_init(screen);
#endif

        vc4_resource_screen_init(pscreen);

        pscreen->get_name = vc4_screen_get_name;
        pscreen->get_vendor = vc4_screen_get_vendor;

        return pscreen;
}
lisp_object_t let_vals_aux(lisp_object_t bindings)
{
    if (is_null(bindings))
        return make_empty_list();
    else {
        lisp_object_t first = pair_car(bindings);
        return make_pair(pair_cadr(first),
                         let_vals_aux(pair_cdr(bindings)));
    }
}
SchemeObject* SchemeObjectCreator::make_tagged_list(
        std::string tag,
        SchemeObject* obj)
{
    return make_pair(
        make_symbol(tag),
        make_pair(obj, make_empty_list())
    );
}
SchemeObject* SchemeObjectCreator::make_list(
        const std::vector<SchemeObject*>& objects)
{
    SchemeObject* list = make_empty_list();

    /* Walk the vector back to front so the resulting cons chain
     * preserves the original order. */
    for (auto iter = objects.crbegin(); iter != objects.crend(); ++iter) {
        list = make_pair(*iter, list);
    }
    return list;
}
void r200SetUpAtomList( r200ContextPtr rmesa )
{
   int i, mtu;

   mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->radeon.hw.atomlist);
   rmesa->radeon.hw.atomlist.name = "atom-list";

   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.ctx );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.set );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.lin );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.msk );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.vpt );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.vtx );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.vap );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.vte );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.msc );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.cst );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.zbs );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.tcl );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.msl );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.tcg );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.grd );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.fog );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.tam );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.tf );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.atf );
   for (i = 0; i < mtu; ++i)
      insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.tex[i] );
   for (i = 0; i < mtu; ++i)
      insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.cube[i] );
   for (i = 0; i < 6; ++i)
      insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.pix[i] );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.afs[0] );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.afs[1] );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.stp );
   for (i = 0; i < 8; ++i)
      insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.lit[i] );
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.mat[i] );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.eye );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.glt );
   for (i = 0; i < 2; ++i)
      insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.mtl[i] );
   for (i = 0; i < 6; ++i)
      insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.ucp[i] );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.spr );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.ptp );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.prf );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.pvs );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.vpp[0] );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.vpp[1] );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.vpi[0] );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.vpi[1] );
   insert_at_tail_if( &rmesa->radeon.hw.atomlist, &rmesa->hw.sci );
}
static LIRCChannelData* lirc_channel_new_metadata()
{
    LIRCChannelData* channel = safe_malloc(sizeof(LIRCChannelData));

    /* Initialize default values for new channel */
    channel->name = NULL;
    channel->client_list = make_empty_list();

    return channel;
}
intelTextureObjectPtr i915AllocTexObj( struct gl_texture_object *texObj )
{
   i915TextureObjectPtr t = CALLOC_STRUCT( i915_texture_object );
   if ( !t )
      return NULL;

   texObj->DriverData = t;
   t->intel.base.tObj = texObj;
   t->intel.dirty = I915_UPLOAD_TEX_ALL;
   make_empty_list( &t->intel.base );

   return &t->intel;
}
static LIRCClientData* lirc_client_new_metadata()
{
    LIRCClientData* client = safe_malloc(sizeof(LIRCClientData));

    /* Initialize default values for the new client */
    client->socket = 0;
    client->nick = NULL;
    client->user = "******";
    client->channel_list = make_empty_list();

    return client;
}
struct pipe_context *brw_create_context(struct pipe_screen *screen,
                                        void *priv)
{
   struct brw_context *brw = (struct brw_context *) CALLOC_STRUCT(brw_context);

   if (!brw) {
      debug_printf("%s: failed to alloc context\n", __FUNCTION__);
      return NULL;
   }

   brw->base.screen = screen;
   brw->base.priv = priv;
   brw->base.destroy = brw_destroy_context;
   brw->sws = brw_screen(screen)->sws;
   brw->chipset = brw_screen(screen)->chipset;

   brw_pipe_blend_init( brw );
   brw_pipe_depth_stencil_init( brw );
   brw_pipe_framebuffer_init( brw );
   brw_pipe_flush_init( brw );
   brw_pipe_misc_init( brw );
   brw_pipe_query_init( brw );
   brw_pipe_rast_init( brw );
   brw_pipe_sampler_init( brw );
   brw_pipe_shader_init( brw );
   brw_pipe_vertex_init( brw );
   brw_pipe_clear_init( brw );

   brw_hw_cc_init( brw );

   brw_init_state( brw );
   brw_draw_init( brw );

   brw->state.dirty.mesa = ~0;
   brw->state.dirty.brw = ~0;

   brw->flags.always_emit_state = 0;

   make_empty_list(&brw->query.active_head);

   brw->batch = brw_batchbuffer_alloc( brw->sws, brw->chipset );
   if (brw->batch == NULL)
      goto fail;

   return &brw->base;

fail:
   if (brw->batch)
      brw_batchbuffer_free( brw->batch );
   return NULL;
}
/**
 * \brief Allocate a Radeon texture object.
 *
 * \param texObj texture object.
 *
 * \return pointer to the device specific texture object on success, or NULL
 * on failure.
 *
 * Allocates and initializes a radeon_tex_obj structure to connect it to the
 * driver private data pointer in \p texObj.
 */
static radeonTexObjPtr radeonAllocTexObj( struct gl_texture_object *texObj )
{
   radeonTexObjPtr t;

   t = CALLOC_STRUCT( radeon_tex_obj );
   if (!t)
      return NULL;

   t->tObj = texObj;
   texObj->DriverData = t;
   make_empty_list( t );
   t->dirty_images = ~0;

   return t;
}
static i810TextureObjectPtr
i810AllocTexObj( GLcontext *ctx, struct gl_texture_object *texObj )
{
   i810TextureObjectPtr t;
   i810ContextPtr imesa = I810_CONTEXT(ctx);

   t = CALLOC_STRUCT( i810_texture_object_t );
   texObj->DriverData = t;
   if ( t != NULL ) {
      GLfloat bias = ctx->Texture.Unit[ctx->Texture.CurrentUnit].LodBias;

      /* Initialize non-image-dependent parts of the state:
       */
      t->base.tObj = texObj;

      t->Setup[I810_TEXREG_MI0] = GFX_OP_MAP_INFO;
      t->Setup[I810_TEXREG_MI1] = MI1_MAP_0;
      t->Setup[I810_TEXREG_MI2] = MI2_DIMENSIONS_ARE_LOG2;
      t->Setup[I810_TEXREG_MLC] = (GFX_OP_MAP_LOD_CTL |
                                   MLC_MAP_0 |
                                   /*MLC_DITHER_WEIGHT_FULL |*/
                                   MLC_DITHER_WEIGHT_12 |
                                   MLC_UPDATE_LOD_BIAS |
                                   0x0);
      t->Setup[I810_TEXREG_MCS] = (GFX_OP_MAP_COORD_SETS |
                                   MCS_COORD_0 |
                                   MCS_UPDATE_NORMALIZED |
                                   MCS_NORMALIZED_COORDS |
                                   MCS_UPDATE_V_STATE |
                                   MCS_V_WRAP |
                                   MCS_UPDATE_U_STATE |
                                   MCS_U_WRAP);
      t->Setup[I810_TEXREG_MF] = (GFX_OP_MAP_FILTER |
                                  MF_MAP_0 |
                                  MF_UPDATE_ANISOTROPIC |
                                  MF_UPDATE_MIP_FILTER |
                                  MF_UPDATE_MAG_FILTER |
                                  MF_UPDATE_MIN_FILTER);

      make_empty_list( & t->base );

      i810SetTexWrapping( t, texObj->WrapS, texObj->WrapT );
      /*i830SetTexMaxAnisotropy( t, texObj->MaxAnisotropy );*/
      i810SetTexFilter( imesa, t, texObj->MinFilter, texObj->MagFilter, bias );
      i810SetTexBorderColor( t, texObj->_BorderChan );
   }

   return t;
}
viaTextureObjectPtr viaAllocTextureObject(struct gl_texture_object *texObj)
{
    viaTextureObjectPtr t;

    t = (viaTextureObjectPtr)CALLOC_STRUCT(via_texture_object_t);
    if (!t)
        return NULL;

    /* Initialize non-image-dependent parts of the state:
     */
    t->bufAddr = NULL;
    t->dirtyImages = ~0;
    t->actualLevel = 0;
    t->globj = texObj;

    make_empty_list(t);

    return t;
}
static void *
llvmpipe_create_fs_state(struct pipe_context *pipe,
                         const struct pipe_shader_state *templ)
{
   struct lp_fragment_shader *shader;
   int nr_samplers;

   shader = CALLOC_STRUCT(lp_fragment_shader);
   if (!shader)
      return NULL;

   shader->no = fs_no++;
   make_empty_list(&shader->variants);

   /* get/save the summary info for this shader */
   tgsi_scan_shader(templ->tokens, &shader->info);

   /* we need to keep a local copy of the tokens */
   shader->base.tokens = tgsi_dup_tokens(templ->tokens);

   nr_samplers = shader->info.file_max[TGSI_FILE_SAMPLER] + 1;

   shader->variant_key_size = Offset(struct lp_fragment_shader_variant_key,
                                     sampler[nr_samplers]);

   if (LP_DEBUG & DEBUG_TGSI) {
      unsigned attrib;
      debug_printf("llvmpipe: Create fragment shader #%u %p:\n",
                   shader->no, (void *) shader);
      tgsi_dump(templ->tokens, 0);
      debug_printf("usage masks:\n");
      for (attrib = 0; attrib < shader->info.num_inputs; ++attrib) {
         unsigned usage_mask = shader->info.input_usage_mask[attrib];
         debug_printf(" IN[%u].%s%s%s%s\n",
                      attrib,
                      usage_mask & TGSI_WRITEMASK_X ? "x" : "",
                      usage_mask & TGSI_WRITEMASK_Y ? "y" : "",
                      usage_mask & TGSI_WRITEMASK_Z ? "z" : "",
                      usage_mask & TGSI_WRITEMASK_W ? "w" : "");
      }
      debug_printf("\n");
   }

   return shader;
}
void r600InitAtoms(context_t *context)
{
   radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %p\n", __func__, context);
   context->radeon.hw.max_state_size = 10 + 5 + 14; /* start 3d, idle, cb/db flush */

   /* Setup the atom linked list */
   make_empty_list(&context->radeon.hw.atomlist);
   context->radeon.hw.atomlist.name = "atom-list";

   ALLOC_STATE(sq, always, 34, r700SendSQConfig);
   ALLOC_STATE(db, always, 17, r700SendDBState);
   ALLOC_STATE(stencil, always, 4, r700SendStencilState);
   ALLOC_STATE(db_target, always, 12, r700SendDepthTargetState);
   ALLOC_STATE(sc, always, 15, r700SendSCState);
   ALLOC_STATE(scissor, always, 22, r700SendScissorState);
   ALLOC_STATE(aa, always, 12, r700SendAAState);
   ALLOC_STATE(cl, always, 12, r700SendCLState);
   ALLOC_STATE(gb, always, 6, r700SendGBState);
   ALLOC_STATE(ucp, ucp, (R700_MAX_UCP * 6), r700SendUCPState);
   ALLOC_STATE(su, always, 9, r700SendSUState);
   ALLOC_STATE(poly, always, 10, r700SendPolyState);
   ALLOC_STATE(cb, cb, 18, r700SendCBState);
   ALLOC_STATE(clrcmp, always, 6, r700SendCBCLRCMPState);
   ALLOC_STATE(cb_target, always, 29, r700SendRenderTargetState);
   ALLOC_STATE(blnd, blnd, (6 + (R700_MAX_RENDER_TARGETS * 3)), r700SendCBBlendState);
   ALLOC_STATE(blnd_clr, always, 6, r700SendCBBlendColorState);
   ALLOC_STATE(sx, always, 9, r700SendSXState);
   ALLOC_STATE(vgt, always, 41, r700SendVGTState);
   ALLOC_STATE(spi, always, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
   ALLOC_STATE(vpt, always, 16, r700SendViewportState);
   ALLOC_STATE(fs, always, 18, r700SendFSState);
   ALLOC_STATE(vs, always, 21, r700SendVSState);
   ALLOC_STATE(ps, always, 24, r700SendPSState);
   ALLOC_STATE(vs_consts, vs_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendVSConsts);
   ALLOC_STATE(ps_consts, ps_consts, (2 + (R700_MAX_DX9_CONSTS * 4)), r700SendPSConsts);
   ALLOC_STATE(vtx, vtx, (6 + (VERT_ATTRIB_MAX * 18)), r700SendVTXState);
   ALLOC_STATE(tx, tx, (R700_TEXTURE_NUMBERUNITS * 20), r700SendTexState);
   ALLOC_STATE(tx_smplr, tx, (R700_TEXTURE_NUMBERUNITS * 5), r700SendTexSamplerState);
   ALLOC_STATE(tx_brdr_clr, tx, (R700_TEXTURE_NUMBERUNITS * 6), r700SendTexBorderColorState);

   r600_init_query_stateobj(&context->radeon, 6 * 2);

   context->radeon.hw.is_dirty = GL_TRUE;
   context->radeon.hw.all_dirty = GL_TRUE;
}
int
main(int argc, char **argv) {
    list_t *mylist;
    int i, find;

    mylist = make_empty_list();
    for (i = 100; i > 0; i--) {
        mylist = insert_in_order(mylist, i);
    }

    printf("Insert what value?: ");
    scanf("%d", &find);
    mylist = insert_in_order(mylist, find);

    printlist(mylist);
    return 0;
}
void _tnl_init_c_codegen( struct tnl_clipspace_codegen *p )
{
   p->emit_header = print_header;
   p->emit_footer = print_footer;
   p->emit_attr_header = print_attr_header;
   p->emit_attr_footer = print_attr_footer;
   p->emit_mov = print_mov;
   p->emit_const = print_const;
   p->emit_mad = print_mad;
   p->emit_float_to_chan = print_float_to_chan;
   p->emit_const_chan = print_const_chan;
   p->emit_float_to_ubyte = print_float_to_ubyte;
   p->emit_const_ubyte = print_const_ubyte;
   p->emit_store_func = print_store_func;

   make_empty_list(&p->codegen_list);

   p->buf_size = 2048;
   p->buf = (char *) MALLOC(p->buf_size);
}
static radeonTexObjPtr radeonAllocTexObj( struct gl_texture_object *texObj )
{
   radeonTexObjPtr t;

   t = CALLOC_STRUCT( radeon_tex_obj );
   if (!t)
      return NULL;

   if ( RADEON_DEBUG & DEBUG_TEXTURE ) {
      fprintf( stderr, "%s( %p, %p )\n", __FUNCTION__, texObj, t );
   }

   t->tObj = texObj;
   make_empty_list( t );

   /* Initialize non-image-dependent parts of the state:
    */
   radeonSetTexWrap( t, texObj->WrapS, texObj->WrapT );
   radeonSetTexMaxAnisotropy( t, texObj->MaxAnisotropy );
   radeonSetTexFilter( t, texObj->MinFilter, texObj->MagFilter );
   radeonSetTexBorderColor( t, texObj->BorderColor );

   return t;
}
/* Create the device specific context.
 */
GLboolean r128CreateContext( const __GLcontextModes *glVisual,
                             __DRIcontextPrivate *driContextPriv,
                             void *sharedContextPrivate )
{
   GLcontext *ctx, *shareCtx;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   struct dd_function_table functions;
   r128ContextPtr rmesa;
   r128ScreenPtr r128scrn;
   int i;

   /* Allocate the r128 context */
   rmesa = (r128ContextPtr) CALLOC( sizeof(*rmesa) );
   if ( !rmesa )
      return GL_FALSE;

   /* Init default driver functions then plug in our Radeon-specific functions
    * (the texture functions are especially important)
    */
   _mesa_init_driver_functions( &functions );
   r128InitDriverFuncs( &functions );
   r128InitIoctlFuncs( &functions );
   r128InitTextureFuncs( &functions );

   /* Allocate the Mesa context */
   if (sharedContextPrivate)
      shareCtx = ((r128ContextPtr) sharedContextPrivate)->glCtx;
   else
      shareCtx = NULL;

   rmesa->glCtx = _mesa_create_context(glVisual, shareCtx, &functions,
                                       (void *) rmesa);
   if (!rmesa->glCtx) {
      FREE(rmesa);
      return GL_FALSE;
   }
   driContextPriv->driverPrivate = rmesa;
   ctx = rmesa->glCtx;

   rmesa->driContext = driContextPriv;
   rmesa->driScreen = sPriv;
   rmesa->driDrawable = NULL;
   rmesa->hHWContext = driContextPriv->hHWContext;
   rmesa->driHwLock = &sPriv->pSAREA->lock;
   rmesa->driFd = sPriv->fd;

   r128scrn = rmesa->r128Screen = (r128ScreenPtr)(sPriv->private);

   /* Parse configuration files */
   driParseConfigFiles (&rmesa->optionCache, &r128scrn->optionCache,
                        r128scrn->driScreen->myNum, "r128");

   rmesa->sarea = (drm_r128_sarea_t *)((char *)sPriv->pSAREA +
                                       r128scrn->sarea_priv_offset);

   rmesa->CurrentTexObj[0] = NULL;
   rmesa->CurrentTexObj[1] = NULL;

   (void) memset( rmesa->texture_heaps, 0, sizeof( rmesa->texture_heaps ) );
   make_empty_list( & rmesa->swapped );

   rmesa->nr_heaps = r128scrn->numTexHeaps;
   for ( i = 0 ; i < rmesa->nr_heaps ; i++ ) {
      rmesa->texture_heaps[i] = driCreateTextureHeap( i, rmesa,
            r128scrn->texSize[i],
            12,
            R128_NR_TEX_REGIONS,
            (drmTextureRegionPtr)rmesa->sarea->tex_list[i],
            &rmesa->sarea->tex_age[i],
            &rmesa->swapped,
            sizeof( r128TexObj ),
            (destroy_texture_object_t *) r128DestroyTexObj );

      driSetTextureSwapCounterLocation( rmesa->texture_heaps[i],
                                        & rmesa->c_textureSwaps );
   }
   rmesa->texture_depth = driQueryOptioni (&rmesa->optionCache,
                                           "texture_depth");
   if (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FB)
      rmesa->texture_depth = ( r128scrn->cpp == 4 ) ?
         DRI_CONF_TEXTURE_DEPTH_32 : DRI_CONF_TEXTURE_DEPTH_16;

   rmesa->RenderIndex = -1;   /* Impossible value */
   rmesa->vert_buf = NULL;
   rmesa->num_verts = 0;
   RENDERINPUTS_ONES( rmesa->tnl_state_bitset );

   /* Set the maximum texture size small enough that we can guarantee that
    * all texture units can bind a maximal texture and have them both in
    * texturable memory at once.
    */
   ctx->Const.MaxTextureUnits = 2;
   ctx->Const.MaxTextureImageUnits = 2;
   ctx->Const.MaxTextureCoordUnits = 2;

   driCalculateMaxTextureLevels( rmesa->texture_heaps,
                                 rmesa->nr_heaps,
                                 & ctx->Const,
                                 4,
                                 10,   /* max 2D texture size is 1024x1024 */
                                 0,    /* 3D textures unsupported. */
                                 0,    /* cube textures unsupported. */
                                 0,    /* texture rectangles unsupported. */
                                 11,
                                 GL_FALSE,
                                 0 );

   /* No wide points.
    */
   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 1.0;
   ctx->Const.MaxPointSizeAA = 1.0;

   /* No wide lines.
    */
   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 1.0;
   ctx->Const.MaxLineWidthAA = 1.0;
   ctx->Const.LineWidthGranularity = 1.0;

   ctx->Const.MaxDrawBuffers = 1;

#if ENABLE_PERF_BOXES
   rmesa->boxes = driQueryOptionb(&rmesa->optionCache, "performance_boxes");
#endif

   /* Initialize the software rasterizer and helper modules.
    */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );

   /* Install the customized pipeline:
    */
/*    _tnl_destroy_pipeline( ctx ); */
/*    _tnl_install_pipeline( ctx, r128_pipeline ); */

   /* Configure swrast and T&L to match hardware characteristics:
    */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );
   _tnl_allow_pixel_fog( ctx, GL_FALSE );
   _tnl_allow_vertex_fog( ctx, GL_TRUE );

   driInitExtensions( ctx, card_extensions, GL_TRUE );
   if (sPriv->drm_version.minor >= 4)
      _mesa_enable_extension( ctx, "GL_MESA_ycbcr_texture" );

   r128InitTriFuncs( ctx );
   r128DDInitStateFuncs( ctx );
   r128DDInitSpanFuncs( ctx );
   r128DDInitState( rmesa );

   driContextPriv->driverPrivate = (void *)rmesa;

#if DO_DEBUG
   R128_DEBUG = driParseDebugString( getenv( "R128_DEBUG" ),
                                     debug_control );
#endif

   if (driQueryOptionb(&rmesa->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D acceleration\n");
      FALLBACK(rmesa, R128_FALLBACK_DISABLE, 1);
   }

   return GL_TRUE;
}
struct draw_geometry_shader *
draw_create_geometry_shader(struct draw_context *draw,
                            const struct pipe_shader_state *state)
{
#ifdef HAVE_LLVM
   boolean use_llvm = draw_get_option_use_llvm();
   struct llvm_geometry_shader *llvm_gs;
#endif
   struct draw_geometry_shader *gs;
   unsigned i;

#ifdef HAVE_LLVM
   if (use_llvm) {
      llvm_gs = CALLOC_STRUCT(llvm_geometry_shader);

      if (llvm_gs == NULL)
         return NULL;

      gs = &llvm_gs->base;

      make_empty_list(&llvm_gs->variants);
   } else
#endif
   {
      gs = CALLOC_STRUCT(draw_geometry_shader);
   }

   if (!gs)
      return NULL;

   gs->draw = draw;
   gs->state = *state;
   gs->state.tokens = tgsi_dup_tokens(state->tokens);
   if (!gs->state.tokens) {
      FREE(gs);
      return NULL;
   }

   tgsi_scan_shader(state->tokens, &gs->info);

   /* setup the defaults */
   gs->input_primitive = PIPE_PRIM_TRIANGLES;
   gs->output_primitive = PIPE_PRIM_TRIANGLE_STRIP;
   gs->max_output_vertices = 32;
   gs->max_out_prims = 0;

#ifdef HAVE_LLVM
   if (use_llvm) {
      /* TODO: change the input array to handle the following
         vector length, instead of the currently hardcoded
         TGSI_NUM_CHANNELS
      gs->vector_length = lp_native_vector_width / 32;*/
      gs->vector_length = TGSI_NUM_CHANNELS;
   } else
#endif
   {
      gs->vector_length = 1;
   }

   for (i = 0; i < gs->info.num_properties; ++i) {
      if (gs->info.properties[i].name == TGSI_PROPERTY_GS_INPUT_PRIM)
         gs->input_primitive = gs->info.properties[i].data[0];
      else if (gs->info.properties[i].name == TGSI_PROPERTY_GS_OUTPUT_PRIM)
         gs->output_primitive = gs->info.properties[i].data[0];
      else if (gs->info.properties[i].name == TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES)
         gs->max_output_vertices = gs->info.properties[i].data[0];
   }

   /* Primitive boundary is bigger than max_output_vertices by one, because
    * the specification says that the geometry shader should exit if the
    * number of emitted vertices is bigger or equal to max_output_vertices
    * and we can't do that because we're running in the SoA mode, which means
    * that our storing routines will keep getting called on channels that
    * have overflown.
    * So we need some scratch area where we can keep writing the overflown
    * vertices without overwriting anything important or crashing.
    */
   gs->primitive_boundary = gs->max_output_vertices + 1;

   for (i = 0; i < gs->info.num_outputs; i++) {
      if (gs->info.output_semantic_name[i] == TGSI_SEMANTIC_POSITION &&
          gs->info.output_semantic_index[i] == 0)
         gs->position_output = i;
      if (gs->info.output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX)
         gs->viewport_index_output = i;
      if (gs->info.output_semantic_name[i] == TGSI_SEMANTIC_CLIPDIST) {
         debug_assert(gs->info.output_semantic_index[i] <
                      PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
         gs->clipdistance_output[gs->info.output_semantic_index[i]] = i;
      }
      if (gs->info.output_semantic_name[i] == TGSI_SEMANTIC_CULLDIST) {
         debug_assert(gs->info.output_semantic_index[i] <
                      PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
         gs->culldistance_output[gs->info.output_semantic_index[i]] = i;
      }
   }

   gs->machine = draw->gs.tgsi.machine;

#ifdef HAVE_LLVM
   if (use_llvm) {
      int vector_size = gs->vector_length * sizeof(float);
      gs->gs_input = align_malloc(sizeof(struct draw_gs_inputs), 16);
      memset(gs->gs_input, 0, sizeof(struct draw_gs_inputs));
      gs->llvm_prim_lengths = 0;

      gs->llvm_emitted_primitives = align_malloc(vector_size, vector_size);
      gs->llvm_emitted_vertices = align_malloc(vector_size, vector_size);
      gs->llvm_prim_ids = align_malloc(vector_size, vector_size);

      gs->fetch_outputs = llvm_fetch_gs_outputs;
      gs->fetch_inputs = llvm_fetch_gs_input;
      gs->prepare = llvm_gs_prepare;
      gs->run = llvm_gs_run;

      gs->jit_context = &draw->llvm->gs_jit_context;

      llvm_gs->variant_key_size =
         draw_gs_llvm_variant_key_size(
            MAX2(gs->info.file_max[TGSI_FILE_SAMPLER]+1,
                 gs->info.file_max[TGSI_FILE_SAMPLER_VIEW]+1));
   } else
#endif
   {
      gs->fetch_outputs = tgsi_fetch_gs_outputs;
      gs->fetch_inputs = tgsi_fetch_gs_input;
      gs->prepare = tgsi_gs_prepare;
      gs->run = tgsi_gs_run;
   }

   return gs;
}