static void
validate_reg_src(nir_src *src, validate_state *state)
{
   assert(src->reg.reg != NULL);

   struct hash_entry *entry =
      _mesa_hash_table_search(state->regs, src->reg.reg);
   assert(entry);

   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   if (state->instr) {
      _mesa_set_add(reg_state->uses, src);
   } else {
      assert(state->if_stmt);
      _mesa_set_add(reg_state->if_uses, src);
   }

   if (!src->reg.reg->is_global) {
      assert(reg_state->where_defined == state->impl &&
             "using a register declared in a different function");
   }

   assert((src->reg.reg->num_array_elems == 0 ||
           src->reg.base_offset < src->reg.reg->num_array_elems) &&
          "definitely out-of-bounds array access");

   if (src->reg.indirect) {
      assert(src->reg.reg->num_array_elems != 0);
      assert((src->reg.indirect->is_ssa ||
              src->reg.indirect->reg.indirect == NULL) &&
             "only one level of indirection allowed");
      validate_src(src->reg.indirect, state);
   }
}
static void
validate_ssa_src(nir_src *src, validate_state *state,
                 unsigned bit_size, unsigned num_components)
{
   validate_assert(state, src->ssa != NULL);

   struct hash_entry *entry =
      _mesa_hash_table_search(state->ssa_defs, src->ssa);

   validate_assert(state, entry);

   if (!entry)
      return;

   ssa_def_validate_state *def_state = (ssa_def_validate_state *) entry->data;

   validate_assert(state, def_state->where_defined == state->impl &&
                   "using an SSA value defined in a different function");

   if (state->instr) {
      _mesa_set_add(def_state->uses, src);
   } else {
      validate_assert(state, state->if_stmt);
      _mesa_set_add(def_state->if_uses, src);
   }

   if (bit_size)
      validate_assert(state, src->ssa->bit_size == bit_size);
   if (num_components)
      validate_assert(state, src->ssa->num_components == num_components);

   /* TODO validate that the use is dominated by the definition */
}
static void
add_src(nir_src *src, struct set *invariants)
{
   if (src->is_ssa) {
      _mesa_set_add(invariants, src->ssa);
   } else {
      _mesa_set_add(invariants, src->reg.reg);
   }
}
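/* A minimal sketch of how add_src might be driven (the wrapper below is
 * hypothetical, not from the source): nir_foreach_src expects a
 * bool (*)(nir_src *, void *) callback, so add_src needs a thin adapter
 * before it can be applied to all sources of an instruction.
 */
static bool
add_src_cb(nir_src *src, void *invariants)
{
   add_src(src, invariants);
   return true; /* keep iterating over the instruction's sources */
}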
static void
validate_reg_dest(nir_reg_dest *dest, validate_state *state)
{
   assert(dest->reg != NULL);

   assert(dest->parent_instr == state->instr);

   struct hash_entry *entry =
      _mesa_hash_table_search(state->regs, dest->reg);
   assert(entry);

   reg_validate_state *reg_state = (reg_validate_state *) entry->data;
   _mesa_set_add(reg_state->defs, dest);

   if (!dest->reg->is_global) {
      assert(reg_state->where_defined == state->impl &&
             "writing to a register declared in a different function");
   }

   assert((dest->reg->num_array_elems == 0 ||
           dest->base_offset < dest->reg->num_array_elems) &&
          "definitely out-of-bounds array access");

   if (dest->indirect) {
      assert(dest->reg->num_array_elems != 0);
      assert((dest->indirect->is_ssa ||
              dest->indirect->reg.indirect == NULL) &&
             "only one level of indirection allowed");
      validate_src(dest->indirect, state);
   }
}
static GLsync
fence_sync(struct gl_context *ctx, GLenum condition, GLbitfield flags)
{
   struct gl_sync_object *syncObj;

   syncObj = ctx->Driver.NewSyncObject(ctx);
   if (syncObj != NULL) {
      /* The name is not currently used, and it is never visible to
       * applications.  If sync support is extended to provide support for
       * NV_fence, this field will be used.  We'll also need to add an object
       * ID hashtable.
       */
      syncObj->Name = 1;
      syncObj->RefCount = 1;
      syncObj->DeletePending = GL_FALSE;
      syncObj->SyncCondition = condition;
      syncObj->Flags = flags;
      syncObj->StatusFlag = 0;

      ctx->Driver.FenceSync(ctx, syncObj, condition, flags);

      simple_mtx_lock(&ctx->Shared->Mutex);
      _mesa_set_add(ctx->Shared->SyncObjects, syncObj);
      simple_mtx_unlock(&ctx->Shared->Mutex);

      return (GLsync) syncObj;
   }

   return NULL;
}
virtual ir_visitor_status visit(ir_variable *ir)
{
   /* If the variable is auto or temp, add it to the set of variables that
    * are candidates for removal.
    */
   if (ir->data.mode != ir_var_auto && ir->data.mode != ir_var_temporary)
      return visit_continue;

   _mesa_set_add(variables, ir);

   return visit_continue;
}
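/* Sketch of how this visitor is driven (the class name below is hypothetical,
 * not from the source): as an ir_hierarchical_visitor, it is walked over a
 * shader's instruction list with run():
 *
 *    dead_variable_visitor v;
 *    v.run(shader->ir);
 *    // v's set now holds the ir_var_auto/ir_var_temporary candidates
 */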
static void
validate_ssa_src(nir_src *src, validate_state *state)
{
   assert(src->ssa != NULL);

   struct hash_entry *entry =
      _mesa_hash_table_search(state->ssa_defs, src->ssa);
   assert(entry);

   ssa_def_validate_state *def_state = (ssa_def_validate_state *) entry->data;

   assert(def_state->where_defined == state->impl &&
          "using an SSA value defined in a different function");

   if (state->instr) {
      _mesa_set_add(def_state->uses, src);
   } else {
      assert(state->if_stmt);
      _mesa_set_add(def_state->if_uses, src);
   }

   /* TODO validate that the use is dominated by the definition */
}
static void
register_store_instr(nir_intrinsic_instr *store_instr,
                     struct lower_variables_state *state)
{
   struct deref_node *node = get_deref_node(store_instr->variables[0], state);
   if (node == NULL)
      return;

   /* Create the per-node set of stores lazily, on first use. */
   if (node->stores == NULL)
      node->stores = _mesa_set_create(state->dead_ctx, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);

   _mesa_set_add(node->stores, store_instr);
}
GLsync GLAPIENTRY
_mesa_FenceSync(GLenum condition, GLbitfield flags)
{
   GET_CURRENT_CONTEXT(ctx);
   struct gl_sync_object *syncObj;
   ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, 0);

   if (condition != GL_SYNC_GPU_COMMANDS_COMPLETE) {
      _mesa_error(ctx, GL_INVALID_ENUM, "glFenceSync(condition=0x%x)",
                  condition);
      return 0;
   }

   if (flags != 0) {
      _mesa_error(ctx, GL_INVALID_VALUE, "glFenceSync(flags=0x%x)",
                  flags);
      return 0;
   }

   syncObj = ctx->Driver.NewSyncObject(ctx, GL_SYNC_FENCE);
   if (syncObj != NULL) {
      syncObj->Type = GL_SYNC_FENCE;
      /* The name is not currently used, and it is never visible to
       * applications.  If sync support is extended to provide support for
       * NV_fence, this field will be used.  We'll also need to add an object
       * ID hashtable.
       */
      syncObj->Name = 1;
      syncObj->RefCount = 1;
      syncObj->DeletePending = GL_FALSE;
      syncObj->SyncCondition = condition;
      syncObj->Flags = flags;
      syncObj->StatusFlag = 0;

      ctx->Driver.FenceSync(ctx, syncObj, condition, flags);

      _glthread_LOCK_MUTEX(ctx->Shared->Mutex);
      _mesa_set_add(ctx->Shared->SyncObjects,
                    _mesa_hash_pointer(syncObj), syncObj);
      _glthread_UNLOCK_MUTEX(ctx->Shared->Mutex);

      return (GLsync) syncObj;
   }

   return NULL;
}
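/* For context, a minimal application-side usage sketch of the entry point
 * above (standard GL sync API; the helper name is hypothetical): insert a
 * fence after some GL work, then block until the GPU has executed everything
 * queued before it.
 */
static void
wait_for_gpu_idle(void)
{
   GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
   glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
   glDeleteSync(sync);
}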
static void
register_copy_instr(nir_intrinsic_instr *copy_instr,
                    struct lower_variables_state *state)
{
   /* A copy writes variables[0] and reads variables[1], so record it with
    * the deref node for each of its two variables.
    */
   for (unsigned idx = 0; idx < 2; idx++) {
      struct deref_node *node =
         get_deref_node(copy_instr->variables[idx], state);
      if (node == NULL)
         continue;

      if (node->copies == NULL)
         node->copies = _mesa_set_create(state->dead_ctx, _mesa_hash_pointer,
                                         _mesa_key_pointer_equal);

      _mesa_set_add(node->copies, copy_instr);
   }
}
static bool
mark_indirect_uses_block(nir_block *block, void *void_state)
{
   struct set *indirect_set = void_state;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      for (unsigned i = 0;
           i < nir_intrinsic_infos[intrin->intrinsic].num_variables; i++) {
         if (deref_has_indirect(intrin->variables[i]))
            _mesa_set_add(indirect_set, intrin->variables[i]->var);
      }
   }

   return true;
}
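/* A minimal sketch of the call site (hypothetical wrapper, assuming the
 * callback-style nir_foreach_block iterator this code targets): walk every
 * block of the function and collect variables accessed through indirects.
 */
static void
mark_indirect_uses(nir_function_impl *impl, struct set *indirect_set)
{
   nir_foreach_block(impl, mark_indirect_uses_block, indirect_set);
}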
static GLintptr
register_surface(struct gl_context *ctx, GLboolean isOutput,
                 const GLvoid *vdpSurface, GLenum target,
                 GLsizei numTextureNames, const GLuint *textureNames)
{
   struct vdp_surface *surf;
   int i;

   if (!ctx->vdpDevice || !ctx->vdpGetProcAddress || !ctx->vdpSurfaces) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "VDPAURegisterSurfaceNV");
      return (GLintptr)NULL;
   }

   if (target != GL_TEXTURE_2D && target != GL_TEXTURE_RECTANGLE) {
      _mesa_error(ctx, GL_INVALID_ENUM, "VDPAURegisterSurfaceNV");
      return (GLintptr)NULL;
   }

   if (target == GL_TEXTURE_RECTANGLE &&
       !ctx->Extensions.NV_texture_rectangle) {
      _mesa_error(ctx, GL_INVALID_ENUM, "VDPAURegisterSurfaceNV");
      return (GLintptr)NULL;
   }

   surf = CALLOC_STRUCT( vdp_surface );
   if (surf == NULL) {
      _mesa_error_no_memory("VDPAURegisterSurfaceNV");
      return (GLintptr)NULL;
   }

   surf->vdpSurface = vdpSurface;
   surf->target = target;
   surf->access = GL_READ_WRITE;
   surf->state = GL_SURFACE_REGISTERED_NV;
   surf->output = isOutput;
   for (i = 0; i < numTextureNames; ++i) {
      struct gl_texture_object *tex;

      tex = _mesa_lookup_texture(ctx, textureNames[i]);
      if (tex == NULL) {
         free(surf);
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "VDPAURegisterSurfaceNV(texture ID not found)");
         return (GLintptr)NULL;
      }

      _mesa_lock_texture(ctx, tex);

      if (tex->Immutable) {
         _mesa_unlock_texture(ctx, tex);
         free(surf);
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "VDPAURegisterSurfaceNV(texture is immutable)");
         return (GLintptr)NULL;
      }

      if (tex->Target == 0)
         tex->Target = target;
      else if (tex->Target != target) {
         _mesa_unlock_texture(ctx, tex);
         free(surf);
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "VDPAURegisterSurfaceNV(target mismatch)");
         return (GLintptr)NULL;
      }

      /* This will disallow respecifying the storage. */
      tex->Immutable = GL_TRUE;
      _mesa_unlock_texture(ctx, tex);

      _mesa_reference_texobj(&surf->textures[i], tex);
   }

   _mesa_set_add(ctx->vdpSurfaces, surf);

   return (GLintptr)surf;
}
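/* Application-side counterpart, as a sketch (the wrapper name and parameter
 * names are placeholders; glVDPAURegisterOutputSurfaceNV is the public
 * NV_vdpau_interop entry point that is expected to land in register_surface
 * with isOutput = GL_TRUE).
 */
static GLvdpauSurfaceNV
register_one_output_surface(const GLvoid *vdpOutputSurface, GLuint texName)
{
   return glVDPAURegisterOutputSurfaceNV(vdpOutputSurface, GL_TEXTURE_2D,
                                         1, &texName);
}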
static void
add_var(nir_variable *var, struct set *invariants)
{
   _mesa_set_add(invariants, var);
}
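/* A minimal sketch of building the invariants set that add_var and add_src
 * fill in (the helper is hypothetical and mem_ctx is a placeholder ralloc
 * context): it is a plain pointer set, created the same way as the sets used
 * elsewhere in this code.
 */
static struct set *
create_invariants_set(void *mem_ctx)
{
   return _mesa_set_create(mem_ctx, _mesa_hash_pointer,
                           _mesa_key_pointer_equal);
}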