/* Tear down a vc4 pipe_screen.
 *
 * The buffer manager is shut down first, while the screen object is still
 * valid, and then the ralloc'd screen itself (plus anything allocated off
 * of it) is released in one call.
 */
static void
vc4_screen_destroy(struct pipe_screen *pscreen)
{
   vc4_bufmgr_destroy(pscreen);
   ralloc_free(pscreen);
}
/**
 * DRI entry point: create and initialize an i965 (brw) rendering context.
 *
 * On success returns true; the context is wired into driContextPriv by
 * intelInitContext().  On failure returns false and stores a
 * __DRI_CTX_ERROR_* code in *error, after freeing the partially-built
 * context.
 */
bool
brwCreateContext(int api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 unsigned *error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct intel_screen *screen = sPriv->driverPrivate;
   struct dd_function_table functions;

   /* brw is the root ralloc context; freeing it releases everything
    * allocated as a child of it.
    */
   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      /* NOTE(review): message goes to stdout via printf; stderr would be
       * more conventional for an error path.
       */
      printf("%s: failed to alloc context\n", __FUNCTION__);
      *error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   /* brwInitVtbl needs to know the chipset generation so that it can set the
    * right pointers.
    */
   brw->gen = screen->gen;
   brwInitVtbl( brw );

   brwInitDriverFunctions(screen, &functions);

   struct gl_context *ctx = &brw->ctx;

   if (!intelInitContext( brw, api, major_version, minor_version,
                          mesaVis, driContextPriv,
                          sharedContextPrivate, &functions,
                          error)) {
      ralloc_free(brw);
      return false;
   }

   brw_initialize_context_constants(brw);

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);
      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         ralloc_free(brw);
         return false;
      }
   }

   brw_init_surface_formats(brw);

   /* Initialize swrast, tnl driver tables: */
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   if (tnl)
      tnl->Driver.RunPipeline = _tnl_run_pipeline;

   /* Tell core Mesa which driver dirty-flag to raise for these state
    * changes.
    */
   ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD;
   ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER;

   /* Per-generation command opcodes and hardware feature flags. */
   if (brw->is_g4x || brw->gen >= 5) {
      brw->CMD_VF_STATISTICS = GM45_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45;
      brw->has_surface_tile_offset = true;
      if (brw->gen < 6)
         brw->has_compr4 = true;
      brw->has_aa_line_parameters = true;
      brw->has_pln = true;
   } else {
      brw->CMD_VF_STATISTICS = GEN4_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_965;
   }

   /* WM maximum threads is number of EUs times number of threads per EU. */
   assert(brw->gen <= 7);

   /* Per-GT thread limits and URB sizing tables. */
   if (brw->is_haswell) {
      if (brw->gt == 1) {
         brw->max_wm_threads = 102;
         brw->max_vs_threads = 70;
         brw->urb.size = 128;
         brw->urb.max_vs_entries = 640;
         brw->urb.max_gs_entries = 256;
      } else if (brw->gt == 2) {
         brw->max_wm_threads = 204;
         brw->max_vs_threads = 280;
         brw->urb.size = 256;
         brw->urb.max_vs_entries = 1664;
         brw->urb.max_gs_entries = 640;
      } else if (brw->gt == 3) {
         brw->max_wm_threads = 408;
         brw->max_vs_threads = 280;
         brw->urb.size = 512;
         brw->urb.max_vs_entries = 1664;
         brw->urb.max_gs_entries = 640;
      }
   } else if (brw->gen == 7) {
      if (brw->gt == 1) {
         brw->max_wm_threads = 48;
         brw->max_vs_threads = 36;
         brw->max_gs_threads = 36;
         brw->urb.size = 128;
         brw->urb.max_vs_entries = 512;
         brw->urb.max_gs_entries = 192;
      } else if (brw->gt == 2) {
         brw->max_wm_threads = 172;
         brw->max_vs_threads = 128;
         brw->max_gs_threads = 128;
         brw->urb.size = 256;
         brw->urb.max_vs_entries = 704;
         brw->urb.max_gs_entries = 320;
      } else {
         assert(!"Unknown gen7 device.");
      }
   } else if (brw->gen == 6) {
      if (brw->gt == 2) {
         brw->max_wm_threads = 80;
         brw->max_vs_threads = 60;
         brw->max_gs_threads = 60;
         brw->urb.size = 64;            /* volume 5c.5 section 5.1 */
         brw->urb.max_vs_entries = 256; /* volume 2a (see 3DSTATE_URB) */
         brw->urb.max_gs_entries = 256;
      } else {
         brw->max_wm_threads = 40;
         brw->max_vs_threads = 24;
         brw->max_gs_threads = 21; /* conservative; 24 if rendering disabled */
         brw->urb.size = 32;            /* volume 5c.5 section 5.1 */
         brw->urb.max_vs_entries = 256; /* volume 2a (see 3DSTATE_URB) */
         brw->urb.max_gs_entries = 256;
      }
      brw->urb.gen6_gs_previously_active = false;
   } else if (brw->gen == 5) {
      brw->urb.size = 1024;
      brw->max_vs_threads = 72;
      brw->max_gs_threads = 32;
      brw->max_wm_threads = 12 * 6;
   } else if (brw->is_g4x) {
      brw->urb.size = 384;
      brw->max_vs_threads = 32;
      brw->max_gs_threads = 2;
      brw->max_wm_threads = 10 * 5;
   } else if (brw->gen < 6) {
      brw->urb.size = 256;
      brw->max_vs_threads = 16;
      brw->max_gs_threads = 2;
      brw->max_wm_threads = 8 * 4;
      brw->has_negative_rhw_bug = true;
   }

   if (brw->gen <= 7) {
      brw->needs_unlit_centroid_workaround = true;
   }

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;

   brw_init_state( brw );

   /* NOTE(review): these calloc results are not checked; an allocation
    * failure here would be dereferenced later.
    */
   brw->curbe.last_buf = calloc(1, 4096);
   brw->curbe.next_buf = calloc(1, 4096);

   /* Mark all state dirty so the first draw emits everything. */
   brw->state.dirty.mesa = ~0;
   brw->state.dirty.brw = ~0;

   brw->emit_state_always = 0;

   brw->batch.need_workaround_flush = true;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   /* Translate the DRI context-creation flags into GL context flags. */
   ctx->Const.ContextFlags = 0;
   if ((flags & __DRI_CTX_FLAG_FORWARD_COMPATIBLE) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT;
   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_DEBUG_BIT;

      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   brw_fs_alloc_reg_sets(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   return true;
}
/**
 * Compile the geometry shader in NIR form and upload the resulting machine
 * code plus its prog_data into the program cache.
 *
 * Returns false (with the failure appended to the shader's InfoLog) if the
 * backend compile fails, true otherwise.
 */
static bool
brw_codegen_gs_prog(struct brw_context *brw,
                    struct brw_program *gp,
                    struct brw_gs_prog_key *key)
{
   struct brw_compiler *compiler = brw->screen->compiler;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_prog_data prog_data;
   bool start_busy = false;
   double start_time = 0;

   memset(&prog_data, 0, sizeof(prog_data));

   assign_gs_binding_table_offsets(devinfo, &gp->program, &prog_data);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    *
    * Note: param_count needs to be num_uniform_components * 4, since we add
    * padding around uniform values below vec4 size, so the worst case is that
    * every uniform is a float which gets padded to the size of a vec4.
    */
   int param_count = gp->program.nir->num_uniforms / 4;

   /* NOTE(review): these arrays are allocated with a NULL ralloc parent and
    * handed to the cache on success; on the compile-failure path below they
    * appear to leak — verify against the state-cache ownership rules.
    */
   prog_data.base.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.base.image_param =
      rzalloc_array(NULL, struct brw_image_param,
                    gp->program.info.num_images);
   prog_data.base.base.nr_params = param_count;
   prog_data.base.base.nr_image_params = gp->program.info.num_images;

   brw_nir_setup_glsl_uniforms(gp->program.nir, &gp->program,
                               &prog_data.base.base,
                               compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
   brw_nir_analyze_ubo_ranges(compiler, gp->program.nir,
                              prog_data.base.base.ubo_ranges);

   uint64_t outputs_written = gp->program.info.outputs_written;

   brw_compute_vue_map(devinfo,
                       &prog_data.base.vue_map, outputs_written,
                       gp->program.info.separate_shader);

   int st_index = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      st_index = brw_get_shader_time_index(brw, &gp->program, ST_GS, true);

   /* Snapshot GPU-busy state and wall time so we can report compiles that
    * stalled rendering.
    */
   if (unlikely(brw->perf_debug)) {
      start_busy = brw->batch.last_bo && brw_bo_busy(brw->batch.last_bo);
      start_time = get_time();
   }

   /* All compile-time temporaries live under mem_ctx and are released in
    * one ralloc_free below.
    */
   void *mem_ctx = ralloc_context(NULL);

   unsigned program_size;
   char *error_str;
   const unsigned *program =
      brw_compile_gs(brw->screen->compiler, brw, mem_ctx, key,
                     &prog_data, gp->program.nir, &gp->program,
                     st_index, &program_size, &error_str);
   if (program == NULL) {
      ralloc_strcat(&gp->program.sh.data->InfoLog, error_str);
      _mesa_problem(NULL, "Failed to compile geometry shader: %s\n",
                    error_str);

      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug)) {
      if (gp->compiled_once) {
         brw_gs_debug_recompile(brw, &gp->program, key);
      }
      if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
         perf_debug("GS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
      gp->compiled_once = true;
   }

   /* Scratch space is used for register spilling */
   brw_alloc_stage_scratch(brw, stage_state,
                           prog_data.base.base.total_scratch,
                           devinfo->max_gs_threads);

   brw_upload_cache(&brw->cache, BRW_CACHE_GS_PROG,
                    key, sizeof(*key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &stage_state->prog_offset, &brw->gs.base.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
/* Tear down the visitor: destroy the function hash table first, then free
 * the ralloc context that owns the visitor's temporary allocations.
 */
~has_recursion_visitor()
{
   hash_table_dtor(function_hash);
   ralloc_free(mem_ctx);
}
/** * All Mesa program -> GPU code generation goes through this function. * Depending on the instructions used (i.e. flow control instructions) * we'll use one of two code generators. */ bool do_wm_prog(struct brw_context *brw, struct gl_shader_program *prog, struct brw_fragment_program *fp, struct brw_wm_prog_key *key) { struct brw_wm_compile *c; const GLuint *program; struct gl_shader *fs = NULL; GLuint program_size; if (prog) fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]; c = rzalloc(NULL, struct brw_wm_compile); /* Allocate the references to the uniforms that will end up in the * prog_data associated with the compiled program, and which will be freed * by the state cache. */ int param_count; if (fs) { param_count = fs->num_uniform_components; } else { param_count = fp->program.Base.Parameters->NumParameters * 4; } /* The backend also sometimes adds params for texture size. */ param_count += 2 * BRW_MAX_TEX_UNIT; c->prog_data.param = rzalloc_array(NULL, const float *, param_count); c->prog_data.pull_param = rzalloc_array(NULL, const float *, param_count); memcpy(&c->key, key, sizeof(*key)); c->prog_data.barycentric_interp_modes = brw_compute_barycentric_interp_modes(brw, c->key.flat_shade, &fp->program); program = brw_wm_fs_emit(brw, c, &fp->program, prog, &program_size); if (program == NULL) return false; /* Scratch space is used for register spilling */ if (c->last_scratch) { perf_debug("Fragment shader triggered register spilling. " "Try reducing the number of live scalar values to " "improve performance.\n"); c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch); brw_get_scratch_bo(brw, &brw->wm.scratch_bo, c->prog_data.total_scratch * brw->max_wm_threads); } if (unlikely(INTEL_DEBUG & DEBUG_WM)) fprintf(stderr, "\n"); brw_upload_cache(&brw->cache, BRW_WM_PROG, &c->key, sizeof(c->key), program, program_size, &c->prog_data, sizeof(c->prog_data), &brw->wm.prog_offset, &brw->wm.prog_data); ralloc_free(c); return true; }
/* Driver hook for shader object deletion: the shader and everything
 * ralloc'd beneath it are released in a single call.
 */
static void
DeleteShader(struct gl_context *ctx, struct gl_shader *shader)
{
   (void) ctx; /* unused by this implementation */
   ralloc_free(shader);
}
/**
 * Attempt to fold one ALU instruction whose sources are all load_const:
 * evaluate it at compile time, replace it with a new load_const carrying
 * the result, and remove the original instruction.
 *
 * Returns true if the instruction was folded, false if any source is not
 * a constant (or not SSA).
 */
static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
   nir_const_value src[NIR_MAX_VEC_COMPONENTS];

   if (!instr->dest.dest.is_ssa)
      return false;

   /* In the case that any outputs/inputs have unsized types, then we need to
    * guess the bit-size. In this case, the validator ensures that all
    * bit-sizes match so we can just take the bit-size from first
    * output/input with an unsized type. If all the outputs/inputs are sized
    * then we don't need to guess the bit-size at all because the code we
    * generate for constant opcodes in this case already knows the sizes of
    * the types involved and does not need the provided bit-size for anything
    * (although it still requires to receive a valid bit-size).
    */
   unsigned bit_size = 0;
   if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type))
      bit_size = instr->dest.dest.ssa.bit_size;

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      if (!instr->src[i].src.is_ssa)
         return false;

      if (bit_size == 0 &&
          !nir_alu_type_get_type_size(nir_op_infos[instr->op].input_sizes[i])) {
         bit_size = instr->src[i].src.ssa->bit_size;
      }

      nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

      /* Folding only applies when every source is a constant. */
      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr* load_const = nir_instr_as_load_const(src_instr);

      /* Gather the (swizzled) constant components at the source's bit size. */
      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
           j++) {
         switch(load_const->def.bit_size) {
         case 64:
            src[i].u64[j] = load_const->value.u64[instr->src[i].swizzle[j]];
            break;
         case 32:
            src[i].u32[j] = load_const->value.u32[instr->src[i].swizzle[j]];
            break;
         case 16:
            src[i].u16[j] = load_const->value.u16[instr->src[i].swizzle[j]];
            break;
         case 8:
            src[i].u8[j] = load_const->value.u8[instr->src[i].swizzle[j]];
            break;
         default:
            unreachable("Invalid bit size");
         }
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!instr->src[i].abs && !instr->src[i].negate);
   }

   if (bit_size == 0)
      bit_size = 32;

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!instr->dest.saturate);

   nir_const_value dest =
      nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
                            bit_size, src);

   /* Splice in a load_const carrying the computed value and retarget all
    * users of the old ALU result at it.
    */
   nir_load_const_instr *new_instr =
      nir_load_const_instr_create(mem_ctx,
                                  instr->dest.dest.ssa.num_components,
                                  instr->dest.dest.ssa.bit_size);

   new_instr->value = dest;

   nir_instr_insert_before(&instr->instr, &new_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                            nir_src_for_ssa(&new_instr->def));

   nir_instr_remove(&instr->instr);
   ralloc_free(instr);

   return true;
}
/* Destructor: a single ralloc_free of the context reclaims every
 * allocation this program made under it.
 */
brw_blorp_const_color_program::~brw_blorp_const_color_program()
{
   ralloc_free(mem_ctx);
}
/* Destructor: all temporary name strings were allocated under mem_ctx,
 * so freeing the context reclaims them all at once.
 */
~get_sampler_name()
{
   ralloc_free(mem_ctx);
}
/* Destructor: release the ralloc'd character storage owned by this
 * string_buffer.
 */
~string_buffer()
{
   ralloc_free(m_Ptr);
}
/**
 * Compile a user geometry shader, size its URB entry and control-data
 * header, and upload the result into the program cache.
 *
 * Returns false if compilation fails or the computed output size exceeds
 * the hardware's maximum URB entry size.
 */
static bool
do_gs_prog(struct brw_context *brw,
           struct gl_shader_program *prog,
           struct brw_geometry_program *gp,
           struct brw_gs_prog_key *key)
{
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_compile c;
   memset(&c, 0, sizeof(c));
   c.key = *key;
   c.gp = gp;

   c.prog_data.include_primitive_id =
      (gp->program.Base.InputsRead & VARYING_BIT_PRIMITIVE_ID) != 0;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    *
    * Note: param_count needs to be num_uniform_components * 4, since we add
    * padding around uniform values below vec4 size, so the worst case is that
    * every uniform is a float which gets padded to the size of a vec4.
    */
   struct gl_shader *gs = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   int param_count = gs->num_uniform_components * 4;

   /* We also upload clip plane data as uniforms */
   param_count += MAX_CLIP_PLANES * 4;

   /* NOTE(review): allocated with a NULL ralloc parent; the early
    * `return false` below (URB size overflow) appears to leak these —
    * verify against the state-cache ownership rules.
    */
   c.prog_data.base.param = rzalloc_array(NULL, const float *, param_count);
   c.prog_data.base.pull_param =
      rzalloc_array(NULL, const float *, param_count);

   if (gp->program.OutputType == GL_POINTS) {
      /* When the output type is points, the geometry shader may output data
       * to multiple streams, and EndPrimitive() has no effect.  So we
       * configure the hardware to interpret the control data as stream ID.
       */
      c.prog_data.control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID;

      /* However, StreamID is not yet supported, so we output zero bits of
       * control data per vertex.
       */
      c.control_data_bits_per_vertex = 0;
   } else {
      /* When the output type is triangle_strip or line_strip, EndPrimitive()
       * may be used to terminate the current strip and start a new one
       * (similar to primitive restart), and outputting data to multiple
       * streams is not supported.  So we configure the hardware to interpret
       * the control data as EndPrimitive information (a.k.a. "cut bits").
       */
      c.prog_data.control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT;

      /* We only need to output control data if the shader actually calls
       * EndPrimitive().
       */
      c.control_data_bits_per_vertex =
         gp->program.UsesEndPrimitive ? 1 : 0;
   }
   c.control_data_header_size_bits =
      gp->program.VerticesOut * c.control_data_bits_per_vertex;

   /* 1 HWORD = 32 bytes = 256 bits */
   c.prog_data.control_data_header_size_hwords =
      ALIGN(c.control_data_header_size_bits, 256) / 256;

   GLbitfield64 outputs_written = gp->program.Base.OutputsWritten;

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (c.key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw, &c.prog_data.base.vue_map, outputs_written);

   /* Compute the output vertex size.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 STATE_GS - Output Vertex
    * Size (p168):
    *
    *     [0,62] indicating [1,63] 16B units
    *
    *     Specifies the size of each vertex stored in the GS output entry
    *     (following any Control Header data) as a number of 128-bit units
    *     (minus one).
    *
    *     Programming Restrictions: The vertex size must be programmed as a
    *     multiple of 32B units with the following exception: Rendering is
    *     disabled (as per SOL stage state) and the vertex size output by the
    *     GS thread is 16B.
    *
    *     If rendering is enabled (as per SOL state) the vertex size must be
    *     programmed as a multiple of 32B units. In other words, the only
    *     time software can program a vertex size with an odd number of 16B
    *     units is when rendering is disabled.
    *
    * Note: B=bytes in the above text.
    *
    * It doesn't seem worth the extra trouble to optimize the case where the
    * vertex size is 16B (especially since this would require special-casing
    * the GEN assembly that writes to the URB).  So we just set the vertex
    * size to a multiple of 32B (2 vec4's) in all cases.
    *
    * The maximum output vertex size is 62*16 = 992 bytes (31 hwords).  We
    * budget that as follows:
    *
    *   512 bytes for varyings (a varying component is 4 bytes and
    *             gl_MaxGeometryOutputComponents = 128)
    *    16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *             bytes)
    *    16 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *             even if it's not used)
    *    32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *             whenever clip planes are enabled, even if the shader doesn't
    *             write to gl_ClipDistance)
    *    16 bytes overhead since the VUE size must be a multiple of 32 bytes
    *             (see below)--this causes up to 1 VUE slot to be wasted
    *   400 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
    * per interpolation type, so this is plenty.
    */
   unsigned output_vertex_size_bytes = c.prog_data.base.vue_map.num_slots * 16;
   assert(output_vertex_size_bytes <= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
   c.prog_data.output_vertex_size_hwords =
      ALIGN(output_vertex_size_bytes, 32) / 32;

   /* Compute URB entry size.  The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     64 bytes for the control data header (cut indices or StreamID bits)
    *   4096 bytes for varyings (a varying component is 4 bytes and
    *              gl_MaxGeometryTotalOutputComponents = 1024)
    *   4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *              bytes/vertex and gl_MaxGeometryOutputVertices is 256)
    *   4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *              even if it's not used)
    *   8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *              whenever clip planes are enabled, even if the shader doesn't
    *              write to gl_ClipDistance)
    *   4096 bytes overhead since the VUE size must be a multiple of 32
    *              bytes (see above)--this causes up to 1 VUE slot to be wasted
    *   8128 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot per
    * interpolation type, which works out to 3072 bytes, so this would allow
    * us to accommodate 2 interpolation types without any danger of running
    * out of URB space.
    *
    * In practice, the risk of running out of URB space is very small, since
    * the above figures are all worst-case, and most of them scale with the
    * number of output vertices.  So we'll just calculate the amount of space
    * we need, and if it's too large, fail to compile.
    */
   unsigned output_size_bytes =
      c.prog_data.output_vertex_size_hwords * 32 * gp->program.VerticesOut;
   output_size_bytes += 32 * c.prog_data.control_data_header_size_hwords;

   assert(output_size_bytes >= 1);
   if (output_size_bytes > GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES)
      return false;

   /* URB entry sizes are stored as a multiple of 64 bytes. */
   c.prog_data.base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;

   c.prog_data.output_topology = prim_to_hw_prim[gp->program.OutputType];

   brw_compute_vue_map(brw, &c.input_vue_map, c.key.input_varyings);

   /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
    * need to program a URB read length of ceiling(num_slots / 2).
    */
   c.prog_data.base.urb_read_length = (c.input_vue_map.num_slots + 1) / 2;

   void *mem_ctx = ralloc_context(NULL);
   unsigned program_size;
   const unsigned *program =
      brw_gs_emit(brw, prog, &c, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Geometry shader triggered register spilling. "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");
      c.prog_data.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);
      brw_get_scratch_bo(brw, &stage_state->scratch_bo,
                         c.prog_data.base.total_scratch * brw->max_gs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_GS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
                    &c.prog_data, sizeof(c.prog_data),
                    &stage_state->prog_offset, &brw->gs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
/* Dispose of a WM compile object; everything ralloc'd beneath it goes
 * with it.
 */
void
brw_wm_clear_compile(struct brw_context *brw, struct brw_wm_compile *c)
{
   (void) brw; /* unused; kept for API symmetry with other stage helpers */
   ralloc_free(c);
}
/**
 * All Mesa program -> GPU code generation goes through this function.
 * Depending on the instructions used (i.e. flow control instructions)
 * we'll use one of two code generators.
 *
 * Returns true and uploads the compiled fragment program (plus its
 * prog_data) to the cache on success; false if the backend fails.
 */
bool do_wm_prog(struct brw_context *brw,
                struct gl_shader_program *prog,
                struct brw_fragment_program *fp,
                struct brw_wm_prog_key *key)
{
   struct gl_context *ctx = &brw->ctx;
   /* mem_ctx owns the compile-time temporaries; freed on every return
    * path below.
    */
   void *mem_ctx = ralloc_context(NULL);
   struct brw_wm_prog_data prog_data;
   const GLuint *program;
   struct gl_shader *fs = NULL;
   GLuint program_size;

   if (prog)
      fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   memset(&prog_data, 0, sizeof(prog_data));

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (fs) {
      param_count = fs->num_uniform_components;
   } else {
      /* Assembly programs: 4 floats per parameter. */
      param_count = fp->program.Base.Parameters->NumParameters * 4;
   }
   /* The backend also sometimes adds params for texture size. */
   param_count += 2 * ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits;

   /* NOTE(review): allocated with a NULL ralloc parent; ownership passes
    * to the state cache on success, but the failure path below does not
    * reclaim them — verify against the cache's contract.
    */
   prog_data.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.nr_params = param_count;

   prog_data.barycentric_interp_modes =
      brw_compute_barycentric_interp_modes(brw, key->flat_shade,
                                           key->persample_shading,
                                           &fp->program);

   program = brw_wm_fs_emit(brw, mem_ctx, key, &prog_data,
                            &fp->program, prog, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling. */
   if (prog_data.total_scratch) {
      brw_get_scratch_bo(brw, &brw->wm.base.scratch_bo,
                         prog_data.total_scratch * brw->max_wm_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      fprintf(stderr, "\n");

   brw_upload_cache(&brw->cache, BRW_WM_PROG,
                    key, sizeof(struct brw_wm_prog_key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->wm.base.prog_offset, &brw->wm.prog_data);

   ralloc_free(mem_ctx);

   return true;
}
static void compile_gs_prog( struct brw_context *brw, struct brw_gs_prog_key *key ) { struct brw_gs_compile c; const GLuint *program; void *mem_ctx; GLuint program_size; memset(&c, 0, sizeof(c)); c.key = *key; c.vue_map = brw->vs.prog_data->base.vue_map; c.nr_regs = (c.vue_map.num_slots + 1)/2; mem_ctx = ralloc_context(NULL); /* Begin the compilation: */ brw_init_compile(brw, &c.func, mem_ctx); c.func.single_program_flow = 1; /* For some reason the thread is spawned with only 4 channels * unmasked. */ brw_set_mask_control(&c.func, BRW_MASK_DISABLE); if (brw->gen >= 6) { unsigned num_verts; bool check_edge_flag; /* On Sandybridge, we use the GS for implementing transform feedback * (called "Stream Out" in the PRM). */ switch (key->primitive) { case _3DPRIM_POINTLIST: num_verts = 1; check_edge_flag = false; break; case _3DPRIM_LINELIST: case _3DPRIM_LINESTRIP: case _3DPRIM_LINELOOP: num_verts = 2; check_edge_flag = false; break; case _3DPRIM_TRILIST: case _3DPRIM_TRIFAN: case _3DPRIM_TRISTRIP: case _3DPRIM_RECTLIST: num_verts = 3; check_edge_flag = false; break; case _3DPRIM_QUADLIST: case _3DPRIM_QUADSTRIP: case _3DPRIM_POLYGON: num_verts = 3; check_edge_flag = true; break; default: assert(!"Unexpected primitive type in Gen6 SOL program."); return; } gen6_sol_program(&c, key, num_verts, check_edge_flag); } else { /* On Gen4-5, we use the GS to decompose certain types of primitives. * Note that primitives which don't require a GS program have already * been weeded out by now. 
*/ switch (key->primitive) { case _3DPRIM_QUADLIST: brw_gs_quads( &c, key ); break; case _3DPRIM_QUADSTRIP: brw_gs_quad_strip( &c, key ); break; case _3DPRIM_LINELOOP: brw_gs_lines( &c ); break; default: ralloc_free(mem_ctx); return; } } /* get the program */ program = brw_get_program(&c.func, &program_size); if (unlikely(INTEL_DEBUG & DEBUG_GS)) { int i; printf("gs:\n"); for (i = 0; i < program_size / sizeof(struct brw_instruction); i++) brw_disasm(stdout, &((struct brw_instruction *)program)[i], brw->gen); printf("\n"); } brw_upload_cache(&brw->cache, BRW_GS_PROG, &c.key, sizeof(c.key), program, program_size, &c.prog_data, sizeof(c.prog_data), &brw->gs.prog_offset, &brw->gs.prog_data); ralloc_free(mem_ctx); }
/**
 * Select (and lazily build) the GLSL program used for MSAA blits: either
 * a per-sample copy (MSAA destination) or an averaging/depth resolve.
 * Compiled programs are cached in blit->msaa_shaders[] keyed by the
 * computed shader_index; on a cache hit we just bind and return.
 */
static void
setup_glsl_msaa_blit_shader(struct gl_context *ctx,
                            struct blit_state *blit,
                            const struct gl_framebuffer *drawFb,
                            struct gl_renderbuffer *src_rb,
                            GLenum target)
{
   const char *vs_source;
   char *fs_source;
   void *mem_ctx;
   enum blit_msaa_shader shader_index;
   bool dst_is_msaa = false;
   GLenum src_datatype;
   const char *vec4_prefix;
   const char *sampler_array_suffix = "";
   char *name;
   const char *texcoord_type = "vec2";
   int samples;
   int shader_offset = 0;

   if (src_rb) {
      samples = MAX2(src_rb->NumSamples, 1);
      src_datatype = _mesa_get_format_datatype(src_rb->Format);
   } else {
      /* depth-or-color glCopyTexImage fallback path that passes a NULL rb and
       * doesn't handle integer.
       */
      samples = 1;
      src_datatype = GL_UNSIGNED_NORMALIZED;
   }

   /* We expect only power of 2 samples in source multisample buffer. */
   assert(samples > 0 && _mesa_is_pow_two(samples));
   /* shader_offset = log2(samples); selects the per-sample-count variant. */
   while (samples >> (shader_offset + 1)) {
      shader_offset++;
   }
   /* Update the assert if we plan to support more than 16X MSAA. */
   assert(shader_offset >= 0 && shader_offset <= 4);

   if (drawFb->Visual.samples > 1) {
      /* If you're calling meta_BlitFramebuffer with the destination
       * multisampled, this is the only path that will work -- swrast and
       * CopyTexImage won't work on it either.
       */
      assert(ctx->Extensions.ARB_sample_shading);

      dst_is_msaa = true;

      /* We need shader invocation per sample, not per pixel */
      _mesa_set_enable(ctx, GL_MULTISAMPLE, GL_TRUE);
      _mesa_set_enable(ctx, GL_SAMPLE_SHADING, GL_TRUE);
      _mesa_MinSampleShading(1.0);
   }

   switch (target) {
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      if (src_rb && (src_rb->_BaseFormat == GL_DEPTH_COMPONENT ||
          src_rb->_BaseFormat == GL_DEPTH_STENCIL)) {
         if (dst_is_msaa)
            shader_index = BLIT_MSAA_SHADER_2D_MULTISAMPLE_DEPTH_COPY;
         else
            shader_index = BLIT_MSAA_SHADER_2D_MULTISAMPLE_DEPTH_RESOLVE;
      } else {
         if (dst_is_msaa)
            shader_index = BLIT_MSAA_SHADER_2D_MULTISAMPLE_COPY;
         else {
            shader_index = BLIT_1X_MSAA_SHADER_2D_MULTISAMPLE_RESOLVE +
                           shader_offset;
         }
      }

      if (target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
         shader_index += (BLIT_1X_MSAA_SHADER_2D_MULTISAMPLE_ARRAY_RESOLVE -
                          BLIT_1X_MSAA_SHADER_2D_MULTISAMPLE_RESOLVE);
         sampler_array_suffix = "Array";
         texcoord_type = "vec3";
      }
      break;
   default:
      /* NOTE(review): "Unkown" typo in this diagnostic string (runtime
       * text, left untouched here).
       */
      _mesa_problem(ctx, "Unkown texture target %s\n",
                    _mesa_enum_to_string(target));
      shader_index = BLIT_2X_MSAA_SHADER_2D_MULTISAMPLE_RESOLVE;
   }

   /* We rely on the enum being sorted this way. */
   STATIC_ASSERT(BLIT_1X_MSAA_SHADER_2D_MULTISAMPLE_RESOLVE_INT ==
                 BLIT_1X_MSAA_SHADER_2D_MULTISAMPLE_RESOLVE + 5);
   STATIC_ASSERT(BLIT_1X_MSAA_SHADER_2D_MULTISAMPLE_RESOLVE_UINT ==
                 BLIT_1X_MSAA_SHADER_2D_MULTISAMPLE_RESOLVE + 10);

   /* Integer formats get dedicated i/u shader variants at fixed enum
    * offsets (see STATIC_ASSERTs above).
    */
   if (src_datatype == GL_INT) {
      shader_index += 5;
      vec4_prefix = "i";
   } else if (src_datatype == GL_UNSIGNED_INT) {
      shader_index += 10;
      vec4_prefix = "u";
   } else {
      vec4_prefix = "";
   }

   /* Cache hit: bind the previously linked program and return. */
   if (blit->msaa_shaders[shader_index]) {
      _mesa_UseProgram(blit->msaa_shaders[shader_index]);
      return;
   }

   mem_ctx = ralloc_context(NULL);

   if (shader_index == BLIT_MSAA_SHADER_2D_MULTISAMPLE_DEPTH_RESOLVE ||
       shader_index == BLIT_MSAA_SHADER_2D_MULTISAMPLE_ARRAY_DEPTH_RESOLVE ||
       shader_index == BLIT_MSAA_SHADER_2D_MULTISAMPLE_ARRAY_DEPTH_COPY ||
       shader_index == BLIT_MSAA_SHADER_2D_MULTISAMPLE_DEPTH_COPY) {
      char *sample_index;
      const char *arb_sample_shading_extension_string;

      if (dst_is_msaa) {
         arb_sample_shading_extension_string =
            "#extension GL_ARB_sample_shading : enable";
         sample_index = "gl_SampleID";
         name = "depth MSAA copy";
      } else {
         /* Don't need that extension, since we're drawing to a single-sampled
          * destination.
          */
         arb_sample_shading_extension_string = "";
         /* From the GL 4.3 spec:
          *
          *     "If there is a multisample buffer (the value of SAMPLE_BUFFERS
          *      is one), then values are obtained from the depth samples in
          *      this buffer. It is recommended that the depth value of the
          *      centermost sample be used, though implementations may choose
          *      any function of the depth sample values at each pixel.
          *
          * We're slacking and instead of choosing centermost, we've got 0.
          */
         sample_index = "0";
         name = "depth MSAA resolve";
      }

      vs_source = ralloc_asprintf(mem_ctx,
                                  "#version 130\n"
                                  "in vec2 position;\n"
                                  "in %s textureCoords;\n"
                                  "out %s texCoords;\n"
                                  "void main()\n"
                                  "{\n"
                                  " texCoords = textureCoords;\n"
                                  " gl_Position = vec4(position, 0.0, 1.0);\n"
                                  "}\n",
                                  texcoord_type,
                                  texcoord_type);
      fs_source = ralloc_asprintf(mem_ctx,
                                  "#version 130\n"
                                  "#extension GL_ARB_texture_multisample : enable\n"
                                  "%s\n"
                                  "uniform sampler2DMS%s texSampler;\n"
                                  "in %s texCoords;\n"
                                  "out vec4 out_color;\n"
                                  "\n"
                                  "void main()\n"
                                  "{\n"
                                  " gl_FragDepth = texelFetch(texSampler, i%s(texCoords), %s).r;\n"
                                  "}\n",
                                  arb_sample_shading_extension_string,
                                  sampler_array_suffix,
                                  texcoord_type,
                                  texcoord_type,
                                  sample_index);
   } else {
      /* You can create 2D_MULTISAMPLE textures with 0 sample count (meaning 1
       * sample). Yes, this is ridiculous.
       */
      char *sample_resolve;
      const char *arb_sample_shading_extension_string;
      const char *merge_function;
      name = ralloc_asprintf(mem_ctx, "%svec4 MSAA %s",
                             vec4_prefix,
                             dst_is_msaa ? "copy" : "resolve");

      if (dst_is_msaa) {
         arb_sample_shading_extension_string =
            "#extension GL_ARB_sample_shading : enable";
         sample_resolve = ralloc_asprintf(mem_ctx,
            " out_color = texelFetch(texSampler, i%s(texCoords), gl_SampleID);",
            texcoord_type);
         merge_function = "";
      } else {
         int i;
         int step;

         if (src_datatype == GL_INT || src_datatype == GL_UNSIGNED_INT) {
            merge_function =
               "gvec4 merge(gvec4 a, gvec4 b) { return (a >> gvec4(1)) + (b >> gvec4(1)) + (a & b & gvec4(1)); }\n";
         } else {
            /* The divide will happen at the end for floats. */
            merge_function =
               "vec4 merge(vec4 a, vec4 b) { return (a + b); }\n";
         }

         arb_sample_shading_extension_string = "";

         /* We're assuming power of two samples for this resolution procedure.
          *
          * To avoid losing any floating point precision if the samples all
          * happen to have the same value, we merge pairs of values at a time
          * (so the floating point exponent just gets increased), rather than
          * doing a naive sum and dividing.
          */
         assert(_mesa_is_pow_two(samples));
         /* Fetch each individual sample. */
         sample_resolve = rzalloc_size(mem_ctx, 1);
         for (i = 0; i < samples; i++) {
            ralloc_asprintf_append(&sample_resolve,
               " gvec4 sample_1_%d = texelFetch(texSampler, i%s(texCoords), %d);\n",
               i, texcoord_type, i);
         }
         /* Now, merge each pair of samples, then merge each pair of those,
          * etc.
          */
         for (step = 2; step <= samples; step *= 2) {
            for (i = 0; i < samples; i += step) {
               ralloc_asprintf_append(&sample_resolve,
                  " gvec4 sample_%d_%d = merge(sample_%d_%d, sample_%d_%d);\n",
                  step, i,
                  step / 2, i,
                  step / 2, i + step / 2);
            }
         }

         /* Scale the final result. */
         if (src_datatype == GL_UNSIGNED_INT || src_datatype == GL_INT) {
            ralloc_asprintf_append(&sample_resolve,
               " out_color = sample_%d_0;\n",
               samples);
         } else {
            /* NOTE(review): this branch writes gl_FragColor while the
             * fragment shader below declares `out gvec4 out_color` — in
             * GLSL 1.30 mixing a user-defined out with gl_FragColor is
             * questionable; presumably this should write out_color.
             * Verify before changing.
             */
            ralloc_asprintf_append(&sample_resolve,
               " gl_FragColor = sample_%d_0 / %f;\n",
               samples, (float)samples);
         }
      }

      vs_source = ralloc_asprintf(mem_ctx,
                                  "#version 130\n"
                                  "in vec2 position;\n"
                                  "in %s textureCoords;\n"
                                  "out %s texCoords;\n"
                                  "void main()\n"
                                  "{\n"
                                  " texCoords = textureCoords;\n"
                                  " gl_Position = vec4(position, 0.0, 1.0);\n"
                                  "}\n",
                                  texcoord_type,
                                  texcoord_type);
      fs_source = ralloc_asprintf(mem_ctx,
                                  "#version 130\n"
                                  "#extension GL_ARB_texture_multisample : enable\n"
                                  "%s\n"
                                  "#define gvec4 %svec4\n"
                                  "uniform %ssampler2DMS%s texSampler;\n"
                                  "in %s texCoords;\n"
                                  "out gvec4 out_color;\n"
                                  "\n"
                                  "%s" /* merge_function */
                                  "void main()\n"
                                  "{\n"
                                  "%s\n" /* sample_resolve */
                                  "}\n",
                                  arb_sample_shading_extension_string,
                                  vec4_prefix,
                                  vec4_prefix,
                                  sampler_array_suffix,
                                  texcoord_type,
                                  merge_function,
                                  sample_resolve);
   }

   _mesa_meta_compile_and_link_program(ctx, vs_source, fs_source, name,
                                       &blit->msaa_shaders[shader_index]);

   ralloc_free(mem_ctx);
}
/**
 * Visit a shader variable and recurse through its type, calling
 * visit_field() for every leaf field.
 *
 * Variables produced by lower_named_interface_blocks need their original
 * "Block.field"-style names reconstructed before recursing; the branches
 * below handle each lowering shape in turn.
 */
void
program_resource_visitor::process(ir_variable *var)
{
   const glsl_type *t = var->type;
   const bool row_major =
      var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;

   /* false is always passed for the row_major parameter to the other
    * processing functions because no information is available to do
    * otherwise.  See the warning in linker.h.
    */

   /* Only strdup the name if we actually will need to modify it. */
   if (var->data.from_named_ifc_block_array) {
      /* lower_named_interface_blocks created this variable by lowering an
       * interface block array to an array variable.  For example if the
       * original source code was:
       *
       *     out Blk { vec4 bar } foo[3];
       *
       * Then the variable is now:
       *
       *     out vec4 bar[3];
       *
       * We need to visit each array element using the names constructed like
       * so:
       *
       *     Blk[0].bar
       *     Blk[1].bar
       *     Blk[2].bar
       */
      assert(t->is_array());
      const glsl_type *ifc_type = var->get_interface_type();
      char *name = ralloc_strdup(NULL, ifc_type->name);
      size_t name_length = strlen(name);
      for (unsigned i = 0; i < t->length; i++) {
         /* Reset to the block-name prefix, then append "[i].field". */
         size_t new_length = name_length;
         ralloc_asprintf_rewrite_tail(&name, &new_length, "[%u].%s", i,
                                      var->name);
         /* Note: row_major is only meaningful for uniform blocks, and
          * lowering is only applied to non-uniform interface blocks, so we
          * can safely pass false for row_major.
          */
         recursion(var->type, &name, new_length, row_major, NULL, false);
      }
      ralloc_free(name);
   } else if (var->data.from_named_ifc_block_nonarray) {
      /* lower_named_interface_blocks created this variable by lowering a
       * named interface block (non-array) to an ordinary variable.  For
       * example if the original source code was:
       *
       *     out Blk { vec4 bar } foo;
       *
       * Then the variable is now:
       *
       *     out vec4 bar;
       *
       * We need to visit this variable using the name:
       *
       *     Blk.bar
       */
      const glsl_type *ifc_type = var->get_interface_type();
      char *name = ralloc_asprintf(NULL, "%s.%s", ifc_type->name, var->name);
      /* Note: row_major is only meaningful for uniform blocks, and lowering
       * is only applied to non-uniform interface blocks, so we can safely
       * pass false for row_major.
       */
      recursion(var->type, &name, strlen(name), row_major, NULL, false);
      ralloc_free(name);
   } else if (t->without_array()->is_record()) {
      /* Records (and arrays of records) recurse using the variable name. */
      char *name = ralloc_strdup(NULL, var->name);
      recursion(var->type, &name, strlen(name), row_major, NULL, false);
      ralloc_free(name);
   } else if (t->is_interface()) {
      /* Interface blocks recurse using the block's type name. */
      char *name = ralloc_strdup(NULL, var->type->name);
      recursion(var->type, &name, strlen(name), row_major, NULL, false);
      ralloc_free(name);
   } else if (t->is_array() && t->fields.array->is_interface()) {
      /* Arrays of interface blocks likewise use the element type's name. */
      char *name = ralloc_strdup(NULL, var->type->fields.array->name);
      recursion(var->type, &name, strlen(name), row_major, NULL, false);
      ralloc_free(name);
   } else {
      /* Simple leaf variable: visit directly, no name surgery needed. */
      this->visit_field(t, var->name, row_major, NULL, false);
   }
}
/**
 * Build (or reuse) the GLSL program used for scaled MSAA resolves, i.e.
 * blits from a multisample renderbuffer to a differently-sized destination
 * with bilinear filtering across individual samples.
 *
 * The fragment shader is generated at runtime: a per-sample-count lookup
 * table maps the computed sample index to a hardware sample number, and a
 * TEXEL_FETCH macro hides the 2D vs. 2D-array sampler difference.
 *
 * Bug fixed: the generated shader previously called
 * clamp(tex_coord.x, ...) and clamp(tex_coord.y, ...) as bare statements.
 * GLSL's clamp() is a pure function that returns its result; it does not
 * modify its argument, so those statements were no-ops and tex_coord was
 * never clamped to the source surface.  The results are now assigned back.
 */
static void
setup_glsl_msaa_blit_scaled_shader(struct gl_context *ctx,
                                   struct blit_state *blit,
                                   struct gl_renderbuffer *src_rb,
                                   GLenum target, GLenum filter)
{
   GLint loc_src_width, loc_src_height;
   int i, samples;
   int shader_offset = 0;
   void *mem_ctx = ralloc_context(NULL);
   char *fs_source;
   char *name, *sample_number;
   const uint8_t *sample_map;
   char *sample_map_str = rzalloc_size(mem_ctx, 1);
   char *sample_map_expr = rzalloc_size(mem_ctx, 1);
   char *texel_fetch_macro = rzalloc_size(mem_ctx, 1);
   const char *sampler_array_suffix = "";
   float y_scale;
   enum blit_msaa_shader shader_index;

   assert(src_rb);
   samples = MAX2(src_rb->NumSamples, 1);
   y_scale = samples * 0.5;

   /* We expect only power of 2 samples in source multisample buffer. */
   assert(samples > 0 && _mesa_is_pow_two(samples));
   while (samples >> (shader_offset + 1)) {
      shader_offset++;
   }
   /* Update the assert if we plan to support more than 8X MSAA. */
   assert(shader_offset > 0 && shader_offset < 4);

   assert(target == GL_TEXTURE_2D_MULTISAMPLE ||
          target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY);

   shader_index = BLIT_2X_MSAA_SHADER_2D_MULTISAMPLE_SCALED_RESOLVE +
                  shader_offset - 1;

   if (target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
      shader_index += BLIT_2X_MSAA_SHADER_2D_MULTISAMPLE_ARRAY_SCALED_RESOLVE -
                      BLIT_2X_MSAA_SHADER_2D_MULTISAMPLE_SCALED_RESOLVE;
      sampler_array_suffix = "Array";
   }

   if (blit->msaa_shaders[shader_index]) {
      _mesa_UseProgram(blit->msaa_shaders[shader_index]);
      /* Update the uniform values. */
      loc_src_width =
         _mesa_GetUniformLocation(blit->msaa_shaders[shader_index], "src_width");
      loc_src_height =
         _mesa_GetUniformLocation(blit->msaa_shaders[shader_index], "src_height");
      _mesa_Uniform1f(loc_src_width, src_rb->Width);
      _mesa_Uniform1f(loc_src_height, src_rb->Height);
      return;
   }

   name = ralloc_asprintf(mem_ctx, "vec4 MSAA scaled resolve");

   /* Below switch is used to setup the shader expression, which computes
    * sample index and map it to to a sample number on hardware.
    */
   switch(samples) {
   case 2:
      sample_number = "sample_map[int(2 * fract(coord.x))]";
      sample_map = ctx->Const.SampleMap2x;
      break;
   case 4:
      sample_number = "sample_map[int(2 * fract(coord.x) + 4 * fract(coord.y))]";
      sample_map = ctx->Const.SampleMap4x;
      break;
   case 8:
      sample_number = "sample_map[int(2 * fract(coord.x) + 8 * fract(coord.y))]";
      sample_map = ctx->Const.SampleMap8x;
      break;
   default:
      sample_number = NULL;
      sample_map = NULL;
      _mesa_problem(ctx, "Unsupported sample count %d\n", samples);
      unreachable("Unsupported sample count");
   }

   /* Create sample map string. */
   for (i = 0 ; i < samples - 1; i++) {
      ralloc_asprintf_append(&sample_map_str, "%d, ", sample_map[i]);
   }
   ralloc_asprintf_append(&sample_map_str, "%d", sample_map[samples - 1]);

   /* Create sample map expression using above string. */
   ralloc_asprintf_append(&sample_map_expr,
                          "   const int sample_map[%d] = int[%d](%s);\n",
                          samples, samples, sample_map_str);

   if (target == GL_TEXTURE_2D_MULTISAMPLE) {
      ralloc_asprintf_append(&texel_fetch_macro,
                             "#define TEXEL_FETCH(coord) texelFetch(texSampler, ivec2(coord), %s);\n",
                             sample_number);
   } else {
      ralloc_asprintf_append(&texel_fetch_macro,
                             "#define TEXEL_FETCH(coord) texelFetch(texSampler, ivec3(coord, layer), %s);\n",
                             sample_number);
   }

   static const char vs_source[] =
      "#version 130\n"
      "in vec2 position;\n"
      "in vec3 textureCoords;\n"
      "out vec2 texCoords;\n"
      "flat out int layer;\n"
      "void main()\n"
      "{\n"
      "   texCoords = textureCoords.xy;\n"
      "   layer = int(textureCoords.z);\n"
      "   gl_Position = vec4(position, 0.0, 1.0);\n"
      "}\n"
      ;

   fs_source = ralloc_asprintf(mem_ctx,
                               "#version 130\n"
                               "#extension GL_ARB_texture_multisample : enable\n"
                               "uniform sampler2DMS%s texSampler;\n"
                               "uniform float src_width, src_height;\n"
                               "in vec2 texCoords;\n"
                               "flat in int layer;\n"
                               "out vec4 out_color;\n"
                               "\n"
                               "void main()\n"
                               "{\n"
                               "%s"
                               "   vec2 interp;\n"
                               "   const vec2 scale = vec2(2.0f, %ff);\n"
                               "   const vec2 scale_inv = vec2(0.5f, %ff);\n"
                               "   const vec2 s_0_offset = vec2(0.25f, %ff);\n"
                               "   vec2 s_0_coord, s_1_coord, s_2_coord, s_3_coord;\n"
                               "   vec4 s_0_color, s_1_color, s_2_color, s_3_color;\n"
                               "   vec4 x_0_color, x_1_color;\n"
                               "   vec2 tex_coord = texCoords - s_0_offset;\n"
                               "\n"
                               "   tex_coord *= scale;\n"
                               /* clamp() is pure: assign the result, don't
                                * discard it.
                                */
                               "   tex_coord.x = clamp(tex_coord.x, 0.0f, scale.x * src_width - 1.0f);\n"
                               "   tex_coord.y = clamp(tex_coord.y, 0.0f, scale.y * src_height - 1.0f);\n"
                               "   interp = fract(tex_coord);\n"
                               "   tex_coord = ivec2(tex_coord) * scale_inv;\n"
                               "\n"
                               "   /* Compute the sample coordinates used for filtering. */\n"
                               "   s_0_coord = tex_coord;\n"
                               "   s_1_coord = tex_coord + vec2(scale_inv.x, 0.0f);\n"
                               "   s_2_coord = tex_coord + vec2(0.0f, scale_inv.y);\n"
                               "   s_3_coord = tex_coord + vec2(scale_inv.x, scale_inv.y);\n"
                               "\n"
                               "   /* Fetch sample color values. */\n"
                               "%s"
                               "   s_0_color = TEXEL_FETCH(s_0_coord)\n"
                               "   s_1_color = TEXEL_FETCH(s_1_coord)\n"
                               "   s_2_color = TEXEL_FETCH(s_2_coord)\n"
                               "   s_3_color = TEXEL_FETCH(s_3_coord)\n"
                               "#undef TEXEL_FETCH\n"
                               "\n"
                               "   /* Do bilinear filtering on sample colors. */\n"
                               "   x_0_color = mix(s_0_color, s_1_color, interp.x);\n"
                               "   x_1_color = mix(s_2_color, s_3_color, interp.x);\n"
                               "   out_color = mix(x_0_color, x_1_color, interp.y);\n"
                               "}\n",
                               sampler_array_suffix,
                               sample_map_expr,
                               y_scale,
                               1.0f / y_scale,
                               1.0f / samples,
                               texel_fetch_macro);

   _mesa_meta_compile_and_link_program(ctx, vs_source, fs_source, name,
                                       &blit->msaa_shaders[shader_index]);
   loc_src_width =
      _mesa_GetUniformLocation(blit->msaa_shaders[shader_index], "src_width");
   loc_src_height =
      _mesa_GetUniformLocation(blit->msaa_shaders[shader_index], "src_height");
   _mesa_Uniform1f(loc_src_width, src_rb->Width);
   _mesa_Uniform1f(loc_src_height, src_rb->Height);

   ralloc_free(mem_ctx);
}
/**
 * Load one celestial body from the config section named @fullname.
 *
 * Fills in *body and, for bodies that orbit something, stores the
 * primary's name in *primary_name (allocated on body->ctx); independent
 * bodies get *primary_name = NULL.  Returns false on any parse error.
 */
static bool load_body(ALLEGRO_CONFIG *cfg, const char *fullname, Body *body,
                      char **primary_name)
{
   const char *slash;
   char *type_str;

   /* Just checking */
   if (al_get_first_config_entry(cfg, fullname, NULL) == NULL) {
      log_err("Section %s not in file\n", fullname);
      return false;
   }

   /* Fill in some common parameters */
   if (!config_get_double(cfg, fullname, "Mass", &body->mass, false))
      return false;
   if (!config_get_double(cfg, fullname, "Gravitational parameter",
                          &body->grav_param, true)) {
      /* Optional: derive it from the mass when absent. */
      body->grav_param = GRAV_CONST * body->mass;
   }
   if (!config_get_double(cfg, fullname, "Radius", &body->radius, false))
      return false;

   /* Figure out what kind of object it is */
   config_get_string(cfg, fullname, "Type", &type_str, true);
   if (type_str == NULL) {
      body->type = BODY_UNKNOWN;
   } else {
      /* Moons are classified as planet-type bodies. */
      static const struct { const char *label; int kind; } kinds[] = {
         { "Star",   BODY_STAR },
         { "Planet", BODY_PLANET },
         { "Moon",   BODY_PLANET },
         { "Comet",  BODY_COMET },
      };
      size_t k;
      for (k = 0; k < sizeof(kinds)/sizeof(kinds[0]); k++) {
         if (strcmp(type_str, kinds[k].label) == 0) {
            body->type = kinds[k].kind;
            break;
         }
      }
      if (k == sizeof(kinds)/sizeof(kinds[0])) {
         log_err("Unknown type: %s\n", type_str);
         ralloc_free(type_str);
         return false;
      }
   }
   ralloc_free(type_str);

   /* Does it have a primary or not?
    * Full names are of the form of "Primary/Name"
    * We search backwards to allow for things like "Sol/Earth/Moon"
    */
   slash = strrchr(fullname, '/');
   if (slash == NULL) {
      /* This is a body without a primary */
      body->name = ralloc_strdup(body->ctx, fullname);
      body->type = (body->type == BODY_UNKNOWN ? BODY_STAR : body->type);
      body->primary = NULL;
      *primary_name = NULL;
   } else if (slash == fullname) {
      /* No primary name, eg: sec = "/Earth" */
      log_err("Malformed name: %s", fullname);
      return false;
   } else {
      /* Scan back from the last '/' to find where the primary's own
       * component starts.
       */
      const char *start = slash - 1;
      while (start >= fullname && *start != '/')
         start--;
      start++;

      body->name = ralloc_strdup(body->ctx, slash + 1);
      body->type = (body->type == BODY_UNKNOWN ? BODY_PLANET : body->type);
      body->primary = NULL; /* Fill in later */
      *primary_name = ralloc_strndup(body->ctx, start, slash - start);
   }

   body->num_satellites = 0;
   body->satellite = NULL;

   /* Bodies without primaries can't orbit another body */
   if (*primary_name == NULL)
      return true;

   if (!config_get_double(cfg, fullname, "Ecc", &body->orbit.Ecc, false) ||
       !config_get_double(cfg, fullname, "SMa", &body->orbit.SMa, false) ||
       !config_get_double(cfg, fullname, "Inc", &body->orbit.Inc, false) ||
       !config_get_double(cfg, fullname, "LAN", &body->orbit.LAN, false) ||
       !config_get_double(cfg, fullname, "APe", &body->orbit.APe, false) ||
       !config_get_double(cfg, fullname, "MnA", &body->orbit.MnA, false)) {
      log_err("Couldn't load orbital elements of %s\n", fullname);
      return false;
   }

   return true;
}
~glslopt_ctx() { ralloc_free (mem_ctx); }
/**
 * Build a SolarSystem from a parsed config file.
 *
 * Three passes over the config sections:
 *   1. count the bodies (every non-empty section name is one body),
 *   2. load each body via load_body(), collecting primary names,
 *   3. resolve each primary name to its Body and link satellites.
 *
 * All allocations hang off the returned SolarSystem (ralloc), so a single
 * ralloc_free(solsys) on any error path releases everything loaded so far.
 * Returns NULL on error or when the file contains no bodies.
 */
static SolarSystem *load_from_config(ALLEGRO_CONFIG *cfg)
{
   SolarSystem *solsys;
   char **primary_names;
   const char *name;
   long num_bodies;
   ALLEGRO_CONFIG_SECTION *sec;
   int i;

   /* First pass: Determine the number of bodies in the file */
   num_bodies = 0;
   sec = NULL;
   while ((name = get_next_config_section(cfg, &sec)) != NULL) {
      if (name[0] != '\0')
         num_bodies++;
   }

   if (num_bodies == 0)
      return NULL; /* Empty solarsystem */

   /* One allocation for the header plus the flexible body array. */
   solsys = ralloc_size(NULL, sizeof(SolarSystem) + num_bodies*sizeof(Body));
   if (solsys == NULL)
      return NULL;

   solsys->num_bodies = num_bodies;

   /* Scratch array of primary names; parented to solsys so error paths
    * need only free solsys.
    */
   primary_names = ralloc_array(solsys, char *, num_bodies);
   if (primary_names == NULL) {
      ralloc_free(solsys);
      return NULL;
   }

   /* Second pass: Load all celestial bodies */
   i = 0;
   sec = NULL;
   while ((name = get_next_config_section(cfg, &sec)) != NULL && i < num_bodies) {
      if (name[0] == '\0')
         continue;

      solsys->body[i].ctx = solsys;
      if (!load_body(cfg, name, &solsys->body[i], &primary_names[i])) {
         log_err("Couldn't load body %s\n", name);
         ralloc_free(solsys);
         return NULL;
      }
      i++;
   }

   /* Should be impossible: pass 1 and pass 2 saw different section counts. */
   if (i < num_bodies)
      log_err("Internal consistency error\n");

   /* Third pass: Connect each satellite body to its primary */
   for (i = 0; i < num_bodies; i++) {
      Body *body = &solsys->body[i];
      char *primary_name = primary_names[i];

      if (primary_name == NULL) /* Independent body */
         continue;

      /* Look for the primary */
      body->primary = NULL;
      for (int j = 0; j < num_bodies; j++) {
         Body *body2 = &solsys->body[j];
         if (strcmp(primary_name, body2->name) == 0) {
            body->primary = body2;
            break;
         }
      }
      if (body->primary == NULL) {
         log_err("Couldn't find %s's primary: %s\n", body->name, primary_name);
         ralloc_free(solsys);
         return NULL;
      }

      ralloc_free(primary_name);
      primary_name = NULL; /* Won't ever be used again */

      /* Grow the primary's satellite list and append this body. */
      body->primary->num_satellites++;
      body->primary->satellite = reralloc(solsys, body->primary->satellite,
                                          Body *,
                                          body->primary->num_satellites);
      if (body->primary->satellite == NULL) {
         log_err("Out of memory\n");
         ralloc_free(solsys);
         return NULL;
      }
      body->primary->satellite[body->primary->num_satellites - 1] = body;

      /* Derive orbital period (Kepler's third law) and the orbit plane
       * orientation from the loaded elements.
       */
      body->orbit.epoch = 0;
      body->orbit.period = M_TWO_PI *
         sqrt(CUBE(body->orbit.SMa) / body->primary->grav_param);
      body->orbit.plane_orientation = quat_euler(RAD(body->orbit.LAN),
                                                 RAD(body->orbit.Inc),
                                                 RAD(body->orbit.APe));
   }

   ralloc_free(primary_names);

   return solsys;
}
/* If the user *does* call delete, that's OK, we will just * ralloc_free in that case. */ static void operator delete(void *node) { ralloc_free(node); }
loop_state::~loop_state() { hash_table_dtor(this->ht); ralloc_free(this->mem_ctx); }
static bool do_vs_prog(struct brw_context *brw, struct gl_shader_program *prog, struct brw_vertex_program *vp, struct brw_vs_prog_key *key) { struct gl_context *ctx = &brw->intel.ctx; struct intel_context *intel = &brw->intel; GLuint program_size; const GLuint *program; struct brw_vs_compile c; void *mem_ctx; int aux_size; int i; struct gl_shader *vs = NULL; if (prog) vs = prog->_LinkedShaders[MESA_SHADER_VERTEX]; memset(&c, 0, sizeof(c)); memcpy(&c.key, key, sizeof(*key)); mem_ctx = ralloc_context(NULL); brw_init_compile(brw, &c.func, mem_ctx); c.vp = vp; /* Allocate the references to the uniforms that will end up in the * prog_data associated with the compiled program, and which will be freed * by the state cache. */ int param_count; if (vs) { /* We add padding around uniform values below vec4 size, with the worst * case being a float value that gets blown up to a vec4, so be * conservative here. */ param_count = vs->num_uniform_components * 4; /* We also upload clip plane data as uniforms */ param_count += MAX_CLIP_PLANES * 4; } else { param_count = vp->program.Base.Parameters->NumParameters * 4; } c.prog_data.param = rzalloc_array(NULL, const float *, param_count); c.prog_data.pull_param = rzalloc_array(NULL, const float *, param_count); c.prog_data.outputs_written = vp->program.Base.OutputsWritten; c.prog_data.inputs_read = vp->program.Base.InputsRead; if (c.key.copy_edgeflag) { c.prog_data.outputs_written |= BITFIELD64_BIT(VERT_RESULT_EDGE); c.prog_data.inputs_read |= VERT_BIT_EDGEFLAG; } /* Put dummy slots into the VUE for the SF to put the replaced * point sprite coords in. We shouldn't need these dummy slots, * which take up precious URB space, but it would mean that the SF * doesn't get nice aligned pairs of input coords into output * coords, which would be a pain to handle. 
*/ for (i = 0; i < 8; i++) { if (c.key.point_coord_replace & (1 << i)) c.prog_data.outputs_written |= BITFIELD64_BIT(VERT_RESULT_TEX0 + i); } brw_compute_vue_map(&c); if (0) { _mesa_fprint_program_opt(stdout, &c.vp->program.Base, PROG_PRINT_DEBUG, true); } /* Emit GEN4 code. */ if (prog) { if (!brw_vs_emit(prog, &c)) { ralloc_free(mem_ctx); return false; } } else { brw_old_vs_emit(&c); } if (c.prog_data.nr_pull_params) c.prog_data.num_surfaces = 1; if (c.vp->program.Base.SamplersUsed) c.prog_data.num_surfaces = SURF_INDEX_VS_TEXTURE(BRW_MAX_TEX_UNIT); if (prog && prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks) { c.prog_data.num_surfaces = SURF_INDEX_VS_UBO(prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks); } /* Scratch space is used for register spilling */ if (c.last_scratch) { perf_debug("Vertex shader triggered register spilling. " "Try reducing the number of live vec4 values to " "improve performance.\n"); c.prog_data.total_scratch = brw_get_scratch_size(c.last_scratch); brw_get_scratch_bo(intel, &brw->vs.scratch_bo, c.prog_data.total_scratch * brw->max_vs_threads); } /* get the program */ program = brw_get_program(&c.func, &program_size); /* We upload from &c.prog_data including the constant_map assuming * they're packed together. It would be nice to have a * compile-time assert macro here. */ assert(c.constant_map == (int8_t *)&c.prog_data + sizeof(c.prog_data)); assert(ctx->Const.VertexProgram.MaxNativeParameters == ARRAY_SIZE(c.constant_map)); (void) ctx; aux_size = sizeof(c.prog_data); /* constant_map */ aux_size += c.vp->program.Base.Parameters->NumParameters; brw_upload_cache(&brw->cache, BRW_VS_PROG, &c.key, sizeof(c.key), program, program_size, &c.prog_data, aux_size, &brw->vs.prog_offset, &brw->vs.prog_data); ralloc_free(mem_ctx); return true; }
bool brw_codegen_gs_prog(struct brw_context *brw, struct gl_shader_program *prog, struct brw_geometry_program *gp, struct brw_gs_prog_key *key) { struct brw_compiler *compiler = brw->intelScreen->compiler; struct brw_stage_state *stage_state = &brw->gs.base; struct brw_gs_prog_data prog_data; bool start_busy = false; double start_time = 0; memset(&prog_data, 0, sizeof(prog_data)); assign_gs_binding_table_offsets(brw->intelScreen->devinfo, prog, &gp->program.Base, &prog_data); /* Allocate the references to the uniforms that will end up in the * prog_data associated with the compiled program, and which will be freed * by the state cache. * * Note: param_count needs to be num_uniform_components * 4, since we add * padding around uniform values below vec4 size, so the worst case is that * every uniform is a float which gets padded to the size of a vec4. */ struct gl_linked_shader *gs = prog->_LinkedShaders[MESA_SHADER_GEOMETRY]; struct brw_shader *bgs = (struct brw_shader *) gs; int param_count = gp->program.Base.nir->num_uniforms / 4; prog_data.base.base.param = rzalloc_array(NULL, const gl_constant_value *, param_count); prog_data.base.base.pull_param = rzalloc_array(NULL, const gl_constant_value *, param_count); prog_data.base.base.image_param = rzalloc_array(NULL, struct brw_image_param, gs->NumImages); prog_data.base.base.nr_params = param_count; prog_data.base.base.nr_image_params = gs->NumImages; brw_nir_setup_glsl_uniforms(gp->program.Base.nir, prog, &gp->program.Base, &prog_data.base.base, compiler->scalar_stage[MESA_SHADER_GEOMETRY]); GLbitfield64 outputs_written = gp->program.Base.OutputsWritten; prog_data.base.cull_distance_mask = ((1 << gp->program.Base.CullDistanceArraySize) - 1) << gp->program.Base.ClipDistanceArraySize; brw_compute_vue_map(brw->intelScreen->devinfo, &prog_data.base.vue_map, outputs_written, prog->SeparateShader); if (unlikely(INTEL_DEBUG & DEBUG_GS)) brw_dump_ir("geometry", prog, gs, NULL); int st_index = -1; if (INTEL_DEBUG & 
DEBUG_SHADER_TIME) st_index = brw_get_shader_time_index(brw, prog, NULL, ST_GS); if (unlikely(brw->perf_debug)) { start_busy = brw->batch.last_bo && drm_intel_bo_busy(brw->batch.last_bo); start_time = get_time(); } void *mem_ctx = ralloc_context(NULL); unsigned program_size; char *error_str; const unsigned *program = brw_compile_gs(brw->intelScreen->compiler, brw, mem_ctx, key, &prog_data, gs->Program->nir, prog, st_index, &program_size, &error_str); if (program == NULL) { ralloc_free(mem_ctx); return false; } if (unlikely(brw->perf_debug)) { if (bgs->compiled_once) { brw_gs_debug_recompile(brw, prog, key); } if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) { perf_debug("GS compile took %.03f ms and stalled the GPU\n", (get_time() - start_time) * 1000); } bgs->compiled_once = true; } /* Scratch space is used for register spilling */ brw_alloc_stage_scratch(brw, stage_state, prog_data.base.base.total_scratch, brw->max_gs_threads); brw_upload_cache(&brw->cache, BRW_CACHE_GS_PROG, key, sizeof(*key), program, program_size, &prog_data, sizeof(prog_data), &stage_state->prog_offset, &brw->gs.prog_data); ralloc_free(mem_ctx); return true; }
/**
 * Validate the combination of programs bound to the stages of @pipe,
 * per section 2.11.11 ("Shader Execution", subheading "Validation") of
 * the OpenGL 4.1 spec.
 *
 * Sets pipe->Validated and, on failure, may store a diagnostic in
 * pipe->InfoLog (ralloc'd on the pipe).  When @IsBound is set, a failure
 * also records GL_INVALID_OPERATION on @ctx.  Returns GL_TRUE on success.
 */
extern GLboolean
_mesa_validate_program_pipeline(struct gl_context* ctx,
                                struct gl_pipeline_object *pipe,
                                GLboolean IsBound)
{
   unsigned i;

   pipe->Validated = GL_FALSE;

   /* Release and reset the info log. */
   if (pipe->InfoLog != NULL)
      ralloc_free(pipe->InfoLog);
   pipe->InfoLog = NULL;

   /* Section 2.11.11 (Shader Execution), subheading "Validation," of the
    * OpenGL 4.1 spec says:
    *
    *     "[INVALID_OPERATION] is generated by any command that transfers
    *     vertices to the GL if:
    *
    *         - A program object is active for at least one, but not all of
    *           the shader stages that were present when the program was
    *           linked."
    *
    * For each possible program stage, verify that the program bound to that
    * stage has all of its stages active.  In other words, if the program
    * bound to the vertex stage also has a fragment shader, the fragment
    * shader must also be bound to the fragment stage.
    */
   for (i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!program_stages_all_active(pipe, pipe->CurrentProgram[i])) {
         goto err;
      }
   }

   /* Section 2.11.11 (Shader Execution), subheading "Validation," of the
    * OpenGL 4.1 spec says:
    *
    *     "[INVALID_OPERATION] is generated by any command that transfers
    *     vertices to the GL if:
    *
    *         ...
    *
    *         - One program object is active for at least two shader stages
    *           and a second program is active for a shader stage between two
    *           stages for which the first program was active."
    *
    * Without Tesselation, the only case where this can occur is the geometry
    * shader between the fragment shader and vertex shader.
    */
   if (pipe->CurrentProgram[MESA_SHADER_GEOMETRY]
       && pipe->CurrentProgram[MESA_SHADER_FRAGMENT]
       && pipe->CurrentProgram[MESA_SHADER_VERTEX]) {
      /* Same program on VS and FS, but a different one on GS in between. */
      if (pipe->CurrentProgram[MESA_SHADER_VERTEX]->Name ==
          pipe->CurrentProgram[MESA_SHADER_FRAGMENT]->Name &&
          pipe->CurrentProgram[MESA_SHADER_GEOMETRY]->Name !=
          pipe->CurrentProgram[MESA_SHADER_VERTEX]->Name) {
         pipe->InfoLog =
            ralloc_asprintf(pipe,
                            "Program %d is active for geometry stage between "
                            "two stages for which another program %d is "
                            "active",
                            pipe->CurrentProgram[MESA_SHADER_GEOMETRY]->Name,
                            pipe->CurrentProgram[MESA_SHADER_VERTEX]->Name);
         goto err;
      }
   }

   /* Section 2.11.11 (Shader Execution), subheading "Validation," of the
    * OpenGL 4.1 spec says:
    *
    *     "[INVALID_OPERATION] is generated by any command that transfers
    *     vertices to the GL if:
    *
    *         ...
    *
    *         - There is an active program for tessellation control,
    *           tessellation evaluation, or geometry stages with corresponding
    *           executable shader, but there is no active program with
    *           executable vertex shader."
    */
   if (!pipe->CurrentProgram[MESA_SHADER_VERTEX] &&
       pipe->CurrentProgram[MESA_SHADER_GEOMETRY]) {
      pipe->InfoLog = ralloc_strdup(pipe, "Program lacks a vertex shader");
      goto err;
   }

   /* Section 2.11.11 (Shader Execution), subheading "Validation," of the
    * OpenGL 4.1 spec says:
    *
    *     "[INVALID_OPERATION] is generated by any command that transfers
    *     vertices to the GL if:
    *
    *         ...
    *
    *         - There is no current program object specified by UseProgram,
    *           there is a current program pipeline object, and the current
    *           program for any shader stage has been relinked since being
    *           applied to the pipeline object via UseProgramStages with the
    *           PROGRAM_SEPARABLE parameter set to FALSE.
    */
   for (i = 0; i < MESA_SHADER_STAGES; i++) {
      if (pipe->CurrentProgram[i] &&
          !pipe->CurrentProgram[i]->SeparateShader) {
         pipe->InfoLog = ralloc_asprintf(pipe,
                                         "Program %d was relinked without "
                                         "PROGRAM_SEPARABLE state",
                                         pipe->CurrentProgram[i]->Name);
         goto err;
      }
   }

   /* Section 2.11.11 (Shader Execution), subheading "Validation," of the
    * OpenGL 4.1 spec says:
    *
    *     "[INVALID_OPERATION] is generated by any command that transfers
    *     vertices to the GL if:
    *
    *         ...
    *
    *         - Any two active samplers in the current program object are of
    *           different types, but refer to the same texture image unit.
    *
    *         - The number of active samplers in the program exceeds the
    *           maximum number of texture image units allowed."
    */
   if (!_mesa_sampler_uniforms_pipeline_are_valid(pipe))
      goto err;

   pipe->Validated = GL_TRUE;
   return GL_TRUE;

err:
   if (IsBound)
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glValidateProgramPipeline failed to validate the pipeline");

   return GL_FALSE;
}
static bool do_vs_prog(struct brw_context *brw, struct gl_shader_program *prog, struct brw_vertex_program *vp, struct brw_vs_prog_key *key) { GLuint program_size; const GLuint *program; struct brw_vs_compile c; struct brw_vs_prog_data prog_data; struct brw_stage_prog_data *stage_prog_data = &prog_data.base.base; void *mem_ctx; int i; struct gl_shader *vs = NULL; if (prog) vs = prog->_LinkedShaders[MESA_SHADER_VERTEX]; memset(&c, 0, sizeof(c)); memcpy(&c.key, key, sizeof(*key)); memset(&prog_data, 0, sizeof(prog_data)); mem_ctx = ralloc_context(NULL); c.vp = vp; /* Allocate the references to the uniforms that will end up in the * prog_data associated with the compiled program, and which will be freed * by the state cache. */ int param_count; if (vs) { /* We add padding around uniform values below vec4 size, with the worst * case being a float value that gets blown up to a vec4, so be * conservative here. */ param_count = vs->num_uniform_components * 4; } else { param_count = vp->program.Base.Parameters->NumParameters * 4; } /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip * planes as uniforms. */ param_count += c.key.base.nr_userclip_plane_consts * 4; stage_prog_data->param = rzalloc_array(NULL, const float *, param_count); stage_prog_data->pull_param = rzalloc_array(NULL, const float *, param_count); /* Setting nr_params here NOT to the size of the param and pull_param * arrays, but to the number of uniform components vec4_visitor * needs. vec4_visitor::setup_uniforms() will set it back to a proper value. 
*/ stage_prog_data->nr_params = ALIGN(param_count, 4) / 4; if (vs) { stage_prog_data->nr_params += vs->num_samplers; } GLbitfield64 outputs_written = vp->program.Base.OutputsWritten; prog_data.inputs_read = vp->program.Base.InputsRead; if (c.key.copy_edgeflag) { outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE); prog_data.inputs_read |= VERT_BIT_EDGEFLAG; } if (brw->gen < 6) { /* Put dummy slots into the VUE for the SF to put the replaced * point sprite coords in. We shouldn't need these dummy slots, * which take up precious URB space, but it would mean that the SF * doesn't get nice aligned pairs of input coords into output * coords, which would be a pain to handle. */ for (i = 0; i < 8; i++) { if (c.key.point_coord_replace & (1 << i)) outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i); } /* if back colors are written, allocate slots for front colors too */ if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0)) outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0); if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1)) outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1); } /* In order for legacy clipping to work, we need to populate the clip * distance varying slots whenever clipping is enabled, even if the vertex * shader doesn't write to gl_ClipDistance. */ if (c.key.base.userclip_active) { outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0); outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1); } brw_compute_vue_map(brw, &prog_data.base.vue_map, outputs_written); if (0) { _mesa_fprint_program_opt(stderr, &c.vp->program.Base, PROG_PRINT_DEBUG, true); } /* Emit GEN4 code. */ program = brw_vs_emit(brw, prog, &c, &prog_data, mem_ctx, &program_size); if (program == NULL) { ralloc_free(mem_ctx); return false; } /* Scratch space is used for register spilling */ if (c.base.last_scratch) { perf_debug("Vertex shader triggered register spilling. 
" "Try reducing the number of live vec4 values to " "improve performance.\n"); prog_data.base.total_scratch = brw_get_scratch_size(c.base.last_scratch*REG_SIZE); brw_get_scratch_bo(brw, &brw->vs.base.scratch_bo, prog_data.base.total_scratch * brw->max_vs_threads); } brw_upload_cache(&brw->cache, BRW_VS_PROG, &c.key, sizeof(c.key), program, program_size, &prog_data, sizeof(prog_data), &brw->vs.base.prog_offset, &brw->vs.prog_data); ralloc_free(mem_ctx); return true; }
output_read_remover::~output_read_remover() { hash_table_dtor(replacements); ralloc_free(mem_ctx); }
glsl_symbol_table::~glsl_symbol_table() { _mesa_symbol_table_dtor(table); ralloc_free(mem_ctx); }
/** * All Mesa program -> GPU code generation goes through this function. * Depending on the instructions used (i.e. flow control instructions) * we'll use one of two code generators. */ bool brw_codegen_wm_prog(struct brw_context *brw, struct gl_shader_program *prog, struct brw_fragment_program *fp, struct brw_wm_prog_key *key) { struct gl_context *ctx = &brw->ctx; void *mem_ctx = ralloc_context(NULL); struct brw_wm_prog_data prog_data; const GLuint *program; struct gl_shader *fs = NULL; GLuint program_size; if (prog) fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]; memset(&prog_data, 0, sizeof(prog_data)); /* key->alpha_test_func means simulating alpha testing via discards, * so the shader definitely kills pixels. */ prog_data.uses_kill = fp->program.UsesKill || key->alpha_test_func; prog_data.uses_omask = fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK); prog_data.computed_depth_mode = computed_depth_mode(&fp->program); /* Use ALT floating point mode for ARB programs so that 0^0 == 1. */ if (!prog) prog_data.base.use_alt_mode = true; /* Allocate the references to the uniforms that will end up in the * prog_data associated with the compiled program, and which will be freed * by the state cache. */ int param_count; if (fs) { param_count = fs->num_uniform_components; } else { param_count = fp->program.Base.Parameters->NumParameters * 4; } /* The backend also sometimes adds params for texture size. 
*/ param_count += 2 * ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits; prog_data.base.param = rzalloc_array(NULL, const gl_constant_value *, param_count); prog_data.base.pull_param = rzalloc_array(NULL, const gl_constant_value *, param_count); prog_data.base.nr_params = param_count; prog_data.barycentric_interp_modes = brw_compute_barycentric_interp_modes(brw, key->flat_shade, key->persample_shading, &fp->program); program = brw_wm_fs_emit(brw, mem_ctx, key, &prog_data, &fp->program, prog, &program_size); if (program == NULL) { ralloc_free(mem_ctx); return false; } if (prog_data.base.total_scratch) { brw_get_scratch_bo(brw, &brw->wm.base.scratch_bo, prog_data.base.total_scratch * brw->max_wm_threads); } if (unlikely(INTEL_DEBUG & DEBUG_WM)) fprintf(stderr, "\n"); brw_upload_cache(&brw->cache, BRW_CACHE_FS_PROG, key, sizeof(struct brw_wm_prog_key), program, program_size, &prog_data, sizeof(prog_data), &brw->wm.base.prog_offset, &brw->wm.prog_data); ralloc_free(mem_ctx); return true; }
static void compile_clip_prog( struct brw_context *brw, struct brw_clip_prog_key *key ) { struct brw_clip_compile c; const GLuint *program; void *mem_ctx; GLuint program_size; memset(&c, 0, sizeof(c)); mem_ctx = ralloc_context(NULL); /* Begin the compilation: */ brw_init_codegen(&brw->screen->devinfo, &c.func, mem_ctx); c.func.single_program_flow = 1; c.key = *key; c.vue_map = brw->vue_map_geom_out; /* nr_regs is the number of registers filled by reading data from the VUE. * This program accesses the entire VUE, so nr_regs needs to be the size of * the VUE (measured in pairs, since two slots are stored in each * register). */ c.nr_regs = (c.vue_map.num_slots + 1)/2; c.prog_data.clip_mode = c.key.clip_mode; /* XXX */ /* For some reason the thread is spawned with only 4 channels * unmasked. */ brw_set_default_mask_control(&c.func, BRW_MASK_DISABLE); /* Would ideally have the option of producing a program which could * do all three: */ switch (key->primitive) { case GL_TRIANGLES: if (key->do_unfilled) brw_emit_unfilled_clip( &c ); else brw_emit_tri_clip( &c ); break; case GL_LINES: brw_emit_line_clip( &c ); break; case GL_POINTS: brw_emit_point_clip( &c ); break; default: unreachable("not reached"); } brw_compact_instructions(&c.func, 0, 0, NULL); /* get the program */ program = brw_get_program(&c.func, &program_size); if (unlikely(INTEL_DEBUG & DEBUG_CLIP)) { fprintf(stderr, "clip:\n"); brw_disassemble(&brw->screen->devinfo, c.func.store, 0, program_size, stderr); fprintf(stderr, "\n"); } brw_upload_cache(&brw->cache, BRW_CACHE_CLIP_PROG, &c.key, sizeof(c.key), program, program_size, &c.prog_data, sizeof(c.prog_data), &brw->clip.prog_offset, &brw->clip.prog_data); ralloc_free(mem_ctx); }