/**
 * Compile a compute shader and upload the result to the program cache.
 *
 * On success, updates brw->cs.base.prog_offset / brw->cs.prog_data via
 * brw_upload_cache() and returns true.  On compile failure, frees the
 * temporary ralloc context and returns false.
 */
static bool
brw_codegen_cs_prog(struct brw_context *brw,
                    struct gl_shader_program *prog,
                    struct brw_compute_program *cp,
                    struct brw_cs_prog_key *key)
{
   struct gl_context *ctx = &brw->ctx;
   const GLuint *program;
   void *mem_ctx = ralloc_context(NULL);
   GLuint program_size;
   struct brw_cs_prog_data prog_data;

   struct gl_shader *cs = prog->_LinkedShaders[MESA_SHADER_COMPUTE];
   assert (cs);

   memset(&prog_data, 0, sizeof(prog_data));

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count = cs->num_uniform_components +
                     cs->NumImages * BRW_IMAGE_PARAM_SIZE;

   /* The backend also sometimes adds params for texture size. */
   param_count += 2 * ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;

   /* These arrays are owned by the state cache, not mem_ctx, hence the
    * NULL ralloc parent.
    */
   prog_data.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.image_param =
      rzalloc_array(NULL, struct brw_image_param, cs->NumImages);
   prog_data.base.nr_params = param_count;
   prog_data.base.nr_image_params = cs->NumImages;

   program = brw_cs_emit(brw, mem_ctx, key, &prog_data,
                         &cp->program, prog, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling; size it for the maximum
    * number of simultaneous CS threads.
    */
   if (prog_data.base.total_scratch) {
      brw_get_scratch_bo(brw, &brw->cs.base.scratch_bo,
                         prog_data.base.total_scratch * brw->max_cs_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_CS))
      fprintf(stderr, "\n");

   brw_upload_cache(&brw->cache, BRW_CACHE_CS_PROG,
                    key, sizeof(*key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->cs.base.prog_offset, &brw->cs.prog_data);

   ralloc_free(mem_ctx);
   return true;
}
/**
 * Creates the state cache entry for the given DEPTH_STENCIL_STATE state key.
 *
 * Packs the key's depth/stencil settings into a gen6_depth_stencil_state
 * structure and uploads it, returning the cache buffer object.
 * Index [0] of the key's stencil arrays is the front face; index [1] is the
 * back face, used only when two-sided stencil is enabled.
 */
static drm_intel_bo *
depth_stencil_state_create_from_key(struct brw_context *brw,
                                    struct brw_depth_stencil_state_key *key)
{
   struct gen6_depth_stencil_state ds;
   drm_intel_bo *bo;

   memset(&ds, 0, sizeof(ds));

   /* _NEW_STENCIL */
   if (key->stencil) {
      /* Front-face stencil state. */
      ds.ds0.stencil_enable = 1;
      ds.ds0.stencil_func =
         intel_translate_compare_func(key->stencil_func[0]);
      ds.ds0.stencil_fail_op =
         intel_translate_stencil_op(key->stencil_fail_op[0]);
      ds.ds0.stencil_pass_depth_fail_op =
         intel_translate_stencil_op(key->stencil_pass_depth_fail_op[0]);
      ds.ds0.stencil_pass_depth_pass_op =
         intel_translate_stencil_op(key->stencil_pass_depth_pass_op[0]);
      ds.ds1.stencil_write_mask = key->stencil_write_mask[0];
      ds.ds1.stencil_test_mask = key->stencil_test_mask[0];

      /* Back-face ("bf") stencil state, only for two-sided stencil. */
      if (key->stencil_two_side) {
         ds.ds0.bf_stencil_enable = 1;
         ds.ds0.bf_stencil_func =
            intel_translate_compare_func(key->stencil_func[1]);
         ds.ds0.bf_stencil_fail_op =
            intel_translate_stencil_op(key->stencil_fail_op[1]);
         ds.ds0.bf_stencil_pass_depth_fail_op =
            intel_translate_stencil_op(key->stencil_pass_depth_fail_op[1]);
         ds.ds0.bf_stencil_pass_depth_pass_op =
            intel_translate_stencil_op(key->stencil_pass_depth_pass_op[1]);
         ds.ds1.bf_stencil_write_mask = key->stencil_write_mask[1];
         ds.ds1.bf_stencil_test_mask = key->stencil_test_mask[1];
      }

      /* Not really sure about this:
       */
      if (key->stencil_write_mask[0] ||
          (key->stencil_two_side && key->stencil_write_mask[1]))
         ds.ds0.stencil_write_enable = 1;
   }

   /* _NEW_DEPTH */
   if (key->depth_test) {
      ds.ds2.depth_test_enable = 1;
      ds.ds2.depth_test_func = intel_translate_compare_func(key->depth_func);
      ds.ds2.depth_write_enable = key->depth_write;
   }

   bo = brw_upload_cache(&brw->cache, BRW_DEPTH_STENCIL_STATE,
                         key, sizeof(*key),
                         NULL, 0,
                         &ds, sizeof(ds));

   return bo;
}
/**
 * blorp_context callback: store a compiled BLORP kernel plus its prog_data
 * in the brw program cache, returning the kernel offset and cached
 * prog_data pointer through the out parameters.
 */
static void
brw_blorp_upload_shader(struct blorp_context *blorp,
                        const void *key, uint32_t key_size,
                        const void *kernel, uint32_t kernel_size,
                        const struct brw_stage_prog_data *prog_data,
                        uint32_t prog_data_size,
                        uint32_t *kernel_out, void *prog_data_out)
{
   /* The blorp context carries the owning brw_context as its driver data. */
   struct brw_context *brw = blorp->driver_ctx;
   struct brw_cache *cache = &brw->cache;

   brw_upload_cache(cache, BRW_CACHE_BLORP_PROG,
                    key, key_size,
                    kernel, kernel_size,
                    prog_data, prog_data_size,
                    kernel_out, prog_data_out);
}
/**
 * Build the GS unit state for the given key and upload it to the state
 * cache, returning the resulting buffer in *bo_out.
 *
 * Returns PIPE_OK on success, or the error code from brw_upload_cache().
 */
static enum pipe_error
gs_unit_create_from_key(struct brw_context *brw,
                        struct brw_gs_unit_key *key,
                        struct brw_winsys_reloc *reloc,
                        unsigned nr_reloc,
                        struct brw_winsys_buffer **bo_out)
{
   struct brw_gs_unit_state gs;
   enum pipe_error ret;

   memset(&gs, 0, sizeof(gs));

   /* reloc */
   gs.thread0.grf_reg_count = align(key->total_grf, 16) / 16 - 1;
   /* Kernel pointer is patched in via the relocation list. */
   gs.thread0.kernel_start_pointer = 0;

   gs.thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
   gs.thread1.single_program_flow = 1;

   gs.thread3.dispatch_grf_start_reg = 1;
   gs.thread3.const_urb_entry_read_offset = 0;
   gs.thread3.const_urb_entry_read_length = 0;
   gs.thread3.urb_entry_read_offset = 0;
   gs.thread3.urb_entry_read_length = key->urb_entry_read_length;

   gs.thread4.nr_urb_entries = key->nr_urb_entries;
   gs.thread4.urb_entry_allocation_size = key->urb_size - 1;

   /* Only enable a second thread when there are enough URB entries. */
   if (key->nr_urb_entries >= 8)
      gs.thread4.max_threads = 1;
   else
      gs.thread4.max_threads = 0;

   if (BRW_IS_IGDNG(brw))
      gs.thread4.rendering_enable = 1;

   if (BRW_DEBUG & DEBUG_STATS)
      gs.thread4.stats_enable = 1;

   ret = brw_upload_cache(&brw->cache, BRW_GS_UNIT,
                          key, sizeof(*key),
                          reloc, nr_reloc,
                          &gs, sizeof(gs),
                          NULL, NULL,
                          bo_out);
   if (ret)
      return ret;

   return PIPE_OK;
}
/**
 * Build a SURFACE_STATE structure for a texture described by the key,
 * upload it to the surface cache, and emit the relocation for the texture
 * buffer address.  Returns the surface-state buffer object.
 */
static dri_bo *
brw_create_texture_surface( struct brw_context *brw,
                            struct brw_surface_key *key )
{
   struct brw_surface_state surf;
   dri_bo *bo;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf.ss0.surface_type = translate_tex_target(key->target);
   surf.ss0.surface_format = translate_tex_format(key->format,
                                                  key->internal_format,
                                                  key->depthmode);

   /* This is ok for all textures with channel width 8bit or less:
    */
   /* surf.ss0.data_return_format = BRW_SURFACERETURNFORMAT_S1; */

   surf.ss1.base_addr = key->bo->offset; /* reloc */

   surf.ss2.mip_count = key->last_level - key->first_level;
   surf.ss2.width = key->width - 1;
   surf.ss2.height = key->height - 1;
   brw_set_surface_tiling(&surf, key->tiling);
   surf.ss3.pitch = (key->pitch * key->cpp) - 1;
   surf.ss3.depth = key->depth - 1;

   surf.ss4.min_lod = 0;

   /* Cube maps enable all six faces. */
   if (key->target == GL_TEXTURE_CUBE_MAP) {
      surf.ss0.cube_pos_x = 1;
      surf.ss0.cube_pos_y = 1;
      surf.ss0.cube_pos_z = 1;
      surf.ss0.cube_neg_x = 1;
      surf.ss0.cube_neg_y = 1;
      surf.ss0.cube_neg_z = 1;
   }

   bo = brw_upload_cache(&brw->surface_cache, BRW_SS_SURFACE,
                         key, sizeof(*key),
                         &key->bo, 1,
                         &surf, sizeof(surf));

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(bo,
                           offsetof(struct brw_surface_state, ss1),
                           key->bo, 0,
                           I915_GEM_DOMAIN_SAMPLER, 0);

   return bo;
}
static void brw_blorp_params_get_clear_kernel(struct brw_context *brw, struct brw_blorp_params *params, bool use_replicated_data) { struct brw_blorp_const_color_prog_key blorp_key; memset(&blorp_key, 0, sizeof(blorp_key)); blorp_key.use_simd16_replicated_data = use_replicated_data; if (brw_search_cache(&brw->cache, BRW_CACHE_BLORP_PROG, &blorp_key, sizeof(blorp_key), ¶ms->wm_prog_kernel, ¶ms->wm_prog_data)) return; void *mem_ctx = ralloc_context(NULL); nir_builder b; nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL); b.shader->info.name = ralloc_strdup(b.shader, "BLORP-clear"); nir_variable *v_color = nir_variable_create(b.shader, nir_var_shader_in, glsl_vec4_type(), "v_color"); v_color->data.location = VARYING_SLOT_VAR0; v_color->data.interpolation = INTERP_MODE_FLAT; nir_variable *frag_color = nir_variable_create(b.shader, nir_var_shader_out, glsl_vec4_type(), "gl_FragColor"); frag_color->data.location = FRAG_RESULT_COLOR; nir_copy_var(&b, frag_color, v_color); struct brw_wm_prog_key wm_key; brw_blorp_init_wm_prog_key(&wm_key); struct brw_blorp_prog_data prog_data; unsigned program_size; const unsigned *program = brw_blorp_compile_nir_shader(brw, b.shader, &wm_key, use_replicated_data, &prog_data, &program_size); brw_upload_cache(&brw->cache, BRW_CACHE_BLORP_PROG, &blorp_key, sizeof(blorp_key), program, program_size, &prog_data, sizeof(prog_data), ¶ms->wm_prog_kernel, ¶ms->wm_prog_data); ralloc_free(mem_ctx); }
/**
 * Build the GS unit state for the given key, upload it to the state cache,
 * and (when a GS program is active) emit the relocation pointing thread0 at
 * the GS program buffer.
 */
static dri_bo *
gs_unit_create_from_key(struct brw_context *brw, struct brw_gs_unit_key *key)
{
   struct brw_gs_unit_state gs;
   dri_bo *bo;

   memset(&gs, 0, sizeof(gs));

   gs.thread0.grf_reg_count = ALIGN(key->total_grf, 16) / 16 - 1;
   if (key->prog_active) /* reloc */
      gs.thread0.kernel_start_pointer = brw->gs.prog_bo->offset >> 6;
   gs.thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
   gs.thread1.single_program_flow = 1;

   gs.thread3.dispatch_grf_start_reg = 1;
   gs.thread3.const_urb_entry_read_offset = 0;
   gs.thread3.const_urb_entry_read_length = 0;
   gs.thread3.urb_entry_read_offset = 0;
   gs.thread3.urb_entry_read_length = key->urb_entry_read_length;

   gs.thread4.nr_urb_entries = key->nr_urb_entries;
   gs.thread4.urb_entry_allocation_size = key->urb_size - 1;

   /* Only enable a second thread when there are enough URB entries. */
   if (key->nr_urb_entries >= 8)
      gs.thread4.max_threads = 1;
   else
      gs.thread4.max_threads = 0;

   if (BRW_IS_IGDNG(brw))
      gs.thread4.rendering_enable = 1;

   if (INTEL_DEBUG & DEBUG_STATS)
      gs.thread4.stats_enable = 1;

   bo = brw_upload_cache(&brw->cache, BRW_GS_UNIT,
                         key, sizeof(*key),
                         &brw->gs.prog_bo, 1,
                         &gs, sizeof(gs),
                         NULL, NULL);

   if (key->prog_active) {
      /* Emit GS program relocation */
      dri_bo_emit_reloc(bo,
                        I915_GEM_DOMAIN_INSTRUCTION, 0,
                        gs.thread0.grf_reg_count << 1,
                        offsetof(struct brw_gs_unit_state, thread0),
                        brw->gs.prog_bo);
   }
   /* NOTE(review): this source chunk ends here; the function's trailing
    * "return bo;" and closing brace lie outside the visible excerpt.
    */
/**
 * Return the cache offset of the WM (fragment) program for this clear,
 * compiling and uploading it on a cache miss.  *prog_data is set to the
 * cached prog_data pointer in either case.
 */
uint32_t
brw_blorp_clear_params::get_wm_prog(struct brw_context *brw,
                                    brw_blorp_prog_data **prog_data) const
{
   uint32_t prog_offset;
   /* Look up an existing compile for this key; compile on miss. */
   if (!brw_search_cache(&brw->cache, BRW_BLORP_CLEAR_PROG,
                         &this->wm_prog_key, sizeof(this->wm_prog_key),
                         &prog_offset, prog_data)) {
      brw_blorp_clear_program prog(brw, &this->wm_prog_key);
      GLuint program_size;
      const GLuint *program = prog.compile(brw, &program_size);
      brw_upload_cache(&brw->cache, BRW_BLORP_CLEAR_PROG,
                       &this->wm_prog_key, sizeof(this->wm_prog_key),
                       program, program_size,
                       &prog.prog_data, sizeof(prog.prog_data),
                       &prog_offset, prog_data);
   }
   return prog_offset;
}
static void compile_sf_prog( struct brw_context *brw, struct brw_sf_prog_key *key ) { const unsigned *program; void *mem_ctx; unsigned program_size; mem_ctx = ralloc_context(NULL); struct brw_sf_prog_data prog_data; program = brw_compile_sf(brw->screen->compiler, mem_ctx, key, &prog_data, &brw->vue_map_geom_out, &program_size); brw_upload_cache(&brw->cache, BRW_CACHE_SF_PROG, key, sizeof(*key), program, program_size, &prog_data, sizeof(prog_data), &brw->sf.prog_offset, &brw->sf.prog_data); ralloc_free(mem_ctx); }
/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will be
 * read from this buffer with Data Port Read instructions/messages.
 *
 * The buffer is described as a 1-D BRW_SURFACE_BUFFER whose element count is
 * split across the width/height/depth bit-fields of the surface state.
 */
dri_bo *
brw_create_constant_surface( struct brw_context *brw,
                             struct brw_surface_key *key )
{
   const GLint w = key->width - 1;
   struct brw_surface_state surf;
   dri_bo *bo;

   memset(&surf, 0, sizeof(surf));

   surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf.ss0.surface_type = BRW_SURFACE_BUFFER;
   surf.ss0.surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;

   assert(key->bo);
   surf.ss1.base_addr = key->bo->offset; /* reloc */

   /* Buffer size is encoded across three fields of the surface state. */
   surf.ss2.width = w & 0x7f;            /* bits 6:0 of size or width */
   surf.ss2.height = (w >> 7) & 0x1fff;  /* bits 19:7 of size or width */
   surf.ss3.depth = (w >> 20) & 0x7f;    /* bits 26:20 of size or width */

   surf.ss3.pitch = (key->pitch * key->cpp) - 1; /* ignored?? */
   brw_set_surface_tiling(&surf, key->tiling); /* tiling now allowed */

   bo = brw_upload_cache(&brw->surface_cache, BRW_SS_SURFACE,
                         key, sizeof(*key),
                         &key->bo, 1,
                         &surf, sizeof(surf));

   /* Emit relocation to surface contents.  Section 5.1.1 of the gen4
    * bspec ("Data Cache") says that the data cache does not exist as
    * a separate cache and is just the sampler cache.
    */
   drm_intel_bo_emit_reloc(bo,
                           offsetof(struct brw_surface_state, ss1),
                           key->bo, 0,
                           I915_GEM_DOMAIN_SAMPLER, 0);

   return bo;
}
/**
 * Compile a fixed-function GS program for primitive decomposition and
 * upload it to the program cache, storing the offset in
 * brw->gs.prog_gs_offset.
 *
 * Primitives that never need a GS program (and line/tri/point primitives
 * when the hint_gs_always hint is not set) return early without compiling.
 */
static void compile_gs_prog( struct brw_context *brw,
                             struct brw_gs_prog_key *key )
{
   struct brw_gs_compile c;
   const unsigned *program;
   unsigned program_size;

   memset(&c, 0, sizeof(c));

   c.key = *key;

   /* Need to locate the two positions present in vertex + header.
    * These are currently hardcoded:
    */
   c.nr_attrs = brw_count_bits(c.key.attrs);
   c.nr_regs = (c.nr_attrs + 1) / 2 + 1; /* are vertices packed, or reg-aligned? */
   c.nr_bytes = c.nr_regs * REG_SIZE;

   /* Begin the compilation:
    */
   brw_init_compile(&c.func);

   c.func.single_program_flow = 1;

   /* For some reason the thread is spawned with only 4 channels
    * unmasked.
    */
   brw_set_mask_control(&c.func, BRW_MASK_DISABLE);

   /* Note that primitives which don't require a GS program have
    * already been weeded out by this stage:
    */
   switch (key->primitive) {
   case PIPE_PRIM_QUADS:
      brw_gs_quads( &c );
      break;
   case PIPE_PRIM_QUAD_STRIP:
      brw_gs_quad_strip( &c );
      break;
   case PIPE_PRIM_LINE_LOOP:
      brw_gs_lines( &c );
      break;
   case PIPE_PRIM_LINES:
      if (key->hint_gs_always)
         brw_gs_lines( &c );
      else {
         return;
      }
      break;
   case PIPE_PRIM_TRIANGLES:
      if (key->hint_gs_always)
         brw_gs_tris( &c );
      else {
         return;
      }
      break;
   case PIPE_PRIM_POINTS:
      if (key->hint_gs_always)
         brw_gs_points( &c );
      else {
         return;
      }
      break;
   default:
      return;
   }

   /* get the program
    */
   program = brw_get_program(&c.func, &program_size);

   /* Upload
    */
   brw->gs.prog_gs_offset = brw_upload_cache( &brw->cache[BRW_GS_PROG],
                                              &c.key,
                                              sizeof(c.key),
                                              program,
                                              program_size,
                                              &c.prog_data,
                                              &brw->gs.prog_data );
}
/**
 * Compile a geometry shader (vec4 backend) and upload it to the program
 * cache.  Computes the GS control-data layout, VUE maps, output vertex size
 * and URB entry size before invoking brw_gs_emit().
 *
 * Returns false if compilation fails or the computed URB entry size exceeds
 * the hardware maximum.
 */
bool
brw_codegen_gs_prog(struct brw_context *brw,
                    struct gl_shader_program *prog,
                    struct brw_geometry_program *gp,
                    struct brw_gs_prog_key *key)
{
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_compile c;
   memset(&c, 0, sizeof(c));
   c.key = *key;
   c.gp = gp;

   c.prog_data.include_primitive_id =
      (gp->program.Base.InputsRead & VARYING_BIT_PRIMITIVE_ID) != 0;

   c.prog_data.invocations = gp->program.Invocations;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    *
    * Note: param_count needs to be num_uniform_components * 4, since we add
    * padding around uniform values below vec4 size, so the worst case is that
    * every uniform is a float which gets padded to the size of a vec4.
    */
   struct gl_shader *gs = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   int param_count = gs->num_uniform_components * 4;

   /* We also upload clip plane data as uniforms */
   param_count += MAX_CLIP_PLANES * 4;

   c.prog_data.base.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   c.prog_data.base.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   c.prog_data.base.base.nr_params = param_count;

   if (brw->gen >= 7) {
      if (gp->program.OutputType == GL_POINTS) {
         /* When the output type is points, the geometry shader may output data
          * to multiple streams, and EndPrimitive() has no effect.  So we
          * configure the hardware to interpret the control data as stream ID.
          */
         c.prog_data.control_data_format =
            GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID;

         /* We only have to emit control bits if we are using streams */
         if (prog->Geom.UsesStreams)
            c.control_data_bits_per_vertex = 2;
         else
            c.control_data_bits_per_vertex = 0;
      } else {
         /* When the output type is triangle_strip or line_strip,
          * EndPrimitive() may be used to terminate the current strip and
          * start a new one (similar to primitive restart), and outputting
          * data to multiple streams is not supported.  So we configure the
          * hardware to interpret the control data as EndPrimitive
          * information (a.k.a. "cut bits").
          */
         c.prog_data.control_data_format =
            GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT;

         /* We only need to output control data if the shader actually calls
          * EndPrimitive().
          */
         c.control_data_bits_per_vertex =
            gp->program.UsesEndPrimitive ? 1 : 0;
      }
   } else {
      /* There are no control data bits in gen6. */
      c.control_data_bits_per_vertex = 0;

      /* If it is using transform feedback, enable it */
      if (prog->TransformFeedback.NumVarying)
         c.prog_data.gen6_xfb_enabled = true;
      else
         c.prog_data.gen6_xfb_enabled = false;
   }
   c.control_data_header_size_bits =
      gp->program.VerticesOut * c.control_data_bits_per_vertex;

   /* 1 HWORD = 32 bytes = 256 bits */
   c.prog_data.control_data_header_size_hwords =
      ALIGN(c.control_data_header_size_bits, 256) / 256;

   GLbitfield64 outputs_written = gp->program.Base.OutputsWritten;

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (c.key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw->intelScreen->devinfo,
                       &c.prog_data.base.vue_map, outputs_written);

   /* Compute the output vertex size.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 STATE_GS - Output Vertex
    * Size (p168):
    *
    *     [0,62] indicating [1,63] 16B units
    *
    *     Specifies the size of each vertex stored in the GS output entry
    *     (following any Control Header data) as a number of 128-bit units
    *     (minus one).
    *
    *     Programming Restrictions: The vertex size must be programmed as a
    *     multiple of 32B units with the following exception: Rendering is
    *     disabled (as per SOL stage state) and the vertex size output by the
    *     GS thread is 16B.
    *
    *     If rendering is enabled (as per SOL state) the vertex size must be
    *     programmed as a multiple of 32B units.  In other words, the only
    *     time software can program a vertex size with an odd number of 16B
    *     units is when rendering is disabled.
    *
    * Note: B=bytes in the above text.
    *
    * It doesn't seem worth the extra trouble to optimize the case where the
    * vertex size is 16B (especially since this would require special-casing
    * the GEN assembly that writes to the URB).  So we just set the vertex
    * size to a multiple of 32B (2 vec4's) in all cases.
    *
    * The maximum output vertex size is 62*16 = 992 bytes (31 hwords).  We
    * budget that as follows:
    *
    *   512 bytes for varyings (a varying component is 4 bytes and
    *             gl_MaxGeometryOutputComponents = 128)
    *    16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *             bytes)
    *    16 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *             even if it's not used)
    *    32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *             whenever clip planes are enabled, even if the shader doesn't
    *             write to gl_ClipDistance)
    *    16 bytes overhead since the VUE size must be a multiple of 32 bytes
    *             (see below)--this causes up to 1 VUE slot to be wasted
    *   400 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
    * per interpolation type, so this is plenty.
    *
    */
   unsigned output_vertex_size_bytes = c.prog_data.base.vue_map.num_slots * 16;
   assert(brw->gen == 6 ||
          output_vertex_size_bytes <= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
   c.prog_data.output_vertex_size_hwords =
      ALIGN(output_vertex_size_bytes, 32) / 32;

   /* Compute URB entry size.  The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     64 bytes for the control data header (cut indices or StreamID bits)
    *   4096 bytes for varyings (a varying component is 4 bytes and
    *              gl_MaxGeometryTotalOutputComponents = 1024)
    *   4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *              bytes/vertex and gl_MaxGeometryOutputVertices is 256)
    *   4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *              even if it's not used)
    *   8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *              whenever clip planes are enabled, even if the shader doesn't
    *              write to gl_ClipDistance)
    *   4096 bytes overhead since the VUE size must be a multiple of 32
    *              bytes (see above)--this causes up to 1 VUE slot to be wasted
    *   8128 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot per
    * interpolation type, which works out to 3072 bytes, so this would allow
    * us to accommodate 2 interpolation types without any danger of running
    * out of URB space.
    *
    * In practice, the risk of running out of URB space is very small, since
    * the above figures are all worst-case, and most of them scale with the
    * number of output vertices.  So we'll just calculate the amount of space
    * we need, and if it's too large, fail to compile.
    *
    * The above is for gen7+ where we have a single URB entry that will hold
    * all the output.  In gen6, we will have to allocate URB entries for every
    * vertex we emit, so our URB entries only need to be large enough to hold
    * a single vertex.  Also, gen6 does not have a control data header.
    */
   unsigned output_size_bytes;
   if (brw->gen >= 7) {
      output_size_bytes =
         c.prog_data.output_vertex_size_hwords * 32 * gp->program.VerticesOut;
      output_size_bytes += 32 * c.prog_data.control_data_header_size_hwords;
   } else {
      output_size_bytes = c.prog_data.output_vertex_size_hwords * 32;
   }

   /* Broadwell stores "Vertex Count" as a full 8 DWord (32 byte) URB output,
    * which comes before the control header.
    */
   if (brw->gen >= 8)
      output_size_bytes += 32;

   assert(output_size_bytes >= 1);
   int max_output_size_bytes = GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (brw->gen == 6)
      max_output_size_bytes = GEN6_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (output_size_bytes > max_output_size_bytes)
      return false;

   /* URB entry sizes are stored as a multiple of 64 bytes in gen7+ and
    * a multiple of 128 bytes in gen6.
    */
   if (brw->gen >= 7)
      c.prog_data.base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
   else
      c.prog_data.base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;

   c.prog_data.output_topology =
      get_hw_prim_for_gl_prim(gp->program.OutputType);

   brw_compute_vue_map(brw->intelScreen->devinfo,
                       &c.input_vue_map, c.key.input_varyings);

   /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
    * need to program a URB read length of ceiling(num_slots / 2).
    */
   c.prog_data.base.urb_read_length = (c.input_vue_map.num_slots + 1) / 2;

   void *mem_ctx = ralloc_context(NULL);
   unsigned program_size;
   const unsigned *program =
      brw_gs_emit(brw, prog, &c, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Geometry shader triggered register spilling. "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");
      c.prog_data.base.base.total_scratch =
         brw_get_scratch_size(c.base.last_scratch*REG_SIZE);
      brw_get_scratch_bo(brw, &stage_state->scratch_bo,
                         c.prog_data.base.base.total_scratch *
                         brw->max_gs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_CACHE_GS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
                    &c.prog_data, sizeof(c.prog_data),
                    &stage_state->prog_offset, &brw->gs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
/**
 * Compile a geometry shader (NIR path) and upload it to the program cache.
 *
 * Sets up binding tables and uniform storage, invokes brw_compile_gs(), and
 * on success allocates scratch space (if the compile spilled) and stores
 * the result via brw_upload_cache().  Returns false on compile failure.
 */
bool
brw_codegen_gs_prog(struct brw_context *brw,
                    struct gl_shader_program *prog,
                    struct brw_geometry_program *gp,
                    struct brw_gs_prog_key *key)
{
   struct brw_compiler *compiler = brw->intelScreen->compiler;
   struct gl_shader *shader = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_prog_data prog_data;
   bool start_busy = false;
   double start_time = 0;

   memset(&prog_data, 0, sizeof(prog_data));

   assign_gs_binding_table_offsets(brw->intelScreen->devinfo, prog,
                                   &gp->program.Base, &prog_data);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    *
    * Note: param_count needs to be num_uniform_components * 4, since we add
    * padding around uniform values below vec4 size, so the worst case is that
    * every uniform is a float which gets padded to the size of a vec4.
    */
   struct gl_shader *gs = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   struct brw_shader *bgs = (struct brw_shader *) gs;
   int param_count = gp->program.Base.nir->num_uniforms;
   /* The vec4 backend pads each uniform out to a vec4; the scalar backend
    * does not.
    */
   if (!compiler->scalar_stage[MESA_SHADER_GEOMETRY])
      param_count *= 4;
   prog_data.base.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.base.image_param =
      rzalloc_array(NULL, struct brw_image_param, gs->NumImages);
   prog_data.base.base.nr_params = param_count;
   prog_data.base.base.nr_image_params = gs->NumImages;

   brw_nir_setup_glsl_uniforms(gp->program.Base.nir, prog, &gp->program.Base,
                               &prog_data.base.base,
                               compiler->scalar_stage[MESA_SHADER_GEOMETRY]);

   GLbitfield64 outputs_written = gp->program.Base.OutputsWritten;

   brw_compute_vue_map(brw->intelScreen->devinfo,
                       &prog_data.base.vue_map, outputs_written,
                       prog ? prog->SeparateShader : false);

   if (unlikely(INTEL_DEBUG & DEBUG_GS))
      brw_dump_ir("geometry", prog, gs, NULL);

   int st_index = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      st_index = brw_get_shader_time_index(brw, prog, NULL, ST_GS);

   /* Record GPU-busy state so we can report compile stalls below. */
   if (unlikely(brw->perf_debug)) {
      start_busy = brw->batch.last_bo && drm_intel_bo_busy(brw->batch.last_bo);
      start_time = get_time();
   }

   void *mem_ctx = ralloc_context(NULL);
   unsigned program_size;
   char *error_str;
   const unsigned *program =
      brw_compile_gs(brw->intelScreen->compiler, brw, mem_ctx, key,
                     &prog_data, shader->Program->nir, prog,
                     st_index, &program_size, &error_str);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug)) {
      if (bgs->compiled_once) {
         brw_gs_debug_recompile(brw, prog, key);
      }
      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
         perf_debug("GS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
      bgs->compiled_once = true;
   }

   /* Scratch space is used for register spilling */
   if (prog_data.base.base.total_scratch) {
      brw_get_scratch_bo(brw, &stage_state->scratch_bo,
                         prog_data.base.base.total_scratch *
                         brw->max_gs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_CACHE_GS_PROG,
                    key, sizeof(*key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &stage_state->prog_offset, &brw->gs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
/**
 * All Mesa program -> GPU code generation goes through this function.
 * Depending on the instructions used (i.e. flow control instructions)
 * we'll use one of two code generators.
 *
 * prog may be NULL for ARB fragment programs (no GLSL shader program).
 * On success the compiled program and its prog_data are stored in the
 * program cache; returns false and records the error in the shader
 * program's InfoLog (when prog is non-NULL) on compile failure.
 */
bool
brw_codegen_wm_prog(struct brw_context *brw,
                    struct gl_shader_program *prog,
                    struct brw_fragment_program *fp,
                    struct brw_wm_prog_key *key)
{
   struct gl_context *ctx = &brw->ctx;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_wm_prog_data prog_data;
   const GLuint *program;
   struct brw_shader *fs = NULL;
   GLuint program_size;
   bool start_busy = false;
   double start_time = 0;

   if (prog)
      fs = (struct brw_shader *)prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   memset(&prog_data, 0, sizeof(prog_data));
   /* Use ALT floating point mode for ARB programs so that 0^0 == 1. */
   if (!prog)
      prog_data.base.use_alt_mode = true;

   assign_fs_binding_table_offsets(brw->intelScreen->devinfo, prog,
                                   &fp->program.Base, key, &prog_data);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count = fp->program.Base.nir->num_uniforms;
   if (fs)
      prog_data.base.nr_image_params = fs->base.NumImages;
   /* The backend also sometimes adds params for texture size.
    */
   param_count += 2 * ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits;
   prog_data.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.image_param =
      rzalloc_array(NULL, struct brw_image_param,
                    prog_data.base.nr_image_params);
   prog_data.base.nr_params = param_count;

   if (prog) {
      brw_nir_setup_glsl_uniforms(fp->program.Base.nir, prog,
                                  &fp->program.Base,
                                  &prog_data.base, true);
   } else {
      brw_nir_setup_arb_uniforms(fp->program.Base.nir, &fp->program.Base,
                                 &prog_data.base);
   }

   /* Record GPU-busy state so we can report compile stalls below. */
   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    drm_intel_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      brw_dump_ir("fragment", prog, fs ? &fs->base : NULL, &fp->program.Base);

   int st_index8 = -1, st_index16 = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      st_index8 = brw_get_shader_time_index(brw, prog, &fp->program.Base,
                                            ST_FS8);
      st_index16 = brw_get_shader_time_index(brw, prog, &fp->program.Base,
                                             ST_FS16);
   }

   char *error_str = NULL;
   program = brw_compile_fs(brw->intelScreen->compiler, brw, mem_ctx,
                            key, &prog_data,
                            fp->program.Base.nir, &fp->program.Base,
                            st_index8, st_index16,
                            brw->use_rep_send, &program_size, &error_str);
   if (program == NULL) {
      if (prog) {
         prog->LinkStatus = false;
         ralloc_strcat(&prog->InfoLog, error_str);
      }

      _mesa_problem(NULL, "Failed to compile fragment shader: %s\n",
                    error_str);

      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug) && fs) {
      if (fs->compiled_once)
         brw_wm_debug_recompile(brw, prog, key);
      fs->compiled_once = true;

      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
         perf_debug("FS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
   }

   /* Scratch space for register spilling, sized for all WM threads. */
   if (prog_data.base.total_scratch) {
      brw_get_scratch_bo(brw, &brw->wm.base.scratch_bo,
                         prog_data.base.total_scratch * brw->max_wm_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      fprintf(stderr, "\n");

   brw_upload_cache(&brw->cache, BRW_CACHE_FS_PROG,
                    key, sizeof(struct brw_wm_prog_key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->wm.base.prog_offset, &brw->wm.prog_data);

   ralloc_free(mem_ctx);

   return true;
}
static void compile_ff_gs_prog(struct brw_context *brw, struct brw_ff_gs_prog_key *key) { struct brw_ff_gs_compile c; const GLuint *program; void *mem_ctx; GLuint program_size; memset(&c, 0, sizeof(c)); c.key = *key; c.vue_map = brw->vs.prog_data->base.vue_map; c.nr_regs = (c.vue_map.num_slots + 1)/2; mem_ctx = ralloc_context(NULL); /* Begin the compilation: */ brw_init_compile(brw, &c.func, mem_ctx); c.func.single_program_flow = 1; /* For some reason the thread is spawned with only 4 channels * unmasked. */ brw_set_mask_control(&c.func, BRW_MASK_DISABLE); if (brw->gen >= 6) { unsigned num_verts; bool check_edge_flag; /* On Sandybridge, we use the GS for implementing transform feedback * (called "Stream Out" in the PRM). */ switch (key->primitive) { case _3DPRIM_POINTLIST: num_verts = 1; check_edge_flag = false; break; case _3DPRIM_LINELIST: case _3DPRIM_LINESTRIP: case _3DPRIM_LINELOOP: num_verts = 2; check_edge_flag = false; break; case _3DPRIM_TRILIST: case _3DPRIM_TRIFAN: case _3DPRIM_TRISTRIP: case _3DPRIM_RECTLIST: num_verts = 3; check_edge_flag = false; break; case _3DPRIM_QUADLIST: case _3DPRIM_QUADSTRIP: case _3DPRIM_POLYGON: num_verts = 3; check_edge_flag = true; break; default: assert(!"Unexpected primitive type in Gen6 SOL program."); return; } gen6_sol_program(&c, key, num_verts, check_edge_flag); } else { /* On Gen4-5, we use the GS to decompose certain types of primitives. * Note that primitives which don't require a GS program have already * been weeded out by now. 
*/ switch (key->primitive) { case _3DPRIM_QUADLIST: brw_ff_gs_quads( &c, key ); break; case _3DPRIM_QUADSTRIP: brw_ff_gs_quad_strip( &c, key ); break; case _3DPRIM_LINELOOP: brw_ff_gs_lines( &c ); break; default: ralloc_free(mem_ctx); return; } } /* get the program */ program = brw_get_program(&c.func, &program_size); if (unlikely(INTEL_DEBUG & DEBUG_GS)) { int i; printf("gs:\n"); for (i = 0; i < program_size / sizeof(struct brw_instruction); i++) brw_disasm(stdout, &((struct brw_instruction *)program)[i], brw->gen); printf("\n"); } brw_upload_cache(&brw->cache, BRW_FF_GS_PROG, &c.key, sizeof(c.key), program, program_size, &c.prog_data, sizeof(c.prog_data), &brw->ff_gs.prog_offset, &brw->ff_gs.prog_data); ralloc_free(mem_ctx); }
/**
 * Compile the GLSL geometry shader for the given key and upload the kernel
 * plus prog_data to the program cache (BRW_CACHE_GS_PROG).
 *
 * Returns false (after appending to the shader InfoLog) if the backend
 * compile fails, true on success.  The param/pull_param/image_param arrays
 * allocated here are owned by the state cache, which frees them.
 */
static bool
brw_codegen_gs_prog(struct brw_context *brw,
                    struct brw_program *gp,
                    struct brw_gs_prog_key *key)
{
   struct brw_compiler *compiler = brw->screen->compiler;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_prog_data prog_data;
   bool start_busy = false;
   double start_time = 0;

   memset(&prog_data, 0, sizeof(prog_data));

   assign_gs_binding_table_offsets(devinfo, &gp->program, &prog_data);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    *
    * Note: param_count needs to be num_uniform_components * 4, since we add
    * padding around uniform values below vec4 size, so the worst case is that
    * every uniform is a float which gets padded to the size of a vec4.
    */
   int param_count = gp->program.nir->num_uniforms / 4;

   prog_data.base.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.base.image_param =
      rzalloc_array(NULL, struct brw_image_param,
                    gp->program.info.num_images);
   prog_data.base.base.nr_params = param_count;
   prog_data.base.base.nr_image_params = gp->program.info.num_images;

   brw_nir_setup_glsl_uniforms(gp->program.nir, &gp->program,
                               &prog_data.base.base,
                               compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
   brw_nir_analyze_ubo_ranges(compiler, gp->program.nir,
                              prog_data.base.base.ubo_ranges);

   uint64_t outputs_written = gp->program.info.outputs_written;

   brw_compute_vue_map(devinfo,
                       &prog_data.base.vue_map, outputs_written,
                       gp->program.info.separate_shader);

   /* Optional shader-time instrumentation slot (INTEL_DEBUG=shader_time). */
   int st_index = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      st_index = brw_get_shader_time_index(brw, &gp->program, ST_GS, true);

   /* Record GPU-busy state up front so a compile stall can be reported. */
   if (unlikely(brw->perf_debug)) {
      start_busy = brw->batch.last_bo && brw_bo_busy(brw->batch.last_bo);
      start_time = get_time();
   }

   void *mem_ctx = ralloc_context(NULL);
   unsigned program_size;
   char *error_str;
   const unsigned *program =
      brw_compile_gs(brw->screen->compiler, brw, mem_ctx, key,
                     &prog_data, gp->program.nir, &gp->program,
                     st_index, &program_size, &error_str);
   if (program == NULL) {
      /* Surface the backend error through the GL shader info log as well. */
      ralloc_strcat(&gp->program.sh.data->InfoLog, error_str);
      _mesa_problem(NULL, "Failed to compile geometry shader: %s\n",
                    error_str);

      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug)) {
      if (gp->compiled_once) {
         brw_gs_debug_recompile(brw, &gp->program, key);
      }
      if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
         perf_debug("GS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
      gp->compiled_once = true;
   }

   /* Scratch space is used for register spilling */
   brw_alloc_stage_scratch(brw, stage_state,
                           prog_data.base.base.total_scratch,
                           devinfo->max_gs_threads);

   brw_upload_cache(&brw->cache, BRW_CACHE_GS_PROG,
                    key, sizeof(*key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &stage_state->prog_offset, &brw->gs.base.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
static void compile_gs_prog( struct brw_context *brw, struct brw_gs_prog_key *key ) { struct intel_context *intel = &brw->intel; struct brw_gs_compile c; const GLuint *program; void *mem_ctx; GLuint program_size; /* Gen6: VF has already converted into polygon, and LINELOOP is * converted to LINESTRIP at the beginning of the 3D pipeline. */ if (intel->gen >= 6) return; memset(&c, 0, sizeof(c)); c.key = *key; /* Need to locate the two positions present in vertex + header. * These are currently hardcoded: */ c.nr_attrs = brw_count_bits(c.key.attrs); if (intel->gen >= 5) c.nr_regs = (c.nr_attrs + 1) / 2 + 3; /* are vertices packed, or reg-aligned? */ else c.nr_regs = (c.nr_attrs + 1) / 2 + 1; /* are vertices packed, or reg-aligned? */ c.nr_bytes = c.nr_regs * REG_SIZE; mem_ctx = NULL; /* Begin the compilation: */ brw_init_compile(brw, &c.func, mem_ctx); c.func.single_program_flow = 1; /* For some reason the thread is spawned with only 4 channels * unmasked. */ brw_set_mask_control(&c.func, BRW_MASK_DISABLE); /* Note that primitives which don't require a GS program have * already been weeded out by this stage: */ switch (key->primitive) { case GL_QUADS: brw_gs_quads( &c, key ); break; case GL_QUAD_STRIP: brw_gs_quad_strip( &c, key ); break; case GL_LINE_LOOP: brw_gs_lines( &c ); break; default: ralloc_free(mem_ctx); return; } /* get the program */ program = brw_get_program(&c.func, &program_size); if (unlikely(INTEL_DEBUG & DEBUG_GS)) { int i; printf("gs:\n"); for (i = 0; i < program_size / sizeof(struct brw_instruction); i++) brw_disasm(stdout, &((struct brw_instruction *)program)[i], intel->gen); printf("\n"); } brw_upload_cache(&brw->cache, BRW_GS_PROG, &c.key, sizeof(c.key), program, program_size, &c.prog_data, sizeof(c.prog_data), &brw->gs.prog_offset, &brw->gs.prog_data); ralloc_free(mem_ctx); }
/**
 * Compile the vertex program (GLSL or ARB; `prog` is NULL for fixed-function
 * and ARB programs) for the given key and upload kernel + prog_data to the
 * program cache (BRW_VS_PROG).  Returns false if codegen fails.
 *
 * The param/pull_param arrays allocated here are owned by and freed with
 * the state cache entry.
 */
static bool
do_vs_prog(struct brw_context *brw,
           struct gl_shader_program *prog,
           struct brw_vertex_program *vp,
           struct brw_vs_prog_key *key)
{
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   struct brw_vs_prog_data prog_data;
   void *mem_ctx;
   int i;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));
   memset(&prog_data, 0, sizeof(prog_data));

   mem_ctx = ralloc_context(NULL);

   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;
   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   /* We also upload clip plane data as uniforms */
   param_count += MAX_CLIP_PLANES * 4;

   prog_data.base.param = rzalloc_array(NULL, const float *, param_count);
   prog_data.base.pull_param = rzalloc_array(NULL, const float *, param_count);

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data.inputs_read = vp->program.Base.InputsRead;

   /* Copying the edge flag requires reading it as an input and writing the
    * VARYING_SLOT_EDGE output.
    */
   if (c.key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (c.key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   brw_compute_vue_map(brw, &prog_data.base.vue_map, outputs_written,
                       c.key.base.userclip_active);

   /* Dead debug hook: flip to if (1) locally to dump the Mesa IR. */
   if (0) {
      _mesa_fprint_program_opt(stdout, &c.vp->program.Base, PROG_PRINT_DEBUG,
                               true);
   }

   /* Emit GEN4 code.
    */
   program = brw_vs_emit(brw, prog, &c, &prog_data, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Surface count: later assignments override earlier ones, so the widest
    * requirement (UBOs > samplers > pull constants) wins.
    */
   if (prog_data.base.nr_pull_params)
      prog_data.base.num_surfaces = 1;
   if (c.vp->program.Base.SamplersUsed)
      prog_data.base.num_surfaces = SURF_INDEX_VS_TEXTURE(BRW_MAX_TEX_UNIT);
   if (prog && prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks) {
      prog_data.base.num_surfaces =
         SURF_INDEX_VS_UBO(prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks);
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Vertex shader triggered register spilling. "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      prog_data.base.total_scratch =
         brw_get_scratch_size(c.base.last_scratch*REG_SIZE);

      brw_get_scratch_bo(brw, &brw->vs.scratch_bo,
                         prog_data.base.total_scratch * brw->max_vs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_VS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->vs.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
/**
 * Compile the vertex program (older code path: prog_data lives inside the
 * brw_vs_compile struct) and upload kernel + prog_data + constant_map to the
 * program cache (BRW_VS_PROG).  Returns false if the GLSL emit path fails.
 *
 * Note the cache upload deliberately covers sizeof(c.prog_data) plus the
 * trailing constant_map bytes — the asserts below verify the two are packed
 * contiguously in the compile struct.
 */
static bool
do_vs_prog(struct brw_context *brw,
           struct gl_shader_program *prog,
           struct brw_vertex_program *vp,
           struct brw_vs_prog_key *key)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   void *mem_ctx;
   int aux_size;
   int i;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));

   mem_ctx = ralloc_context(NULL);

   brw_init_compile(brw, &c.func, mem_ctx);
   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

      /* We also upload clip plane data as uniforms */
      param_count += MAX_CLIP_PLANES * 4;
   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }

   c.prog_data.param = rzalloc_array(NULL, const float *, param_count);
   c.prog_data.pull_param = rzalloc_array(NULL, const float *, param_count);

   c.prog_data.outputs_written = vp->program.Base.OutputsWritten;
   c.prog_data.inputs_read = vp->program.Base.InputsRead;

   /* Copying the edge flag needs the input read and the EDGE result slot. */
   if (c.key.copy_edgeflag) {
      c.prog_data.outputs_written |= BITFIELD64_BIT(VERT_RESULT_EDGE);
      c.prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   /* Put dummy slots into the VUE for the SF to put the replaced
    * point sprite coords in.  We shouldn't need these dummy slots,
    * which take up precious URB space, but it would mean that the SF
    * doesn't get nice aligned pairs of input coords into output
    * coords, which would be a pain to handle.
    */
   for (i = 0; i < 8; i++) {
      if (c.key.point_coord_replace & (1 << i))
         c.prog_data.outputs_written |= BITFIELD64_BIT(VERT_RESULT_TEX0 + i);
   }

   brw_compute_vue_map(&c);

   /* Dead debug hook: flip to if (1) locally to dump the Mesa IR. */
   if (0) {
      _mesa_fprint_program_opt(stdout, &c.vp->program.Base, PROG_PRINT_DEBUG,
                               true);
   }

   /* Emit GEN4 code.
    */
   if (prog) {
      if (!brw_vs_emit(prog, &c)) {
         ralloc_free(mem_ctx);
         return false;
      }
   } else {
      /* Fixed-function / ARB program path. */
      brw_old_vs_emit(&c);
   }

   /* Surface count: later assignments override earlier ones, so the widest
    * requirement (UBOs > samplers > pull constants) wins.
    */
   if (c.prog_data.nr_pull_params)
      c.prog_data.num_surfaces = 1;
   if (c.vp->program.Base.SamplersUsed)
      c.prog_data.num_surfaces = SURF_INDEX_VS_TEXTURE(BRW_MAX_TEX_UNIT);
   if (prog &&
       prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks) {
      c.prog_data.num_surfaces =
         SURF_INDEX_VS_UBO(prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks);
   }

   /* Scratch space is used for register spilling */
   if (c.last_scratch) {
      perf_debug("Vertex shader triggered register spilling. "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      c.prog_data.total_scratch = brw_get_scratch_size(c.last_scratch);

      brw_get_scratch_bo(intel, &brw->vs.scratch_bo,
                         c.prog_data.total_scratch * brw->max_vs_threads);
   }

   /* get the program
    */
   program = brw_get_program(&c.func, &program_size);

   /* We upload from &c.prog_data including the constant_map assuming
    * they're packed together.  It would be nice to have a
    * compile-time assert macro here.
    */
   assert(c.constant_map == (int8_t *)&c.prog_data + sizeof(c.prog_data));
   assert(ctx->Const.VertexProgram.MaxNativeParameters ==
          ARRAY_SIZE(c.constant_map));
   (void) ctx;

   aux_size = sizeof(c.prog_data);
   /* constant_map */
   aux_size += c.vp->program.Base.Parameters->NumParameters;

   brw_upload_cache(&brw->cache, BRW_VS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
                    &c.prog_data, aux_size,
                    &brw->vs.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
/**
 * Compile the vertex shader (NIR-based path; `prog` is NULL for ARB vertex
 * programs, which use ALT float mode so 0^0 == 1) and upload the kernel plus
 * prog_data to the program cache (BRW_CACHE_VS_PROG).
 *
 * Returns false — setting prog->LinkStatus and the InfoLog when a GLSL
 * program is present — if the backend compile fails.  The uniform arrays
 * allocated here are owned by and freed with the state cache entry.
 */
bool
brw_codegen_vs_prog(struct brw_context *brw,
                    struct gl_shader_program *prog,
                    struct brw_vertex_program *vp,
                    struct brw_vs_prog_key *key)
{
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_prog_data prog_data;
   struct brw_stage_prog_data *stage_prog_data = &prog_data.base.base;
   void *mem_ctx;
   int i;
   struct brw_shader *vs = NULL;
   bool start_busy = false;
   double start_time = 0;

   if (prog)
      vs = (struct brw_shader *) prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&prog_data, 0, sizeof(prog_data));

   /* Use ALT floating point mode for ARB programs so that 0^0 == 1. */
   if (!prog)
      stage_prog_data->use_alt_mode = true;

   mem_ctx = ralloc_context(NULL);

   brw_assign_common_binding_table_offsets(MESA_SHADER_VERTEX,
                                           brw->intelScreen->devinfo,
                                           prog, &vp->program.Base,
                                           &prog_data.base.base, 0);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.  The vec4 backend pads each uniform to a vec4, so
    * only multiply by 4 when not using the scalar VS backend.
    */
   int param_count = vp->program.Base.nir->num_uniforms;
   if (!brw->intelScreen->compiler->scalar_vs)
      param_count *= 4;

   if (vs)
      prog_data.base.base.nr_image_params = vs->base.NumImages;

   /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
    * planes as uniforms.
    */
   param_count += key->nr_userclip_plane_consts * 4;

   stage_prog_data->param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   stage_prog_data->pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   stage_prog_data->image_param =
      rzalloc_array(NULL, struct brw_image_param,
                    stage_prog_data->nr_image_params);
   stage_prog_data->nr_params = param_count;

   if (prog) {
      brw_nir_setup_glsl_uniforms(vp->program.Base.nir, prog, &vp->program.Base,
                                  &prog_data.base.base,
                                  brw->intelScreen->compiler->scalar_vs);
   } else {
      brw_nir_setup_arb_uniforms(vp->program.Base.nir, &vp->program.Base,
                                 &prog_data.base.base);
   }

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data.inputs_read = vp->program.Base.InputsRead;

   /* Copying the edge flag requires reading the input and writing the
    * VARYING_SLOT_EDGE output.
    */
   if (key->copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (key->point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (key->nr_userclip_plane_consts > 0) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw->intelScreen->devinfo,
                       &prog_data.base.vue_map, outputs_written,
                       prog ? prog->SeparateShader : false);

   /* Dead debug hook: flip to if (1) locally to dump the Mesa IR. */
   if (0) {
      _mesa_fprint_program_opt(stderr, &vp->program.Base, PROG_PRINT_DEBUG,
                               true);
   }

   /* Record GPU-busy state up front so a compile stall can be reported. */
   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    drm_intel_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

   if (unlikely(INTEL_DEBUG & DEBUG_VS))
      brw_dump_ir("vertex", prog, vs ? &vs->base : NULL, &vp->program.Base);

   /* Optional shader-time instrumentation slot (INTEL_DEBUG=shader_time). */
   int st_index = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      st_index = brw_get_shader_time_index(brw, prog, &vp->program.Base, ST_VS);

   /* Emit GEN4 code.
    */
   char *error_str;
   program = brw_compile_vs(brw->intelScreen->compiler, brw, mem_ctx, key,
                            &prog_data, vp->program.Base.nir,
                            brw_select_clip_planes(&brw->ctx),
                            !_mesa_is_gles3(&brw->ctx),
                            st_index, &program_size, &error_str);
   if (program == NULL) {
      if (prog) {
         prog->LinkStatus = false;
         ralloc_strcat(&prog->InfoLog, error_str);
      }

      _mesa_problem(NULL, "Failed to compile vertex shader: %s\n", error_str);

      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug) && vs) {
      if (vs->compiled_once) {
         brw_vs_debug_recompile(brw, prog, key);
      }
      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
         perf_debug("VS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
      vs->compiled_once = true;
   }

   /* Scratch space is used for register spilling */
   if (prog_data.base.base.total_scratch) {
      brw_get_scratch_bo(brw, &brw->vs.base.scratch_bo,
                         prog_data.base.base.total_scratch *
                         brw->max_vs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_CACHE_VS_PROG,
                    key, sizeof(struct brw_vs_prog_key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->vs.base.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
static enum pipe_error sf_unit_create_from_key(struct brw_context *brw, struct brw_sf_unit_key *key, struct brw_winsys_reloc *reloc, struct brw_winsys_buffer **bo_out) { struct brw_sf_unit_state sf; enum pipe_error ret; int chipset_max_threads; memset(&sf, 0, sizeof(sf)); sf.thread0.grf_reg_count = align(key->total_grf, 16) / 16 - 1; /* reloc */ sf.thread0.kernel_start_pointer = 0; sf.thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754; sf.thread3.dispatch_grf_start_reg = 3; if (BRW_IS_IGDNG(brw)) sf.thread3.urb_entry_read_offset = 3; else sf.thread3.urb_entry_read_offset = 1; sf.thread3.urb_entry_read_length = key->urb_entry_read_length; sf.thread4.nr_urb_entries = key->nr_urb_entries; sf.thread4.urb_entry_allocation_size = key->sfsize - 1; /* Each SF thread produces 1 PUE, and there can be up to 24(Pre-IGDNG) or * 48(IGDNG) threads */ if (BRW_IS_IGDNG(brw)) chipset_max_threads = 48; else chipset_max_threads = 24; sf.thread4.max_threads = MIN2(chipset_max_threads, key->nr_urb_entries) - 1; if (BRW_DEBUG & DEBUG_SINGLE_THREAD) sf.thread4.max_threads = 0; if (BRW_DEBUG & DEBUG_STATS) sf.thread4.stats_enable = 1; /* CACHE_NEW_SF_VP */ /* reloc */ sf.sf5.sf_viewport_state_offset = 0; sf.sf5.viewport_transform = 1; if (key->scissor) sf.sf6.scissor = 1; if (key->front_ccw) sf.sf5.front_winding = BRW_FRONTWINDING_CCW; else sf.sf5.front_winding = BRW_FRONTWINDING_CW; switch (key->cull_face) { case PIPE_FACE_FRONT: sf.sf6.cull_mode = BRW_CULLMODE_FRONT; break; case PIPE_FACE_BACK: sf.sf6.cull_mode = BRW_CULLMODE_BACK; break; case PIPE_FACE_FRONT_AND_BACK: sf.sf6.cull_mode = BRW_CULLMODE_BOTH; break; case PIPE_FACE_NONE: sf.sf6.cull_mode = BRW_CULLMODE_NONE; break; default: assert(0); sf.sf6.cull_mode = BRW_CULLMODE_NONE; break; } /* _NEW_LINE */ /* XXX use ctx->Const.Min/MaxLineWidth here */ sf.sf6.line_width = CLAMP(key->line_width, 1.0, 5.0) * (1<<1); sf.sf6.line_endcap_aa_region_width = 1; if (key->line_smooth) sf.sf6.aa_enable = 1; else if (sf.sf6.line_width 
<= 0x2) sf.sf6.line_width = 0; /* XXX: gl_rasterization_rules? something else? */ sf.sf6.point_rast_rule = BRW_RASTRULE_UPPER_RIGHT; sf.sf6.point_rast_rule = BRW_RASTRULE_LOWER_RIGHT; sf.sf6.point_rast_rule = 1; /* XXX clamp max depends on AA vs. non-AA */ /* _NEW_POINT */ sf.sf7.sprite_point = key->point_sprite; sf.sf7.point_size = CLAMP(rint(key->point_size), 1, 255) * (1<<3); sf.sf7.use_point_size_state = !key->point_attenuated; sf.sf7.aa_line_distance_mode = 0; /* might be BRW_NEW_PRIMITIVE if we have to adjust pv for polygons: */ if (!key->flatshade_first) { sf.sf7.trifan_pv = 2; sf.sf7.linestrip_pv = 1; sf.sf7.tristrip_pv = 2; } else { sf.sf7.trifan_pv = 1; sf.sf7.linestrip_pv = 0; sf.sf7.tristrip_pv = 0; } sf.sf7.line_last_pixel_enable = key->line_last_pixel_enable; /* Set bias for OpenGL rasterization rules: */ if (key->gl_rasterization_rules) { sf.sf6.dest_org_vbias = 0x8; sf.sf6.dest_org_hbias = 0x8; } else { sf.sf6.dest_org_vbias = 0x0; sf.sf6.dest_org_hbias = 0x0; } ret = brw_upload_cache(&brw->cache, BRW_SF_UNIT, key, sizeof(*key), reloc, 2, &sf, sizeof(sf), NULL, NULL, bo_out); if (ret) return ret; return PIPE_OK; }
/**
 * All Mesa program -> GPU code generation goes through this function.
 * Depending on the instructions used (i.e. flow control instructions)
 * we'll use one of two code generators.
 *
 * `prog` is NULL for fixed-function/ARB fragment programs (which then use
 * ALT float mode so 0^0 == 1).  On success the kernel and prog_data are
 * uploaded to the program cache (BRW_CACHE_FS_PROG); the uniform arrays
 * allocated here are owned by and freed with the cache entry.
 */
bool
brw_codegen_wm_prog(struct brw_context *brw,
                    struct gl_shader_program *prog,
                    struct brw_fragment_program *fp,
                    struct brw_wm_prog_key *key)
{
   struct gl_context *ctx = &brw->ctx;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_wm_prog_data prog_data;
   const GLuint *program;
   struct gl_shader *fs = NULL;
   GLuint program_size;

   if (prog)
      fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   memset(&prog_data, 0, sizeof(prog_data));
   /* key->alpha_test_func means simulating alpha testing via discards,
    * so the shader definitely kills pixels.
    */
   prog_data.uses_kill = fp->program.UsesKill || key->alpha_test_func;

   prog_data.uses_omask =
      fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);

   prog_data.computed_depth_mode = computed_depth_mode(&fp->program);

   /* Use ALT floating point mode for ARB programs so that 0^0 == 1. */
   if (!prog)
      prog_data.base.use_alt_mode = true;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (fs) {
      param_count = fs->num_uniform_components;
   } else {
      param_count = fp->program.Base.Parameters->NumParameters * 4;
   }
   /* The backend also sometimes adds params for texture size.
    */
   param_count += 2 * ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits;

   prog_data.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.nr_params = param_count;

   prog_data.barycentric_interp_modes =
      brw_compute_barycentric_interp_modes(brw, key->flat_shade,
                                           key->persample_shading,
                                           &fp->program);

   program = brw_wm_fs_emit(brw, mem_ctx, key, &prog_data,
                            &fp->program, prog, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (prog_data.base.total_scratch) {
      brw_get_scratch_bo(brw, &brw->wm.base.scratch_bo,
                         prog_data.base.total_scratch * brw->max_wm_threads);
   }

   /* Trailing newline keeps the FS debug disassembly output readable. */
   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      fprintf(stderr, "\n");

   brw_upload_cache(&brw->cache, BRW_CACHE_FS_PROG,
                    key, sizeof(struct brw_wm_prog_key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->wm.base.prog_offset, &brw->wm.prog_data);

   ralloc_free(mem_ctx);

   return true;
}
/**
 * Creates the state cache entry for the given CC unit key.
 *
 * Packs stencil, blend/logic-op, alpha test, dither, and depth state into a
 * brw_cc_unit_state, uploads it to the cache (BRW_CC_UNIT), and emits the
 * relocation tying cc4 to the CC viewport BO.  Returns the cache BO.
 */
static dri_bo *
cc_unit_create_from_key(struct brw_context *brw, struct brw_cc_unit_key *key)
{
   struct brw_cc_unit_state cc;
   dri_bo *bo;

   memset(&cc, 0, sizeof(cc));

   /* _NEW_STENCIL */
   if (key->stencil) {
      /* Index 0 is the front face, index 1 the back face. */
      cc.cc0.stencil_enable = 1;
      cc.cc0.stencil_func =
	 intel_translate_compare_func(key->stencil_func[0]);
      cc.cc0.stencil_fail_op =
	 intel_translate_stencil_op(key->stencil_fail_op[0]);
      cc.cc0.stencil_pass_depth_fail_op =
	 intel_translate_stencil_op(key->stencil_pass_depth_fail_op[0]);
      cc.cc0.stencil_pass_depth_pass_op =
	 intel_translate_stencil_op(key->stencil_pass_depth_pass_op[0]);
      cc.cc1.stencil_ref = key->stencil_ref[0];
      cc.cc1.stencil_write_mask = key->stencil_write_mask[0];
      cc.cc1.stencil_test_mask = key->stencil_test_mask[0];

      if (key->stencil_two_side) {
	 cc.cc0.bf_stencil_enable = 1;
	 cc.cc0.bf_stencil_func =
	    intel_translate_compare_func(key->stencil_func[1]);
	 cc.cc0.bf_stencil_fail_op =
	    intel_translate_stencil_op(key->stencil_fail_op[1]);
	 cc.cc0.bf_stencil_pass_depth_fail_op =
	    intel_translate_stencil_op(key->stencil_pass_depth_fail_op[1]);
	 cc.cc0.bf_stencil_pass_depth_pass_op =
	    intel_translate_stencil_op(key->stencil_pass_depth_pass_op[1]);
	 cc.cc1.bf_stencil_ref = key->stencil_ref[1];
	 cc.cc2.bf_stencil_write_mask = key->stencil_write_mask[1];
	 cc.cc2.bf_stencil_test_mask = key->stencil_test_mask[1];
      }

      /* Not really sure about this:
       */
      if (key->stencil_write_mask[0] ||
	  (key->stencil_two_side && key->stencil_write_mask[1]))
	 cc.cc0.stencil_write_enable = 1;
   }

   /* _NEW_COLOR */
   /* Logic op takes priority over blending (they are mutually exclusive
    * in this state packet).
    */
   if (key->logic_op != GL_COPY) {
      cc.cc2.logicop_enable = 1;
      cc.cc5.logicop_func = intel_translate_logic_op(key->logic_op);
   } else if (key->color_blend) {
      GLenum eqRGB = key->blend_eq_rgb;
      GLenum eqA = key->blend_eq_a;
      GLenum srcRGB = key->blend_src_rgb;
      GLenum dstRGB = key->blend_dst_rgb;
      GLenum srcA = key->blend_src_a;
      GLenum dstA = key->blend_dst_a;

      /* MIN/MAX ignore the blend factors, so force them to GL_ONE. */
      if (eqRGB == GL_MIN || eqRGB == GL_MAX) {
	 srcRGB = dstRGB = GL_ONE;
      }

      if (eqA == GL_MIN || eqA == GL_MAX) {
	 srcA = dstA = GL_ONE;
      }

      cc.cc6.dest_blend_factor = brw_translate_blend_factor(dstRGB);
      cc.cc6.src_blend_factor = brw_translate_blend_factor(srcRGB);
      cc.cc6.blend_function = brw_translate_blend_equation(eqRGB);

      cc.cc5.ia_dest_blend_factor = brw_translate_blend_factor(dstA);
      cc.cc5.ia_src_blend_factor = brw_translate_blend_factor(srcA);
      cc.cc5.ia_blend_function = brw_translate_blend_equation(eqA);

      cc.cc3.blend_enable = 1;
      /* Independent-alpha blending only needed when alpha differs from RGB. */
      cc.cc3.ia_blend_enable = (srcA != srcRGB ||
				dstA != dstRGB ||
				eqA != eqRGB);
   }

   if (key->alpha_enabled) {
      cc.cc3.alpha_test = 1;
      cc.cc3.alpha_test_func = intel_translate_compare_func(key->alpha_func);
      cc.cc3.alpha_test_format = BRW_ALPHATEST_FORMAT_UNORM8;

      UNCLAMPED_FLOAT_TO_UBYTE(cc.cc7.alpha_ref.ub[0], key->alpha_ref);
   }

   if (key->dither) {
      cc.cc5.dither_enable = 1;
      cc.cc6.y_dither_offset = 0;
      cc.cc6.x_dither_offset = 0;
   }

   /* _NEW_DEPTH */
   if (key->depth_test) {
      cc.cc2.depth_test = 1;
      cc.cc2.depth_test_function =
	 intel_translate_compare_func(key->depth_func);
      cc.cc2.depth_write_enable = key->depth_write;
   }

   /* CACHE_NEW_CC_VP */
   cc.cc4.cc_viewport_state_offset = brw->cc.vp_bo->offset >> 5; /* reloc */

   if (INTEL_DEBUG & DEBUG_STATS)
      cc.cc5.statistics_enable = 1;

   bo = brw_upload_cache(&brw->cache, BRW_CC_UNIT,
			 key, sizeof(*key),
			 &brw->cc.vp_bo, 1,
			 &cc, sizeof(cc),
			 NULL, NULL);

   /* Emit CC viewport relocation */
   dri_bo_emit_reloc(bo,
		     I915_GEM_DOMAIN_INSTRUCTION,
		     0,
		     0,
		     offsetof(struct brw_cc_unit_state, cc4),
		     brw->cc.vp_bo);

   return bo;
}
/**
 * Compile the vertex program (variant using stage_prog_data and key-based
 * user clip plane counts) and upload kernel + prog_data to the program
 * cache (BRW_VS_PROG).  `prog` is NULL for fixed-function/ARB programs.
 * Returns false if codegen fails.
 */
static bool
do_vs_prog(struct brw_context *brw,
           struct gl_shader_program *prog,
           struct brw_vertex_program *vp,
           struct brw_vs_prog_key *key)
{
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   struct brw_vs_prog_data prog_data;
   struct brw_stage_prog_data *stage_prog_data = &prog_data.base.base;
   void *mem_ctx;
   int i;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));
   memset(&prog_data, 0, sizeof(prog_data));

   mem_ctx = ralloc_context(NULL);

   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;
   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
    * planes as uniforms.
    */
   param_count += c.key.base.nr_userclip_plane_consts * 4;

   stage_prog_data->param = rzalloc_array(NULL, const float *, param_count);
   stage_prog_data->pull_param = rzalloc_array(NULL, const float *, param_count);

   /* Setting nr_params here NOT to the size of the param and pull_param
    * arrays, but to the number of uniform components vec4_visitor
    * needs. vec4_visitor::setup_uniforms() will set it back to a proper value.
    */
   stage_prog_data->nr_params = ALIGN(param_count, 4) / 4;
   if (vs) {
      stage_prog_data->nr_params += vs->num_samplers;
   }

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data.inputs_read = vp->program.Base.InputsRead;

   /* Copying the edge flag requires reading the input and writing the
    * VARYING_SLOT_EDGE output.
    */
   if (c.key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (c.key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (c.key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw, &prog_data.base.vue_map, outputs_written);

   /* Dead debug hook: flip to if (1) locally to dump the Mesa IR. */
   if (0) {
      _mesa_fprint_program_opt(stderr, &c.vp->program.Base, PROG_PRINT_DEBUG,
                               true);
   }

   /* Emit GEN4 code.
    */
   program = brw_vs_emit(brw, prog, &c, &prog_data, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Vertex shader triggered register spilling. "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      prog_data.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);

      brw_get_scratch_bo(brw, &brw->vs.base.scratch_bo,
			 prog_data.base.total_scratch * brw->max_vs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_VS_PROG,
		    &c.key, sizeof(c.key),
		    program, program_size,
		    &prog_data, sizeof(prog_data),
		    &brw->vs.base.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
/** * Sets up a surface state structure to point at the given region. * While it is only used for the front/back buffer currently, it should be * usable for further buffers when doing ARB_draw_buffer support. */ static void brw_update_renderbuffer_surface(struct brw_context *brw, struct gl_renderbuffer *rb, unsigned int unit) { struct intel_context *intel = &brw->intel; GLcontext *ctx = &intel->ctx; dri_bo *region_bo = NULL; struct intel_renderbuffer *irb = intel_renderbuffer(rb); struct intel_region *region = irb ? irb->region : NULL; struct { unsigned int surface_type; unsigned int surface_format; unsigned int width, height, pitch, cpp; GLubyte color_mask[4]; GLboolean color_blend; uint32_t tiling; uint32_t draw_x; uint32_t draw_y; } key; memset(&key, 0, sizeof(key)); if (region != NULL) { region_bo = region->buffer; key.surface_type = BRW_SURFACE_2D; switch (irb->Base.Format) { /* XRGB and ARGB are treated the same here because the chips in this * family cannot render to XRGB targets. This means that we have to * mask writes to alpha (ala glColorMask) and reconfigure the alpha * blending hardware to use GL_ONE (or GL_ZERO) for cases where * GL_DST_ALPHA (or GL_ONE_MINUS_DST_ALPHA) is used. 
*/ case MESA_FORMAT_ARGB8888: case MESA_FORMAT_XRGB8888: key.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM; break; case MESA_FORMAT_RGB565: key.surface_format = BRW_SURFACEFORMAT_B5G6R5_UNORM; break; case MESA_FORMAT_ARGB1555: key.surface_format = BRW_SURFACEFORMAT_B5G5R5A1_UNORM; break; case MESA_FORMAT_ARGB4444: key.surface_format = BRW_SURFACEFORMAT_B4G4R4A4_UNORM; break; default: _mesa_problem(ctx, "Bad renderbuffer format: %d\n", irb->Base.Format); } key.tiling = region->tiling; if (brw->intel.intelScreen->driScrnPriv->dri2.enabled) { key.width = rb->Width; key.height = rb->Height; } else { key.width = region->width; key.height = region->height; } key.pitch = region->pitch; key.cpp = region->cpp; key.draw_x = region->draw_x; key.draw_y = region->draw_y; } else { key.surface_type = BRW_SURFACE_NULL; key.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM; key.tiling = I915_TILING_X; key.width = 1; key.height = 1; key.cpp = 4; key.draw_x = 0; key.draw_y = 0; } if (intel->gen < 6) { /* _NEW_COLOR */ memcpy(key.color_mask, ctx->Color.ColorMask[unit], sizeof(key.color_mask)); /* As mentioned above, disable writes to the alpha component when the * renderbuffer is XRGB. 
*/ if (ctx->DrawBuffer->Visual.alphaBits == 0) key.color_mask[3] = GL_FALSE; key.color_blend = (!ctx->Color._LogicOpEnabled && (ctx->Color.BlendEnabled & (1 << unit))); } dri_bo_unreference(brw->wm.surf_bo[unit]); brw->wm.surf_bo[unit] = brw_search_cache(&brw->surface_cache, BRW_SS_SURFACE, &key, sizeof(key), ®ion_bo, 1, NULL); if (brw->wm.surf_bo[unit] == NULL) { struct brw_surface_state surf; memset(&surf, 0, sizeof(surf)); surf.ss0.surface_format = key.surface_format; surf.ss0.surface_type = key.surface_type; if (key.tiling == I915_TILING_NONE) { surf.ss1.base_addr = (key.draw_x + key.draw_y * key.pitch) * key.cpp; } else { uint32_t tile_base, tile_x, tile_y; uint32_t pitch = key.pitch * key.cpp; if (key.tiling == I915_TILING_X) { tile_x = key.draw_x % (512 / key.cpp); tile_y = key.draw_y % 8; tile_base = ((key.draw_y / 8) * (8 * pitch)); tile_base += (key.draw_x - tile_x) / (512 / key.cpp) * 4096; } else { /* Y */ tile_x = key.draw_x % (128 / key.cpp); tile_y = key.draw_y % 32; tile_base = ((key.draw_y / 32) * (32 * pitch)); tile_base += (key.draw_x - tile_x) / (128 / key.cpp) * 4096; } assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0)); assert(tile_x % 4 == 0); assert(tile_y % 2 == 0); /* Note that the low bits of these fields are missing, so * there's the possibility of getting in trouble. 
*/ surf.ss1.base_addr = tile_base; surf.ss5.x_offset = tile_x / 4; surf.ss5.y_offset = tile_y / 2; } if (region_bo != NULL) surf.ss1.base_addr += region_bo->offset; /* reloc */ surf.ss2.width = key.width - 1; surf.ss2.height = key.height - 1; brw_set_surface_tiling(&surf, key.tiling); surf.ss3.pitch = (key.pitch * key.cpp) - 1; if (intel->gen < 6) { /* _NEW_COLOR */ surf.ss0.color_blend = key.color_blend; surf.ss0.writedisable_red = !key.color_mask[0]; surf.ss0.writedisable_green = !key.color_mask[1]; surf.ss0.writedisable_blue = !key.color_mask[2]; surf.ss0.writedisable_alpha = !key.color_mask[3]; } /* Key size will never match key size for textures, so we're safe. */ brw->wm.surf_bo[unit] = brw_upload_cache(&brw->surface_cache, BRW_SS_SURFACE, &key, sizeof(key), ®ion_bo, 1, &surf, sizeof(surf)); if (region_bo != NULL) { /* We might sample from it, and we might render to it, so flag * them both. We might be able to figure out from other state * a more restrictive relocation to emit. */ drm_intel_bo_emit_reloc(brw->wm.surf_bo[unit], offsetof(struct brw_surface_state, ss1), region_bo, surf.ss1.base_addr - region_bo->offset, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER); } }
static void compile_sf_prog( struct brw_context *brw, struct brw_sf_prog_key *key ) { struct brw_sf_compile c; const GLuint *program; void *mem_ctx; GLuint program_size; GLuint i; memset(&c, 0, sizeof(c)); mem_ctx = ralloc_context(NULL); /* Begin the compilation: */ brw_init_compile(brw, &c.func, mem_ctx); c.key = *key; c.vue_map = brw->vue_map_geom_out; if (c.key.do_point_coord) { /* * gl_PointCoord is a FS instead of VS builtin variable, thus it's * not included in c.vue_map generated in VS stage. Here we add * it manually to let SF shader generate the needed interpolation * coefficient for FS shader. */ c.vue_map.varying_to_slot[BRW_VARYING_SLOT_PNTC] = c.vue_map.num_slots; c.vue_map.slot_to_varying[c.vue_map.num_slots++] = BRW_VARYING_SLOT_PNTC; } c.urb_entry_read_offset = BRW_SF_URB_ENTRY_READ_OFFSET; c.nr_attr_regs = (c.vue_map.num_slots + 1)/2 - c.urb_entry_read_offset; c.nr_setup_regs = c.nr_attr_regs; c.prog_data.urb_read_length = c.nr_attr_regs; c.prog_data.urb_entry_size = c.nr_setup_regs * 2; c.has_flat_shading = brw_any_flat_varyings(&key->interpolation_mode); /* Which primitive? Or all three? 
*/ switch (key->primitive) { case SF_TRIANGLES: c.nr_verts = 3; brw_emit_tri_setup( &c, true ); break; case SF_LINES: c.nr_verts = 2; brw_emit_line_setup( &c, true ); break; case SF_POINTS: c.nr_verts = 1; if (key->do_point_sprite) brw_emit_point_sprite_setup( &c, true ); else brw_emit_point_setup( &c, true ); break; case SF_UNFILLED_TRIS: c.nr_verts = 3; brw_emit_anyprim_setup( &c ); break; default: assert(0); return; } /* get the program */ program = brw_get_program(&c.func, &program_size); if (unlikely(INTEL_DEBUG & DEBUG_SF)) { printf("sf:\n"); for (i = 0; i < program_size / sizeof(struct brw_instruction); i++) brw_disasm(stdout, &((struct brw_instruction *)program)[i], brw->gen); printf("\n"); } brw_upload_cache(&brw->cache, BRW_SF_PROG, &c.key, sizeof(c.key), program, program_size, &c.prog_data, sizeof(c.prog_data), &brw->sf.prog_offset, &brw->sf.prog_data); ralloc_free(mem_ctx); }
static bool brw_codegen_cs_prog(struct brw_context *brw, struct gl_shader_program *prog, struct brw_compute_program *cp, struct brw_cs_prog_key *key) { struct gl_context *ctx = &brw->ctx; const GLuint *program; void *mem_ctx = ralloc_context(NULL); GLuint program_size; struct brw_cs_prog_data prog_data; bool start_busy = false; double start_time = 0; struct brw_shader *cs = (struct brw_shader *) prog->_LinkedShaders[MESA_SHADER_COMPUTE]; assert (cs); memset(&prog_data, 0, sizeof(prog_data)); if (prog->Comp.SharedSize > 64 * 1024) { prog->LinkStatus = false; const char *error_str = "Compute shader used more than 64KB of shared variables"; ralloc_strcat(&prog->InfoLog, error_str); _mesa_problem(NULL, "Failed to link compute shader: %s\n", error_str); ralloc_free(mem_ctx); return false; } else { prog_data.base.total_shared = prog->Comp.SharedSize; } assign_cs_binding_table_offsets(brw->intelScreen->devinfo, prog, &cp->program.Base, &prog_data); /* Allocate the references to the uniforms that will end up in the * prog_data associated with the compiled program, and which will be freed * by the state cache. */ int param_count = cp->program.Base.nir->num_uniforms; /* The backend also sometimes adds params for texture size. 
*/ param_count += 2 * ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits; prog_data.base.param = rzalloc_array(NULL, const gl_constant_value *, param_count); prog_data.base.pull_param = rzalloc_array(NULL, const gl_constant_value *, param_count); prog_data.base.image_param = rzalloc_array(NULL, struct brw_image_param, cs->base.NumImages); prog_data.base.nr_params = param_count; prog_data.base.nr_image_params = cs->base.NumImages; brw_nir_setup_glsl_uniforms(cp->program.Base.nir, prog, &cp->program.Base, &prog_data.base, true); if (unlikely(brw->perf_debug)) { start_busy = (brw->batch.last_bo && drm_intel_bo_busy(brw->batch.last_bo)); start_time = get_time(); } if (unlikely(INTEL_DEBUG & DEBUG_CS)) brw_dump_ir("compute", prog, &cs->base, &cp->program.Base); int st_index = -1; if (INTEL_DEBUG & DEBUG_SHADER_TIME) st_index = brw_get_shader_time_index(brw, prog, &cp->program.Base, ST_CS); char *error_str; program = brw_compile_cs(brw->intelScreen->compiler, brw, mem_ctx, key, &prog_data, cp->program.Base.nir, st_index, &program_size, &error_str); if (program == NULL) { prog->LinkStatus = false; ralloc_strcat(&prog->InfoLog, error_str); _mesa_problem(NULL, "Failed to compile compute shader: %s\n", error_str); ralloc_free(mem_ctx); return false; } if (unlikely(brw->perf_debug) && cs) { if (cs->compiled_once) { _mesa_problem(&brw->ctx, "CS programs shouldn't need recompiles"); } cs->compiled_once = true; if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) { perf_debug("CS compile took %.03f ms and stalled the GPU\n", (get_time() - start_time) * 1000); } } if (prog_data.base.total_scratch) { brw_get_scratch_bo(brw, &brw->cs.base.scratch_bo, prog_data.base.total_scratch * brw->max_cs_threads); } if (unlikely(INTEL_DEBUG & DEBUG_CS)) fprintf(stderr, "\n"); brw_upload_cache(&brw->cache, BRW_CACHE_CS_PROG, key, sizeof(*key), program, program_size, &prog_data, sizeof(prog_data), &brw->cs.base.prog_offset, &brw->cs.prog_data); ralloc_free(mem_ctx); return true; }
/** * All Mesa program -> GPU code generation goes through this function. * Depending on the instructions used (i.e. flow control instructions) * we'll use one of two code generators. */ bool do_wm_prog(struct brw_context *brw, struct gl_shader_program *prog, struct brw_fragment_program *fp, struct brw_wm_prog_key *key) { struct brw_wm_compile *c; const GLuint *program; struct gl_shader *fs = NULL; GLuint program_size; if (prog) fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]; c = rzalloc(NULL, struct brw_wm_compile); /* Allocate the references to the uniforms that will end up in the * prog_data associated with the compiled program, and which will be freed * by the state cache. */ int param_count; if (fs) { param_count = fs->num_uniform_components; } else { param_count = fp->program.Base.Parameters->NumParameters * 4; } /* The backend also sometimes adds params for texture size. */ param_count += 2 * BRW_MAX_TEX_UNIT; c->prog_data.param = rzalloc_array(NULL, const float *, param_count); c->prog_data.pull_param = rzalloc_array(NULL, const float *, param_count); memcpy(&c->key, key, sizeof(*key)); c->prog_data.barycentric_interp_modes = brw_compute_barycentric_interp_modes(brw, c->key.flat_shade, &fp->program); program = brw_wm_fs_emit(brw, c, &fp->program, prog, &program_size); if (program == NULL) return false; /* Scratch space is used for register spilling */ if (c->last_scratch) { perf_debug("Fragment shader triggered register spilling. " "Try reducing the number of live scalar values to " "improve performance.\n"); c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch); brw_get_scratch_bo(brw, &brw->wm.scratch_bo, c->prog_data.total_scratch * brw->max_wm_threads); } if (unlikely(INTEL_DEBUG & DEBUG_WM)) fprintf(stderr, "\n"); brw_upload_cache(&brw->cache, BRW_WM_PROG, &c->key, sizeof(c->key), program, program_size, &c->prog_data, sizeof(c->prog_data), &brw->wm.prog_offset, &brw->wm.prog_data); ralloc_free(c); return true; }
/**
 * Compile the fragment shader @fp for @key and upload the result to the
 * program cache (brw->wm.base.prog_offset / brw->wm.base.prog_data).
 *
 * Works on a clone of the program's NIR so the stored NIR is left untouched.
 * Returns false on compile failure; for GLSL programs the error is also
 * recorded in the shader program's InfoLog.
 */
static bool
brw_codegen_wm_prog(struct brw_context *brw,
                    struct brw_program *fp,
                    struct brw_wm_prog_key *key,
                    struct brw_vue_map *vue_map)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_wm_prog_data prog_data;
   const GLuint *program;
   bool start_busy = false;
   double start_time = 0;

   /* Compile from a clone: lowering/optimization must not mutate the NIR
    * kept on the gl_program, which may be recompiled with other keys.
    */
   nir_shader *nir = nir_shader_clone(mem_ctx, fp->program.nir);

   memset(&prog_data, 0, sizeof(prog_data));

   /* Use ALT floating point mode for ARB programs so that 0^0 == 1. */
   if (fp->program.is_arb_asm)
      prog_data.base.use_alt_mode = true;

   assign_fs_binding_table_offsets(devinfo, &fp->program, key, &prog_data);

   /* Uniform storage is set up differently for GLSL vs. ARB assembly
    * programs; both allocate out of mem_ctx for now (see ralloc_steal below).
    */
   if (!fp->program.is_arb_asm) {
      brw_nir_setup_glsl_uniforms(mem_ctx, nir, &fp->program,
                                  &prog_data.base, true);
      brw_nir_analyze_ubo_ranges(brw->screen->compiler, nir,
                                 NULL, prog_data.base.ubo_ranges);
   } else {
      brw_nir_setup_arb_uniforms(mem_ctx, nir, &fp->program,
                                 &prog_data.base);

      if (unlikely(INTEL_DEBUG & DEBUG_WM))
         brw_dump_arb_asm("fragment", &fp->program);
   }

   /* Snapshot GPU-busy state and wall time so we can report compiles that
    * stalled the GPU (perf_debug below).
    */
   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    brw_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

   /* One shader-time index per SIMD width the backend may emit. */
   int st_index8 = -1, st_index16 = -1, st_index32 = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      st_index8 = brw_get_shader_time_index(brw, &fp->program, ST_FS8,
                                            !fp->program.is_arb_asm);
      st_index16 = brw_get_shader_time_index(brw, &fp->program, ST_FS16,
                                             !fp->program.is_arb_asm);
      st_index32 = brw_get_shader_time_index(brw, &fp->program, ST_FS32,
                                             !fp->program.is_arb_asm);
   }

   char *error_str = NULL;
   program = brw_compile_fs(brw->screen->compiler, brw, mem_ctx,
                            key, &prog_data, nir,
                            &fp->program, st_index8, st_index16, st_index32,
                            true, false, vue_map,
                            &error_str);

   if (program == NULL) {
      /* GLSL programs get the error in the InfoLog; ARB programs have no
       * link status to update.
       */
      if (!fp->program.is_arb_asm) {
         fp->program.sh.data->LinkStatus = LINKING_FAILURE;
         ralloc_strcat(&fp->program.sh.data->InfoLog, error_str);
      }

      _mesa_problem(NULL, "Failed to compile fragment shader: %s\n", error_str);

      /* error_str and all intermediate allocations live on mem_ctx. */
      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug)) {
      if (fp->compiled_once) {
         brw_debug_recompile(brw, MESA_SHADER_FRAGMENT, fp->program.Id,
                             key->program_string_id, key);
      }
      fp->compiled_once = true;

      if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
         perf_debug("FS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
   }

   brw_alloc_stage_scratch(brw, &brw->wm.base, prog_data.base.total_scratch);

   if (unlikely((INTEL_DEBUG & DEBUG_WM) && fp->program.is_arb_asm))
      fprintf(stderr, "\n");

   /* The param and pull_param arrays will be freed by the shader cache. */
   ralloc_steal(NULL, prog_data.base.param);
   ralloc_steal(NULL, prog_data.base.pull_param);

   brw_upload_cache(&brw->cache, BRW_CACHE_FS_PROG,
                    key, sizeof(struct brw_wm_prog_key),
                    program, prog_data.base.program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->wm.base.prog_offset, &brw->wm.base.prog_data);

   ralloc_free(mem_ctx);

   return true;
}
static void compile_sf_prog( struct brw_context *brw, struct brw_sf_prog_key *key ) { GLcontext *ctx = &brw->intel.ctx; struct brw_sf_compile c; const GLuint *program; GLuint program_size; GLuint i, idx; memset(&c, 0, sizeof(c)); /* Begin the compilation: */ brw_init_compile(brw, &c.func); c.key = *key; c.nr_attrs = brw_count_bits(c.key.attrs); c.nr_attr_regs = (c.nr_attrs+1)/2; c.nr_setup_attrs = brw_count_bits(c.key.attrs & DO_SETUP_BITS); c.nr_setup_regs = (c.nr_setup_attrs+1)/2; c.prog_data.urb_read_length = c.nr_attr_regs; c.prog_data.urb_entry_size = c.nr_setup_regs * 2; /* Construct map from attribute number to position in the vertex. */ for (i = idx = 0; i < VERT_RESULT_MAX; i++) if (c.key.attrs & (1<<i)) { c.attr_to_idx[i] = idx; c.idx_to_attr[idx] = i; if (i >= VERT_RESULT_TEX0 && i <= VERT_RESULT_TEX7) { c.point_attrs[i].CoordReplace = ctx->Point.CoordReplace[i - VERT_RESULT_TEX0]; } else { c.point_attrs[i].CoordReplace = GL_FALSE; } idx++; } /* Which primitive? Or all three? */ switch (key->primitive) { case SF_TRIANGLES: c.nr_verts = 3; brw_emit_tri_setup( &c, GL_TRUE ); break; case SF_LINES: c.nr_verts = 2; brw_emit_line_setup( &c, GL_TRUE ); break; case SF_POINTS: c.nr_verts = 1; if (key->do_point_sprite) brw_emit_point_sprite_setup( &c, GL_TRUE ); else brw_emit_point_setup( &c, GL_TRUE ); break; case SF_UNFILLED_TRIS: c.nr_verts = 3; brw_emit_anyprim_setup( &c ); break; default: assert(0); return; } /* get the program */ program = brw_get_program(&c.func, &program_size); /* Upload */ dri_bo_unreference(brw->sf.prog_bo); brw->sf.prog_bo = brw_upload_cache( &brw->cache, BRW_SF_PROG, &c.key, sizeof(c.key), NULL, 0, program, program_size, &c.prog_data, &brw->sf.prog_data ); }