void vl_idct_stage2_frag_shader(struct vl_idct *idct, struct ureg_program *shader, unsigned first_input, struct ureg_dst fragment) { struct ureg_src l_addr[2], r_addr[2]; struct ureg_dst l[2], r[2]; --first_input; l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input + VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR); l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input + VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR); r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input + VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR); r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input + VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR); l[0] = ureg_DECL_temporary(shader); l[1] = ureg_DECL_temporary(shader); r[0] = ureg_DECL_temporary(shader); r[1] = ureg_DECL_temporary(shader); fetch_four(shader, l, l_addr, ureg_DECL_sampler(shader, 1), false); fetch_four(shader, r, r_addr, ureg_DECL_sampler(shader, 0), true); matrix_mul(shader, fragment, l, r); ureg_release_temporary(shader, l[0]); ureg_release_temporary(shader, l[1]); ureg_release_temporary(shader, r[0]); ureg_release_temporary(shader, r[1]); }
static INLINE void xrender_tex(struct ureg_program *ureg, struct ureg_dst dst, struct ureg_src coords, struct ureg_src sampler, struct ureg_src imm0, boolean repeat_none, boolean swizzle, boolean set_alpha) { if (repeat_none) { struct ureg_dst tmp0 = ureg_DECL_temporary(ureg); struct ureg_dst tmp1 = ureg_DECL_temporary(ureg); ureg_SGT(ureg, tmp1, ureg_swizzle(coords, TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y), ureg_scalar(imm0, TGSI_SWIZZLE_X)); ureg_SLT(ureg, tmp0, ureg_swizzle(coords, TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y), ureg_scalar(imm0, TGSI_SWIZZLE_W)); ureg_MIN(ureg, tmp0, ureg_src(tmp0), ureg_src(tmp1)); ureg_MIN(ureg, tmp0, ureg_scalar(ureg_src(tmp0), TGSI_SWIZZLE_X), ureg_scalar(ureg_src(tmp0), TGSI_SWIZZLE_Y)); ureg_TEX(ureg, tmp1, TGSI_TEXTURE_2D, coords, sampler); if (swizzle) ureg_MOV(ureg, tmp1, ureg_swizzle(ureg_src(tmp1), TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X, TGSI_SWIZZLE_W)); if (set_alpha) ureg_MOV(ureg, ureg_writemask(tmp1, TGSI_WRITEMASK_W), ureg_scalar(imm0, TGSI_SWIZZLE_W)); ureg_MUL(ureg, dst, ureg_src(tmp1), ureg_src(tmp0)); ureg_release_temporary(ureg, tmp0); ureg_release_temporary(ureg, tmp1); } else { if (swizzle) { struct ureg_dst tmp = ureg_DECL_temporary(ureg); ureg_TEX(ureg, tmp, TGSI_TEXTURE_2D, coords, sampler); ureg_MOV(ureg, dst, ureg_swizzle(ureg_src(tmp), TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X, TGSI_SWIZZLE_W)); ureg_release_temporary(ureg, tmp); } else { ureg_TEX(ureg, dst, TGSI_TEXTURE_2D, coords, sampler); } if (set_alpha) ureg_MOV(ureg, ureg_writemask(dst, TGSI_WRITEMASK_W), ureg_scalar(imm0, TGSI_SWIZZLE_W)); } }
static void * create_yuv_shader(struct pipe_context *pipe, struct ureg_program *ureg) { struct ureg_src y_sampler, u_sampler, v_sampler; struct ureg_src pos; struct ureg_src matrow0, matrow1, matrow2; struct ureg_dst y, u, v, rgb; struct ureg_dst out = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0); pos = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_GENERIC, 0, TGSI_INTERPOLATE_PERSPECTIVE); rgb = ureg_DECL_temporary(ureg); y = ureg_DECL_temporary(ureg); u = ureg_DECL_temporary(ureg); v = ureg_DECL_temporary(ureg); y_sampler = ureg_DECL_sampler(ureg, 0); u_sampler = ureg_DECL_sampler(ureg, 1); v_sampler = ureg_DECL_sampler(ureg, 2); matrow0 = ureg_DECL_constant(ureg, 0); matrow1 = ureg_DECL_constant(ureg, 1); matrow2 = ureg_DECL_constant(ureg, 2); ureg_TEX(ureg, y, TGSI_TEXTURE_2D, pos, y_sampler); ureg_TEX(ureg, u, TGSI_TEXTURE_2D, pos, u_sampler); ureg_TEX(ureg, v, TGSI_TEXTURE_2D, pos, v_sampler); ureg_SUB(ureg, u, ureg_src(u), ureg_scalar(matrow0, TGSI_SWIZZLE_W)); ureg_SUB(ureg, v, ureg_src(v), ureg_scalar(matrow0, TGSI_SWIZZLE_W)); ureg_MUL(ureg, rgb, ureg_scalar(ureg_src(y), TGSI_SWIZZLE_X), matrow0); ureg_MAD(ureg, rgb, ureg_scalar(ureg_src(u), TGSI_SWIZZLE_X), matrow1, ureg_src(rgb)); ureg_MAD(ureg, rgb, ureg_scalar(ureg_src(v), TGSI_SWIZZLE_X), matrow2, ureg_src(rgb)); /* rgb.a = 1; */ ureg_MOV(ureg, ureg_writemask(rgb, TGSI_WRITEMASK_W), ureg_scalar(matrow0, TGSI_SWIZZLE_X)); ureg_MOV(ureg, out, ureg_src(rgb)); ureg_release_temporary(ureg, rgb); ureg_release_temporary(ureg, y); ureg_release_temporary(ureg, u); ureg_release_temporary(ureg, v); ureg_END(ureg); return ureg_create_shader_and_destroy(ureg, pipe); }
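/*
 * CPU reference of the arithmetic create_yuv_shader emits (an illustrative
 * sketch only; yuv_to_rgb_ref is a hypothetical helper, and matrow0..2 hold
 * whatever CSC rows the host uploads as constants 0..2, which are not shown
 * here). The shader's own comment expects matrow0.x to be 1.0, so the final
 * move yields rgb.a = 1.
 */
static void yuv_to_rgb_ref(const float y[4], const float u[4], const float v[4],
                           const float matrow0[4], const float matrow1[4],
                           const float matrow2[4], float rgb[4])
{
   int i;
   float us = u[0] - matrow0[3]; /* u -= matrow0.w */
   float vs = v[0] - matrow0[3]; /* v -= matrow0.w */
   for (i = 0; i < 4; ++i)
      rgb[i] = y[0] * matrow0[i] + us * matrow1[i] + vs * matrow2[i];
   rgb[3] = matrow0[0]; /* the shader writes matrow0.x into the alpha channel */
}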
static void * create_copy_frag_shader(struct vl_deint_filter *filter, unsigned field) { struct ureg_program *shader; struct ureg_src i_vtex; struct ureg_src sampler; struct ureg_dst o_fragment; struct ureg_dst t_tex; shader = ureg_create(PIPE_SHADER_FRAGMENT); if (!shader) { return NULL; } t_tex = ureg_DECL_temporary(shader); i_vtex = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR); sampler = ureg_DECL_sampler(shader, 2); o_fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0); ureg_MOV(shader, t_tex, i_vtex); if (field) { ureg_MOV(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_ZW), ureg_imm4f(shader, 0, 0, 1.0f, 0)); } else { ureg_MOV(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 0)); } ureg_TEX(shader, o_fragment, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_tex), sampler); ureg_release_temporary(shader, t_tex); ureg_END(shader); return ureg_create_shader_and_destroy(shader, filter->pipe); }
static void * create_ref_vert_shader(struct vl_mc *r) { struct ureg_program *shader; struct ureg_src mv_scale; struct ureg_src vmv[2]; struct ureg_dst t_vpos; struct ureg_dst o_vmv[2]; unsigned i; shader = ureg_create(TGSI_PROCESSOR_VERTEX); if (!shader) return NULL; vmv[0] = ureg_DECL_vs_input(shader, VS_I_MV_TOP); vmv[1] = ureg_DECL_vs_input(shader, VS_I_MV_BOTTOM); t_vpos = calc_position(r, shader, ureg_imm2f(shader, (float)VL_MACROBLOCK_WIDTH / r->buffer_width, (float)VL_MACROBLOCK_HEIGHT / r->buffer_height) ); o_vmv[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP); o_vmv[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM); /* * mv_scale.xy = 0.5 / (dst.width, dst.height); * mv_scale.z = 1.0f / 4.0f * mv_scale.w = 1.0f / 255.0f * * // Apply motion vectors * o_vmv[0..1].xy = vmv[0..1] * mv_scale + t_vpos * o_vmv[0..1].zw = vmv[0..1] * mv_scale * */ mv_scale = ureg_imm4f(shader, 0.5f / r->buffer_width, 0.5f / r->buffer_height, 1.0f / 4.0f, 1.0f / PIPE_VIDEO_MV_WEIGHT_MAX); for (i = 0; i < 2; ++i) { ureg_MAD(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_XY), mv_scale, vmv[i], ureg_src(t_vpos)); ureg_MUL(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_ZW), mv_scale, vmv[i]); } ureg_release_temporary(shader, t_vpos); ureg_END(shader); return ureg_create_shader_and_destroy(shader, r->pipe); }
static struct ureg_src vs_normalize_coords(struct ureg_program *ureg, struct ureg_src coords, struct ureg_src const0, struct ureg_src const1) { struct ureg_dst tmp = ureg_DECL_temporary(ureg); struct ureg_src ret; ureg_MAD(ureg, tmp, coords, const0, const1); ret = ureg_src(tmp); ureg_release_temporary(ureg, tmp); return ret; }
static void *
create_frag_shader_palette(struct vl_compositor *c, bool include_cc)
{
   struct ureg_program *shader;
   struct ureg_src csc[3];
   struct ureg_src tc;
   struct ureg_src sampler;
   struct ureg_src palette;
   struct ureg_dst texel;
   struct ureg_dst fragment;
   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   for (i = 0; include_cc && i < 3; ++i)
      csc[i] = ureg_DECL_constant(shader, i);

   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
   sampler = ureg_DECL_sampler(shader, 0);
   palette = ureg_DECL_sampler(shader, 1);

   texel = ureg_DECL_temporary(shader);
   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * texel = tex(tc, sampler)
    * fragment.xyz = tex(texel, palette) * csc
    * fragment.a = texel.a
    */
   ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
   ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_src(texel));

   if (include_cc) {
      ureg_TEX(shader, texel, TGSI_TEXTURE_1D, ureg_src(texel), palette);
      for (i = 0; i < 3; ++i)
         ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
   } else {
      ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
               TGSI_TEXTURE_1D, ureg_src(texel), palette);
   }

   ureg_release_temporary(shader, texel);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}
static void * create_mismatch_vert_shader(struct vl_idct *idct) { struct ureg_program *shader; struct ureg_src vpos; struct ureg_src scale; struct ureg_dst t_tex; struct ureg_dst o_vpos, o_addr[2]; shader = ureg_create(TGSI_PROCESSOR_VERTEX); if (!shader) return NULL; vpos = ureg_DECL_vs_input(shader, VS_I_VPOS); t_tex = ureg_DECL_temporary(shader); o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS); o_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0); o_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1); /* * scale = (VL_BLOCK_WIDTH, VL_BLOCK_HEIGHT) / (dst.width, dst.height) * * t_vpos = vpos + 7 / VL_BLOCK_WIDTH * o_vpos.xy = t_vpos * scale * * o_addr = calc_addr(...) * */ scale = ureg_imm2f(shader, (float)VL_BLOCK_WIDTH / idct->buffer_width, (float)VL_BLOCK_HEIGHT / idct->buffer_height); ureg_MAD(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), vpos, scale, scale); ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f)); ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), vpos, scale); calc_addr(shader, o_addr, ureg_src(t_tex), ureg_src(t_tex), false, false, idct->buffer_width / 4); ureg_release_temporary(shader, t_tex); ureg_END(shader); return ureg_create_shader_and_destroy(shader, idct->pipe); }
static void
matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
{
   struct ureg_dst tmp;

   tmp = ureg_DECL_temporary(shader);

   /*
    * tmp.x = dot4(l[0], r[0])
    * tmp.y = dot4(l[1], r[1])
    * dst = tmp.x + tmp.y
    */
   ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
   ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(l[1]), ureg_src(r[1]));
   ureg_ADD(shader, dst,
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));

   ureg_release_temporary(shader, tmp);
}
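/*
 * Scalar reference of what matrix_mul computes: one element of an 8x8 matrix
 * product, i.e. an 8-wide dot product split across two vec4 register pairs.
 * matrix_mul_ref is a hypothetical helper for illustration only, not part of
 * the vl_idct code.
 */
static float matrix_mul_ref(const float l[8], const float r[8])
{
   float acc = 0.0f;
   int i;
   for (i = 0; i < 8; ++i)
      acc += l[i] * r[i];
   return acc;
}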
static void linear_gradient(struct ureg_program *ureg, struct ureg_dst out, struct ureg_src pos, struct ureg_src sampler, struct ureg_src coords, struct ureg_src const0124, struct ureg_src matrow0, struct ureg_src matrow1, struct ureg_src matrow2) { struct ureg_dst temp0 = ureg_DECL_temporary(ureg); struct ureg_dst temp1 = ureg_DECL_temporary(ureg); struct ureg_dst temp2 = ureg_DECL_temporary(ureg); struct ureg_dst temp3 = ureg_DECL_temporary(ureg); struct ureg_dst temp4 = ureg_DECL_temporary(ureg); struct ureg_dst temp5 = ureg_DECL_temporary(ureg); ureg_MOV(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_XY), pos); ureg_MOV(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_Z), ureg_scalar(const0124, TGSI_SWIZZLE_Y)); ureg_DP3(ureg, temp1, matrow0, ureg_src(temp0)); ureg_DP3(ureg, temp2, matrow1, ureg_src(temp0)); ureg_DP3(ureg, temp3, matrow2, ureg_src(temp0)); ureg_RCP(ureg, temp3, ureg_src(temp3)); ureg_MUL(ureg, temp1, ureg_src(temp1), ureg_src(temp3)); ureg_MUL(ureg, temp2, ureg_src(temp2), ureg_src(temp3)); ureg_MOV(ureg, ureg_writemask(temp4, TGSI_WRITEMASK_X), ureg_src(temp1)); ureg_MOV(ureg, ureg_writemask(temp4, TGSI_WRITEMASK_Y), ureg_src(temp2)); ureg_MUL(ureg, temp0, ureg_scalar(coords, TGSI_SWIZZLE_Y), ureg_scalar(ureg_src(temp4), TGSI_SWIZZLE_Y)); ureg_MAD(ureg, temp1, ureg_scalar(coords, TGSI_SWIZZLE_X), ureg_scalar(ureg_src(temp4), TGSI_SWIZZLE_X), ureg_src(temp0)); ureg_MUL(ureg, temp2, ureg_src(temp1), ureg_scalar(coords, TGSI_SWIZZLE_Z)); ureg_TEX(ureg, out, TGSI_TEXTURE_1D, ureg_src(temp2), sampler); ureg_release_temporary(ureg, temp0); ureg_release_temporary(ureg, temp1); ureg_release_temporary(ureg, temp2); ureg_release_temporary(ureg, temp3); ureg_release_temporary(ureg, temp4); ureg_release_temporary(ureg, temp5); }
static void *
create_frag_shader_video_buffer(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src tc;
   struct ureg_src csc[3];
   struct ureg_src sampler[3];
   struct ureg_dst texel;
   struct ureg_dst fragment;
   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
   for (i = 0; i < 3; ++i) {
      csc[i] = ureg_DECL_constant(shader, i);
      sampler[i] = ureg_DECL_sampler(shader, i);
   }
   texel = ureg_DECL_temporary(shader);
   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * texel.xyz = tex(tc, sampler[i])
    * fragment = csc * texel
    */
   for (i = 0; i < 3; ++i)
      ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, tc, sampler[i]);
   ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   for (i = 0; i < 3; ++i)
      ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));

   ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   ureg_release_temporary(shader, texel);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}
static void *
create_frag_shader_weave(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src i_tc[2];
   struct ureg_src csc[3];
   struct ureg_src sampler[3];
   struct ureg_dst t_tc[2];
   struct ureg_dst t_texel[2];
   struct ureg_dst o_fragment;
   unsigned i, j;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   i_tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);
   i_tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM, TGSI_INTERPOLATE_LINEAR);

   for (i = 0; i < 3; ++i) {
      csc[i] = ureg_DECL_constant(shader, i);
      sampler[i] = ureg_DECL_sampler(shader, i);
   }

   for (i = 0; i < 2; ++i) {
      t_tc[i] = ureg_DECL_temporary(shader);
      t_texel[i] = ureg_DECL_temporary(shader);
   }
   o_fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /* calculate the texture offsets
    * t_tc.x = i_tc.x
    * t_tc.y = (round(i_tc.y) + 0.5) / height * 2
    */
   for (i = 0; i < 2; ++i) {
      ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_X), i_tc[i]);
      ureg_ROUND(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ), i_tc[i]);
      ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_W),
               ureg_imm1f(shader, i ? 0.75f : 0.25f));
      ureg_ADD(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
               ureg_src(t_tc[i]), ureg_imm1f(shader, 0.5f));
      ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Y),
               ureg_src(t_tc[i]), ureg_scalar(i_tc[0], TGSI_SWIZZLE_W));
      ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Z),
               ureg_src(t_tc[i]), ureg_scalar(i_tc[1], TGSI_SWIZZLE_W));
   }

   /* fetch the texels
    * texel[0..1].x = tex(t_tc[0..1][0])
    * texel[0..1].y = tex(t_tc[0..1][1])
    * texel[0..1].z = tex(t_tc[0..1][2])
    */
   for (i = 0; i < 2; ++i)
      for (j = 0; j < 3; ++j) {
         struct ureg_src src = ureg_swizzle(ureg_src(t_tc[i]),
            TGSI_SWIZZLE_X, j ? TGSI_SWIZZLE_Z : TGSI_SWIZZLE_Y, TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);

         ureg_TEX(shader, ureg_writemask(t_texel[i], TGSI_WRITEMASK_X << j),
                  TGSI_TEXTURE_3D, src, sampler[j]);
      }

   /* calculate linear interpolation factor
    * factor = |round(i_tc.y) - i_tc.y| * 2
    */
   ureg_ROUND(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ), i_tc[0]);
   ureg_ADD(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
            ureg_src(t_tc[0]), ureg_negate(i_tc[0]));
   ureg_MUL(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_XY),
            ureg_abs(ureg_src(t_tc[0])), ureg_imm1f(shader, 2.0f));
   ureg_LRP(shader, t_texel[0],
            ureg_swizzle(ureg_src(t_tc[0]),
               TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z),
            ureg_src(t_texel[1]), ureg_src(t_texel[0]));

   /* and finally do colour space transformation
    * fragment = csc * texel
    */
   ureg_MOV(shader, ureg_writemask(t_texel[0], TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));
   for (i = 0; i < 3; ++i)
      ureg_DP4(shader, ureg_writemask(o_fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(t_texel[0]));

   ureg_MOV(shader, ureg_writemask(o_fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   for (i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, t_texel[i]);
      ureg_release_temporary(shader, t_tc[i]);
   }

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}
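/*
 * Scalar reference of the interpolation factor create_frag_shader_weave
 * derives per pixel (illustrative sketch; weave_blend_factor is a
 * hypothetical helper). Following the shader's own comments, the per-field
 * coordinate is (round(i_tc.y) + 0.5) / height * 2 with the height scale
 * assumed to arrive in i_tc.w, and the blend factor between the two field
 * texels is |round(i_tc.y) - i_tc.y| * 2.
 */
static float weave_blend_factor(float y)
{
   float r = (float)(int)(y + 0.5f); /* round to nearest; texcoords are non-negative here */
   float d = r - y;
   return (d < 0.0f ? -d : d) * 2.0f;
}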
static void * create_deint_frag_shader(struct vl_deint_filter *filter, unsigned field, struct vertex2f *sizes, bool spatial_filter) { struct ureg_program *shader; struct ureg_src i_vtex; struct ureg_src sampler_cur; struct ureg_src sampler_prevprev; struct ureg_src sampler_prev; struct ureg_src sampler_next; struct ureg_dst o_fragment; struct ureg_dst t_tex; struct ureg_dst t_comp_top, t_comp_bot; struct ureg_dst t_diff; struct ureg_dst t_a, t_b; struct ureg_dst t_weave, t_linear; shader = ureg_create(PIPE_SHADER_FRAGMENT); if (!shader) { return NULL; } t_tex = ureg_DECL_temporary(shader); t_comp_top = ureg_DECL_temporary(shader); t_comp_bot = ureg_DECL_temporary(shader); t_diff = ureg_DECL_temporary(shader); t_a = ureg_DECL_temporary(shader); t_b = ureg_DECL_temporary(shader); t_weave = ureg_DECL_temporary(shader); t_linear = ureg_DECL_temporary(shader); i_vtex = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR); sampler_prevprev = ureg_DECL_sampler(shader, 0); sampler_prev = ureg_DECL_sampler(shader, 1); sampler_cur = ureg_DECL_sampler(shader, 2); sampler_next = ureg_DECL_sampler(shader, 3); o_fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0); // we don't care about ZW interpolation (allows better optimization) ureg_MOV(shader, t_tex, i_vtex); ureg_MOV(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 0)); // sample between texels for cheap lowpass ureg_ADD(shader, t_comp_top, ureg_src(t_tex), ureg_imm4f(shader, sizes->x * 0.5f, sizes->y * -0.5f, 0, 0)); ureg_ADD(shader, t_comp_bot, ureg_src(t_tex), ureg_imm4f(shader, sizes->x * -0.5f, sizes->y * 0.5f, 1.0f, 0)); if (field == 0) { /* interpolating top field -> current field is a bottom field */ // cur vs prev2 ureg_TEX(shader, t_a, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_comp_bot), sampler_cur); ureg_TEX(shader, t_b, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_comp_bot), sampler_prevprev); ureg_ADD(shader, ureg_writemask(t_diff, TGSI_WRITEMASK_X), ureg_src(t_a), ureg_negate(ureg_src(t_b))); // prev vs next ureg_TEX(shader, t_a, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_comp_top), sampler_prev); ureg_TEX(shader, t_b, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_comp_top), sampler_next); ureg_ADD(shader, ureg_writemask(t_diff, TGSI_WRITEMASK_Y), ureg_src(t_a), ureg_negate(ureg_src(t_b))); } else { /* interpolating bottom field -> current field is a top field */ // cur vs prev2 ureg_TEX(shader, t_a, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_comp_top), sampler_cur); ureg_TEX(shader, t_b, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_comp_top), sampler_prevprev); ureg_ADD(shader, ureg_writemask(t_diff, TGSI_WRITEMASK_X), ureg_src(t_a), ureg_negate(ureg_src(t_b))); // prev vs next ureg_TEX(shader, t_a, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_comp_bot), sampler_prev); ureg_TEX(shader, t_b, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_comp_bot), sampler_next); ureg_ADD(shader, ureg_writemask(t_diff, TGSI_WRITEMASK_Y), ureg_src(t_a), ureg_negate(ureg_src(t_b))); } // absolute maximum of differences ureg_MAX(shader, ureg_writemask(t_diff, TGSI_WRITEMASK_X), ureg_abs(ureg_src(t_diff)), ureg_scalar(ureg_abs(ureg_src(t_diff)), TGSI_SWIZZLE_Y)); if (field == 0) { /* weave with prev top field */ ureg_TEX(shader, t_weave, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_tex), sampler_prev); /* get linear interpolation from current bottom field */ ureg_ADD(shader, t_comp_top, ureg_src(t_tex), ureg_imm4f(shader, 0, sizes->y * -1.0f, 1.0f, 0)); ureg_TEX(shader, t_linear, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_comp_top), sampler_cur); } else { /* weave with prev bottom field */ 
ureg_ADD(shader, t_comp_bot, ureg_src(t_tex), ureg_imm4f(shader, 0, 0, 1.0f, 0)); ureg_TEX(shader, t_weave, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_comp_bot), sampler_prev); /* get linear interpolation from current top field */ ureg_ADD(shader, t_comp_bot, ureg_src(t_tex), ureg_imm4f(shader, 0, sizes->y * 1.0f, 0, 0)); ureg_TEX(shader, t_linear, TGSI_TEXTURE_2D_ARRAY, ureg_src(t_comp_bot), sampler_cur); } // mix between weave and linear // fully weave if diff < 6 (0.02353), fully interpolate if diff > 14 (0.05490) ureg_ADD(shader, ureg_writemask(t_diff, TGSI_WRITEMASK_X), ureg_src(t_diff), ureg_imm4f(shader, -0.02353f, 0, 0, 0)); ureg_MUL(shader, ureg_saturate(ureg_writemask(t_diff, TGSI_WRITEMASK_X)), ureg_src(t_diff), ureg_imm4f(shader, 31.8750f, 0, 0, 0)); ureg_LRP(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_X), ureg_src(t_diff), ureg_src(t_linear), ureg_src(t_weave)); ureg_MOV(shader, o_fragment, ureg_scalar(ureg_src(t_tex), TGSI_SWIZZLE_X)); ureg_release_temporary(shader, t_tex); ureg_release_temporary(shader, t_comp_top); ureg_release_temporary(shader, t_comp_bot); ureg_release_temporary(shader, t_diff); ureg_release_temporary(shader, t_a); ureg_release_temporary(shader, t_b); ureg_release_temporary(shader, t_weave); ureg_release_temporary(shader, t_linear); ureg_END(shader); return ureg_create_shader_and_destroy(shader, filter->pipe); }
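/*
 * Scalar reference of the weave/linear mix factor computed at the end of
 * create_deint_frag_shader (deint_blend_factor is a hypothetical helper for
 * illustration): differences below 6/255 (~0.02353) keep the weaved pixel,
 * differences above 14/255 (~0.05490) use the linear interpolation, with a
 * linear ramp in between (255/8 == 31.875).
 */
static float deint_blend_factor(float diff)
{
   float f = (diff - 6.0f / 255.0f) * (255.0f / 8.0f);
   return f < 0.0f ? 0.0f : f > 1.0f ? 1.0f : f;
}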
static void * create_fs(struct st_context *st, bool download, enum pipe_texture_target target) { struct pipe_context *pipe = st->pipe; struct pipe_screen *screen = pipe->screen; struct ureg_program *ureg; bool have_layer; struct ureg_dst out; struct ureg_src sampler; struct ureg_src pos; struct ureg_src layer; struct ureg_src const0; struct ureg_src const1; struct ureg_dst temp0; have_layer = st->pbo.layers && (!download || target == PIPE_TEXTURE_1D_ARRAY || target == PIPE_TEXTURE_2D_ARRAY || target == PIPE_TEXTURE_3D || target == PIPE_TEXTURE_CUBE || target == PIPE_TEXTURE_CUBE_ARRAY); ureg = ureg_create(PIPE_SHADER_FRAGMENT); if (!ureg) return NULL; if (!download) { out = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0); } else { struct ureg_src image; /* writeonly images do not require an explicitly given format. */ image = ureg_DECL_image(ureg, 0, TGSI_TEXTURE_BUFFER, PIPE_FORMAT_NONE, true, false); out = ureg_dst(image); } sampler = ureg_DECL_sampler(ureg, 0); if (screen->get_param(screen, PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL)) { pos = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_POSITION, 0); } else { pos = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_POSITION, 0, TGSI_INTERPOLATE_LINEAR); } if (have_layer) { layer = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_LAYER, 0, TGSI_INTERPOLATE_CONSTANT); } const0 = ureg_DECL_constant(ureg, 0); const1 = ureg_DECL_constant(ureg, 1); temp0 = ureg_DECL_temporary(ureg); /* Note: const0 = [ -xoffset + skip_pixels, -yoffset, stride, image_height ] */ /* temp0.xy = f2i(temp0.xy) */ ureg_F2I(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_XY), ureg_swizzle(pos, TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y)); /* temp0.xy = temp0.xy + const0.xy */ ureg_UADD(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_XY), ureg_swizzle(ureg_src(temp0), TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y), ureg_swizzle(const0, TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y)); /* temp0.x = const0.z * temp0.y + temp0.x */ ureg_UMAD(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_X), ureg_scalar(const0, TGSI_SWIZZLE_Z), ureg_scalar(ureg_src(temp0), TGSI_SWIZZLE_Y), ureg_scalar(ureg_src(temp0), TGSI_SWIZZLE_X)); if (have_layer) { /* temp0.x = const0.w * layer + temp0.x */ ureg_UMAD(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_X), ureg_scalar(const0, TGSI_SWIZZLE_W), ureg_scalar(layer, TGSI_SWIZZLE_X), ureg_scalar(ureg_src(temp0), TGSI_SWIZZLE_X)); } /* temp0.w = 0 */ ureg_MOV(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_W), ureg_imm1u(ureg, 0)); if (download) { struct ureg_dst temp1; struct ureg_src op[2]; temp1 = ureg_DECL_temporary(ureg); /* temp1.xy = pos.xy */ ureg_F2I(ureg, ureg_writemask(temp1, TGSI_WRITEMASK_XY), pos); /* temp1.zw = 0 */ ureg_MOV(ureg, ureg_writemask(temp1, TGSI_WRITEMASK_ZW), ureg_imm1u(ureg, 0)); if (have_layer) { struct ureg_dst temp1_layer = ureg_writemask(temp1, target == PIPE_TEXTURE_1D_ARRAY ? 
TGSI_WRITEMASK_Y : TGSI_WRITEMASK_Z); /* temp1.y/z = layer */ ureg_MOV(ureg, temp1_layer, ureg_scalar(layer, TGSI_SWIZZLE_X)); if (target == PIPE_TEXTURE_3D) { /* temp1.z += layer_offset */ ureg_UADD(ureg, temp1_layer, ureg_scalar(ureg_src(temp1), TGSI_SWIZZLE_Z), ureg_scalar(const1, TGSI_SWIZZLE_X)); } } /* temp1 = txf(sampler, temp1) */ ureg_TXF(ureg, temp1, util_pipe_tex_to_tgsi_tex(target, 1), ureg_src(temp1), sampler); /* store(out, temp0, temp1) */ op[0] = ureg_src(temp0); op[1] = ureg_src(temp1); ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &out, 1, op, 2, 0, TGSI_TEXTURE_BUFFER, PIPE_FORMAT_NONE); ureg_release_temporary(ureg, temp1); } else { /* out = txf(sampler, temp0.x) */ ureg_TXF(ureg, out, TGSI_TEXTURE_BUFFER, ureg_src(temp0), sampler); } ureg_release_temporary(ureg, temp0); ureg_END(ureg); return ureg_create_shader_and_destroy(ureg, pipe); }
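/*
 * Host-side reference of the buffer texel address the PBO fragment shader
 * computes (sketch only; pbo_addr_ref is a hypothetical helper). The constant
 * layout follows the comment in create_fs:
 * const0 = [ -xoffset + skip_pixels, -yoffset, stride, image_height ],
 * and the layer term is only applied when have_layer is set.
 */
static unsigned pbo_addr_ref(unsigned x, unsigned y, unsigned layer,
                             const int const0[4])
{
   unsigned addr = (x + const0[0]) + (y + const0[1]) * const0[2];
   return addr + layer * const0[3];
}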
static void * create_ycbcr_vert_shader(struct vl_mc *r, vl_mc_ycbcr_vert_shader vs_callback, void *callback_priv) { struct ureg_program *shader; struct ureg_src vrect, vpos; struct ureg_dst t_vpos, t_vtex; struct ureg_dst o_vpos, o_flags; struct vertex2f scale = { (float)VL_BLOCK_WIDTH / r->buffer_width * VL_MACROBLOCK_WIDTH / r->macroblock_size, (float)VL_BLOCK_HEIGHT / r->buffer_height * VL_MACROBLOCK_HEIGHT / r->macroblock_size }; unsigned label; shader = ureg_create(TGSI_PROCESSOR_VERTEX); if (!shader) return NULL; vrect = ureg_DECL_vs_input(shader, VS_I_RECT); vpos = ureg_DECL_vs_input(shader, VS_I_VPOS); t_vpos = calc_position(r, shader, ureg_imm2f(shader, scale.x, scale.y)); t_vtex = ureg_DECL_temporary(shader); o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS); o_flags = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_FLAGS); /* * o_vtex.xy = t_vpos * o_flags.z = intra * 0.5 * * if(interlaced) { * t_vtex.xy = vrect.y ? { 0, scale.y } : { -scale.y : 0 } * t_vtex.z = vpos.y % 2 * t_vtex.y = t_vtex.z ? t_vtex.x : t_vtex.y * o_vpos.y = t_vtex.y + t_vpos.y * * o_flags.w = t_vtex.z ? 0 : 1 * } * */ vs_callback(callback_priv, r, shader, VS_O_VTEX, t_vpos); ureg_MUL(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_Z), ureg_scalar(vpos, TGSI_SWIZZLE_Z), ureg_imm1f(shader, 0.5f)); ureg_MOV(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_W), ureg_imm1f(shader, -1.0f)); if (r->macroblock_size == VL_MACROBLOCK_HEIGHT) { //TODO ureg_IF(shader, ureg_scalar(vpos, TGSI_SWIZZLE_W), &label); ureg_CMP(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_XY), ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_Y)), ureg_imm2f(shader, 0.0f, scale.y), ureg_imm2f(shader, -scale.y, 0.0f)); ureg_MUL(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Z), ureg_scalar(vpos, TGSI_SWIZZLE_Y), ureg_imm1f(shader, 0.5f)); ureg_FRC(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Z), ureg_src(t_vtex)); ureg_CMP(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y), ureg_negate(ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Z)), ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_X), ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Y)); ureg_ADD(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_Y), ureg_src(t_vpos), ureg_src(t_vtex)); ureg_CMP(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_W), ureg_negate(ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Z)), ureg_imm1f(shader, 0.0f), ureg_imm1f(shader, 1.0f)); ureg_fixup_label(shader, label, ureg_get_instruction_number(shader)); ureg_ENDIF(shader); } ureg_release_temporary(shader, t_vtex); ureg_release_temporary(shader, t_vpos); ureg_END(shader); return ureg_create_shader_and_destroy(shader, r->pipe); }
static void *
create_ref_frag_shader(struct vl_mc *r)
{
   const float y_scale = r->buffer_height / 2 * r->macroblock_size / VL_MACROBLOCK_HEIGHT;

   struct ureg_program *shader;
   struct ureg_src tc[2], sampler;
   struct ureg_dst ref, field;
   struct ureg_dst fragment;
   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM, TGSI_INTERPOLATE_LINEAR);
   sampler = ureg_DECL_sampler(shader, 0);

   ref = ureg_DECL_temporary(shader);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   field = calc_line(shader);

   /*
    * ref = field.z ? tc[1] : tc[0]
    *
    * // Adjust tc according to top/bottom field selection
    * if (|ref.z|) {
    *    ref.y *= y_scale
    *    ref.y = floor(ref.y)
    *    ref.y += ref.z
    *    ref.y /= y_scale
    * }
    *
    * fragment.xyz = tex(ref, sampler[0])
    */
   ureg_CMP(shader, ureg_writemask(ref, TGSI_WRITEMASK_XYZ),
            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
            tc[1], tc[0]);
   ureg_CMP(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W),
            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
            tc[1], tc[0]);

   ureg_IF(shader, ureg_scalar(ureg_src(ref), TGSI_SWIZZLE_Z), &label);

   ureg_MUL(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
            ureg_src(ref), ureg_imm1f(shader, y_scale));
   ureg_FLR(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y), ureg_src(ref));
   ureg_ADD(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
            ureg_src(ref), ureg_scalar(ureg_src(ref), TGSI_SWIZZLE_Z));
   ureg_MUL(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
            ureg_src(ref), ureg_imm1f(shader, 1.0f / y_scale));

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ENDIF(shader);

   ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
            TGSI_TEXTURE_2D, ureg_src(ref), sampler);

   ureg_release_temporary(shader, ref);
   ureg_release_temporary(shader, field);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}
static void *
combine_shaders(const struct shader_asm_info *shaders[SHADER_STAGES], int num_shaders,
                struct pipe_context *pipe, struct pipe_shader_state *shader)
{
   VGboolean declare_input = VG_FALSE;
   VGint start_const = -1, end_const = 0;
   VGint start_temp = -1, end_temp = 0;
   VGint start_sampler = -1, end_sampler = 0;
   VGint i, current_shader = 0;
   VGint num_consts, num_temps, num_samplers;
   struct ureg_program *ureg;
   struct ureg_src in[2];
   struct ureg_src *sampler = NULL;
   struct ureg_src *constant = NULL;
   struct ureg_dst out, *temp = NULL;
   void *p = NULL;

   for (i = 0; i < num_shaders; ++i) {
      if (shaders[i]->num_consts)
         start_const = range_min(start_const, shaders[i]->start_const);
      if (shaders[i]->num_temps)
         start_temp = range_min(start_temp, shaders[i]->start_temp);
      if (shaders[i]->num_samplers)
         start_sampler = range_min(start_sampler, shaders[i]->start_sampler);

      end_const = range_max(end_const, shaders[i]->start_const + shaders[i]->num_consts);
      end_temp = range_max(end_temp, shaders[i]->start_temp + shaders[i]->num_temps);
      end_sampler = range_max(end_sampler, shaders[i]->start_sampler + shaders[i]->num_samplers);

      if (shaders[i]->needs_position)
         declare_input = VG_TRUE;
   }
   /* if they're still uninitialized, initialize them */
   if (start_const < 0)
      start_const = 0;
   if (start_temp < 0)
      start_temp = 0;
   if (start_sampler < 0)
      start_sampler = 0;

   num_consts = end_const - start_const;
   num_temps = end_temp - start_temp;
   num_samplers = end_sampler - start_sampler;

   ureg = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!ureg)
      return NULL;

   if (declare_input) {
      in[0] = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_POSITION, 0, TGSI_INTERPOLATE_LINEAR);
      in[1] = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_GENERIC, 0, TGSI_INTERPOLATE_PERSPECTIVE);
   }

   /* we always have a color output */
   out = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0);

   if (num_consts >= 1) {
      constant = (struct ureg_src *) malloc(sizeof(struct ureg_src) * end_const);
      for (i = start_const; i < end_const; i++) {
         constant[i] = ureg_DECL_constant(ureg, i);
      }
   }

   if (num_temps >= 1) {
      temp = (struct ureg_dst *) malloc(sizeof(struct ureg_dst) * end_temp);
      for (i = start_temp; i < end_temp; i++) {
         temp[i] = ureg_DECL_temporary(ureg);
      }
   }

   if (num_samplers >= 1) {
      sampler = (struct ureg_src *) malloc(sizeof(struct ureg_src) * end_sampler);
      for (i = start_sampler; i < end_sampler; i++) {
         sampler[i] = ureg_DECL_sampler(ureg, i);
      }
   }

   while (current_shader < num_shaders) {
      if ((current_shader + 1) == num_shaders) {
         shaders[current_shader]->func(ureg, &out, in, sampler, temp, constant);
      } else {
         shaders[current_shader]->func(ureg, &temp[0], in, sampler, temp, constant);
      }
      current_shader++;
   }

   /* release the temporaries while the ureg program is still alive */
   if (num_temps >= 1) {
      for (i = start_temp; i < end_temp; i++) {
         ureg_release_temporary(ureg, temp[i]);
      }
   }

   ureg_END(ureg);

   shader->tokens = ureg_finalize(ureg);
   if (shader->tokens)
      p = pipe->create_fs_state(pipe, shader);

   ureg_destroy(ureg);

   if (temp)
      free(temp);
   if (constant)
      free(constant);
   if (sampler)
      free(sampler);

   return p;
}
static void * create_stage1_frag_shader(struct vl_idct *idct) { struct ureg_program *shader; struct ureg_src l_addr[2], r_addr[2]; struct ureg_dst l[4][2], r[2]; struct ureg_dst *fragment; int i, j; shader = ureg_create(TGSI_PROCESSOR_FRAGMENT); if (!shader) return NULL; fragment = MALLOC(idct->nr_of_render_targets * sizeof(struct ureg_dst)); l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR); l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR); r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR); r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR); for (i = 0; i < idct->nr_of_render_targets; ++i) fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i); for (i = 0; i < 4; ++i) { l[i][0] = ureg_DECL_temporary(shader); l[i][1] = ureg_DECL_temporary(shader); } r[0] = ureg_DECL_temporary(shader); r[1] = ureg_DECL_temporary(shader); for (i = 0; i < 4; ++i) { increment_addr(shader, l[i], l_addr, false, false, i - 2, idct->buffer_height); } for (i = 0; i < 4; ++i) { struct ureg_src s_addr[2]; s_addr[0] = ureg_src(l[i][0]); s_addr[1] = ureg_src(l[i][1]); fetch_four(shader, l[i], s_addr, ureg_DECL_sampler(shader, 0), false); } for (i = 0; i < idct->nr_of_render_targets; ++i) { struct ureg_src s_addr[2]; increment_addr(shader, r, r_addr, true, true, i - (signed)idct->nr_of_render_targets / 2, VL_BLOCK_HEIGHT); s_addr[0] = ureg_src(r[0]); s_addr[1] = ureg_src(r[1]); fetch_four(shader, r, s_addr, ureg_DECL_sampler(shader, 1), false); for (j = 0; j < 4; ++j) { matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r); } } for (i = 0; i < 4; ++i) { ureg_release_temporary(shader, l[i][0]); ureg_release_temporary(shader, l[i][1]); } ureg_release_temporary(shader, r[0]); ureg_release_temporary(shader, r[1]); ureg_END(shader); FREE(fragment); return ureg_create_shader_and_destroy(shader, idct->pipe); }
static void * create_mismatch_frag_shader(struct vl_idct *idct) { struct ureg_program *shader; struct ureg_src addr[2]; struct ureg_dst m[8][2]; struct ureg_dst fragment; unsigned i; shader = ureg_create(TGSI_PROCESSOR_FRAGMENT); if (!shader) return NULL; addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR); addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR); fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0); for (i = 0; i < 8; ++i) { m[i][0] = ureg_DECL_temporary(shader); m[i][1] = ureg_DECL_temporary(shader); } for (i = 0; i < 8; ++i) { increment_addr(shader, m[i], addr, false, false, i, idct->buffer_height); } for (i = 0; i < 8; ++i) { struct ureg_src s_addr[2]; s_addr[0] = ureg_src(m[i][0]); s_addr[1] = ureg_src(m[i][1]); fetch_four(shader, m[i], s_addr, ureg_DECL_sampler(shader, 0), false); } for (i = 1; i < 8; ++i) { ureg_ADD(shader, m[0][0], ureg_src(m[0][0]), ureg_src(m[i][0])); ureg_ADD(shader, m[0][1], ureg_src(m[0][1]), ureg_src(m[i][1])); } ureg_ADD(shader, m[0][0], ureg_src(m[0][0]), ureg_src(m[0][1])); ureg_DP4(shader, m[0][0], ureg_abs(ureg_src(m[0][0])), ureg_imm1f(shader, 1 << 14)); ureg_MUL(shader, ureg_writemask(m[0][0], TGSI_WRITEMASK_W), ureg_abs(ureg_src(m[7][1])), ureg_imm1f(shader, 1 << 14)); ureg_FRC(shader, m[0][0], ureg_src(m[0][0])); ureg_SGT(shader, m[0][0], ureg_imm1f(shader, 0.5f), ureg_abs(ureg_src(m[0][0]))); ureg_CMP(shader, ureg_writemask(m[0][0], TGSI_WRITEMASK_W), ureg_negate(ureg_src(m[0][0])), ureg_imm1f(shader, 1.0f / (1 << 15)), ureg_imm1f(shader, -1.0f / (1 << 15))); ureg_MUL(shader, ureg_writemask(m[0][0], TGSI_WRITEMASK_W), ureg_src(m[0][0]), ureg_scalar(ureg_src(m[0][0]), TGSI_SWIZZLE_X)); ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ), ureg_src(m[7][1])); ureg_ADD(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_src(m[0][0]), ureg_src(m[7][1])); for (i = 0; i < 8; ++i) { ureg_release_temporary(shader, m[i][0]); ureg_release_temporary(shader, m[i][1]); } ureg_END(shader); return ureg_create_shader_and_destroy(shader, idct->pipe); }
static void * create_frag_shader(struct vl_median_filter *filter, struct vertex2f *offsets, unsigned num_offsets) { struct pipe_screen *screen = filter->pipe->screen; struct ureg_program *shader; struct ureg_src i_vtex; struct ureg_src sampler; struct ureg_dst *t_array = MALLOC(sizeof(struct ureg_dst) * num_offsets); struct ureg_dst o_fragment; const unsigned median = num_offsets >> 1; unsigned i, j; assert(num_offsets & 1); /* we need an odd number of offsets */ if (!(num_offsets & 1)) { /* yeah, we REALLY need an odd number of offsets!!! */ FREE(t_array); return NULL; } if (num_offsets > screen->get_shader_param( screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_TEMPS)) { FREE(t_array); return NULL; } shader = ureg_create(PIPE_SHADER_FRAGMENT); if (!shader) { FREE(t_array); return NULL; } i_vtex = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR); sampler = ureg_DECL_sampler(shader, 0); ureg_DECL_sampler_view(shader, 0, TGSI_TEXTURE_2D, TGSI_RETURN_TYPE_FLOAT, TGSI_RETURN_TYPE_FLOAT, TGSI_RETURN_TYPE_FLOAT, TGSI_RETURN_TYPE_FLOAT); for (i = 0; i < num_offsets; ++i) t_array[i] = ureg_DECL_temporary(shader); o_fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0); /* * t_array[0..*] = vtex + offset[0..*] * t_array[0..*] = tex(t_array[0..*], sampler) * result = partial_bubblesort(t_array)[mid] */ for (i = 0; i < num_offsets; ++i) { if (!is_vec_zero(offsets[i])) { ureg_ADD(shader, ureg_writemask(t_array[i], TGSI_WRITEMASK_XY), i_vtex, ureg_imm2f(shader, offsets[i].x, offsets[i].y)); ureg_MOV(shader, ureg_writemask(t_array[i], TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 0.0f)); } } for (i = 0; i < num_offsets; ++i) { struct ureg_src src = is_vec_zero(offsets[i]) ? i_vtex : ureg_src(t_array[i]); ureg_TEX(shader, t_array[i], TGSI_TEXTURE_2D, src, sampler); } // TODO: Couldn't this be improved even more? for (i = 0; i <= median; ++i) { for (j = 1; j < (num_offsets - i - 1); ++j) { struct ureg_dst tmp = ureg_DECL_temporary(shader); ureg_MOV(shader, tmp, ureg_src(t_array[j])); ureg_MAX(shader, t_array[j], ureg_src(t_array[j]), ureg_src(t_array[j - 1])); ureg_MIN(shader, t_array[j - 1], ureg_src(tmp), ureg_src(t_array[j - 1])); ureg_release_temporary(shader, tmp); } if (i == median) ureg_MAX(shader, t_array[j], ureg_src(t_array[j]), ureg_src(t_array[j - 1])); else ureg_MIN(shader, t_array[j - 1], ureg_src(t_array[j]), ureg_src(t_array[j - 1])); } ureg_MOV(shader, o_fragment, ureg_src(t_array[median])); ureg_END(shader); FREE(t_array); return ureg_create_shader_and_destroy(shader, filter->pipe); }
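/*
 * CPU reference of the selection network emitted by the median filter shader
 * above (median_ref is a hypothetical helper for illustration): enough
 * bubble-sort passes are run to sink the median into place. The shader drops
 * each pass's maximum instead of storing it in the tail, since the tail
 * positions are never read again.
 */
static float median_ref(float *v, unsigned n)
{
   const unsigned median = n >> 1; /* n is expected to be odd */
   unsigned i, j;
   for (i = 0; i <= median; ++i) {
      for (j = 1; j < n - i; ++j) {
         if (v[j - 1] > v[j]) {
            float t = v[j - 1];
            v[j - 1] = v[j];
            v[j] = t;
         }
      }
   }
   return v[median];
}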
static void * create_fs(struct pipe_context *pipe, unsigned fs_traits) { struct ureg_program *ureg; struct ureg_src /*dst_sampler, */ src_sampler, mask_sampler; struct ureg_src /*dst_pos, */ src_input, mask_pos; struct ureg_dst src, mask; struct ureg_dst out; struct ureg_src imm0 = { 0 }; unsigned has_mask = (fs_traits & FS_MASK) != 0; unsigned is_fill = (fs_traits & FS_FILL) != 0; unsigned is_composite = (fs_traits & FS_COMPOSITE) != 0; unsigned is_solid = (fs_traits & FS_SOLID_FILL) != 0; unsigned is_lingrad = (fs_traits & FS_LINGRAD_FILL) != 0; unsigned is_radgrad = (fs_traits & FS_RADGRAD_FILL) != 0; unsigned comp_alpha_mask = fs_traits & FS_COMPONENT_ALPHA; unsigned is_yuv = (fs_traits & FS_YUV) != 0; unsigned src_repeat_none = (fs_traits & FS_SRC_REPEAT_NONE) != 0; unsigned mask_repeat_none = (fs_traits & FS_MASK_REPEAT_NONE) != 0; unsigned src_swizzle = (fs_traits & FS_SRC_SWIZZLE_RGB) != 0; unsigned mask_swizzle = (fs_traits & FS_MASK_SWIZZLE_RGB) != 0; unsigned src_set_alpha = (fs_traits & FS_SRC_SET_ALPHA) != 0; unsigned mask_set_alpha = (fs_traits & FS_MASK_SET_ALPHA) != 0; unsigned src_luminance = (fs_traits & FS_SRC_LUMINANCE) != 0; unsigned mask_luminance = (fs_traits & FS_MASK_LUMINANCE) != 0; unsigned dst_luminance = (fs_traits & FS_DST_LUMINANCE) != 0; #if 0 print_fs_traits(fs_traits); #else (void)print_fs_traits; #endif ureg = ureg_create(TGSI_PROCESSOR_FRAGMENT); if (ureg == NULL) return 0; /* it has to be either a fill, a composite op or a yuv conversion */ debug_assert((is_fill ^ is_composite) ^ is_yuv); (void)is_yuv; out = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0); if (src_repeat_none || mask_repeat_none || src_set_alpha || mask_set_alpha || src_luminance) { imm0 = ureg_imm4f(ureg, 0, 0, 0, 1); } if (is_composite) { src_sampler = ureg_DECL_sampler(ureg, 0); src_input = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_GENERIC, 0, TGSI_INTERPOLATE_PERSPECTIVE); } else if (is_fill) { if (is_solid) src_input = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_COLOR, 0, TGSI_INTERPOLATE_PERSPECTIVE); else src_input = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_POSITION, 0, TGSI_INTERPOLATE_PERSPECTIVE); } else { debug_assert(is_yuv); return create_yuv_shader(pipe, ureg); } if (has_mask) { mask_sampler = ureg_DECL_sampler(ureg, 1); mask_pos = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_PERSPECTIVE); } #if 0 /* unused right now */ dst_sampler = ureg_DECL_sampler(ureg, 2); dst_pos = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_POSITION, 2, TGSI_INTERPOLATE_PERSPECTIVE); #endif if (is_composite) { if (has_mask || src_luminance || dst_luminance) src = ureg_DECL_temporary(ureg); else src = out; xrender_tex(ureg, src, src_input, src_sampler, imm0, src_repeat_none, src_swizzle, src_set_alpha); } else if (is_fill) { if (is_solid) { if (has_mask || src_luminance || dst_luminance) src = ureg_dst(src_input); else ureg_MOV(ureg, out, src_input); } else if (is_lingrad || is_radgrad) { struct ureg_src coords, const0124, matrow0, matrow1, matrow2; if (has_mask || src_luminance || dst_luminance) src = ureg_DECL_temporary(ureg); else src = out; coords = ureg_DECL_constant(ureg, 0); const0124 = ureg_DECL_constant(ureg, 1); matrow0 = ureg_DECL_constant(ureg, 2); matrow1 = ureg_DECL_constant(ureg, 3); matrow2 = ureg_DECL_constant(ureg, 4); if (is_lingrad) { linear_gradient(ureg, src, src_input, src_sampler, coords, const0124, matrow0, matrow1, matrow2); } else if (is_radgrad) { radial_gradient(ureg, src, src_input, src_sampler, coords, const0124, matrow0, matrow1, matrow2); } } else 
debug_assert(!"Unknown fill type!"); } if (src_luminance) { ureg_MOV(ureg, src, ureg_scalar(ureg_src(src), TGSI_SWIZZLE_X)); ureg_MOV(ureg, ureg_writemask(src, TGSI_WRITEMASK_XYZ), ureg_scalar(imm0, TGSI_SWIZZLE_X)); if (!has_mask && !dst_luminance) ureg_MOV(ureg, out, ureg_src(src)); } if (has_mask) { mask = ureg_DECL_temporary(ureg); xrender_tex(ureg, mask, mask_pos, mask_sampler, imm0, mask_repeat_none, mask_swizzle, mask_set_alpha); /* src IN mask */ src_in_mask(ureg, (dst_luminance) ? src : out, ureg_src(src), ureg_src(mask), comp_alpha_mask, mask_luminance); ureg_release_temporary(ureg, mask); } if (dst_luminance) { /* * Make sure the alpha channel goes into the output L8 surface. */ ureg_MOV(ureg, out, ureg_scalar(ureg_src(src), TGSI_SWIZZLE_W)); } ureg_END(ureg); return ureg_create_shader_and_destroy(ureg, pipe); }
static void * create_frag_shader(struct vl_zscan *zscan) { struct ureg_program *shader; struct ureg_src *vtex; struct ureg_src samp_src, samp_scan, samp_quant; struct ureg_dst *tmp; struct ureg_dst quant, fragment; unsigned i; shader = ureg_create(PIPE_SHADER_FRAGMENT); if (!shader) return NULL; vtex = MALLOC(zscan->num_channels * sizeof(struct ureg_src)); tmp = MALLOC(zscan->num_channels * sizeof(struct ureg_dst)); for (i = 0; i < zscan->num_channels; ++i) vtex[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX + i, TGSI_INTERPOLATE_LINEAR); samp_src = ureg_DECL_sampler(shader, 0); samp_scan = ureg_DECL_sampler(shader, 1); samp_quant = ureg_DECL_sampler(shader, 2); for (i = 0; i < zscan->num_channels; ++i) tmp[i] = ureg_DECL_temporary(shader); quant = ureg_DECL_temporary(shader); fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0); /* * tmp.x = tex(vtex, 1) * tmp.y = vtex.z * fragment = tex(tmp, 0) * quant */ for (i = 0; i < zscan->num_channels; ++i) ureg_TEX(shader, ureg_writemask(tmp[i], TGSI_WRITEMASK_X), TGSI_TEXTURE_2D, vtex[i], samp_scan); for (i = 0; i < zscan->num_channels; ++i) ureg_MOV(shader, ureg_writemask(tmp[i], TGSI_WRITEMASK_Y), ureg_scalar(vtex[i], TGSI_SWIZZLE_W)); for (i = 0; i < zscan->num_channels; ++i) { ureg_TEX(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X << i), TGSI_TEXTURE_2D, ureg_src(tmp[i]), samp_src); ureg_TEX(shader, ureg_writemask(quant, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, vtex[i], samp_quant); } ureg_MUL(shader, quant, ureg_src(quant), ureg_imm1f(shader, 16.0f)); ureg_MUL(shader, fragment, ureg_src(tmp[0]), ureg_src(quant)); for (i = 0; i < zscan->num_channels; ++i) ureg_release_temporary(shader, tmp[i]); ureg_END(shader); FREE(vtex); FREE(tmp); return ureg_create_shader_and_destroy(shader, zscan->pipe); }
static void * create_ycbcr_frag_shader(struct vl_mc *r, float scale, bool invert, vl_mc_ycbcr_frag_shader fs_callback, void *callback_priv) { struct ureg_program *shader; struct ureg_src flags; struct ureg_dst tmp; struct ureg_dst fragment; unsigned label; shader = ureg_create(TGSI_PROCESSOR_FRAGMENT); if (!shader) return NULL; flags = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_FLAGS, TGSI_INTERPOLATE_LINEAR); fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0); tmp = calc_line(shader); /* * if (field == tc.w) * kill(); * else { * fragment.xyz = tex(tc, sampler) * scale + tc.z * fragment.w = 1.0f * } */ ureg_SEQ(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_scalar(flags, TGSI_SWIZZLE_W), ureg_src(tmp)); ureg_IF(shader, ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), &label); ureg_KILP(shader); ureg_fixup_label(shader, label, ureg_get_instruction_number(shader)); ureg_ELSE(shader, &label); fs_callback(callback_priv, r, shader, VS_O_VTEX, tmp); if (scale != 1.0f) ureg_MAD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XYZ), ureg_src(tmp), ureg_imm1f(shader, scale), ureg_scalar(flags, TGSI_SWIZZLE_Z)); else ureg_ADD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XYZ), ureg_src(tmp), ureg_scalar(flags, TGSI_SWIZZLE_Z)); ureg_MUL(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ), ureg_src(tmp), ureg_imm1f(shader, invert ? -1.0f : 1.0f)); ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f)); ureg_fixup_label(shader, label, ureg_get_instruction_number(shader)); ureg_ENDIF(shader); ureg_release_temporary(shader, tmp); ureg_END(shader); return ureg_create_shader_and_destroy(shader, r->pipe); }
static void * create_vert_shader(struct vl_zscan *zscan) { struct ureg_program *shader; struct ureg_src scale; struct ureg_src vrect, vpos, block_num; struct ureg_dst tmp; struct ureg_dst o_vpos; struct ureg_dst *o_vtex; unsigned i; shader = ureg_create(PIPE_SHADER_VERTEX); if (!shader) return NULL; o_vtex = MALLOC(zscan->num_channels * sizeof(struct ureg_dst)); scale = ureg_imm2f(shader, (float)VL_BLOCK_WIDTH / zscan->buffer_width, (float)VL_BLOCK_HEIGHT / zscan->buffer_height); vrect = ureg_DECL_vs_input(shader, VS_I_RECT); vpos = ureg_DECL_vs_input(shader, VS_I_VPOS); block_num = ureg_DECL_vs_input(shader, VS_I_BLOCK_NUM); tmp = ureg_DECL_temporary(shader); o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS); for (i = 0; i < zscan->num_channels; ++i) o_vtex[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX + i); /* * o_vpos.xy = (vpos + vrect) * scale * o_vpos.zw = 1.0f * * tmp.xy = InstanceID / blocks_per_line * tmp.x = frac(tmp.x) * tmp.y = floor(tmp.y) * * o_vtex.x = vrect.x / blocks_per_line + tmp.x * o_vtex.y = vrect.y * o_vtex.z = tmp.z * blocks_per_line / blocks_total */ ureg_ADD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XY), vpos, vrect); ureg_MUL(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(tmp), scale); ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f)); ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XW), ureg_scalar(block_num, TGSI_SWIZZLE_X), ureg_imm1f(shader, 1.0f / zscan->blocks_per_line)); ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X)); ureg_FLR(shader, ureg_writemask(tmp, TGSI_WRITEMASK_W), ureg_src(tmp)); for (i = 0; i < zscan->num_channels; ++i) { ureg_ADD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, 1.0f / (zscan->blocks_per_line * VL_BLOCK_WIDTH) * (i - (signed)zscan->num_channels / 2))); ureg_MAD(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_X), vrect, ureg_imm1f(shader, 1.0f / zscan->blocks_per_line), ureg_src(tmp)); ureg_MOV(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_Y), vrect); ureg_MOV(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_Z), vpos); ureg_MUL(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_W), ureg_src(tmp), ureg_imm1f(shader, (float)zscan->blocks_per_line / zscan->blocks_total)); } ureg_release_temporary(shader, tmp); ureg_END(shader); FREE(o_vtex); return ureg_create_shader_and_destroy(shader, zscan->pipe); }