static nir_ssa_def *
build_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
{
   nir_ssa_def *zero = nir_imm_float(b, 0.0f);

   /* If |x| >= 1.0e-8 * |y|: */
   nir_ssa_def *condition =
      nir_fge(b, nir_fabs(b, x),
              nir_fmul(b, nir_imm_float(b, 1.0e-8f), nir_fabs(b, y)));

   /* Then...call atan(y/x) and fix it up: */
   nir_ssa_def *atan1 = build_atan(b, nir_fdiv(b, y, x));
   nir_ssa_def *r_then =
      nir_bcsel(b, nir_flt(b, x, zero),
                nir_fadd(b, atan1,
                         nir_bcsel(b, nir_fge(b, y, zero),
                                   nir_imm_float(b, M_PIf),
                                   nir_imm_float(b, -M_PIf))),
                atan1);

   /* Else... */
   nir_ssa_def *r_else =
      nir_fmul(b, nir_fsign(b, y), nir_imm_float(b, M_PI_2f));

   return nir_bcsel(b, condition, r_then, r_else);
}
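/* Illustrative note (not part of the original pass): when x < 0 the
 * single-argument atan(y/x) lands in the wrong quadrant, and the bcsel above
 * applies the standard half-turn fixup
 *
 *    atan2(y, x) = atan(y/x) + pi   if x < 0, y >= 0
 *    atan2(y, x) = atan(y/x) - pi   if x < 0, y <  0
 *
 * When |x| < 1.0e-8 * |y| the quotient y/x is numerically unsafe, so the
 * result degenerates to the limiting value sign(y) * pi/2 instead.
 */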
/**
 * Approximate asin(x) by the formula:
 *    asin~(x) = sign(x) * (pi/2 - sqrt(1 - |x|) * (pi/2 + |x|(pi/4 - 1 + |x|(p0 + |x|p1))))
 *
 * which is correct to first order at x=0 and x=±1 regardless of the p
 * coefficients but can be made second-order correct at both ends by selecting
 * the fit coefficients appropriately.  Different p coefficients can be used
 * in the asin and acos implementation to minimize some relative error metric
 * in each case.
 */
static nir_ssa_def *
build_asin(nir_builder *b, nir_ssa_def *x, float p0, float p1)
{
   nir_ssa_def *abs_x = nir_fabs(b, x);
   return nir_fmul(b, nir_fsign(b, x),
                   nir_fsub(b, nir_imm_float(b, M_PI_2f),
                            nir_fmul(b, nir_fsqrt(b, nir_fsub(b, nir_imm_float(b, 1.0f), abs_x)),
                                     nir_fadd(b, nir_imm_float(b, M_PI_2f),
                                              nir_fmul(b, abs_x,
                                                       nir_fadd(b, nir_imm_float(b, M_PI_4f - 1.0f),
                                                                nir_fmul(b, abs_x,
                                                                         nir_fadd(b, nir_imm_float(b, p0),
                                                                                  nir_fmul(b, abs_x,
                                                                                           nir_imm_float(b, p1))))))))));
}
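/* Endpoint check for the fit above (illustrative, not from the original
 * source): at x = 1 the sqrt(1 - |x|) factor vanishes, so asin~(1) = pi/2
 * exactly.  At x = 0 (taking x > 0 small so sign(x) = 1) the derivative is
 *
 *    d/dx [pi/2 - sqrt(1 - x) * (pi/2 + x*(pi/4 - 1 + ...))] at x = 0
 *       = (1/2) * (pi/2) - (pi/4 - 1) = 1,
 *
 * matching asin'(0) = 1 independent of p0 and p1, as the comment claims.
 */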
/* Determinant of a 2x2 matrix given as two column vectors. */
static nir_ssa_def *
build_mat2_det(nir_builder *b, nir_ssa_def *col[2])
{
   unsigned swiz[4] = {1, 0, 0, 0};
   nir_ssa_def *p = nir_fmul(b, col[0], nir_swizzle(b, col[1], swiz, 2, true));
   return nir_fsub(b, nir_channel(b, p, 0), nir_channel(b, p, 1));
}
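/* Illustrative expansion of the swizzle trick above: with columns
 * col[0] = (a, b) and col[1] = (c, d), the swizzle yields (d, c), so
 * p = (a*d, b*c) and the subtraction produces the familiar a*d - b*c.
 */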
static struct vtn_ssa_value *
matrix_inverse(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   nir_ssa_def *adj_col[4];
   unsigned size = glsl_get_vector_elements(src->type);

   /* Build up an adjugate matrix */
   for (unsigned c = 0; c < size; c++) {
      nir_ssa_def *elem[4];
      for (unsigned r = 0; r < size; r++) {
         elem[r] = build_mat_subdet(&b->nb, src, size, c, r);

         if ((r + c) % 2)
            elem[r] = nir_fneg(&b->nb, elem[r]);
      }

      adj_col[c] = nir_vec(&b->nb, elem, size);
   }

   nir_ssa_def *det_inv = nir_frcp(&b->nb, build_mat_det(b, src));

   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type);
   for (unsigned i = 0; i < size; i++)
      val->elems[i]->def = nir_fmul(&b->nb, adj_col[i], det_inv);

   return val;
}
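/* The identity used above (illustrative): for an invertible matrix A,
 *
 *    A^-1 = adj(A) / det(A)
 *
 * where adj(A) is the transpose of the cofactor matrix.  Each cofactor is a
 * subdeterminant carrying the sign (-1)^(r+c), which is what the
 * (r + c) % 2 negation implements.
 */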
/* Fold the texel-offset source into the coordinate, scaling by 1/size when
 * the coordinates are normalized.
 */
static bool
lower_offset(nir_builder *b, nir_tex_instr *tex)
{
   int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
   if (offset_index < 0)
      return false;

   int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
   assert(coord_index >= 0);

   assert(tex->src[offset_index].src.is_ssa);
   assert(tex->src[coord_index].src.is_ssa);
   nir_ssa_def *offset = tex->src[offset_index].src.ssa;
   nir_ssa_def *coord = tex->src[coord_index].src.ssa;

   b->cursor = nir_before_instr(&tex->instr);

   nir_ssa_def *offset_coord;
   if (nir_tex_instr_src_type(tex, coord_index) == nir_type_float) {
      if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
         offset_coord = nir_fadd(b, coord, nir_i2f32(b, offset));
      } else {
         nir_ssa_def *txs = get_texture_size(b, tex);
         nir_ssa_def *scale = nir_frcp(b, txs);

         offset_coord = nir_fadd(b, coord,
                                 nir_fmul(b, nir_i2f32(b, offset), scale));
      }
   } else {
      offset_coord = nir_iadd(b, coord, offset);
   }

   if (tex->is_array) {
      /* The offset is not applied to the array index */
      if (tex->coord_components == 2) {
         offset_coord = nir_vec2(b, nir_channel(b, offset_coord, 0),
                                    nir_channel(b, coord, 1));
      } else if (tex->coord_components == 3) {
         offset_coord = nir_vec3(b, nir_channel(b, offset_coord, 0),
                                    nir_channel(b, offset_coord, 1),
                                    nir_channel(b, coord, 2));
      } else {
         unreachable("Invalid number of components");
      }
   }

   nir_instr_rewrite_src(&tex->instr, &tex->src[coord_index].src,
                         nir_src_for_ssa(offset_coord));

   nir_tex_instr_remove_src(tex, offset_index);

   return true;
}
static nir_ssa_def *
build_mat3_det(nir_builder *b, nir_ssa_def *col[3])
{
   unsigned yzx[4] = {1, 2, 0, 0};
   unsigned zxy[4] = {2, 0, 1, 0};

   nir_ssa_def *prod0 =
      nir_fmul(b, col[0],
               nir_fmul(b, nir_swizzle(b, col[1], yzx, 3, true),
                        nir_swizzle(b, col[2], zxy, 3, true)));
   nir_ssa_def *prod1 =
      nir_fmul(b, col[0],
               nir_fmul(b, nir_swizzle(b, col[1], zxy, 3, true),
                        nir_swizzle(b, col[2], yzx, 3, true)));

   nir_ssa_def *diff = nir_fsub(b, prod0, prod1);

   return nir_fadd(b, nir_channel(b, diff, 0),
                   nir_fadd(b, nir_channel(b, diff, 1),
                            nir_channel(b, diff, 2)));
}
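/* Illustrative note: this is the scalar triple product
 *
 *    det [c0 c1 c2] = c0 . (c1 x c2)
 *
 * with the cross product expanded componentwise through the yzx/zxy
 * swizzles: (c1 x c2)_i = c1_yzx[i] * c2_zxy[i] - c1_zxy[i] * c2_yzx[i],
 * and the final adds performing the dot product.
 */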
/* Build sqrt(dot(vec, vec)), i.e. the Euclidean length of vec. */
static nir_ssa_def *
build_length(nir_builder *b, nir_ssa_def *vec)
{
   switch (vec->num_components) {
   case 1: return nir_fsqrt(b, nir_fmul(b, vec, vec));
   case 2: return nir_fsqrt(b, nir_fdot2(b, vec, vec));
   case 3: return nir_fsqrt(b, nir_fdot3(b, vec, vec));
   case 4: return nir_fsqrt(b, nir_fdot4(b, vec, vec));
   default:
      unreachable("Invalid number of components");
   }
}
static struct vtn_ssa_value *
mat_times_scalar(struct vtn_builder *b,
                 struct vtn_ssa_value *mat,
                 nir_ssa_def *scalar)
{
   struct vtn_ssa_value *dest = vtn_create_ssa_value(b, mat->type);
   for (unsigned i = 0; i < glsl_get_matrix_columns(mat->type); i++) {
      if (glsl_get_base_type(mat->type) == GLSL_TYPE_FLOAT)
         dest->elems[i]->def = nir_fmul(&b->nb, mat->elems[i]->def, scalar);
      else
         dest->elems[i]->def = nir_imul(&b->nb, mat->elems[i]->def, scalar);
   }

   return dest;
}
/* Multiply interp_var_at_offset's offset by transform.x to flip it. */
static void
lower_interp_var_at_offset(lower_wpos_ytransform_state *state,
                           nir_intrinsic_instr *interp)
{
   nir_builder *b = &state->b;
   nir_ssa_def *offset;
   nir_ssa_def *flip_y;

   b->cursor = nir_before_instr(&interp->instr);

   offset = nir_ssa_for_src(b, interp->src[0], 2);
   flip_y = nir_fmul(b, nir_channel(b, offset, 1),
                     nir_channel(b, get_transform(state), 0));
   nir_instr_rewrite_src(&interp->instr, &interp->src[0],
                         nir_src_for_ssa(nir_vec2(b, nir_channel(b, offset, 0),
                                                  flip_y)));
}
/* turns 'fddy(p)' into 'fddy(fmul(p, transform.x))' */
static void
lower_fddy(lower_wpos_ytransform_state *state, nir_alu_instr *fddy)
{
   nir_builder *b = &state->b;
   nir_ssa_def *p, *pt, *trans;

   b->cursor = nir_before_instr(&fddy->instr);

   p = nir_ssa_for_alu_src(b, fddy, 0);
   trans = get_transform(state);
   pt = nir_fmul(b, p, nir_channel(b, trans, 0));

   nir_instr_rewrite_src(&fddy->instr,
                         &fddy->src[0].src,
                         nir_src_for_ssa(pt));

   for (unsigned i = 0; i < 4; i++)
      fddy->src[0].swizzle[i] = MIN2(i, pt->num_components - 1);
}
static void
lower_load_sample_pos(lower_wpos_ytransform_state *state,
                      nir_intrinsic_instr *intr)
{
   nir_builder *b = &state->b;

   b->cursor = nir_after_instr(&intr->instr);

   nir_ssa_def *pos = &intr->dest.ssa;
   nir_ssa_def *scale = nir_channel(b, get_transform(state), 0);
   nir_ssa_def *neg_scale = nir_channel(b, get_transform(state), 2);
   /* Either y or 1-y for scale equal to 1 or -1 respectively. */
   nir_ssa_def *flipped_y =
      nir_fadd(b, nir_fmax(b, neg_scale, nir_imm_float(b, 0.0)),
               nir_fmul(b, nir_channel(b, pos, 1), scale));
   nir_ssa_def *flipped_pos = nir_vec2(b, nir_channel(b, pos, 0), flipped_y);

   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, nir_src_for_ssa(flipped_pos),
                                  flipped_pos->parent_instr);
}
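/* Worked check of the select-free flip above (illustrative; it assumes the
 * transform's x and z channels hold (scale, neg_scale) = (1, -1) for the
 * identity case and (-1, 1) for the flipped case):
 *
 *    identity: max(-1, 0) + y *  1 = y
 *    flipped:  max( 1, 0) + y * -1 = 1 - y
 */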
/* Lower RECT textures to 2D by scaling the coordinates into the
 * normalized [0, 1] range.
 */
static void
lower_rect(nir_builder *b, nir_tex_instr *tex)
{
   nir_ssa_def *txs = get_texture_size(b, tex);
   nir_ssa_def *scale = nir_frcp(b, txs);

   /* Walk through the sources normalizing the requested arguments. */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type != nir_tex_src_coord)
         continue;

      nir_ssa_def *coords =
         nir_ssa_for_src(b, tex->src[i].src, tex->coord_components);
      nir_instr_rewrite_src(&tex->instr,
                            &tex->src[i].src,
                            nir_src_for_ssa(nir_fmul(b, coords, scale)));
   }

   tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
}
static nir_ssa_def *
build_mat4_det(nir_builder *b, nir_ssa_def **col)
{
   nir_ssa_def *subdet[4];
   for (unsigned i = 0; i < 4; i++) {
      unsigned swiz[3];
      for (unsigned j = 0; j < 3; j++)
         swiz[j] = j + (j >= i);

      nir_ssa_def *subcol[3];
      subcol[0] = nir_swizzle(b, col[1], swiz, 3, true);
      subcol[1] = nir_swizzle(b, col[2], swiz, 3, true);
      subcol[2] = nir_swizzle(b, col[3], swiz, 3, true);

      subdet[i] = build_mat3_det(b, subcol);
   }

   nir_ssa_def *prod = nir_fmul(b, col[0], nir_vec(b, subdet, 4));

   return nir_fadd(b, nir_fsub(b, nir_channel(b, prod, 0),
                               nir_channel(b, prod, 1)),
                   nir_fsub(b, nir_channel(b, prod, 2),
                            nir_channel(b, prod, 3)));
}
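/* Illustrative note: this is cofactor expansion along the first column,
 *
 *    det A = a0*M0 - a1*M1 + a2*M2 - a3*M3
 *
 * where a_i are the entries of col[0] and M_i the 3x3 minors over columns
 * 1..3 with row i deleted.  swiz[j] = j + (j >= i) picks the three rows
 * that skip row i, and the final add/sub pairs supply the alternating
 * cofactor signs.
 */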
static void
lower_load_pointcoord(lower_wpos_ytransform_state *state,
                      nir_intrinsic_instr *intr)
{
   nir_builder *b = &state->b;

   b->cursor = nir_after_instr(&intr->instr);

   nir_ssa_def *pntc = &intr->dest.ssa;
   nir_ssa_def *transform = get_transform(state);
   nir_ssa_def *y = nir_channel(b, pntc, 1);
   /* The offset is 1 if we're flipping, 0 otherwise. */
   nir_ssa_def *offset = nir_fmax(b, nir_channel(b, transform, 2),
                                  nir_imm_float(b, 0.0));
   /* Flip the sign of y if we're flipping. */
   nir_ssa_def *scaled = nir_fmul(b, y, nir_channel(b, transform, 0));

   /* Reassemble the vector. */
   nir_ssa_def *flipped_pntc = nir_vec2(b,
                                        nir_channel(b, pntc, 0),
                                        nir_fadd(b, offset, scaled));

   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
                                  nir_src_for_ssa(flipped_pntc),
                                  flipped_pntc->parent_instr);
}
/**
 * Return e^x.
 */
static nir_ssa_def *
build_exp(nir_builder *b, nir_ssa_def *x)
{
   return nir_fexp2(b, nir_fmul(b, x, nir_imm_float(b, M_LOG2E)));
}
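/* Illustrative identity: hardware exposes exp2 rather than exp, so e^x is
 * computed as 2^(x * log2(e)), using 2^(x*log2(e)) = (2^log2(e))^x = e^x.
 */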
static void
project_src(nir_builder *b, nir_tex_instr *tex)
{
   /* Find the projector in the srcs list, if present. */
   int proj_index = nir_tex_instr_src_index(tex, nir_tex_src_projector);
   if (proj_index < 0)
      return;

   b->cursor = nir_before_instr(&tex->instr);

   nir_ssa_def *inv_proj =
      nir_frcp(b, nir_ssa_for_src(b, tex->src[proj_index].src, 1));

   /* Walk through the sources projecting the arguments. */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_coord:
      case nir_tex_src_comparator:
         break;
      default:
         continue;
      }
      nir_ssa_def *unprojected =
         nir_ssa_for_src(b, tex->src[i].src, nir_tex_instr_src_size(tex, i));
      nir_ssa_def *projected = nir_fmul(b, unprojected, inv_proj);

      /* Array indices don't get projected, so make a new vector with the
       * coordinate's array index untouched.
       */
      if (tex->is_array && tex->src[i].src_type == nir_tex_src_coord) {
         switch (tex->coord_components) {
         case 4:
            projected = nir_vec4(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, projected, 1),
                                 nir_channel(b, projected, 2),
                                 nir_channel(b, unprojected, 3));
            break;
         case 3:
            projected = nir_vec3(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, projected, 1),
                                 nir_channel(b, unprojected, 2));
            break;
         case 2:
            projected = nir_vec2(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, unprojected, 1));
            break;
         default:
            unreachable("bad texture coord count for array");
            break;
         }
      }

      nir_instr_rewrite_src(&tex->instr,
                            &tex->src[i].src,
                            nir_src_for_ssa(projected));
   }

   nir_tex_instr_remove_src(tex, proj_index);
}
/* see emit_wpos_adjustment() in st_mesa_to_tgsi.c */
static void
emit_wpos_adjustment(lower_wpos_ytransform_state *state,
                     nir_intrinsic_instr *intr,
                     bool invert, float adjX, float adjY[2])
{
   nir_builder *b = &state->b;
   nir_variable *fragcoord = intr->variables[0]->var;
   nir_ssa_def *wpostrans, *wpos_temp, *wpos_temp_y, *wpos_input;

   assert(intr->dest.is_ssa);

   b->cursor = nir_before_instr(&intr->instr);

   wpostrans = get_transform(state);
   wpos_input = nir_load_var(b, fragcoord);

   /* First, apply the coordinate shift: */
   if (adjX || adjY[0] || adjY[1]) {
      if (adjY[0] != adjY[1]) {
         /* Adjust the y coordinate by adjY[1] or adjY[0] respectively
          * depending on whether inversion is actually going to be applied
          * or not, which is determined by testing against the inversion
          * state variable used below, which will be either +1 or -1.
          */
         nir_ssa_def *adj_temp;

         adj_temp = nir_cmp(b,
                            nir_channel(b, wpostrans, invert ? 2 : 0),
                            nir_imm_vec4(b, adjX, adjY[0], 0.0f, 0.0f),
                            nir_imm_vec4(b, adjX, adjY[1], 0.0f, 0.0f));

         wpos_temp = nir_fadd(b, wpos_input, adj_temp);
      } else {
         wpos_temp = nir_fadd(b, wpos_input,
                              nir_imm_vec4(b, adjX, adjY[0], 0.0f, 0.0f));
      }
      wpos_input = wpos_temp;
   } else {
      /* MOV wpos_temp, input[wpos] */
      wpos_temp = wpos_input;
   }

   /* Now the conditional y flip: STATE_FB_WPOS_Y_TRANSFORM.xy/zw will be
    * inversion/identity, or the other way around if we're drawing to an FBO.
    */
   if (invert) {
      /* wpos_temp.y = wpos_input * wpostrans.xxxx + wpostrans.yyyy */
      wpos_temp_y = nir_fadd(b,
                             nir_fmul(b, nir_channel(b, wpos_temp, 1),
                                      nir_channel(b, wpostrans, 0)),
                             nir_channel(b, wpostrans, 1));
   } else {
      /* wpos_temp.y = wpos_input * wpostrans.zzzz + wpostrans.wwww */
      wpos_temp_y = nir_fadd(b,
                             nir_fmul(b, nir_channel(b, wpos_temp, 1),
                                      nir_channel(b, wpostrans, 2)),
                             nir_channel(b, wpostrans, 3));
   }

   wpos_temp = nir_vec4(b,
                        nir_channel(b, wpos_temp, 0),
                        wpos_temp_y,
                        nir_channel(b, wpos_temp, 2),
                        nir_channel(b, wpos_temp, 3));

   nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(wpos_temp));
}
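/* Worked form of the flip above (illustrative; it assumes wpostrans packs
 * (scale, bias) pairs in .xy and .zw): the y channel becomes
 *
 *    y' = y * scale + bias
 *
 * which is the identity y' = y for (scale, bias) = (1, 0) and the flip
 * y' = height - y for (scale, bias) = (-1, height).
 */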
/**
 * Return ln(x) - the natural logarithm of x.
 */
static nir_ssa_def *
build_log(nir_builder *b, nir_ssa_def *x)
{
   return nir_fmul(b, nir_flog2(b, x), nir_imm_float(b, 1.0 / M_LOG2E));
}
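/* Illustrative identity, the mirror of build_exp():
 * ln(x) = log2(x) * ln(2) = log2(x) / log2(e), hence the multiply by
 * 1.0 / M_LOG2E on top of the hardware log2.
 */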
static void
handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
                   const uint32_t *w, unsigned count)
{
   struct nir_builder *nb = &b->nb;
   const struct glsl_type *dest_type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;

   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   val->ssa = vtn_create_ssa_value(b, dest_type);

   /* Collect the various SSA sources */
   unsigned num_inputs = count - 5;
   nir_ssa_def *src[3] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++)
      src[i] = vtn_ssa_value(b, w[i + 5])->def;

   switch (entrypoint) {
   case GLSLstd450Radians:
      val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 0.01745329251));
      return;
   case GLSLstd450Degrees:
      val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 57.2957795131));
      return;
   case GLSLstd450Tan:
      val->ssa->def = nir_fdiv(nb, nir_fsin(nb, src[0]),
                               nir_fcos(nb, src[0]));
      return;

   case GLSLstd450Modf: {
      nir_ssa_def *sign = nir_fsign(nb, src[0]);
      nir_ssa_def *abs = nir_fabs(nb, src[0]);
      val->ssa->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
      nir_store_deref_var(nb, vtn_nir_deref(b, w[6]),
                          nir_fmul(nb, sign, nir_ffloor(nb, abs)), 0xf);
      return;
   }

   case GLSLstd450ModfStruct: {
      nir_ssa_def *sign = nir_fsign(nb, src[0]);
      nir_ssa_def *abs = nir_fabs(nb, src[0]);
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
      val->ssa->elems[1]->def = nir_fmul(nb, sign, nir_ffloor(nb, abs));
      return;
   }

   case GLSLstd450Step:
      val->ssa->def = nir_sge(nb, src[1], src[0]);
      return;

   case GLSLstd450Length:
      val->ssa->def = build_length(nb, src[0]);
      return;
   case GLSLstd450Distance:
      val->ssa->def = build_length(nb, nir_fsub(nb, src[0], src[1]));
      return;
   case GLSLstd450Normalize:
      val->ssa->def = nir_fdiv(nb, src[0], build_length(nb, src[0]));
      return;

   case GLSLstd450Exp:
      val->ssa->def = build_exp(nb, src[0]);
      return;

   case GLSLstd450Log:
      val->ssa->def = build_log(nb, src[0]);
      return;

   case GLSLstd450FClamp:
      val->ssa->def = build_fclamp(nb, src[0], src[1], src[2]);
      return;
   case GLSLstd450UClamp:
      val->ssa->def = nir_umin(nb, nir_umax(nb, src[0], src[1]), src[2]);
      return;
   case GLSLstd450SClamp:
      val->ssa->def = nir_imin(nb, nir_imax(nb, src[0], src[1]), src[2]);
      return;

   case GLSLstd450Cross: {
      unsigned yzx[4] = { 1, 2, 0, 0 };
      unsigned zxy[4] = { 2, 0, 1, 0 };
      val->ssa->def =
         nir_fsub(nb, nir_fmul(nb, nir_swizzle(nb, src[0], yzx, 3, true),
                               nir_swizzle(nb, src[1], zxy, 3, true)),
                  nir_fmul(nb, nir_swizzle(nb, src[0], zxy, 3, true),
                           nir_swizzle(nb, src[1], yzx, 3, true)));
      return;
   }

   case GLSLstd450SmoothStep: {
      /* t = clamp((x - edge0) / (edge1 - edge0), 0, 1) */
      nir_ssa_def *t =
         build_fclamp(nb, nir_fdiv(nb, nir_fsub(nb, src[2], src[0]),
                                   nir_fsub(nb, src[1], src[0])),
                      nir_imm_float(nb, 0.0), nir_imm_float(nb, 1.0));
      /* result = t * t * (3 - 2 * t) */
      val->ssa->def =
         nir_fmul(nb, t, nir_fmul(nb, t,
                                  nir_fsub(nb, nir_imm_float(nb, 3.0),
                                           nir_fmul(nb, nir_imm_float(nb, 2.0), t))));
      return;
   }

   case GLSLstd450FaceForward:
      val->ssa->def =
         nir_bcsel(nb, nir_flt(nb, nir_fdot(nb, src[2], src[1]),
                               nir_imm_float(nb, 0.0)),
                   src[0], nir_fneg(nb, src[0]));
      return;

   case GLSLstd450Reflect:
      /* I - 2 * dot(N, I) * N */
      val->ssa->def =
         nir_fsub(nb, src[0], nir_fmul(nb, nir_imm_float(nb, 2.0),
                                       nir_fmul(nb, nir_fdot(nb, src[0], src[1]),
                                                src[1])));
      return;

   case GLSLstd450Refract: {
      nir_ssa_def *I = src[0];
      nir_ssa_def *N = src[1];
      nir_ssa_def *eta = src[2];
      nir_ssa_def *n_dot_i = nir_fdot(nb, N, I);
      nir_ssa_def *one = nir_imm_float(nb, 1.0);
      nir_ssa_def *zero = nir_imm_float(nb, 0.0);
      /* k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) */
      nir_ssa_def *k =
         nir_fsub(nb, one,
                  nir_fmul(nb, eta,
                           nir_fmul(nb, eta,
                                    nir_fsub(nb, one,
                                             nir_fmul(nb, n_dot_i, n_dot_i)))));
      nir_ssa_def *result =
         nir_fsub(nb, nir_fmul(nb, eta, I),
                  nir_fmul(nb, nir_fadd(nb, nir_fmul(nb, eta, n_dot_i),
                                        nir_fsqrt(nb, k)), N));
      /* XXX: bcsel, or if statement? */
      val->ssa->def = nir_bcsel(nb, nir_flt(nb, k, zero), zero, result);
      return;
   }

   case GLSLstd450Sinh:
      /* 0.5 * (e^x - e^(-x)) */
      val->ssa->def =
         nir_fmul(nb, nir_imm_float(nb, 0.5f),
                  nir_fsub(nb, build_exp(nb, src[0]),
                           build_exp(nb, nir_fneg(nb, src[0]))));
      return;

   case GLSLstd450Cosh:
      /* 0.5 * (e^x + e^(-x)) */
      val->ssa->def =
         nir_fmul(nb, nir_imm_float(nb, 0.5f),
                  nir_fadd(nb, build_exp(nb, src[0]),
                           build_exp(nb, nir_fneg(nb, src[0]))));
      return;

   case GLSLstd450Tanh:
      /* (0.5 * (e^x - e^(-x))) / (0.5 * (e^x + e^(-x))) */
      val->ssa->def =
         nir_fdiv(nb, nir_fmul(nb, nir_imm_float(nb, 0.5f),
                               nir_fsub(nb, build_exp(nb, src[0]),
                                        build_exp(nb, nir_fneg(nb, src[0])))),
                  nir_fmul(nb, nir_imm_float(nb, 0.5f),
                           nir_fadd(nb, build_exp(nb, src[0]),
                                    build_exp(nb, nir_fneg(nb, src[0])))));
      return;

   case GLSLstd450Asinh:
      val->ssa->def =
         nir_fmul(nb, nir_fsign(nb, src[0]),
                  build_log(nb, nir_fadd(nb, nir_fabs(nb, src[0]),
                                         nir_fsqrt(nb, nir_fadd(nb, nir_fmul(nb, src[0], src[0]),
                                                                nir_imm_float(nb, 1.0f))))));
      return;
   case GLSLstd450Acosh:
      val->ssa->def =
         build_log(nb, nir_fadd(nb, src[0],
                                nir_fsqrt(nb, nir_fsub(nb, nir_fmul(nb, src[0], src[0]),
                                                       nir_imm_float(nb, 1.0f)))));
      return;
   case GLSLstd450Atanh: {
      nir_ssa_def *one = nir_imm_float(nb, 1.0);
      val->ssa->def =
         nir_fmul(nb, nir_imm_float(nb, 0.5f),
                  build_log(nb, nir_fdiv(nb, nir_fadd(nb, one, src[0]),
                                         nir_fsub(nb, one, src[0]))));
      return;
   }

   case GLSLstd450Asin:
      val->ssa->def = build_asin(nb, src[0], 0.086566724, -0.03102955);
      return;

   case GLSLstd450Acos:
      val->ssa->def =
         nir_fsub(nb, nir_imm_float(nb, M_PI_2f),
                  build_asin(nb, src[0], 0.08132463, -0.02363318));
      return;

   case GLSLstd450Atan:
      val->ssa->def = build_atan(nb, src[0]);
      return;

   case GLSLstd450Atan2:
      val->ssa->def = build_atan2(nb, src[0], src[1]);
      return;

   case GLSLstd450Frexp: {
      nir_ssa_def *exponent;
      val->ssa->def = build_frexp(nb, src[0], &exponent);
      nir_store_deref_var(nb, vtn_nir_deref(b, w[6]), exponent, 0xf);
      return;
   }

   case GLSLstd450FrexpStruct: {
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = build_frexp(nb, src[0],
                                            &val->ssa->elems[1]->def);
      return;
   }

   default:
      val->ssa->def =
         nir_build_alu(&b->nb,
                       vtn_nir_alu_op_for_spirv_glsl_opcode(entrypoint),
                       src[0], src[1], src[2], NULL);
      return;
   }
}
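/* Worked note on the Refract case above (illustrative): Snell's law gives
 * sin(theta_t) = eta * sin(theta_i), so
 *
 *    k = 1 - eta^2 * (1 - (N.I)^2) = 1 - sin^2(theta_t) = cos^2(theta_t).
 *
 * When k < 0 no transmitted angle exists (total internal reflection), which
 * is why the bcsel returns 0 instead of eta*I - (eta*(N.I) + sqrt(k))*N.
 */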
static nir_ssa_def *
build_atan(nir_builder *b, nir_ssa_def *y_over_x)
{
   nir_ssa_def *abs_y_over_x = nir_fabs(b, y_over_x);
   nir_ssa_def *one = nir_imm_float(b, 1.0f);

   /*
    * range-reduction, first step:
    *
    *      / y_over_x         if |y_over_x| <= 1.0;
    * x = <
    *      \ 1.0 / y_over_x   otherwise
    */
   nir_ssa_def *x = nir_fdiv(b, nir_fmin(b, abs_y_over_x, one),
                             nir_fmax(b, abs_y_over_x, one));

   /*
    * approximate atan by evaluating polynomial:
    *
    * x   * 0.9999793128310355 - x^3  * 0.3326756418091246 +
    * x^5 * 0.1938924977115610 - x^7  * 0.1173503194786851 +
    * x^9 * 0.0536813784310406 - x^11 * 0.0121323213173444
    */
   nir_ssa_def *x_2 = nir_fmul(b, x, x);
   nir_ssa_def *x_3 = nir_fmul(b, x_2, x);
   nir_ssa_def *x_5 = nir_fmul(b, x_3, x_2);
   nir_ssa_def *x_7 = nir_fmul(b, x_5, x_2);
   nir_ssa_def *x_9 = nir_fmul(b, x_7, x_2);
   nir_ssa_def *x_11 = nir_fmul(b, x_9, x_2);

   nir_ssa_def *polynomial_terms[] = {
      nir_fmul(b, x, nir_imm_float(b, 0.9999793128310355f)),
      nir_fmul(b, x_3, nir_imm_float(b, -0.3326756418091246f)),
      nir_fmul(b, x_5, nir_imm_float(b, 0.1938924977115610f)),
      nir_fmul(b, x_7, nir_imm_float(b, -0.1173503194786851f)),
      nir_fmul(b, x_9, nir_imm_float(b, 0.0536813784310406f)),
      nir_fmul(b, x_11, nir_imm_float(b, -0.0121323213173444f)),
   };

   nir_ssa_def *tmp =
      build_fsum(b, polynomial_terms, ARRAY_SIZE(polynomial_terms));

   /* range-reduction fixup */
   tmp = nir_fadd(b, tmp,
                  nir_fmul(b,
                           nir_b2f(b, nir_flt(b, one, abs_y_over_x)),
                           nir_fadd(b, nir_fmul(b, tmp,
                                                nir_imm_float(b, -2.0f)),
                                    nir_imm_float(b, M_PI_2f))));

   /* sign fixup */
   return nir_fmul(b, tmp, nir_fsign(b, y_over_x));
}
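/* Illustrative check of the range-reduction fixup above: for |y_over_x| > 1
 * the polynomial was evaluated at the reciprocal, and the identity
 *
 *    atan(u) = pi/2 - atan(1/u)   for u > 0
 *
 * is applied branch-free as tmp + (pi/2 - 2*tmp) = pi/2 - tmp, gated by
 * b2f(1 < |y_over_x|) so the correction term vanishes in the
 * already-in-range case.
 */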
}

static void
convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
                   nir_ssa_def *y, nir_ssa_def *u, nir_ssa_def *v,
                   nir_ssa_def *a)
{
   nir_const_value m[3] = {
      { .f32 = { 1.0f,  0.0f,         1.59602678f, 0.0f } },
      { .f32 = { 1.0f, -0.39176229f, -0.81296764f, 0.0f } },
      { .f32 = { 1.0f,  2.01723214f,  0.0f,        0.0f } }
   };

   nir_ssa_def *yuv =
      nir_vec4(b,
               nir_fmul(b, nir_imm_float(b, 1.16438356f),
                        nir_fadd(b, y, nir_imm_float(b, -16.0f / 255.0f))),
               nir_channel(b, nir_fadd(b, u, nir_imm_float(b, -128.0f / 255.0f)), 0),
               nir_channel(b, nir_fadd(b, v, nir_imm_float(b, -128.0f / 255.0f)), 0),
               nir_imm_float(b, 0.0));

   nir_ssa_def *red = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[0]));
   nir_ssa_def *green = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[1]));
   nir_ssa_def *blue = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[2]));

   nir_ssa_def *result = nir_vec4(b, red, green, blue, a);

   nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_src_for_ssa(result));
}

static void
lower_y_uv_external(nir_builder *b, nir_tex_instr *tex)
/* Lower idiv/udiv/umod to a float-rcp based sequence with integer
 * error-correction steps, for hardware without native integer division.
 */
static void
convert_instr(nir_builder *bld, nir_alu_instr *alu)
{
   nir_ssa_def *numer, *denom, *af, *bf, *a, *b, *q, *r;
   nir_op op = alu->op;
   bool is_signed;

   if ((op != nir_op_idiv) &&
       (op != nir_op_udiv) &&
       (op != nir_op_umod))
      return;

   is_signed = (op == nir_op_idiv);

   bld->cursor = nir_before_instr(&alu->instr);

   numer = nir_ssa_for_alu_src(bld, alu, 0);
   denom = nir_ssa_for_alu_src(bld, alu, 1);

   if (is_signed) {
      af = nir_i2f(bld, numer);
      bf = nir_i2f(bld, denom);
      af = nir_fabs(bld, af);
      bf = nir_fabs(bld, bf);
      a  = nir_iabs(bld, numer);
      b  = nir_iabs(bld, denom);
   } else {
      af = nir_u2f(bld, numer);
      bf = nir_u2f(bld, denom);
      a  = numer;
      b  = denom;
   }

   /* get first result: */
   bf = nir_frcp(bld, bf);
   /* yes, really: the isub treats the float's bits as an integer, nudging
    * rcp(b) down by 2 ulps so the first quotient underestimates.
    */
   bf = nir_isub(bld, bf, nir_imm_int(bld, 2));
   q  = nir_fmul(bld, af, bf);

   if (is_signed) {
      q = nir_f2i(bld, q);
   } else {
      q = nir_f2u(bld, q);
   }

   /* get error of first result: */
   r = nir_imul(bld, q, b);
   r = nir_isub(bld, a, r);
   r = nir_u2f(bld, r);
   r = nir_fmul(bld, r, bf);
   r = nir_f2u(bld, r);

   /* add quotients: */
   q = nir_iadd(bld, q, r);

   /* correction: if modulus >= divisor, add 1 */
   r = nir_imul(bld, q, b);
   r = nir_isub(bld, a, r);
   r = nir_uge(bld, r, b);
   r = nir_b2i(bld, r);
   q = nir_iadd(bld, q, r);

   if (is_signed) {
      /* fix the sign: */
      r = nir_ixor(bld, numer, denom);
      r = nir_ushr(bld, r, nir_imm_int(bld, 31));
      r = nir_i2b(bld, r);
      b = nir_ineg(bld, q);
      q = nir_bcsel(bld, r, b, q);
   }

   if (op == nir_op_umod) {
      /* division result in q */
      r = nir_imul(bld, q, b);
      q = nir_isub(bld, a, r);
   }

   assert(alu->dest.dest.is_ssa);
   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(q));
}
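/* Worked example of the sequence above (illustrative): take 7 / 3 unsigned.
 * rcp(3.0) nudged down by 2 ulps is just under 1/3, so q = f2u(7 * ~0.333)
 * = 2.  The error pass computes r = 7 - 2*3 = 1 and f2u(1 * ~0.333) = 0,
 * leaving q = 2; the final correction checks 7 - 2*3 = 1 >= 3, which is
 * false, so q stays 2.  For umod the last step returns 7 - 2*3 = 1.
 */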
static struct vtn_ssa_value *
matrix_multiply(struct vtn_builder *b,
                struct vtn_ssa_value *_src0, struct vtn_ssa_value *_src1)
{
   struct vtn_ssa_value *src0 = wrap_matrix(b, _src0);
   struct vtn_ssa_value *src1 = wrap_matrix(b, _src1);
   struct vtn_ssa_value *src0_transpose = wrap_matrix(b, _src0->transposed);
   struct vtn_ssa_value *src1_transpose = wrap_matrix(b, _src1->transposed);

   unsigned src0_rows = glsl_get_vector_elements(src0->type);
   unsigned src0_columns = glsl_get_matrix_columns(src0->type);
   unsigned src1_columns = glsl_get_matrix_columns(src1->type);

   const struct glsl_type *dest_type;
   if (src1_columns > 1) {
      dest_type = glsl_matrix_type(glsl_get_base_type(src0->type),
                                   src0_rows, src1_columns);
   } else {
      dest_type = glsl_vector_type(glsl_get_base_type(src0->type), src0_rows);
   }
   struct vtn_ssa_value *dest = vtn_create_ssa_value(b, dest_type);

   dest = wrap_matrix(b, dest);

   bool transpose_result = false;
   if (src0_transpose && src1_transpose) {
      /* transpose(A) * transpose(B) = transpose(B * A) */
      src1 = src0_transpose;
      src0 = src1_transpose;
      src0_transpose = NULL;
      src1_transpose = NULL;
      transpose_result = true;
   }

   if (src0_transpose && !src1_transpose &&
       glsl_get_base_type(src0->type) == GLSL_TYPE_FLOAT) {
      /* We already have the rows of src0 and the columns of src1 available,
       * so we can just take the dot product of each row with each column to
       * get the result.
       */
      for (unsigned i = 0; i < src1_columns; i++) {
         nir_ssa_def *vec_src[4];
         for (unsigned j = 0; j < src0_rows; j++) {
            vec_src[j] = nir_fdot(&b->nb, src0_transpose->elems[j]->def,
                                  src1->elems[i]->def);
         }
         dest->elems[i]->def = nir_vec(&b->nb, vec_src, src0_rows);
      }
   } else {
      /* We don't handle the case where src1 is transposed but not src0, since
       * the general case only uses individual components of src1 so the
       * optimizer should chew through the transpose we emitted for src1.
       */
      for (unsigned i = 0; i < src1_columns; i++) {
         /* dest[i] = sum(src0[j] * src1[i][j] for all j) */
         dest->elems[i]->def =
            nir_fmul(&b->nb, src0->elems[0]->def,
                     nir_channel(&b->nb, src1->elems[i]->def, 0));
         for (unsigned j = 1; j < src0_columns; j++) {
            dest->elems[i]->def =
               nir_fadd(&b->nb, dest->elems[i]->def,
                        nir_fmul(&b->nb, src0->elems[j]->def,
                                 nir_channel(&b->nb, src1->elems[i]->def, j)));
         }
      }
   }

   dest = unwrap_matrix(dest);

   if (transpose_result)
      dest = vtn_ssa_transpose(b, dest);

   return dest;
}
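/* The identity behind the transpose_result path above (illustrative):
 *
 *    transpose(A) * transpose(B) = transpose(B * A)
 *
 * so when both operands are already available in transposed form, the code
 * multiplies them in swapped order and performs a single transpose at the
 * end instead of materializing two transposes up front.
 */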
void
vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
               const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;

   vtn_foreach_decoration(b, val, handle_no_contraction, NULL);

   /* Collect the various SSA sources */
   const unsigned num_inputs = count - 3;
   struct vtn_ssa_value *vtn_src[4] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++)
      vtn_src[i] = vtn_ssa_value(b, w[i + 3]);

   if (glsl_type_is_matrix(vtn_src[0]->type) ||
       (num_inputs >= 2 && glsl_type_is_matrix(vtn_src[1]->type))) {
      vtn_handle_matrix_alu(b, opcode, val, vtn_src[0], vtn_src[1]);
      b->nb.exact = false;
      return;
   }

   val->ssa = vtn_create_ssa_value(b, type);
   nir_ssa_def *src[4] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++) {
      assert(glsl_type_is_vector_or_scalar(vtn_src[i]->type));
      src[i] = vtn_src[i]->def;
   }

   switch (opcode) {
   case SpvOpAny:
      if (src[0]->num_components == 1) {
         val->ssa->def = nir_imov(&b->nb, src[0]);
      } else {
         nir_op op;
         switch (src[0]->num_components) {
         case 2:  op = nir_op_bany_inequal2; break;
         case 3:  op = nir_op_bany_inequal3; break;
         case 4:  op = nir_op_bany_inequal4; break;
         default: unreachable("invalid number of components");
         }
         val->ssa->def = nir_build_alu(&b->nb, op, src[0],
                                       nir_imm_int(&b->nb, NIR_FALSE),
                                       NULL, NULL);
      }
      break;

   case SpvOpAll:
      if (src[0]->num_components == 1) {
         val->ssa->def = nir_imov(&b->nb, src[0]);
      } else {
         nir_op op;
         switch (src[0]->num_components) {
         case 2:  op = nir_op_ball_iequal2; break;
         case 3:  op = nir_op_ball_iequal3; break;
         case 4:  op = nir_op_ball_iequal4; break;
         default: unreachable("invalid number of components");
         }
         val->ssa->def = nir_build_alu(&b->nb, op, src[0],
                                       nir_imm_int(&b->nb, NIR_TRUE),
                                       NULL, NULL);
      }
      break;

   case SpvOpOuterProduct: {
      for (unsigned i = 0; i < src[1]->num_components; i++) {
         val->ssa->elems[i]->def =
            nir_fmul(&b->nb, src[0], nir_channel(&b->nb, src[1], i));
      }
      break;
   }

   case SpvOpDot:
      val->ssa->def = nir_fdot(&b->nb, src[0], src[1]);
      break;

   case SpvOpIAddCarry:
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_iadd(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_uadd_carry(&b->nb, src[0], src[1]);
      break;

   case SpvOpISubBorrow:
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_isub(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_usub_borrow(&b->nb, src[0], src[1]);
      break;

   case SpvOpUMulExtended:
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_umul_high(&b->nb, src[0], src[1]);
      break;

   case SpvOpSMulExtended:
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_imul_high(&b->nb, src[0], src[1]);
      break;

   case SpvOpFwidth:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
      break;
   case SpvOpFwidthFine:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
      break;
   case SpvOpFwidthCoarse:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
      break;

   case SpvOpVectorTimesScalar:
      /* The builder will take care of splatting for us. */
      val->ssa->def = nir_fmul(&b->nb, src[0], src[1]);
      break;

   case SpvOpIsNan:
      val->ssa->def = nir_fne(&b->nb, src[0], src[0]);
      break;

   case SpvOpIsInf:
      val->ssa->def = nir_feq(&b->nb, nir_fabs(&b->nb, src[0]),
                              nir_imm_float(&b->nb, INFINITY));
      break;

   case SpvOpFUnordEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpFUnordLessThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpFUnordLessThanEqual:
   case SpvOpFUnordGreaterThanEqual: {
      bool swap;
      nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(type);

      nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap,
                                                  src_alu_type, dst_alu_type);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      val->ssa->def =
         nir_ior(&b->nb,
                 nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL),
                 nir_ior(&b->nb,
                         nir_fne(&b->nb, src[0], src[0]),
                         nir_fne(&b->nb, src[1], src[1])));
      break;
   }

   case SpvOpFOrdEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFOrdLessThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFOrdGreaterThanEqual: {
      bool swap;
      nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(type);

      nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap,
                                                  src_alu_type, dst_alu_type);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      val->ssa->def =
         nir_iand(&b->nb,
                  nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL),
                  nir_iand(&b->nb,
                           nir_feq(&b->nb, src[0], src[0]),
                           nir_feq(&b->nb, src[1], src[1])));
      break;
   }

   default: {
      bool swap;
      nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(type);

      nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap,
                                                  src_alu_type, dst_alu_type);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      val->ssa->def = nir_build_alu(&b->nb, op, src[0], src[1],
                                    src[2], src[3]);
      break;
   } /* default */
   }

   b->nb.exact = false;
}
   return &plane_tex->dest.ssa;
}

static void
convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
                   nir_ssa_def *y, nir_ssa_def *u, nir_ssa_def *v)
{
   nir_const_value m[3] = {
      { .f32 = { 1.0f,  0.0f,         1.59602678f, 0.0f } },
      { .f32 = { 1.0f, -0.39176229f, -0.81296764f, 0.0f } },
      { .f32 = { 1.0f,  2.01723214f,  0.0f,        0.0f } }
   };

   nir_ssa_def *yuv =
      nir_vec4(b,
               nir_fmul(b, nir_imm_float(b, 1.16438356f),
                        nir_fadd(b, y, nir_imm_float(b, -0.0625f))),
               nir_channel(b, nir_fadd(b, u, nir_imm_float(b, -0.5f)), 0),
               nir_channel(b, nir_fadd(b, v, nir_imm_float(b, -0.5f)), 0),
               nir_imm_float(b, 0.0));

   nir_ssa_def *red = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[0]));
   nir_ssa_def *green = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[1]));
   nir_ssa_def *blue = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[2]));

   nir_ssa_def *result = nir_vec4(b, red, green, blue, nir_imm_float(b, 1.0f));

   nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_src_for_ssa(result));
}

static void
lower_y_uv_external(nir_builder *b, nir_tex_instr *tex)