/* Returns the vtn_type that an access chain ultimately dereferences to.
 *
 * Starting from the chain's base variable type, each link peels off one
 * level: a literal member index for structs, or the (uniform) element type
 * for arrays/matrices/vectors.
 */
static struct vtn_type *
vtn_access_chain_tail_type(struct vtn_builder *b,
                           struct vtn_access_chain *chain)
{
   struct vtn_type *cur = chain->var->type;

   unsigned idx = 0;
   while (idx < chain->length) {
      if (!glsl_type_is_struct(cur->type)) {
         /* Non-struct derefs all share a single element type. */
         cur = cur->array_element;
      } else {
         /* SPIR-V requires struct member indices to be literals. */
         assert(chain->link[idx].mode == vtn_access_mode_literal);
         cur = cur->members[chain->link[idx].id];
      }
      idx++;
   }

   return cur;
}
/* Translates one SPIR-V ALU instruction into NIR.
 *
 * w points at the instruction words: w[1] is the result type id, w[2] the
 * result id, and w[3..] the operand value ids.  Most opcodes map 1:1 onto a
 * NIR ALU op via vtn_nir_alu_op_for_spirv_opcode(); the explicit cases below
 * are the ones that need multi-instruction expansions or struct results.
 */
void
vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
               const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;

   /* NOTE(review): handle_no_contraction presumably sets b->nb.exact when the
    * result is decorated NoContraction; it is reset to false on every exit
    * path below — confirm against the decoration handler.
    */
   vtn_foreach_decoration(b, val, handle_no_contraction, NULL);

   /* Collect the various SSA sources */
   const unsigned num_inputs = count - 3;
   struct vtn_ssa_value *vtn_src[4] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++)
      vtn_src[i] = vtn_ssa_value(b, w[i + 3]);

   /* Anything involving a matrix operand is handled by the dedicated
    * matrix path; only vector/scalar sources continue past this point.
    */
   if (glsl_type_is_matrix(vtn_src[0]->type) ||
       (num_inputs >= 2 && glsl_type_is_matrix(vtn_src[1]->type))) {
      vtn_handle_matrix_alu(b, opcode, val, vtn_src[0], vtn_src[1]);
      b->nb.exact = false;
      return;
   }

   val->ssa = vtn_create_ssa_value(b, type);
   nir_ssa_def *src[4] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++) {
      assert(glsl_type_is_vector_or_scalar(vtn_src[i]->type));
      src[i] = vtn_src[i]->def;
   }

   switch (opcode) {
   case SpvOpAny:
      /* any(v) == (v != vec(false)) component-wise, OR-reduced. */
      if (src[0]->num_components == 1) {
         val->ssa->def = nir_imov(&b->nb, src[0]);
      } else {
         nir_op op;
         switch (src[0]->num_components) {
         case 2:  op = nir_op_bany_inequal2; break;
         case 3:  op = nir_op_bany_inequal3; break;
         case 4:  op = nir_op_bany_inequal4; break;
         default: unreachable("invalid number of components");
         }
         val->ssa->def = nir_build_alu(&b->nb, op, src[0],
                                       nir_imm_int(&b->nb, NIR_FALSE),
                                       NULL, NULL);
      }
      break;

   case SpvOpAll:
      /* all(v) == (v == vec(true)) component-wise, AND-reduced. */
      if (src[0]->num_components == 1) {
         val->ssa->def = nir_imov(&b->nb, src[0]);
      } else {
         nir_op op;
         switch (src[0]->num_components) {
         case 2:  op = nir_op_ball_iequal2; break;
         case 3:  op = nir_op_ball_iequal3; break;
         case 4:  op = nir_op_ball_iequal4; break;
         default: unreachable("invalid number of components");
         }
         val->ssa->def = nir_build_alu(&b->nb, op, src[0],
                                      nir_imm_int(&b->nb, NIR_TRUE),
                                      NULL, NULL);
      }
      break;

   case SpvOpOuterProduct: {
      /* Column i of the result is src[0] scaled by src[1][i]. */
      for (unsigned i = 0; i < src[1]->num_components; i++) {
         val->ssa->elems[i]->def =
            nir_fmul(&b->nb, src[0], nir_channel(&b->nb, src[1], i));
      }
      break;
   }

   case SpvOpDot:
      val->ssa->def = nir_fdot(&b->nb, src[0], src[1]);
      break;

   /* The extended-arithmetic opcodes return a two-member struct:
    * elems[0] is the low result, elems[1] the carry/borrow/high bits.
    */
   case SpvOpIAddCarry:
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_iadd(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_uadd_carry(&b->nb, src[0], src[1]);
      break;

   case SpvOpISubBorrow:
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_isub(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_usub_borrow(&b->nb, src[0], src[1]);
      break;

   case SpvOpUMulExtended:
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_umul_high(&b->nb, src[0], src[1]);
      break;

   case SpvOpSMulExtended:
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_imul_high(&b->nb, src[0], src[1]);
      break;

   /* fwidth(x) = |ddx(x)| + |ddy(x)|, at the matching derivative quality. */
   case SpvOpFwidth:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
      break;
   case SpvOpFwidthFine:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
      break;
   case SpvOpFwidthCoarse:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
      break;

   case SpvOpVectorTimesScalar:
      /* The builder will take care of splatting for us. */
      val->ssa->def = nir_fmul(&b->nb, src[0], src[1]);
      break;

   case SpvOpIsNan:
      /* NaN is the only value that compares unequal to itself. */
      val->ssa->def = nir_fne(&b->nb, src[0], src[0]);
      break;

   case SpvOpIsInf:
      val->ssa->def = nir_feq(&b->nb, nir_fabs(&b->nb, src[0]),
                                      nir_imm_float(&b->nb, INFINITY));
      break;

   /* Unordered comparison: true if the ordered comparison holds OR either
    * operand is NaN (detected via x != x).
    */
   case SpvOpFUnordEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpFUnordLessThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpFUnordLessThanEqual:
   case SpvOpFUnordGreaterThanEqual: {
      bool swap;
      nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(type);

      nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap,
                                                  src_alu_type, dst_alu_type);

      /* Some SPIR-V comparisons map to the mirrored NIR op (e.g. ">" as
       * "<" with operands exchanged).
       */
      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      val->ssa->def =
         nir_ior(&b->nb,
                 nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL),
                 nir_ior(&b->nb,
                         nir_fne(&b->nb, src[0], src[0]),
                         nir_fne(&b->nb, src[1], src[1])));
      break;
   }

   /* Ordered comparison: true only if the comparison holds AND neither
    * operand is NaN (x == x fails for NaN).
    */
   case SpvOpFOrdEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFOrdLessThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFOrdGreaterThanEqual: {
      bool swap;
      nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(type);

      nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap,
                                                  src_alu_type, dst_alu_type);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      val->ssa->def =
         nir_iand(&b->nb,
                  nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL),
                  nir_iand(&b->nb,
                           nir_feq(&b->nb, src[0], src[0]),
                           nir_feq(&b->nb, src[1], src[1])));
      break;
   }

   default: {
      /* Everything else maps directly onto a single NIR ALU op. */
      bool swap;
      nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(type);

      nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap,
                                                  src_alu_type, dst_alu_type);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      val->ssa->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], src[3]);
      break;
   } /* default */
   }

   /* Clear NoContraction state so it doesn't leak into later instructions. */
   b->nb.exact = false;
}
/* Translates one GLSL.std.450 extended instruction to NIR.
 *
 * w points at the OpExtInst words: w[1] is the result type id, w[2] the
 * result id, and w[5..] the operand ids.  Instructions with no direct NIR
 * equivalent are expanded inline from simpler ops; the default case handles
 * the 1:1 mappings via vtn_nir_alu_op_for_spirv_glsl_opcode().
 */
static void
handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
                   const uint32_t *w, unsigned count)
{
   struct nir_builder *nb = &b->nb;
   const struct glsl_type *dest_type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;

   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   val->ssa = vtn_create_ssa_value(b, dest_type);

   /* Collect the various SSA sources */
   unsigned num_inputs = count - 5;
   nir_ssa_def *src[3] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++)
      src[i] = vtn_ssa_value(b, w[i + 5])->def;

   switch (entrypoint) {
   case GLSLstd450Radians:
      /* degrees * (pi / 180) */
      val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 0.01745329251));
      return;
   case GLSLstd450Degrees:
      /* radians * (180 / pi) */
      val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 57.2957795131));
      return;
   case GLSLstd450Tan:
      /* tan(x) = sin(x) / cos(x) */
      val->ssa->def = nir_fdiv(nb, nir_fsin(nb, src[0]),
                               nir_fcos(nb, src[0]));
      return;

   case GLSLstd450Modf: {
      /* Operate on |x| and re-apply the sign so both the fractional and
       * whole parts carry x's sign.
       */
      nir_ssa_def *sign = nir_fsign(nb, src[0]);
      nir_ssa_def *abs = nir_fabs(nb, src[0]);
      val->ssa->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
      /* The whole-number part is stored through the pointer operand w[6]. */
      nir_store_deref_var(nb, vtn_nir_deref(b, w[6]),
                          nir_fmul(nb, sign, nir_ffloor(nb, abs)), 0xf);
      return;
   }

   case GLSLstd450ModfStruct: {
      /* Same as Modf but both results go into a two-member struct. */
      nir_ssa_def *sign = nir_fsign(nb, src[0]);
      nir_ssa_def *abs = nir_fabs(nb, src[0]);
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
      val->ssa->elems[1]->def = nir_fmul(nb, sign, nir_ffloor(nb, abs));
      return;
   }

   case GLSLstd450Step:
      /* step(edge, x) = x >= edge ? 1.0 : 0.0 */
      val->ssa->def = nir_sge(nb, src[1], src[0]);
      return;

   case GLSLstd450Length:
      val->ssa->def = build_length(nb, src[0]);
      return;
   case GLSLstd450Distance:
      /* distance(a, b) = length(a - b) */
      val->ssa->def = build_length(nb, nir_fsub(nb, src[0], src[1]));
      return;
   case GLSLstd450Normalize:
      val->ssa->def = nir_fdiv(nb, src[0], build_length(nb, src[0]));
      return;

   case GLSLstd450Exp:
      val->ssa->def = build_exp(nb, src[0]);
      return;

   case GLSLstd450Log:
      val->ssa->def = build_log(nb, src[0]);
      return;

   case GLSLstd450FClamp:
      val->ssa->def = build_fclamp(nb, src[0], src[1], src[2]);
      return;
   case GLSLstd450UClamp:
      val->ssa->def = nir_umin(nb, nir_umax(nb, src[0], src[1]), src[2]);
      return;
   case GLSLstd450SClamp:
      val->ssa->def = nir_imin(nb, nir_imax(nb, src[0], src[1]), src[2]);
      return;

   case GLSLstd450Cross: {
      /* cross(a, b) = a.yzx * b.zxy - a.zxy * b.yzx */
      unsigned yzx[4] = { 1, 2, 0, 0 };
      unsigned zxy[4] = { 2, 0, 1, 0 };
      val->ssa->def =
         nir_fsub(nb, nir_fmul(nb, nir_swizzle(nb, src[0], yzx, 3, true),
                                   nir_swizzle(nb, src[1], zxy, 3, true)),
                      nir_fmul(nb, nir_swizzle(nb, src[0], zxy, 3, true),
                                   nir_swizzle(nb, src[1], yzx, 3, true)));
      return;
   }

   case GLSLstd450SmoothStep: {
      /* Operands: src[0] = edge0, src[1] = edge1, src[2] = x */
      /* t = clamp((x - edge0) / (edge1 - edge0), 0, 1) */
      nir_ssa_def *t =
         build_fclamp(nb, nir_fdiv(nb, nir_fsub(nb, src[2], src[0]),
                                       nir_fsub(nb, src[1], src[0])),
                          nir_imm_float(nb, 0.0), nir_imm_float(nb, 1.0));
      /* result = t * t * (3 - 2 * t) */
      val->ssa->def =
         nir_fmul(nb, t, nir_fmul(nb, t,
            nir_fsub(nb, nir_imm_float(nb, 3.0),
                         nir_fmul(nb, nir_imm_float(nb, 2.0), t))));
      return;
   }

   case GLSLstd450FaceForward:
      /* faceforward(N, I, Nref) = dot(Nref, I) < 0 ? N : -N */
      val->ssa->def =
         nir_bcsel(nb, nir_flt(nb, nir_fdot(nb, src[2], src[1]),
                                   nir_imm_float(nb, 0.0)),
                       src[0], nir_fneg(nb, src[0]));
      return;

   case GLSLstd450Reflect:
      /* I - 2 * dot(N, I) * N */
      val->ssa->def =
         nir_fsub(nb, src[0], nir_fmul(nb, nir_imm_float(nb, 2.0),
                              nir_fmul(nb, nir_fdot(nb, src[0], src[1]),
                                           src[1])));
      return;

   case GLSLstd450Refract: {
      nir_ssa_def *I = src[0];
      nir_ssa_def *N = src[1];
      nir_ssa_def *eta = src[2];
      nir_ssa_def *n_dot_i = nir_fdot(nb, N, I);
      nir_ssa_def *one = nir_imm_float(nb, 1.0);
      nir_ssa_def *zero = nir_imm_float(nb, 0.0);
      /* k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) */
      nir_ssa_def *k =
         nir_fsub(nb, one, nir_fmul(nb, eta, nir_fmul(nb, eta,
                      nir_fsub(nb, one, nir_fmul(nb, n_dot_i, n_dot_i)))));
      nir_ssa_def *result =
         nir_fsub(nb, nir_fmul(nb, eta, I),
                      nir_fmul(nb, nir_fadd(nb, nir_fmul(nb, eta, n_dot_i),
                                                nir_fsqrt(nb, k)), N));
      /* XXX: bcsel, or if statement? */
      /* Total internal reflection (k < 0) yields zero. */
      val->ssa->def = nir_bcsel(nb, nir_flt(nb, k, zero), zero, result);
      return;
   }

   case GLSLstd450Sinh:
      /* 0.5 * (e^x - e^(-x)) */
      val->ssa->def =
         nir_fmul(nb, nir_imm_float(nb, 0.5f),
                      nir_fsub(nb, build_exp(nb, src[0]),
                                   build_exp(nb, nir_fneg(nb, src[0]))));
      return;

   case GLSLstd450Cosh:
      /* 0.5 * (e^x + e^(-x)) */
      val->ssa->def =
         nir_fmul(nb, nir_imm_float(nb, 0.5f),
                      nir_fadd(nb, build_exp(nb, src[0]),
                                   build_exp(nb, nir_fneg(nb, src[0]))));
      return;

   case GLSLstd450Tanh:
      /* (0.5 * (e^x - e^(-x))) / (0.5 * (e^x + e^(-x))) */
      val->ssa->def =
         nir_fdiv(nb, nir_fmul(nb, nir_imm_float(nb, 0.5f),
                                   nir_fsub(nb, build_exp(nb, src[0]),
                                                build_exp(nb, nir_fneg(nb, src[0])))),
                      nir_fmul(nb, nir_imm_float(nb, 0.5f),
                                   nir_fadd(nb, build_exp(nb, src[0]),
                                                build_exp(nb, nir_fneg(nb, src[0])))));
      return;

   case GLSLstd450Asinh:
      /* asinh(x) = sign(x) * log(|x| + sqrt(x*x + 1)) */
      val->ssa->def = nir_fmul(nb, nir_fsign(nb, src[0]),
         build_log(nb, nir_fadd(nb, nir_fabs(nb, src[0]),
                       nir_fsqrt(nb, nir_fadd(nb, nir_fmul(nb, src[0], src[0]),
                                                  nir_imm_float(nb, 1.0f))))));
      return;
   case GLSLstd450Acosh:
      /* acosh(x) = log(x + sqrt(x*x - 1)) */
      val->ssa->def = build_log(nb, nir_fadd(nb, src[0],
         nir_fsqrt(nb, nir_fsub(nb, nir_fmul(nb, src[0], src[0]),
                                    nir_imm_float(nb, 1.0f)))));
      return;
   case GLSLstd450Atanh: {
      /* atanh(x) = 0.5 * log((1 + x) / (1 - x)) */
      nir_ssa_def *one = nir_imm_float(nb, 1.0);
      val->ssa->def = nir_fmul(nb, nir_imm_float(nb, 0.5f),
         build_log(nb, nir_fdiv(nb, nir_fadd(nb, one, src[0]),
                                    nir_fsub(nb, one, src[0]))));
      return;
   }

   /* Polynomial approximations; the two constants are the fit coefficients
    * passed through to build_asin().
    */
   case GLSLstd450Asin:
      val->ssa->def = build_asin(nb, src[0], 0.086566724, -0.03102955);
      return;

   case GLSLstd450Acos:
      /* acos(x) = pi/2 - asin(x) */
      val->ssa->def = nir_fsub(nb, nir_imm_float(nb, M_PI_2f),
                                   build_asin(nb, src[0], 0.08132463, -0.02363318));
      return;

   case GLSLstd450Atan:
      val->ssa->def = build_atan(nb, src[0]);
      return;

   case GLSLstd450Atan2:
      val->ssa->def = build_atan2(nb, src[0], src[1]);
      return;

   case GLSLstd450Frexp: {
      nir_ssa_def *exponent;
      val->ssa->def = build_frexp(nb, src[0], &exponent);
      /* The exponent goes out through the pointer operand w[6]. */
      nir_store_deref_var(nb, vtn_nir_deref(b, w[6]), exponent, 0xf);
      return;
   }

   case GLSLstd450FrexpStruct: {
      assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = build_frexp(nb, src[0],
                                            &val->ssa->elems[1]->def);
      return;
   }

   default:
      /* Everything else maps directly onto a single NIR ALU op. */
      val->ssa->def = nir_build_alu(&b->nb,
         vtn_nir_alu_op_for_spirv_glsl_opcode(entrypoint),
         src[0], src[1], src[2], NULL);
      return;
   }
}
void vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) { switch (opcode) { case SpvOpVariable: { struct vtn_variable *var = rzalloc(b, struct vtn_variable); var->type = vtn_value(b, w[1], vtn_value_type_type)->type; var->chain.var = var; var->chain.length = 0; struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_access_chain); val->access_chain = &var->chain; struct vtn_type *without_array = var->type; while(glsl_type_is_array(without_array->type)) without_array = without_array->array_element; nir_variable_mode nir_mode; switch ((SpvStorageClass)w[3]) { case SpvStorageClassUniform: case SpvStorageClassUniformConstant: if (without_array->block) { var->mode = vtn_variable_mode_ubo; b->shader->info.num_ubos++; } else if (without_array->buffer_block) { var->mode = vtn_variable_mode_ssbo; b->shader->info.num_ssbos++; } else if (glsl_type_is_image(without_array->type)) { var->mode = vtn_variable_mode_image; nir_mode = nir_var_uniform; b->shader->info.num_images++; } else if (glsl_type_is_sampler(without_array->type)) { var->mode = vtn_variable_mode_sampler; nir_mode = nir_var_uniform; b->shader->info.num_textures++; } else { assert(!"Invalid uniform variable type"); } break; case SpvStorageClassPushConstant: var->mode = vtn_variable_mode_push_constant; assert(b->shader->num_uniforms == 0); b->shader->num_uniforms = vtn_type_block_size(var->type) * 4; break; case SpvStorageClassInput: var->mode = vtn_variable_mode_input; nir_mode = nir_var_shader_in; break; case SpvStorageClassOutput: var->mode = vtn_variable_mode_output; nir_mode = nir_var_shader_out; break; case SpvStorageClassPrivate: var->mode = vtn_variable_mode_global; nir_mode = nir_var_global; break; case SpvStorageClassFunction: var->mode = vtn_variable_mode_local; nir_mode = nir_var_local; break; case SpvStorageClassWorkgroup: var->mode = vtn_variable_mode_workgroup; nir_mode = nir_var_shared; break; case SpvStorageClassCrossWorkgroup: case 
SpvStorageClassGeneric: case SpvStorageClassAtomicCounter: default: unreachable("Unhandled variable storage class"); } switch (var->mode) { case vtn_variable_mode_local: case vtn_variable_mode_global: case vtn_variable_mode_image: case vtn_variable_mode_sampler: case vtn_variable_mode_workgroup: /* For these, we create the variable normally */ var->var = rzalloc(b->shader, nir_variable); var->var->name = ralloc_strdup(var->var, val->name); var->var->type = var->type->type; var->var->data.mode = nir_mode; switch (var->mode) { case vtn_variable_mode_image: case vtn_variable_mode_sampler: var->var->interface_type = without_array->type; break; default: var->var->interface_type = NULL; break; } break; case vtn_variable_mode_input: case vtn_variable_mode_output: { /* For inputs and outputs, we immediately split structures. This * is for a couple of reasons. For one, builtins may all come in * a struct and we really want those split out into separate * variables. For another, interpolation qualifiers can be * applied to members of the top-level struct ane we need to be * able to preserve that information. */ int array_length = -1; struct vtn_type *interface_type = var->type; if (b->shader->stage == MESA_SHADER_GEOMETRY && glsl_type_is_array(var->type->type)) { /* In Geometry shaders (and some tessellation), inputs come * in per-vertex arrays. However, some builtins come in * non-per-vertex, hence the need for the is_array check. In * any case, there are no non-builtin arrays allowed so this * check should be sufficient. */ interface_type = var->type->array_element; array_length = glsl_get_length(var->type->type); } if (glsl_type_is_struct(interface_type->type)) { /* It's a struct. Split it. 
*/ unsigned num_members = glsl_get_length(interface_type->type); var->members = ralloc_array(b, nir_variable *, num_members); for (unsigned i = 0; i < num_members; i++) { const struct glsl_type *mtype = interface_type->members[i]->type; if (array_length >= 0) mtype = glsl_array_type(mtype, array_length); var->members[i] = rzalloc(b->shader, nir_variable); var->members[i]->name = ralloc_asprintf(var->members[i], "%s.%d", val->name, i); var->members[i]->type = mtype; var->members[i]->interface_type = interface_type->members[i]->type; var->members[i]->data.mode = nir_mode; } } else { var->var = rzalloc(b->shader, nir_variable); var->var->name = ralloc_strdup(var->var, val->name); var->var->type = var->type->type; var->var->interface_type = interface_type->type; var->var->data.mode = nir_mode; } /* For inputs and outputs, we need to grab locations and builtin * information from the interface type. */ vtn_foreach_decoration(b, interface_type->val, var_decoration_cb, var); break; case vtn_variable_mode_param: unreachable("Not created through OpVariable"); } case vtn_variable_mode_ubo: case vtn_variable_mode_ssbo: case vtn_variable_mode_push_constant: /* These don't need actual variables. */ break; } if (count > 4) { assert(count == 5); nir_constant *constant = vtn_value(b, w[4], vtn_value_type_constant)->constant; var->var->constant_initializer = nir_constant_clone(constant, var->var); } vtn_foreach_decoration(b, val, var_decoration_cb, var); if (var->mode == vtn_variable_mode_image || var->mode == vtn_variable_mode_sampler) { /* XXX: We still need the binding information in the nir_variable * for these. We should fix that. 
*/ var->var->data.binding = var->binding; var->var->data.descriptor_set = var->descriptor_set; if (var->mode == vtn_variable_mode_image) var->var->data.image.format = without_array->image_format; } if (var->mode == vtn_variable_mode_local) { assert(var->members == NULL && var->var != NULL); nir_function_impl_add_variable(b->impl, var->var); } else if (var->var) { nir_shader_add_variable(b->shader, var->var); } else if (var->members) { unsigned count = glsl_get_length(without_array->type); for (unsigned i = 0; i < count; i++) { assert(var->members[i]->data.mode != nir_var_local); nir_shader_add_variable(b->shader, var->members[i]); } } else { assert(var->mode == vtn_variable_mode_ubo || var->mode == vtn_variable_mode_ssbo || var->mode == vtn_variable_mode_push_constant); } break; }