/* Declare a contiguous block of `nr' uint immediate values, packed four
 * to an immediate slot.  Returns a source register referencing the first
 * slot of the block; on overflow the program is flagged bad and slot 0
 * is returned.
 */
struct ureg_src
ureg_DECL_immediate_block_uint( struct ureg_program *ureg,
                                const unsigned *v,
                                unsigned nr )
{
   const unsigned nr_slots = (nr + 3) / 4;
   unsigned first, slot, remaining;

   if (ureg->nr_immediates + nr_slots > UREG_MAX_IMMEDIATE) {
      set_bad(ureg);
      return ureg_src_register(TGSI_FILE_IMMEDIATE, 0);
   }

   first = ureg->nr_immediates;
   ureg->nr_immediates += nr_slots;

   /* Fill each slot with up to four values from `v'. */
   remaining = nr;
   for (slot = 0; slot < nr_slots; slot++) {
      const unsigned count = remaining > 4 ? 4 : remaining;

      ureg->immediate[first + slot].type = TGSI_IMM_UINT32;
      ureg->immediate[first + slot].nr = count;
      memcpy(ureg->immediate[first + slot].value.u,
             &v[slot * 4],
             count * sizeof(uint));
      remaining -= count;
   }

   return ureg_src_register(TGSI_FILE_IMMEDIATE, first);
}
/* Declare (or reuse) an output register for the given semantic,
 * OR-ing `usage_mask' into its set of written components.  On overflow
 * the program is flagged bad (the returned index is then past the
 * declared range, matching historical behavior).
 */
struct ureg_dst
ureg_DECL_output_masked( struct ureg_program *ureg,
                         unsigned name,
                         unsigned index,
                         unsigned usage_mask )
{
   unsigned i;

   assert(usage_mask != 0);

   /* Reuse an existing declaration with the same semantic, widening
    * its usage mask.
    */
   for (i = 0; i < ureg->nr_outputs; i++) {
      if (ureg->output[i].semantic_name == name &&
          ureg->output[i].semantic_index == index) {
         ureg->output[i].usage_mask |= usage_mask;
         return ureg_dst_register( TGSI_FILE_OUTPUT, i );
      }
   }

   if (ureg->nr_outputs < UREG_MAX_OUTPUT) {
      ureg->output[i].semantic_name = name;
      ureg->output[i].semantic_index = index;
      ureg->output[i].usage_mask = usage_mask;
      ureg->nr_outputs++;
   }
   else {
      set_bad( ureg );
   }

   return ureg_dst_register( TGSI_FILE_OUTPUT, i );
}
struct ureg_src ureg_DECL_fs_input_cyl_centroid(struct ureg_program *ureg, unsigned semantic_name, unsigned semantic_index, unsigned interp_mode, unsigned cylindrical_wrap, unsigned centroid) { unsigned i; for (i = 0; i < ureg->nr_fs_inputs; i++) { if (ureg->fs_input[i].semantic_name == semantic_name && ureg->fs_input[i].semantic_index == semantic_index) { goto out; } } if (ureg->nr_fs_inputs < UREG_MAX_INPUT) { ureg->fs_input[i].semantic_name = semantic_name; ureg->fs_input[i].semantic_index = semantic_index; ureg->fs_input[i].interp = interp_mode; ureg->fs_input[i].cylindrical_wrap = cylindrical_wrap; ureg->fs_input[i].centroid = centroid; ureg->nr_fs_inputs++; } else { set_bad(ureg); } out: return ureg_src_register(TGSI_FILE_INPUT, i); }
/* Declare (or reuse) an output register for the given semantic with all
 * four components considered written.
 *
 * Fix: the previous standalone implementation duplicated the search/insert
 * logic of ureg_DECL_output_masked() but never initialized
 * output[i].usage_mask for newly created outputs, leaving that field
 * unset.  Delegating with a full XYZW write mask keeps the bookkeeping in
 * one place and guarantees the mask is valid (the masked variant asserts
 * usage_mask != 0).
 */
struct ureg_dst
ureg_DECL_output( struct ureg_program *ureg,
                  unsigned name,
                  unsigned index )
{
   /* 0xF == TGSI_WRITEMASK_XYZW: all four components written. */
   return ureg_DECL_output_masked( ureg, name, index, 0xF );
}
/* Find an existing immediate that already contains (or can be expanded to
 * contain) the `nr' values in `v'; only declare a new immediate slot when
 * no existing one matches.  Returns a source register swizzled to select
 * the requested components from the chosen immediate.
 */
static struct ureg_src
decl_immediate( struct ureg_program *ureg,
                const unsigned *v,
                unsigned nr,
                unsigned type )
{
   unsigned i, j;
   unsigned swizzle = 0;

   /* Could do a first pass where we examine all existing immediates
    * without expanding.
    */
   for (i = 0; i < ureg->nr_immediates; i++) {
      /* Only immediates of the same type may be shared. */
      if (ureg->immediate[i].type != type) {
         continue;
      }
      /* NOTE: match_or_expand_immediate() may grow immediate[i].nr and,
       * on success, writes the component selection into `swizzle'
       * (2 bits per component -- see the unpacking below).
       */
      if (match_or_expand_immediate(v,
                                    nr,
                                    ureg->immediate[i].value.u,
                                    &ureg->immediate[i].nr,
                                    &swizzle)) {
         goto out;
      }
   }

   /* No reusable slot: open a fresh immediate and match into it. */
   if (ureg->nr_immediates < UREG_MAX_IMMEDIATE) {
      i = ureg->nr_immediates++;
      ureg->immediate[i].type = type;
      if (match_or_expand_immediate(v,
                                    nr,
                                    ureg->immediate[i].value.u,
                                    &ureg->immediate[i].nr,
                                    &swizzle)) {
         goto out;
      }
   }

   /* Out of immediate space; note `i' is then past the declared range. */
   set_bad(ureg);

out:
   /* Make sure that all referenced elements are from this immediate.
    * Has the effect of making size-one immediates into scalars.
    * (Unused swizzle slots are filled with the first component's
    * selector, bits 0-1.)
    */
   for (j = nr; j < 4; j++) {
      swizzle |= (swizzle & 0x3) << (j * 2);
   }

   return ureg_swizzle(ureg_src_register(TGSI_FILE_IMMEDIATE, i),
                       (swizzle >> 0) & 0x3,
                       (swizzle >> 2) & 0x3,
                       (swizzle >> 4) & 0x3,
                       (swizzle >> 6) & 0x3);
}
struct ureg_src ureg_DECL_system_value(struct ureg_program *ureg, unsigned index, unsigned semantic_name, unsigned semantic_index) { if (ureg->nr_system_values < UREG_MAX_SYSTEM_VALUE) { ureg->system_value[ureg->nr_system_values].index = index; ureg->system_value[ureg->nr_system_values].semantic_name = semantic_name; ureg->system_value[ureg->nr_system_values].semantic_index = semantic_index; ureg->nr_system_values++; } else { set_bad(ureg); } return ureg_src_register(TGSI_FILE_SYSTEM_VALUE, index); }
struct ureg_src ureg_DECL_gs_input(struct ureg_program *ureg, unsigned index, unsigned semantic_name, unsigned semantic_index) { if (ureg->nr_gs_inputs < UREG_MAX_INPUT) { ureg->gs_input[ureg->nr_gs_inputs].index = index; ureg->gs_input[ureg->nr_gs_inputs].semantic_name = semantic_name; ureg->gs_input[ureg->nr_gs_inputs].semantic_index = semantic_index; ureg->nr_gs_inputs++; } else { set_bad(ureg); } /* XXX: Add suport for true 2D input registers. */ return ureg_src_register(TGSI_FILE_INPUT, index); }
/* Translate the exception currently in flight into iostream error state.
 *
 * Precondition: called from inside a catch block -- the bare `throw'
 * rethrows the active exception so its type can be dispatched on:
 *   - std::bad_alloc      -> set_bad()
 *   - any other exception -> set_fail()
 *
 * `err' snapshots error_m *before* the handlers run; it is the value
 * merged into the stream via setstate() and also the return value.
 * Whether the original exception is rethrown to the caller depends on
 * the stream's exception mask.  setstate() may itself throw
 * ios_base::failure; on the rethrow paths that is swallowed so the
 * original exception is the one that escapes.
 *
 * NOTE(review): set_bad()/set_fail() presumably update error_m on this
 * object -- confirm; `err' deliberately does not include their effect.
 */
std::ios_base::iostate handle_error(StreamType& strm) const
{
    std::ios_base::iostate err(error_m);

    try {
        throw;  // rethrow to inspect the active exception's type
    }
    catch (std::bad_alloc&) {
        set_bad();
        std::ios_base::iostate exception_mask(strm.exceptions());
        // failbit-only mask: record the state, swallow the exception.
        if (exception_mask & std::ios_base::failbit
            && !(exception_mask & std::ios_base::badbit))
            strm.setstate(err);
        else if (exception_mask & std::ios_base::badbit) {
            // Suppress any failure thrown by setstate() so the
            // original bad_alloc is what propagates.
            try { strm.setstate(err); }
            catch (std::ios_base::failure&) { }
            throw;
        }
    }
    catch (...) {
        set_fail();
        std::ios_base::iostate exception_mask(strm.exceptions());
        // badbit masked and already bad: record the state, swallow.
        if ((exception_mask & std::ios_base::badbit)
            && (err & std::ios_base::badbit))
            strm.setstate(err);
        else if (exception_mask & std::ios_base::failbit) {
            // Suppress setstate()'s own failure; rethrow the original.
            try { strm.setstate(err); }
            catch (std::ios_base::failure&) { }
            throw;
        }
    }

    return err;
}