/* GCC -Wconversion testcase: pass char16_t/char32_t values to functions
   taking narrower or differently-signed integer parameters and check the
   emitted diagnostics.  The dg-warning comments are DejaGnu directives that
   must match the compiler output exactly -- do not edit them.  */
void m(char16_t c0, char32_t c1)
{
  f_c (c0); /* { dg-warning "conversion from .char16_t. to .char. may change value" } */
  fsc (c0); /* { dg-warning "change value" } */
  fuc (c0); /* { dg-warning "change value" } */
  f_s (c0); /* { dg-warning "change the sign" } */
  fss (c0); /* { dg-warning "change the sign" } */
  fus (c0);
  f_i (c0);
  fsi (c0);
  fui (c0);
  f_l (c0);
  fsl (c0);
  ful (c0);
  f_ll (c0);
  fsll (c0);
  full (c0);
  f_c (c1); /* { dg-warning "change value" } */
  fsc (c1); /* { dg-warning "change value" } */
  fuc (c1); /* { dg-warning "change value" } */
  f_s (c1); /* { dg-warning "change value" } */
  fss (c1); /* { dg-warning "change value" } */
  fus (c1); /* { dg-warning "change value" } */
  f_i (c1); /* { dg-warning "change the sign" } */
  fsi (c1); /* { dg-warning "change the sign" } */
  fui (c1);
  f_l (c1); /* { dg-warning "change the sign" "" { target { llp64 || ilp32 } } } */
  fsl (c1); /* { dg-warning "change the sign" "" { target { llp64 || ilp32 } } } */
  ful (c1);
  f_ll (c1);
  fsll (c1);
  full (c1);
}
/* Encode a pipe_sampler_state as a virgl CREATE_OBJECT command.
 * Layout: handle, packed S0 dword (wrap/filter/compare), three LOD
 * floats as raw IEEE-754 bits, then the four border-color dwords. */
int virgl_encode_sampler_state(struct virgl_context *ctx, uint32_t handle, const struct pipe_sampler_state *state)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT,
                                                 VIRGL_OBJECT_SAMPLER_STATE,
                                                 VIRGL_OBJ_SAMPLER_STATE_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* Pack wrap modes, filters and the compare state into S0. */
   const uint32_t s0 =
      VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
      VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
      VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
      VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func);
   virgl_encoder_write_dword(ctx->cbuf, s0);

   /* LOD parameters travel as the float's bit pattern. */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
   virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
   virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));

   for (int comp = 0; comp < 4; comp++)
      virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[comp]);
   return 0;
}
/* One-time nv30 3D engine initialization: clear the extra viewport clip
 * rectangles and program a series of engine registers to known-good values
 * taken from captured command streams.  Most of the raw 0x... method
 * addresses have no symbolic name; their purpose is unknown (values came
 * from renouveau dumps) -- NOTE(review): treat them as opaque.  */
static void nv30_screen_init(struct nvfx_screen *screen)
{
	struct nouveau_channel *chan = screen->base.channel;
	struct nouveau_grobj *eng3d = screen->eng3d;
	int i;

	/* TODO: perhaps we should do some of this on nv40 too? */
	/* Disable viewport clip rectangles 1..7 (rect 0 is managed elsewhere). */
	for (i=1; i<8; i++) {
		BEGIN_RING(chan, eng3d, NV30_3D_VIEWPORT_CLIP_HORIZ(i), 1);
		OUT_RING(chan, 0);
		BEGIN_RING(chan, eng3d, NV30_3D_VIEWPORT_CLIP_VERT(i), 1);
		OUT_RING(chan, 0);
	}

	BEGIN_RING(chan, eng3d, 0x220, 1);
	OUT_RING(chan, 1);

	BEGIN_RING(chan, eng3d, 0x03b0, 1);
	OUT_RING(chan, 0x00100000);
	BEGIN_RING(chan, eng3d, 0x1454, 1);
	OUT_RING(chan, 0);
	BEGIN_RING(chan, eng3d, 0x1d80, 1);
	OUT_RING(chan, 3);
	BEGIN_RING(chan, eng3d, 0x1450, 1);
	OUT_RING(chan, 0x00030004);

	/* NEW */
	BEGIN_RING(chan, eng3d, 0x1e98, 1);
	OUT_RING(chan, 0);
	/* Three floats written as raw bits: (0.0, 0.0, 1.0). */
	BEGIN_RING(chan, eng3d, 0x17e0, 3);
	OUT_RING(chan, fui(0.0));
	OUT_RING(chan, fui(0.0));
	OUT_RING(chan, fui(1.0));
	/* 16-entry table; only entry 8 is non-zero (0x0000ffff). */
	BEGIN_RING(chan, eng3d, 0x1f80, 16);
	for (i=0; i<16; i++) {
		OUT_RING(chan, (i==8) ? 0x0000ffff : 0);
	}

	BEGIN_RING(chan, eng3d, 0x120, 3);
	OUT_RING(chan, 0);
	OUT_RING(chan, 1);
	OUT_RING(chan, 2);

	BEGIN_RING(chan, eng3d, 0x1d88, 1);
	OUT_RING(chan, 0x00001200);

	/* Disable register combiners by default. */
	BEGIN_RING(chan, eng3d, NV30_3D_RC_ENABLE, 1);
	OUT_RING(chan, 0);

	/* Default depth range: near = 0.0, far = 1.0 (raw float bits). */
	BEGIN_RING(chan, eng3d, NV30_3D_DEPTH_RANGE_NEAR, 2);
	OUT_RING(chan, fui(0.0));
	OUT_RING(chan, fui(1.0));

	BEGIN_RING(chan, eng3d, NV30_3D_MULTISAMPLE_CONTROL, 1);
	OUT_RING(chan, 0xffff0000);

	/* enables use of vp rather than fixed-function somehow */
	BEGIN_RING(chan, eng3d, 0x1e94, 1);
	OUT_RING(chan, 0x13);
}
/* GCC -Wconversion testcase (newer diagnostic wording: "alter its value"):
   pass char16_t/char32_t values to functions taking narrower or
   differently-signed integer parameters.  The dg-warning comments are
   DejaGnu directives matched against compiler output -- do not edit them.  */
void m (char16_t c0, char32_t c1)
{
  f_c (c0); /* { dg-warning "alter its value" } */
  fsc (c0); /* { dg-warning "alter its value" } */
  fuc (c0); /* { dg-warning "alter its value" } */
  f_s (c0); /* { dg-warning "change the sign" } */
  fss (c0); /* { dg-warning "change the sign" } */
  fus (c0);
  f_i (c0);
  fsi (c0);
  fui (c0);
  f_l (c0);
  fsl (c0);
  ful (c0);
  f_ll (c0);
  fsll (c0);
  full (c0);
  f_c (c1); /* { dg-warning "alter its value" } */
  fsc (c1); /* { dg-warning "alter its value" } */
  fuc (c1); /* { dg-warning "alter its value" } */
  f_s (c1); /* { dg-warning "alter its value" } */
  fss (c1); /* { dg-warning "alter its value" } */
  fus (c1); /* { dg-warning "alter its value" } */
  f_i (c1); /* { dg-warning "change the sign" "" { target { ! int16 } } } */
  fsi (c1); /* { dg-warning "change the sign" "" { target { ! int16 } } } */
  fui (c1);
  f_l (c1); /* { dg-warning "change the sign" "" { target { llp64 || ilp32 } } } */
  fsl (c1); /* { dg-warning "change the sign" "" { target { llp64 || ilp32 } } } */
  ful (c1);
  f_ll (c1);
  fsll (c1);
  full (c1);
}
/* Emit a constant (non-array) vertex attribute by reading its value from
 * the vertex buffer on the CPU and encoding it as an immediate
 * NV40TCL_VTX_ATTR_* method.  Returns TRUE on success, FALSE if the
 * format cannot be handled (only FLOAT with 1-4 components is supported).
 * The buffer is mapped read-only and unmapped on every exit path past the
 * map.  NOTE(review): `map += ...` is arithmetic on void*, a GCC extension
 * (treated as char*).  */
static boolean nv40_vbo_static_attrib(struct nv40_context *nv40, struct nouveau_stateobj *so, int attrib, struct pipe_vertex_element *ve, struct pipe_vertex_buffer *vb)
{
	struct pipe_winsys *ws = nv40->pipe.winsys;
	struct nouveau_grobj *curie = nv40->screen->curie;
	unsigned type, ncomp;
	void *map;

	/* Translate the pipe format; bail before mapping on failure. */
	if (nv40_vbo_format_to_hw(ve->src_format, &type, &ncomp))
		return FALSE;

	map = ws->buffer_map(ws, vb->buffer, PIPE_BUFFER_USAGE_CPU_READ);
	map += vb->buffer_offset + ve->src_offset;

	switch (type) {
	case NV40TCL_VTXFMT_TYPE_FLOAT:
	{
		float *v = map;

		/* Pick the immediate method matching the component count;
		 * floats are sent as raw IEEE-754 bit patterns. */
		switch (ncomp) {
		case 4:
			so_method(so, curie, NV40TCL_VTX_ATTR_4F_X(attrib), 4);
			so_data (so, fui(v[0]));
			so_data (so, fui(v[1]));
			so_data (so, fui(v[2]));
			so_data (so, fui(v[3]));
			break;
		case 3:
			so_method(so, curie, NV40TCL_VTX_ATTR_3F_X(attrib), 3);
			so_data (so, fui(v[0]));
			so_data (so, fui(v[1]));
			so_data (so, fui(v[2]));
			break;
		case 2:
			so_method(so, curie, NV40TCL_VTX_ATTR_2F_X(attrib), 2);
			so_data (so, fui(v[0]));
			so_data (so, fui(v[1]));
			break;
		case 1:
			so_method(so, curie, NV40TCL_VTX_ATTR_1F(attrib), 1);
			so_data (so, fui(v[0]));
			break;
		default:
			/* Unsupported component count: unmap before failing. */
			ws->buffer_unmap(ws, vb->buffer);
			return FALSE;
		}
	}
		break;
	default:
		/* Unsupported type: unmap before failing. */
		ws->buffer_unmap(ws, vb->buffer);
		return FALSE;
	}

	ws->buffer_unmap(ws, vb->buffer);
	return TRUE;
}
/**
 * Extract the needed fields from vertex_header and emit i915 dwords.
 * Recall that the vertices are constructed by the 'draw' module and
 * have a couple of slots at the beginning (1-dword header, 4-dword
 * clip pos) that we ignore here.
 *
 * Each attribute is emitted in the layout selected by vinfo (raw float
 * bits via fui(), or four bytes packed into one dword for the UB
 * formats); `count` tracks the emitted dword total purely as a sanity
 * check against vinfo->size.
 */
static INLINE void emit_hw_vertex( struct i915_context *i915, const struct vertex_header *vertex)
{
   const struct vertex_info *vinfo = &i915->current.vertex_info;
   uint i;
   uint count = 0; /* for debug/sanity */

   /* State must already be validated before batch emission. */
   assert(!i915->dirty);

   for (i = 0; i < vinfo->num_attribs; i++) {
      const uint j = vinfo->attrib[i].src_index;
      const float *attrib = vertex->data[j];
      switch (vinfo->attrib[i].emit) {
      case EMIT_1F:
         OUT_BATCH( fui(attrib[0]) );
         count++;
         break;
      case EMIT_2F:
         OUT_BATCH( fui(attrib[0]) );
         OUT_BATCH( fui(attrib[1]) );
         count += 2;
         break;
      case EMIT_3F:
         OUT_BATCH( fui(attrib[0]) );
         OUT_BATCH( fui(attrib[1]) );
         OUT_BATCH( fui(attrib[2]) );
         count += 3;
         break;
      case EMIT_4F:
         OUT_BATCH( fui(attrib[0]) );
         OUT_BATCH( fui(attrib[1]) );
         OUT_BATCH( fui(attrib[2]) );
         OUT_BATCH( fui(attrib[3]) );
         count += 4;
         break;
      case EMIT_4UB:
         /* Four floats converted to unsigned bytes, packed RGBA. */
         OUT_BATCH( pack_ub4(float_to_ubyte( attrib[0] ),
                             float_to_ubyte( attrib[1] ),
                             float_to_ubyte( attrib[2] ),
                             float_to_ubyte( attrib[3] )) );
         count += 1;
         break;
      case EMIT_4UB_BGRA:
         /* Same as EMIT_4UB but with red/blue swapped (BGRA order). */
         OUT_BATCH( pack_ub4(float_to_ubyte( attrib[2] ),
                             float_to_ubyte( attrib[1] ),
                             float_to_ubyte( attrib[0] ),
                             float_to_ubyte( attrib[3] )) );
         count += 1;
         break;
      default:
         assert(0);
      }
   }
   assert(count == vinfo->size);
}
/* Encode a pipe_rasterizer_state as a virgl CREATE_OBJECT command.
 * The state is serialized as dwords S0..S7 in fixed protocol order:
 * S0 packed flags, S1 point size, S2 sprite coord mask, S3 line stipple
 * + clip plane enables, S4..S7 line width / polygon offset floats
 * (all floats sent as raw IEEE-754 bits via fui()).  */
int virgl_encode_rasterizer_state(struct virgl_context *ctx,
                                  uint32_t handle,
                                  const struct pipe_rasterizer_state *state)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: every one-bit / small-field rasterizer flag packed together. */
   tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
      VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip) |
      VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
      VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
      VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
      VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
      VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
      VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
      VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
      VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
      VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
      VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
      VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
      VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
      VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
      VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
      VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
      VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
      VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
      VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
      VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
      VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
      VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
      VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
      VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
      VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
      VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
      VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule);

   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
   virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
   tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
      VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
      VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
   return 0;
}
/* Rebuild the hardware viewport state object when the viewport is dirty
 * (or not yet built).  Returns TRUE if new state was emitted, FALSE if the
 * cached object is still valid.  Translate/scale floats are written as raw
 * IEEE-754 bits.  */
static boolean nv30_state_viewport_validate(struct nv30_context *nv30)
{
	struct pipe_viewport_state *vpt = &nv30->viewport;
	struct nouveau_stateobj *so;

	/* Fast path: cached state object exists and viewport unchanged. */
	if (nv30->state.hw[NV30_STATE_VIEWPORT] &&
	    !(nv30->dirty & NV30_NEW_VIEWPORT))
		return FALSE;

	so = so_new(3, 10, 0);
	so_method(so, nv30->screen->rankine, NV34TCL_VIEWPORT_TRANSLATE_X, 8);
	so_data (so, fui(vpt->translate[0]));
	so_data (so, fui(vpt->translate[1]));
	so_data (so, fui(vpt->translate[2]));
	so_data (so, fui(vpt->translate[3]));
	so_data (so, fui(vpt->scale[0]));
	so_data (so, fui(vpt->scale[1]));
	so_data (so, fui(vpt->scale[2]));
	so_data (so, fui(vpt->scale[3]));
	/* so_method(so, nv30->screen->rankine, 0x1d78, 1);
	so_data (so, 1); */

	/* TODO/FIXME: never saw value 0x0110 in renouveau dumps, only 0x0001 */
	/* NOTE(review): 0x1d78 has no symbolic name; value from dumps. */
	so_method(so, nv30->screen->rankine, 0x1d78, 1);
	so_data (so, 1);

	/* Publish the new object and drop the local reference. */
	so_ref(so, &nv30->state.hw[NV30_STATE_VIEWPORT]);
	so_ref(NULL, &so);
	return TRUE;
}
int virgl_encoder_set_viewport_states(struct virgl_context *ctx, int start_slot, int num_viewports, const struct pipe_viewport_state *states) { int i,v; virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports))); virgl_encoder_write_dword(ctx->cbuf, start_slot); for (v = 0; v < num_viewports; v++) { for (i = 0; i < 3; i++) virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i])); for (i = 0; i < 3; i++) virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i])); } return 0; }
/* Encode a pipe_depth_stencil_alpha_state as a virgl CREATE_OBJECT
 * command: S0 packs depth + alpha test state, then one S1 dword per
 * stencil face (front, back), then the alpha reference value as raw
 * float bits.  */
int virgl_encode_dsa_state(struct virgl_context *ctx,
                          uint32_t handle,
                          const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   uint32_t tmp;
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: depth test/write/func plus alpha test enable/func. */
   tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth.enabled) |
      VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth.writemask) |
      VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth.func) |
      VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha.enabled) |
      VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha.func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* One packed S1 dword per stencil face: i == 0 front, i == 1 back. */
   for (i = 0; i < 2; i++) {
      tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
         VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
         VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
         VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }

   virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha.ref_value));
   return 0;
}
/* Create an a2xx depth/stencil/alpha state object: translate the Gallium
 * pipe_depth_stencil_alpha_state into the RB_DEPTHCONTROL,
 * RB_STENCILREFMASK(_BF), RB_COLORCONTROL and RB_ALPHA_REF register
 * values cached in the state object.  Returns NULL on allocation failure.  */
static void *
fd_zsa_state_create(struct pipe_context *pctx, const struct pipe_depth_stencil_alpha_state *cso)
{
	struct fd_zsa_stateobj *so;

	so = CALLOC_STRUCT(fd_zsa_stateobj);
	if (!so)
		return NULL;

	/* Keep a copy of the Gallium state for later queries. */
	so->base = *cso;

	so->rb_depthcontrol |=
		A2XX_RB_DEPTHCONTROL_ZFUNC(cso->depth.func); /* maps 1:1 */
	if (cso->depth.enabled)
		so->rb_depthcontrol |= A2XX_RB_DEPTHCONTROL_Z_ENABLE;
	if (cso->depth.writemask)
		so->rb_depthcontrol |= A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE;

	if (cso->stencil[0].enabled) {
		/* Front-face stencil state. */
		const struct pipe_stencil_state *s = &cso->stencil[0];
		so->rb_depthcontrol |=
			A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE |
			A2XX_RB_DEPTHCONTROL_STENCILFUNC(s->func) | /* maps 1:1 */
			A2XX_RB_DEPTHCONTROL_STENCILFAIL(stencil_op(s->fail_op)) |
			A2XX_RB_DEPTHCONTROL_STENCILZPASS(stencil_op(s->zpass_op)) |
			A2XX_RB_DEPTHCONTROL_STENCILZFAIL(stencil_op(s->zfail_op));
		so->rb_stencilrefmask |=
			0xff000000 | /* ??? */
			A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(s->writemask) |
			A2XX_RB_STENCILREFMASK_STENCILMASK(s->valuemask);

		if (cso->stencil[1].enabled) {
			/* Back-face stencil state (only when front is enabled). */
			const struct pipe_stencil_state *bs = &cso->stencil[1];
			so->rb_depthcontrol |=
				A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE |
				A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(bs->func) | /* maps 1:1 */
				A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(stencil_op(bs->fail_op)) |
				A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(stencil_op(bs->zpass_op)) |
				A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(stencil_op(bs->zfail_op));
			so->rb_stencilrefmask_bf |=
				0xff000000 | /* ??? */
				A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(bs->writemask) |
				A2XX_RB_STENCILREFMASK_STENCILMASK(bs->valuemask);
		}
	}

	if (cso->alpha.enabled) {
		so->rb_colorcontrol =
			A2XX_RB_COLORCONTROL_ALPHA_FUNC(cso->alpha.func) |
			A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE;
		/* Alpha reference stored as raw IEEE-754 float bits. */
		so->rb_alpha_ref = fui(cso->alpha.ref_value);
	}

	return so;
}
/* Update the dynamic (frequently changing) parameters of an already
 * constructed ilo_state_raster: line AA enable, line width, point width,
 * and the three depth-offset floats (stored as raw IEEE-754 bits).
 * The register word touched for AA depends on the hardware generation
 * (Gen8 RASTER vs. Gen6/7 SF).  Always returns true.  */
bool
ilo_state_raster_set_params(struct ilo_state_raster *rs,
                            const struct ilo_dev *dev,
                            const struct ilo_state_raster_params_info *params)
{
   /* AA only takes effect when both the state object and the current
    * params allow it. */
   const bool line_aa_enable = (rs->line_aa_enable &&
         raster_params_is_gen6_line_aa_allowed(dev, params));
   const int line_width = get_gen6_line_width(dev, params->line_width,
         line_aa_enable, rs->line_giq_enable);

   ILO_DEV_ASSERT(dev, 6, 8);

   /* modify line AA enable */
   if (rs->line_aa_enable) {
      if (ilo_dev_gen(dev) >= ILO_GEN(8)) {
         if (line_aa_enable)
            rs->raster[0] |= GEN8_RASTER_DW1_AA_LINE_ENABLE;
         else
            rs->raster[0] &= ~GEN8_RASTER_DW1_AA_LINE_ENABLE;
      } else {
         if (line_aa_enable)
            rs->sf[1] |= GEN7_SF_DW2_AA_LINE_ENABLE;
         else
            rs->sf[1] &= ~GEN7_SF_DW2_AA_LINE_ENABLE;
      }
   }

   /* modify line width: replace only the LINE_WIDTH bit-field. */
   rs->sf[1] = (rs->sf[1] & ~GEN7_SF_DW2_LINE_WIDTH__MASK) |
      line_width << GEN7_SF_DW2_LINE_WIDTH__SHIFT;

   /* modify point width: only meaningful when the state uses an explicit
    * point width rather than a per-vertex one. */
   if (rs->sf[2] & GEN7_SF_DW3_USE_POINT_WIDTH) {
      const int point_width = get_gen6_point_width(dev, params->point_width);

      rs->sf[2] = (rs->sf[2] & ~GEN7_SF_DW3_POINT_WIDTH__MASK) |
         point_width << GEN7_SF_DW3_POINT_WIDTH__SHIFT;
   }

   /* modify depth offset */
   rs->raster[1] = fui(params->depth_offset_const);
   rs->raster[2] = fui(params->depth_offset_scale);
   rs->raster[3] = fui(params->depth_offset_clamp);

   return true;
}
/* Clear the currently bound framebuffer surfaces.  Colors are written as
 * raw float bits, depth via CLEAR_DEPTH, stencil via CLEAR_STENCIL; the
 * accumulated `mode` mask selects which buffers CLEAR_BUFFERS affects
 * (0x3c = RGBA of color buffer 0).  Additional color buffers are cleared
 * with one CLEAR_BUFFERS each, using (i << 6) to select the RT index.
 * The context dirty mask is narrowed during validation and restored at
 * the end so only framebuffer/scissor state is (re)validated.  */
void nv50_clear(struct pipe_context *pipe, unsigned buffers, const float *rgba, double depth, unsigned stencil)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->base.channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct pipe_framebuffer_state *fb = &nv50->framebuffer;
	unsigned mode = 0, i;
	const unsigned dirty = nv50->dirty;

	/* don't need NEW_BLEND, NV50TCL_COLOR_MASK doesn't affect CLEAR_BUFFERS */
	nv50->dirty &= NV50_NEW_FRAMEBUFFER | NV50_NEW_SCISSOR;
	if (!nv50_state_validate(nv50, 64))
		return;

	if (buffers & PIPE_CLEAR_COLOR && fb->nr_cbufs) {
		BEGIN_RING(chan, tesla, NV50TCL_CLEAR_COLOR(0), 4);
		OUT_RING (chan, fui(rgba[0]));
		OUT_RING (chan, fui(rgba[1]));
		OUT_RING (chan, fui(rgba[2]));
		OUT_RING (chan, fui(rgba[3]));
		mode |= 0x3c; /* clear R,G,B,A of cbuf 0 */
	}

	if (buffers & PIPE_CLEAR_DEPTH) {
		BEGIN_RING(chan, tesla, NV50TCL_CLEAR_DEPTH, 1);
		OUT_RING (chan, fui(depth)); /* note: double narrowed to float bits */
		mode |= NV50TCL_CLEAR_BUFFERS_Z;
	}

	if (buffers & PIPE_CLEAR_STENCIL) {
		BEGIN_RING(chan, tesla, NV50TCL_CLEAR_STENCIL, 1);
		OUT_RING (chan, stencil & 0xff);
		mode |= NV50TCL_CLEAR_BUFFERS_S;
	}

	BEGIN_RING(chan, tesla, NV50TCL_CLEAR_BUFFERS, 1);
	OUT_RING (chan, mode);

	/* Clear the remaining color buffers one at a time. */
	for (i = 1; i < fb->nr_cbufs; i++) {
		BEGIN_RING(chan, tesla, NV50TCL_CLEAR_BUFFERS, 1);
		OUT_RING (chan, (i << 6) | 0x3c);
	}

	nv50->dirty = dirty;
}
/* Emit a SET_BLEND_COLOR command: the four blend-color channels are
 * written in order as raw IEEE-754 float bit patterns. */
int virgl_encoder_set_blend_color(struct virgl_context *ctx, const struct pipe_blend_color *color)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0,
                                                 VIRGL_SET_BLEND_COLOR_SIZE));
   for (int chan = 0; chan < 4; chan++)
      virgl_encoder_write_dword(ctx->cbuf, fui(color->color[chan]));
   return 0;
}
/* Gallium set_viewport_states hook: translate a pipe_viewport_state into
 * Vivante viewport/scissor registers and mark the viewport dirty.
 * X/Y use fixed-point 16.16 registers; Z uses raw float bits, remapped
 * as explained below.  */
static void etna_set_viewport_states(struct pipe_context *pctx, unsigned start_slot, unsigned num_scissors, const struct pipe_viewport_state *vs)
{
   struct etna_context *ctx = etna_context(pctx);
   struct compiled_viewport_state *cs = &ctx->viewport;

   ctx->viewport_s = *vs;
   /**
    * For Vivante GPU, viewport z transformation is 0..1 to 0..1 instead of
    * -1..1 to 0..1.
    * scaling and translation to 0..1 already happened, so remove that
    *
    * z' = (z * 2 - 1) * scale + translate
    *    = z * (2 * scale) + (translate - scale)
    *
    * scale' = 2 * scale
    * translate' = translate - scale
    */

   /* must be fixp as v4 state deltas assume it is */
   cs->PA_VIEWPORT_SCALE_X = etna_f32_to_fixp16(vs->scale[0]);
   cs->PA_VIEWPORT_SCALE_Y = etna_f32_to_fixp16(vs->scale[1]);
   cs->PA_VIEWPORT_SCALE_Z = fui(vs->scale[2] * 2.0f);
   cs->PA_VIEWPORT_OFFSET_X = etna_f32_to_fixp16(vs->translate[0]);
   cs->PA_VIEWPORT_OFFSET_Y = etna_f32_to_fixp16(vs->translate[1]);
   cs->PA_VIEWPORT_OFFSET_Z = fui(vs->translate[2] - vs->scale[2]);

   /* Compute scissor rectangle (fixp) from viewport.
    * Make sure left is always < right and top always < bottom.
    */
   cs->SE_SCISSOR_LEFT = etna_f32_to_fixp16(MAX2(vs->translate[0] - fabsf(vs->scale[0]), 0.0f));
   cs->SE_SCISSOR_TOP = etna_f32_to_fixp16(MAX2(vs->translate[1] - fabsf(vs->scale[1]), 0.0f));
   /* fabsf() because the scale can be negative for a flipped viewport. */
   uint32_t right_fixp = etna_f32_to_fixp16(MAX2(vs->translate[0] + fabsf(vs->scale[0]), 0.0f));
   uint32_t bottom_fixp = etna_f32_to_fixp16(MAX2(vs->translate[1] + fabsf(vs->scale[1]), 0.0f));
   /* Hardware margins are added to the right/bottom edges. */
   cs->SE_SCISSOR_RIGHT = right_fixp + ETNA_SE_SCISSOR_MARGIN_RIGHT;
   cs->SE_SCISSOR_BOTTOM = bottom_fixp + ETNA_SE_SCISSOR_MARGIN_BOTTOM;
   cs->SE_CLIP_RIGHT = right_fixp + ETNA_SE_CLIP_MARGIN_RIGHT;
   cs->SE_CLIP_BOTTOM = bottom_fixp + ETNA_SE_CLIP_MARGIN_BOTTOM;

   cs->PE_DEPTH_NEAR = fui(0.0); /* not affected if depth mode is Z (as in GL) */
   cs->PE_DEPTH_FAR = fui(1.0);

   ctx->dirty |= ETNA_DIRTY_VIEWPORT;
}
static void * nvc0_zsa_state_create(struct pipe_context *pipe, const struct pipe_depth_stencil_alpha_state *cso) { struct nvc0_zsa_stateobj *so = CALLOC_STRUCT(nvc0_zsa_stateobj); so->pipe = *cso; SB_IMMED_3D(so, DEPTH_TEST_ENABLE, cso->depth.enabled); if (cso->depth.enabled) { SB_IMMED_3D(so, DEPTH_WRITE_ENABLE, cso->depth.writemask); SB_BEGIN_3D(so, DEPTH_TEST_FUNC, 1); SB_DATA (so, nvgl_comparison_op(cso->depth.func)); } if (cso->stencil[0].enabled) { SB_BEGIN_3D(so, STENCIL_ENABLE, 5); SB_DATA (so, 1); SB_DATA (so, nvgl_stencil_op(cso->stencil[0].fail_op)); SB_DATA (so, nvgl_stencil_op(cso->stencil[0].zfail_op)); SB_DATA (so, nvgl_stencil_op(cso->stencil[0].zpass_op)); SB_DATA (so, nvgl_comparison_op(cso->stencil[0].func)); SB_BEGIN_3D(so, STENCIL_FRONT_FUNC_MASK, 2); SB_DATA (so, cso->stencil[0].valuemask); SB_DATA (so, cso->stencil[0].writemask); } else { SB_IMMED_3D(so, STENCIL_ENABLE, 0); } if (cso->stencil[1].enabled) { assert(cso->stencil[0].enabled); SB_BEGIN_3D(so, STENCIL_TWO_SIDE_ENABLE, 5); SB_DATA (so, 1); SB_DATA (so, nvgl_stencil_op(cso->stencil[1].fail_op)); SB_DATA (so, nvgl_stencil_op(cso->stencil[1].zfail_op)); SB_DATA (so, nvgl_stencil_op(cso->stencil[1].zpass_op)); SB_DATA (so, nvgl_comparison_op(cso->stencil[1].func)); SB_BEGIN_3D(so, STENCIL_BACK_MASK, 2); SB_DATA (so, cso->stencil[1].writemask); SB_DATA (so, cso->stencil[1].valuemask); } else if (cso->stencil[0].enabled) { SB_IMMED_3D(so, STENCIL_TWO_SIDE_ENABLE, 0); } SB_IMMED_3D(so, ALPHA_TEST_ENABLE, cso->alpha.enabled); if (cso->alpha.enabled) { SB_BEGIN_3D(so, ALPHA_TEST_REF, 2); SB_DATA (so, fui(cso->alpha.ref_value)); SB_DATA (so, nvgl_comparison_op(cso->alpha.func)); } assert(so->size <= (sizeof(so->state) / sizeof(so->state[0]))); return (void *)so; }
/* Emit a SET_CLIP_STATE command: every user clip plane, four floats
 * each, in plane-major order, each float sent as its raw IEEE-754
 * bit pattern. */
void virgl_encoder_set_clip_state(struct virgl_context *ctx, const struct pipe_clip_state *clip)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0,
                                                 VIRGL_SET_CLIP_STATE_SIZE));
   for (int plane = 0; plane < VIRGL_MAX_CLIP_PLANES; plane++) {
      for (int comp = 0; comp < 4; comp++)
         virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[plane][comp]));
   }
}
/* Build the per-mip-level viewport state used when rendering to this
 * texture level: a viewport that maps clip space onto the level's full
 * width/height (Y scale negated to flip the axis).  Floats are stored as
 * raw IEEE-754 bits; the literal constants below are float bit patterns:
 * 0x3F000000 == 0.5f (Z offset/scale), 0x3F800000 == 1.0f (ZMAX).
 * NOTE(review): 0x0000043F for PA_CL_VTE_CNTL is an opaque enable mask --
 * confirm against the R600 register documentation.  */
static void r600_texture_state_viewport(struct r600_screen *rscreen, struct r600_resource_texture *rtexture, unsigned level)
{
	struct radeon_state *rstate = &rtexture->viewport[level];

	radeon_state_init(rstate, rscreen->rw, R600_STATE_VIEWPORT, 0, 0);

	/* set states (most default value are 0 and struct already
	 * initialized to 0, thus avoid resetting them) */
	rstate->states[R600_VIEWPORT__PA_CL_VPORT_XOFFSET_0] = fui((float)rtexture->width[level]/2.0);
	rstate->states[R600_VIEWPORT__PA_CL_VPORT_XSCALE_0] = fui((float)rtexture->width[level]/2.0);
	rstate->states[R600_VIEWPORT__PA_CL_VPORT_YOFFSET_0] = fui((float)rtexture->height[level]/2.0);
	rstate->states[R600_VIEWPORT__PA_CL_VPORT_YSCALE_0] = fui((float)-rtexture->height[level]/2.0);
	rstate->states[R600_VIEWPORT__PA_CL_VPORT_ZOFFSET_0] = 0x3F000000;
	rstate->states[R600_VIEWPORT__PA_CL_VPORT_ZSCALE_0] = 0x3F000000;
	rstate->states[R600_VIEWPORT__PA_CL_VTE_CNTL] = 0x0000043F;
	rstate->states[R600_VIEWPORT__PA_SC_VPORT_ZMAX_0] = 0x3F800000;

	radeon_state_pm4(rstate);
}
/* -Wconversion testcase: implicit conversions between same-precision
   signed and unsigned int, exercised through calls, assignments, and
   conditional expressions.  */
void h2i (int x)
{
  /* For some reason, we only give certain warnings for implicit
     conversions among values of the same precision with -Wconversion,
     while we don't give others at all.  */
  /* (unsigned)INT_MAX + 1 and + 2 are unsigned values above INT_MAX,
     so converting them back to signed int changes the value.  */
  fsi ((unsigned)INT_MAX + 1);
  si = (unsigned)INT_MAX + 1;
  si = x ? (unsigned)INT_MAX + 1 : 1;
  fsi ((unsigned)INT_MAX + 2);
  si = (unsigned)INT_MAX + 2;
  si = x ? (unsigned)INT_MAX + 2 : 1;
  fsi (UINT_MAX);
  si = UINT_MAX;
  /* Negative values implicitly converted to unsigned wrap around.  */
  fui (-1);
  ui = -1;
  ui = x ? -1 : 1U;
  fui (INT_MIN);
  ui = INT_MIN;
  ui = x ? INT_MIN : 1U;
}
/* Clear the bound framebuffer surfaces (older nv50 path using the nvws
 * channel and combined DEPTHSTENCIL clear).  Colors go out as raw float
 * bits; `mode` accumulates the CLEAR_BUFFERS mask (0x3c = RGBA of cbuf 0,
 * 0x03 = Z | S).  Extra color buffers get one CLEAR_BUFFERS each with
 * (i << 6) selecting the RT index.  */
void nv50_clear(struct pipe_context *pipe, unsigned buffers, const float *rgba, double depth, unsigned stencil)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->nvws->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct pipe_framebuffer_state *fb = &nv50->framebuffer;
	unsigned mode = 0, i;

	if (!nv50_state_validate(nv50))
		return;

	if (buffers & PIPE_CLEAR_COLOR && fb->nr_cbufs) {
		BEGIN_RING(chan, tesla, NV50TCL_CLEAR_COLOR(0), 4);
		OUT_RING (chan, fui(rgba[0]));
		OUT_RING (chan, fui(rgba[1]));
		OUT_RING (chan, fui(rgba[2]));
		OUT_RING (chan, fui(rgba[3]));
		mode |= 0x3c; /* clear R,G,B,A of cbuf 0 */
	}

	if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
		BEGIN_RING(chan, tesla, NV50TCL_CLEAR_DEPTH, 1);
		OUT_RING (chan, fui(depth)); /* double narrowed to float bits */
		BEGIN_RING(chan, tesla, NV50TCL_CLEAR_STENCIL, 1);
		OUT_RING (chan, stencil & 0xff);
		mode |= 0x03; /* clear Z and S together */
	}

	BEGIN_RING(chan, tesla, NV50TCL_CLEAR_BUFFERS, 1);
	OUT_RING (chan, mode);

	/* Clear the remaining color buffers one at a time. */
	for (i = 1; i < fb->nr_cbufs; i++) {
		BEGIN_RING(chan, tesla, NV50TCL_CLEAR_BUFFERS, 1);
		OUT_RING (chan, (i << 6) | 0x3c);
	}
}
/* -Wconversion testcase variant with expected-diagnostic annotations.
   The dg-warning comments are DejaGnu directives (some referencing
   absolute line numbers in the original test file, e.g. 124/128); they
   must be preserved exactly -- do not edit them.  */
void h2i (int x)
{
  /* For some reason, we only give certain warnings for implicit
     conversions among values of the same precision with -Wconversion,
     while we don't give others at all.  */
  fsi ((unsigned)INT_MAX + 1); /* { dg-warning "warning: passing argument 1 of 'fsi' as signed due to prototype" } */
  si = (unsigned)INT_MAX + 1;
  si = x ? (unsigned)INT_MAX + 1 : 1;
  fsi ((unsigned)INT_MAX + 2); /* { dg-warning "warning: passing argument 1 of 'fsi' as signed due to prototype" } */
  si = (unsigned)INT_MAX + 2;
  si = x ? (unsigned)INT_MAX + 2 : 1;
  fsi (UINT_MAX); /* { dg-warning "warning: passing argument 1 of 'fsi' as signed due to prototype" } */
  si = UINT_MAX;
  fui (-1); /* { dg-warning "warning: negative integer implicitly converted to unsigned type" } */
  /* { dg-warning "warning: passing argument 1 of 'fui' as unsigned due to prototype" "-Wconversion" { target *-*-* } 124 } */
  ui = -1; /* { dg-warning "warning: negative integer implicitly converted to unsigned type" } */
  ui = x ? -1 : 1U; /* { dg-warning "warning: negative integer implicitly converted to unsigned type" } */
  fui (INT_MIN); /* { dg-warning "warning: negative integer implicitly converted to unsigned type" } */
  /* { dg-warning "warning: passing argument 1 of 'fui' as unsigned due to prototype" "-Wconversion" { target *-*-* } 128 } */
  ui = INT_MIN; /* { dg-warning "warning: negative integer implicitly converted to unsigned type" } */
  ui = x ? INT_MIN : 1U; /* { dg-warning "warning: negative integer implicitly converted to unsigned type" } */
}
/* Clear the bound framebuffer surfaces (nv50 path using RING_3D macros
 * and symbolic NV50_3D_CLEAR_BUFFERS_* bits).  Colors are pushed as
 * floats via OUT_RINGf; depth as raw float bits.  Additional color
 * buffers are cleared one CLEAR_BUFFERS each, with (i << 6) selecting
 * the RT index and 0x3c selecting all four channels.  The dirty mask is
 * narrowed for validation and restored (minus FRAMEBUFFER) at the end.  */
void nv50_clear(struct pipe_context *pipe, unsigned buffers, const float *rgba, double depth, unsigned stencil)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->base.channel;
	struct pipe_framebuffer_state *fb = &nv50->framebuffer;
	unsigned i;
	const unsigned dirty = nv50->dirty;
	uint32_t mode = 0;

	/* don't need NEW_BLEND, COLOR_MASK doesn't affect CLEAR_BUFFERS */
	nv50->dirty &= NV50_NEW_FRAMEBUFFER;
	if (!nv50_state_validate(nv50, 9 + (fb->nr_cbufs * 2)))
		return;

	if (buffers & PIPE_CLEAR_COLOR && fb->nr_cbufs) {
		BEGIN_RING(chan, RING_3D(CLEAR_COLOR(0)), 4);
		OUT_RINGf (chan, rgba[0]);
		OUT_RINGf (chan, rgba[1]);
		OUT_RINGf (chan, rgba[2]);
		OUT_RINGf (chan, rgba[3]);
		mode =
			NV50_3D_CLEAR_BUFFERS_R | NV50_3D_CLEAR_BUFFERS_G |
			NV50_3D_CLEAR_BUFFERS_B | NV50_3D_CLEAR_BUFFERS_A;
	}

	if (buffers & PIPE_CLEAR_DEPTH) {
		BEGIN_RING(chan, RING_3D(CLEAR_DEPTH), 1);
		OUT_RING (chan, fui(depth)); /* double narrowed to float bits */
		mode |= NV50_3D_CLEAR_BUFFERS_Z;
	}

	if (buffers & PIPE_CLEAR_STENCIL) {
		BEGIN_RING(chan, RING_3D(CLEAR_STENCIL), 1);
		OUT_RING (chan, stencil & 0xff);
		mode |= NV50_3D_CLEAR_BUFFERS_S;
	}

	BEGIN_RING(chan, RING_3D(CLEAR_BUFFERS), 1);
	OUT_RING (chan, mode);

	/* Clear the remaining color buffers one at a time. */
	for (i = 1; i < fb->nr_cbufs; i++) {
		BEGIN_RING(chan, RING_3D(CLEAR_BUFFERS), 1);
		OUT_RING (chan, (i << 6) | 0x3c);
	}

	nv50->dirty = dirty & ~NV50_NEW_FRAMEBUFFER;
}
/* Return the reciprocal of a rect texture's width or height (chosen by
 * `contents`) as raw IEEE-754 float bits; `data` indexes the bound
 * sampler views. */
static uint32_t get_texrect_scale(struct vc5_texture_stateobj *texstate, enum quniform_contents contents, uint32_t data)
{
        struct pipe_sampler_view *view = texstate->textures[data];
        uint32_t extent = (contents == QUNIFORM_TEXRECT_SCALE_X) ?
                view->texture->width0 : view->texture->height0;

        return fui(1.0f / extent);
}
/* helper for setting up border-color buffer for a3xx/a4xx: */
/* Fill one BORDERCOLOR_SIZE entry per sampler at `ptr` + offset.  Each
 * entry holds 16 half-floats followed by integer variants: bcolor[0..7]
 * fp16 values, bcolor[8..15] 16-bit integer values, then bcolor32[0..3]
 * raw float bits and bcolor32[4..7] 32-bit integer values, all placed at
 * the channel position given by the texture format's swizzle.  */
void
fd_setup_border_colors(struct fd_texture_stateobj *tex, void *ptr,
		unsigned offset)
{
	unsigned i, j;

	for (i = 0; i < tex->num_samplers; i++) {
		struct pipe_sampler_state *sampler = tex->samplers[i];
		uint16_t *bcolor = (uint16_t *)((uint8_t *)ptr +
				(BORDERCOLOR_SIZE * offset) +
				(BORDERCOLOR_SIZE * i));
		/* 32-bit section starts after the 16 half/short slots. */
		uint32_t *bcolor32 = (uint32_t *)&bcolor[16];

		if (!sampler)
			continue;

		/*
		 * XXX HACK ALERT XXX
		 *
		 * The border colors need to be swizzled in a particular
		 * format-dependent order. Even though samplers don't know about
		 * formats, we can assume that with a GL state tracker, there's a
		 * 1:1 correspondence between sampler and texture. Take advantage
		 * of that knowledge.
		 */
		if (i < tex->num_textures && tex->textures[i]) {
			const struct util_format_description *desc =
					util_format_description(tex->textures[i]->format);
			for (j = 0; j < 4; j++) {
				/* Skip channels mapped to constants (0/1/none). */
				if (desc->swizzle[j] >= 4)
					continue;

				const struct util_format_channel_description *chan =
					&desc->channel[desc->swizzle[j]];
				if (chan->pure_integer) {
					/* Integer formats use the integer slots. */
					bcolor32[desc->swizzle[j] + 4] = sampler->border_color.i[j];
					bcolor[desc->swizzle[j] + 8] = sampler->border_color.i[j];
				} else {
					/* Float formats: raw bits + fp16 copy. */
					bcolor32[desc->swizzle[j]] = fui(sampler->border_color.f[j]);
					bcolor[desc->swizzle[j]] =
						util_float_to_half(sampler->border_color.f[j]);
				}
			}
		}
	}
}
/* Clear the bound framebuffer surfaces (libdrm pushbuf nv50 path).
 * Colors are pushed as floats via PUSH_DATAf; depth as raw float bits.
 * `mode` accumulates the NV50_3D_CLEAR_BUFFERS_* mask; additional color
 * buffers are cleared one CLEAR_BUFFERS each, with (i << 6) selecting
 * the RT index and 0x3c all four channels.  */
void nv50_clear(struct pipe_context *pipe, unsigned buffers, const union pipe_color_union *color, double depth, unsigned stencil)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct pipe_framebuffer_state *fb = &nv50->framebuffer;
   unsigned i;
   uint32_t mode = 0;

   /* don't need NEW_BLEND, COLOR_MASK doesn't affect CLEAR_BUFFERS */
   if (!nv50_state_validate(nv50, NV50_NEW_FRAMEBUFFER, 9 + (fb->nr_cbufs * 2)))
      return;

   if (buffers & PIPE_CLEAR_COLOR && fb->nr_cbufs) {
      BEGIN_NV04(push, NV50_3D(CLEAR_COLOR(0)), 4);
      PUSH_DATAf(push, color->f[0]);
      PUSH_DATAf(push, color->f[1]);
      PUSH_DATAf(push, color->f[2]);
      PUSH_DATAf(push, color->f[3]);
      mode =
         NV50_3D_CLEAR_BUFFERS_R | NV50_3D_CLEAR_BUFFERS_G |
         NV50_3D_CLEAR_BUFFERS_B | NV50_3D_CLEAR_BUFFERS_A;
   }

   if (buffers & PIPE_CLEAR_DEPTH) {
      BEGIN_NV04(push, NV50_3D(CLEAR_DEPTH), 1);
      PUSH_DATA (push, fui(depth)); /* double narrowed to float bits */
      mode |= NV50_3D_CLEAR_BUFFERS_Z;
   }

   if (buffers & PIPE_CLEAR_STENCIL) {
      BEGIN_NV04(push, NV50_3D(CLEAR_STENCIL), 1);
      PUSH_DATA (push, stencil & 0xff);
      mode |= NV50_3D_CLEAR_BUFFERS_S;
   }

   BEGIN_NV04(push, NV50_3D(CLEAR_BUFFERS), 1);
   PUSH_DATA (push, mode);

   /* Clear the remaining color buffers one at a time. */
   for (i = 1; i < fb->nr_cbufs; i++) {
      BEGIN_NV04(push, NV50_3D(CLEAR_BUFFERS), 1);
      PUSH_DATA (push, (i << 6) | 0x3c);
   }
}
void * nv50_sampler_state_create(struct pipe_context *pipe, const struct pipe_sampler_state *cso) { struct nv50_tsc_entry *so = MALLOC_STRUCT(nv50_tsc_entry); float f[2]; so->id = -1; so->tsc[0] = (0x00026000 | (nv50_tsc_wrap_mode(cso->wrap_s) << 0) | (nv50_tsc_wrap_mode(cso->wrap_t) << 3) | (nv50_tsc_wrap_mode(cso->wrap_r) << 6)); switch (cso->mag_img_filter) { case PIPE_TEX_FILTER_LINEAR: so->tsc[1] = NV50_TSC_1_MAGF_LINEAR; break; case PIPE_TEX_FILTER_NEAREST: default: so->tsc[1] = NV50_TSC_1_MAGF_NEAREST; break; } switch (cso->min_img_filter) { case PIPE_TEX_FILTER_LINEAR: so->tsc[1] |= NV50_TSC_1_MINF_LINEAR; break; case PIPE_TEX_FILTER_NEAREST: default: so->tsc[1] |= NV50_TSC_1_MINF_NEAREST; break; } switch (cso->min_mip_filter) { case PIPE_TEX_MIPFILTER_LINEAR: so->tsc[1] |= NV50_TSC_1_MIPF_LINEAR; break; case PIPE_TEX_MIPFILTER_NEAREST: so->tsc[1] |= NV50_TSC_1_MIPF_NEAREST; break; case PIPE_TEX_MIPFILTER_NONE: default: so->tsc[1] |= NV50_TSC_1_MIPF_NONE; break; } if (nouveau_screen(pipe->screen)->class_3d >= NVE4_3D_CLASS) { if (cso->seamless_cube_map) so->tsc[1] |= NVE4_TSC_1_CUBE_SEAMLESS; if (!cso->normalized_coords) so->tsc[1] |= NVE4_TSC_1_FORCE_NONNORMALIZED_COORDS; } if (cso->max_anisotropy >= 16) so->tsc[0] |= (7 << 20); else if (cso->max_anisotropy >= 12) so->tsc[0] |= (6 << 20); else { so->tsc[0] |= (cso->max_anisotropy >> 1) << 20; if (cso->max_anisotropy >= 4) so->tsc[1] |= NV50_TSC_1_UNKN_ANISO_35; else if (cso->max_anisotropy >= 2) so->tsc[1] |= NV50_TSC_1_UNKN_ANISO_15; } if (cso->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) { /* NOTE: must be deactivated for non-shadow textures */ so->tsc[0] |= (1 << 9); so->tsc[0] |= (nvgl_comparison_op(cso->compare_func) & 0x7) << 10; } f[0] = CLAMP(cso->lod_bias, -16.0f, 15.0f); so->tsc[1] |= ((int)(f[0] * 256.0f) & 0x1fff) << 12; f[0] = CLAMP(cso->min_lod, 0.0f, 15.0f); f[1] = CLAMP(cso->max_lod, 0.0f, 15.0f); so->tsc[2] = (((int)(f[1] * 256.0f) & 0xfff) << 12) | ((int)(f[0] * 256.0f) & 0xfff); 
so->tsc[2] |= util_format_linear_float_to_srgb_8unorm(cso->border_color.f[0]) << 24; so->tsc[3] = util_format_linear_float_to_srgb_8unorm(cso->border_color.f[1]) << 12; so->tsc[3] |= util_format_linear_float_to_srgb_8unorm(cso->border_color.f[2]) << 20; so->tsc[4] = fui(cso->border_color.f[0]); so->tsc[5] = fui(cso->border_color.f[1]); so->tsc[6] = fui(cso->border_color.f[2]); so->tsc[7] = fui(cso->border_color.f[3]); return (void *)so; }
/* NOTE: ignoring line_last_pixel */
/* Translate a gallium pipe_rasterizer_state into a recorded nv50 method
 * list (CSO).  The SB_* macros append method/data pairs into so->state,
 * which is replayed into the pushbuffer when the state object is bound;
 * the method order here is therefore part of the object's behavior.
 * The input cso is also copied into so->pipe for later queries.
 * Returns NULL on allocation failure.
 */
static void *
nv50_rasterizer_state_create(struct pipe_context *pipe,
                             const struct pipe_rasterizer_state *cso)
{
   struct nv50_rasterizer_stateobj *so;
   uint32_t reg;

   so = CALLOC_STRUCT(nv50_rasterizer_stateobj);
   if (!so)
      return NULL;
   so->pipe = *cso;

#ifndef NV50_SCISSORS_CLIPPING
   for (int i = 0; i < NV50_MAX_VIEWPORTS; i++) {
      SB_BEGIN_3D(so, SCISSOR_ENABLE(i), 1);
      SB_DATA (so, cso->scissor);
   }
#endif

   SB_BEGIN_3D(so, SHADE_MODEL, 1);
   SB_DATA (so, cso->flatshade ? NV50_3D_SHADE_MODEL_FLAT :
                                 NV50_3D_SHADE_MODEL_SMOOTH);
   SB_BEGIN_3D(so, PROVOKING_VERTEX_LAST, 1);
   SB_DATA (so, !cso->flatshade_first);
   SB_BEGIN_3D(so, VERTEX_TWO_SIDE_ENABLE, 1);
   SB_DATA (so, cso->light_twoside);

   /* one enable nibble per RT component group */
   SB_BEGIN_3D(so, FRAG_COLOR_CLAMP_EN, 1);
   SB_DATA (so, cso->clamp_fragment_color ? 0x11111111 : 0x00000000);

   SB_BEGIN_3D(so, MULTISAMPLE_ENABLE, 1);
   SB_DATA (so, cso->multisample);

   SB_BEGIN_3D(so, LINE_WIDTH, 1);
   SB_DATA (so, fui(cso->line_width)); /* raw float bits */
   SB_BEGIN_3D(so, LINE_SMOOTH_ENABLE, 1);
   SB_DATA (so, cso->line_smooth);

   SB_BEGIN_3D(so, LINE_STIPPLE_ENABLE, 1);
   if (cso->line_stipple_enable) {
      SB_DATA (so, 1);
      SB_BEGIN_3D(so, LINE_STIPPLE, 1);
      SB_DATA (so, (cso->line_stipple_pattern << 8) |
               cso->line_stipple_factor);
   } else {
      SB_DATA (so, 0);
   }

   /* per-vertex point size comes from the VP instead when enabled */
   if (!cso->point_size_per_vertex) {
      SB_BEGIN_3D(so, POINT_SIZE, 1);
      SB_DATA (so, fui(cso->point_size));
   }
   SB_BEGIN_3D(so, POINT_SPRITE_ENABLE, 1);
   SB_DATA (so, cso->point_quad_rasterization);
   SB_BEGIN_3D(so, POINT_SMOOTH_ENABLE, 1);
   SB_DATA (so, cso->point_smooth);

   SB_BEGIN_3D(so, POLYGON_MODE_FRONT, 3);
   SB_DATA (so, nvgl_polygon_mode(cso->fill_front));
   SB_DATA (so, nvgl_polygon_mode(cso->fill_back));
   SB_DATA (so, cso->poly_smooth);

   SB_BEGIN_3D(so, CULL_FACE_ENABLE, 3);
   SB_DATA (so, cso->cull_face != PIPE_FACE_NONE);
   SB_DATA (so, cso->front_ccw ? NV50_3D_FRONT_FACE_CCW :
                                 NV50_3D_FRONT_FACE_CW);
   switch (cso->cull_face) {
   case PIPE_FACE_FRONT_AND_BACK:
      SB_DATA(so, NV50_3D_CULL_FACE_FRONT_AND_BACK);
      break;
   case PIPE_FACE_FRONT:
      SB_DATA(so, NV50_3D_CULL_FACE_FRONT);
      break;
   case PIPE_FACE_BACK:
   default:
      SB_DATA(so, NV50_3D_CULL_FACE_BACK);
      break;
   }

   SB_BEGIN_3D(so, POLYGON_STIPPLE_ENABLE, 1);
   SB_DATA (so, cso->poly_stipple_enable);
   SB_BEGIN_3D(so, POLYGON_OFFSET_POINT_ENABLE, 3);
   SB_DATA (so, cso->offset_point);
   SB_DATA (so, cso->offset_line);
   SB_DATA (so, cso->offset_tri);

   if (cso->offset_point || cso->offset_line || cso->offset_tri) {
      SB_BEGIN_3D(so, POLYGON_OFFSET_FACTOR, 1);
      SB_DATA (so, fui(cso->offset_scale));
      SB_BEGIN_3D(so, POLYGON_OFFSET_UNITS, 1);
      /* NOTE(review): units doubled here — presumably hw units are half
       * a GL unit; confirm against nv50 docs */
      SB_DATA (so, fui(cso->offset_units * 2.0f));
      SB_BEGIN_3D(so, POLYGON_OFFSET_CLAMP, 1);
      SB_DATA (so, fui(cso->offset_clamp));
   }

   /* depth clip disable -> clamp depth at both near and far plane */
   if (cso->depth_clip) {
      reg = 0;
   } else {
      reg =
         NV50_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_NEAR |
         NV50_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_FAR |
         NV50_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK1;
   }
#ifndef NV50_SCISSORS_CLIPPING
   reg |=
      NV50_3D_VIEW_VOLUME_CLIP_CTRL_UNK7 |
      NV50_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK1;
#endif
   SB_BEGIN_3D(so, VIEW_VOLUME_CLIP_CTRL, 1);
   SB_DATA (so, reg);

   SB_BEGIN_3D(so, DEPTH_CLIP_NEGATIVE_Z, 1);
   SB_DATA (so, cso->clip_halfz);

   SB_BEGIN_3D(so, PIXEL_CENTER_INTEGER, 1);
   SB_DATA (so, !cso->half_pixel_center);

   /* make sure the recorded method list fits the stateobj buffer */
   assert(so->size <= (sizeof(so->state) / sizeof(so->state[0])));
   return (void *)so;
}
/**
 * Use pipe_screen::get_param() to query PIPE_CAP_ values to determine
 * which GL extensions are supported.
 * Quite a few extensions are always supported because they are standard
 * features or can be built on top of other gallium features.
 * Some fine tuning may still be needed.
 */
/*
 * @param screen       gallium screen, queried via get_param() /
 *                     get_shader_param() / is_format_supported()
 * @param consts       GL implementation limits filled in here (GLSL
 *                     version, max samples, viewport bounds, ...)
 * @param extensions   extension-enable struct; also written through a raw
 *                     GLBoolean* using the o() offset tables below
 * @param options      driconf options (force_glsl_version, s3tc, ...)
 * @param has_lib_dxtc whether the external DXTn compression library is
 *                     available (gates S3TC unless force_s3tc_enable)
 */
void st_init_extensions(struct pipe_screen *screen, struct gl_constants *consts, struct gl_extensions *extensions, struct st_config_options *options, boolean has_lib_dxtc) { unsigned i; int glsl_feature_level; GLboolean *extension_table = (GLboolean *) extensions; static const struct st_extension_cap_mapping cap_mapping[] = { { o(ARB_base_instance), PIPE_CAP_START_INSTANCE }, { o(ARB_buffer_storage), PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT }, { o(ARB_color_buffer_float), PIPE_CAP_VERTEX_COLOR_UNCLAMPED }, { o(ARB_copy_image), PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS }, { o(ARB_depth_clamp), PIPE_CAP_DEPTH_CLIP_DISABLE }, { o(ARB_depth_texture), PIPE_CAP_TEXTURE_SHADOW_MAP }, { o(ARB_draw_buffers_blend), PIPE_CAP_INDEP_BLEND_FUNC }, { o(ARB_draw_instanced), PIPE_CAP_TGSI_INSTANCEID }, { o(ARB_fragment_program_shadow), PIPE_CAP_TEXTURE_SHADOW_MAP }, { o(ARB_framebuffer_object), PIPE_CAP_MIXED_FRAMEBUFFER_SIZES }, { o(ARB_instanced_arrays), PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR }, { o(ARB_occlusion_query), PIPE_CAP_OCCLUSION_QUERY }, { o(ARB_occlusion_query2), PIPE_CAP_OCCLUSION_QUERY }, { o(ARB_pipeline_statistics_query), PIPE_CAP_QUERY_PIPELINE_STATISTICS }, { o(ARB_point_sprite), PIPE_CAP_POINT_SPRITE }, { o(ARB_seamless_cube_map), PIPE_CAP_SEAMLESS_CUBE_MAP }, { o(ARB_shader_stencil_export), PIPE_CAP_SHADER_STENCIL_EXPORT }, { o(ARB_shader_texture_image_samples), PIPE_CAP_TGSI_TXQS }, { o(ARB_shader_texture_lod), PIPE_CAP_SM3 }, { o(ARB_shadow), PIPE_CAP_TEXTURE_SHADOW_MAP }, { o(ARB_texture_buffer_object), PIPE_CAP_TEXTURE_BUFFER_OBJECTS }, { o(ARB_texture_gather), PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS }, { o(ARB_texture_mirror_clamp_to_edge), 
PIPE_CAP_TEXTURE_MIRROR_CLAMP }, { o(ARB_texture_non_power_of_two), PIPE_CAP_NPOT_TEXTURES }, { o(ARB_timer_query), PIPE_CAP_QUERY_TIMESTAMP }, { o(ARB_transform_feedback2), PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME }, { o(ARB_transform_feedback3), PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME }, { o(EXT_blend_equation_separate), PIPE_CAP_BLEND_EQUATION_SEPARATE }, { o(EXT_draw_buffers2), PIPE_CAP_INDEP_BLEND_ENABLE }, { o(EXT_stencil_two_side), PIPE_CAP_TWO_SIDED_STENCIL }, { o(EXT_texture_array), PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS }, { o(EXT_texture_filter_anisotropic), PIPE_CAP_ANISOTROPIC_FILTER }, { o(EXT_texture_mirror_clamp), PIPE_CAP_TEXTURE_MIRROR_CLAMP }, { o(EXT_texture_swizzle), PIPE_CAP_TEXTURE_SWIZZLE }, { o(EXT_transform_feedback), PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS }, { o(AMD_pinned_memory), PIPE_CAP_RESOURCE_FROM_USER_MEMORY }, { o(AMD_seamless_cubemap_per_texture), PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE }, { o(ATI_separate_stencil), PIPE_CAP_TWO_SIDED_STENCIL }, { o(ATI_texture_mirror_once), PIPE_CAP_TEXTURE_MIRROR_CLAMP }, { o(NV_conditional_render), PIPE_CAP_CONDITIONAL_RENDER }, { o(NV_primitive_restart), PIPE_CAP_PRIMITIVE_RESTART }, { o(NV_texture_barrier), PIPE_CAP_TEXTURE_BARRIER }, /* GL_NV_point_sprite is not supported by gallium because we don't * support the GL_POINT_SPRITE_R_MODE_NV option. 
*/ { o(OES_standard_derivatives), PIPE_CAP_SM3 }, { o(OES_texture_float_linear), PIPE_CAP_TEXTURE_FLOAT_LINEAR }, { o(OES_texture_half_float_linear), PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR }, { o(ARB_texture_cube_map_array), PIPE_CAP_CUBE_MAP_ARRAY }, { o(ARB_texture_multisample), PIPE_CAP_TEXTURE_MULTISAMPLE }, { o(ARB_texture_query_lod), PIPE_CAP_TEXTURE_QUERY_LOD }, { o(ARB_sample_shading), PIPE_CAP_SAMPLE_SHADING }, { o(ARB_draw_indirect), PIPE_CAP_DRAW_INDIRECT }, { o(ARB_derivative_control), PIPE_CAP_TGSI_FS_FINE_DERIVATIVE }, { o(ARB_conditional_render_inverted), PIPE_CAP_CONDITIONAL_RENDER_INVERTED }, { o(ARB_texture_view), PIPE_CAP_SAMPLER_VIEW_TARGET }, { o(ARB_clip_control), PIPE_CAP_CLIP_HALFZ }, { o(EXT_polygon_offset_clamp), PIPE_CAP_POLYGON_OFFSET_CLAMP }, { o(EXT_depth_bounds_test), PIPE_CAP_DEPTH_BOUNDS_TEST }, }; /* Required: render target and sampler support */ static const struct st_extension_format_mapping rendertarget_mapping[] = { { { o(ARB_texture_float) }, { PIPE_FORMAT_R32G32B32A32_FLOAT, PIPE_FORMAT_R16G16B16A16_FLOAT } }, { { o(OES_texture_float) }, { PIPE_FORMAT_R32G32B32A32_FLOAT } }, { { o(OES_texture_half_float) }, { PIPE_FORMAT_R16G16B16A16_FLOAT } }, { { o(ARB_texture_rgb10_a2ui) }, { PIPE_FORMAT_R10G10B10A2_UINT, PIPE_FORMAT_B10G10R10A2_UINT }, GL_TRUE }, /* at least one format must be supported */ { { o(EXT_framebuffer_sRGB) }, { PIPE_FORMAT_A8B8G8R8_SRGB, PIPE_FORMAT_B8G8R8A8_SRGB }, GL_TRUE }, /* at least one format must be supported */ { { o(EXT_packed_float) }, { PIPE_FORMAT_R11G11B10_FLOAT } }, { { o(EXT_texture_integer) }, { PIPE_FORMAT_R32G32B32A32_UINT, PIPE_FORMAT_R32G32B32A32_SINT } }, { { o(ARB_texture_rg) }, { PIPE_FORMAT_R8_UNORM, PIPE_FORMAT_R8G8_UNORM } }, }; /* Required: depth stencil and sampler support */ static const struct st_extension_format_mapping depthstencil_mapping[] = { { { o(ARB_depth_buffer_float) }, { PIPE_FORMAT_Z32_FLOAT, PIPE_FORMAT_Z32_FLOAT_S8X24_UINT } }, }; /* Required: sampler support */ static 
const struct st_extension_format_mapping texture_mapping[] = { { { o(ARB_texture_compression_rgtc) }, { PIPE_FORMAT_RGTC1_UNORM, PIPE_FORMAT_RGTC1_SNORM, PIPE_FORMAT_RGTC2_UNORM, PIPE_FORMAT_RGTC2_SNORM } }, { { o(EXT_texture_compression_latc) }, { PIPE_FORMAT_LATC1_UNORM, PIPE_FORMAT_LATC1_SNORM, PIPE_FORMAT_LATC2_UNORM, PIPE_FORMAT_LATC2_SNORM } }, { { o(EXT_texture_compression_s3tc), o(ANGLE_texture_compression_dxt) }, { PIPE_FORMAT_DXT1_RGB, PIPE_FORMAT_DXT1_RGBA, PIPE_FORMAT_DXT3_RGBA, PIPE_FORMAT_DXT5_RGBA } }, { { o(ARB_texture_compression_bptc) }, { PIPE_FORMAT_BPTC_RGBA_UNORM, PIPE_FORMAT_BPTC_SRGBA, PIPE_FORMAT_BPTC_RGB_FLOAT, PIPE_FORMAT_BPTC_RGB_UFLOAT } }, { { o(EXT_texture_shared_exponent) }, { PIPE_FORMAT_R9G9B9E5_FLOAT } }, { { o(EXT_texture_snorm) }, { PIPE_FORMAT_R8G8B8A8_SNORM } }, { { o(EXT_texture_sRGB), o(EXT_texture_sRGB_decode) }, { PIPE_FORMAT_A8B8G8R8_SRGB, PIPE_FORMAT_B8G8R8A8_SRGB }, GL_TRUE }, /* at least one format must be supported */ { { o(ATI_texture_compression_3dc) }, { PIPE_FORMAT_LATC2_UNORM } }, { { o(MESA_ycbcr_texture) }, { PIPE_FORMAT_UYVY, PIPE_FORMAT_YUYV }, GL_TRUE }, /* at least one format must be supported */ { { o(OES_compressed_ETC1_RGB8_texture) }, { PIPE_FORMAT_ETC1_RGB8, PIPE_FORMAT_R8G8B8A8_UNORM }, GL_TRUE }, /* at least one format must be supported */ { { o(ARB_stencil_texturing), o(ARB_texture_stencil8) }, { PIPE_FORMAT_X24S8_UINT, PIPE_FORMAT_S8X24_UINT }, GL_TRUE }, /* at least one format must be supported */ }; /* Required: vertex fetch support. 
*/ static const struct st_extension_format_mapping vertex_mapping[] = { { { o(ARB_vertex_type_2_10_10_10_rev) }, { PIPE_FORMAT_R10G10B10A2_UNORM, PIPE_FORMAT_B10G10R10A2_UNORM, PIPE_FORMAT_R10G10B10A2_SNORM, PIPE_FORMAT_B10G10R10A2_SNORM, PIPE_FORMAT_R10G10B10A2_USCALED, PIPE_FORMAT_B10G10R10A2_USCALED, PIPE_FORMAT_R10G10B10A2_SSCALED, PIPE_FORMAT_B10G10R10A2_SSCALED } }, { { o(ARB_vertex_type_10f_11f_11f_rev) }, { PIPE_FORMAT_R11G11B10_FLOAT } }, }; static const struct st_extension_format_mapping tbo_rgb32[] = { { {o(ARB_texture_buffer_object_rgb32) }, { PIPE_FORMAT_R32G32B32_FLOAT, PIPE_FORMAT_R32G32B32_UINT, PIPE_FORMAT_R32G32B32_SINT, } }, }; /* * Extensions that are supported by all Gallium drivers: */ extensions->ARB_ES2_compatibility = GL_TRUE; extensions->ARB_draw_elements_base_vertex = GL_TRUE; extensions->ARB_explicit_attrib_location = GL_TRUE; extensions->ARB_explicit_uniform_location = GL_TRUE; extensions->ARB_fragment_coord_conventions = GL_TRUE; extensions->ARB_fragment_program = GL_TRUE; extensions->ARB_fragment_shader = GL_TRUE; extensions->ARB_half_float_vertex = GL_TRUE; extensions->ARB_internalformat_query = GL_TRUE; extensions->ARB_map_buffer_range = GL_TRUE; extensions->ARB_texture_border_clamp = GL_TRUE; /* XXX temp */ extensions->ARB_texture_cube_map = GL_TRUE; extensions->ARB_texture_env_combine = GL_TRUE; extensions->ARB_texture_env_crossbar = GL_TRUE; extensions->ARB_texture_env_dot3 = GL_TRUE; extensions->ARB_vertex_program = GL_TRUE; extensions->ARB_vertex_shader = GL_TRUE; extensions->EXT_blend_color = GL_TRUE; extensions->EXT_blend_func_separate = GL_TRUE; extensions->EXT_blend_minmax = GL_TRUE; extensions->EXT_gpu_program_parameters = GL_TRUE; extensions->EXT_pixel_buffer_object = GL_TRUE; extensions->EXT_point_parameters = GL_TRUE; extensions->EXT_provoking_vertex = GL_TRUE; extensions->EXT_texture_env_dot3 = GL_TRUE; extensions->EXT_vertex_array_bgra = GL_TRUE; extensions->ATI_texture_env_combine3 = GL_TRUE; 
extensions->MESA_pack_invert = GL_TRUE; extensions->NV_fog_distance = GL_TRUE; extensions->NV_texture_env_combine4 = GL_TRUE; extensions->NV_texture_rectangle = GL_TRUE; extensions->OES_EGL_image = GL_TRUE; extensions->OES_EGL_image_external = GL_TRUE; extensions->OES_draw_texture = GL_TRUE; /* Expose the extensions which directly correspond to gallium caps. */ for (i = 0; i < ARRAY_SIZE(cap_mapping); i++) { if (screen->get_param(screen, cap_mapping[i].cap)) { extension_table[cap_mapping[i].extension_offset] = GL_TRUE; } } /* Expose the extensions which directly correspond to gallium formats. */ init_format_extensions(screen, extensions, rendertarget_mapping, ARRAY_SIZE(rendertarget_mapping), PIPE_TEXTURE_2D, PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW); init_format_extensions(screen, extensions, depthstencil_mapping, ARRAY_SIZE(depthstencil_mapping), PIPE_TEXTURE_2D, PIPE_BIND_DEPTH_STENCIL | PIPE_BIND_SAMPLER_VIEW); init_format_extensions(screen, extensions, texture_mapping, ARRAY_SIZE(texture_mapping), PIPE_TEXTURE_2D, PIPE_BIND_SAMPLER_VIEW); init_format_extensions(screen, extensions, vertex_mapping, ARRAY_SIZE(vertex_mapping), PIPE_BUFFER, PIPE_BIND_VERTEX_BUFFER); /* Figure out GLSL support. */ glsl_feature_level = screen->get_param(screen, PIPE_CAP_GLSL_FEATURE_LEVEL); consts->GLSLVersion = glsl_feature_level; if (glsl_feature_level >= 410) consts->GLSLVersion = 410; _mesa_override_glsl_version(consts); if (options->force_glsl_version > 0 && options->force_glsl_version <= consts->GLSLVersion) { consts->ForceGLSLVersion = options->force_glsl_version; } if (glsl_feature_level >= 400) extensions->ARB_gpu_shader5 = GL_TRUE; if (glsl_feature_level >= 410) extensions->ARB_shader_precision = GL_TRUE; /* This extension needs full OpenGL 3.2, but we don't know if that's * supported at this point. Only check the GLSL version. 
*/ if (consts->GLSLVersion >= 150 && screen->get_param(screen, PIPE_CAP_TGSI_VS_LAYER_VIEWPORT)) { extensions->AMD_vertex_shader_layer = GL_TRUE; } if (consts->GLSLVersion >= 130) { consts->NativeIntegers = GL_TRUE; consts->MaxClipPlanes = 8; if (screen->get_param(screen, PIPE_CAP_VERTEXID_NOBASE)) { consts->VertexID_is_zero_based = GL_TRUE; } /* Extensions that either depend on GLSL 1.30 or are a subset thereof. */ extensions->ARB_conservative_depth = GL_TRUE; extensions->ARB_shading_language_packing = GL_TRUE; extensions->OES_depth_texture_cube_map = GL_TRUE; extensions->ARB_shading_language_420pack = GL_TRUE; extensions->ARB_texture_query_levels = GL_TRUE; extensions->ARB_shader_subroutine = GL_TRUE; if (!options->disable_shader_bit_encoding) { extensions->ARB_shader_bit_encoding = GL_TRUE; } extensions->EXT_shader_integer_mix = GL_TRUE; } else { /* Optional integer support for GLSL 1.2. */ if (screen->get_shader_param(screen, PIPE_SHADER_VERTEX, PIPE_SHADER_CAP_INTEGERS) && screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_INTEGERS)) { consts->NativeIntegers = GL_TRUE; extensions->EXT_shader_integer_mix = GL_TRUE; } /* Integer textures make no sense before GLSL 1.30 */ extensions->EXT_texture_integer = GL_FALSE; } consts->UniformBooleanTrue = consts->NativeIntegers ? ~0U : fui(1.0f); /* Below are the cases which cannot be moved into tables easily. */ if (!has_lib_dxtc && !options->force_s3tc_enable) { extensions->EXT_texture_compression_s3tc = GL_FALSE; extensions->ANGLE_texture_compression_dxt = GL_FALSE; } if (screen->get_shader_param(screen, PIPE_SHADER_TESS_CTRL, PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) { extensions->ARB_tessellation_shader = GL_TRUE; } if (screen->fence_finish) { extensions->ARB_sync = GL_TRUE; } /* Maximum sample count. 
*/ { enum pipe_format color_formats[] = { PIPE_FORMAT_R8G8B8A8_UNORM, PIPE_FORMAT_B8G8R8A8_UNORM, PIPE_FORMAT_A8R8G8B8_UNORM, PIPE_FORMAT_A8B8G8R8_UNORM, }; enum pipe_format depth_formats[] = { PIPE_FORMAT_Z16_UNORM, PIPE_FORMAT_Z24X8_UNORM, PIPE_FORMAT_X8Z24_UNORM, PIPE_FORMAT_Z32_UNORM, PIPE_FORMAT_Z32_FLOAT }; enum pipe_format int_formats[] = { PIPE_FORMAT_R8G8B8A8_SINT }; consts->MaxSamples = get_max_samples_for_formats(screen, ARRAY_SIZE(color_formats), color_formats, 16, PIPE_BIND_RENDER_TARGET); consts->MaxColorTextureSamples = get_max_samples_for_formats(screen, ARRAY_SIZE(color_formats), color_formats, consts->MaxSamples, PIPE_BIND_SAMPLER_VIEW); consts->MaxDepthTextureSamples = get_max_samples_for_formats(screen, ARRAY_SIZE(depth_formats), depth_formats, consts->MaxSamples, PIPE_BIND_SAMPLER_VIEW); consts->MaxIntegerSamples = get_max_samples_for_formats(screen, ARRAY_SIZE(int_formats), int_formats, consts->MaxSamples, PIPE_BIND_SAMPLER_VIEW); } if (consts->MaxSamples == 1) { /* one sample doesn't really make sense */ consts->MaxSamples = 0; } else if (consts->MaxSamples >= 2) { extensions->EXT_framebuffer_multisample = GL_TRUE; extensions->EXT_framebuffer_multisample_blit_scaled = GL_TRUE; } if (consts->MaxSamples == 0 && screen->get_param(screen, PIPE_CAP_FAKE_SW_MSAA)) { consts->FakeSWMSAA = GL_TRUE; extensions->EXT_framebuffer_multisample = GL_TRUE; extensions->EXT_framebuffer_multisample_blit_scaled = GL_TRUE; extensions->ARB_texture_multisample = GL_TRUE; } if (consts->MaxDualSourceDrawBuffers > 0 && !options->disable_blend_func_extended) extensions->ARB_blend_func_extended = GL_TRUE; if (screen->get_param(screen, PIPE_CAP_QUERY_TIME_ELAPSED) || extensions->ARB_timer_query) { extensions->EXT_timer_query = GL_TRUE; } if (extensions->ARB_transform_feedback2 && extensions->ARB_draw_instanced) { extensions->ARB_transform_feedback_instanced = GL_TRUE; } if (options->force_glsl_extensions_warn) consts->ForceGLSLExtensionsWarn = 1; if 
(options->disable_glsl_line_continuations) consts->DisableGLSLLineContinuations = 1; if (options->allow_glsl_extension_directive_midshader) consts->AllowGLSLExtensionDirectiveMidShader = GL_TRUE; consts->MinMapBufferAlignment = screen->get_param(screen, PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT); if (extensions->ARB_texture_buffer_object) { consts->MaxTextureBufferSize = _min(screen->get_param(screen, PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE), (1u << 31) - 1); consts->TextureBufferOffsetAlignment = screen->get_param(screen, PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT); if (consts->TextureBufferOffsetAlignment) extensions->ARB_texture_buffer_range = GL_TRUE; init_format_extensions(screen, extensions, tbo_rgb32, ARRAY_SIZE(tbo_rgb32), PIPE_BUFFER, PIPE_BIND_SAMPLER_VIEW); } /* Unpacking a varying in the fragment shader costs 1 texture indirection. * If the number of available texture indirections is very limited, then we * prefer to disable varying packing rather than run the risk of varying * packing preventing a shader from running. */ if (screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS) <= 8) { /* We can't disable varying packing if transform feedback is available, * because transform feedback code assumes a packed varying layout. */ if (!extensions->EXT_transform_feedback) consts->DisableVaryingPacking = GL_TRUE; } consts->MaxViewports = screen->get_param(screen, PIPE_CAP_MAX_VIEWPORTS); if (consts->MaxViewports >= 16) { if (glsl_feature_level >= 400) { consts->ViewportBounds.Min = -32768.0; consts->ViewportBounds.Max = 32767.0; } else { consts->ViewportBounds.Min = -16384.0; consts->ViewportBounds.Max = 16383.0; } extensions->ARB_viewport_array = GL_TRUE; extensions->ARB_fragment_layer_viewport = GL_TRUE; if (extensions->AMD_vertex_shader_layer) extensions->AMD_vertex_shader_viewport_index = GL_TRUE; } /* GL_ARB_ES3_compatibility. * * Assume that ES3 is supported if GLSL 3.30 is supported. 
* (OpenGL 3.3 is a requirement for that extension.) */ if (consts->GLSLVersion >= 330 && /* Requirements for ETC2 emulation. */ screen->is_format_supported(screen, PIPE_FORMAT_R8G8B8A8_UNORM, PIPE_TEXTURE_2D, 0, PIPE_BIND_SAMPLER_VIEW) && screen->is_format_supported(screen, PIPE_FORMAT_B8G8R8A8_SRGB, PIPE_TEXTURE_2D, 0, PIPE_BIND_SAMPLER_VIEW) && screen->is_format_supported(screen, PIPE_FORMAT_R16_UNORM, PIPE_TEXTURE_2D, 0, PIPE_BIND_SAMPLER_VIEW) && screen->is_format_supported(screen, PIPE_FORMAT_R16G16_UNORM, PIPE_TEXTURE_2D, 0, PIPE_BIND_SAMPLER_VIEW) && screen->is_format_supported(screen, PIPE_FORMAT_R16_SNORM, PIPE_TEXTURE_2D, 0, PIPE_BIND_SAMPLER_VIEW) && screen->is_format_supported(screen, PIPE_FORMAT_R16G16_SNORM, PIPE_TEXTURE_2D, 0, PIPE_BIND_SAMPLER_VIEW)) { extensions->ARB_ES3_compatibility = GL_TRUE; } if (screen->get_video_param && screen->get_video_param(screen, PIPE_VIDEO_PROFILE_UNKNOWN, PIPE_VIDEO_ENTRYPOINT_BITSTREAM, PIPE_VIDEO_CAP_SUPPORTS_INTERLACED)) { extensions->NV_vdpau_interop = GL_TRUE; } if (screen->get_shader_param(screen, PIPE_SHADER_VERTEX, PIPE_SHADER_CAP_DOUBLES) && screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_DOUBLES)) { extensions->ARB_gpu_shader_fp64 = GL_TRUE; extensions->ARB_vertex_attrib_64bit = GL_TRUE; } }
/* Emit one primitive's vertices inline into the channel as
 * NV30_3D_VERTEX_DATA, switching the hardware primitive mode first if it
 * differs from the last one emitted ('mode', 'count' come from the draw
 * stage).  Position is emitted perspective-divided (see TODO below); the
 * remaining (num_attribs - 1) attributes are copied raw.
 */
static INLINE void
nvfx_render_prim(struct draw_stage *stage, struct prim_header *prim,
                 unsigned mode, unsigned count)
{
   struct nvfx_render_stage *rs = nvfx_render_stage(stage);
   struct nvfx_context *nvfx = rs->nvfx;

   struct nvfx_screen *screen = nvfx->screen;
   struct nouveau_channel *chan = screen->base.channel;
   boolean no_elements = nvfx->vertprog->draw_no_elements;
   unsigned num_attribs = nvfx->vertprog->draw_elements;

   /* we need to account the flush as well here even if it is done after
    * this function */
   if (AVAIL_RING(chan) < ((1 + count * num_attribs * 4) + 6 + 64)) {
      /* not enough ring space: flush, fire, and re-emit state so the
       * reservation below is guaranteed to succeed */
      nvfx_render_flush(stage, 0);
      FIRE_RING(chan);
      nvfx_state_emit(nvfx);
      assert(AVAIL_RING(chan) >= ((1 + count * num_attribs * 4) + 6 + 64));
   }

   /* Switch primitive modes if necessary */
   if (rs->prim != mode) {
      /* close the currently open begin/end bracket, if any */
      if (rs->prim != NV30_3D_VERTEX_BEGIN_END_STOP) {
         OUT_RING(chan, RING_3D(NV30_3D_VERTEX_BEGIN_END, 1));
         OUT_RING(chan, NV30_3D_VERTEX_BEGIN_END_STOP);
      }

      /* XXX: any command a lot of times seems to (mostly) fix corruption
       * that would otherwise happen */
      /* this seems to cause issues on nv3x, and also be unneeded there */
      if(nvfx->is_nv4x) {
         int i;
         for(i = 0; i < 32; ++i) {
            OUT_RING(chan, RING_3D(0x1dac, 1));
            OUT_RING(chan, 0);
         }
      }

      OUT_RING(chan, RING_3D(NV30_3D_VERTEX_BEGIN_END, 1));
      OUT_RING (chan, mode);
      rs->prim = mode;
   }

   /* inline vertex payload: 4 dwords per attribute per vertex */
   OUT_RING(chan, RING_3D_NI(NV30_3D_VERTEX_DATA, num_attribs * 4 * count));
   if(no_elements) {
      /* vertex program reads no elements: emit a single dummy vertex */
      OUT_RING(chan, 0);
      OUT_RING(chan, 0);
      OUT_RING(chan, 0);
      OUT_RING(chan, 0);
   } else {
      for (unsigned i = 0; i < count; ++i) {
         struct vertex_header* v = prim->v[i];
         /* TODO: disable divide where it's causing the problem, and
          * remove this hack */
         OUT_RING(chan, fui(v->data[0][0] / v->data[0][3]));
         OUT_RING(chan, fui(v->data[0][1] / v->data[0][3]));
         OUT_RING(chan, fui(v->data[0][2] / v->data[0][3]));
         OUT_RING(chan, fui(1.0f / v->data[0][3]));
         /* remaining attributes, 4 floats each, copied verbatim */
         OUT_RINGp(chan, &v->data[1][0], 4 * (num_attribs - 1));
      }
   }
}
/* Record commands that clear the LRZ (low-resolution Z) buffer of 'zsbuf'
 * to 'depth' using a solid-color 2D blit (R16_UNORM destination, solid
 * source color = raw float bits of depth).  Commands go into the batch's
 * dedicated lrz_clear ringbuffer, created on first use.
 *
 * NOTE(review): many register values below (RB_CCU_CNTL, the UNKNOWN_*
 * registers, the 0x4f00080 blit-cntl bits) are magic numbers inherited
 * from blob traces — do not assume semantics beyond what the names show.
 */
static void
fd6_clear_lrz(struct fd_batch *batch, struct fd_resource *zsbuf, double depth)
{
   struct fd_ringbuffer *ring;

   // TODO mid-frame clears (ie. app doing crazy stuff)?? Maybe worth
   // splitting both clear and lrz clear out into their own rb's. And
   // just throw away any draws prior to clear. (Anything not fullscreen
   // clear, just fallback to generic path that treats it as a normal
   // draw

   if (!batch->lrz_clear) {
      batch->lrz_clear = fd_ringbuffer_new(batch->ctx->pipe, 0x1000);
      fd_ringbuffer_set_parent(batch->lrz_clear, batch->gmem);
   }

   ring = batch->lrz_clear;

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
   emit_marker6(ring, 7);

   OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
   OUT_RING(ring, 0x10000000);

   OUT_PKT4(ring, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
   OUT_RING(ring, 0x7ffff);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0xc));
   emit_marker6(ring, 7);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8C01, 1);
   OUT_RING(ring, 0x0);

   /* zero out the 2D source info block (solid fill uses no source image) */
   OUT_PKT4(ring, REG_A6XX_SP_PS_2D_SRC_INFO, 13);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_ACC0, 1);
   OUT_RING(ring, 0x0000f410);

   OUT_PKT4(ring, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
   OUT_RING(ring, A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(RB6_R16_UNORM) |
            0x4f00080);

   OUT_PKT4(ring, REG_A6XX_RB_2D_BLIT_CNTL, 1);
   OUT_RING(ring, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(RB6_R16_UNORM) |
            0x4f00080);

   fd6_event_write(batch, ring, UNK_1D, true);
   fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);

   /* solid fill color: component 0 = depth as raw float bits */
   OUT_PKT4(ring, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
   OUT_RING(ring, fui(depth));
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   /* destination: the LRZ buffer, linear R16_UNORM */
   OUT_PKT4(ring, REG_A6XX_RB_2D_DST_INFO, 9);
   OUT_RING(ring, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(RB6_R16_UNORM) |
            A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR) |
            A6XX_RB_2D_DST_INFO_COLOR_SWAP(WZYX));
   OUT_RELOCW(ring, zsbuf->lrz, 0, 0, 0);
   /* pitch in bytes: lrz_pitch is in 16-bit texels */
   OUT_RING(ring, A6XX_RB_2D_DST_SIZE_PITCH(zsbuf->lrz_pitch * 2));
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A6XX_GRAS_2D_SRC_TL_X, 4);
   OUT_RING(ring, A6XX_GRAS_2D_SRC_TL_X_X(0));
   OUT_RING(ring, A6XX_GRAS_2D_SRC_BR_X_X(0));
   OUT_RING(ring, A6XX_GRAS_2D_SRC_TL_Y_Y(0));
   OUT_RING(ring, A6XX_GRAS_2D_SRC_BR_Y_Y(0));

   /* destination rect covers the whole LRZ surface */
   OUT_PKT4(ring, REG_A6XX_GRAS_2D_DST_TL, 2);
   OUT_RING(ring, A6XX_GRAS_2D_DST_TL_X(0) | A6XX_GRAS_2D_DST_TL_Y(0));
   OUT_RING(ring, A6XX_GRAS_2D_DST_BR_X(zsbuf->lrz_width - 1) |
            A6XX_GRAS_2D_DST_BR_Y(zsbuf->lrz_height - 1));

   fd6_event_write(batch, ring, 0x3f, false);
   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
   OUT_RING(ring, 0x1000000);

   /* fire the actual blit */
   OUT_PKT7(ring, CP_BLIT, 1);
   OUT_RING(ring, CP_BLIT_0_OP(BLIT_OP_SCALE));

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
   OUT_RING(ring, 0x0);

   /* flush/invalidate so later passes observe the cleared LRZ */
   fd6_event_write(batch, ring, UNK_1D, true);
   fd6_event_write(batch, ring, FACENESS_FLUSH, true);
   fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

   fd6_cache_flush(batch, ring);
}