static void
nvc0_tep_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->tevlprog = hwcso;
   nvc0->dirty_3d |= NVC0_NEW_3D_TEVLPROG;
}
static void
nvc0_texture_barrier(struct pipe_context *pipe)
{
   struct nouveau_pushbuf *push = nvc0_context(pipe)->base.pushbuf;

   IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
   IMMED_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 0);
}
static void
nvc0_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->sample_mask = sample_mask;
   nvc0->dirty_3d |= NVC0_NEW_3D_SAMPLE_MASK;
}
static void
nvc0_rasterizer_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->rast = hwcso;
   nvc0->dirty_3d |= NVC0_NEW_3D_RASTERIZER;
}
static void
nvc0_blend_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->blend = hwcso;
   nvc0->dirty_3d |= NVC0_NEW_3D_BLEND;
}
static void
nvc0_fp_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->fragprog = hwcso;
   nvc0->dirty_3d |= NVC0_NEW_3D_FRAGPROG;
}
static void
nvc0_gp_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->gmtyprog = hwcso;
   nvc0->dirty_3d |= NVC0_NEW_3D_GMTYPROG;
}
static void
nvc0_zsa_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->zsa = hwcso;
   nvc0->dirty_3d |= NVC0_NEW_3D_ZSA;
}
static void
nvc0_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->vertex = hwcso;
   nvc0->dirty_3d |= NVC0_NEW_3D_VERTEX;
}
static void
nvc0_vp_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->vertprog = hwcso;
   nvc0->dirty_3d |= NVC0_NEW_3D_VERTPROG;
}
static void
nvc0_tcp_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->tctlprog = hwcso;
   nvc0->dirty_3d |= NVC0_NEW_3D_TCTLPROG;
}
static void
nvc0_cp_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->compprog = hwcso;
   nvc0->dirty_cp |= NVC0_NEW_CP_PROGRAM;
}
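/* All of the bind functions above follow the same deferred-validation
 * pattern: they only store the CSO pointer and flag the state dirty; the
 * hardware methods are emitted later, at draw/launch validation time.
 *
 * Illustrative sketch of how a state tracker drives these hooks (the blend
 * template values are made up for the example):
 *
 *    struct pipe_blend_state templ;
 *    memset(&templ, 0, sizeof(templ));
 *    templ.rt[0].blend_enable = 1;
 *
 *    void *cso = pipe->create_blend_state(pipe, &templ);
 *    pipe->bind_blend_state(pipe, cso);  // just sets NVC0_NEW_3D_BLEND
 *    pipe->draw_vbo(pipe, &info);        // state is validated here
 */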
static struct pipe_query *
nvc0_query_create(struct pipe_context *pipe, unsigned type)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nvc0_query *q;
   unsigned space = NVC0_QUERY_ALLOC_SPACE;

   q = CALLOC_STRUCT(nvc0_query);
   if (!q)
      return NULL;

   switch (type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      q->rotate = 32;
      space = NVC0_QUERY_ALLOC_SPACE;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      q->is64bit = true;
      space = 512;
      break;
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      q->is64bit = true;
      space = 64;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_GPU_FINISHED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      space = 32;
      break;
   case NVC0_QUERY_TFB_BUFFER_OFFSET:
      space = 16;
      break;
   default:
      FREE(q);
      return NULL;
   }

   if (!nvc0_query_allocate(nvc0, q, space)) {
      FREE(q);
      return NULL;
   }

   q->type = type;

   if (q->rotate) {
      /* we advance before query_begin ! */
      q->offset -= q->rotate;
      q->data -= q->rotate / sizeof(*q->data);
   } else
   if (!q->is64bit)
      q->data[0] = 0; /* initialize sequence */

   return (struct pipe_query *)q;
}
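/* A rough sketch of the query lifetime these hooks serve; the query type
 * and the blocking get_query_result call are just an example:
 *
 *    struct pipe_query *q =
 *       pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER);
 *    pipe->begin_query(pipe, q);
 *    ... draw ...
 *    pipe->end_query(pipe, q);
 *    pipe->get_query_result(pipe, q, TRUE, &result);  (TRUE = wait)
 *
 * Occlusion queries set q->rotate so that repeated begin/end pairs cycle
 * through slices of the allocation, which is why the offset is rewound
 * above before the first query_begin advances it.
 */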
static void
nvc0_set_sampler_views(struct pipe_context *pipe, unsigned shader,
                       unsigned start, unsigned nr,
                       struct pipe_sampler_view **views)
{
   assert(start == 0);
   switch (shader) {
   case PIPE_SHADER_VERTEX:
      nvc0_stage_set_sampler_views(nvc0_context(pipe), 0, nr, views);
      break;
   case PIPE_SHADER_TESS_CTRL:
      nvc0_stage_set_sampler_views(nvc0_context(pipe), 1, nr, views);
      break;
   case PIPE_SHADER_TESS_EVAL:
      nvc0_stage_set_sampler_views(nvc0_context(pipe), 2, nr, views);
      break;
   case PIPE_SHADER_GEOMETRY:
      nvc0_stage_set_sampler_views(nvc0_context(pipe), 3, nr, views);
      break;
   case PIPE_SHADER_FRAGMENT:
      nvc0_stage_set_sampler_views(nvc0_context(pipe), 4, nr, views);
      break;
   case PIPE_SHADER_COMPUTE:
      nvc0_stage_set_sampler_views_range(nvc0_context(pipe), 5,
                                         start, nr, views);
      nvc0_context(pipe)->dirty_cp |= NVC0_NEW_CP_TEXTURES;
      break;
   default:
      ;
   }
}
static void
nvc0_bind_sampler_states(struct pipe_context *pipe, unsigned shader,
                         unsigned start, unsigned nr, void **s)
{
   switch (shader) {
   case PIPE_SHADER_VERTEX:
      assert(start == 0);
      nvc0_stage_sampler_states_bind(nvc0_context(pipe), 0, nr, s);
      break;
   case PIPE_SHADER_TESS_CTRL:
      assert(start == 0);
      nvc0_stage_sampler_states_bind(nvc0_context(pipe), 1, nr, s);
      break;
   case PIPE_SHADER_TESS_EVAL:
      assert(start == 0);
      nvc0_stage_sampler_states_bind(nvc0_context(pipe), 2, nr, s);
      break;
   case PIPE_SHADER_GEOMETRY:
      assert(start == 0);
      nvc0_stage_sampler_states_bind(nvc0_context(pipe), 3, nr, s);
      break;
   case PIPE_SHADER_FRAGMENT:
      assert(start == 0);
      nvc0_stage_sampler_states_bind(nvc0_context(pipe), 4, nr, s);
      break;
   case PIPE_SHADER_COMPUTE:
      nvc0_stage_sampler_states_bind_range(nvc0_context(pipe), 5,
                                           start, nr, s);
      nvc0_context(pipe)->dirty_cp |= NVC0_NEW_CP_SAMPLERS;
      break;
   }
}
static void
nvc0_so_target_save_offset(struct pipe_context *pipe,
                           struct pipe_stream_output_target *ptarg,
                           unsigned index, bool *serialize)
{
   struct nvc0_so_target *targ = nvc0_so_target(ptarg);

   if (*serialize) {
      *serialize = false;
      PUSH_SPACE(nvc0_context(pipe)->base.pushbuf, 1);
      IMMED_NVC0(nvc0_context(pipe)->base.pushbuf, NVC0_3D(SERIALIZE), 0);

      NOUVEAU_DRV_STAT(nouveau_screen(pipe->screen), gpu_serialize_count, 1);
   }

   nvc0_query(targ->pq)->index = index;
   pipe->end_query(pipe, targ->pq);
}
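/* Saving the current stream-output offset requires the GPU to have finished
 * any commands that may still change it, hence the SERIALIZE before ending
 * the query that reads the buffer offset back. The caller passes in
 * *serialize = true for the first target and we clear it, so a batch of
 * targets only serializes once.
 */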
static void
nvc0_set_scissor_state(struct pipe_context *pipe,
                       const struct pipe_scissor_state *scissor)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->scissor = *scissor;
   nvc0->dirty_3d |= NVC0_NEW_3D_SCISSOR;
}
static void
nvc0_set_viewport_state(struct pipe_context *pipe,
                        const struct pipe_viewport_state *vpt)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->viewport = *vpt;
   nvc0->dirty_3d |= NVC0_NEW_3D_VIEWPORT;
}
static void
nvc0_set_framebuffer_state(struct pipe_context *pipe,
                           const struct pipe_framebuffer_state *fb)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->framebuffer = *fb;
   nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
}
static void
nvc0_memory_barrier(struct pipe_context *pipe, unsigned flags)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   int i, s;

   if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
      for (i = 0; i < nvc0->num_vtxbufs; ++i) {
         if (!nvc0->vtxbuf[i].buffer)
            continue;
         if (nvc0->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
            nvc0->base.vbo_dirty = true;
      }

      if (nvc0->idxbuf.buffer &&
          nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
         nvc0->base.vbo_dirty = true;

      for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) {
         uint32_t valid = nvc0->constbuf_valid[s];

         while (valid && !nvc0->cb_dirty) {
            const unsigned i = ffs(valid) - 1;
            struct pipe_resource *res;

            valid &= ~(1 << i);
            if (nvc0->constbuf[s][i].user)
               continue;

            res = nvc0->constbuf[s][i].u.buf;
            if (!res)
               continue;

            if (res->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
               nvc0->cb_dirty = true;
         }
      }
   } else {
      /* Pretty much any writing by shaders needs a serialize after
       * it. Especially when moving between 3d and compute pipelines, but even
       * without that. */
      IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
   }

   /* If we're going to texture from a buffer/image written by a shader, we
    * must flush the texture cache. */
   if (flags & PIPE_BARRIER_TEXTURE)
      IMMED_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 0);

   if (flags & PIPE_BARRIER_CONSTANT_BUFFER)
      nvc0->cb_dirty = true;
   if (flags & (PIPE_BARRIER_VERTEX_BUFFER | PIPE_BARRIER_INDEX_BUFFER))
      nvc0->base.vbo_dirty = true;
}
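/* Illustrative sketch (example flags only): an application writing through
 * a persistently mapped vertex buffer issues a barrier before drawing,
 * which the state tracker forwards here:
 *
 *    pipe->memory_barrier(pipe, PIPE_BARRIER_MAPPED_BUFFER);
 *    pipe->draw_vbo(pipe, &info);
 *
 * For MAPPED_BUFFER we only need to re-validate bindings of persistent
 * resources; everything else takes the SERIALIZE path above.
 */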
static void
nvc0_set_polygon_stipple(struct pipe_context *pipe,
                         const struct pipe_poly_stipple *stipple)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->stipple = *stipple;
   nvc0->dirty_3d |= NVC0_NEW_3D_STIPPLE;
}
static void
nvc0_set_stencil_ref(struct pipe_context *pipe,
                     const struct pipe_stencil_ref *sr)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->stencil_ref = *sr;
   nvc0->dirty_3d |= NVC0_NEW_3D_STENCIL_REF;
}
static void
nvc0_set_blend_color(struct pipe_context *pipe,
                     const struct pipe_blend_color *bcol)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->blend_colour = *bcol;
   nvc0->dirty_3d |= NVC0_NEW_3D_BLEND_COLOUR;
}
static void
nvc0_set_min_samples(struct pipe_context *pipe, unsigned min_samples)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   if (nvc0->min_samples != min_samples) {
      nvc0->min_samples = min_samples;
      nvc0->dirty_3d |= NVC0_NEW_3D_MIN_SAMPLES;
   }
}
static void
nvc0_set_clip_state(struct pipe_context *pipe,
                    const struct pipe_clip_state *clip)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   memcpy(nvc0->clip.ucp, clip->ucp, sizeof(clip->ucp));

   nvc0->dirty_3d |= NVC0_NEW_3D_CLIP;
}
static void
nvc0_sp_state_delete(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_program *prog = (struct nvc0_program *)hwcso;

   nvc0_program_destroy(nvc0_context(pipe), prog);

   FREE((void *)prog->pipe.tokens);
   FREE(prog);
}
/* NOTE: only called when not referenced anywhere, won't be bound */
static void
nvc0_sampler_view_destroy(struct pipe_context *pipe,
                          struct pipe_sampler_view *view)
{
   pipe_resource_reference(&view->texture, NULL);

   nvc0_screen_tic_free(nvc0_context(pipe)->screen, nv50_tic_entry(view));

   FREE(nv50_tic_entry(view));
}
static void
nvc0_render_condition(struct pipe_context *pipe,
                      struct pipe_query *pq, uint mode)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_query *q;
   uint32_t cond;
   bool negated = false;
   bool wait =
      mode != PIPE_RENDER_COND_NO_WAIT &&
      mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   if (!pq) {
      PUSH_SPACE(push, 1);
      IMMED_NVC0(push, NVC0_3D(COND_MODE), NVC0_3D_COND_MODE_ALWAYS);
      return;
   }
   q = nvc0_query(pq);

   /* NOTE: comparison of 2 queries only works if both have completed */
   switch (q->type) {
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      cond = negated ? NVC0_3D_COND_MODE_EQUAL :
                       NVC0_3D_COND_MODE_NOT_EQUAL;
      wait = true;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      if (likely(!negated)) {
         if (unlikely(q->nesting))
            cond = wait ? NVC0_3D_COND_MODE_NOT_EQUAL :
                          NVC0_3D_COND_MODE_ALWAYS;
         else
            cond = NVC0_3D_COND_MODE_RES_NON_ZERO;
      } else {
         cond = wait ? NVC0_3D_COND_MODE_EQUAL : NVC0_3D_COND_MODE_ALWAYS;
      }
      break;
   default:
      assert(!"render condition query not a predicate");
      cond = NVC0_3D_COND_MODE_ALWAYS;
      break;
   }

   if (wait)
      nvc0_query_fifo_wait(push, pq);

   PUSH_SPACE(push, 4);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NVC0(push, NVC0_3D(COND_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
   PUSH_DATA (push, cond);
}
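/* Illustrative sketch (example mode only) of conditional rendering driven
 * through this hook:
 *
 *    pipe->begin_query(pipe, q);          // occlusion query
 *    ... draw occluders ...
 *    pipe->end_query(pipe, q);
 *    pipe->render_condition(pipe, q, PIPE_RENDER_COND_WAIT);
 *    ... these draws are skipped if the query result is zero ...
 *    pipe->render_condition(pipe, NULL, 0);  // disable again
 */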