static void
nv50_texture_barrier(struct pipe_context *pipe)
{
   struct nouveau_pushbuf *push = nv50_context(pipe)->base.pushbuf;

   BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(TEX_CACHE_CTL), 1);
   PUSH_DATA (push, 0x20);
}
static void
nv50_sprite_coords_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t pntc[8], mode;
   struct nv50_program *fp = nv50->fragprog;
   unsigned i, c;
   unsigned m = (nv50->state.interpolant_ctrl >> 8) & 0xff;

   if (!nv50->rast->pipe.point_quad_rasterization) {
      if (nv50->state.point_sprite) {
         BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
         for (i = 0; i < 8; ++i)
            PUSH_DATA(push, 0);

         nv50->state.point_sprite = FALSE;
      }
      return;
   } else {
      nv50->state.point_sprite = TRUE;
   }

   memset(pntc, 0, sizeof(pntc));

   for (i = 0; i < fp->in_nr; i++) {
      unsigned n = util_bitcount(fp->in[i].mask);

      if (fp->in[i].sn != TGSI_SEMANTIC_GENERIC) {
         m += n;
         continue;
      }
      if (!(nv50->rast->pipe.sprite_coord_enable & (1 << fp->in[i].si))) {
         m += n;
         continue;
      }

      for (c = 0; c < 4; ++c) {
         if (fp->in[i].mask & (1 << c)) {
            pntc[m / 8] |= (c + 1) << ((m % 8) * 4);
            ++m;
         }
      }
   }

   if (nv50->rast->pipe.sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT)
      mode = 0x00;
   else
      mode = 0x10;

   BEGIN_NV04(push, NV50_3D(POINT_SPRITE_CTRL), 1);
   PUSH_DATA (push, mode);

   BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
   PUSH_DATAp(push, pntc, 8);
}
static void
nvc0_render_condition(struct pipe_context *pipe,
                      struct pipe_query *pq, uint mode)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_query *q;
   uint32_t cond;
   boolean negated = FALSE;
   boolean wait =
      mode != PIPE_RENDER_COND_NO_WAIT &&
      mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   if (!pq) {
      PUSH_SPACE(push, 1);
      IMMED_NVC0(push, NVC0_3D(COND_MODE), NVC0_3D_COND_MODE_ALWAYS);
      return;
   }
   q = nvc0_query(pq);

   /* NOTE: comparison of 2 queries only works if both have completed */
   switch (q->type) {
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      cond = negated ? NVC0_3D_COND_MODE_EQUAL :
                       NVC0_3D_COND_MODE_NOT_EQUAL;
      wait = TRUE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      if (likely(!negated)) {
         if (unlikely(q->nesting))
            cond = wait ? NVC0_3D_COND_MODE_NOT_EQUAL :
                          NVC0_3D_COND_MODE_ALWAYS;
         else
            cond = NVC0_3D_COND_MODE_RES_NON_ZERO;
      } else {
         cond = wait ? NVC0_3D_COND_MODE_EQUAL : NVC0_3D_COND_MODE_ALWAYS;
      }
      break;
   default:
      assert(!"render condition query not a predicate");
      cond = NVC0_3D_COND_MODE_ALWAYS; /* was "mode =", leaving cond uninitialized */
      break;
   }

   if (wait)
      nvc0_query_fifo_wait(push, pq);

   PUSH_SPACE(push, 4);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NVC0(push, NVC0_3D(COND_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
   PUSH_DATA (push, cond);
}
static void
nv30_draw_elements(struct nv30_context *nv30, boolean shorten,
                   unsigned mode, unsigned start, unsigned count,
                   unsigned instance_count, int32_t index_bias)
{
   const unsigned index_size = nv30->idxbuf.index_size;
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nouveau_object *eng3d = nv30->screen->eng3d;
   unsigned prim = nv30_prim_gl(mode);

#if 0 /*XXX*/
   if (index_bias != nv30->state.index_bias) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_BASE), 1);
      PUSH_DATA (push, index_bias);
      nv30->state.index_bias = index_bias;
   }
#endif

   if (eng3d->oclass == NV40_3D_CLASS && index_size > 1 &&
       nv30->idxbuf.buffer) {
      struct nv04_resource *res = nv04_resource(nv30->idxbuf.buffer);
      unsigned offset = nv30->idxbuf.offset;

      assert(nouveau_resource_mapped_by_gpu(&res->base));

      BEGIN_NV04(push, NV30_3D(IDXBUF_OFFSET), 2);
      PUSH_RESRC(push, NV30_3D(IDXBUF_OFFSET), BUFCTX_IDXBUF, res, offset,
                       NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, 0);
      PUSH_MTHD (push, NV30_3D(IDXBUF_FORMAT), BUFCTX_IDXBUF, res->bo,
                       (index_size == 2) ? 0x00000010 : 0x00000000,
                       res->domain | NOUVEAU_BO_RD, 0,
                       NV30_3D_IDXBUF_FORMAT_DMA1);
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, prim);
      while (count) {
         const unsigned mpush = 2047 * 256;
         unsigned npush  = (count > mpush) ? mpush : count;
         unsigned wpush  = ((npush + 255) & ~255) >> 8;

         count -= npush;

         BEGIN_NI04(push, NV30_3D(VB_INDEX_BATCH), wpush);
         while (npush >= 256) {
            PUSH_DATA (push, 0xff000000 | start);
            start += 256;
            npush -= 256;
         }

         if (npush)
            PUSH_DATA (push, ((npush - 1) << 24) | start);
      }
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
      PUSH_RESET(push, BUFCTX_IDXBUF);
   } else {
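/*
 * A hedged aside on the index-batch packing above: each VB_INDEX_BATCH
 * word appears to encode a run of consecutive indices, with (count - 1)
 * in bits 31:24 and the start index in the low bits, so a single word
 * covers at most 256 indices (hence 0xff000000 | start) and a maximal
 * packet covers 2047 * 256 of them. The helper below is hypothetical,
 * purely to illustrate the encoding used in the loop.
 */
static inline uint32_t
vb_index_batch_word(uint32_t start, uint32_t count)
{
   assert(count >= 1 && count <= 256);
   return ((count - 1) << 24) | start;
}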
void
nv20_emit_framebuffer(struct gl_context *ctx, int emit)
{
   struct nouveau_pushbuf *push = context_push(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct nouveau_surface *s;
   unsigned rt_format = NV20_3D_RT_FORMAT_TYPE_LINEAR;
   unsigned rt_pitch = 0, zeta_pitch = 0;
   unsigned bo_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR;

   if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT)
      return;

   PUSH_RESET(push, BUFCTX_FB);

   /* Render target */
   if (fb->_ColorDrawBuffers[0]) {
      s = &to_nouveau_renderbuffer(
         fb->_ColorDrawBuffers[0])->surface;

      rt_format |= get_rt_format(s->format);
      rt_pitch = s->pitch;

      BEGIN_NV04(push, NV20_3D(COLOR_OFFSET), 1);
      PUSH_MTHDl(push, NV20_3D(COLOR_OFFSET), BUFCTX_FB, s->bo, 0, bo_flags);
   }

   /* depth/stencil */
   if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
      s = &to_nouveau_renderbuffer(
         fb->Attachment[BUFFER_DEPTH].Renderbuffer)->surface;

      rt_format |= get_rt_format(s->format);
      zeta_pitch = s->pitch;

      BEGIN_NV04(push, NV20_3D(ZETA_OFFSET), 1);
      PUSH_MTHDl(push, NV20_3D(ZETA_OFFSET), BUFCTX_FB, s->bo, 0, bo_flags);

      if (context_chipset(ctx) >= 0x25)
         setup_hierz_buffer(ctx);
   } else {
      rt_format |= get_rt_format(MESA_FORMAT_Z24_S8);
      zeta_pitch = rt_pitch;
   }

   BEGIN_NV04(push, NV20_3D(RT_FORMAT), 2);
   PUSH_DATA (push, rt_format);
   PUSH_DATA (push, zeta_pitch << 16 | rt_pitch);

   /* Recompute the viewport/scissor state. */
   context_dirty(ctx, VIEWPORT);
   context_dirty(ctx, SCISSOR);
}
static void
disp_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i32(elts, nR, ctx->restart_index);

      translate->run_elts(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i32(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, ctx->restart_index);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
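/*
 * For reference, prim_restart_search_i32() is assumed to scan the index
 * array for the restart index and return how many leading elements can
 * be drawn before it (or the full count if it never occurs). A minimal
 * sketch consistent with how the caller above uses it:
 */
static inline unsigned
prim_restart_search_i32_sketch(const uint32_t *elts, unsigned count,
                               uint32_t index)
{
   unsigned i;
   for (i = 0; i < count && elts[i] != index; ++i);
   return i;
}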
static void
nv50_query_begin(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);

   /* For occlusion queries we have to change the storage, because a previous
    * query might set the initial render condition to FALSE even *after* we
    * re-initialized it to TRUE.
    */
   if (q->type == PIPE_QUERY_OCCLUSION_COUNTER) {
      q->offset += 16;
      q->data += 16 / sizeof(*q->data);
      if (q->offset - q->base == NV50_QUERY_ALLOC_SPACE)
         nv50_query_allocate(nv50, q, NV50_QUERY_ALLOC_SPACE);

      /* XXX: can we do this with the GPU, and sync with respect to a previous
       * query ?
       */
      q->data[0] = q->sequence;     /* initialize sequence */
      q->data[1] = 1;               /* initial render condition = TRUE */
      q->data[4] = q->sequence + 1; /* for comparison COND_MODE */
      q->data[5] = 0;
   }
   if (!q->is64bit)
      q->data[0] = q->sequence++; /* the previously used one */

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      PUSH_SPACE(push, 4);
      BEGIN_NV04(push, NV50_3D(COUNTER_RESET), 1);
      PUSH_DATA (push, NV50_3D_COUNTER_RESET_SAMPLECNT);
      BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
      PUSH_DATA (push, 1);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nv50_query_get(push, q, 0x10, 0x06805002);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nv50_query_get(push, q, 0x10, 0x05805002);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nv50_query_get(push, q, 0x20, 0x05805002);
      nv50_query_get(push, q, 0x30, 0x06805002);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_query_get(push, q, 0x10, 0x00005002);
      break;
   default:
      break;
   }
   q->ready = FALSE;
}
void
nv04_emit_scissor(struct gl_context *ctx, int emit)
{
   struct nouveau_pushbuf *push = context_push(ctx);
   int x, y, w, h;

   get_scissors(ctx->DrawBuffer, &x, &y, &w, &h);

   BEGIN_NV04(push, NV04_SF3D(CLIP_HORIZONTAL), 2);
   PUSH_DATA (push, w << 16 | x);
   PUSH_DATA (push, h << 16 | y);
}
void
nv10_emit_alpha_func(struct gl_context *ctx, int emit)
{
   struct nouveau_pushbuf *push = context_push(ctx);

   BEGIN_NV04(push, NV10_3D(ALPHA_FUNC_ENABLE), 1);
   PUSH_DATAb(push, ctx->Color.AlphaEnabled);

   BEGIN_NV04(push, NV10_3D(ALPHA_FUNC_FUNC), 2);
   PUSH_DATA (push, nvgl_comparison_op(ctx->Color.AlphaFunc));
   PUSH_DATA (push, FLOAT_TO_UBYTE(ctx->Color.AlphaRef));
}
static void
nv30_screen_fence_emit(struct pipe_screen *pscreen, uint32_t *sequence)
{
   struct nv30_screen *screen = nv30_screen(pscreen);
   struct nouveau_pushbuf *push = screen->base.pushbuf;

   *sequence = ++screen->base.fence.sequence;

   BEGIN_NV04(push, NV30_3D(FENCE_OFFSET), 2);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, *sequence);
}
Bool
NV10EXAPrepareComposite(int op,
                        PicturePtr pict_src,
                        PicturePtr pict_mask,
                        PicturePtr pict_dst,
                        PixmapPtr src,
                        PixmapPtr mask,
                        PixmapPtr dst)
{
   ScrnInfoPtr pScrn = xf86ScreenToScrn(dst->drawable.pScreen);
   NVPtr pNv = NVPTR(pScrn);
   struct nouveau_pushbuf *push = pNv->pushbuf;
   uint32_t sc, sa, mc, ma;

   if (!PUSH_SPACE(push, 128))
      return FALSE;
   PUSH_RESET(push);

   /* setup render target and blending */
   if (!setup_render_target(pNv, pict_dst, dst))
      return FALSE;
   setup_blend_function(pNv, pict_dst, op);

   /* select picture sources */
   if (!setup_picture(pNv, pict_src, src, 0, &sc, &sa))
      return FALSE;
   if (!setup_picture(pNv, pict_mask, mask, 1, &mc, &ma))
      return FALSE;

   /* configure register combiners */
   BEGIN_NV04(push, NV10_3D(RC_IN_ALPHA(0)), 1);
   PUSH_DATA (push, sa | ma);
   BEGIN_NV04(push, NV10_3D(RC_IN_RGB(0)), 1);
   if (effective_component_alpha(pict_mask)) {
      if (needs_src_alpha(op))
         PUSH_DATA(push, sa | mc);
      else
         PUSH_DATA(push, sc | mc);
   } else {
      PUSH_DATA(push, sc | ma);
   }

   nouveau_pushbuf_bufctx(push, pNv->bufctx);
   if (nouveau_pushbuf_validate(push)) {
      nouveau_pushbuf_bufctx(push, NULL);
      return FALSE;
   }

   pNv->pspict = pict_src;
   pNv->pmpict = pict_mask;
   return TRUE;
}
static boolean
nv50_hw_sm_begin_query(struct nv50_context *nv50, struct nv50_hw_query *hq)
{
   struct nv50_screen *screen = nv50->screen;
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_hw_sm_query *hsq = nv50_hw_sm_query(hq);
   const struct nv50_hw_sm_query_cfg *cfg;
   uint16_t func;
   int i, c;

   cfg = nv50_hw_sm_query_get_cfg(nv50, hq);

   /* check if we have enough free counter slots */
   if (screen->pm.num_hw_sm_active + cfg->num_counters > 4) {
      NOUVEAU_ERR("Not enough free MP counter slots !\n");
      return false;
   }

   assert(cfg->num_counters <= 4);
   PUSH_SPACE(push, 4 * 4);

   /* set sequence field to 0 (used to check if result is available) */
   for (i = 0; i < screen->MPsInTP; ++i) {
      const unsigned b = (0x14 / 4) * i;
      hq->data[b + 16] = 0;
   }
   hq->sequence++;

   for (i = 0; i < cfg->num_counters; i++) {
      screen->pm.num_hw_sm_active++;

      /* find free counter slots */
      for (c = 0; c < 4; ++c) {
         if (!screen->pm.mp_counter[c]) {
            hsq->ctr[i] = c;
            screen->pm.mp_counter[c] = hsq;
            break;
         }
      }

      /* select func to aggregate counters */
      func = nv50_hw_sm_get_func(c);

      /* configure and reset the counter(s) */
      BEGIN_NV04(push, NV50_COMPUTE(MP_PM_CONTROL(c)), 1);
      PUSH_DATA (push, (cfg->ctr[i].sig << 24) | (func << 8) |
                 cfg->ctr[i].unit | cfg->ctr[i].mode);
      BEGIN_NV04(push, NV50_COMPUTE(MP_PM_SET(c)), 1);
      PUSH_DATA (push, 0);
   }
   return true;
}
static inline void
PUSH_VTX2s(struct nouveau_pushbuf *push,
           int x1, int y1, int x2, int y2, int dx, int dy)
{
   BEGIN_NV04(push, NV10_3D(VERTEX_TX0_2I), 1);
   PUSH_DATA (push, ((y1 & 0xffff) << 16) | (x1 & 0xffff));
   BEGIN_NV04(push, NV10_3D(VERTEX_TX1_2I), 1);
   PUSH_DATA (push, ((y2 & 0xffff) << 16) | (x2 & 0xffff));
   BEGIN_NV04(push, NV10_3D(VERTEX_POS_3F_X), 3);
   PUSH_DATAf(push, dx);
   PUSH_DATAf(push, dy);
   PUSH_DATAf(push, 0.0);
}
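/*
 * A minimal usage sketch (not from the source): helpers like PUSH_VTX2s
 * are typically called between VERTEX_BEGIN_END brackets, one call per
 * corner, to rasterize a dual-textured quad, e.g. when compositing a
 * rectangle. The helper name and the untransformed coordinate math here
 * are hypothetical simplifications.
 */
static inline void
emit_quad_sketch(struct nouveau_pushbuf *push, int sx, int sy,
                 int mx, int my, int dx, int dy, int w, int h)
{
   BEGIN_NV04(push, NV10_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV10_3D_VERTEX_BEGIN_END_QUADS);
   PUSH_VTX2s(push, sx,     sy,     mx,     my,     dx,     dy);
   PUSH_VTX2s(push, sx + w, sy,     mx + w, my,     dx + w, dy);
   PUSH_VTX2s(push, sx + w, sy + h, mx + w, my + h, dx + w, dy + h);
   PUSH_VTX2s(push, sx,     sy + h, mx,     my + h, dx,     dy + h);
   BEGIN_NV04(push, NV10_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV10_3D_VERTEX_BEGIN_END_STOP);
}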
void
nv10_emit_stencil_func(struct gl_context *ctx, int emit)
{
   struct nouveau_pushbuf *push = context_push(ctx);

   BEGIN_NV04(push, NV10_3D(STENCIL_ENABLE), 1);
   PUSH_DATAb(push, ctx->Stencil.Enabled);

   BEGIN_NV04(push, NV10_3D(STENCIL_FUNC_FUNC), 3);
   PUSH_DATA (push, nvgl_comparison_op(ctx->Stencil.Function[0]));
   PUSH_DATA (push, ctx->Stencil.Ref[0]);
   PUSH_DATA (push, ctx->Stencil.ValueMask[0]);
}
static Bool
setup_render_target(NVPtr pNv, PicturePtr pict, PixmapPtr pixmap)
{
   struct nouveau_pushbuf *push = pNv->pushbuf;
   struct nouveau_bo *bo = nouveau_pixmap_bo(pixmap);

   BEGIN_NV04(push, NV10_3D(RT_FORMAT), 3);
   PUSH_DATA (push, get_rt_format(pict));
   PUSH_DATA (push, (exaGetPixmapPitch(pixmap) << 16 |
                     exaGetPixmapPitch(pixmap)));
   PUSH_MTHDl(push, NV10_3D(COLOR_OFFSET), bo, 0,
              NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);

   return TRUE;
}
static void
nv50_query_get(struct nouveau_pushbuf *push, struct nv50_query *q,
               unsigned offset, uint32_t get)
{
   offset += q->offset;

   PUSH_SPACE(push, 5);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_WR);
   BEGIN_NV04(push, NV50_3D(QUERY_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, q->bo->offset + offset);
   PUSH_DATA (push, q->bo->offset + offset);
   PUSH_DATA (push, q->sequence);
   PUSH_DATA (push, get);
}
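/*
 * The four words above map onto QUERY_ADDRESS_HIGH/LOW, QUERY_SEQUENCE
 * and QUERY_GET: when the "get" command retires, the GPU writes the
 * report plus the given sequence number to bo->offset + offset. Callers
 * pass hardware-specific get tokens; a sketch reusing the value that
 * nv50_query_begin above emits for PIPE_QUERY_TIME_ELAPSED:
 */
static inline void
query_time_elapsed_sketch(struct nouveau_pushbuf *push, struct nv50_query *q)
{
   /* 0x00005002 is the timestamp "get" token seen in nv50_query_begin */
   nv50_query_get(push, q, 0x10, 0x00005002);
}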
void
nv84_query_fifo_wait(struct nouveau_pushbuf *push, struct pipe_query *pq)
{
   struct nv50_query *q = nv50_query(pq);
   unsigned offset = q->offset;

   PUSH_SPACE(push, 5);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NV04(push, SUBC_3D(NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, q->bo->offset + offset);
   PUSH_DATA (push, q->bo->offset + offset);
   PUSH_DATA (push, q->sequence);
   PUSH_DATA (push, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
}
static void
nv30_screen_fence_emit(struct pipe_screen *pscreen, uint32_t *sequence)
{
   struct nv30_screen *screen = nv30_screen(pscreen);
   struct nouveau_pushbuf *push = screen->base.pushbuf;

   *sequence = ++screen->base.fence.sequence;

   assert(PUSH_AVAIL(push) + push->rsvd_kick >= 3);
   PUSH_DATA (push, NV30_3D_FENCE_OFFSET |
              (2 /* size */ << 18) | (7 /* subchan */ << 13));
   PUSH_DATA (push, 0);
   PUSH_DATA (push, *sequence);
}
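/*
 * The raw word pushed above is a hand-built NV04-style method header,
 * equivalent to what BEGIN_NV04(push, NV30_3D(FENCE_OFFSET), 2) would
 * emit: method offset in the low bits, subchannel in bits 15:13, and the
 * data-word count in bits 28:18. A hypothetical helper for illustration:
 */
static inline uint32_t
nv04_method_header_sketch(int subc, int mthd, unsigned size)
{
   return (size << 18) | (subc << 13) | mthd;
}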
static void
nvc0_compute_validate_constbufs(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   const int s = 5;

   while (nvc0->constbuf_dirty[s]) {
      int i = ffs(nvc0->constbuf_dirty[s]) - 1;
      nvc0->constbuf_dirty[s] &= ~(1 << i);

      if (nvc0->constbuf[s][i].user) {
         struct nouveau_bo *bo = nvc0->screen->uniform_bo;
         const unsigned base = NVC0_CB_USR_INFO(s);
         const unsigned size = nvc0->constbuf[s][0].size;
         assert(i == 0); /* we really only want OpenGL uniforms here */
         assert(nvc0->constbuf[s][0].u.data);

         if (nvc0->state.uniform_buffer_bound[s] < size) {
            nvc0->state.uniform_buffer_bound[s] = align(size, 0x100);

            BEGIN_NVC0(push, NVC0_CP(CB_SIZE), 3);
            PUSH_DATA (push, nvc0->state.uniform_buffer_bound[s]);
            PUSH_DATAh(push, bo->offset + base);
            PUSH_DATA (push, bo->offset + base);
            BEGIN_NVC0(push, NVC0_CP(CB_BIND), 1);
            PUSH_DATA (push, (0 << 8) | 1);
         }
         nvc0_cb_bo_push(&nvc0->base, bo, NV_VRAM_DOMAIN(&nvc0->screen->base),
                         base, nvc0->state.uniform_buffer_bound[s],
                         0, (size + 3) / 4,
                         nvc0->constbuf[s][0].u.data);
      } else {
         struct nv04_resource *res =
            nv04_resource(nvc0->constbuf[s][i].u.buf);
         if (res) {
            BEGIN_NVC0(push, NVC0_CP(CB_SIZE), 3);
            PUSH_DATA (push, nvc0->constbuf[s][i].size);
            PUSH_DATAh(push, res->address + nvc0->constbuf[s][i].offset);
            PUSH_DATA (push, res->address + nvc0->constbuf[s][i].offset);
            BEGIN_NVC0(push, NVC0_CP(CB_BIND), 1);
            PUSH_DATA (push, (i << 8) | 1);

            BCTX_REFN(nvc0->bufctx_cp, CP_CB(i), res, RD);

            res->cb_bindings[s] |= 1 << i;
         } else {
            BEGIN_NVC0(push, NVC0_CP(CB_BIND), 1);
            PUSH_DATA (push, (i << 8) | 0);
         }
         if (i == 0)
            nvc0->state.uniform_buffer_bound[s] = 0;
      }
   }

   nvc0_compute_invalidate_constbufs(nvc0);

   BEGIN_NVC0(push, NVC0_CP(FLUSH), 1);
   PUSH_DATA (push, NVC0_COMPUTE_FLUSH_CB);
}
void
nv04_emit_framebuffer(struct gl_context *ctx, int emit)
{
   struct nouveau_pushbuf *push = context_push(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct nouveau_surface *s;
   uint32_t rt_format = NV04_CONTEXT_SURFACES_3D_FORMAT_TYPE_PITCH;
   uint32_t rt_pitch = 0, zeta_pitch = 0;
   unsigned bo_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR;

   if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT)
      return;

   PUSH_RESET(push, BUFCTX_FB);

   /* Render target */
   if (fb->_ColorDrawBuffers[0]) {
      s = &to_nouveau_renderbuffer(
         fb->_ColorDrawBuffers[0])->surface;

      rt_format |= get_rt_format(s->format);
      zeta_pitch = rt_pitch = s->pitch;

      BEGIN_NV04(push, NV04_SF3D(OFFSET_COLOR), 1);
      PUSH_MTHDl(push, NV04_SF3D(OFFSET_COLOR), BUFCTX_FB, s->bo, 0, bo_flags);
   }

   /* depth/stencil */
   if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
      s = &to_nouveau_renderbuffer(
         fb->Attachment[BUFFER_DEPTH].Renderbuffer)->surface;

      zeta_pitch = s->pitch;

      BEGIN_NV04(push, NV04_SF3D(OFFSET_ZETA), 1);
      PUSH_MTHDl(push, NV04_SF3D(OFFSET_ZETA), BUFCTX_FB, s->bo, 0, bo_flags);
   }

   BEGIN_NV04(push, NV04_SF3D(FORMAT), 1);
   PUSH_DATA (push, rt_format);
   BEGIN_NV04(push, NV04_SF3D(PITCH), 1);
   PUSH_DATA (push, zeta_pitch << 16 | rt_pitch);

   /* Recompute the scissor state. */
   context_dirty(ctx, SCISSOR);
   context_dirty(ctx, CONTROL);
}
static void
nvc0_compute_validate_driverconst(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_screen *screen = nvc0->screen;

   BEGIN_NVC0(push, NVC0_CP(CB_SIZE), 3);
   PUSH_DATA (push, NVC0_CB_AUX_SIZE);
   PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(5));
   PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(5));
   BEGIN_NVC0(push, NVC0_CP(CB_BIND), 1);
   PUSH_DATA (push, (15 << 8) | 1);

   nvc0->dirty_3d |= NVC0_NEW_3D_DRIVERCONST;
}
static void
nv30_transfer_rect_m2mf(XFER_ARGS)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nouveau_pushbuf_refn refs[] = {
      { src->bo, src->domain | NOUVEAU_BO_RD },
      { dst->bo, dst->domain | NOUVEAU_BO_WR },
   };
   struct nv04_fifo *fifo = push->channel->data;
   unsigned src_offset = src->offset;
   unsigned dst_offset = dst->offset;
   unsigned w = dst->x1 - dst->x0;
   unsigned h = dst->y1 - dst->y0;

   src_offset += (src->y0 * src->pitch) + (src->x0 * src->cpp);
   dst_offset += (dst->y0 * dst->pitch) + (dst->x0 * dst->cpp);

   BEGIN_NV04(push, NV03_M2MF(DMA_BUFFER_IN), 2);
   PUSH_DATA (push, (src->domain == NOUVEAU_BO_VRAM) ? fifo->vram : fifo->gart);
   PUSH_DATA (push, (dst->domain == NOUVEAU_BO_VRAM) ? fifo->vram : fifo->gart);

   while (h) {
      unsigned lines = (h > 2047) ? 2047 : h;

      if (nouveau_pushbuf_space(push, 13, 2, 0) ||
          nouveau_pushbuf_refn (push, refs, 2))
         return;

      BEGIN_NV04(push, NV03_M2MF(OFFSET_IN), 8);
      PUSH_RELOC(push, src->bo, src_offset, NOUVEAU_BO_LOW, 0, 0);
      PUSH_RELOC(push, dst->bo, dst_offset, NOUVEAU_BO_LOW, 0, 0);
      PUSH_DATA (push, src->pitch);
      PUSH_DATA (push, dst->pitch);
      PUSH_DATA (push, w * src->cpp);
      PUSH_DATA (push, lines);
      PUSH_DATA (push, NV03_M2MF_FORMAT_INPUT_INC_1 |
                       NV03_M2MF_FORMAT_OUTPUT_INC_1);
      PUSH_DATA (push, 0x00000000);
      BEGIN_NV04(push, NV04_GRAPH(M2MF, NOP), 1);
      PUSH_DATA (push, 0x00000000);
      BEGIN_NV04(push, NV03_M2MF(OFFSET_OUT), 1);
      PUSH_DATA (push, 0x00000000);

      h -= lines;
      src_offset += src->pitch * lines;
      dst_offset += dst->pitch * lines;
   }
}
static void
nv10_render_set_format(struct gl_context *ctx)
{
   struct nouveau_render_state *render = to_render_state(ctx);
   struct nouveau_pushbuf *push = context_push(ctx);
   int i, attr, hw_format;

   FOR_EACH_ATTR(render, i, attr) {
      if (attr >= 0) {
         struct nouveau_array *a = &render->attrs[attr];

         hw_format = a->stride << 8 |
            a->fields << 4 |
            get_hw_format(a->type);

         if (attr == VERT_ATTRIB_POS && a->fields == 4)
            hw_format |= NV10_3D_VTXBUF_FMT_HOMOGENEOUS;
      } else {
         /* Unused attribute. */
         hw_format = NV10_3D_VTXBUF_FMT_TYPE_V32_FLOAT;
      }

      BEGIN_NV04(push, NV10_3D(VTXBUF_FMT(i)), 1);
      PUSH_DATA (push, hw_format);
   }
}
struct nouveau_object *
nv04_context_engine(struct gl_context *ctx)
{
   struct nv04_context *nctx = to_nv04_context(ctx);
   struct nouveau_hw_state *hw = &to_nouveau_context(ctx)->hw;
   struct nouveau_pushbuf *push = context_push(ctx);
   struct nouveau_object *fahrenheit;

   if ((ctx->Texture.Unit[0]._ReallyEnabled &&
        texunit_needs_combiners(&ctx->Texture.Unit[0])) ||
       ctx->Texture.Unit[1]._ReallyEnabled ||
       ctx->Stencil.Enabled ||
       !(ctx->Color.ColorMask[0][RCOMP] &&
         ctx->Color.ColorMask[0][GCOMP] &&
         ctx->Color.ColorMask[0][BCOMP] &&
         ctx->Color.ColorMask[0][ACOMP]))
      fahrenheit = hw->eng3dm;
   else
      fahrenheit = hw->eng3d;

   if (fahrenheit != nctx->eng3d) {
      BEGIN_NV04(push, NV01_SUBC(3D, OBJECT), 1);
      PUSH_DATA (push, fahrenheit->handle);
      nctx->eng3d = fahrenheit;
   }

   return fahrenheit;
}
static void
emit_vertices_i08(struct push_context *ctx, unsigned start, unsigned count)
{
   uint8_t *elts = (uint8_t *)ctx->idxbuf + start;

   while (count) {
      unsigned push = MIN2(count, ctx->packet_vertex_limit);
      unsigned size, nr;

      nr = push;
      if (ctx->primitive_restart)
         nr = prim_restart_search_i08(elts, push, ctx->restart_index);

      size = ctx->vertex_words * nr;

      BEGIN_NI04(ctx->push, NV30_3D(VERTEX_DATA), size);

      ctx->translate->run_elts8(ctx->translate, elts, nr, 0, 0,
                                ctx->push->cur);

      ctx->push->cur += size;
      count -= nr;
      elts += nr;

      if (nr != push) {
         BEGIN_NV04(ctx->push, NV30_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (ctx->push, ctx->restart_index);
         count--;
         elts++;
      }
   }
}
static bool
nv50_compute_validate_program(struct nv50_context *nv50)
{
   struct nv50_program *prog = nv50->compprog;

   if (prog->mem)
      return true;

   if (!prog->translated) {
      prog->translated = nv50_program_translate(
         prog, nv50->screen->base.device->chipset, &nv50->base.debug);
      if (!prog->translated)
         return false;
   }
   if (unlikely(!prog->code_size))
      return false;

   if (likely(prog->code_size)) {
      if (nv50_program_upload_code(nv50, prog)) {
         struct nouveau_pushbuf *push = nv50->base.pushbuf;
         BEGIN_NV04(push, NV50_COMPUTE(CODE_CB_FLUSH), 1);
         PUSH_DATA (push, 0);
         return true;
      }
   }
   return false;
}
static void
nv50_compute_upload_input(struct nv50_context *nv50, const uint32_t *input)
{
   struct nv50_screen *screen = nv50->screen;
   struct nouveau_pushbuf *push = screen->base.pushbuf;
   unsigned size = align(nv50->compprog->parm_size, 0x4);

   BEGIN_NV04(push, NV50_COMPUTE(USER_PARAM_COUNT), 1);
   PUSH_DATA (push, (size / 4) << 8);

   if (size) {
      struct nouveau_mm_allocation *mm;
      struct nouveau_bo *bo = NULL;
      unsigned offset;

      mm = nouveau_mm_allocate(screen->base.mm_GART, size, &bo, &offset);
      assert(mm);

      nouveau_bo_map(bo, 0, screen->base.client);
      memcpy(bo->map + offset, input, size);

      nouveau_bufctx_refn(nv50->bufctx, 0, bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
      nouveau_pushbuf_bufctx(push, nv50->bufctx);
      nouveau_pushbuf_validate(push);

      BEGIN_NV04(push, NV50_COMPUTE(USER_PARAM(0)), size / 4);
      nouveau_pushbuf_data(push, bo, offset, size);

      nouveau_fence_work(screen->base.fence.current,
                         nouveau_mm_free_work, mm);
      nouveau_bo_ref(NULL, &bo);
      nouveau_bufctx_reset(nv50->bufctx, 0);
   }
}
void
nv50_upload_ms_info(struct nouveau_pushbuf *push)
{
   BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
   PUSH_DATA (push, (NV50_CB_AUX_MS_OFFSET << (8 - 2)) | NV50_CB_AUX);
   BEGIN_NI04(push, NV50_3D(CB_DATA(0)), Elements(msaa_sample_xy_offsets));
   PUSH_DATAp(push, msaa_sample_xy_offsets, Elements(msaa_sample_xy_offsets));
}
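/*
 * Sketch of the CB_ADDR packing used above, assuming the usual nv50
 * layout: the low 8 bits select the constant buffer and the upper bits
 * give the write position in 4-byte units, which is why the byte offset
 * is shifted by (8 - 2), i.e. (offset_in_bytes / 4) << 8. Hypothetical
 * helper for illustration only:
 */
static inline uint32_t
nv50_cb_addr_sketch(unsigned buf_index, unsigned byte_offset)
{
   return ((byte_offset / 4) << 8) | buf_index;
}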
boolean
nvc0_compute_validate_program(struct nvc0_context *nvc0)
{
   struct nvc0_program *prog = nvc0->compprog;

   if (prog->mem)
      return TRUE;

   if (!prog->translated) {
      prog->translated = nvc0_program_translate(
         prog, nvc0->screen->base.device->chipset);
      if (!prog->translated)
         return FALSE;
   }
   if (unlikely(!prog->code_size))
      return FALSE;

   if (likely(prog->code_size)) {
      if (nvc0_program_upload_code(nvc0, prog)) {
         struct nouveau_pushbuf *push = nvc0->base.pushbuf;
         BEGIN_NVC0(push, NVC0_COMPUTE(FLUSH), 1);
         PUSH_DATA (push, NVC0_COMPUTE_FLUSH_CODE);
         return TRUE;
      }
   }
   return FALSE;
}
static void
nvc0_clear_render_target(struct pipe_context *pipe, struct pipe_surface *dst,
                         const union pipe_color_union *color,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nv50_surface *sf = nv50_surface(dst);
   struct nv04_resource *res = nv04_resource(sf->base.texture);
   unsigned z;

   if (!PUSH_SPACE(push, 32 + sf->depth))
      return;

   PUSH_REFN (push, res->bo, res->domain | NOUVEAU_BO_WR);

   BEGIN_NVC0(push, NVC0_3D(CLEAR_COLOR(0)), 4);
   PUSH_DATAf(push, color->f[0]);
   PUSH_DATAf(push, color->f[1]);
   PUSH_DATAf(push, color->f[2]);
   PUSH_DATAf(push, color->f[3]);

   BEGIN_NVC0(push, NVC0_3D(SCREEN_SCISSOR_HORIZ), 2);
   PUSH_DATA (push, ( width << 16) | dstx);
   PUSH_DATA (push, (height << 16) | dsty);

   BEGIN_NVC0(push, NVC0_3D(RT_CONTROL), 1);
   PUSH_DATA (push, 1);

   BEGIN_NVC0(push, NVC0_3D(RT_ADDRESS_HIGH(0)), 9);
   PUSH_DATAh(push, res->address + sf->offset);
   PUSH_DATA (push, res->address + sf->offset);
   if (likely(nouveau_bo_memtype(res->bo))) {
      struct nv50_miptree *mt = nv50_miptree(dst->texture);

      PUSH_DATA(push, sf->width);
      PUSH_DATA(push, sf->height);
      PUSH_DATA(push, nvc0_format_table[dst->format].rt);
      PUSH_DATA(push, (mt->layout_3d << 16) |
                mt->level[sf->base.u.tex.level].tile_mode);
      PUSH_DATA(push, dst->u.tex.first_layer + sf->depth);
      PUSH_DATA(push, mt->layer_stride >> 2);
      PUSH_DATA(push, dst->u.tex.first_layer);
   } else {
      if (res->base.target == PIPE_BUFFER) {