static void
nvc0_render_condition(struct pipe_context *pipe,
                      struct pipe_query *pq,
                      boolean condition, uint mode)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_query *q;
   uint32_t cond;
   boolean wait =
      mode != PIPE_RENDER_COND_NO_WAIT &&
      mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   nvc0->cond_query = pq;
   nvc0->cond_cond = condition;
   nvc0->cond_mode = mode;

   if (!pq) {
      PUSH_SPACE(push, 1);
      IMMED_NVC0(push, NVC0_3D(COND_MODE), NVC0_3D_COND_MODE_ALWAYS);
      return;
   }
   q = nvc0_query(pq);

   /* NOTE: comparison of 2 queries only works if both have completed */
   switch (q->type) {
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      cond = condition ? NVC0_3D_COND_MODE_EQUAL :
                         NVC0_3D_COND_MODE_NOT_EQUAL;
      wait = TRUE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      if (likely(!condition)) {
         if (unlikely(q->nesting))
            cond = wait ? NVC0_3D_COND_MODE_NOT_EQUAL :
                          NVC0_3D_COND_MODE_ALWAYS;
         else
            cond = NVC0_3D_COND_MODE_RES_NON_ZERO;
      } else {
         cond = wait ? NVC0_3D_COND_MODE_EQUAL : NVC0_3D_COND_MODE_ALWAYS;
      }
      break;
   default:
      assert(!"render condition query not a predicate");
      cond = NVC0_3D_COND_MODE_ALWAYS;
      break;
   }

   if (wait)
      nvc0_query_fifo_wait(push, pq);

   PUSH_SPACE(push, 4);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NVC0(push, NVC0_3D(COND_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
   PUSH_DATA (push, cond);
}
static void
nv50_query_begin(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);

   /* For occlusion queries we have to change the storage, because a previous
    * query might set the initial render condition to FALSE even *after* we re-
    * initialized it to TRUE.
    */
   if (q->type == PIPE_QUERY_OCCLUSION_COUNTER) {
      q->offset += 16;
      q->data += 16 / sizeof(*q->data);
      if (q->offset - q->base == NV50_QUERY_ALLOC_SPACE)
         nv50_query_allocate(nv50, q, NV50_QUERY_ALLOC_SPACE);

      /* XXX: can we do this with the GPU, and sync with respect to a previous
       * query ?
       */
      q->data[1] = 1; /* initial render condition = TRUE */
   }
   if (!q->is64bit)
      q->data[0] = q->sequence++; /* the previously used one */

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      PUSH_SPACE(push, 4);
      BEGIN_NV04(push, NV50_3D(COUNTER_RESET), 1);
      PUSH_DATA (push, NV50_3D_COUNTER_RESET_SAMPLECNT);
      BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
      PUSH_DATA (push, 1);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED: /* store before & after instead ? */
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, NV50_3D(COUNTER_RESET), 1);
      PUSH_DATA (push, NV50_3D_COUNTER_RESET_GENERATED_PRIMITIVES);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, NV50_3D(COUNTER_RESET), 1);
      PUSH_DATA (push, NV50_3D_COUNTER_RESET_TRANSFORM_FEEDBACK);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      PUSH_SPACE(push, 3);
      BEGIN_NI04(push, NV50_3D(COUNTER_RESET), 2);
      PUSH_DATA (push, NV50_3D_COUNTER_RESET_TRANSFORM_FEEDBACK);
      PUSH_DATA (push, NV50_3D_COUNTER_RESET_GENERATED_PRIMITIVES);
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_query_get(push, q, 0x10, 0x00005002);
      break;
   default:
      break;
   }
   q->ready = FALSE;
}
static void
nv50_render_condition(struct pipe_context *pipe,
                      struct pipe_query *pq,
                      boolean condition, uint mode)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q;

   nv50->cond_query = pq;
   nv50->cond_cond = condition;
   nv50->cond_mode = mode;

   PUSH_SPACE(push, 6);

   if (!pq) {
      BEGIN_NV04(push, NV50_3D(COND_MODE), 1);
      PUSH_DATA (push, NV50_3D_COND_MODE_ALWAYS);
      return;
   }
   q = nv50_query(pq);

   if (mode == PIPE_RENDER_COND_WAIT ||
       mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   BEGIN_NV04(push, NV50_3D(COND_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
   PUSH_DATA (push, NV50_3D_COND_MODE_RES_NON_ZERO);
}
static void
disp_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   unsigned pos = 0;

   translate->run(translate, start, count, 0, ctx->instance_id, ctx->dest);
   do {
      unsigned nr = count;

      if (unlikely(ctx->edgeflag.enabled))
         nr = ef_toggle_search_seq(ctx, start + pos, nr);

      PUSH_SPACE(push, 4);
      if (likely(nr)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
         PUSH_DATA (push, pos);
         PUSH_DATA (push, nr);
      }
      if (unlikely(nr != count))
         IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

      pos += nr;
      count -= nr;
   } while (count);
}
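/* Inline upload of constant buffer contents through the 3D engine's
 * CB_POS/CB_DATA path. The data is split into chunks so that each packet
 * both fits into the remaining pushbuf space and stays below the
 * NV04_PFIFO_MAX_PACKET_LEN method-count limit.
 */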
/* This happens rather often with DTD9/st. */
void
nvc0_cb_push(struct nouveau_context *nv,
             struct nouveau_bo *bo, unsigned domain,
             unsigned base, unsigned size,
             unsigned offset, unsigned words, const uint32_t *data)
{
   struct nouveau_pushbuf *push = nv->pushbuf;

   NOUVEAU_DRV_STAT(nv->screen, constbuf_upload_count, 1);
   NOUVEAU_DRV_STAT(nv->screen, constbuf_upload_bytes, words * 4);

   assert(!(offset & 3));
   size = align(size, 0x100);

   BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
   PUSH_DATA (push, size);
   PUSH_DATAh(push, bo->offset + base);
   PUSH_DATA (push, bo->offset + base);

   while (words) {
      unsigned nr = PUSH_AVAIL(push);
      nr = MIN2(nr, words);
      nr = MIN2(nr, NV04_PFIFO_MAX_PACKET_LEN - 1);

      PUSH_SPACE(push, nr + 2);
      PUSH_REFN (push, bo, NOUVEAU_BO_WR | domain);
      BEGIN_1IC0(push, NVC0_3D(CB_POS), nr + 1);
      PUSH_DATA (push, offset);
      PUSH_DATAp(push, data, nr);

      words -= nr;
      data += nr;
      offset += nr * 4;
   }
}
void
NV40EXAComposite(PixmapPtr pdPix, int sx, int sy, int mx, int my,
                 int dx, int dy, int w, int h)
{
        ScrnInfoPtr pScrn = xf86ScreenToScrn(pdPix->drawable.pScreen);
        NVPtr pNv = NVPTR(pScrn);
        struct nouveau_pushbuf *push = pNv->pushbuf;

        if (!PUSH_SPACE(push, 64))
                return;

        /* We're drawing a triangle, we need to scissor it to a quad. */
        /* The scissors are here for a good reason, we don't get the full
         * image, but just a part.
         */
        /* Handling the cliprects is done for us already. */
        BEGIN_NV04(push, NV30_3D(SCISSOR_HORIZ), 2);
        PUSH_DATA (push, (w << 16) | dx);
        PUSH_DATA (push, (h << 16) | dy);

        BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
        PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_TRIANGLES);
        PUSH_VTX2s(push, sx, sy + (h * 2), mx, my + (h * 2), dx, dy + (h * 2));
        PUSH_VTX2s(push, sx, sy, mx, my, dx, dy);
        PUSH_VTX2s(push, sx + (w * 2), sy, mx + (w * 2), my, dx + (w * 2), dy);
        BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
        PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
}
static boolean
nve4_validate_tic(struct nvc0_context *nvc0, unsigned s)
{
   struct nouveau_bo *txc = nvc0->screen->txc;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned i;
   boolean need_flush = FALSE;

   for (i = 0; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
      struct nv04_resource *res;
      const boolean dirty = !!(nvc0->textures_dirty[s] & (1 << i));

      if (!tic) {
         nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
         continue;
      }
      res = nv04_resource(tic->pipe.texture);

      if (tic->id < 0) {
         tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);

         PUSH_SPACE(push, 16);
         BEGIN_NVC0(push, NVE4_P2MF(DST_ADDRESS_HIGH), 2);
         PUSH_DATAh(push, txc->offset + (tic->id * 32));
         PUSH_DATA (push, txc->offset + (tic->id * 32));
         BEGIN_NVC0(push, NVE4_P2MF(LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 1);
         BEGIN_1IC0(push, NVE4_P2MF(EXEC), 9);
         PUSH_DATA (push, 0x1001);
         PUSH_DATAp(push, &tic->tic[0], 8);

         need_flush = TRUE;
      } else
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
         PUSH_DATA (push, (tic->id << 4) | 1);
      }
      nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);

      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;

      nvc0->tex_handles[s][i] &= ~NVE4_TIC_ENTRY_INVALID;
      nvc0->tex_handles[s][i] |= tic->id;
      if (dirty)
         BCTX_REFN(nvc0->bufctx_3d, TEX(s, i), res, RD);
   }
   for (; i < nvc0->state.num_textures[s]; ++i) {
      nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
      nvc0->textures_dirty[s] |= 1 << i;
   }

   nvc0->state.num_textures[s] = nvc0->num_textures[s];

   return need_flush;
}
static void
nv50_query_end(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);

   /* Reset before the switch so the TIMESTAMP_DISJOINT case below can mark
    * the query as ready without being overwritten afterwards.
    */
   q->ready = q->flushed = FALSE;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      nv50_query_get(push, q, 0, 0x0100f002);
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
      PUSH_DATA (push, 0);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nv50_query_get(push, q, 0, 0x06805002);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nv50_query_get(push, q, 0, 0x05805002);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nv50_query_get(push, q, 0x00, 0x05805002);
      nv50_query_get(push, q, 0x10, 0x06805002);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      nv50_query_get(push, q, 0x00, 0x00801002); /* VFETCH, VERTICES */
      nv50_query_get(push, q, 0x10, 0x01801002); /* VFETCH, PRIMS */
      nv50_query_get(push, q, 0x20, 0x02802002); /* VP, LAUNCHES */
      nv50_query_get(push, q, 0x30, 0x03806002); /* GP, LAUNCHES */
      nv50_query_get(push, q, 0x40, 0x04806002); /* GP, PRIMS_OUT */
      nv50_query_get(push, q, 0x50, 0x07804002); /* RAST, PRIMS_IN */
      nv50_query_get(push, q, 0x60, 0x08804002); /* RAST, PRIMS_OUT */
      nv50_query_get(push, q, 0x70, 0x0980a002); /* ROP, PIXELS */
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->sequence++;
      /* fall through */
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_query_get(push, q, 0, 0x00005002);
      break;
   case PIPE_QUERY_GPU_FINISHED:
      q->sequence++;
      nv50_query_get(push, q, 0, 0x1000f010);
      break;
   case NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET:
      nv50_query_get(push, q, 0, 0x0d005002 | (q->index << 5));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* This query is not issued on GPU because disjoint is forced to FALSE */
      q->ready = TRUE;
      break;
   default:
      assert(0);
      break;
   }
}
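/* Emit draws for 32-bit indices already run through the translate module,
 * splitting the index stream at primitive-restart indices and at edge-flag
 * transitions. Runs of two or more vertices go out via
 * VERTEX_BUFFER_FIRST/COUNT, single vertices and the restart index itself
 * via VB_ELEMENT_U32.
 */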
static void
disp_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i32(elts, nR, ctx->restart_index);

      translate->run_elts(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i32(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, ctx->restart_index);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
static int
nvc0_2d_texture_do_copy(struct nouveau_pushbuf *push,
                        struct nv50_miptree *dst, unsigned dst_level,
                        unsigned dx, unsigned dy, unsigned dz,
                        struct nv50_miptree *src, unsigned src_level,
                        unsigned sx, unsigned sy, unsigned sz,
                        unsigned w, unsigned h)
{
   static const uint32_t duvdxy[5] =
   {
      0x40000000, 0x80000000, 0x00000001, 0x00000002, 0x00000004
   };
   int ret;
   uint32_t ctrl = 0x00;

   ret = PUSH_SPACE(push, 2 * 16 + 32);
   if (ret)
      return ret;

   ret = nvc0_2d_texture_set(push, TRUE, dst, dst_level, dz);
   if (ret)
      return ret;

   ret = nvc0_2d_texture_set(push, FALSE, src, src_level, sz);
   if (ret)
      return ret;

   /* NOTE: 2D engine doesn't work for MS8 */
   if (src->ms_x)
      ctrl = 0x11;

   /* 0/1 = CENTER/CORNER, 00/10 = POINT/BILINEAR */
   BEGIN_NVC0(push, NVC0_2D(BLIT_CONTROL), 1);
   PUSH_DATA (push, ctrl);
   BEGIN_NVC0(push, NVC0_2D(BLIT_DST_X), 4);
   PUSH_DATA (push, dx << dst->ms_x);
   PUSH_DATA (push, dy << dst->ms_y);
   PUSH_DATA (push, w << dst->ms_x);
   PUSH_DATA (push, h << dst->ms_y);
   BEGIN_NVC0(push, NVC0_2D(BLIT_DU_DX_FRACT), 4);
   PUSH_DATA (push, duvdxy[2 + ((int)src->ms_x - (int)dst->ms_x)] & 0xf0000000);
   PUSH_DATA (push, duvdxy[2 + ((int)src->ms_x - (int)dst->ms_x)] & 0x0000000f);
   PUSH_DATA (push, duvdxy[2 + ((int)src->ms_y - (int)dst->ms_y)] & 0xf0000000);
   PUSH_DATA (push, duvdxy[2 + ((int)src->ms_y - (int)dst->ms_y)] & 0x0000000f);
   BEGIN_NVC0(push, NVC0_2D(BLIT_SRC_X_FRACT), 4);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, sx << src->ms_x);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, sy << src->ms_y);

   return 0;
}
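/* Start an MP (SM) performance counter query: find free slots among the
 * four hardware counters, then program each counter's signal/unit/mode and
 * reset its value.
 */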
static boolean
nv50_hw_sm_begin_query(struct nv50_context *nv50, struct nv50_hw_query *hq)
{
   struct nv50_screen *screen = nv50->screen;
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_hw_sm_query *hsq = nv50_hw_sm_query(hq);
   const struct nv50_hw_sm_query_cfg *cfg;
   uint16_t func;
   int i, c;

   cfg = nv50_hw_sm_query_get_cfg(nv50, hq);

   /* check if we have enough free counter slots */
   if (screen->pm.num_hw_sm_active + cfg->num_counters > 4) {
      NOUVEAU_ERR("Not enough free MP counter slots !\n");
      return false;
   }

   assert(cfg->num_counters <= 4);
   PUSH_SPACE(push, 4 * 4);

   /* set sequence field to 0 (used to check if result is available) */
   for (i = 0; i < screen->MPsInTP; ++i) {
      const unsigned b = (0x14 / 4) * i;
      hq->data[b + 16] = 0;
   }
   hq->sequence++;

   for (i = 0; i < cfg->num_counters; i++) {
      screen->pm.num_hw_sm_active++;

      /* find free counter slots */
      for (c = 0; c < 4; ++c) {
         if (!screen->pm.mp_counter[c]) {
            hsq->ctr[i] = c;
            screen->pm.mp_counter[c] = hsq;
            break;
         }
      }

      /* select func to aggregate counters */
      func = nv50_hw_sm_get_func(c);

      /* configure and reset the counter(s) */
      BEGIN_NV04(push, NV50_COMPUTE(MP_PM_CONTROL(c)), 1);
      PUSH_DATA (push, (cfg->ctr[i].sig << 24) | (func << 8)
                 | cfg->ctr[i].unit | cfg->ctr[i].mode);
      BEGIN_NV04(push, NV50_COMPUTE(MP_PM_SET(c)), 1);
      PUSH_DATA (push, 0);
   }
   return true;
}
Bool
NV10EXAPrepareComposite(int op,
                        PicturePtr pict_src,
                        PicturePtr pict_mask,
                        PicturePtr pict_dst,
                        PixmapPtr src, PixmapPtr mask, PixmapPtr dst)
{
        ScrnInfoPtr pScrn = xf86ScreenToScrn(dst->drawable.pScreen);
        NVPtr pNv = NVPTR(pScrn);
        struct nouveau_pushbuf *push = pNv->pushbuf;
        uint32_t sc, sa, mc, ma;

        if (!PUSH_SPACE(push, 128))
                return FALSE;
        PUSH_RESET(push);

        /* setup render target and blending */
        if (!setup_render_target(pNv, pict_dst, dst))
                return FALSE;
        setup_blend_function(pNv, pict_dst, op);

        /* select picture sources */
        if (!setup_picture(pNv, pict_src, src, 0, &sc, &sa))
                return FALSE;
        if (!setup_picture(pNv, pict_mask, mask, 1, &mc, &ma))
                return FALSE;

        /* configure register combiners */
        BEGIN_NV04(push, NV10_3D(RC_IN_ALPHA(0)), 1);
        PUSH_DATA (push, sa | ma);
        BEGIN_NV04(push, NV10_3D(RC_IN_RGB(0)), 1);
        if (effective_component_alpha(pict_mask)) {
                if (needs_src_alpha(op))
                        PUSH_DATA(push, sa | mc);
                else
                        PUSH_DATA(push, sc | mc);
        } else {
                PUSH_DATA(push, sc | ma);
        }

        nouveau_pushbuf_bufctx(push, pNv->bufctx);
        if (nouveau_pushbuf_validate(push)) {
                nouveau_pushbuf_bufctx(push, NULL);
                return FALSE;
        }

        pNv->pspict = pict_src;
        pNv->pmpict = pict_mask;
        return TRUE;
}
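/* Emit a QUERY_GET: the hardware writes the requested counter together with
 * the query's sequence number into the query's buffer object at the given
 * offset; the 'get' word selects which counter is reported.
 */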
static void
nv50_query_get(struct nouveau_pushbuf *push, struct nv50_query *q,
               unsigned offset, uint32_t get)
{
   offset += q->offset;

   PUSH_SPACE(push, 5);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_WR);
   BEGIN_NV04(push, NV50_3D(QUERY_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, q->bo->offset + offset);
   PUSH_DATA (push, q->bo->offset + offset);
   PUSH_DATA (push, q->sequence);
   PUSH_DATA (push, get);
}
void
nv84_query_fifo_wait(struct nouveau_pushbuf *push, struct pipe_query *pq)
{
   struct nv50_query *q = nv50_query(pq);
   unsigned offset = q->offset;

   PUSH_SPACE(push, 5);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NV04(push, SUBC_3D(NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, q->bo->offset + offset);
   PUSH_DATA (push, q->bo->offset + offset);
   PUSH_DATA (push, q->sequence);
   PUSH_DATA (push, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
}
void
nvc0_so_target_save_offset(struct pipe_context *pipe,
                           struct pipe_stream_output_target *ptarg,
                           unsigned index, boolean *serialize)
{
   struct nvc0_so_target *targ = nvc0_so_target(ptarg);

   if (*serialize) {
      *serialize = FALSE;
      PUSH_SPACE(nvc0_context(pipe)->base.pushbuf, 1);
      IMMED_NVC0(nvc0_context(pipe)->base.pushbuf, NVC0_3D(SERIALIZE), 0);
   }

   nvc0_query(targ->pq)->index = index;
   nvc0_query_end(pipe, targ->pq);
}
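/* Stall FIFO processing until the query result is available, using a
 * semaphore ACQUIRE_EQUAL on the query's sequence word; for SO_OVERFLOW
 * predicates the sequence lives 0x20 bytes into the allocation.
 */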
void
nvc0_query_fifo_wait(struct nouveau_pushbuf *push, struct pipe_query *pq)
{
   struct nvc0_query *q = nvc0_query(pq);
   unsigned offset = q->offset;

   if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
      offset += 0x20;

   PUSH_SPACE(push, 5);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NVC0(push, SUBC_3D(NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, q->bo->offset + offset);
   PUSH_DATA (push, q->bo->offset + offset);
   PUSH_DATA (push, q->sequence);
   PUSH_DATA (push, (1 << 12) |
              NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
}
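/* Clear a sub-rectangle of a render target by temporarily binding it as the
 * sole RT 0 and issuing CLEAR_BUFFERS for each layer of the surface.
 */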
static void
nvc0_clear_render_target(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         const union pipe_color_union *color,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nv50_surface *sf = nv50_surface(dst);
   struct nv04_resource *res = nv04_resource(sf->base.texture);
   unsigned z;

   if (!PUSH_SPACE(push, 32 + sf->depth))
      return;

   PUSH_REFN (push, res->bo, res->domain | NOUVEAU_BO_WR);

   BEGIN_NVC0(push, NVC0_3D(CLEAR_COLOR(0)), 4);
   PUSH_DATAf(push, color->f[0]);
   PUSH_DATAf(push, color->f[1]);
   PUSH_DATAf(push, color->f[2]);
   PUSH_DATAf(push, color->f[3]);

   BEGIN_NVC0(push, NVC0_3D(SCREEN_SCISSOR_HORIZ), 2);
   PUSH_DATA (push, ( width << 16) | dstx);
   PUSH_DATA (push, (height << 16) | dsty);

   BEGIN_NVC0(push, NVC0_3D(RT_CONTROL), 1);
   PUSH_DATA (push, 1);
   BEGIN_NVC0(push, NVC0_3D(RT_ADDRESS_HIGH(0)), 9);
   PUSH_DATAh(push, res->address + sf->offset);
   PUSH_DATA (push, res->address + sf->offset);
   if (likely(nouveau_bo_memtype(res->bo))) {
      struct nv50_miptree *mt = nv50_miptree(dst->texture);

      PUSH_DATA(push, sf->width);
      PUSH_DATA(push, sf->height);
      PUSH_DATA(push, nvc0_format_table[dst->format].rt);
      PUSH_DATA(push, (mt->layout_3d << 16) |
                mt->level[sf->base.u.tex.level].tile_mode);
      PUSH_DATA(push, dst->u.tex.first_layer + sf->depth);
      PUSH_DATA(push, mt->layer_stride >> 2);
      PUSH_DATA(push, dst->u.tex.first_layer);
   } else {
      if (res->base.target == PIPE_BUFFER) {
         PUSH_DATA(push, 262144);
         PUSH_DATA(push, 1);
      } else {
         PUSH_DATA(push, nv50_miptree(&res->base)->level[0].pitch);
         PUSH_DATA(push, sf->height);
      }
      PUSH_DATA(push, nvc0_format_table[dst->format].rt);
      PUSH_DATA(push, 1 << 12);
      PUSH_DATA(push, 1);
      PUSH_DATA(push, 0);
      PUSH_DATA(push, 0);

      IMMED_NVC0(push, NVC0_3D(ZETA_ENABLE), 0);

      /* tiled textures don't have to be fenced, they're not mapped directly */
      nvc0_resource_fence(res, NOUVEAU_BO_WR);
   }

   BEGIN_NVC0(push, NVC0_3D(VIEWPORT_HORIZ(0)), 2);
   PUSH_DATA (push, (width << 16) | dstx);
   PUSH_DATA (push, (height << 16) | dsty);

   for (z = 0; z < sf->depth; ++z) {
      BEGIN_NVC0(push, NVC0_3D(CLEAR_BUFFERS), 1);
      PUSH_DATA (push, 0x3c |
                 (z << NVC0_3D_CLEAR_BUFFERS_LAYER__SHIFT));
   }

   nvc0->dirty |= NVC0_NEW_FRAMEBUFFER;
}
void
nva0_so_target_save_offset(struct pipe_context *pipe,
                           struct pipe_stream_output_target *ptarg,
                           unsigned index, boolean serialize)
{
   struct nv50_so_target *targ = nv50_so_target(ptarg);

   if (serialize) {
      struct nouveau_pushbuf *push = nv50_context(pipe)->base.pushbuf;
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   nv50_query(targ->pq)->index = index;
   nv50_query_end(pipe, targ->pq);
}
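/* Upload 'size' bytes of inline data into a buffer object through the M2MF
 * engine, chunked so each transfer fits the remaining pushbuf space and the
 * maximum method count.
 */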
void
nvc0_m2mf_push_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned offset, unsigned domain,
                      unsigned size, const void *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nouveau_pushbuf *push = nv->pushbuf;
   uint32_t *src = (uint32_t *)data;
   unsigned count = (size + 3) / 4;

   nouveau_bufctx_refn(nvc0->bufctx, 0, dst, domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, nvc0->bufctx);
   nouveau_pushbuf_validate(push);

   while (count) {
      unsigned nr;

      if (!PUSH_SPACE(push, 16))
         break;
      nr = PUSH_AVAIL(push);
      assert(nr >= 16);
      nr = MIN2(count, nr - 9);
      nr = MIN2(nr, NV04_PFIFO_MAX_PACKET_LEN);

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->offset + offset);
      PUSH_DATA (push, dst->offset + offset);
      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      PUSH_DATA (push, MIN2(size, nr * 4));
      PUSH_DATA (push, 1);
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      PUSH_DATA (push, 0x100111);

      /* must not be interrupted (trap on QUERY fence, 0x50 works however) */
      BEGIN_NIC0(push, NVC0_M2MF(DATA), nr);
      PUSH_DATAp(push, src, nr);

      count -= nr;
      src += nr;
      offset += nr * 4;
      size -= nr * 4;
   }

   nouveau_bufctx_reset(nvc0->bufctx, 0);
}
boolean
nve4_validate_tsc(struct nvc0_context *nvc0, int s)
{
   struct nouveau_bo *txc = nvc0->screen->txc;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned i;
   boolean need_flush = FALSE;

   for (i = 0; i < nvc0->num_samplers[s]; ++i) {
      struct nv50_tsc_entry *tsc = nv50_tsc_entry(nvc0->samplers[s][i]);

      if (!tsc) {
         nvc0->tex_handles[s][i] |= NVE4_TSC_ENTRY_INVALID;
         continue;
      }
      if (tsc->id < 0) {
         tsc->id = nvc0_screen_tsc_alloc(nvc0->screen, tsc);

         PUSH_SPACE(push, 16);
         BEGIN_NVC0(push, NVE4_P2MF(DST_ADDRESS_HIGH), 2);
         PUSH_DATAh(push, txc->offset + 65536 + (tsc->id * 32));
         PUSH_DATA (push, txc->offset + 65536 + (tsc->id * 32));
         BEGIN_NVC0(push, NVE4_P2MF(LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 1);
         BEGIN_1IC0(push, NVE4_P2MF(EXEC), 9);
         PUSH_DATA (push, 0x1001);
         PUSH_DATAp(push, &tsc->tsc[0], 8);

         need_flush = TRUE;
      }
      nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);

      nvc0->tex_handles[s][i] &= ~NVE4_TSC_ENTRY_INVALID;
      nvc0->tex_handles[s][i] |= tsc->id << 20;
   }
   for (; i < nvc0->state.num_samplers[s]; ++i) {
      nvc0->tex_handles[s][i] |= NVE4_TSC_ENTRY_INVALID;
      nvc0->samplers_dirty[s] |= 1 << i;
   }

   nvc0->state.num_samplers[s] = nvc0->num_samplers[s];

   return need_flush;
}
static void
nv50_query_end(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      nv50_query_get(push, q, 0, 0x0100f002);
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
      PUSH_DATA (push, 0);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nv50_query_get(push, q, 0, 0x06805002);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nv50_query_get(push, q, 0, 0x05805002);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nv50_query_get(push, q, 0x00, 0x05805002);
      nv50_query_get(push, q, 0x10, 0x06805002);
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->sequence++;
      /* fall through */
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_query_get(push, q, 0, 0x00005002);
      break;
   case PIPE_QUERY_GPU_FINISHED:
      q->sequence++;
      nv50_query_get(push, q, 0, 0x1000f010);
      break;
   case NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET:
      nv50_query_get(push, q, 0, 0x0d005002 | (q->index << 5));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      break;
   default:
      assert(0);
      break;
   }
   q->ready = q->flushed = FALSE;
}
static void
nvc0_so_target_save_offset(struct pipe_context *pipe,
                           struct pipe_stream_output_target *ptarg,
                           unsigned index, bool *serialize)
{
   struct nvc0_so_target *targ = nvc0_so_target(ptarg);

   if (*serialize) {
      *serialize = false;
      PUSH_SPACE(nvc0_context(pipe)->base.pushbuf, 1);
      IMMED_NVC0(nvc0_context(pipe)->base.pushbuf, NVC0_3D(SERIALIZE), 0);

      NOUVEAU_DRV_STAT(nouveau_screen(pipe->screen), gpu_serialize_count, 1);
   }

   nvc0_query(targ->pq)->index = index;
   pipe->end_query(pipe, targ->pq);
}
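/* 1:1 blit between two (possibly multisampled) surfaces using the 2D
 * engine: du/dx and dv/dy are programmed as integer 1 with zero fraction,
 * and all coordinates are scaled by the per-axis MS sample shifts.
 */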
static int
nvc0_2d_texture_do_copy(struct nouveau_pushbuf *push,
                        struct nv50_miptree *dst, unsigned dst_level,
                        unsigned dx, unsigned dy, unsigned dz,
                        struct nv50_miptree *src, unsigned src_level,
                        unsigned sx, unsigned sy, unsigned sz,
                        unsigned w, unsigned h)
{
   const enum pipe_format dfmt = dst->base.base.format;
   const enum pipe_format sfmt = src->base.base.format;
   int ret;
   boolean eqfmt = dfmt == sfmt;

   if (!PUSH_SPACE(push, 2 * 16 + 32))
      return PIPE_ERROR;

   ret = nvc0_2d_texture_set(push, TRUE, dst, dst_level, dz, dfmt, eqfmt);
   if (ret)
      return ret;

   ret = nvc0_2d_texture_set(push, FALSE, src, src_level, sz, sfmt, eqfmt);
   if (ret)
      return ret;

   IMMED_NVC0(push, NVC0_2D(BLIT_CONTROL), 0x00);
   BEGIN_NVC0(push, NVC0_2D(BLIT_DST_X), 4);
   PUSH_DATA (push, dx << dst->ms_x);
   PUSH_DATA (push, dy << dst->ms_y);
   PUSH_DATA (push, w << dst->ms_x);
   PUSH_DATA (push, h << dst->ms_y);
   BEGIN_NVC0(push, NVC0_2D(BLIT_DU_DX_FRACT), 4);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 1);
   BEGIN_NVC0(push, NVC0_2D(BLIT_SRC_X_FRACT), 4);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, sx << src->ms_x);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, sy << src->ms_y);

   return 0;
}
void
NV10EXAComposite(PixmapPtr pix_dst, int sx, int sy, int mx, int my,
                 int dx, int dy, int w, int h)
{
        ScrnInfoPtr pScrn = xf86ScreenToScrn(pix_dst->drawable.pScreen);
        NVPtr pNv = NVPTR(pScrn);
        struct nouveau_pushbuf *push = pNv->pushbuf;

        if (!PUSH_SPACE(push, 64))
                return;

        BEGIN_NV04(push, NV10_3D(VERTEX_BEGIN_END), 1);
        PUSH_DATA (push, NV10_3D_VERTEX_BEGIN_END_QUADS);
        PUSH_VTX2s(push, sx, sy, mx, my, dx, dy);
        PUSH_VTX2s(push, sx + w, sy, mx + w, my, dx + w, dy);
        PUSH_VTX2s(push, sx + w, sy + h, mx + w, my + h, dx + w, dy + h);
        PUSH_VTX2s(push, sx, sy + h, mx, my + h, dx, dy + h);
        BEGIN_NV04(push, NV10_3D(VERTEX_BEGIN_END), 1);
        PUSH_DATA (push, NV10_3D_VERTEX_BEGIN_END_STOP);
}
void
nve4_p2mf_push_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned offset, unsigned domain,
                      unsigned size, const void *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nouveau_pushbuf *push = nv->pushbuf;
   uint32_t *src = (uint32_t *)data;
   unsigned count = (size + 3) / 4;

   nouveau_bufctx_refn(nvc0->bufctx, 0, dst, domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, nvc0->bufctx);
   nouveau_pushbuf_validate(push);

   while (count) {
      unsigned nr = MIN2(count, (NV04_PFIFO_MAX_PACKET_LEN - 1));

      if (!PUSH_SPACE(push, nr + 10))
         break;

      BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, dst->offset + offset);
      PUSH_DATA (push, dst->offset + offset);
      BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, MIN2(size, nr * 4));
      PUSH_DATA (push, 1);
      /* must not be interrupted (trap on QUERY fence, 0x50 works however) */
      BEGIN_1IC0(push, NVE4_P2MF(UPLOAD_EXEC), nr + 1);
      PUSH_DATA (push, 0x1001);
      PUSH_DATAp(push, src, nr);

      count -= nr;
      src += nr;
      offset += nr * 4;
      size -= nr * 4;
   }

   nouveau_bufctx_reset(nvc0->bufctx, 0);
}
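/* Re-bind dirty constant buffers for VP/GP/FP: user constbufs (supported in
 * slot 0 only) are pushed inline through CB_ADDR/CB_DATA, while
 * buffer-backed ones are defined via CB_DEF_ADDRESS and enabled with
 * SET_PROGRAM_CB.
 */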
void
nv50_constbufs_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   unsigned s;

   for (s = 0; s < 3; ++s) {
      unsigned p;

      if (s == PIPE_SHADER_FRAGMENT)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
      else
      if (s == PIPE_SHADER_GEOMETRY)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
      else
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;

      while (nv50->constbuf_dirty[s]) {
         const unsigned i = (unsigned)ffs(nv50->constbuf_dirty[s]) - 1;

         assert(i < NV50_MAX_PIPE_CONSTBUFS);
         nv50->constbuf_dirty[s] &= ~(1 << i);

         if (nv50->constbuf[s][i].user) {
            const unsigned b = NV50_CB_PVP + s;
            unsigned start = 0;
            unsigned words = nv50->constbuf[s][0].size / 4;
            if (i) {
               NOUVEAU_ERR("user constbufs only supported in slot 0\n");
               continue;
            }
            if (!nv50->state.uniform_buffer_bound[s]) {
               nv50->state.uniform_buffer_bound[s] = TRUE;
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
            }
            while (words) {
               unsigned nr;

               if (!PUSH_SPACE(push, 16))
                  break;
               nr = PUSH_AVAIL(push);
               assert(nr >= 16);
               nr = MIN2(MIN2(nr - 3, words), NV04_PFIFO_MAX_PACKET_LEN);

               BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
               PUSH_DATA (push, (start << 8) | b);
               BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nr);
               PUSH_DATAp(push, &nv50->constbuf[s][0].u.data[start * 4], nr);

               start += nr;
               words -= nr;
            }
         } else {
            struct nv04_resource *res =
               nv04_resource(nv50->constbuf[s][i].u.buf);
            if (res) {
               /* TODO: allocate persistent bindings */
               const unsigned b = s * 16 + i;

               assert(nouveau_resource_mapped_by_gpu(&res->base));

               if (!nv50->constbuf[s][i].offset)
                  res->cb_slot = b;

               BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
               PUSH_DATAh(push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, (b << 16) |
                          (align(nv50->constbuf[s][i].size, 0x100) & 0xffff));
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);

               BCTX_REFN(nv50->bufctx_3d, CB(s, i), res, RD);
            } else {
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (i << 8) | p | 0);
            }
            if (i == 0)
               nv50->state.uniform_buffer_bound[s] = FALSE;
         }
      }
   }
}
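/* Validate texture image control (TIC) entries: newly allocated entries are
 * uploaded into the TIC area via M2MF, texture caches are flushed for
 * GPU-written resources, and the accumulated BIND_TIC commands (re)bind or
 * unbind each slot.
 */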
static boolean
nvc0_validate_tic(struct nvc0_context *nvc0, int s)
{
   uint32_t commands[32];
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *txc = nvc0->screen->txc;
   unsigned i;
   unsigned n = 0;
   boolean need_flush = FALSE;

   for (i = 0; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
      struct nv04_resource *res;
      const boolean dirty = !!(nvc0->textures_dirty[s] & (1 << i));

      if (!tic) {
         if (dirty)
            commands[n++] = (i << 1) | 0;
         continue;
      }
      res = nv04_resource(tic->pipe.texture);

      if (tic->id < 0) {
         tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);

         PUSH_SPACE(push, 17);
         BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
         PUSH_DATAh(push, txc->offset + (tic->id * 32));
         PUSH_DATA (push, txc->offset + (tic->id * 32));
         BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 1);
         BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
         PUSH_DATA (push, 0x100111);
         BEGIN_NIC0(push, NVC0_M2MF(DATA), 8);
         PUSH_DATAp(push, &tic->tic[0], 8);

         need_flush = TRUE;
      } else
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
         PUSH_DATA (push, (tic->id << 4) | 1);
      }
      nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);

      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;

      if (!dirty)
         continue;
      commands[n++] = (tic->id << 9) | (i << 1) | 1;

      BCTX_REFN(nvc0->bufctx_3d, TEX(s, i), res, RD);
   }
   for (; i < nvc0->state.num_textures[s]; ++i)
      commands[n++] = (i << 1) | 0;

   nvc0->state.num_textures[s] = nvc0->num_textures[s];

   if (n) {
      BEGIN_NIC0(push, NVC0_3D(BIND_TIC(s)), n);
      PUSH_DATAp(push, commands, n);
   }
   nvc0->textures_dirty[s] = 0;

   return need_flush;
}
static void
nv50_render_condition(struct pipe_context *pipe,
                      struct pipe_query *pq,
                      boolean condition, uint mode)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q;
   uint32_t cond;
   boolean wait =
      mode != PIPE_RENDER_COND_NO_WAIT &&
      mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   if (!pq) {
      cond = NV50_3D_COND_MODE_ALWAYS;
   } else {
      q = nv50_query(pq);
      /* NOTE: comparison of 2 queries only works if both have completed */
      switch (q->type) {
      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
         cond = condition ? NV50_3D_COND_MODE_EQUAL :
                            NV50_3D_COND_MODE_NOT_EQUAL;
         wait = TRUE;
         break;
      case PIPE_QUERY_OCCLUSION_COUNTER:
      case PIPE_QUERY_OCCLUSION_PREDICATE:
         if (likely(!condition)) {
            /* XXX: Placeholder, handle nesting here if available */
            if (unlikely(false))
               cond = wait ? NV50_3D_COND_MODE_NOT_EQUAL :
                             NV50_3D_COND_MODE_ALWAYS;
            else
               cond = NV50_3D_COND_MODE_RES_NON_ZERO;
         } else {
            cond = wait ? NV50_3D_COND_MODE_EQUAL : NV50_3D_COND_MODE_ALWAYS;
         }
         break;
      default:
         assert(!"render condition query not a predicate");
         cond = NV50_3D_COND_MODE_ALWAYS;
         break;
      }
   }

   nv50->cond_query = pq;
   nv50->cond_cond = condition;
   nv50->cond_condmode = cond;
   nv50->cond_mode = mode;

   if (!pq) {
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, NV50_3D(COND_MODE), 1);
      PUSH_DATA (push, cond);
      return;
   }

   PUSH_SPACE(push, 9);

   if (wait) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NV04(push, NV50_3D(COND_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
   PUSH_DATA (push, cond);

   BEGIN_NV04(push, NV50_2D(COND_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
}
Bool
NV40EXAPrepareComposite(int op, PicturePtr psPict, PicturePtr pmPict,
                        PicturePtr pdPict, PixmapPtr psPix, PixmapPtr pmPix,
                        PixmapPtr pdPix)
{
        ScrnInfoPtr pScrn = xf86ScreenToScrn(pdPix->drawable.pScreen);
        NVPtr pNv = NVPTR(pScrn);
        nv_pict_op_t *blend = NV40_GetPictOpRec(op);
        struct nouveau_pushbuf *push = pNv->pushbuf;
        uint32_t fragprog;

        if (!PUSH_SPACE(push, 128))
                NOUVEAU_FALLBACK("space\n");
        PUSH_RESET(push);

        NV40_SetupBlend(pScrn, blend, pdPict->format,
                        (pmPict && pmPict->componentAlpha &&
                         PICT_FORMAT_RGB(pmPict->format)));

        if (!NV40_SetupSurface(pScrn, pdPix, pdPict->format) ||
            !NV40EXAPicture(pNv, psPix, psPict, 0))
                return FALSE;

        if (pmPict) {
                if (!NV40EXAPicture(pNv, pmPix, pmPict, 1))
                        return FALSE;

                if (pdPict->format == PICT_a8) {
                        fragprog = PFP_C_A8;
                } else
                if (pmPict->componentAlpha &&
                    PICT_FORMAT_RGB(pmPict->format)) {
                        if (blend->src_alpha)
                                fragprog = PFP_CCASA;
                        else
                                fragprog = PFP_CCA;
                } else {
                        fragprog = PFP_C;
                }
        } else {
                if (pdPict->format == PICT_a8)
                        fragprog = PFP_S_A8;
                else
                        fragprog = PFP_S;
        }

        BEGIN_NV04(push, NV30_3D(FP_ACTIVE_PROGRAM), 1);
        PUSH_MTHD (push, NV30_3D(FP_ACTIVE_PROGRAM),
                   pNv->scratch, fragprog, NOUVEAU_BO_VRAM |
                   NOUVEAU_BO_RD | NOUVEAU_BO_LOW | NOUVEAU_BO_OR,
                   NV30_3D_FP_ACTIVE_PROGRAM_DMA0,
                   NV30_3D_FP_ACTIVE_PROGRAM_DMA1);
        BEGIN_NV04(push, NV30_3D(FP_CONTROL), 1);
        PUSH_DATA (push, 0x02000000);

        /* Appears to be some kind of cache flush, needed here at least
         * sometimes.. funky text rendering otherwise :)
         */
        BEGIN_NV04(push, NV40_3D(TEX_CACHE_CTL), 1);
        PUSH_DATA (push, 2);
        BEGIN_NV04(push, NV40_3D(TEX_CACHE_CTL), 1);
        PUSH_DATA (push, 1);

        nouveau_pushbuf_bufctx(push, pNv->bufctx);
        if (nouveau_pushbuf_validate(push)) {
                nouveau_pushbuf_bufctx(push, NULL);
                return FALSE;
        }
        return TRUE;
}
static boolean
nv50_query_begin(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);

   /* For occlusion queries we have to change the storage, because a previous
    * query might set the initial render condition to FALSE even *after* we re-
    * initialized it to TRUE.
    */
   if (q->type == PIPE_QUERY_OCCLUSION_COUNTER) {
      q->offset += 32;
      q->data += 32 / sizeof(*q->data);
      if (q->offset - q->base == NV50_QUERY_ALLOC_SPACE)
         nv50_query_allocate(nv50, q, NV50_QUERY_ALLOC_SPACE);

      /* XXX: can we do this with the GPU, and sync with respect to a previous
       * query ?
       */
      q->data[0] = q->sequence; /* initialize sequence */
      q->data[1] = 1; /* initial render condition = TRUE */
      q->data[4] = q->sequence + 1; /* for comparison COND_MODE */
      q->data[5] = 0;
   }
   if (!q->is64bit)
      q->data[0] = q->sequence++; /* the previously used one */

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      PUSH_SPACE(push, 4);
      BEGIN_NV04(push, NV50_3D(COUNTER_RESET), 1);
      PUSH_DATA (push, NV50_3D_COUNTER_RESET_SAMPLECNT);
      BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
      PUSH_DATA (push, 1);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nv50_query_get(push, q, 0x10, 0x06805002);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nv50_query_get(push, q, 0x10, 0x05805002);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nv50_query_get(push, q, 0x20, 0x05805002);
      nv50_query_get(push, q, 0x30, 0x06805002);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      nv50_query_get(push, q, 0x80, 0x00801002); /* VFETCH, VERTICES */
      nv50_query_get(push, q, 0x90, 0x01801002); /* VFETCH, PRIMS */
      nv50_query_get(push, q, 0xa0, 0x02802002); /* VP, LAUNCHES */
      nv50_query_get(push, q, 0xb0, 0x03806002); /* GP, LAUNCHES */
      nv50_query_get(push, q, 0xc0, 0x04806002); /* GP, PRIMS_OUT */
      nv50_query_get(push, q, 0xd0, 0x07804002); /* RAST, PRIMS_IN */
      nv50_query_get(push, q, 0xe0, 0x08804002); /* RAST, PRIMS_OUT */
      nv50_query_get(push, q, 0xf0, 0x0980a002); /* ROP, PIXELS */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_query_get(push, q, 0x10, 0x00005002);
      break;
   default:
      break;
   }
   q->ready = FALSE;
   return true;
}