Example 1
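/* Flush one sub-range of a buffer transfer out to the resource's BO. When a
 * CPU shadow copy exists the user wrote into it, so it is first copied into
 * the transfer map; otherwise the buffer is merely marked dirty. The upload
 * then uses the copy engine (staging BO present), the constant-buffer push
 * path (4-byte aligned constbuf ranges), or an inline data push.
 */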
static void
nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx,
                       unsigned offset, unsigned size)
{
   struct nv04_resource *buf = nv04_resource(tx->base.resource);
   uint8_t *data = tx->map + offset;
   const unsigned base = tx->base.box.x + offset;
   const boolean can_cb = !((base | size) & 3);

   if (buf->data)
      memcpy(data, buf->data + base, size);
   else
      buf->status |= NOUVEAU_BUFFER_STATUS_DIRTY;

   if (tx->bo)
      nv->copy_data(nv, buf->bo, buf->offset + base, buf->domain,
                    tx->bo, tx->offset + offset, NOUVEAU_BO_GART, size);
   else
   if ((buf->base.bind & PIPE_BIND_CONSTANT_BUFFER) && nv->push_cb && can_cb)
      nv->push_cb(nv, buf->bo, buf->domain, buf->offset, buf->base.width0,
                  base, size / 4, (const uint32_t *)data);
   else
      nv->push_data(nv, buf->bo, buf->offset + base, buf->domain, size, data);
}
Example 2
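/* vbuf_render backend for the draw-module fallback: bind the temporary
 * vertex buffer for every attribute, validate state, then emit the primitive
 * as VB_VERTEX_BATCH packets of at most 256 vertices each, encoding
 * (count - 1) in the top byte and the start index in the low bits, all
 * bracketed by VERTEX_BEGIN_END.
 */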
static void
nv30_render_draw_arrays(struct vbuf_render *render, unsigned start, uint nr)
{
   struct nv30_render *r = nv30_render(render);
   struct nv30_context *nv30 = r->nv30;
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   unsigned fn = nr >> 8, pn = nr & 0xff;
   unsigned ps = fn + (pn ? 1 : 0);
   unsigned i;

   BEGIN_NV04(push, NV30_3D(VTXBUF(0)), r->vertex_info.num_attribs);
   for (i = 0; i < r->vertex_info.num_attribs; i++) {
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), BUFCTX_VTXTMP,
                       nv04_resource(r->buffer), r->offset + r->vtxptr[i],
                       NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, NV30_3D_VTXBUF_DMA1);
   }

   if (!nv30_state_validate(nv30, FALSE))
      return;

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, r->prim);

   BEGIN_NI04(push, NV30_3D(VB_VERTEX_BATCH), ps);
   while (fn--) {
      PUSH_DATA (push, 0xff000000 | start);
      start += 256;
   }

   if (pn)
      PUSH_DATA (push, ((pn - 1) << 24) | start);

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
   PUSH_RESET(push, BUFCTX_VTXTMP);
}
Example 3
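/* Write the GPU address and size of every bound shader buffer for the
 * compute stage (s = 5) into the driver's auxiliary constant buffer,
 * reference each bound resource for read/write in the compute bufctx and
 * grow its valid range; unbound slots are cleared to zero.
 */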
static void
nvc0_compute_validate_buffers(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_screen *screen = nvc0->screen;
   const int s = 5;
   int i;

   BEGIN_NVC0(push, NVC0_CP(CB_SIZE), 3);
   PUSH_DATA (push, NVC0_CB_AUX_SIZE);
   PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
   PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
   BEGIN_1IC0(push, NVC0_CP(CB_POS), 1 + 4 * NVC0_MAX_BUFFERS);
   PUSH_DATA (push, NVC0_CB_AUX_BUF_INFO(0));

   for (i = 0; i < NVC0_MAX_BUFFERS; i++) {
      if (nvc0->buffers[s][i].buffer) {
         struct nv04_resource *res =
            nv04_resource(nvc0->buffers[s][i].buffer);
         PUSH_DATA (push, res->address + nvc0->buffers[s][i].buffer_offset);
         PUSH_DATAh(push, res->address + nvc0->buffers[s][i].buffer_offset);
         PUSH_DATA (push, nvc0->buffers[s][i].buffer_size);
         PUSH_DATA (push, 0);
         BCTX_REFN(nvc0->bufctx_cp, CP_BUF, res, RDWR);
         util_range_add(&res->valid_buffer_range,
                        nvc0->buffers[s][i].buffer_offset,
                        nvc0->buffers[s][i].buffer_offset +
                        nvc0->buffers[s][i].buffer_size);
      } else {
         PUSH_DATA (push, 0);
         PUSH_DATA (push, 0);
         PUSH_DATA (push, 0);
         PUSH_DATA (push, 0);
      }
   }
}
Example 4
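/* Draw-module (software TCL) fallback: propagate whatever state changed
 * since the last fallback draw into the draw context, map the vertex and
 * index buffers (unsynchronized, read-only), run draw_vbo()/draw_flush(),
 * then unmap everything and clear the draw_dirty bits.
 */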
void
nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct draw_context *draw = nv30->draw;
   struct pipe_transfer *transfer[PIPE_MAX_ATTRIBS] = {NULL};
   struct pipe_transfer *transferi = NULL;
   int i;

   nv30_render_validate(nv30);

   if (nv30->draw_dirty & NV30_NEW_VIEWPORT)
      draw_set_viewport_states(draw, 0, 1, &nv30->viewport);
   if (nv30->draw_dirty & NV30_NEW_RASTERIZER)
      draw_set_rasterizer_state(draw, &nv30->rast->pipe, NULL);
   if (nv30->draw_dirty & NV30_NEW_CLIP)
      draw_set_clip_state(draw, &nv30->clip);
   if (nv30->draw_dirty & NV30_NEW_ARRAYS) {
      draw_set_vertex_buffers(draw, 0, nv30->num_vtxbufs, nv30->vtxbuf);
      draw_set_vertex_elements(draw, nv30->vertex->num_elements, nv30->vertex->pipe);
   }
   if (nv30->draw_dirty & NV30_NEW_FRAGPROG) {
      struct nv30_fragprog *fp = nv30->fragprog.program;
      if (!fp->draw)
         fp->draw = draw_create_fragment_shader(draw, &fp->pipe);
      draw_bind_fragment_shader(draw, fp->draw);
   }
   if (nv30->draw_dirty & NV30_NEW_VERTPROG) {
      struct nv30_vertprog *vp = nv30->vertprog.program;
      if (!vp->draw)
         vp->draw = draw_create_vertex_shader(draw, &vp->pipe);
      draw_bind_vertex_shader(draw, vp->draw);
   }
   if (nv30->draw_dirty & NV30_NEW_VERTCONST) {
      if (nv30->vertprog.constbuf) {
         void *map = nv04_resource(nv30->vertprog.constbuf)->data;
         draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0,
                                         map, nv30->vertprog.constbuf_nr * 16);
      } else {
         draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0, NULL, 0);
      }
   }

   for (i = 0; i < nv30->num_vtxbufs; i++) {
      const void *map = nv30->vtxbuf[i].user_buffer;
      if (!map) {
         if (nv30->vtxbuf[i].buffer)
            map = pipe_buffer_map(pipe, nv30->vtxbuf[i].buffer,
                                  PIPE_TRANSFER_UNSYNCHRONIZED |
                                  PIPE_TRANSFER_READ, &transfer[i]);
      }
      draw_set_mapped_vertex_buffer(draw, i, map, ~0);
   }

   if (info->indexed) {
      const void *map = nv30->idxbuf.user_buffer;
      if (!map)
         map = pipe_buffer_map(pipe, nv30->idxbuf.buffer,
                               PIPE_TRANSFER_UNSYNCHRONIZED |
                               PIPE_TRANSFER_READ, &transferi);
      draw_set_indexes(draw,
                       (ubyte *) map + nv30->idxbuf.offset,
                       nv30->idxbuf.index_size, ~0);
   } else {
      draw_set_indexes(draw, NULL, 0, 0);
   }

   draw_vbo(draw, info);
   draw_flush(draw);

   if (info->indexed && transferi)
      pipe_buffer_unmap(pipe, transferi);
   for (i = 0; i < nv30->num_vtxbufs; i++)
      if (transfer[i])
         pipe_buffer_unmap(pipe, transfer[i]);

   nv30->draw_dirty = 0;
   nv30_state_release(nv30);
}
Example 5
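/* Validate the fragment program: translate it on first use, patch values
 * from the bound constbuf into the instruction stream (fragprog constants
 * are immediates here), re-upload when anything changed, and re-emit
 * FP_ACTIVE_PROGRAM / FP_CONTROL so the GPU re-reads the program.
 */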
void
nv30_fragprog_validate(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nouveau_object *eng3d = nv30->screen->eng3d;
   struct nv30_fragprog *fp = nv30->fragprog.program;
   bool upload = false;
   int i;

   if (!fp->translated) {
      _nvfx_fragprog_translate(eng3d->oclass, fp);
      if (!fp->translated)
         return;

      upload = true;
   }

   /* update constants, also needs to be done on every fp switch as we
    * have no idea whether the constbuf changed in the meantime
    */
   if (nv30->fragprog.constbuf) {
      struct pipe_resource *constbuf = nv30->fragprog.constbuf;
      uint32_t *cbuf = (uint32_t *)nv04_resource(constbuf)->data;

      for (i = 0; i < fp->nr_consts; i++) {
         unsigned off = fp->consts[i].offset;
         unsigned idx = fp->consts[i].index * 4;

         if (!memcmp(&fp->insn[off], &cbuf[idx], 4 * 4))
            continue;
         memcpy(&fp->insn[off], &cbuf[idx], 4 * 4);
         upload = true;
      }
   }

   if (upload)
      nv30_fragprog_upload(nv30);

   /* FP_ACTIVE_PROGRAM needs to be done again even if only the consts
    * were updated.  TEX_CACHE_CTL magic is not enough to convince the
    * GPU that it should re-read the fragprog from VRAM... sigh.
    */
   if (nv30->state.fragprog != fp || upload) {
      struct nv04_resource *r = nv04_resource(fp->buffer);

      if (!PUSH_SPACE(push, 8))
         return;
      PUSH_RESET(push, BUFCTX_FRAGPROG);

      BEGIN_NV04(push, NV30_3D(FP_ACTIVE_PROGRAM), 1);
      PUSH_RESRC(push, NV30_3D(FP_ACTIVE_PROGRAM), BUFCTX_FRAGPROG, r, 0,
                       NOUVEAU_BO_LOW | NOUVEAU_BO_RD | NOUVEAU_BO_OR,
                       NV30_3D_FP_ACTIVE_PROGRAM_DMA0,
                       NV30_3D_FP_ACTIVE_PROGRAM_DMA1);
      BEGIN_NV04(push, NV30_3D(FP_CONTROL), 1);
      PUSH_DATA (push, fp->fp_control);
      if (eng3d->oclass < NV40_3D_CLASS) {
         BEGIN_NV04(push, NV30_3D(FP_REG_CONTROL), 1);
         PUSH_DATA (push, 0x00010004);
         BEGIN_NV04(push, NV30_3D(TEX_UNITS_ENABLE), 1);
         PUSH_DATA (push, fp->texcoords);
      } else {
         BEGIN_NV04(push, SUBC_3D(0x0b40), 1);
         PUSH_DATA (push, 0x00000000);
      }

      nv30->state.fragprog = fp;
   }
}
Example 6
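/* Validate transform feedback: take the TFB state from the last enabled
 * shader stage, toggle TFB_ENABLE, reprogram per-buffer varying counts,
 * strides and varying locations when the state object changed, and, if the
 * targets are dirty, rebind each buffer, resuming at the query-tracked
 * offset for targets that were already written.
 */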
void
nvc0_tfb_validate(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_transform_feedback_state *tfb;
   unsigned b;

   if (nvc0->gmtyprog) tfb = nvc0->gmtyprog->tfb;
   else
   if (nvc0->tevlprog) tfb = nvc0->tevlprog->tfb;
   else
      tfb = nvc0->vertprog->tfb;

   IMMED_NVC0(push, NVC0_3D(TFB_ENABLE), (tfb && nvc0->num_tfbbufs) ? 1 : 0);

   if (tfb && tfb != nvc0->state.tfb) {
      for (b = 0; b < 4; ++b) {
         if (tfb->varying_count[b]) {
            unsigned n = (tfb->varying_count[b] + 3) / 4;

            BEGIN_NVC0(push, NVC0_3D(TFB_STREAM(b)), 3);
            PUSH_DATA (push, 0);
            PUSH_DATA (push, tfb->varying_count[b]);
            PUSH_DATA (push, tfb->stride[b]);
            BEGIN_NVC0(push, NVC0_3D(TFB_VARYING_LOCS(b, 0)), n);
            PUSH_DATAp(push, tfb->varying_index[b], n);

            if (nvc0->tfbbuf[b])
               nvc0_so_target(nvc0->tfbbuf[b])->stride = tfb->stride[b];
         } else {
            IMMED_NVC0(push, NVC0_3D(TFB_VARYING_COUNT(b)), 0);
         }
      }
   }
   nvc0->state.tfb = tfb;

   if (!(nvc0->dirty & NVC0_NEW_TFB_TARGETS))
      return;
   nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_TFB);

   for (b = 0; b < nvc0->num_tfbbufs; ++b) {
      struct nvc0_so_target *targ = nvc0_so_target(nvc0->tfbbuf[b]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      if (tfb)
         targ->stride = tfb->stride[b];

      if (!(nvc0->tfbbuf_dirty & (1 << b)))
         continue;

      if (!targ->clean)
         nvc0_query_fifo_wait(push, targ->pq);
      BEGIN_NVC0(push, NVC0_3D(TFB_BUFFER_ENABLE(b)), 5);
      PUSH_DATA (push, 1);
      PUSH_DATAh(push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, targ->pipe.buffer_size);
      if (!targ->clean) {
         nvc0_query_pushbuf_submit(push, targ->pq, 0x4);
      } else {
         PUSH_DATA(push, 0); /* TFB_BUFFER_OFFSET */
         targ->clean = FALSE;
      }
      BCTX_REFN(nvc0->bufctx_3d, TFB, buf, WR);
   }
   for (; b < 4; ++b)
      IMMED_NVC0(push, NVC0_3D(TFB_BUFFER_ENABLE(b)), 0);
}
Example 7
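/* Process the dirty constant-buffer mask of each shader stage. Slot 0 (GL
 * uniforms) is streamed inline through CB_ADDR / CB_DATA into the screen's
 * uniform buffer; other slots are bound as buffer objects with
 * CB_DEF_ADDRESS_* and SET_PROGRAM_CB (migrating the resource to VRAM first
 * if necessary), or unbound when no resource is attached.
 */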
void
nv50_constbufs_validate(struct nv50_context *nv50)
{
   struct nouveau_channel *chan = nv50->screen->base.channel;
   unsigned s;

   for (s = 0; s < 3; ++s) {
      struct nv04_resource *res;
      int i;
      unsigned p, b;

      if (s == PIPE_SHADER_FRAGMENT)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
      else
      if (s == PIPE_SHADER_GEOMETRY)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
      else
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;

      while (nv50->constbuf_dirty[s]) {
         struct nouveau_bo *bo;
         unsigned start = 0;
         unsigned words = 0;

         i = ffs(nv50->constbuf_dirty[s]) - 1;
         nv50->constbuf_dirty[s] &= ~(1 << i);

         res = nv04_resource(nv50->constbuf[s][i]);
         if (!res) {
            if (i != 0) {
               BEGIN_RING(chan, RING_3D(SET_PROGRAM_CB), 1);
               OUT_RING  (chan, (i << 8) | p | 0);
            }
            continue;
         }

         if (i == 0) {
            b = NV50_CB_PVP + s;

            /* always upload GL uniforms through CB DATA */
            bo = nv50->screen->uniforms;
            words = res->base.width0 / 4;
         } else {
            b = s * 16 + i;

            assert(0);

            if (!nouveau_resource_mapped_by_gpu(&res->base)) {
               nouveau_buffer_migrate(&nv50->base, res, NOUVEAU_BO_VRAM);

               BEGIN_RING(chan, RING_3D(CODE_CB_FLUSH), 1);
               OUT_RING  (chan, 0);
            }
            MARK_RING (chan, 6, 2);
            BEGIN_RING(chan, RING_3D(CB_DEF_ADDRESS_HIGH), 3);
            OUT_RESRCh(chan, res, 0, NOUVEAU_BO_RD);
            OUT_RESRCl(chan, res, 0, NOUVEAU_BO_RD);
            OUT_RING  (chan, (b << 16) | (res->base.width0 & 0xffff));
            BEGIN_RING(chan, RING_3D(SET_PROGRAM_CB), 1);
            OUT_RING  (chan, (b << 12) | (i << 8) | p | 1);

            bo = res->bo;

            nv50_bufctx_add_resident(nv50, NV50_BUFCTX_CONSTANT, res,
                                     res->domain | NOUVEAU_BO_RD);
         }

         if (words) {
            MARK_RING(chan, 8, 1);

            nouveau_bo_validate(chan, bo, res->domain | NOUVEAU_BO_WR);
         }

         while (words) {
            unsigned nr = AVAIL_RING(chan);

            if (nr < 16) {
               FIRE_RING(chan);
               nouveau_bo_validate(chan, bo, res->domain | NOUVEAU_BO_WR);
               continue;
            }
            nr = MIN2(MIN2(nr - 3, words), NV04_PFIFO_MAX_PACKET_LEN);

            BEGIN_RING(chan, RING_3D(CB_ADDR), 1);
            OUT_RING  (chan, (start << 8) | b);
            BEGIN_RING_NI(chan, RING_3D(CB_DATA(0)), nr);
            OUT_RINGp (chan, &res->data[start * 4], nr);

            start += nr;
            words -= nr;
         }
      }
   }
}
Example 8
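/* Build a sampler view: allocate an nv50_tic_entry and fill its 8-word TIC
 * descriptor with format and swizzle, the resource address, linear/buffer
 * layout for untiled storage, tile mode, texture target, dimensions,
 * multisample scaling and the mip level range.
 */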
struct pipe_sampler_view *
nvc0_create_texture_view(struct pipe_context *pipe,
                         struct pipe_resource *texture,
                         const struct pipe_sampler_view *templ,
                         uint32_t flags,
                         enum pipe_texture_target target)
{
   const struct util_format_description *desc;
   uint64_t address;
   uint32_t *tic;
   uint32_t swz[4];
   uint32_t depth;
   struct nv50_tic_entry *view;
   struct nv50_miptree *mt;
   boolean tex_int;

   view = MALLOC_STRUCT(nv50_tic_entry);
   if (!view)
      return NULL;
   mt = nv50_miptree(texture);

   view->pipe = *templ;
   view->pipe.reference.count = 1;
   view->pipe.texture = NULL;
   view->pipe.context = pipe;

   view->id = -1;

   pipe_resource_reference(&view->pipe.texture, texture);

   tic = &view->tic[0];

   desc = util_format_description(view->pipe.format);

   tic[0] = nvc0_format_table[view->pipe.format].tic;

   tex_int = util_format_is_pure_integer(view->pipe.format);

   swz[0] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_r, tex_int);
   swz[1] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_g, tex_int);
   swz[2] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_b, tex_int);
   swz[3] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_a, tex_int);
   tic[0] = (tic[0] & ~NV50_TIC_0_SWIZZLE__MASK) |
      (swz[0] << NV50_TIC_0_MAPR__SHIFT) |
      (swz[1] << NV50_TIC_0_MAPG__SHIFT) |
      (swz[2] << NV50_TIC_0_MAPB__SHIFT) |
      (swz[3] << NV50_TIC_0_MAPA__SHIFT);

   address = mt->base.address;

   tic[2] = 0x10001000 | NV50_TIC_2_NO_BORDER;

   if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
      tic[2] |= NV50_TIC_2_COLORSPACE_SRGB;

   /* check for linear storage type */
   if (unlikely(!nouveau_bo_memtype(nv04_resource(texture)->bo))) {
      if (texture->target == PIPE_BUFFER) {
         address +=
            view->pipe.u.buf.first_element * desc->block.bits / 8;
         tic[2] |= NV50_TIC_2_LINEAR | NV50_TIC_2_TARGET_BUFFER;
         tic[3] = 0;
         tic[4] = /* width */
            view->pipe.u.buf.last_element - view->pipe.u.buf.first_element + 1;
         tic[5] = 0;
      } else {
         /* must be 2D texture without mip maps */
         tic[2] |= NV50_TIC_2_LINEAR | NV50_TIC_2_TARGET_RECT;
         if (texture->target != PIPE_TEXTURE_RECT)
            tic[2] |= NV50_TIC_2_NORMALIZED_COORDS;
         tic[3] = mt->level[0].pitch;
         tic[4] = mt->base.base.width0;
         tic[5] = (1 << 16) | mt->base.base.height0;
      }
      tic[6] =
      tic[7] = 0;
      tic[1] = address;
      tic[2] |= address >> 32;
      return &view->pipe;
   }

   if (!(flags & NV50_TEXVIEW_SCALED_COORDS))
      tic[2] |= NV50_TIC_2_NORMALIZED_COORDS;

   tic[2] |=
      ((mt->level[0].tile_mode & 0x0f0) << (22 - 4)) |
      ((mt->level[0].tile_mode & 0xf00) << (25 - 8));

   depth = MAX2(mt->base.base.array_size, mt->base.base.depth0);

   if (mt->base.base.array_size > 1) {
      /* there doesn't seem to be a base layer field in TIC */
      address += view->pipe.u.tex.first_layer * mt->layer_stride;
      depth = view->pipe.u.tex.last_layer - view->pipe.u.tex.first_layer + 1;
   }
   tic[1] = address;
   tic[2] |= address >> 32;

   switch (target) {
   case PIPE_TEXTURE_1D:
      tic[2] |= NV50_TIC_2_TARGET_1D;
      break;
/* case PIPE_TEXTURE_2D_MS: */
   case PIPE_TEXTURE_2D:
      tic[2] |= NV50_TIC_2_TARGET_2D;
      break;
   case PIPE_TEXTURE_RECT:
      tic[2] |= NV50_TIC_2_TARGET_RECT;
      break;
   case PIPE_TEXTURE_3D:
      tic[2] |= NV50_TIC_2_TARGET_3D;
      break;
   case PIPE_TEXTURE_CUBE:
      depth /= 6;
      tic[2] |= NV50_TIC_2_TARGET_CUBE;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      tic[2] |= NV50_TIC_2_TARGET_1D_ARRAY;
      break;
/* case PIPE_TEXTURE_2D_ARRAY_MS: */
   case PIPE_TEXTURE_2D_ARRAY:
      tic[2] |= NV50_TIC_2_TARGET_2D_ARRAY;
      break;
   case PIPE_TEXTURE_CUBE_ARRAY:
      depth /= 6;
      tic[2] |= NV50_TIC_2_TARGET_CUBE_ARRAY;
      break;
   default:
      NOUVEAU_ERR("invalid texture target: %d\n", mt->base.base.target);
      return FALSE;
   }

   if (mt->base.base.target == PIPE_BUFFER)
      tic[3] = mt->base.base.width0;
   else
      tic[3] = (flags & NV50_TEXVIEW_FILTER_MSAA8) ? 0x20000000 : 0x00300000;

   tic[4] = (1 << 31) | (mt->base.base.width0 << mt->ms_x);

   tic[5] = (mt->base.base.height0 << mt->ms_y) & 0xffff;
   tic[5] |= depth << 16;
   tic[5] |= mt->base.base.last_level << 28;

   tic[6] = (mt->ms_x > 1) ? 0x88000000 : 0x03000000; /* sampling points */

   tic[7] = (view->pipe.u.tex.last_level << 4) | view->pipe.u.tex.first_level;

   /*
   if (mt->base.base.target == PIPE_TEXTURE_2D_MS ||
       mt->base.base.target == PIPE_TEXTURE_2D_ARRAY_MS)
      tic[7] |= mt->ms_mode << 12;
   */

   return &view->pipe;
}
Example 9
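/* Validate the TIC entries of shader stage s: upload newly allocated
 * entries through M2MF, flush the texture cache for resources the GPU has
 * written, lock the entries, reference the resources for reading, and send
 * one BIND_TIC batch for all slots whose binding changed. Returns whether a
 * new entry was uploaded, so the caller can flush the TIC cache.
 */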
static boolean
nvc0_validate_tic(struct nvc0_context *nvc0, int s)
{
   uint32_t commands[32];
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *txc = nvc0->screen->txc;
   unsigned i;
   unsigned n = 0;
   boolean need_flush = FALSE;

   for (i = 0; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
      struct nv04_resource *res;
      const boolean dirty = !!(nvc0->textures_dirty[s] & (1 << i));

      if (!tic) {
         if (dirty)
            commands[n++] = (i << 1) | 0;
         continue;
      }
      res = nv04_resource(tic->pipe.texture);

      if (tic->id < 0) {
         tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);

         PUSH_SPACE(push, 17);
         BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
         PUSH_DATAh(push, txc->offset + (tic->id * 32));
         PUSH_DATA (push, txc->offset + (tic->id * 32));
         BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 1);
         BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
         PUSH_DATA (push, 0x100111);
         BEGIN_NIC0(push, NVC0_M2MF(DATA), 8);
         PUSH_DATAp(push, &tic->tic[0], 8);

         need_flush = TRUE;
      } else
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
         PUSH_DATA (push, (tic->id << 4) | 1);
      }
      nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);

      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status |=  NOUVEAU_BUFFER_STATUS_GPU_READING;

      if (!dirty)
         continue;
      commands[n++] = (tic->id << 9) | (i << 1) | 1;

      BCTX_REFN(nvc0->bufctx_3d, TEX(s, i), res, RD);
   }
   for (; i < nvc0->state.num_textures[s]; ++i)
      commands[n++] = (i << 1) | 0;

   nvc0->state.num_textures[s] = nvc0->num_textures[s];

   if (n) {
      BEGIN_NIC0(push, NVC0_3D(BIND_TIC(s)), n);
      PUSH_DATAp(push, commands, n);
   }
   nvc0->textures_dirty[s] = 0;

   return need_flush;
}
Example 10
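/* Map a buffer for CPU access: VRAM-resident buffers are accessed through
 * the CPU shadow copy or a GART staging area, while GART buffers are mapped
 * directly, waiting on or staging around pending GPU work as dictated by
 * the transfer usage flags.
 */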
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **ptransfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   uint8_t *map;
   int ret;

   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & NOUVEAU_TRANSFER_DISCARD) {
         if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
            buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
         nouveau_transfer_staging(nv, tx, TRUE);
      } else {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
            if (buf->data) {
               align_free(buf->data);
               buf->data = NULL;
            }
            nouveau_transfer_staging(nv, tx, FALSE);
            nouveau_transfer_read(nv, tx);
         } else {
            if (usage & PIPE_TRANSFER_WRITE)
               nouveau_transfer_staging(nv, tx, TRUE);
            if (!buf->data)
               nouveau_buffer_cache(nv, buf);
         }
      }
      return buf->data ? (buf->data + box->x) : tx->map;
   } else
   if (unlikely(buf->domain == 0)) {
      return buf->data + box->x;
   }

   if (nouveau_buffer_should_discard(buf, usage)) {
      int ref = buf->base.reference.count - 1;
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }

   ret = nouveau_bo_map(buf->bo,
                        buf->mm ? 0 : nouveau_screen_transfer_flags(usage),
                        nv->client);
   if (ret) {
      FREE(tx);
      return NULL;
   }
   map = (uint8_t *)buf->bo->map + buf->offset + box->x;

   /* using kernel fences only if !buf->mm */
   if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
      return map;

   if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
      if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
         /* Discarding was not possible, must sync because
          * subsequent transfers might use UNSYNCHRONIZED. */
         nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else
      if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         nouveau_transfer_staging(nv, tx, TRUE);
         map = tx->map;
      } else
      if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK)
            map = NULL;
         else
            nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else {
         nouveau_transfer_staging(nv, tx, TRUE);
         if (tx->map)
            memcpy(tx->map, map, box->width);
         map = tx->map;
      }
   }
   if (!map)
      FREE(tx);
   return map;
}
Example 11
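/* Validate vertex array state: fall back to FIFO (translate) mode when the
 * vertex layout needs conversion, emit one VTXFMT word per element (stale
 * trailing slots are reset to V32_FLOAT), then program the VTXBUF address
 * of each element, emitting zero-stride attributes as immediate vertex
 * attribute values instead.
 */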
void
nv30_vbo_validate(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nv30_vertex_stateobj *vertex = nv30->vertex;
   struct pipe_vertex_element *ve;
   struct pipe_vertex_buffer *vb;
   unsigned i, redefine;

   nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXBUF);
   if (!nv30->vertex || nv30->draw_flags)
      return;

   if (unlikely(vertex->need_conversion)) {
      nv30->vbo_fifo = ~0;
      nv30->vbo_user = 0;
   } else {
      nv30_prevalidate_vbufs(nv30);
   }

   if (!PUSH_SPACE(push, 128))
      return;

   redefine = MAX2(vertex->num_elements, nv30->state.num_vtxelts);
   BEGIN_NV04(push, NV30_3D(VTXFMT(0)), redefine);

   for (i = 0; i < vertex->num_elements; i++) {
      ve = &vertex->pipe[i];
      vb = &nv30->vtxbuf[ve->vertex_buffer_index];

      if (likely(vb->stride) || nv30->vbo_fifo)
         PUSH_DATA (push, (vb->stride << 8) | vertex->element[i].state);
      else
         PUSH_DATA (push, NV30_3D_VTXFMT_TYPE_V32_FLOAT);
   }

   for (; i < nv30->state.num_vtxelts; i++) {
      PUSH_DATA (push, NV30_3D_VTXFMT_TYPE_V32_FLOAT);
   }

   for (i = 0; i < vertex->num_elements; i++) {
      struct nv04_resource *res;
      unsigned offset;
      boolean user;

      ve = &vertex->pipe[i];
      vb = &nv30->vtxbuf[ve->vertex_buffer_index];
      user = (nv30->vbo_user & (1 << ve->vertex_buffer_index));

      res = nv04_resource(vb->buffer);

      if (nv30->vbo_fifo || unlikely(vb->stride == 0)) {
         if (!nv30->vbo_fifo)
            nv30_emit_vtxattr(nv30, vb, ve, i);
         continue;
      }

      offset = ve->src_offset + vb->buffer_offset;

      BEGIN_NV04(push, NV30_3D(VTXBUF(i)), 1);
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), user ? BUFCTX_VTXTMP : BUFCTX_VTXBUF,
                       res, offset, NOUVEAU_BO_LOW | NOUVEAU_BO_RD,
                       0, NV30_3D_VTXBUF_DMA1);
   }

   nv30->state.num_vtxelts = vertex->num_elements;
}
Example 12
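/* Immediate-mode (push) vertex fallback: configure the translate module,
 * primitive restart, edge flags and the index buffer, then for every
 * instance set up a scratch vertex array, emit the vertices according to
 * the index size, and finally restore edgeflag / vertex-id state and unmap
 * the vertex and index buffers.
 */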
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (nvc0->state.index_bias) {
      /* this is already taken care of by translate */
      IMMED_NVC0(ctx.push, NVC0_3D(VB_ELEMENT_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->primitive_restart) {
      /* NOTE: I hope we won't ever need that last index (~0).
       * If we do, we have to disable primitive restart here always and
       * use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
       * We could also deactive PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
       * and add manual restart to disp_vertices_seq.
       */
      BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      PUSH_DATA (ctx.push, info->indexed ? 0xffffffff : info->restart_index);
   } else
   if (nvc0->state.prim_restart) {
      IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
   }
   nvc0->state.prim_restart = info->primitive_restart;

   if (info->indexed) {
      nvc0_push_map_idxbuf(&ctx, nvc0);
      index_size = nvc0->idxbuf.index_size;
   } else {
      if (unlikely(info->count_from_stream_output)) {
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.instance_id = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);


   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->indexed)
      nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}
Example 13
/* Returns a pointer to a memory area representing a window into the
 * resource's data.
 *
 * This may or may not be the _actual_ memory area of the resource. However
 * when calling nouveau_buffer_transfer_unmap, if it wasn't the actual memory
 * area, the contents of the returned map are copied over to the resource.
 *
 * The usage indicates what the caller plans to do with the map:
 *
 *   WRITE means that the user plans to write to it
 *
 *   READ means that the user plans on reading from it
 *
 *   DISCARD_WHOLE_RESOURCE means that the whole resource is going to be
 *   potentially overwritten, and even if it isn't, the bits that aren't don't
 *   need to be maintained.
 *
 *   DISCARD_RANGE means that all the data in the specified range is going to
 *   be overwritten.
 *
 * The strategy for determining what kind of memory area to return is complex,
 * see comments inside of the function.
 */
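/* For illustration only (not from the driver source): a gallium client that
 * rewrites a sub-range of a buffer typically reaches this function through
 * the buffer helpers with usage flags like the following sketch, where
 * pipe, buf, offset, size and data are assumed to be set up by the caller:
 *
 *    struct pipe_transfer *xfer;
 *    void *p = pipe_buffer_map_range(pipe, buf, offset, size,
 *                                    PIPE_TRANSFER_WRITE |
 *                                    PIPE_TRANSFER_DISCARD_RANGE, &xfer);
 *    memcpy(p, data, size);
 *    pipe_buffer_unmap(pipe, xfer);
 */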
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **ptransfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   uint8_t *map;
   int ret;

   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;

   if (usage & PIPE_TRANSFER_READ)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1);
   if (usage & PIPE_TRANSFER_WRITE)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);

   /* If we are trying to write to an uninitialized range, the user shouldn't
    * care what was there before. So we can treat the write as if the target
    * range were being discarded. Furthermore, since we know that even if this
    * buffer is busy due to GPU activity, because the contents were
    * uninitialized, the GPU can't care what was there, and so we can treat
    * the write as being unsynchronized.
    */
   if ((usage & PIPE_TRANSFER_WRITE) &&
       !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
      usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;

   if (usage & PIPE_TRANSFER_PERSISTENT)
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & NOUVEAU_TRANSFER_DISCARD) {
         /* Set up a staging area for the user to write to. It will be copied
          * back into VRAM on unmap. */
         if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
            buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
         nouveau_transfer_staging(nv, tx, TRUE);
      } else {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
            /* The GPU is currently writing to this buffer. Copy its current
             * contents to a staging area in the GART. This is necessary since
             * not the whole area being mapped is being discarded.
             */
            if (buf->data) {
               align_free(buf->data);
               buf->data = NULL;
            }
            nouveau_transfer_staging(nv, tx, FALSE);
            nouveau_transfer_read(nv, tx);
         } else {
            /* The buffer is currently idle. Create a staging area for writes,
             * and make sure that the cached data is up-to-date. */
            if (usage & PIPE_TRANSFER_WRITE)
               nouveau_transfer_staging(nv, tx, TRUE);
            if (!buf->data)
               nouveau_buffer_cache(nv, buf);
         }
      }
      return buf->data ? (buf->data + box->x) : tx->map;
   } else
   if (unlikely(buf->domain == 0)) {
      return buf->data + box->x;
   }

   /* At this point, buf->domain == GART */

   if (nouveau_buffer_should_discard(buf, usage)) {
      int ref = buf->base.reference.count - 1;
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }

   /* Note that nouveau_bo_map ends up doing a nouveau_bo_wait with the
    * relevant flags. If buf->mm is set, that means this resource is part of a
    * larger slab bo that holds multiple resources. So in that case, don't
    * wait on the whole slab and instead use the logic below to return a
    * reasonable buffer for that case.
    */
   ret = nouveau_bo_map(buf->bo,
                        buf->mm ? 0 : nouveau_screen_transfer_flags(usage),
                        nv->client);
   if (ret) {
      FREE(tx);
      return NULL;
   }
   map = (uint8_t *)buf->bo->map + buf->offset + box->x;

   /* using kernel fences only if !buf->mm */
   if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
      return map;

   /* If the GPU is currently reading/writing this buffer, we shouldn't
    * interfere with its progress. So instead we either wait for the GPU to
    * complete its operation, or set up a staging area to perform our work in.
    */
   if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
      if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
         /* Discarding was not possible, must sync because
          * subsequent transfers might use UNSYNCHRONIZED. */
         nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else
      if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         /* The whole range is being discarded, so it doesn't matter what was
          * there before. No need to copy anything over. */
         nouveau_transfer_staging(nv, tx, TRUE);
         map = tx->map;
      } else
      if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK)
            map = NULL;
         else
            nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else {
         /* It is expected that the returned buffer be a representation of the
          * data in question, so we must copy it over from the buffer. */
         nouveau_transfer_staging(nv, tx, TRUE);
         if (tx->map)
            memcpy(tx->map, map, box->width);
         map = tx->map;
      }
   }
   if (!map)
      FREE(tx);
   return map;
}
Example 14
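/* Build an nv50 sampler view: fill the 8-word TIC descriptor with format
 * and swizzle, the resource address and layer range, linear/buffer layout
 * for untiled storage, tile mode, texture target, dimensions and the mip
 * level range.
 */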
struct pipe_sampler_view *
nv50_create_texture_view(struct pipe_context *pipe,
                         struct pipe_resource *texture,
                         const struct pipe_sampler_view *templ,
                         uint32_t flags,
                         enum pipe_texture_target target)
{
   const struct util_format_description *desc;
   uint64_t addr;
   uint32_t *tic;
   uint32_t swz[4];
   uint32_t depth;
   struct nv50_tic_entry *view;
   struct nv50_miptree *mt = nv50_miptree(texture);
   boolean tex_int;

   view = MALLOC_STRUCT(nv50_tic_entry);
   if (!view)
      return NULL;

   view->pipe = *templ;
   view->pipe.reference.count = 1;
   view->pipe.texture = NULL;
   view->pipe.context = pipe;

   view->id = -1;

   pipe_resource_reference(&view->pipe.texture, texture);

   tic = &view->tic[0];

   desc = util_format_description(view->pipe.format);

   /* TIC[0] */

   tic[0] = nv50_format_table[view->pipe.format].tic;

   tex_int = util_format_is_pure_integer(view->pipe.format);

   swz[0] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_r, tex_int);
   swz[1] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_g, tex_int);
   swz[2] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_b, tex_int);
   swz[3] = nv50_tic_swizzle(tic[0], view->pipe.swizzle_a, tex_int);
   tic[0] = (tic[0] & ~NV50_TIC_0_SWIZZLE__MASK) |
      (swz[0] << NV50_TIC_0_MAPR__SHIFT) |
      (swz[1] << NV50_TIC_0_MAPG__SHIFT) |
      (swz[2] << NV50_TIC_0_MAPB__SHIFT) |
      (swz[3] << NV50_TIC_0_MAPA__SHIFT);

   addr = mt->base.address;

   if (mt->base.base.target == PIPE_TEXTURE_1D_ARRAY ||
       mt->base.base.target == PIPE_TEXTURE_2D_ARRAY) {
      addr += view->pipe.u.tex.first_layer * mt->layer_stride;
      depth = view->pipe.u.tex.last_layer - view->pipe.u.tex.first_layer + 1;
   } else {
      depth = mt->base.base.depth0;
   }

   tic[2] = 0x10001000 | NV50_TIC_2_NO_BORDER;

   if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
      tic[2] |= NV50_TIC_2_COLORSPACE_SRGB;

   if (!(flags & NV50_TEXVIEW_SCALED_COORDS))
      tic[2] |= NV50_TIC_2_NORMALIZED_COORDS;

   if (unlikely(!nouveau_bo_memtype(nv04_resource(texture)->bo))) {
      if (target == PIPE_BUFFER) {
         addr += view->pipe.u.buf.first_element * desc->block.bits / 8;
         tic[2] |= NV50_TIC_2_LINEAR | NV50_TIC_2_TARGET_BUFFER;
         tic[3] = 0;
         tic[4] = /* width */
            view->pipe.u.buf.last_element - view->pipe.u.buf.first_element + 1;
         tic[5] = 0;
      } else {
         tic[2] |= NV50_TIC_2_LINEAR | NV50_TIC_2_TARGET_RECT;
         tic[3] = mt->level[0].pitch;
         tic[4] = mt->base.base.width0;
         tic[5] = (1 << 16) | (mt->base.base.height0);
      }
      tic[6] =
      tic[7] = 0;
      tic[1] = addr;
      tic[2] |= addr >> 32;
      return &view->pipe;
   }

   tic[1] = addr;
   tic[2] |= (addr >> 32) & 0xff;

   tic[2] |=
      ((mt->level[0].tile_mode & 0x0f0) << (22 - 4)) |
      ((mt->level[0].tile_mode & 0xf00) << (25 - 8));

   switch (target) {
   case PIPE_TEXTURE_1D:
      tic[2] |= NV50_TIC_2_TARGET_1D;
      break;
   case PIPE_TEXTURE_2D:
      tic[2] |= NV50_TIC_2_TARGET_2D;
      break;
   case PIPE_TEXTURE_RECT:
      tic[2] |= NV50_TIC_2_TARGET_RECT;
      break;
   case PIPE_TEXTURE_3D:
      tic[2] |= NV50_TIC_2_TARGET_3D;
      break;
   case PIPE_TEXTURE_CUBE:
      depth /= 6;
      tic[2] |= NV50_TIC_2_TARGET_CUBE;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      tic[2] |= NV50_TIC_2_TARGET_1D_ARRAY;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
      tic[2] |= NV50_TIC_2_TARGET_2D_ARRAY;
      break;
   case PIPE_TEXTURE_CUBE_ARRAY:
      depth /= 6;
      tic[2] |= NV50_TIC_2_TARGET_CUBE_ARRAY;
      break;
   case PIPE_BUFFER:
      assert(0); /* should be linear and handled above ! */
      tic[2] |= NV50_TIC_2_TARGET_BUFFER | NV50_TIC_2_LINEAR;
      break;
   default:
      NOUVEAU_ERR("invalid texture target: %d\n", mt->base.base.target);
      return FALSE;
   }

   tic[3] = (flags & NV50_TEXVIEW_FILTER_MSAA8) ? 0x20000000 : 0x00300000;

   tic[4] = (1 << 31) | (mt->base.base.width0 << mt->ms_x);

   tic[5] = (mt->base.base.height0 << mt->ms_y) & 0xffff;
   tic[5] |= depth << 16;
   tic[5] |= mt->base.base.last_level << NV50_TIC_5_LAST_LEVEL__SHIFT;

   tic[6] = (mt->ms_x > 1) ? 0x88000000 : 0x03000000; /* sampling points */

   tic[7] = (view->pipe.u.tex.last_level << 4) | view->pipe.u.tex.first_level;

   if (unlikely(!(tic[2] & NV50_TIC_2_NORMALIZED_COORDS)))
      if (mt->base.base.last_level)
         tic[5] &= ~NV50_TIC_5_LAST_LEVEL__MASK;

   return &view->pipe;
}
Example 15
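/* Validate the vertex program: re-translate when the fragprog texcoords or
 * the enabled clip planes changed, allocate exec/const heap space (evicting
 * other programs if necessary), patch branch and constant relocations for
 * the NV30 or NV40 encoding, upload constants that differ from the bound
 * constbuf as well as the code itself, and re-point the hardware at the
 * program.
 */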
void
nv30_vertprog_validate(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nouveau_object *eng3d = nv30->screen->eng3d;
   struct nv30_vertprog *vp = nv30->vertprog.program;
   struct nv30_fragprog *fp = nv30->fragprog.program;
   boolean upload_code = FALSE;
   boolean upload_data = FALSE;
   unsigned i;

   if (nv30->dirty & NV30_NEW_FRAGPROG) {
      if (memcmp(vp->texcoord, fp->texcoord, sizeof(vp->texcoord))) {
         if (vp->translated)
            nv30_vertprog_destroy(vp);
         memcpy(vp->texcoord, fp->texcoord, sizeof(vp->texcoord));
      }
   }

   if (nv30->rast && nv30->rast->pipe.clip_plane_enable != vp->enabled_ucps) {
      vp->enabled_ucps = nv30->rast->pipe.clip_plane_enable;
      if (vp->translated)
         nv30_vertprog_destroy(vp);
   }

   if (!vp->translated) {
      vp->translated = _nvfx_vertprog_translate(eng3d->oclass, vp);
      if (!vp->translated) {
         nv30->draw_flags |= NV30_NEW_VERTPROG;
         return;
      }
      nv30->dirty |= NV30_NEW_VERTPROG;
   }

   if (!vp->exec) {
      struct nouveau_heap *heap = nv30->screen->vp_exec_heap;
      struct nv30_shader_reloc *reloc = vp->branch_relocs.data;
      unsigned nr_reloc = vp->branch_relocs.size / sizeof(*reloc);
      uint32_t *inst, target;

      if (nouveau_heap_alloc(heap, vp->nr_insns, &vp->exec, &vp->exec)) {
         while (heap->next && heap->size < vp->nr_insns) {
            struct nouveau_heap **evict = heap->next->priv;
            nouveau_heap_free(evict);
         }

         if (nouveau_heap_alloc(heap, vp->nr_insns, &vp->exec, &vp->exec)) {
            nv30->draw_flags |= NV30_NEW_VERTPROG;
            return;
         }
      }

      if (eng3d->oclass < NV40_3D_CLASS) {
         while (nr_reloc--) {
            inst     = vp->insns[reloc->location].data;
            target   = vp->exec->start + reloc->target;

            inst[2] &= ~0x000007fc;
            inst[2] |= target << 2;
            reloc++;
         }
      } else {
         while (nr_reloc--) {
            inst     = vp->insns[reloc->location].data;
            target   = vp->exec->start + reloc->target;

            inst[2] &= ~0x0000003f;
            inst[2] |= target >> 3;
            inst[3] &= ~0xe0000000;
            inst[3] |= target << 29;
            reloc++;
         }
      }

      upload_code = TRUE;
   }

   if (vp->nr_consts && !vp->data) {
      struct nouveau_heap *heap = nv30->screen->vp_data_heap;
      struct nv30_shader_reloc *reloc = vp->const_relocs.data;
      unsigned nr_reloc = vp->const_relocs.size / sizeof(*reloc);
      uint32_t *inst, target;

      if (nouveau_heap_alloc(heap, vp->nr_consts, vp, &vp->data)) {
         while (heap->next && heap->size < vp->nr_consts) {
            struct nv30_vertprog *evp = heap->next->priv;
            nouveau_heap_free(&evp->data);
         }

         if (nouveau_heap_alloc(heap, vp->nr_consts, vp, &vp->data)) {
            nv30->draw_flags |= NV30_NEW_VERTPROG;
            return;
         }
      }

      if (eng3d->oclass < NV40_3D_CLASS) {
         while (nr_reloc--) {
            inst     = vp->insns[reloc->location].data;
            target   = vp->data->start + reloc->target;

            inst[1] &= ~0x0007fc000;
            inst[1] |= (target & 0x1ff) << 14;
            reloc++;
         }
      } else {
         while (nr_reloc--) {
            inst     = vp->insns[reloc->location].data;
            target   = vp->data->start + reloc->target;

            inst[1] &= ~0x0001ff000;
            inst[1] |= (target & 0x1ff) << 12;
            reloc++;
         }
      }

      upload_code = TRUE;
      upload_data = TRUE;
   }

   if (vp->nr_consts) {
      struct nv04_resource *res = nv04_resource(nv30->vertprog.constbuf);

      for (i = 0; i < vp->nr_consts; i++) {
         struct nv30_vertprog_data *data = &vp->consts[i];

         if (data->index < 0) {
            if (!upload_data)
               continue;
         } else {
            float *constbuf = (float *)res->data;
            if (!upload_data &&
                !memcmp(data->value, &constbuf[data->index * 4], 16))
               continue;
            memcpy(data->value, &constbuf[data->index * 4], 16);
         }

         BEGIN_NV04(push, NV30_3D(VP_UPLOAD_CONST_ID), 5);
         PUSH_DATA (push, vp->data->start + i);
         PUSH_DATAp(push, data->value, 4);
      }
   }

   if (upload_code) {
      BEGIN_NV04(push, NV30_3D(VP_UPLOAD_FROM_ID), 1);
      PUSH_DATA (push, vp->exec->start);
      for (i = 0; i < vp->nr_insns; i++) {
         BEGIN_NV04(push, NV30_3D(VP_UPLOAD_INST(0)), 4);
         PUSH_DATAp(push, vp->insns[i].data, 4);
      }
   }

   if (nv30->dirty & (NV30_NEW_VERTPROG | NV30_NEW_FRAGPROG)) {
      BEGIN_NV04(push, NV30_3D(VP_START_FROM_ID), 1);
      PUSH_DATA (push, vp->exec->start);
      if (eng3d->oclass < NV40_3D_CLASS) {
         BEGIN_NV04(push, NV30_3D(ENGINE), 1);
         PUSH_DATA (push, 0x00000013); /* vp instead of ff, somehow */
      } else {
         BEGIN_NV04(push, NV40_3D(VP_ATTRIB_EN), 2);
         PUSH_DATA (push, vp->ir);
         PUSH_DATA (push, vp->or | fp->vp_or);
         BEGIN_NV04(push, NV30_3D(ENGINE), 1);
         PUSH_DATA (push, 0x00000011);
      }
   }
}
Example 16
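/* Build a GM107-style sampler view: the newer TIC layout packs the format
 * and the per-component data types and swizzle sources into tic[0], with
 * the remaining header words filled according to the storage layout and
 * texture target.
 */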
static struct pipe_sampler_view *
gm107_create_texture_view(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          const struct pipe_sampler_view *templ,
                          uint32_t flags,
                          enum pipe_texture_target target)
{
   const struct util_format_description *desc;
   const struct nvc0_format *fmt;
   uint64_t address;
   uint32_t *tic;
   uint32_t swz[4];
   uint32_t width, height;
   uint32_t depth;
   struct nv50_tic_entry *view;
   struct nv50_miptree *mt;
   bool tex_int;

   view = MALLOC_STRUCT(nv50_tic_entry);
   if (!view)
      return NULL;
   mt = nv50_miptree(texture);

   view->pipe = *templ;
   view->pipe.reference.count = 1;
   view->pipe.texture = NULL;
   view->pipe.context = pipe;

   view->id = -1;

   pipe_resource_reference(&view->pipe.texture, texture);

   tic = &view->tic[0];

   desc = util_format_description(view->pipe.format);
   tex_int = util_format_is_pure_integer(view->pipe.format);

   fmt = &nvc0_format_table[view->pipe.format];
   swz[0] = nv50_tic_swizzle(fmt, view->pipe.swizzle_r, tex_int);
   swz[1] = nv50_tic_swizzle(fmt, view->pipe.swizzle_g, tex_int);
   swz[2] = nv50_tic_swizzle(fmt, view->pipe.swizzle_b, tex_int);
   swz[3] = nv50_tic_swizzle(fmt, view->pipe.swizzle_a, tex_int);

   tic[0]  = fmt->tic.format << GM107_TIC2_0_COMPONENTS_SIZES__SHIFT;
   tic[0] |= fmt->tic.type_r << GM107_TIC2_0_R_DATA_TYPE__SHIFT;
   tic[0] |= fmt->tic.type_g << GM107_TIC2_0_G_DATA_TYPE__SHIFT;
   tic[0] |= fmt->tic.type_b << GM107_TIC2_0_B_DATA_TYPE__SHIFT;
   tic[0] |= fmt->tic.type_a << GM107_TIC2_0_A_DATA_TYPE__SHIFT;
   tic[0] |= swz[0] << GM107_TIC2_0_X_SOURCE__SHIFT;
   tic[0] |= swz[1] << GM107_TIC2_0_Y_SOURCE__SHIFT;
   tic[0] |= swz[2] << GM107_TIC2_0_Z_SOURCE__SHIFT;
   tic[0] |= swz[3] << GM107_TIC2_0_W_SOURCE__SHIFT;

   address = mt->base.address;

   tic[3]  = GM107_TIC2_3_LOD_ANISO_QUALITY_2;
   tic[4]  = GM107_TIC2_4_SECTOR_PROMOTION_PROMOTE_TO_2_V;
   tic[4] |= GM107_TIC2_4_BORDER_SIZE_SAMPLER_COLOR;

   if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
      tic[4] |= GM107_TIC2_4_SRGB_CONVERSION;

   if (!(flags & NV50_TEXVIEW_SCALED_COORDS))
      tic[5] = GM107_TIC2_5_NORMALIZED_COORDS;
   else
      tic[5] = 0;

   /* check for linear storage type */
   if (unlikely(!nouveau_bo_memtype(nv04_resource(texture)->bo))) {
      if (texture->target == PIPE_BUFFER) {
         assert(!(tic[5] & GM107_TIC2_5_NORMALIZED_COORDS));
         width = view->pipe.u.buf.last_element - view->pipe.u.buf.first_element;
         address +=
            view->pipe.u.buf.first_element * desc->block.bits / 8;
         tic[2]  = GM107_TIC2_2_HEADER_VERSION_ONE_D_BUFFER;
         tic[3] |= width >> 16;
         tic[4] |= GM107_TIC2_4_TEXTURE_TYPE_ONE_D_BUFFER;
         tic[4] |= width & 0xffff;
      } else {
Example 17
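/* nv30 immediate-mode (push) vertex fallback: map the vertex and index
 * buffers, hand them to the translate module (applying the index bias to
 * the vertex pointers for indexed draws), program primitive restart on
 * NV40+, then emit the vertices inline according to the index size and
 * unmap.
 */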
void
nv30_push_vbo(struct nv30_context *nv30, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   boolean apply_bias = info->indexed && info->index_bias;

   ctx.push = nv30->base.pushbuf;
   ctx.translate = nv30->vertex->translate;
   ctx.packet_vertex_limit = nv30->vertex->vtx_per_packet_max;
   ctx.vertex_words = nv30->vertex->vtx_size;

   for (i = 0; i < nv30->num_vtxbufs; ++i) {
      uint8_t *data;
      struct pipe_vertex_buffer *vb = &nv30->vtxbuf[i];
      struct nv04_resource *res = nv04_resource(vb->buffer);

      if (!vb->buffer && !vb->user_buffer) {
         continue;
      }

      data = nouveau_resource_map_offset(&nv30->base, res,
                                         vb->buffer_offset, NOUVEAU_BO_RD);

      if (apply_bias)
         data += info->index_bias * vb->stride;

      ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0);
   }

   if (info->indexed) {
      if (nv30->idxbuf.buffer)
         ctx.idxbuf = nouveau_resource_map_offset(&nv30->base,
            nv04_resource(nv30->idxbuf.buffer), nv30->idxbuf.offset,
            NOUVEAU_BO_RD);
      else
         ctx.idxbuf = nv30->idxbuf.user_buffer;
      if (!ctx.idxbuf) {
         nv30_state_release(nv30);
         return;
      }
      index_size = nv30->idxbuf.index_size;
      ctx.primitive_restart = info->primitive_restart;
      ctx.restart_index = info->restart_index;
   } else {
      ctx.idxbuf = NULL;
      index_size = 0;
      ctx.primitive_restart = FALSE;
      ctx.restart_index = 0;
   }

   if (nv30->screen->eng3d->oclass >= NV40_3D_CLASS) {
      BEGIN_NV04(ctx.push, NV40_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, info->primitive_restart);
      PUSH_DATA (ctx.push, info->restart_index);
      nv30->state.prim_restart = info->primitive_restart;
   }

   ctx.prim = nv30_prim_gl(info->mode);

   PUSH_RESET(ctx.push, BUFCTX_IDXBUF);
   BEGIN_NV04(ctx.push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (ctx.push, ctx.prim);
   switch (index_size) {
   case 0:
      emit_vertices_seq(&ctx, info->start, info->count);
      break;
   case 1:
      emit_vertices_i08(&ctx, info->start, info->count);
      break;
   case 2:
      emit_vertices_i16(&ctx, info->start, info->count);
      break;
   case 4:
      emit_vertices_i32(&ctx, info->start, info->count);
      break;
   default:
      assert(0);
      break;
   }
   BEGIN_NV04(ctx.push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (ctx.push, NV30_3D_VERTEX_BEGIN_END_STOP);

   if (info->indexed)
      nouveau_resource_unmap(nv04_resource(nv30->idxbuf.buffer));

   for (i = 0; i < nv30->num_vtxbufs; ++i) {
      if (nv30->vtxbuf[i].buffer) {
         nouveau_resource_unmap(nv04_resource(nv30->vtxbuf[i].buffer));
      }
   }

   nv30_state_release(nv30);
}
Example 18
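/* Called when a resource's storage is reallocated: scan every binding point
 * that may reference it (framebuffer, vertex and index buffers, textures,
 * constant buffers), mark the matching state dirty and reset the
 * corresponding bufctx bin, decrementing ref for each hit and returning as
 * soon as all known references are accounted for.
 */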
static int
nv50_invalidate_resource_storage(struct nouveau_context *ctx,
                                 struct pipe_resource *res,
                                 int ref)
{
   struct nv50_context *nv50 = nv50_context(&ctx->pipe);
   unsigned bind = res->bind ? res->bind : PIPE_BIND_VERTEX_BUFFER;
   unsigned s, i;

   if (bind & PIPE_BIND_RENDER_TARGET) {
      assert(nv50->framebuffer.nr_cbufs <= PIPE_MAX_COLOR_BUFS);
      for (i = 0; i < nv50->framebuffer.nr_cbufs; ++i) {
         if (nv50->framebuffer.cbufs[i] &&
             nv50->framebuffer.cbufs[i]->texture == res) {
            nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER;
            nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);
            if (!--ref)
               return ref;
         }
      }
   }
   if (bind & PIPE_BIND_DEPTH_STENCIL) {
      if (nv50->framebuffer.zsbuf &&
          nv50->framebuffer.zsbuf->texture == res) {
         nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER;
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);
         if (!--ref)
            return ref;
      }
   }

   if (bind & (PIPE_BIND_VERTEX_BUFFER |
               PIPE_BIND_INDEX_BUFFER |
               PIPE_BIND_CONSTANT_BUFFER |
               PIPE_BIND_STREAM_OUTPUT |
               PIPE_BIND_SAMPLER_VIEW)) {

      assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
      for (i = 0; i < nv50->num_vtxbufs; ++i) {
         if (nv50->vtxbuf[i].buffer == res) {
            nv50->dirty_3d |= NV50_NEW_3D_ARRAYS;
            nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_VERTEX);
            if (!--ref)
               return ref;
         }
      }

      if (nv50->idxbuf.buffer == res) {
         /* Just rebind to the bufctx as there is no separate dirty bit */
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
         BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(res), RD);
         if (!--ref)
            return ref;
      }

      for (s = 0; s < 3; ++s) {
      assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
      for (i = 0; i < nv50->num_textures[s]; ++i) {
         if (nv50->textures[s][i] &&
             nv50->textures[s][i]->texture == res) {
            nv50->dirty_3d |= NV50_NEW_3D_TEXTURES;
            nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TEXTURES);
            if (!--ref)
               return ref;
         }
      }
      }

      for (s = 0; s < 3; ++s) {
      for (i = 0; i < NV50_MAX_PIPE_CONSTBUFS; ++i) {
         if (!(nv50->constbuf_valid[s] & (1 << i)))
            continue;
         if (!nv50->constbuf[s][i].user &&
             nv50->constbuf[s][i].u.buf == res) {
            nv50->dirty_3d |= NV50_NEW_3D_CONSTBUF;
            nv50->constbuf_dirty[s] |= 1 << i;
            nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_CB(s, i));
            if (!--ref)
               return ref;
         }
      }
      }
   }

   return ref;
}
Example 19
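/* Fill the 16-dword surface descriptor consumed by the surface load/store
 * library: format word, dimensions, a target-type code, per-format
 * auxiliary bits, the suldp library entry offset and the raw-access limit.
 * A NULL or unsupported surface gets a dummy "bad format" descriptor.
 */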
void
nve4_set_surface_info(struct nouveau_pushbuf *push,
                      struct pipe_surface *psf,
                      struct nvc0_screen *screen)
{
   struct nv50_surface *sf = nv50_surface(psf);
   struct nv04_resource *res;
   uint64_t address;
   uint32_t *const info = push->cur;
   uint8_t log2cpp;

   if (psf && !nve4_su_format_map[psf->format])
      NOUVEAU_ERR("unsupported surface format, try is_format_supported() !\n");

   push->cur += 16;

   if (!psf || !nve4_su_format_map[psf->format]) {
      memset(info, 0, 16 * sizeof(*info));

      info[0] = 0xbadf0000;
      info[1] = 0x80004000;
      info[12] = nve4_suldp_lib_offset[PIPE_FORMAT_R32G32B32A32_UINT] +
         screen->lib_code->start;
      return;
   }
   res = nv04_resource(sf->base.texture);

   address = res->address + sf->offset;

   info[8] = sf->width;
   info[9] = sf->height;
   info[10] = sf->depth;
   switch (res->base.target) {
   case PIPE_TEXTURE_1D_ARRAY:
      info[11] = 1;
      break;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_RECT:
      info[11] = 2;
      break;
   case PIPE_TEXTURE_3D:
      info[11] = 3;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
      info[11] = 4;
      break;
   default:
      info[11] = 0;
      break;
   }
   log2cpp = (0xf000 & nve4_su_format_aux_map[sf->base.format]) >> 12;

   info[12] = nve4_suldp_lib_offset[sf->base.format] + screen->lib_code->start;

   /* limit in bytes for raw access */
   info[13] = (0x06 << 22) | ((sf->width << log2cpp) - 1);

   info[1] = nve4_su_format_map[sf->base.format];

#if 0
   switch (util_format_get_blocksizebits(sf->base.format)) {
   case  16: info[1] |= 1 << 16; break;
   case  32: info[1] |= 2 << 16; break;
   case  64: info[1] |= 3 << 16; break;
   case 128: info[1] |= 4 << 16; break;
   default:
      break;
   }
#else
   info[1] |= log2cpp << 16;
   info[1] |=  0x4000;
   info[1] |= (0x0f00 & nve4_su_format_aux_map[sf->base.format]);
#endif

   if (res->base.target == PIPE_BUFFER) {
      info[0]  = address >> 8;
      info[2]  = sf->width - 1;
      info[2] |= (0xff & nve4_su_format_aux_map[sf->base.format]) << 22;
      info[3]  = 0;
      info[4]  = 0;
      info[5]  = 0;
      info[6]  = 0;
      info[7]  = 0;
      info[14] = 0;
      info[15] = 0;
   } else {
Example #20
void
nvc0_launch_grid(struct pipe_context *pipe, const struct pipe_grid_info *info)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_program *cp = nvc0->compprog;
   int ret;

   ret = !nvc0_state_validate_cp(nvc0, ~0);
   if (ret) {
      NOUVEAU_ERR("Failed to launch grid !\n");
      return;
   }

   nvc0_compute_upload_input(nvc0, info);

   BEGIN_NVC0(push, NVC0_CP(CP_START_ID), 1);
   PUSH_DATA (push, nvc0_program_symbol_offset(cp, info->pc));

   BEGIN_NVC0(push, NVC0_CP(LOCAL_POS_ALLOC), 3);
   PUSH_DATA (push, (cp->hdr[1] & 0xfffff0) + align(cp->cp.lmem_size, 0x10));
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 0x800); /* WARP_CSTACK_SIZE */

   BEGIN_NVC0(push, NVC0_CP(SHARED_SIZE), 3);
   PUSH_DATA (push, align(cp->cp.smem_size, 0x100));
   PUSH_DATA (push, info->block[0] * info->block[1] * info->block[2]);
   PUSH_DATA (push, cp->num_barriers);
   BEGIN_NVC0(push, NVC0_CP(CP_GPR_ALLOC), 1);
   PUSH_DATA (push, cp->num_gprs);

   /* launch preliminary setup */
   BEGIN_NVC0(push, NVC0_CP(GRIDID), 1);
   PUSH_DATA (push, 0x1);
   BEGIN_NVC0(push, SUBC_CP(0x036c), 1);
   PUSH_DATA (push, 0);
   BEGIN_NVC0(push, NVC0_CP(FLUSH), 1);
   PUSH_DATA (push, NVC0_COMPUTE_FLUSH_GLOBAL | NVC0_COMPUTE_FLUSH_UNK8);

   /* block setup */
   BEGIN_NVC0(push, NVC0_CP(BLOCKDIM_YX), 2);
   PUSH_DATA (push, (info->block[1] << 16) | info->block[0]);
   PUSH_DATA (push, info->block[2]);

   if (unlikely(info->indirect)) {
      struct nv04_resource *res = nv04_resource(info->indirect);
      uint32_t offset = res->offset + info->indirect_offset;
      unsigned macro = NVC0_CP_MACRO_LAUNCH_GRID_INDIRECT;

      /* pull the three grid dimensions straight out of the indirect buffer:
       * the macro takes them as its three parameters, read in-band from the
       * BO without prefetching */
      nouveau_pushbuf_space(push, 16, 0, 1);
      PUSH_REFN(push, res->bo, NOUVEAU_BO_RD | res->domain);
      PUSH_DATA(push, NVC0_FIFO_PKHDR_1I(1, macro, 3));
      nouveau_pushbuf_data(push, res->bo, offset,
                           NVC0_IB_ENTRY_1_NO_PREFETCH | 3 * 4);
   } else {
      /* grid setup */
      BEGIN_NVC0(push, NVC0_CP(GRIDDIM_YX), 2);
      PUSH_DATA (push, (info->grid[1] << 16) | info->grid[0]);
      PUSH_DATA (push, info->grid[2]);

      /* kernel launching */
      BEGIN_NVC0(push, NVC0_CP(COMPUTE_BEGIN), 1);
      PUSH_DATA (push, 0);
      BEGIN_NVC0(push, SUBC_CP(0x0a08), 1);
      PUSH_DATA (push, 0);
      BEGIN_NVC0(push, NVC0_CP(LAUNCH), 1);
      PUSH_DATA (push, 0x1000);
      BEGIN_NVC0(push, NVC0_CP(COMPUTE_END), 1);
      PUSH_DATA (push, 0);
      BEGIN_NVC0(push, SUBC_CP(0x0360), 1);
      PUSH_DATA (push, 0x1);
   }

   /* TODO: Not sure if this is really necessary. */
   nvc0_compute_invalidate_surfaces(nvc0, 5);
   nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);
   nvc0->dirty_cp |= NVC0_NEW_CP_SURFACES;
   nvc0->images_dirty[5] |= nvc0->images_valid[5];
}
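
For context, a minimal caller sketch (hypothetical, assuming the usual gallium headers such as "pipe/p_context.h" and "pipe/p_state.h", and a context whose compute state is already bound) showing how this hook is reached through the pipe_context vtable; only the pipe_grid_info fields that nvc0_launch_grid reads above are set.

/* Hypothetical caller: direct dispatch of a 64x64 grid of 8x8x1 blocks.
 * For an indirect dispatch, point info.indirect at a buffer holding the
 * three grid dimensions and set info.indirect_offset instead. */
static void
example_launch(struct pipe_context *pipe)
{
   struct pipe_grid_info info = {
      .block = { 8, 8, 1 },   /* threads per block */
      .grid  = { 64, 64, 1 }, /* blocks per grid */
      .pc    = 0,             /* entry point offset within the program */
   };

   pipe->launch_grid(pipe, &info);
}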
Example #21
static void
nv50_resource_copy_region(struct pipe_context *pipe,
                          struct pipe_resource *dst, unsigned dst_level,
                          unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *src, unsigned src_level,
                          const struct pipe_box *src_box)
{
   struct nv50_screen *screen = nv50_context(pipe)->screen;
   int ret;
   boolean m2mf;
   unsigned dst_layer = dstz, src_layer = src_box->z;

   /* Fallback for buffers. */
   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
                                src, src_level, src_box);
      return;
   }

   assert(src->nr_samples == dst->nr_samples);

   /* take the M2MF copy path when the formats match or at least share a
    * block size; otherwise fall through to the 2D engine blit below */
   m2mf = (src->format == dst->format) ||
      (util_format_get_blocksizebits(src->format) ==
       util_format_get_blocksizebits(dst->format));

   nv04_resource(dst)->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   if (m2mf) {
      struct nv50_m2mf_rect drect, srect;
      unsigned i;
      unsigned nx = util_format_get_nblocksx(src->format, src_box->width);
      unsigned ny = util_format_get_nblocksy(src->format, src_box->height);

      nv50_m2mf_rect_setup(&drect, dst, dst_level, dstx, dsty, dstz);
      nv50_m2mf_rect_setup(&srect, src, src_level,
                           src_box->x, src_box->y, src_box->z);

      for (i = 0; i < src_box->depth; ++i) {
         nv50_m2mf_transfer_rect(&screen->base.base, &drect, &srect, nx, ny);

         if (nv50_miptree(dst)->layout_3d)
            drect.z++;
         else
            drect.base += nv50_miptree(dst)->layer_stride;

         if (nv50_miptree(src)->layout_3d)
            srect.z++;
         else
            srect.base += nv50_miptree(src)->layer_stride;
      }
      return;
   }

   assert((src->format == dst->format) ||
          (nv50_2d_format_faithful(src->format) &&
           nv50_2d_format_faithful(dst->format)));

   for (; dst_layer < dstz + src_box->depth; ++dst_layer, ++src_layer) {
      ret = nv50_2d_texture_do_copy(screen->base.channel,
                                    nv50_miptree(dst), dst_level,
                                    dstx, dsty, dst_layer,
                                    nv50_miptree(src), src_level,
                                    src_box->x, src_box->y, src_layer,
                                    src_box->width, src_box->height);
      if (ret)
         return;
   }
}
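
A rough usage sketch of this hook from the caller's side; the resources and coordinates are hypothetical, and only the pipe_box fields read above are filled in.

/* Hypothetical caller: copy a 256x256 block from level 0 of `src` into
 * `dst` at (16, 16, 0). For buffer resources the same hook takes the byte
 * range in box.x / box.width, as handled by the PIPE_BUFFER path above. */
static void
example_copy_region(struct pipe_context *pipe,
                    struct pipe_resource *dst, struct pipe_resource *src)
{
   struct pipe_box box;

   box.x = 0;
   box.y = 0;
   box.z = 0;
   box.width  = 256;
   box.height = 256;
   box.depth  = 1;

   pipe->resource_copy_region(pipe, dst, 0 /* dst_level */, 16, 16, 0,
                              src, 0 /* src_level */, &box);
}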
Example #22
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->indexed) {
      nvc0_push_map_idxbuf(&ctx, nvc0);
      index_size = nvc0->idxbuf.index_size;

      if (info->primitive_restart) {
         BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
         PUSH_DATA (ctx.push, 1);
         PUSH_DATA (ctx.push, info->restart_index);
      } else
      if (nvc0->state.prim_restart) {
         IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
      }
      nvc0->state.prim_restart = info->primitive_restart;
   } else {
      if (unlikely(info->count_from_stream_output)) {
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, TRUE, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.instance_id = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);


   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->indexed)
      nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}
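
A hedged sketch of the kind of draw that can end up in this push path (the draw_calls_fallback_count stat above marks it as a fallback); the caller is hypothetical and only pipe_draw_info fields read by nvc0_push_vbo are set.

/* Hypothetical caller: an indexed, instanced triangle draw submitted
 * through the normal draw hook; the driver decides internally whether it
 * must take the CPU push path implemented above. */
static void
example_draw(struct pipe_context *pipe)
{
   struct pipe_draw_info info = {
      .mode = PIPE_PRIM_TRIANGLES,
      .indexed = TRUE,
      .start = 0,
      .count = 36,
      .instance_count = 4,
      .start_instance = 0,
      .index_bias = 0,
      .primitive_restart = FALSE,
   };

   pipe->draw_vbo(pipe, &info);
}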
Example #23
static void
nvc0_resource_copy_region(struct pipe_context *pipe,
                          struct pipe_resource *dst, unsigned dst_level,
                          unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *src, unsigned src_level,
                          const struct pipe_box *src_box)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   int ret;
   boolean m2mf;
   unsigned dst_layer = dstz, src_layer = src_box->z;

   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      nouveau_copy_buffer(&nvc0->base,
                          nv04_resource(dst), dstx,
                          nv04_resource(src), src_box->x, src_box->width);
      NOUVEAU_DRV_STAT(&nvc0->screen->base, buf_copy_bytes, src_box->width);
      return;
   }
   NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_copy_count, 1);

   /* 0 and 1 are equal, only supporting 0/1, 2, 4 and 8 */
   assert((src->nr_samples | 1) == (dst->nr_samples | 1));

   m2mf = (src->format == dst->format) ||
      (util_format_get_blocksizebits(src->format) ==
       util_format_get_blocksizebits(dst->format));

   nv04_resource(dst)->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   if (m2mf) {
      struct nv50_m2mf_rect drect, srect;
      unsigned i;
      unsigned nx = util_format_get_nblocksx(src->format, src_box->width);
      unsigned ny = util_format_get_nblocksy(src->format, src_box->height);

      nv50_m2mf_rect_setup(&drect, dst, dst_level, dstx, dsty, dstz);
      nv50_m2mf_rect_setup(&srect, src, src_level,
                           src_box->x, src_box->y, src_box->z);

      for (i = 0; i < src_box->depth; ++i) {
         nvc0->m2mf_copy_rect(nvc0, &drect, &srect, nx, ny);

         if (nv50_miptree(dst)->layout_3d)
            drect.z++;
         else
            drect.base += nv50_miptree(dst)->layer_stride;

         if (nv50_miptree(src)->layout_3d)
            srect.z++;
         else
            srect.base += nv50_miptree(src)->layer_stride;
      }
      return;
   }

   assert(nv50_2d_dst_format_faithful(dst->format));
   assert(nv50_2d_src_format_faithful(src->format));

   BCTX_REFN(nvc0->bufctx, 2D, nv04_resource(src), RD);
   BCTX_REFN(nvc0->bufctx, 2D, nv04_resource(dst), WR);
   nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx);
   nouveau_pushbuf_validate(nvc0->base.pushbuf);

   for (; dst_layer < dstz + src_box->depth; ++dst_layer, ++src_layer) {
      ret = nvc0_2d_texture_do_copy(nvc0->base.pushbuf,
                                    nv50_miptree(dst), dst_level,
                                    dstx, dsty, dst_layer,
                                    nv50_miptree(src), src_level,
                                    src_box->x, src_box->y, src_layer,
                                    src_box->width, src_box->height);
      if (ret)
         break;
   }
   nouveau_bufctx_reset(nvc0->bufctx, 0);
}
Example #24
void
nv50_push_vbo(struct nv50_context *nv50, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   boolean apply_bias = info->indexed && info->index_bias;

   ctx.push = nv50->base.pushbuf;
   ctx.translate = nv50->vertex->translate;
   ctx.packet_vertex_limit = nv50->vertex->packet_vertex_limit;
   ctx.vertex_words = nv50->vertex->vertex_size;

   /* hand each vertex buffer (mapped, or user memory) to the CPU vertex
    * translator, applying the index bias up front for per-vertex streams */
   for (i = 0; i < nv50->num_vtxbufs; ++i) {
      const struct pipe_vertex_buffer *vb = &nv50->vtxbuf[i];
      const uint8_t *data;

      if (unlikely(vb->buffer))
         data = nouveau_resource_map_offset(&nv50->base,
            nv04_resource(vb->buffer), vb->buffer_offset, NOUVEAU_BO_RD);
      else
         data = vb->user_buffer;

      if (apply_bias && likely(!(nv50->vertex->instance_bufs & (1 << i))))
         data += (ptrdiff_t)info->index_bias * vb->stride;

      ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0);
   }

   if (info->indexed) {
      if (nv50->idxbuf.buffer) {
         ctx.idxbuf = nouveau_resource_map_offset(&nv50->base,
            nv04_resource(nv50->idxbuf.buffer), nv50->idxbuf.offset,
            NOUVEAU_BO_RD);
      } else {
         ctx.idxbuf = nv50->idxbuf.user_buffer;
      }
      if (!ctx.idxbuf)
         return;
      index_size = nv50->idxbuf.index_size;
      ctx.primitive_restart = info->primitive_restart;
      ctx.restart_index = info->restart_index;
   } else {
      if (unlikely(info->count_from_stream_output)) {
         struct pipe_context *pipe = &nv50->base.pipe;
         struct nv50_so_target *targ;
         targ = nv50_so_target(info->count_from_stream_output);
         if (!targ->pq) {
            NOUVEAU_ERR("draw_stream_output not supported on pre-NVA0 cards\n");
            return;
         }
         pipe->get_query_result(pipe, targ->pq, TRUE, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL;
      index_size = 0;
      ctx.primitive_restart = FALSE;
      ctx.restart_index = 0;
   }

   ctx.instance_id = info->start_instance;
   ctx.prim = nv50_prim_gl(info->mode);

   if (info->primitive_restart) {
      BEGIN_NV04(ctx.push, NV50_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      PUSH_DATA (ctx.push, info->restart_index);
   } else
   if (nv50->state.prim_restart) {
      BEGIN_NV04(ctx.push, NV50_3D(PRIM_RESTART_ENABLE), 1);
      PUSH_DATA (ctx.push, 0);
   }
   nv50->state.prim_restart = info->primitive_restart;

   while (inst_count--) {
      BEGIN_NV04(ctx.push, NV50_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, ctx.prim);
      switch (index_size) {
      case 0:
         emit_vertices_seq(&ctx, info->start, vert_count);
         break;
      case 1:
         emit_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         emit_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         emit_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(0);
         break;
      }
      BEGIN_NV04(ctx.push, NV50_3D(VERTEX_END_GL), 1);
      PUSH_DATA (ctx.push, 0);

      ctx.instance_id++;
      ctx.prim |= NV50_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
   }
}
Example #25
void
nv50_constbufs_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   unsigned s;

   for (s = 0; s < 3; ++s) {
      unsigned p;

      if (s == PIPE_SHADER_FRAGMENT)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
      else
      if (s == PIPE_SHADER_GEOMETRY)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
      else
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;

      while (nv50->constbuf_dirty[s]) {
         const unsigned i = (unsigned)ffs(nv50->constbuf_dirty[s]) - 1;

         assert(i < NV50_MAX_PIPE_CONSTBUFS);
         nv50->constbuf_dirty[s] &= ~(1 << i);

         if (nv50->constbuf[s][i].user) {
            const unsigned b = NV50_CB_PVP + s;
            unsigned start = 0;
            unsigned words = nv50->constbuf[s][0].size / 4;
            if (i) {
               NOUVEAU_ERR("user constbufs only supported in slot 0\n");
               continue;
            }
            if (!nv50->state.uniform_buffer_bound[s]) {
               nv50->state.uniform_buffer_bound[s] = TRUE;
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
            }
            /* upload the user constbuf contents inline, in chunks bounded
             * by the remaining pushbuf space and the max packet length */
            while (words) {
               unsigned nr;

               if (!PUSH_SPACE(push, 16))
                  break;
               nr = PUSH_AVAIL(push);
               assert(nr >= 16);
               nr = MIN2(MIN2(nr - 3, words), NV04_PFIFO_MAX_PACKET_LEN);

               BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
               PUSH_DATA (push, (start << 8) | b);
               BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nr);
               PUSH_DATAp(push, &nv50->constbuf[s][0].u.data[start * 4], nr);

               start += nr;
               words -= nr;
            }
         } else {
            struct nv04_resource *res =
               nv04_resource(nv50->constbuf[s][i].u.buf);
            if (res) {
               /* TODO: allocate persistent bindings */
               const unsigned b = s * 16 + i;

               assert(nouveau_resource_mapped_by_gpu(&res->base));

               if (!nv50->constbuf[s][i].offset)
                  res->cb_slot = b;

               BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
               PUSH_DATAh(push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, (b << 16) |
                          (align(nv50->constbuf[s][i].size, 0x100) & 0xffff));
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);

               BCTX_REFN(nv50->bufctx_3d, CB(s, i), res, RD);
            } else {
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (i << 8) | p | 0);
            }
            if (i == 0)
               nv50->state.uniform_buffer_bound[s] = FALSE;
         }
      }
   }
}
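
For reference, a hedged sketch of how the two cases handled above, a user-memory constbuf and a buffer-backed one, might be bound from the state-tracker side, assuming the pipe_constant_buffer-based set_constant_buffer interface of this era; the names and sizes are illustrative.

/* Hypothetical caller: bind a user-memory constbuf to slot 0 (uploaded
 * inline by the validate code above) and a buffer-backed one to slot 1
 * (bound by GPU address). */
static void
example_bind_constbufs(struct pipe_context *pipe, struct pipe_resource *ubo,
                       const float params[4])
{
   struct pipe_constant_buffer user_cb = {
      .user_buffer = params,
      .buffer_size = 4 * sizeof(float),
   };
   struct pipe_constant_buffer ubo_cb = {
      .buffer = ubo,
      .buffer_offset = 0,
      .buffer_size = 256,
   };

   pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &user_cb);
   pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 1, &ubo_cb);
}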
Example #26
void
nvc0_tfb_validate(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct nvc0_transform_feedback_state *tfb;
   unsigned b, n, i;

   if (nvc0->gmtyprog) tfb = nvc0->gmtyprog->tfb;
   else
   if (nvc0->tevlprog) tfb = nvc0->tevlprog->tfb;
   else
      tfb = nvc0->vertprog->tfb;

   IMMED_RING(chan, RING_3D(TFB_ENABLE), (tfb && nvc0->num_tfbbufs) ? 1 : 0);

   if (tfb && tfb != nvc0->state.tfb) {
      uint8_t var[128];

      /* for each of the four TFB buffers, program its varying count and
       * stride, then the varying indices packed four per method word, or
       * disable it if unused */
      for (n = 0, b = 0; b < 4; n += tfb->varying_count[b++]) {
         if (tfb->varying_count[b]) {
            BEGIN_RING(chan, RING_3D(TFB_STREAM(b)), 3);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, tfb->varying_count[b]);
            OUT_RING  (chan, tfb->stride[b]);

            for (i = 0; i < tfb->varying_count[b]; ++i)
               var[i] = tfb->varying_index[n + i];
            for (; i & 3; ++i)
               var[i] = 0; /* zero rest of method word bits */

            BEGIN_RING(chan, RING_3D(TFB_VARYING_LOCS(b, 0)), i / 4);
            OUT_RINGp (chan, var, i / 4);

            if (nvc0->tfbbuf[b])
               nvc0_so_target(nvc0->tfbbuf[b])->stride = tfb->stride[b];
         } else {
            IMMED_RING(chan, RING_3D(TFB_VARYING_COUNT(b)), 0);
         }
      }
   }
   nvc0->state.tfb = tfb;

   if (!(nvc0->dirty & NVC0_NEW_TFB_TARGETS))
      return;
   nvc0_bufctx_reset(nvc0, NVC0_BUFCTX_TFB);

   for (b = 0; b < nvc0->num_tfbbufs; ++b) {
      struct nvc0_so_target *targ = nvc0_so_target(nvc0->tfbbuf[b]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      if (tfb)
         targ->stride = tfb->stride[b];

      if (!(nvc0->tfbbuf_dirty & (1 << b)))
         continue;

      if (!targ->clean)
         nvc0_query_fifo_wait(chan, targ->pq);
      BEGIN_RING(chan, RING_3D(TFB_BUFFER_ENABLE(b)), 5);
      OUT_RING  (chan, 1);
      OUT_RESRCh(chan, buf, targ->pipe.buffer_offset, NOUVEAU_BO_WR);
      OUT_RESRCl(chan, buf, targ->pipe.buffer_offset, NOUVEAU_BO_WR);
      OUT_RING  (chan, targ->pipe.buffer_size);
      if (!targ->clean) {
         nvc0_query_pushbuf_submit(chan, targ->pq, 0x4);
      } else {
         OUT_RING(chan, 0); /* TFB_BUFFER_OFFSET */
         targ->clean = FALSE;
      }
      nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_TFB, buf, NOUVEAU_BO_WR);
   }
   for (; b < 4; ++b)
      IMMED_RING(chan, RING_3D(TFB_BUFFER_ENABLE(b)), 0);
}
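
A small, hedged sketch of where the targets consumed above come from: a stream-output target created over a buffer via the pipe_context hook. Binding it with set_stream_output_targets (not shown here) is presumably what makes it visible in nvc0->tfbbuf[] for the validation above; the helper name and sizes are hypothetical.

/* Hypothetical helper: wrap the first 64 KiB of `buf` in a stream-output
 * target. Once bound, nvc0_tfb_validate programs its offset and size and
 * waits on its query when an append offset must be restored. */
static struct pipe_stream_output_target *
example_make_so_target(struct pipe_context *pipe, struct pipe_resource *buf)
{
   return pipe->create_stream_output_target(pipe, buf, 0, 64 * 1024);
}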