int nouveau_bo_unpin(struct nouveau_bo *nvbo) { struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; int ret; if (--nvbo->pin_refcnt) return 0; ret = ttm_bo_reserve(bo, false, false, false, 0); if (ret) return ret; nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); ret = nouveau_bo_validate(nvbo, false, false, false); if (ret == 0) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: dev_priv->fb_aper_free += bo->mem.size; break; case TTM_PL_TT: dev_priv->gart_info.aper_free += bo->mem.size; break; default: break; } } ttm_bo_unreserve(bo); return ret; }
static int nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) { struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); /* as long as the bo isn't in vram, and isn't tiled, we've got * nothing to do here. */ if (bo->mem.mem_type != TTM_PL_VRAM) { if (dev_priv->card_type < NV_50 || !nouveau_bo_tile_layout(nvbo)) return 0; } /* make sure bo is in mappable vram */ if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages) return 0; nvbo->placement.fpfn = 0; nvbo->placement.lpfn = dev_priv->fb_mappable_pages; nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0); return nouveau_bo_validate(nvbo, false, true, false); }
int nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) { struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; int ret; if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { NV_ERROR(nouveau_bdev(bo->bdev)->dev, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo, 1 << bo->mem.mem_type, memtype); return -EINVAL; } if (nvbo->pin_refcnt++) return 0; ret = ttm_bo_reserve(bo, false, false, false, 0); if (ret) goto out; nouveau_bo_placement_set(nvbo, memtype, 0); ret = nouveau_bo_validate(nvbo, false, false, false); if (ret == 0) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: dev_priv->fb_aper_free -= bo->mem.size; break; case TTM_PL_TT: dev_priv->gart_info.aper_free -= bo->mem.size; break; default: break; } } ttm_bo_unreserve(bo); out: if (unlikely(ret)) nvbo->pin_refcnt--; return ret; }
void nvc0_m2mf_push_linear(struct nouveau_context *nv, struct nouveau_bo *dst, unsigned offset, unsigned domain, unsigned size, void *data) { struct nouveau_channel *chan = nv->screen->channel; uint32_t *src = (uint32_t *)data; unsigned count = (size + 3) / 4; MARK_RING (chan, 8, 2); BEGIN_RING(chan, RING_MF(OFFSET_OUT_HIGH), 2); OUT_RELOCh(chan, dst, offset, domain | NOUVEAU_BO_WR); OUT_RELOCl(chan, dst, offset, domain | NOUVEAU_BO_WR); BEGIN_RING(chan, RING_MF(LINE_LENGTH_IN), 2); OUT_RING (chan, size); OUT_RING (chan, 1); BEGIN_RING(chan, RING_MF(EXEC), 1); OUT_RING (chan, 0x100111); while (count) { unsigned nr = AVAIL_RING(chan); if (nr < 9) { FIRE_RING(chan); nouveau_bo_validate(chan, dst, NOUVEAU_BO_WR); continue; } nr = MIN2(count, nr - 1); nr = MIN2(nr, NV04_PFIFO_MAX_PACKET_LEN); BEGIN_RING_NI(chan, RING_MF(DATA), nr); OUT_RINGp (chan, src, nr); src += nr; count -= nr; } }
void nvc0_cb_push(struct nouveau_context *nv, struct nouveau_bo *bo, unsigned domain, unsigned base, unsigned size, unsigned offset, unsigned words, const uint32_t *data) { struct nouveau_channel *chan = nv->screen->channel; assert(!(offset & 3)); size = align(size, 0x100); MARK_RING (chan, 16, 2); BEGIN_RING(chan, RING_3D(CB_SIZE), 3); OUT_RING (chan, size); OUT_RELOCh(chan, bo, base, domain | NOUVEAU_BO_WR); OUT_RELOCl(chan, bo, base, domain | NOUVEAU_BO_WR); while (words) { unsigned nr = AVAIL_RING(chan); nr = MIN2(nr, words); nr = MIN2(nr, NV04_PFIFO_MAX_PACKET_LEN - 1); BEGIN_RING_1I(chan, RING_3D(CB_POS), nr + 1); OUT_RING (chan, offset); OUT_RINGp (chan, data, nr); words -= nr; data += nr; offset += nr * 4; if (words) { MARK_RING(chan, 6, 1); nouveau_bo_validate(chan, bo, domain | NOUVEAU_BO_WR); } } }
/* Re-emit state for every dirty constant buffer of the three shader
 * stages (vertex, geometry, fragment).  Slot 0 (GL uniforms) is always
 * streamed through CB_ADDR/CB_DATA into the screen's shared uniform
 * bo; other slots would be bound directly as CB_DEF ranges, but that
 * path is currently disabled (see the assert below).
 */
void nv50_constbufs_validate(struct nv50_context *nv50)
{
   struct nouveau_channel *chan = nv50->screen->base.channel;
   unsigned s;

   for (s = 0; s < 3; ++s) {
      struct nv04_resource *res;
      int i;
      unsigned p, b;

      /* Select the SET_PROGRAM_CB stage field for this shader type. */
      if (s == PIPE_SHADER_FRAGMENT)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
      else
      if (s == PIPE_SHADER_GEOMETRY)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
      else
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;

      /* Process dirty slots lowest-index first, clearing each bit. */
      while (nv50->constbuf_dirty[s]) {
         struct nouveau_bo *bo;
         unsigned start = 0;
         unsigned words = 0;

         i = ffs(nv50->constbuf_dirty[s]) - 1;
         nv50->constbuf_dirty[s] &= ~(1 << i);

         res = nv04_resource(nv50->constbuf[s][i]);
         if (!res) {
            /* Slot unbound: disable its binding (valid bit = 0).
             * Slot 0 is never explicitly unbound here.
             */
            if (i != 0) {
               BEGIN_RING(chan, RING_3D(SET_PROGRAM_CB), 1);
               OUT_RING  (chan, (i << 8) | p | 0);
            }
            continue;
         }

         if (i == 0) {
            b = NV50_CB_PVP + s;

            /* always upload GL uniforms through CB DATA */
            bo = nv50->screen->uniforms;
            words = res->base.width0 / 4;
         } else {
            b = s * 16 + i;

            /* NOTE(review): dead path — direct CB binding for
             * non-zero slots is not wired up yet.
             */
            assert(0);

            if (!nouveau_resource_mapped_by_gpu(&res->base)) {
               nouveau_buffer_migrate(&nv50->base, res, NOUVEAU_BO_VRAM);

               BEGIN_RING(chan, RING_3D(CODE_CB_FLUSH), 1);
               OUT_RING  (chan, 0);
            }
            /* 6 dwords, 2 relocations for the CB_DEF + bind emit. */
            MARK_RING (chan, 6, 2);
            BEGIN_RING(chan, RING_3D(CB_DEF_ADDRESS_HIGH), 3);
            OUT_RESRCh(chan, res, 0, NOUVEAU_BO_RD);
            OUT_RESRCl(chan, res, 0, NOUVEAU_BO_RD);
            OUT_RING  (chan, (b << 16) | (res->base.width0 & 0xffff));
            BEGIN_RING(chan, RING_3D(SET_PROGRAM_CB), 1);
            OUT_RING  (chan, (b << 12) | (i << 8) | p | 1);

            bo = res->bo;

            nv50_bufctx_add_resident(nv50, NV50_BUFCTX_CONSTANT, res,
                                     res->domain | NOUVEAU_BO_RD);
         }

         if (words) {
            /* Keep the target bo resident before streaming data.
             * NOTE(review): res->domain here belongs to the user
             * resource while bo is the screen uniform buffer —
             * presumably compatible; confirm against the callers.
             */
            MARK_RING(chan, 8, 1);
            nouveau_bo_validate(chan, bo, res->domain | NOUVEAU_BO_WR);
         }

         while (words) {
            unsigned nr = AVAIL_RING(chan);

            /* Not enough room for header + a useful payload: flush
             * and re-validate, then retry.
             */
            if (nr < 16) {
               FIRE_RING(chan);
               nouveau_bo_validate(chan, bo, res->domain | NOUVEAU_BO_WR);
               continue;
            }
            /* 3 dwords of method headers precede the payload. */
            nr = MIN2(MIN2(nr - 3, words), NV04_PFIFO_MAX_PACKET_LEN);

            BEGIN_RING(chan, RING_3D(CB_ADDR), 1);
            OUT_RING  (chan, (start << 8) | b);
            BEGIN_RING_NI(chan, RING_3D(CB_DATA(0)), nr);
            OUT_RINGp (chan, &res->data[start * 4], nr);

            start += nr;
            words -= nr;
         }
      }
   }
}