/* Maybe just migrate to GART right away if we actually need to do this. */ boolean nouveau_buffer_download(struct nouveau_context *nv, struct nv04_resource *buf, unsigned start, unsigned size) { struct nouveau_mm_allocation *mm; struct nouveau_bo *bounce = NULL; uint32_t offset; assert(buf->domain == NOUVEAU_BO_VRAM); mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset); if (!bounce) return FALSE; nv->copy_data(nv, bounce, offset, NOUVEAU_BO_GART, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM, size); if (nouveau_bo_map(bounce, NOUVEAU_BO_RD, nv->screen->client)) return FALSE; memcpy(buf->data + start, (uint8_t *)bounce->map + offset, size); buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING; nouveau_bo_ref(NULL, &bounce); if (mm) nouveau_mm_free(mm); return TRUE; }
static boolean nv50_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size) { struct nv50_screen *screen = nv50->screen; int ret; if (q->bo) { nouveau_bo_ref(NULL, &q->bo); if (q->mm) { if (q->ready) nouveau_mm_free(q->mm); else nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work, q->mm); } } if (size) { q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base); if (!q->bo) return FALSE; q->offset = q->base; ret = nouveau_bo_map(q->bo, 0, screen->base.client); if (ret) { nv50_query_allocate(nv50, q, 0); return FALSE; } q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base); } return TRUE; }
static bool nv50_hw_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size) { struct nv50_screen *screen = nv50->screen; struct nv50_hw_query *hq = nv50_hw_query(q); int ret; if (hq->bo) { nouveau_bo_ref(NULL, &hq->bo); if (hq->mm) { if (hq->state == NV50_HW_QUERY_STATE_READY) nouveau_mm_free(hq->mm); else nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work, hq->mm); } } if (size) { hq->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &hq->bo, &hq->base_offset); if (!hq->bo) return false; hq->offset = hq->base_offset; ret = nouveau_bo_map(hq->bo, 0, screen->base.client); if (ret) { nv50_hw_query_allocate(nv50, q, 0); return false; } hq->data = (uint32_t *)((uint8_t *)hq->bo->map + hq->base_offset); } return true; }
/* Return a compression-tag node to the LTCG's tag allocator. */
static void
nvc0_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode)
{
   nouveau_mm_free(&((struct nvc0_ltcg_priv *)ltcg)->tags, pnode);
}
/* Destructor for an nv04 instance object: give its instmem heap node
 * back to the allocator, then run the common instobj teardown.
 */
static void
nv04_instobj_dtor(struct nouveau_object *object)
{
   struct nv04_instobj_priv *node = (void *)object;
   struct nv04_instmem_priv *priv = (void *)object->engine;

   nouveau_mm_free(&priv->heap, &node->mem);
   nouveau_instobj_destroy(&node->base);
}
/* Tear down tile region i: clear the shadow state for the tile registers
 * and return its compression-tag node to the fb's tag allocator.
 */
void
nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
   tile->zcomp = 0;
   tile->pitch = 0;
   tile->limit = 0;
   tile->addr = 0;
   nouveau_mm_free(&pfb->tags, &tile->tag);
}
/* GF100 LTC destructor: shut down the tag allocator, hand the tag RAM
 * back to VRAM, then run the common LTC teardown.
 */
void
gf100_ltc_dtor(struct nouveau_object *object)
{
   struct nvkm_ltc_priv *priv = (void *)object;
   struct nouveau_fb *pfb = nouveau_fb(object);

   nouveau_mm_fini(&priv->tags);
   nouveau_mm_free(&pfb->vram, &priv->tag_ram);
   nvkm_ltc_destroy(priv);
}
/* NVC0 LTCG destructor: shut down the tag allocator, return the tag RAM
 * to VRAM, then run the common LTCG teardown.
 */
static void
nvc0_ltcg_dtor(struct nouveau_object *object)
{
   struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)object;
   struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
   struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent);

   nouveau_mm_fini(&priv->tags);
   nouveau_mm_free(&pfb->vram, &priv->tag_ram);
   nouveau_ltcg_destroy(ltcg);
}
/* Destroy a gpuobj: optionally scrub its contents, release its parent-heap
 * node, tear down its own sub-heap (if it had one), then run the common
 * object teardown.
 */
void
nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj)
{
   int i;

   /* Zero the object's backing words first if the flag requests it. */
   if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)
      for (i = 0; i < gpuobj->size; i += 4)
         nv_wo32(gpuobj, i, 0x00000000);

   /* Return our node to the parent gpuobj's heap, if we were suballocated. */
   if (gpuobj->node)
      nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node);

   /* A non-zero block_size means this object hosted a heap of its own. */
   if (gpuobj->heap.block_size)
      nouveau_mm_fini(&gpuobj->heap);

   nouveau_object_destroy(&gpuobj->base);
}
/* Return a compression-tag node to the LTC's tag allocator. */
static void
nvkm_ltc_tags_free(struct nouveau_ltc *ltc, struct nouveau_mm_node **pnode)
{
   nouveau_mm_free(&((struct nvkm_ltc_priv *)ltc)->tags, pnode);
}