/* Free a VRAM memory object previously obtained from nvc0_ram_get().
 * Releases any compression tags attached to the object, then hands the
 * remaining teardown (region list, *pmem clearing) to nv50_ram_put().
 */
void
nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
	struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);

	/* Fix: guard against *pmem == NULL before touching ->tag.  The
	 * mutex-protected variant of this function treats a NULL object
	 * as a no-op, so callers may legitimately pass one; dereferencing
	 * it unconditionally here would oops. */
	if (*pmem && (*pmem)->tag)
		ltcg->tags_free(ltcg, &(*pmem)->tag);

	nv50_ram_put(pfb, pmem);
}
/* Free a VRAM memory object previously obtained from nvc0_ram_get().
 *
 * *pmem is cleared immediately so the caller never sees a stale pointer;
 * a NULL object is a no-op.  Compression tags and the VRAM regions are
 * released under pfb->base.mutex; the containing structure is freed
 * after the lock is dropped.
 */
void
nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
	struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);
	struct nouveau_mem *node = *pmem;

	*pmem = NULL;
	if (unlikely(!node))
		return;

	mutex_lock(&pfb->base.mutex);
	if (node->tag)
		ltcg->tags_free(ltcg, &node->tag);
	__nv50_ram_put(pfb, node);
	mutex_unlock(&pfb->base.mutex);

	kfree(node);
}
/* Allocate a VRAM memory object.
 *
 * @size, @align, @ncmin are byte quantities, converted to 4KiB-page
 * units below; a zero @ncmin means the allocation must be contiguous.
 * @memtype packs the PTE storage type in bits 0-7 and a flag in bit 11
 * (named "back" — presumably "allocate from the top of VRAM"; confirm
 * against the allocation loop).  On success *pmem receives the object.
 *
 * NOTE(review): this chunk ends mid-function — the mm allocation loop
 * and the unlock/error paths are not visible here.
 */
int nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
		 u32 memtype, struct nouveau_mem **pmem)
{
	struct nouveau_mm *mm = &pfb->vram;
	struct nouveau_mm_node *r;
	struct nouveau_mem *mem;
	int type = (memtype & 0x0ff);	/* PTE storage type, bits 0-7 */
	int back = (memtype & 0x800);	/* bit 11 flag — see header note */
	/* a type that maps onto a different storage type is compressible */
	const bool comp = nvc0_pte_storage_type_map[type] != type;
	int ret;

	/* convert bytes to 4KiB (small) pages */
	size >>= 12;
	align >>= 12;
	ncmin >>= 12;
	if (!ncmin)
		ncmin = size;	/* 0 => fully contiguous allocation */

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	INIT_LIST_HEAD(&mem->regions);
	mem->size = size;

	mutex_lock(&pfb->base.mutex);
	if (comp) {
		struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);

		/* compression only works with lpages */
		if (align == (1 << (17 - 12))) {
			/* one tag per 32 small pages — TODO confirm ratio
			 * against the ltcg implementation */
			int n = size >> 5;
			ltcg->tags_alloc(ltcg, n, &mem->tag);
		}

		/* no tags available (or wrong alignment): fall back to the
		 * equivalent uncompressed storage type */
		if (unlikely(!mem->tag))
			type = nvc0_pte_storage_type_map[type];
	}