static int nvc0_graph_zbc_depth_get(struct nvc0_graph_priv *priv, int format, const u32 ds, const u32 l2) { struct nouveau_ltc *ltc = nouveau_ltc(priv); int zbc = -ENOSPC, i; for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { if (priv->zbc_depth[i].format) { if (priv->zbc_depth[i].format != format) continue; if (priv->zbc_depth[i].ds != ds) continue; if (priv->zbc_depth[i].l2 != l2) { WARN_ON(1); return -EINVAL; } return i; } else { zbc = (zbc < 0) ? i : zbc; } } if (zbc < 0) return zbc; priv->zbc_depth[zbc].format = format; priv->zbc_depth[zbc].ds = ds; priv->zbc_depth[zbc].l2 = l2; ltc->zbc_depth_get(ltc, zbc, l2); nvc0_graph_zbc_clear_depth(priv, zbc); return zbc; }
static int nvc0_graph_zbc_color_get(struct nvc0_graph_priv *priv, int format, const u32 ds[4], const u32 l2[4]) { struct nouveau_ltc *ltc = nouveau_ltc(priv); int zbc = -ENOSPC, i; for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { if (priv->zbc_color[i].format) { if (priv->zbc_color[i].format != format) continue; if (memcmp(priv->zbc_color[i].ds, ds, sizeof( priv->zbc_color[i].ds))) continue; if (memcmp(priv->zbc_color[i].l2, l2, sizeof( priv->zbc_color[i].l2))) { WARN_ON(1); return -EINVAL; } return i; } else { zbc = (zbc < 0) ? i : zbc; } } if (zbc < 0) return zbc; memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds)); memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2)); priv->zbc_color[zbc].format = format; ltc->zbc_color_get(ltc, zbc, l2); nvc0_graph_zbc_clear_color(priv, zbc); return zbc; }
/*
 * Release a VRAM allocation: free any compression tags it holds,
 * hand the pages back through the nv50 backend, and free the
 * tracking structure.  *pmem is cleared unconditionally; passing a
 * NULL allocation is a no-op.
 */
void
nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
	struct nouveau_ltc *ltc = nouveau_ltc(pfb);
	struct nouveau_mem *mem;

	/* Take ownership of the allocation and clear the caller's
	 * pointer so it cannot be put twice.
	 */
	mem = *pmem;
	*pmem = NULL;
	if (unlikely(!mem))
		return;

	mutex_lock(&pfb->base.mutex);
	if (mem->tag)
		ltc->tags_free(ltc, &mem->tag);
	__nv50_ram_put(pfb, mem);
	mutex_unlock(&pfb->base.mutex);

	kfree(mem);
}
int nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, u32 memtype, struct nouveau_mem **pmem) { struct nouveau_mm *mm = &pfb->vram; struct nouveau_mm_node *r; struct nouveau_mem *mem; int type = (memtype & 0x0ff); int back = (memtype & 0x800); const bool comp = nvc0_pte_storage_type_map[type] != type; int ret; size >>= 12; align >>= 12; ncmin >>= 12; if (!ncmin) ncmin = size; mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) return -ENOMEM; INIT_LIST_HEAD(&mem->regions); mem->size = size; mutex_lock(&pfb->base.mutex); if (comp) { struct nouveau_ltc *ltc = nouveau_ltc(pfb); /* compression only works with lpages */ if (align == (1 << (17 - 12))) { int n = size >> 5; ltc->tags_alloc(ltc, n, &mem->tag); } if (unlikely(!mem->tag)) type = nvc0_pte_storage_type_map[type]; }