/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
/*
 * Drop one pin reference on @h under the client's MRU lock.
 *
 * If the handle is already unpinned this is a caller bug: log it and
 * return 0 without touching the pin count.  Otherwise decrement the pin
 * count; when it reaches zero and the handle owns an IOVMM-backed
 * allocation, zap a clean secure mapping (so its contents cannot leak
 * once unpinned) and park the area on the MRU list for possible reuse.
 *
 * Returns 1 if the handle became fully unpinned and its VM area was
 * placed on the MRU list, 0 otherwise.  Drops one handle reference via
 * nvmap_handle_put() on the normal path.
 *
 * NOTE(review): the "already unpinned" early-return path skips
 * nvmap_handle_put() — presumably deliberate since the caller's
 * pin/reference pairing is already broken, but worth confirming.
 */
static int handle_unpin(struct nvmap_client *client, struct nvmap_handle *h)
{
	int ret = 0;

	nvmap_mru_lock(client->share);

	/* Unbalanced unpin: report and bail without going negative. */
	if (atomic_read(&h->pin) == 0) {
		nvmap_err(client, "%s unpinning unpinned handle %p\n",
			  current->group_leader->comm, h);
		nvmap_mru_unlock(client->share);
		return 0;
	}

	/* A pinned handle must have backing storage. */
	BUG_ON(!h->alloc);

	if (!atomic_dec_return(&h->pin)) {
		/* Last pin dropped; only IOVMM-backed handles with a live
		 * area need MRU bookkeeping. */
		if (h->heap_pgalloc && h->pgalloc.area) {
			/* if a secure handle is clean (i.e., mapped into
			 * IOVMM, it needs to be zapped on unpin. */
			if (h->secure && !h->pgalloc.dirty) {
				tegra_iovmm_zap_vm(h->pgalloc.area);
				h->pgalloc.dirty = true;
			}
			nvmap_mru_insert_locked(client->share, h);
			ret = 1;
		}
	}

	nvmap_mru_unlock(client->share);
	nvmap_handle_put(h);
	return ret;
}
/* doesn't need to be called inside nvmap_pin_lock, since this will only * expand the available VM area */ static int handle_unpin(struct nvmap_client *client, struct nvmap_handle *h, int free_vm) { int ret = 0; nvmap_mru_lock(client->share); /* */ #if defined(CONFIG_MACH_LGE) BUG_ON(!h); #endif /* */ if (atomic_read(&h->pin) == 0) { trace_handle_unpin_error(client, h, atomic_read(&h->pin)); nvmap_err(client, "%s unpinning unpinned handle %p\n", current->group_leader->comm, h); nvmap_mru_unlock(client->share); return 0; } BUG_ON(!h->alloc); if (!atomic_dec_return(&h->pin)) { if (h->heap_pgalloc && h->pgalloc.area) { /* if a secure handle is clean (i.e., mapped into * IOVMM, it needs to be zapped on unpin. */ if (h->secure && !h->pgalloc.dirty) { tegra_iovmm_zap_vm(h->pgalloc.area); h->pgalloc.dirty = true; } if (free_vm) { tegra_iovmm_free_vm(h->pgalloc.area); h->pgalloc.area = NULL; } else nvmap_mru_insert_locked(client->share, h); ret = 1; } } trace_handle_unpin(client, h, atomic_read(&h->pin)); nvmap_mru_unlock(client->share); nvmap_handle_put(h); return ret; }