phys_addr_t nvmap_pin(struct nvmap_client *client, struct nvmap_handle_ref *ref) { struct nvmap_handle *h; phys_addr_t phys; int ret = 0; h = nvmap_handle_get(ref->handle); if (WARN_ON(!h)) return -EINVAL; atomic_inc(&ref->pin); if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) { ret = -EINTR; } else { ret = wait_pin_array_locked(client, &h, 1); mutex_unlock(&client->share->pin_lock); } if (ret) { atomic_dec(&ref->pin); nvmap_handle_put(h); } else { if (h->heap_pgalloc && h->pgalloc.dirty) map_iovmm_area(h); phys = handle_phys(h); } return ret ?: phys; }
/*
 * nvmap_handle_address - look up a handle id and return its physical
 * address.
 * @c:  client used to validate the id
 * @id: handle id to resolve
 *
 * Returns the physical address of the handle named by @id, or -EPERM
 * when the id does not resolve for this client.  The handle's lock is
 * held while the address is read, and the lookup reference is dropped
 * before returning.
 */
phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
{
	struct nvmap_handle *handle;
	phys_addr_t addr;

	handle = nvmap_get_handle_id(c, id);
	if (!handle)
		return -EPERM;

	mutex_lock(&handle->lock);
	addr = handle_phys(handle);
	mutex_unlock(&handle->lock);

	nvmap_handle_put(handle);
	return addr;
}
/* * Pin handle without slow validation step */ phys_addr_t _nvmap_pin(struct nvmap_client *client, struct nvmap_handle_ref *ref) { int ret = 0; struct nvmap_handle *h; phys_addr_t phys; if (!ref) return -EINVAL; h = ref->handle; if (WARN_ON(!h)) return -EINVAL; h = nvmap_handle_get(h); atomic_inc(&ref->pin); if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) { ret = -EINTR; } else { ret = wait_pin_array_locked(client, &h, 1); mutex_unlock(&client->share->pin_lock); } if (ret) { goto err_out; } else { if (h->heap_pgalloc && h->pgalloc.dirty) ret = map_iovmm_area(h); if (ret) goto err_out_unpin; phys = handle_phys(h); } return phys; err_out_unpin: nvmap_handle_get(h); handle_unpin(client, h, true); err_out: atomic_dec(&ref->pin); nvmap_handle_put(h); return ret; }
/*
 * _nvmap_get_addr_from_id - translate an id straight into a physical
 * address.
 * @id: handle id; here this is the handle pointer itself carried in a
 *      u32 (no lookup or validation is performed)
 *
 * The caller must have already validated and pinned the handle.
 * Returns the handle's physical address.
 */
phys_addr_t _nvmap_get_addr_from_id(u32 id)
{
	return handle_phys((struct nvmap_handle *)id);
}