phys_addr_t nvmap_pin(struct nvmap_client *client, struct nvmap_handle_ref *ref) { struct nvmap_handle *h; phys_addr_t phys; int ret = 0; h = nvmap_handle_get(ref->handle); if (WARN_ON(!h)) return -EINVAL; atomic_inc(&ref->pin); if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) { ret = -EINTR; } else { ret = wait_pin_array_locked(client, &h, 1); mutex_unlock(&client->share->pin_lock); } if (ret) { atomic_dec(&ref->pin); nvmap_handle_put(h); } else { if (h->heap_pgalloc && h->pgalloc.dirty) map_iovmm_area(h); phys = handle_phys(h); } return ret ?: phys; }
/*
 * Validate and pin an array of handles in one operation.
 *
 * @client: nvmap_client which should be used for validation;
 *	should be owned by the process which is submitting
 *	command buffers
 * @ids: array of nvmap_handle ids to pin
 * @id_type_mask: bitmask which defines the handle type field in a
 *	handle id
 * @id_type: only handles of this type will be pinned; handles with
 *	any other type are ignored
 * @nr: number of entries in @ids
 * @unique_arr: out: list of de-duplicated nvmap_handle objects which
 *	were pinned by nvmap_pin_array; must be unpinned after use
 * @unique_arr_refs: out: list of duplicated nvmap_handle_refs
 *	corresponding to @unique_arr; must be freed after use
 *
 * Returns the number of unique handles pinned (<= @nr), or a
 * negative errno on failure.
 */
int nvmap_pin_array(struct nvmap_client *client,
		    unsigned long *ids,
		    long unsigned id_type_mask,
		    long unsigned id_type, int nr,
		    struct nvmap_handle **unique_arr,
		    struct nvmap_handle_ref **unique_arr_refs)
{
	int count = 0;
	int ret = 0;
	int i;

	/* pin_lock serializes all pin/unpin activity on this share */
	if (mutex_lock_interruptible(&client->share->pin_lock)) {
		nvmap_err(client, "%s interrupted when acquiring pin lock\n",
			  current->group_leader->comm);
		return -EINTR;
	}

	/* NOTE(review): validation appears to take two references per
	 * unique handle (a pin ref and a duplicate ref) — inferred
	 * from the double put in the error path below; confirm against
	 * nvmap_validate_get_pin_array. */
	count = nvmap_validate_get_pin_array(client, ids, id_type_mask,
					     id_type, nr, unique_arr,
					     unique_arr_refs);
	if (count < 0) {
		mutex_unlock(&client->share->pin_lock);
		nvmap_warn(client, "failed to validate pin array\n");
		return count;
	}

	/* Clear the de-duplication marker set during validation. */
	for (i = 0; i < count; i++)
		unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;

	ret = wait_pin_array_locked(client, unique_arr, count);
	mutex_unlock(&client->share->pin_lock);

	if (WARN_ON(ret)) {
		/* Pinning failed: undo both references taken during
		 * validation for every unique handle. */
		for (i = 0; i < count; i++) {
			/* pin ref */
			nvmap_handle_put(unique_arr[i]);

			/* remove duplicate */
			atomic_dec(&unique_arr_refs[i]->dupes);
			nvmap_handle_put(unique_arr[i]);
		}
		return ret;
	} else {
		/* Map any dirty IOVMM areas and record the pin on each
		 * ref so callers can unpin later. */
		for (i = 0; i < count; i++) {
			if (unique_arr[i]->heap_pgalloc &&
			    unique_arr[i]->pgalloc.dirty)
				map_iovmm_area(unique_arr[i]);

			atomic_inc(&unique_arr_refs[i]->pin);
		}
	}
	return count;
}
/* * Pin handle without slow validation step */ phys_addr_t _nvmap_pin(struct nvmap_client *client, struct nvmap_handle_ref *ref) { int ret = 0; struct nvmap_handle *h; phys_addr_t phys; if (!ref) return -EINVAL; h = ref->handle; if (WARN_ON(!h)) return -EINVAL; h = nvmap_handle_get(h); atomic_inc(&ref->pin); if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) { ret = -EINTR; } else { ret = wait_pin_array_locked(client, &h, 1); mutex_unlock(&client->share->pin_lock); } if (ret) { goto err_out; } else { if (h->heap_pgalloc && h->pgalloc.dirty) ret = map_iovmm_area(h); if (ret) goto err_out_unpin; phys = handle_phys(h); } return phys; err_out_unpin: nvmap_handle_get(h); handle_unpin(client, h, true); err_out: atomic_dec(&ref->pin); nvmap_handle_put(h); return ret; }
/* pins a list of handle_ref objects; same conditions apply as to
 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref.
 *
 * @client: client on whose behalf the handles are pinned
 * @nr:     number of entries in @ids
 * @ids:    handle ids to pin
 *
 * Returns 0 on success; on failure every pin-count increment taken
 * during validation is rolled back and all handle references are
 * dropped.
 */
int nvmap_pin_ids(struct nvmap_client *client,
		  unsigned int nr, const unsigned long *ids)
{
	int ret = 0;
	unsigned int i;
	/* NOTE(review): ids are reinterpreted as handle pointers (and
	 * const is discarded) — presumably an id IS the handle address
	 * in this driver; confirm against the id allocation scheme. */
	struct nvmap_handle **h = (struct nvmap_handle **)ids;
	struct nvmap_handle_ref *ref;

	/* to optimize for the common case (client provided valid handle
	 * references and the pin succeeds), increment the handle_ref pin
	 * count during validation. in error cases, the tree will need to
	 * be re-walked, since the handle_ref is discarded so that an
	 * allocation isn't required. if a handle_ref is not found,
	 * locally validate that the caller has permission to pin the handle;
	 * handle_refs are not created in this case, so it is possible that
	 * if the caller crashes after pinning a global handle, the handle
	 * will be permanently leaked. */
	nvmap_ref_lock(client);
	for (i = 0; i < nr; i++) {
		ref = _nvmap_validate_id_locked(client, ids[i]);
		if (ref) {
			atomic_inc(&ref->pin);
			nvmap_handle_get(h[i]);
		} else {
			struct nvmap_handle *verify;
			/* ref lock must be dropped around the (slow)
			 * global validation, then retaken */
			nvmap_ref_unlock(client);
			verify = nvmap_validate_get(client, ids[i]);
			if (verify) {
				nvmap_warn(client, "%s pinning unreferenced "
					   "handle %p\n",
					   current->group_leader->comm, h[i]);
			} else {
				/* current entry took no pin/ref:
				 * truncate nr to exclude it */
				ret = -EPERM;
				nr = i;
				break;
			}
			nvmap_ref_lock(client);
		}
		if (!h[i]->alloc) {
			/* current entry DID take a pin/ref above:
			 * truncate nr to include it for rollback */
			ret = -EFAULT;
			nr = i + 1;
			break;
		}
	}
	nvmap_ref_unlock(client);

	if (ret)
		goto out;

	ret = mutex_lock_interruptible(&client->share->pin_lock);
	if (WARN_ON(ret))
		goto out;

	ret = wait_pin_array_locked(client, h, nr);
	mutex_unlock(&client->share->pin_lock);
	if (ret) {
		/* NOTE(review): this collapses any error from
		 * wait_pin_array_locked into -EINTR — confirm this is
		 * intentional */
		ret = -EINTR;
	} else {
		for (i = 0; i < nr; i++) {
			if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
				map_iovmm_area(h[i]);
		}
	}

out:
	if (ret) {
		/* roll back: re-walk the ref tree to undo the pin-count
		 * increments, then drop the handle references */
		nvmap_ref_lock(client);
		for (i = 0; i < nr; i++) {
			if (!ids[i])
				continue;

			ref = _nvmap_validate_id_locked(client, ids[i]);
			if (!ref) {
				nvmap_warn(client, "%s freed handle %p "
					   "during pinning\n",
					   current->group_leader->comm,
					   (void *)ids[i]);
				continue;
			}
			atomic_dec(&ref->pin);
		}
		nvmap_ref_unlock(client);

		for (i = 0; i < nr; i++)
			if (h[i])
				nvmap_handle_put(h[i]);
	}

	return ret;
}