struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this dma-buf was not exported by ion, reject the import */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
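A minimal caller-side sketch of the import path above (hypothetical driver code: example_import_fd is invented here, and ion_free() is assumed to be this driver's usual handle-release call). Note that this variant can return NULL as well as an ERR_PTR, since dma_buf_get() may return NULL and ERR_PTR(PTR_ERR(NULL)) is NULL, so callers must check with IS_ERR_OR_NULL():

static int example_import_fd(struct ion_client *client, int fd)
{
	struct ion_handle *handle;

	/* fd is a dma-buf file descriptor received from userspace */
	handle = ion_import_dma_buf(client, fd);
	if (IS_ERR_OR_NULL(handle))
		return handle ? PTR_ERR(handle) : -EINVAL;

	/* ... map and use the buffer through the handle ... */

	/* drop the reference the import took */
	ion_free(client, handle);
	return 0;
}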
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap ID */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (IS_ERR_OR_NULL(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	mutex_lock(&client->lock);
	ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	return handle;

end:
	ion_buffer_put(buffer);
	return handle;
}
struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer)
{
	struct ion_handle *handle = NULL;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	return handle;
}
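ion_import() takes a raw struct ion_buffer *, so it is only usable for in-kernel sharing where some client already holds the buffer. A minimal sketch of that flow (hypothetical: it assumes an ion_share(client, handle) helper that resolves a handle to its underlying buffer, as ION trees of this vintage exposed; example_share_buffer is invented here):

static struct ion_handle *example_share_buffer(struct ion_client *exporter,
					       struct ion_handle *handle,
					       struct ion_client *importer)
{
	struct ion_buffer *buffer;

	/* assumed helper: resolve the exporter's handle to its buffer */
	buffer = ion_share(exporter, handle);
	if (IS_ERR_OR_NULL(buffer))
		return ERR_PTR(-EINVAL);

	/*
	 * ion_import() either bumps the refcount of an existing handle or
	 * creates a new handle for this buffer in the importing client.
	 */
	return ion_import(importer, buffer);
}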
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap ID */
		if (!((1 << heap->id) & flags))
			continue;
		/* do not allow a non-secure heap if secure was requested */
		if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
			continue;
		if (heap->id == ION_CP_MM_HEAP_ID) {
			if (client->task == NULL) {
				printk(KERN_WARNING "MM ION alloc request from %s (%d)\n",
				       client->name, client->pid);
			} else {
				char task_comm[TASK_COMM_LEN];

				get_task_comm(task_comm, client->task);
				printk(KERN_WARNING "MM ION alloc request from %s (%d)\n",
				       task_comm, client->pid);
			}
		}
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
		/* remember each heap that failed, for the debug print below */
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN - dbg_str_idx - 1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						 len_left, "%s ", heap->name);

			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (IS_ERR_OR_NULL(buffer)) {
		pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s with heap mask 0x%x\n",
			 len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	mutex_lock(&client->lock);
	ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	return handle;

end:
	ion_buffer_put(buffer);
	return handle;
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		if (!((1 << heap->id) & flags))
			continue;
		if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN - dbg_str_idx - 1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						 len_left, "%s ", heap->name);

			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer)) {
		pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s with heap mask 0x%x\n",
			 len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

#if defined(CONFIG_MACH_LGE_L9II_OPEN_EU)
	down_read(&dev->lock);
#else
	mutex_lock(&dev->lock);
#endif
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap ID */
		if (!((1 << heap->id) & flags))
			continue;
		/* do not allow a non-secure heap if secure was requested */
		if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
		/* remember each heap that failed, for the debug print below */
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN - dbg_str_idx - 1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						 len_left, "%s ", heap->name);

			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
			}
		}
	}
#if defined(CONFIG_MACH_LGE_L9II_OPEN_EU)
	up_read(&dev->lock);
#else
	mutex_unlock(&dev->lock);
#endif

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer)) {
		pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s with heap mask 0x%x\n",
			 len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
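Finally, a minimal allocation sketch against the reworked ion_alloc() (hypothetical client code: example_alloc is invented here, and the heap-type and heap-id constants, SZ_4K, and the three-argument ion_client_create() are assumptions based on msm_ion headers of this era):

static int example_alloc(struct ion_device *dev)
{
	struct ion_client *client;
	struct ion_handle *handle;

	/* heap_mask restricts which heap *types* this client may use */
	client = ion_client_create(dev, 1 << ION_HEAP_TYPE_SYSTEM, "example");
	if (IS_ERR_OR_NULL(client))
		return -ENOMEM;

	/* flags carries the heap *id* bits, tried in priority order */
	handle = ion_alloc(client, SZ_4K, SZ_4K, 1 << ION_SYSTEM_HEAP_ID);
	if (IS_ERR(handle)) {	/* this variant never returns NULL */
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}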