void *hmm_alloc(size_t bytes, enum hmm_bo_type type, int from_highmem,
		unsigned int userptr, bool cached)
{
	unsigned int pgnr;
	struct hmm_buffer_object *bo;
	int ret;

	mutex_lock(&bo_device.mm_lock);

	/* Get the page number from the size */
	pgnr = size_to_pgnr_ceil(bytes);

	/*
	 * Buffer object structure init:
	 * allocates the hmm_buffer_object and initializes it,
	 * adds the bo to active_bo_list in bo_device,
	 * does not allocate any memory.
	 */
	bo = hmm_bo_create(&bo_device, pgnr);
	if (!bo) {
		v4l2_err(&atomisp_dev, "hmm_bo_create failed.\n");
		goto create_bo_err;
	}

	/* Allocate a virtual address range in the ISP virtual space */
	ret = hmm_bo_alloc_vm(bo);
	if (ret) {
		v4l2_err(&atomisp_dev, "hmm_bo_alloc_vm failed.\n");
		goto alloc_vm_err;
	}

	/* Allocate the backing pages */
	ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
	if (ret) {
		v4l2_err(&atomisp_dev, "hmm_bo_alloc_pages failed.\n");
		goto alloc_page_err;
	}

	/* Bind the virtual address range and the pages together */
	ret = hmm_bo_bind(bo);
	if (ret) {
		v4l2_err(&atomisp_dev, "hmm_bo_bind failed.\n");
		goto bind_err;
	}

	mutex_unlock(&bo_device.mm_lock);

	return (void *)bo->vm_node->start;

bind_err:
	hmm_bo_free_pages(bo);
alloc_page_err:
	hmm_bo_free_vm(bo);
alloc_vm_err:
	hmm_bo_unref(bo);
create_bo_err:
	mutex_unlock(&bo_device.mm_lock);
	return NULL;
}
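Note how the error labels unwind the allocation steps in exact reverse order, the standard kernel goto-cleanup pattern. The helper size_to_pgnr_ceil() is not shown above; here is a plausible sketch, assuming (as the "ceil" suffix suggests) it only rounds the byte count up to whole pages:

/*
 * Hypothetical sketch of size_to_pgnr_ceil(); the real helper is not
 * shown in this listing. Assumption: it rounds the requested byte
 * count up to a whole number of pages.
 */
static unsigned int size_to_pgnr_ceil(size_t bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}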
static void hmm_bo_release(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev;
	unsigned long flags;

	check_bo_null_return_void(bo);

	bdev = bo->bdev;

	/* Remove it from the buffer device's buffer object list. */
	spin_lock_irqsave(&bdev->list_lock, flags);
	list_del(&bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	/*
	 * FIXME:
	 * how do we destroy the bo while it is still mmapped?
	 *
	 * Ideally this will not happen, as hmm_bo_release will only be
	 * called when the kref reaches 0, and any mmap operation will
	 * eventually call hmm_bo_ref. So if this does happen, something
	 * has gone wrong.
	 */
	if (bo->status & HMM_BO_MMAPED) {
		dev_err(atomisp_dev,
			"destroying a bo which is still MMAPED, doing nothing\n");
		goto err;
	}

	if (bo->status & HMM_BO_BINDED) {
		dev_warn(atomisp_dev,
			 "the bo is still bound, unbind it first...\n");
		hmm_bo_unbind(bo);
	}

	if (bo->status & HMM_BO_PAGE_ALLOCED) {
		dev_warn(atomisp_dev,
			 "the pages are not freed, free the pages first\n");
		hmm_bo_free_pages(bo);
	}

	if (bo->status & HMM_BO_VM_ALLOCED) {
		dev_warn(atomisp_dev,
			 "the vm is still not freed, free the vm first...\n");
		hmm_bo_free_vm(bo);
	}

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		dev_warn(atomisp_dev,
			 "the vunmap is not done yet, do it...\n");
		hmm_bo_vunmap(bo);
	}

	if (bo->release)
		bo->release(bo);

err:
	return;
}
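The FIXME comment says hmm_bo_release() should only run once the bo's reference count reaches zero. A minimal sketch of that path, assuming struct hmm_buffer_object embeds a struct kref named kref and that hmm_bo_unref() (called by hmm_alloc() and hmm_free() above) is the public release entry point; the helper name kref_hmm_bo_release is illustrative, not taken from the driver source:

#include <linux/kref.h>

/*
 * Sketch only. Assumptions: the bo embeds "struct kref kref", and
 * hmm_bo_unref() is the sole public release path. The helper name
 * below is hypothetical.
 */
static void kref_hmm_bo_release(struct kref *kref)
{
	struct hmm_buffer_object *bo =
		container_of(kref, struct hmm_buffer_object, kref);

	hmm_bo_release(bo);
}

void hmm_bo_unref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);
	kref_put(&bo->kref, kref_hmm_bo_release);
}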
void hmm_free(void *virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
	if (!bo) {
		v4l2_err(&atomisp_dev,
			 "cannot find a buffer object starting at address 0x%x\n",
			 (unsigned int)virt);
		return;
	}

	/* Tear down in the reverse order of hmm_alloc() */
	hmm_bo_unbind(bo);
	hmm_bo_free_pages(bo);
	hmm_bo_free_vm(bo);
	hmm_bo_unref(bo);
}
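Taken together, hmm_alloc() and hmm_free() form an allocate/release pair keyed on the ISP virtual start address. A hedged usage sketch follows; the buffer type HMM_BO_PRIVATE and the parameter meanings are assumptions about this driver version, and the example function is hypothetical:

/*
 * Usage sketch. Assumptions: HMM_BO_PRIVATE denotes a driver-owned
 * buffer type, and from_highmem = 0, userptr = 0, cached = false
 * request an ordinary uncached kernel-backed allocation.
 */
static int example_isp_buffer_round_trip(void)
{
	void *isp_virt;

	isp_virt = hmm_alloc(64 * 1024, HMM_BO_PRIVATE, 0, 0, false);
	if (!isp_virt)
		return -ENOMEM;

	/*
	 * hmm_free() looks the bo up by its exact start address, so only
	 * the pointer returned by hmm_alloc() may be passed back here;
	 * an interior pointer would fail the search.
	 */
	hmm_free(isp_virt);
	return 0;
}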