/*
 * hmm_alloc - allocate @bytes of ISP memory and map it into ISP virtual space.
 * @bytes:        requested size in bytes (rounded up to whole pages).
 * @type:         backing type of the buffer object.
 * @from_highmem: nonzero to allow pages from highmem.
 * @userptr:      user-space address when backing with user pages.
 * @cached:       whether the mapping is cached.
 *
 * Returns the ISP virtual start address as a void pointer, or NULL on
 * failure.  The whole sequence runs under bo_device.mm_lock; on error the
 * partially built buffer object is torn down in reverse order via the
 * goto-cleanup ladder below.
 */
void *hmm_alloc(size_t bytes, enum hmm_bo_type type, int from_highmem,
		unsigned int userptr, bool cached)
{
	unsigned int pgnr;
	struct hmm_buffer_object *bo;
	int ret;

	mutex_lock(&bo_device.mm_lock);

	/* Get page number from size */
	pgnr = size_to_pgnr_ceil(bytes);

	/*
	 * Buffer object structure init:
	 * allocates a hmm_buffer_object and initializes it,
	 * adds the bo to active_bo_list in bo_device,
	 * does not allocate backing memory yet.
	 */
	bo = hmm_bo_create(&bo_device, pgnr);
	if (!bo) {
		v4l2_err(&atomisp_dev, "hmm_bo_create failed.\n");
		goto create_bo_err;
	}

	/* Allocate a virtual address range in ISP virtual space */
	ret = hmm_bo_alloc_vm(bo);
	if (ret) {
		v4l2_err(&atomisp_dev, "hmm_bo_alloc_vm failed.\n");
		goto alloc_vm_err;
	}

	/* Allocate the backing pages */
	ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
	if (ret) {
		v4l2_err(&atomisp_dev, "hmm_bo_alloc_pages failed.\n");
		goto alloc_page_err;
	}

	/* Bind the virtual address range and the pages together */
	ret = hmm_bo_bind(bo);
	if (ret) {
		v4l2_err(&atomisp_dev, "hmm_bo_bind failed.\n");
		goto bind_err;
	}

	mutex_unlock(&bo_device.mm_lock);

	return (void *)bo->vm_node->start;

bind_err:
	hmm_bo_free_pages(bo);
alloc_page_err:
	hmm_bo_free_vm(bo);
alloc_vm_err:
	hmm_bo_unref(bo);
create_bo_err:
	mutex_unlock(&bo_device.mm_lock);
	return NULL;
}
/*
 * hmm_free - release memory previously returned by hmm_alloc().
 * @virt: ISP virtual start address (the value hmm_alloc() returned).
 *
 * Looks up the buffer object whose ISP virtual range starts at @virt and
 * unwinds hmm_alloc()'s steps in reverse: unbind, free pages, free the
 * virtual range, drop the reference.  Logs and returns if no buffer object
 * starts at @virt.
 */
void hmm_free(void *virt)
{
	struct hmm_buffer_object *bo;

	/*
	 * The ISP virtual address space is indexed by unsigned int; cast the
	 * pointer through unsigned long first so the conversion is
	 * well-defined on 64-bit kernels (a direct pointer-to-unsigned-int
	 * cast truncates and triggers -Wpointer-to-int-cast).
	 */
	bo = hmm_bo_device_search_start(&bo_device,
					(unsigned int)(unsigned long)virt);
	if (!bo) {
		v4l2_err(&atomisp_dev,
			 "can not find buffer object start with address 0x%lx\n",
			 (unsigned long)virt);
		return;
	}

	hmm_bo_unbind(bo);
	hmm_bo_free_pages(bo);
	hmm_bo_free_vm(bo);
	hmm_bo_unref(bo);
}