IonDmaMemManager::~IonDmaMemManager()
{
	// Tear down every buffer pool that is still allocated, then drop
	// the ion client connection.
	if (mPreviewData != NULL) {
		destroyPreviewBuffer();
		free(mPreviewData);
		mPreviewData = NULL;
	}
	if (mRawData != NULL) {
		destroyRawBuffer();
		free(mRawData);
		mRawData = NULL;
	}
	if (mJpegData != NULL) {
		destroyJpegBuffer();
		free(mJpegData);
		mJpegData = NULL;
	}
	if (mVideoEncData != NULL) {
		destroyVideoEncBuffer();
		free(mVideoEncData);
		mVideoEncData = NULL;
	}
	// -1 marks "never opened / already closed".
	if (client_fd != -1) {
		ion_close(client_fd);
	}
}
/*
 * Allocate one ion buffer, mmap it, write a ramp pattern and read it
 * back to validate the mapping, then clean up.  A second allocation is
 * made at the end whose handle is deliberately not freed.
 */
void ion_map_test()
{
	int fd, map_fd, ret;
	size_t i;
	struct ion_handle *handle;
	unsigned char *ptr;

	if (_ion_alloc_test(&fd, &handle))
		return;

	if (tiler_test)
		len = height * stride;
	ret = ion_map(fd, handle, len, prot, map_flags, 0, &ptr, &map_fd);
	if (ret) {
		/* Fix: the old early return leaked both the handle and the
		   ion fd when mapping failed. */
		ion_free(fd, handle);
		ion_close(fd);
		return;
	}

	if (tiler_test)
		_ion_tiler_map_test(ptr);
	else {
		for (i = 0; i < len; i++) {
			ptr[i] = (unsigned char)i;
		}
		for (i = 0; i < len; i++)
			if (ptr[i] != (unsigned char)i)
				/* Fix: 'i' is size_t — %d invoked undefined behavior
				   on LP64; use %zu. */
				printf("%s failed wrote %zu read %d from mapped "
					   "memory\n", __func__, i, ptr[i]);
	}
	/* clean up properly */
	ret = ion_free(fd, handle);
	ion_close(fd);
	munmap(ptr, len);
	close(map_fd);

	/* NOTE(review): this second allocation closes the fd but never
	   frees the handle — presumably exercising kernel-side cleanup on
	   close; confirm this leak is intentional. */
	_ion_alloc_test(&fd, &handle);
	close(fd);

#if 0
	munmap(ptr, len);
	close(map_fd);
	ion_close(fd);

	_ion_alloc_test(len, align, flags, &fd, &handle);
	close(map_fd);
	ret = ion_map(fd, handle, len, prot, flags, 0, &ptr, &map_fd);
	/* don't clean up */
#endif
}
/*
 * Close the ion client fd held in nClient.
 *
 * pMemPluginHandle is accepted for interface compatibility but unused.
 * Always returns MEMPLUGIN_ERROR_NONE; ion_close()'s result is ignored.
 */
MEMPLUGIN_ERRORTYPE MemPlugin_ION_Close(void *pMemPluginHandle, OMX_U32 nClient)
{
    MEMPLUGIN_ERRORTYPE eError = MEMPLUGIN_ERROR_NONE;

    (void)pMemPluginHandle;  /* unused — silence the compiler */

    /* nClient narrows OMX_U32 -> int fd inside ion_close(). */
    ion_close(nClient);

    /* Fix: removed the dead EXIT: label (nothing jumped to it, which
       triggers -Wunused-label). */
    return eError;
}
/*
 * Allocate `count` ion handles, then free whatever was allocated.
 * Returns 0 on full success, -1 if the device cannot be opened, or a
 * negative errno when allocation/free fails.
 */
int ion_alloc_test(int count)
{
	int fd, ret = 0, i, count_alloc;
	struct ion_handle **handle;

	fd = ion_open();
	if (fd < 0) {
		printf("%s(): FAILED to open ion device\n", __func__);
		return -1;
	}

	handle = (struct ion_handle **)malloc(count * sizeof(struct ion_handle *));
	if (handle == NULL) {
		printf("%s() : FAILED to allocate memory for ion_handles\n", __func__);
		/* Fix: the ion fd was leaked on this path. */
		ion_close(fd);
		return -ENOMEM;
	}

	/* Allocate ion_handles */
	count_alloc = count;
	for (i = 0; i < count; i++) {
		ret = _ion_alloc_test(fd, &(handle[i]));
		printf("%s(): Alloc handle[%d]=%p\n", __func__, i, handle[i]);
		/* NOTE(review): comparing the handle pointer against -ENOMEM
		   mirrors the sibling tests here; confirm _ion_alloc_test()'s
		   actual failure contract. */
		if (ret || ((int)handle[i] == -ENOMEM)) {
			printf("%s(): Alloc handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
			count_alloc = i;
			goto err_alloc;
		}
	}

	err_alloc:
	/* Free ion_handles */
	for (i = 0; i < count_alloc; i++) {
		printf("%s(): Free  handle[%d]=%p\n", __func__, i, handle[i]);
		ret = ion_free(fd, handle[i]);
		if (ret) {
			printf("%s(): Free handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
		}
	}

	ion_close(fd);
	free(handle);
	handle = NULL;

	if (ret || (count_alloc != count)) {
		printf("\nion alloc test: FAILED\n\n");
		if (count_alloc != count)
			ret = -ENOMEM;
	}
	else
		printf("\nion alloc test: PASSED\n\n");

	return ret;
}
Exemple #5
0
/*
 * Smoke test: allocate one ion buffer, free it, close the device.
 * Prints a pass message on success; failures are logged and abort the
 * test early.
 */
void ion_alloc_test()
{
	int fd, ret;
	ion_user_handle_t handle;

	if (_ion_alloc_test(&fd, &handle))
			return;

	ret = ion_free(fd, handle);
	if (ret) {
		printf("%s failed: %s %d\n", __func__, strerror(ret), handle);
		/* Fix: the ion fd was leaked on this error path. */
		ion_close(fd);
		return;
	}
	ion_close(fd);
	printf("ion alloc test: passed\n");
}
/**
 * Free a zero-terminated array of mapped buffer addresses previously
 * returned by allocateBuffer(): unmap each buffer, close its export fd,
 * release its ion handle, then delete the array itself.  Closes the ion
 * device once no tracked buffers remain.
 *
 * @param buf  array returned by allocateBuffer(); must not be NULL.
 * @return NO_ERROR, or BAD_VALUE if buf is NULL.
 */
int MemoryManager::freeBuffer(void* buf)
{
    status_t ret = NO_ERROR;
    LOG_FUNCTION_NAME;

    uint32_t *bufEntry = (uint32_t*)buf;

    if(!bufEntry)
        {
        CAMHAL_LOGEA("NULL pointer passed to freebuffer");
        LOG_FUNCTION_NAME_EXIT;
        return BAD_VALUE;
        }

    // The array is terminated by a zero entry (see allocateBuffer).
    // NOTE(review): storing pointers in unsigned int assumes a 32-bit
    // address space — confirm the target ABI before reuse.
    while(*bufEntry)
        {
        unsigned int ptr = (unsigned int) *bufEntry++;
        if(mIonBufLength.valueFor(ptr))
            {
            munmap((void *)ptr, mIonBufLength.valueFor(ptr));
            close(mIonFdMap.valueFor(ptr));
            ion_free(mIonFd, (ion_handle*)mIonHandleMap.valueFor(ptr));
            mIonHandleMap.removeItem(ptr);
            mIonBufLength.removeItem(ptr);
            mIonFdMap.removeItem(ptr);
            }
        else
            {
            CAMHAL_LOGEA("Not a valid Memory Manager buffer");
            }
        }

    ///@todo Check if this way of deleting array is correct, else use malloc/free
    uint32_t * bufArr = (uint32_t*)buf;
    delete [] bufArr;

    if(mIonBufLength.size() == 0)
        {
        // Fix: use the same fd convention as the destructor and
        // allocateBuffer() (valid fds are >= 0, -1 means closed).  The
        // old code tested `if(mIonFd)` and reset to 0, which both treats
        // fd 0 as closed and lets the destructor close fd 0 again.
        if(mIonFd >= 0)
            {
            ion_close(mIonFd);
            mIonFd = -1;
            }
        }
    LOG_FUNCTION_NAME_EXIT;
    return ret;
}
/*
 * hw_device_t close hook for the gralloc alloc device: releases the ion
 * client (DMA-buf builds) and/or the UMP session, then deletes the
 * device object.  Always returns 0.
 */
static int alloc_device_close(struct hw_device_t *device)
{
	alloc_device_t* dev = reinterpret_cast<alloc_device_t*>(device);
	if (dev)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		// NOTE(review): casting the device to private_module_t mirrors
		// the original code, but device is an alloc_device_t — confirm
		// the module/device layout really allows this.
		private_module_t *m = reinterpret_cast<private_module_t*>(device);
		if ( 0 != ion_close(m->ion_client) ) AERR( "Failed to close ion_client: %d", m->ion_client );
		// Fix: removed the extra close(m->ion_client) that followed —
		// ion_close() already closes the fd, so that was a double close
		// (risking closing an fd reused by another thread).
#endif
		delete dev;
#if GRALLOC_ARM_UMP_MODULE
		ump_close(); // Our UMP memory refs will be released automatically here...
#endif
	}
	return 0;
}
Exemple #8
0
/*
 * Release one ion allocation; once the last outstanding allocation is
 * gone, close the shared ion device fd as well.
 * Returns 0 on success, or ion_free()'s error code.
 */
int sys_mem_free(ion_user_handle_t p_ion_handle)
{
    int status = ion_free(ion_fd, p_ion_handle);
    if (status != 0)
    {
        printf("ion mem free error \n");
        return status;
    }

    --ion_count;
    if (ion_count == 0)
    {
        ion_close(ion_fd);
    }
    return 0;
}
unsigned int IonGetAddr(void *handle)
{
	  unsigned int phy_adr=0;
	  struct ion_handle *handle_ion;
		private_handle_t* hnd = NULL;
    SUNXI_hwcdev_context_t *Globctx = &gSunxiHwcDevice;
    Globctx->ion_fd = ion_open();		
    if( Globctx->ion_fd != -1 )
    {		
				hnd = (private_handle_t*)handle;
				ion_import(Globctx->ion_fd,hnd->share_fd, &handle_ion);
				phy_adr= (unsigned int)ion_getphyadr(Globctx->ion_fd,(void *)(handle_ion));
				ion_sync_fd(Globctx->ion_fd,hnd->share_fd);
				ion_close(Globctx->ion_fd);
				Globctx->ion_fd = -1;
		}
    return phy_adr;  
}
Exemple #10
0
/*
 * Fork detector: if the current pid differs from the pid recorded when
 * the ion device was opened, discard the per-process allocation list
 * inherited from the parent and reopen the ion fd for this process.
 * Thread-safe via the module-level mutex.
 */
void check_pid()
{
	struct actal_mem * user_p;
	// int ret = 0;
	
	// Serialize against other threads touching the allocator state.
	if (pthread_mutex_lock(&mutex) != 0)
    {
		ALOGE("get mutex failed");
        return ;
    }

	if(s_pid != getpid())
	{
		ALOGD("PID changed, reopen ion device");
		ALOGD("parent pid = %d, fd = %d", s_pid, s_fd);
		// Walk the allocation list and unmap every buffer inherited
		// from the parent.  The handles/map fds are NOT released here
		// (see the commented-out ion_free/close calls) — presumably
		// they still belong to the parent process; confirm intent.
		if(s_top_p != NULL)
		{
			s_current_p = s_top_p->next;
			while((user_p = s_current_p) != NULL)
			{
				s_current_p = user_p->next;
				// ret = ion_free(user_p->fd, user_p->handle);
				munmap(user_p->ptr, user_p->len);
				// close(user_p->map_fd);
				free(user_p);
				user_p = NULL;
			}
			s_top_p->next = NULL;
			s_current_p = s_top_p;
		}
		// Drop the inherited ion fd and open a fresh one for this pid.
		ion_close(s_fd);
		s_fd = ion_open();
		s_pid = getpid();
		ALOGD("new pid = %d, fd = %d", s_pid, s_fd);
	}
	
	if (pthread_mutex_unlock(&mutex) != 0)
    {
		ALOGE("free mutex failed");
        return ;
    }
}
/**
 * Keep allocating buffers of the specified size & type until the allocation fails.
 * Then free 10 buffers and allocate 10 buffers again.
 */
/*
 * Exhaustion/recovery test: allocate ion handles until allocation fails,
 * free the last COUNT_REALLOC_MAX of them, then allocate
 * COUNT_REALLOC_MAX again to verify the heap is reusable.  Finally free
 * everything.  Returns 0 on success, negative errno on failure.
 */
int ion_alloc_fail_alloc_test()
{
	int fd, ret = 0, i;
	struct ion_handle **handle;
	const int  COUNT_ALLOC_MAX = 200;
	const int  COUNT_REALLOC_MAX = 10;
	int count_alloc = COUNT_ALLOC_MAX, count_realloc = COUNT_ALLOC_MAX;

	fd = ion_open();
	if (fd < 0) {
		printf("%s(): FAILED to open ion device\n", __func__);
		return -1;
	}

	handle = (struct ion_handle **)malloc(COUNT_ALLOC_MAX * sizeof(struct ion_handle *));
	if (handle == NULL) {
		printf("%s(): FAILED to allocate memory for ion_handles\n", __func__);
		/* Fix: the ion fd was leaked on this path. */
		ion_close(fd);
		return -ENOMEM;
	}

	/* Allocate ion_handles as much as possible */
	for (i = 0; i < COUNT_ALLOC_MAX; i++) {
		ret = _ion_alloc_test(fd, &(handle[i]));
		printf("%s(): Alloc handle[%d]=%p\n", __func__, i, handle[i]);
		if (ret || ((int)handle[i] == -ENOMEM)) {
			printf("%s(): Alloc handle[%d]=%p FAILED, err:%s\n\n",
					__func__, i, handle[i], strerror(ret));
			count_alloc = i;
			break;
		}
	}

	/* Free COUNT_REALLOC_MAX ion_handles */
	for (i = count_alloc-1; i > (count_alloc-1 - COUNT_REALLOC_MAX); i--) {
		printf("%s(): Free  handle[%d]=%p\n", __func__, i, handle[i]);
		ret = ion_free(fd, handle[i]);
		if (ret) {
			printf("%s(): Free  handle[%d]=%p FAILED, err:%s\n\n",
					__func__, i, handle[i], strerror(ret));
		}
	}

	/* Again allocate COUNT_REALLOC_MAX ion_handles to test
	   that we are still able to allocate */
	for (i = (count_alloc - COUNT_REALLOC_MAX); i < count_alloc; i++) {
		ret = _ion_alloc_test(fd, &(handle[i]));
		printf("%s(): Alloc handle[%d]=%p\n", __func__, i, handle[i]);
		if (ret || ((int)handle[i] == -ENOMEM)) {
			printf("%s(): Alloc handle[%d]=%p FAILED, err:%s\n\n",
					__func__, i, handle[i], strerror(ret));
			count_realloc = i;
			goto err_alloc;
		}
	}
	count_realloc = i;

	err_alloc:
	/* Free all ion_handles */
	for (i = 0; i < count_alloc; i++) {
		printf("%s(): Free  handle[%d]=%p\n", __func__, i, handle[i]);
		ret = ion_free(fd, handle[i]);
		if (ret) {
			printf("%s(): Free  handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
		}
	}

	ion_close(fd);
	free(handle);
	handle = NULL;

	printf("\ncount_alloc=%d, count_realloc=%d\n",count_alloc, count_realloc);

	if (ret || (count_alloc != count_realloc)) {
		printf("\nion alloc->fail->alloc test: FAILED\n\n");
		if (count_alloc != COUNT_ALLOC_MAX)
			ret = -ENOMEM;
	}
	else
		printf("\nion alloc->fail->alloc test: PASSED\n\n");

	return ret;
}
/*
 * Allocate `count` ion handles, map each one, validate the mapping by
 * writing and reading a pattern, then unmap, close and free everything.
 * Returns 0 on full success, -1 if the device cannot be opened, or a
 * negative errno when any stage falls short.
 */
int ion_map_test(int count)
{
	int fd, ret = 0, i, count_alloc, count_map;
	struct ion_handle **handle;
	unsigned char **ptr;
	int *map_fd;

	fd = ion_open();
	if (fd < 0) {
		printf("%s(): FAILED to open ion device\n", __func__);
		return -1;
	}

	handle = (struct ion_handle **)malloc(count * sizeof(struct ion_handle *));
	if (handle == NULL) {
		printf("%s(): FAILED to allocate memory for ion_handles\n", __func__);
		/* Fix: the ion fd was leaked on this path. */
		ion_close(fd);
		return -ENOMEM;
	}

	count_alloc = count;
	count_map = count;

	/* Allocate ion_handles */
	for (i = 0; i < count; i++) {
		ret = _ion_alloc_test(fd, &(handle[i]));
		printf("%s(): Alloc handle[%d]=%p\n", __func__, i, handle[i]);
		if (ret || ((int)handle[i] == -ENOMEM)) {
			printf("%s(): Alloc handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
			count_alloc = i;
			goto err_alloc;
		}
	}

	/* Map ion_handles and validate */
	if (tiler_test)
		len = height * stride;

	/* Fix: correct element sizes (previously sizeof(unsigned char **)
	   and sizeof(int *)) and check the allocations before use. */
	ptr = (unsigned char **)malloc(count * sizeof(unsigned char *));
	map_fd = (int *)malloc(count * sizeof(int));
	if (ptr == NULL || map_fd == NULL) {
		printf("%s(): FAILED to allocate map bookkeeping arrays\n", __func__);
		free(ptr);
		free(map_fd);
		count_map = 0;
		goto err_alloc;
	}

	for (i = 0; i < count; i++) {
		/* Map ion_handle on userside */
		ret = ion_map(fd, handle[i], len, prot, map_flags, 0, &(ptr[i]), &(map_fd[i]));
		printf("%s(): Map handle[%d]=%p, map_fd=%d, ptr=%p\n",
				__func__, i, handle[i], map_fd[i], ptr[i]);
		if (ret) {
			printf("%s Map handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
			count_map = i;
			goto err_map;
		}

		/* Validate mapping by writing the data and reading it back */
		if (tiler_test)
			_ion_tiler_map_test(ptr[i]);
		else
			_ion_map_test(ptr[i]);
	}

	/* clean up properly */
	err_map:
	for (i = 0; i < count_map; i++) {
		/* Unmap ion_handles */
		ret = munmap(ptr[i], len);
		printf("%s(): Unmap handle[%d]=%p, map_fd=%d, ptr=%p\n",
				__func__, i, handle[i], map_fd[i], ptr[i]);
		if (ret) {
			printf("%s(): Unmap handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
			/* Fix: the original did `goto err_map` here, which jumps
			   back to the top of this very loop (i resets to 0) and
			   spins forever when munmap() keeps failing.  Log and
			   carry on so the remaining fds still get closed. */
		}
		/* Close fds */
		close(map_fd[i]);
	}
	free(map_fd);
	free(ptr);

	err_alloc:
	/* Free ion_handles */
	for (i = 0; i < count_alloc; i++) {
		printf("%s(): Free handle[%d]=%p\n", __func__, i, handle[i]);
		ret = ion_free(fd, handle[i]);
		if (ret) {
			printf("%s(): Free handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
		}
	}

	ion_close(fd);
	free(handle);
	handle = NULL;

	if (ret || (count_alloc != count) || (count_map != count))
	{
		printf("\nion map test: FAILED\n\n");
		if ((count_alloc != count) || (count_map != count))
			ret = -ENOMEM;
	} else
		printf("\nion map test: PASSED\n");

	return ret;
}
Exemple #13
0
/*
 * Shared-library destructor: closes the module-wide ion fd (s_fd) when
 * the .so is unloaded.
 * NOTE(review): destructor functions are conventionally void; the int
 * return is ignored by the loader — harmless with GCC, but confirm.
 */
static int __attribute__((destructor)) so_exit(void) 
{
	ion_close(s_fd);
	return 0;
}
/*
 * Allocate physically contiguous memory for the VPU and map it into user
 * space, filling buff->phy_addr / cpu_addr / virt_uaddr.
 *
 * which: VPU_IOC_GET_WORK_ADDR queries the driver-owned work buffer;
 *        VPU_IOC_PHYMEM_ALLOC performs a fresh allocation.
 * buff:  in/out descriptor; buff->size must be non-zero on entry and is
 *        rounded up to a whole number of pages.
 * Returns 0 on success, -1 on failure.  The allocator backend is chosen
 * at compile time: ION, GPU (g2d) or pmem on Android, plain driver ioctl
 * otherwise.
 */
int _IOGetPhyMem(int which, vpu_mem_desc *buff)
{
#ifdef BUILD_FOR_ANDROID
	const size_t pagesize = getpagesize();
	int err, fd;
#ifdef USE_ION
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	ion_user_handle_t handle;
#else
	struct ion_handle *handle;
#endif
	int share_fd, ret = -1;
	unsigned char *ptr;
#elif USE_GPU
        struct g2d_buf *gbuf;
        int bytes;
#else
	/* Get memory from pmem space for android */
	struct pmem_region region;
#endif

	if ((!buff) || (!buff->size)) {
		err_msg("Error!_IOGetPhyMem:Invalid parameters");
		return -1;
	}

	buff->cpu_addr = 0;
	buff->phy_addr = 0;
	buff->virt_uaddr = 0;

	/* The work buffer belongs to the driver; just query its address. */
	if (which == VPU_IOC_GET_WORK_ADDR) {
		if (ioctl(vpu_fd, which, buff) < 0) {
			err_msg("mem allocation failed!\n");
			buff->phy_addr = 0;
			buff->cpu_addr = 0;
			return -1;
		}
		return 0;
	}

	if (which != VPU_IOC_PHYMEM_ALLOC) {
		err_msg("Error!_IOGetPhyMem unsupported memtype: %d", which);
		return -1;
	}

	/* Round the request up to a whole number of pages. */
	buff->size = (buff->size + pagesize-1) & ~(pagesize - 1);

#ifdef USE_ION
	fd = ion_open();
	if (fd <= 0) {
		err_msg("ion open failed!\n");
		return -1;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	err = ion_alloc(fd, buff->size, pagesize, 1, 0, &handle);
#else
	err = ion_alloc(fd, buff->size, pagesize, 1, &handle);
#endif
	if (err) {
		err_msg("ion allocation failed!\n");
		goto error;
	}

	err = ion_map(fd, handle, buff->size,
			    PROT_READ|PROT_WRITE, MAP_SHARED,
			    0, &ptr, &share_fd);
	if (err) {
		err_msg("ion map failed!\n");
		goto error;
	}

	/* NOTE(review): ion_phys() here appears to return the physical
	   address itself (0 meaning failure) rather than an errno-style
	   status — confirm against this platform's libion. */
	err = ion_phys(fd, handle);
	if (err == 0) {
		err_msg("ion get physical address failed!\n");
		goto error;
	}

	buff->virt_uaddr = (unsigned long)ptr;
	buff->phy_addr = (unsigned long)err;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	/* On 3.10+ the mapping is kept alive by share_fd, so the handle can
	   be released now; share_fd doubles as the cpu_addr cookie that
	   _IOFreePhyMem uses later. */
	ion_free(fd, handle);
	buff->cpu_addr = (unsigned long)share_fd;
#else
	buff->cpu_addr = (unsigned long)handle;
#endif
	memset((void*)buff->virt_uaddr, 0, buff->size);
	ret = 0;
	info_msg("<ion> alloc handle: 0x%x, paddr: 0x%x, vaddr: 0x%x",
			(unsigned int)handle, (unsigned int)buff->phy_addr,
			(unsigned int)buff->virt_uaddr);
error:
	/* NOTE(review): on pre-3.10 kernels this close runs even when
	   ion_map() never succeeded, so share_fd may be uninitialized here
	   — likely a latent bug; confirm before relying on this path. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
	close(share_fd);
#endif
	ion_close(fd);
	return ret;
#elif USE_GPU
        bytes = buff->size + PAGE_SIZE;
        gbuf = g2d_alloc(bytes, 0);
        if(!gbuf) {
            err_msg("%s: gpu allocator failed to alloc buffer with size %d", __FUNCTION__, buff->size);
            return -1;
        }

        buff->virt_uaddr = (unsigned long)gbuf->buf_vaddr;
        buff->phy_addr = (unsigned long)gbuf->buf_paddr;
        buff->cpu_addr = (unsigned long)gbuf;

        //vpu requires page alignment for the address implicitly, round it to page edge
        buff->virt_uaddr = (buff->virt_uaddr + PAGE_SIZE -1) & ~(PAGE_SIZE -1);
        buff->phy_addr = (buff->phy_addr + PAGE_SIZE -1) & ~(PAGE_SIZE -1);
        memset((void*)buff->virt_uaddr, 0, buff->size);

        info_msg("<gpu> alloc handle: 0x%x, paddr: 0x%x, vaddr: 0x%x",
			(unsigned int)gbuf, (unsigned int)buff->phy_addr,
			(unsigned int)buff->virt_uaddr);
        return 0;
#else
	fd = (unsigned long)open("/dev/pmem_adsp", O_RDWR | O_SYNC);
	if (fd < 0) {
		err_msg("Error!_IOGetPhyMem Error,cannot open pmem");
		return -1;
	}

	err = ioctl(fd, PMEM_GET_TOTAL_SIZE, &region);

	buff->virt_uaddr = (unsigned long)mmap(0, buff->size,
			    PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	if (buff->virt_uaddr == (unsigned long)MAP_FAILED) {
		err_msg("Error!mmap(fd=%d, size=%u) failed (%s)",
			fd, buff->size, strerror(errno));
		close(fd);
		return -1;
	}

	memset(&region, 0, sizeof(region));

	if (ioctl(fd, PMEM_GET_PHYS, &region) == -1) {
		err_msg("Error!Failed to get physical address of source!");
		munmap((void *)buff->virt_uaddr, buff->size);
		close(fd);
		return -1;
	}

	buff->phy_addr = (unsigned long)region.offset;
	buff->cpu_addr = (unsigned long)fd;
	memset((void*)buff->virt_uaddr, 0, buff->size);
#endif
#else
	if (ioctl(vpu_fd, which, buff) < 0) {
		err_msg("mem allocation failed!\n");
		buff->phy_addr = 0;
		buff->cpu_addr = 0;
		return -1;
	}
	sz_alloc += buff->size;
	dprintf(3, "%s: phy addr = %08lx\n", __func__, buff->phy_addr);
	dprintf(3, "%s: alloc=%d, total=%d\n", __func__, buff->size, sz_alloc);
#endif

	return 0;
}
/*--------------------MemoryManager Class STARTS here-----------------------------*/
/**
 * Allocate numBufs ION buffers of `bytes` bytes each and map them into
 * user space.  Returns a heap-allocated uint32_t array of the mapped
 * addresses, terminated by a 0 entry (which freeBuffer() walks), or
 * NULL on failure.  width/height/format are currently unused; 2-D
 * (tiler) allocations are not implemented.
 */
void* MemoryManager::allocateBuffer(int width, int height, const char* format, int &bytes, int numBufs)
{
    LOG_FUNCTION_NAME;

    // Lazily open the ION device on first use (negative fd == closed).
    if(mIonFd < 0)
        {
        mIonFd = ion_open();
        if(mIonFd < 0)
            {
            CAMHAL_LOGEA("ion_open failed!!!");
            return NULL;
            }
        }

    ///We allocate numBufs+1 because the last entry will be marked NULL to indicate end of array, which is used when freeing
    ///the buffers
    const uint numArrayEntriesC = (uint)(numBufs+1);

    ///Allocate a buffer array
    // NOTE(review): plain `new` throws std::bad_alloc instead of
    // returning NULL, so this check is dead unless the build uses
    // -fno-exceptions / new(nothrow) — confirm build flags.
    uint32_t *bufsArr = new uint32_t [numArrayEntriesC];
    if(!bufsArr)
        {
        CAMHAL_LOGEB("Allocation failed when creating buffers array of %d uint32_t elements", numArrayEntriesC);
        goto error;
        }

    ///Initialize the array with zeros - this will help us while freeing the array in case of error
    ///If a value of an array element is NULL, it means we didnt allocate it
    memset(bufsArr, 0, sizeof(*bufsArr) * numArrayEntriesC);

    //2D Allocations are not supported currently
    if(bytes != 0)
        {
        struct ion_handle *handle;
        int mmap_fd;

        ///1D buffers
        for (int i = 0; i < numBufs; i++)
            {
            int ret = ion_alloc(mIonFd, bytes, 0, 1 << ION_HEAP_TYPE_CARVEOUT, &handle);
            if(ret < 0)
                {
                CAMHAL_LOGEB("ion_alloc resulted in error %d", ret);
                goto error;
                }

            CAMHAL_LOGDB("Before mapping, handle = %x, nSize = %d", handle, bytes);
            if ((ret = ion_map(mIonFd, handle, bytes, PROT_READ | PROT_WRITE, MAP_SHARED, 0,
                          (unsigned char**)&bufsArr[i], &mmap_fd)) < 0)
                {
                CAMHAL_LOGEB("Userspace mapping of ION buffers returned error %d", ret);
                ion_free(mIonFd, handle);
                goto error;
                }

            // Track handle, export fd and length keyed by the mapped
            // address so freeBuffer() can tear the buffer down later.
            mIonHandleMap.add(bufsArr[i], (unsigned int)handle);
            mIonFdMap.add(bufsArr[i], (unsigned int) mmap_fd);
            mIonBufLength.add(bufsArr[i], (unsigned int) bytes);
            }

        }
    else // bytes == 0 would indicate a 2-D tiler buffer request, which is not supported yet
        {
        }

        LOG_FUNCTION_NAME_EXIT;

        return (void*)bufsArr;

error:
    ALOGE("Freeing buffers already allocated after error occurred");
    if(bufsArr)
        freeBuffer(bufsArr);

    if ( NULL != mErrorNotifier.get() )
        {
        mErrorNotifier->errorNotify(-ENOMEM);
        }

    // NOTE(review): freeBuffer() may itself close mIonFd when the last
    // tracked buffer is released and resets it to 0 rather than -1, so
    // this `>= 0` test can close fd 0 — confirm and align the sentinels.
    if (mIonFd >= 0)
    {
        ion_close(mIonFd);
        mIonFd = -1;
    }

    LOG_FUNCTION_NAME_EXIT;
    return NULL;
}
Exemple #16
0
//-----------------------------------------------------------------------------
///////////////////////////////////////////////////////////////////////
///We decrease the global and local counts first, then decide whether to uninit m4uDrv/ion_dev and the m4u ports according
///   to the local count and global count respectively.
/**
 * Release one reference on the memory driver.  Decrements the kernel
 * (global) and local init counts, and when the local count reaches zero
 * closes the ion device, disables the m4u DMA ports and deletes the m4u
 * driver object.  Returns MTRUE on success, MFALSE if the kernel
 * ref-count ioctl fails.
 */
MBOOL IMemDrvImp::uninit(void)
{
    MBOOL Result = MTRUE;
    MINT32 ret = 0;
    ISP_REF_CNT_CTRL_STRUCT ref_cnt;
    //
    Mutex::Autolock lock(mLock);
    //
#if defined(_use_kernel_ref_cnt_)
    if(mIspFd < 0)
    {
        IMEM_ERR("mIspFd < 0 \n");
		goto EXIT;
    }
    ///////////////////////////////////////////////
    //decrease global and local count first  
    // More than one user
    ref_cnt.ctrl = ISP_REF_CNT_DEC;
    ref_cnt.id = ISP_REF_CNT_ID_IMEM;
    ref_cnt.data_ptr = (MUINT32)&mInitCount;
    ret = ioctl(mIspFd,ISP_REF_CNT_CTRL,&ref_cnt);
    if(ret < 0)
    {
        IMEM_ERR("ISP_REF_CNT_DEC fail(%d)[errno(%d):%s] \n",ret, errno, strerror(errno));
        Result = MFALSE;
		goto EXIT;
    }
    android_atomic_dec(&mLocal_InitCount);
    //
#else
    ///IMEM_DBG("mInitCount(%d)",mInitCount);  
    // More than one user
    android_atomic_dec(&mInitCount);
    //IMEM_INF("-flag2- mInitCount(%d)\n",mInitCount);
#endif    
    IMEM_INF("mInitCount(%d),mLocal_InitCount(%d)\n",mInitCount,mLocal_InitCount);

#if defined (__ISP_USE_PMEM__)
    //
#elif defined (__ISP_USE_STD_M4U__) || defined (__ISP_USE_ION__)
    //////////////////////////////////////////////////////
    // we delete m4udrv and close ion device when local count is 1,
    // and unconfig m4v ports when global count is 1
    if ( mLocal_InitCount <= 0 ) 
	{
       #if defined (__ISP_USE_ION__)
	      // we have to handle local ion drv here
          // if process A open ionID, then process B open ionID before process A call ImemDrv_uninit,
          // process A would not do close ionID.
          // NOTE(review): mIonDrv is not reset after ion_close(), so a
          // second uninit in this process would close it again — confirm
          // callers cannot reach that state.
          if (mIonDrv)  
          {
		    IMEM_INF("close ion id(%d).\n", mIonDrv);
            ion_close(mIonDrv);
          }
	   #endif
	   //IMEM_INF("-!!!- mInitCount(%d)\n", mInitCount);
	   //if(mInitCount<=0)
       {
           // Switch every camera DMA port back to physical addressing
           // (Virtuality = 0) before the m4u driver goes away.
           IMEM_INF("disable config dma port using mva");
           M4U_PORT_STRUCT port;
           port.Virtuality = 0;
           port.Security = 0;
           port.domain = 3;
           port.Distance = 1;
           port.Direction = 0; //M4U_DMA_READ_WRITE
           //
	      port.ePortID = M4U_PORT_CAM_IMGO;
          ret = mpM4UDrv->m4u_config_port(&port);

	      port.ePortID = M4U_PORT_CAM_IMG2O;
          ret = mpM4UDrv->m4u_config_port(&port);

	      port.ePortID = M4U_PORT_CAM_LSCI;
          ret = mpM4UDrv->m4u_config_port(&port);

	      port.ePortID = M4U_PORT_CAM_IMGI;
          ret = mpM4UDrv->m4u_config_port(&port);

	      port.ePortID = M4U_PORT_CAM_ESFKO;
          ret = mpM4UDrv->m4u_config_port(&port);

	      port.ePortID = M4U_PORT_CAM_AAO;
          ret = mpM4UDrv->m4u_config_port(&port);
       }
	   delete mpM4UDrv;
       mpM4UDrv = NULL;
    }
#endif

    EXIT:

#if defined(_use_kernel_ref_cnt_)    
    //local ==0, global !=0 del m4u object only
    if ( mLocal_InitCount <= 0 ) {
        if ( mIspFd >= 0 ) {
            close(mIspFd);
            mIspFd = -1;
            IMEM_DBG("mIspFd(%d)",mIspFd);            
        }
    }
#endif
    return Result;
}
MemoryManager::~MemoryManager() {
    // Close the ION device if it is still open; -1 marks it closed.
    if ( mIonFd < 0 ) {
        return;
    }
    ion_close(mIonFd);
    mIonFd = -1;
}
/*!
 * @brief Free specified memory
 * When the user wants to free memory previously allocated for the system,
 * they need to fill in the physical address and size to be freed
 * in the buff structure.
 *
 * @param buff	the structure containing memory information to be freed;
 *
 * @return
 * @li 0            Freeing memory success.
 * @li -1		Freeing memory failure.
 */
int _IOFreePhyMem(int which, vpu_mem_desc * buff)
{
#ifdef BUILD_FOR_ANDROID
#ifdef USE_ION
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	int shared_fd;
#else
	struct ion_handle *handle;
#endif
	int fd;

	if (!buff || !(buff->size) || ((unsigned long)buff->cpu_addr == 0)) {
		err_msg("Error!_IOFreePhyMem:Invalid parameters");
		return -1;
	}

	if (which != VPU_IOC_PHYMEM_FREE) {
		err_msg("Error!_IOFreePhyMem unsupported memtype: %d",which);
		return -1;
	}

	/* cpu_addr is the cookie stored by _IOGetPhyMem: a shared dma-buf
	   fd on 3.10+ kernels, an ion handle on older ones. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	shared_fd = buff->cpu_addr;
#else
	handle = (struct ion_handle *)buff->cpu_addr;
#endif

	/* NOTE(review): this fresh ion fd is only actually used by the
	   pre-3.10 ion_free() call; on newer kernels it is opened and then
	   closed unused — confirm before simplifying. */
	fd = ion_open();
	if (fd <= 0) {
		err_msg("ion open failed!\n");
		return -1;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	/* Dropping the shared fd releases this process's reference to the
	   buffer. */
	ion_close(shared_fd);
	info_msg("<ion> free handle: 0x%x, paddr: 0x%x, vaddr: 0x%x",
			(unsigned int)shared_fd, (unsigned int)buff->phy_addr,
			(unsigned int)buff->virt_uaddr);
#else
	ion_free(fd, handle);
	info_msg("<ion> free handle: 0x%x, paddr: 0x%x, vaddr: 0x%x",
			(unsigned int)handle, (unsigned int)buff->phy_addr,
			(unsigned int)buff->virt_uaddr);
#endif
	ion_close(fd);

	/* Tear down the user-space mapping and wipe the descriptor. */
	munmap((void *)buff->virt_uaddr, buff->size);
	memset((void*)buff, 0, sizeof(*buff));
#elif USE_GPU
        struct g2d_buf *gbuf = (struct g2d_buf *)buff->cpu_addr;
        if(gbuf) {
            if(g2d_free(gbuf) != 0) {
               err_msg("%s: gpu allocator failed to free buffer 0x%x", __FUNCTION__, (unsigned int)gbuf);
               return -1;
            }

            info_msg("<gpu> free handle: 0x%x, paddr: 0x%x, vaddr: 0x%x",
			(unsigned int)gbuf, (unsigned int)buff->phy_addr,
			(unsigned int)buff->virt_uaddr);
        }
        memset((void*)buff, 0, sizeof(*buff));
#else
	int fd_pmem;

	if (!buff || !(buff->size) || ((int)buff->cpu_addr <= 0)) {
		err_msg("Error!_IOFreePhyMem:Invalid parameters");
		return -1;
	}

	if (which != VPU_IOC_PHYMEM_FREE) {
		err_msg("Error!_IOFreePhyMem unsupported memtype: %d",which);
		return -1;
	}

	/* pmem path: cpu_addr holds the pmem fd. */
	fd_pmem = (int)buff->cpu_addr;
	if(fd_pmem) {
		munmap((void *)buff->virt_uaddr, buff->size);
		close(fd_pmem);
	}
	memset((void*)buff, 0, sizeof(*buff));
#endif
#else
	if (buff->phy_addr != 0) {
		dprintf(3, "%s: phy addr = %08lx\n", __func__, buff->phy_addr);
		ioctl(vpu_fd, which, buff);
	}

	sz_alloc -= buff->size;
	dprintf(3, "%s: total=%d\n", __func__, sz_alloc);
	memset(buff, 0, sizeof(*buff));
#endif
	return 0;
}
/*
 * Destructor: clear the buffer table, detach listeners and release the
 * ion device.
 */
PhysMemAdapter::~PhysMemAdapter()
{
    memset(mCameraBuffer, 0, sizeof(mCameraBuffer));
    clearBufferListeners();
    // Fix: only close a valid fd — matches the `>= 0` guard used by the
    // other destructors in this codebase and avoids closing fd -1.
    if (mIonFd >= 0) {
        ion_close(mIonFd);
        mIonFd = -1;
    }
}