TEST_F(FormerlyValidHandle, map)
{
    int map_fd;
    unsigned char *ptr;

    ASSERT_EQ(-EINVAL, ion_map(m_ionFd, m_handle, 4096, PROT_READ, 0, 0, &ptr, &map_fd));
}
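The fixture for this test is not shown; a minimal sketch, assuming the usual libion test pattern: the handle becomes "formerly valid" by being allocated in SetUp() and freed before the test body runs, so ion_map() must reject it with -EINVAL.

class FormerlyValidHandle : public ::testing::Test {
  protected:
    virtual void SetUp() {
        m_ionFd = ion_open();
        ASSERT_GE(m_ionFd, 0);
        ASSERT_EQ(0, ion_alloc(m_ionFd, 4096, 0, 1 /* assumed heap mask */, 0, &m_handle));
        ASSERT_EQ(0, ion_free(m_ionFd, m_handle)); // the handle is now stale
    }
    virtual void TearDown() {
        ion_close(m_ionFd);
    }
    int m_ionFd;
    ion_user_handle_t m_handle;
};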
MemoryHeapIon::MemoryHeapIon(int fd, size_t size, uint32_t flags, uint32_t offset):MemoryHeapBase()
{
    void* base = NULL;
    int dup_fd = -1;

    mIonClient = ion_client_create();

    if (mIonClient < 0) {
        ALOGE("MemoryHeapIon : ION client creation failed : %s", strerror(errno));
        mIonClient = -1;
    } else {
        if (fd >= 0) {
            dup_fd = dup(fd);
            if (dup_fd == -1) {
                ALOGE("MemoryHeapIon : cannot dup fd (size[%u], fd[%d]) : %s", size, fd, strerror(errno));
            } else {
                flags |= USE_ION_FD;
                base = ion_map(dup_fd, size, 0);
                if (base != MAP_FAILED) {
                    init(dup_fd, base, size, flags, NULL);
                } else {
                    ALOGE("MemoryHeapIon : ION mmap failed(size[%u], fd[%d]): %s", size, fd, strerror(errno));
                    ion_free(dup_fd);
                }
            }
        } else {
            ALOGE("MemoryHeapIon : fd parameter error(fd : %d)", fd);
        }
    }
}
void ion_map_test()
{
	int fd, map_fd, ret;
	size_t i;
	struct ion_handle *handle;
	unsigned char *ptr;

	if(_ion_alloc_test(&fd, &handle))
		return;

	if (tiler_test)
		len = height * stride;
	ret = ion_map(fd, handle, len, prot, map_flags, 0, &ptr, &map_fd);
	if (ret)
		return;

	if (tiler_test)
		_ion_tiler_map_test(ptr);
	else {
		for (i = 0; i < len; i++) {
			ptr[i] = (unsigned char)i;
		}
		for (i = 0; i < len; i++)
			if (ptr[i] != (unsigned char)i)
				printf("%s failed wrote %d read %d from mapped "
					   "memory\n", __func__, i, ptr[i]);
	}
	/* clean up properly */
	ret = ion_free(fd, handle);
	ion_close(fd);
	munmap(ptr, len);
	close(map_fd);

	/* allocate again and close the ion fd without freeing the handle;
	   presumably exercises kernel-side cleanup of still-held handles */
	_ion_alloc_test(&fd, &handle);
	close(fd);

#if 0
	munmap(ptr, len);
	close(map_fd);
	ion_close(fd);

	_ion_alloc_test(len, align, flags, &fd, &handle);
	close(map_fd);
	ret = ion_map(fd, handle, len, prot, flags, 0, &ptr, &map_fd);
	/* don't clean up */
#endif
}
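_ion_alloc_test() is referenced here but defined elsewhere in the test harness; a minimal sketch under the assumption that it uses the older libion prototype (struct ion_handle) and the file-scope len/align/alloc_flags test knobs. Note the multi-buffer test later on this page uses a variant that takes an already-open fd.

static int _ion_alloc_test(int *fd, struct ion_handle **handle)
{
	int ret;

	*fd = ion_open();
	if (*fd < 0)
		return *fd;
	ret = ion_alloc(*fd, len, align, alloc_flags, handle);
	if (ret)
		printf("%s failed: %s\n", __func__, strerror(-ret));
	return ret;
}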
Example #4
int ion_phys(int fd, ion_user_handle_t handle, unsigned long *phys)
{
	int ret;
	struct owl_ion_phys_data phys_data = {
		.handle = handle,
	};
	struct ion_custom_data data = {
		.cmd = OWL_ION_GET_PHY,
		.arg = (unsigned long)&phys_data,
	};

	ret = ion_ioctl(fd, ION_IOC_CUSTOM, &data);
	if (ret < 0)
		return ret;

	*phys = phys_data.phys_addr;
	return ret;
}
int ion_count = 0;
/* Allocate memory with ION; returns 0 on success. */
int sys_mem_allocate(unsigned int size, void **vir_addr, ion_user_handle_t * p_ion_handle)
{
    int ret;

    if (!ion_count) {
        ion_fd = ion_open();
        if (ion_fd < 0) {
            printf("ion_open failed\n");
            return -1;
        }
        printf("ion_open ok ion_fd = %d\n", ion_fd);
    }

    ret = ion_alloc(ion_fd, size, 0, 1, 0, &ion_handle_t);
    if (ret) {
        printf("%s failed: %s\n", __func__, strerror(ret));
        return -1;
    }
    *p_ion_handle = ion_handle_t;

    ret = ion_map(ion_fd, ion_handle_t, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0,
                  (unsigned char **)vir_addr, &ion_map_fd);
    if (ret) {
        printf("ion_map error\n");
        return -1;
    }
    printf("ion_map ok\n");
    ion_count++;
    return 0;
}
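A hypothetical counterpart (not part of the original source), releasing everything sys_mem_allocate() set up via the same file-scope globals; note those globals only track the most recent mapping, mirroring the allocator's own limitation.

/* Free memory allocated by sys_mem_allocate(); returns 0 on success. */
int sys_mem_free(unsigned int size, void *vir_addr, ion_user_handle_t ion_handle)
{
    munmap(vir_addr, size);
    close(ion_map_fd);
    ion_free(ion_fd, ion_handle);
    if (--ion_count == 0)
        ion_close(ion_fd);
    return 0;
}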
unsigned long ExynosVirtualDisplay::getMappedAddrFBTarget(int fd)
{
    for (int i = 0; i < NUM_FB_TARGET; i++) {
        if (fbTargetInfo[i].fd == fd)
            return fbTargetInfo[i].mappedAddr;

        if (fbTargetInfo[i].fd == -1) {
            fbTargetInfo[i].fd = fd;
            fbTargetInfo[i].mappedAddr = (unsigned long)ion_map(fd, mWidth * mHeight * 4, 0);
            fbTargetInfo[i].mapSize = mWidth * mHeight * 4;
            return fbTargetInfo[i].mappedAddr;
        }
    }
    return 0;
}
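The cached mappings above are never torn down in this excerpt; a hypothetical cleanup sketch, assuming the Exynos flavor of libion that pairs this ion_map(fd, len, offset) with an ion_unmap(addr, len):

void ExynosVirtualDisplay::unmapFBTargets()
{
    for (int i = 0; i < NUM_FB_TARGET; i++) {
        if (fbTargetInfo[i].fd != -1 && fbTargetInfo[i].mappedAddr) {
            ion_unmap((void *)fbTargetInfo[i].mappedAddr, fbTargetInfo[i].mapSize);
            fbTargetInfo[i].fd = -1;
            fbTargetInfo[i].mappedAddr = 0;
            fbTargetInfo[i].mapSize = 0;
        }
    }
}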
MemoryHeapIon::MemoryHeapIon(size_t size, uint32_t flags, char const *name):MemoryHeapBase()
{
    void* base = NULL;
    int fd = -1;
    uint32_t isReadOnly, heapMask, flagMask;

    mIonClient = ion_client_create();

    if (mIonClient < 0) {
        ALOGE("MemoryHeapIon : ION client creation failed : %s", strerror(errno));
        mIonClient = -1;
    } else {
        isReadOnly = flags & (IMemoryHeap::READ_ONLY);
        heapMask = ion_HeapMask_valid_check(flags);
        flagMask = ion_FlagMask_valid_check(flags);

        if (heapMask) {
            ALOGD("MemoryHeapIon : Allocated with size:%d, heap:0x%X , flag:0x%X", size, heapMask, flagMask);
            fd = ion_alloc(mIonClient, size, 0, heapMask, flagMask);
            if (fd < 0) {
                ALOGE("MemoryHeapIon : ION Reserve memory allocation failed(size[%u]) : %s", size, strerror(errno));
                if (errno == ENOMEM) { // Out of reserve memory. So re-try allocating in system heap
                    ALOGD("MemoryHeapIon : Re-try Allocating in default heap - SYSTEM heap");
                    fd = ion_alloc(mIonClient, size, 0, ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC | ION_FLAG_PRESERVE_KMAP);
                }
            }
        } else {
            fd = ion_alloc(mIonClient, size, 0, ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC | ION_FLAG_PRESERVE_KMAP);
            ALOGD("MemoryHeapIon : Allocated with default heap - SYSTEM heap");
        }

        flags = isReadOnly | heapMask | flagMask;

        if (fd < 0) {
            ALOGE("MemoryHeapIon : ION memory allocation failed(size[%u]) : %s", size, strerror(errno));
        } else {
            flags |= USE_ION_FD;
            base = ion_map(fd, size, 0);
            if (base != MAP_FAILED) {
                init(fd, base, size, flags, NULL);
            } else {
                ALOGE("MemoryHeapIon : ION mmap failed(size[%u], fd[%d]) : %s", size, fd, strerror(errno));
                ion_free(fd);
            }
        }
    }
}
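A brief usage sketch of this constructor (hypothetical, and flag constants vary by platform; 0 takes the default system-heap path above):

// Allocate a 1 MiB ION-backed heap, then read the base ion_map()'d in init().
static void example_use_memory_heap_ion()
{
    android::sp<MemoryHeapIon> heap = new MemoryHeapIon(1 << 20, 0, "example-heap");
    void *va = heap->getBase();
    (void)va;
}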
Example #7
int MemoryManager::allocateBufferList(int size, int numBufs)
{
    int mmap_fd = -1;
    LOG_FUNCTION_NAME;


    //2D Allocations are not supported currently
    if(size != 0) {
        struct ion_handle *handle;

        size_t stride;

        ///1D buffers
        for (int i = 0; i < numBufs; i++) {
            unsigned char *data;
            int ret = ion_alloc(mIonFd, size, 0, 1 << ION_HEAP_TYPE_CARVEOUT,
                    &handle);

            if((ret < 0) || ((int)handle == -ENOMEM)) {
                printe("FAILED to allocate ion buffer of size=%d. ret=%d(0x%x)", size, ret, ret);
                goto error;
            }

            if ((ret = ion_map(mIonFd, handle, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0,
                          &data, &mmap_fd)) < 0) {
                printe("Userspace mapping of ION buffers returned error %d", ret);
                ion_free(mIonFd, handle);
                goto error;
            }
        }
    }

    LOG_FUNCTION_NAME_EXIT;

    return mmap_fd;

error:

    printe("Freeing buffers already allocated after error occurred");

#if 0
    if ( NULL != mErrorNotifier.get() )
        mErrorNotifier->errorNotify(-ENOMEM);
    LOG_FUNCTION_NAME_EXIT;
#endif
    return -1;
}
Example #8
static int gralloc_map(gralloc_module_t const* module,
        buffer_handle_t handle, void** vaddr)
{
    private_handle_t* hnd = (private_handle_t*)handle;
    if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
        if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_IOCTL) {
            size_t size = FIMC1_RESERVED_SIZE * 1024;
            void *mappedAddress = mmap(0, size,
                    PROT_READ|PROT_WRITE, MAP_SHARED, gMemfd, (hnd->paddr - hnd->offset));
            if (mappedAddress == MAP_FAILED) {
                ALOGE("Could not mmap %s fd(%d)", strerror(errno),hnd->fd);
                return -errno;
            }
            hnd->base = intptr_t(mappedAddress) + hnd->offset;
        } else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION) {
            size_t size = hnd->size;
            hnd->ion_client = ion_client_create();
            void *mappedAddress = ion_map(hnd->fd, size, 0);

            if (mappedAddress == MAP_FAILED) {
                ALOGE("Could not ion_map %s fd(%d)", strerror(errno), hnd->fd);
                return -errno;
            }

            hnd->base = intptr_t(mappedAddress) + hnd->offset;
        } else {
            size_t size = hnd->size;
#if PMEM_HACK
            size += hnd->offset;
#endif
            void *mappedAddress = mmap(0, size,
                    PROT_READ|PROT_WRITE, MAP_SHARED, hnd->fd, 0);
            if (mappedAddress == MAP_FAILED) {
                ALOGE("Could not mmap %s fd(%d)", strerror(errno),hnd->fd);
                return -errno;
            }
            hnd->base = intptr_t(mappedAddress) + hnd->offset;
        }
    }
    *vaddr = (void*)hnd->base;
    return 0;
}
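For symmetry, a sketch of the matching unmap path; this is an assumption (the ioctl-mapped FIMC case, which maps FIMC1_RESERVED_SIZE * 1024 bytes, is glossed over and would need its own size bookkeeping):

static int gralloc_unmap(gralloc_module_t const* module, buffer_handle_t handle)
{
    private_handle_t* hnd = (private_handle_t*)handle;
    if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) && hnd->base) {
        void* base = (void*)(hnd->base - hnd->offset);
        if (munmap(base, hnd->size) < 0)
            ALOGE("Could not unmap %s fd(%d)", strerror(errno), hnd->fd);
        hnd->base = 0;
    }
    return 0;
}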
int _IOGetPhyMem(int which, vpu_mem_desc *buff)
{
#ifdef BUILD_FOR_ANDROID
	const size_t pagesize = getpagesize();
	int err, fd;
#ifdef USE_ION
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	ion_user_handle_t handle;
#else
	struct ion_handle *handle;
#endif
	int share_fd, ret = -1;
	unsigned char *ptr;
#elif USE_GPU
        struct g2d_buf *gbuf;
        int bytes;
#else
	/* Get memory from pmem space for android */
	struct pmem_region region;
#endif

	if ((!buff) || (!buff->size)) {
		err_msg("Error!_IOGetPhyMem:Invalid parameters");
		return -1;
	}

	buff->cpu_addr = 0;
	buff->phy_addr = 0;
	buff->virt_uaddr = 0;

	if (which == VPU_IOC_GET_WORK_ADDR) {
		if (ioctl(vpu_fd, which, buff) < 0) {
			err_msg("mem allocation failed!\n");
			buff->phy_addr = 0;
			buff->cpu_addr = 0;
			return -1;
		}
		return 0;
	}

	if (which != VPU_IOC_PHYMEM_ALLOC) {
		err_msg("Error!_IOGetPhyMem unsupported memtype: %d", which);
		return -1;
	}

	buff->size = (buff->size + pagesize-1) & ~(pagesize - 1);

#ifdef USE_ION
	fd = ion_open();
	if (fd <= 0) {
		err_msg("ion open failed!\n");
		return -1;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	err = ion_alloc(fd, buff->size, pagesize, 1, 0, &handle);
#else
	err = ion_alloc(fd, buff->size, pagesize, 1, &handle);
#endif
	if (err) {
		err_msg("ion allocation failed!\n");
		goto error;
	}

	err = ion_map(fd, handle, buff->size,
			    PROT_READ|PROT_WRITE, MAP_SHARED,
			    0, &ptr, &share_fd);
	if (err) {
		err_msg("ion map failed!\n");
		goto error;
	}

	err = ion_phys(fd, handle);
	if (err == 0) {
		err_msg("ion get physical address failed!\n");
		goto error;
	}

	buff->virt_uaddr = (unsigned long)ptr;
	buff->phy_addr = (unsigned long)err;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	ion_free(fd, handle);
	buff->cpu_addr = (unsigned long)share_fd;
#else
	buff->cpu_addr = (unsigned long)handle;
#endif
	memset((void*)buff->virt_uaddr, 0, buff->size);
	ret = 0;
	info_msg("<ion> alloc handle: 0x%x, paddr: 0x%x, vaddr: 0x%x",
			(unsigned int)handle, (unsigned int)buff->phy_addr,
			(unsigned int)buff->virt_uaddr);
error:
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
	close(share_fd);
#endif
	ion_close(fd);
	return ret;
#elif USE_GPU
        bytes = buff->size + PAGE_SIZE;
        gbuf = g2d_alloc(bytes, 0);
        if(!gbuf) {
            err_msg("%s: gpu allocator failed to alloc buffer with size %d", __FUNCTION__, buff->size);
            return -1;
        }

        buff->virt_uaddr = (unsigned long)gbuf->buf_vaddr;
        buff->phy_addr = (unsigned long)gbuf->buf_paddr;
        buff->cpu_addr = (unsigned long)gbuf;

        //vpu requires page alignment for the address implicitly, round it to page edge
        buff->virt_uaddr = (buff->virt_uaddr + PAGE_SIZE -1) & ~(PAGE_SIZE -1);
        buff->phy_addr = (buff->phy_addr + PAGE_SIZE -1) & ~(PAGE_SIZE -1);
        memset((void*)buff->virt_uaddr, 0, buff->size);

        info_msg("<gpu> alloc handle: 0x%x, paddr: 0x%x, vaddr: 0x%x",
			(unsigned int)gbuf, (unsigned int)buff->phy_addr,
			(unsigned int)buff->virt_uaddr);
        return 0;
#else
	fd = open("/dev/pmem_adsp", O_RDWR | O_SYNC);
	if (fd < 0) {
		err_msg("Error!_IOGetPhyMem Error,cannot open pmem");
		return -1;
	}

	err = ioctl(fd, PMEM_GET_TOTAL_SIZE, &region);

	buff->virt_uaddr = (unsigned long)mmap(0, buff->size,
			    PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	if (buff->virt_uaddr == (unsigned long)MAP_FAILED) {
		err_msg("Error!mmap(fd=%d, size=%u) failed (%s)",
			fd, buff->size, strerror(errno));
		close(fd);
		return -1;
	}

	memset(&region, 0, sizeof(region));

	if (ioctl(fd, PMEM_GET_PHYS, &region) == -1) {
		err_msg("Error!Failed to get physical address of source!");
		munmap((void *)buff->virt_uaddr, buff->size);
		close(fd);
		return -1;
	}

	buff->phy_addr = (unsigned long)region.offset;
	buff->cpu_addr = (unsigned long)fd;
	memset((void*)buff->virt_uaddr, 0, buff->size);
#endif
#else
	if (ioctl(vpu_fd, which, buff) < 0) {
		err_msg("mem allocation failed!\n");
		buff->phy_addr = 0;
		buff->cpu_addr = 0;
		return -1;
	}
	sz_alloc += buff->size;
	dprintf(3, "%s: phy addr = %08lx\n", __func__, buff->phy_addr);
	dprintf(3, "%s: alloc=%d, total=%d\n", __func__, buff->size, sz_alloc);
#endif

	return 0;
}
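The matching release path is not shown; a hypothetical sketch for the ION branch on >= 3.10 kernels, where the dma-buf fd stashed in cpu_addr is what keeps the buffer alive after the early ion_free():

static void _IOFreePhyMemION(vpu_mem_desc *buff)
{
	if (buff->virt_uaddr)
		munmap((void *)buff->virt_uaddr, buff->size);
	if (buff->cpu_addr)
		close((int)buff->cpu_addr);	/* the share_fd saved above */
	buff->virt_uaddr = 0;
	buff->phy_addr = 0;
	buff->cpu_addr = 0;
}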
Example #10
/*--------------------MemoryManager Class STARTS here-----------------------------*/
void* MemoryManager::allocateBuffer(int width, int height, const char* format,
		int &bytes, int numBufs) {
	LOG_FUNCTION_NAME;

	if (mIonFd == 0) {
		mIonFd = ion_open();
		if (mIonFd < 0) {
			LOGE("ion_open failed!!!");
			mIonFd = 0;
			return NULL;
		}
	}

	///We allocate numBufs+1 because the last entry will be marked NULL to indicate end of array, which is used when freeing
	///the buffers
	const uint numArrayEntriesC = (uint)(numBufs + 1);

	///Allocate a buffer array
	uint32_t *bufsArr = new uint32_t[numArrayEntriesC];
	if (!bufsArr) {
		LOGE(
				"Allocation failed when creating buffers array of %d uint32_t elements",
				numArrayEntriesC);
		LOG_FUNCTION_NAME_EXIT;
		return NULL;
	}

	///Initialize the array with zeros - this will help us while freeing the array in case of error
	///If a value of an array element is NULL, it means we didn't allocate it
	memset(bufsArr, 0, sizeof(*bufsArr) * numArrayEntriesC);

	//2D Allocations are not supported currently
	if (bytes != 0) {
		struct ion_handle *handle;
		int mmap_fd;

		///1D buffers
		for (int i = 0; i < numBufs; i++) {
			int ret = ion_alloc(mIonFd, bytes, 0, 1 << ION_HEAP_TYPE_CARVEOUT,
					&handle);
			if (ret < 0) {
				LOGE("ion_alloc resulted in error %d", ret);
				goto error;
			}

			LOGE("Before mapping, handle = %x, nSize = %d", handle, bytes);
			if ((ret = ion_map(mIonFd, handle, bytes, PROT_READ | PROT_WRITE,
					MAP_SHARED, 0, (unsigned char**) &bufsArr[i], &mmap_fd))
					< 0) {
				LOGE("Userspace mapping of ION buffers returned error %d", ret);
				ion_free(mIonFd, handle);
				goto error;
			}

			mIonHandleMap.add(bufsArr[i], (unsigned int) handle);
			mIonFdMap.add(bufsArr[i], (unsigned int) mmap_fd);
			mIonBufLength.add(bufsArr[i], (unsigned int) bytes);
		}

	} else // If bytes is not zero, then it is a 2-D tiler buffer request
	{
	}

	LOG_FUNCTION_NAME_EXIT;

	return (void*) bufsArr;

	error: LOGE("Freeing buffers already allocated after error occurred");
	freeBuffer(bufsArr);

	if (NULL != mErrorNotifier.get()) {
		mErrorNotifier->errorNotify(-ENOMEM);
	}

	LOG_FUNCTION_NAME_EXIT;
	return NULL;
}
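freeBuffer() is called above but not shown; a sketch of what it plausibly does, given that the array is NULL-terminated and the three KeyedVectors record length, map fd and ion handle per mapped address:

int MemoryManager::freeBuffer(void* buf)
{
	uint32_t *bufEntries = (uint32_t *)buf;
	while (bufEntries && *bufEntries) {
		unsigned int vaddr = *bufEntries++;
		munmap((void *)vaddr, mIonBufLength.valueFor(vaddr));
		close(mIonFdMap.valueFor(vaddr));
		ion_free(mIonFd, (struct ion_handle *)mIonHandleMap.valueFor(vaddr));
		mIonBufLength.removeItem(vaddr);
		mIonFdMap.removeItem(vaddr);
		mIonHandleMap.removeItem(vaddr);
	}
	delete [] (uint32_t *)buf;
	return 0;
}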
int ion_map_test(int count)
{
	int fd, ret = 0, i, count_alloc, count_map;
	struct ion_handle **handle;
	unsigned char **ptr;
	int *map_fd;

	fd = ion_open();
	if (fd < 0) {
		printf("%s(): FAILED to open ion device\n",	__func__);
		return -1;
	}

	handle = (struct ion_handle **)malloc(count * sizeof(struct ion_handle *));
	if(handle == NULL) {
		printf("%s(): FAILED to allocate memory for ion_handles\n", __func__);
		return -ENOMEM;
	}

	count_alloc = count;
	count_map = count;

	/* Allocate ion_handles */
	for(i = 0; i < count; i++) {
		ret = _ion_alloc_test(fd, &(handle[i]));
		printf("%s(): Alloc handle[%d]=%p\n", __func__, i, handle[i]);
		if(ret || ((int)handle[i]  == -ENOMEM)) {
			printf("%s(): Alloc handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
			count_alloc = i;
			goto err_alloc;
		}
	}

	/* Map ion_handles and validate */
	if (tiler_test)
		len = height * stride;

	ptr = (unsigned char **)malloc(count * sizeof(unsigned char *));
	map_fd = (int *)malloc(count * sizeof(int));

	for(i = 0; i < count; i++) {
		/* Map ion_handle on userside */
		ret = ion_map(fd, handle[i], len, prot, map_flags, 0, &(ptr[i]), &(map_fd[i]));
		printf("%s(): Map handle[%d]=%p, map_fd=%d, ptr=%p\n",
				__func__, i, handle[i], map_fd[i], ptr[i]);
		if(ret) {
			printf("%s Map handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
			count_map = i;
			goto err_map;
		}

		/* Validate mapping by writing the data and reading it back */
		if (tiler_test)
			_ion_tiler_map_test(ptr[i]);
		else
			_ion_map_test(ptr[i]);
	}

	/* clean up properly */
	err_map:
	for(i = 0; i < count_map; i++) {
		/* Unmap ion_handles */
		ret = munmap(ptr[i], len);
		printf("%s(): Unmap handle[%d]=%p, map_fd=%d, ptr=%p\n",
				__func__, i, handle[i], map_fd[i], ptr[i]);
		if(ret) {
			printf("%s(): Unmap handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
			break;
		}
		/* Close fds */
		close(map_fd[i]);
	}
	free(map_fd);
	free(ptr);

	err_alloc:
	/* Free ion_handles */
	for (i = 0; i < count_alloc; i++) {
		printf("%s(): Free handle[%d]=%p\n", __func__, i, handle[i]);
		ret = ion_free(fd, handle[i]);
		if (ret) {
			printf("%s(): Free handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
		}
	}

	ion_close(fd);
	free(handle);
	handle = NULL;

	if (ret || (count_alloc != count) || (count_map != count)) {
		printf("\nion map test: FAILED\n\n");
		if ((count_alloc != count) || (count_map != count))
			ret = -ENOMEM;
	} else {
		printf("\nion map test: PASSED\n");
	}

	return ret;
}
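_ion_map_test() is assumed to mirror the inline write/read-back loop from the single-buffer test earlier on this page; a sketch on that assumption:

static void _ion_map_test(unsigned char *ptr)
{
	size_t i;

	for (i = 0; i < len; i++)
		ptr[i] = (unsigned char)i;
	for (i = 0; i < len; i++)
		if (ptr[i] != (unsigned char)i)
			printf("%s(): failed, wrote %zu read %d from mapped memory\n",
					__func__, i, ptr[i]);
}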
Example #12
void *actal_malloc_uncache(int size,void *phy_add)
{
	int prot = PROT_READ | PROT_WRITE;
	int map_flags = MAP_SHARED;
	ion_user_handle_t handle;
	int map_fd, ret;
    void *ptr;

	ALOGI("[memory_malloc] actal_malloc_uncache: size: %d", size);
	if(size <= 0){
		ALOGE("actal_malloc_uncache: size must be positive\n");
		return NULL; //-EINVAL;
	}

	check_pid();
	// actal_printf_list();
	// actal_error("s_fd = %d\n", s_fd);
    if (size & ALIGN_MASK) {
        //round size up to 4 KiB (page) alignment
        size += (ALIGN_BYTES - (size & ALIGN_MASK));
    }

    struct actal_mem * user_p;

	user_p = (struct actal_mem*)malloc(sizeof(struct actal_mem));
	user_p->next = NULL;
	

	ret = ion_alloc(s_fd, size, 0, 1, 0, &handle);
	if(ret < 0) {
		ALOGE("actal_malloc_uncache: ion_alloc(size: %d) failed(%d)\n", size, ret);
		return NULL;
	}
		
	// ALOGD("handle :%#X\n", handle);

	ret = ion_map(s_fd, handle, size, prot, map_flags, 0, (unsigned char **)&ptr, &map_fd);
	if (ret < 0) {
		ALOGE("actal_malloc_uncache: ion_map(size: %d) failed(%d)\n", size, ret);
		ion_free(s_fd, handle);
		free(user_p);
		return NULL;
	}

	user_p->handle = handle;
    user_p->len = size;
    user_p->fd = s_fd;
	user_p->ptr = ptr;
	user_p->map_fd = map_fd;
	user_p->flag = 0;

    ret = ion_phys(s_fd, handle,(unsigned long*)phy_add);
    if(ret < 0){
        actal_error("actal_malloc_wt: get phy_addr error!\n");
        return NULL;
    }

    user_p->phy_add = *((long*)phy_add);

	//serialize list updates against other threads
	if (pthread_mutex_lock(&mutex) != 0)
    {
		ALOGE("get mutex failed");
        return NULL;
    }

	if(s_top_p == NULL)  //lazily create an empty head node for the list
	{
		s_current_p = s_top_p = (struct actal_mem*)malloc(sizeof(struct actal_mem));
		s_top_p->fd = 0;
		s_top_p->ptr = NULL;
		s_top_p->map_fd = 0;
		s_top_p->handle = -1;
		s_top_p->len = 0;
		s_top_p->phy_add = 0;
		s_top_p->flag = 0;
	}
	
	s_current_p->next = user_p;
    s_current_p = user_p;
	
	if (pthread_mutex_unlock(&mutex) != 0)
    {
		ALOGE("actal_malloc_wt: free mutex failed");
        return NULL;
    }

    // ALOGD("malloc_uncache: ptr = %#X, phy_add = %#X, size = %d\n", (unsigned int)ptr, *phy_add, size);
    return (void *)ptr;
}
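A hypothetical release routine (not in the original source) that walks the same list, mirroring the locking and field layout used above:

void actal_free_uncache(void *ptr)
{
	struct actal_mem *prev, *cur;

	if (pthread_mutex_lock(&mutex) != 0) {
		ALOGE("get mutex failed");
		return;
	}
	prev = s_top_p;
	for (cur = prev ? prev->next : NULL; cur != NULL; prev = cur, cur = cur->next) {
		if (cur->ptr == ptr) {
			munmap(cur->ptr, cur->len);
			close(cur->map_fd);
			ion_free(cur->fd, cur->handle);
			prev->next = cur->next;
			if (s_current_p == cur)
				s_current_p = prev;
			free(cur);
			break;
		}
	}
	pthread_mutex_unlock(&mutex);
}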
void BpMemoryHeap::assertReallyMapped() const
{
    if (mHeapId == -1) {

        // remote call without mLock held; worst-case scenario we end up
        // calling transact() from multiple threads, but that's not a problem,
        // only the mmap below must be in the critical section.

        Parcel data, reply;
        data.writeInterfaceToken(IMemoryHeap::getInterfaceDescriptor());
        status_t err = remote()->transact(HEAP_ID, data, &reply);
        int parcel_fd = reply.readFileDescriptor();
        ssize_t size = reply.readInt32();
        uint32_t flags = reply.readInt32();
        uint32_t offset = reply.readInt32();

        ALOGE_IF(err, "binder=%p transaction failed fd=%d, size=%ld, err=%d (%s)",
                asBinder().get(), parcel_fd, size, err, strerror(-err));

#ifdef USE_V4L2_ION
        int ion_client = -1;
        if (flags & USE_ION_FD) {
            ion_client = ion_client_create();
            ALOGE_IF(ion_client < 0, "BpMemoryHeap : ion client creation error");
        }
#endif

        int fd = dup( parcel_fd );
        ALOGE_IF(fd==-1, "cannot dup fd=%d, size=%ld, err=%d (%s)",
                parcel_fd, size, err, strerror(errno));

        int access = PROT_READ;
        if (!(flags & READ_ONLY)) {
            access |= PROT_WRITE;
        }

        Mutex::Autolock _l(mLock);
        if (mHeapId == -1) {
            mRealHeap = true;

#ifdef USE_V4L2_ION
            if (flags & USE_ION_FD) {
                if (ion_client < 0)
                    mBase = MAP_FAILED;
                else
                    mBase = ion_map(fd, size, offset);
            } else
#endif
                mBase = mmap(0, size, access, MAP_SHARED, fd, offset);
            if (mBase == MAP_FAILED) {
                ALOGE("cannot map BpMemoryHeap (binder=%p), size=%ld, fd=%d (%s)",
                        asBinder().get(), size, fd, strerror(errno));
                close(fd);
            } else {
                mSize = size;
                mFlags = flags;
                mOffset = offset;
                android_atomic_write(fd, &mHeapId);
            }
        }
#ifdef USE_V4L2_ION
        if (ion_client < 0)
            ion_client = -1;
        else
            ion_client_destroy(ion_client);
#endif
    }
}
int PhysMemAdapter::allocatePictureBuffer(int width,
                                          int height,
                                          int format,
                                          int numBufs)
{
    if (mIonFd <= 0) {
        FLOGE("try to allocate buffer from ion in preview or ion invalid");
        return BAD_VALUE;
    }

    int size = 0;
    if ((width == 0) || (height == 0)) {
        FLOGE("allocateBufferFromIon: width or height = 0");
        return BAD_VALUE;
    }
    switch (format) {
        case HAL_PIXEL_FORMAT_YCbCr_420_SP:
            size = width * ((height + 16) & (~15)) * 3 / 2;
            break;

        case HAL_PIXEL_FORMAT_YCbCr_420_P:
            size = width * height * 3 / 2;
            break;

        case HAL_PIXEL_FORMAT_YCbCr_422_I:
            size = width * height * 2;
            break;

        default:
            FLOGE("Error: format not supported int ion alloc");
            return BAD_VALUE;
    }

    unsigned char *ptr = NULL;
    int sharedFd;
    int phyAddr;
    struct ion_handle *ionHandle;
    size = (size + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));

    FLOGI("allocateBufferFromIon buffer num:%d", numBufs);
    for (int i = 0; i < numBufs; i++) {
        ionHandle = NULL;
        int err = ion_alloc(mIonFd, size, 8, 1, &ionHandle);
        if (err) {
            FLOGE("ion_alloc failed.");
            return BAD_VALUE;
        }

        err = ion_map(mIonFd,
                      ionHandle,
                      size,
                      PROT_READ | PROT_WRITE,
                      MAP_SHARED,
                      0,
                      &ptr,
                      &sharedFd);
        if (err) {
            FLOGE("ion_map failed.");
            return BAD_VALUE;
        }
        phyAddr = ion_phys(mIonFd, ionHandle);
        if (phyAddr == 0) {
            FLOGE("ion_phys failed.");
            return BAD_VALUE;
        }
        FLOG_RUNTIME("phyalloc ptr:0x%x, phy:0x%x, size:%d",
                     (int)ptr,
                     phyAddr,
                     size);
        mCameraBuffer[i].reset();
        mCameraBuffer[i].mIndex     = i;
        mCameraBuffer[i].mWidth     = width;
        mCameraBuffer[i].mHeight    = height;
        mCameraBuffer[i].mFormat    = format;
        mCameraBuffer[i].mVirtAddr  = ptr;
        mCameraBuffer[i].mPhyAddr   = phyAddr;
        mCameraBuffer[i].mSize      =  size;
        mCameraBuffer[i].mBufHandle = (buffer_handle_t *)ionHandle;
        close(sharedFd);
    }

    mBufferCount    = numBufs;
    mQueueableCount = numBufs;
    mFormat         = format;
    mBufferSize     = mCameraBuffer[0].mSize;
    mFrameWidth     = width;
    mFrameHeight    = height;

    dispatchBuffers(&mCameraBuffer[0], numBufs, BUFFER_CREATE);

    return NO_ERROR;
}
MEMPLUGIN_ERRORTYPE MemPlugin_ION_Alloc(void *pMemPluginHandle, OMX_U32 nClient,
                                    MEMPLUGIN_BUFFER_PARAMS *pIonBufferParams,
                                    MEMPLUGIN_BUFFER_PROPERTIES *pIonBufferProp)
{
    OMX_S16 ret;
    struct ion_handle *temp = NULL;
    size_t stride = 0;
    MEMPLUGIN_ERRORTYPE eError = MEMPLUGIN_ERROR_NONE;
    MEMPLUGIN_ION_PARAMS sIonParams;
    MEMPLUGIN_OBJECT    *pMemPluginHdl = (MEMPLUGIN_OBJECT *)pMemPluginHandle;

    if(pIonBufferParams->nWidth <= 0)
    {
        eError = MEMPLUGIN_ERROR_BADPARAMETER;
        DOMX_ERROR("%s: width should be positive %d", __FUNCTION__,pIonBufferParams->nWidth);
        goto EXIT;
    }

    if(pMemPluginHdl->pPluginExtendedInfo == NULL)
    {
        MEMPLUGIN_ION_PARAMS_INIT(&sIonParams);
    }
    else
    {
        MEMPLUGIN_ION_PARAMS_COPY(((MEMPLUGIN_ION_PARAMS *)pMemPluginHdl->pPluginExtendedInfo),sIonParams);
    }
    if(pIonBufferParams->eBuffer_type == DEFAULT)
    {
        ret = (OMX_S16)ion_alloc(nClient,
                                    pIonBufferParams->nWidth,
                                    sIonParams.nAlign,
                                    sIonParams.alloc_flags,
                                    &temp);
        if(ret || (int)temp == -ENOMEM)
        {
            if(sIonParams.alloc_flags != OMAP_ION_HEAP_SECURE_INPUT)
            {
               //for non default types of allocation - no retry with tiler 1d - throw error
//STARGO: ducati secure heap is too small, need to allocate from heap
#if 0
               DOMX_ERROR("FAILED to allocate secure buffer of size=%d. ret=0x%x",pIonBufferParams->nWidth, ret);
               eError = MEMPLUGIN_ERROR_NORESOURCES;
               goto EXIT;
#endif
               DOMX_ERROR("FAILED to allocate secure buffer of size=%d. ret=0x%x - trying tiler 1d space",pIonBufferParams->nWidth, ret);
               pIonBufferParams->eBuffer_type = TILER1D;
               pIonBufferParams->eTiler_format = MEMPLUGIN_TILER_FORMAT_PAGE;
               sIonParams.alloc_flags = OMAP_ION_HEAP_TILER_MASK;
               sIonParams.nAlign = -1;
            }
            else
            {
                // for default non tiler (OMAP_ION_HEAP_SECURE_INPUT) retry allocating from tiler 1D
                DOMX_DEBUG("FAILED to allocate from non tiler space - trying tiler 1d space");
                pIonBufferParams->eBuffer_type = TILER1D;
                pIonBufferParams->eTiler_format = MEMPLUGIN_TILER_FORMAT_PAGE;
                sIonParams.alloc_flags = OMAP_ION_HEAP_TILER_MASK;
                sIonParams.nAlign = -1;
            }
        }
    }
    if(pIonBufferParams->eBuffer_type == TILER1D)
    {
        ret = (OMX_S16)ion_alloc_tiler(nClient,
                                        pIonBufferParams->nWidth,
                                        pIonBufferParams->nHeight,
                                        pIonBufferParams->eTiler_format,
                                        sIonParams.alloc_flags,
                                        &temp,
                                        &(pIonBufferProp->nStride));

         if (ret || ((int)temp == -ENOMEM))
         {
               DOMX_ERROR("FAILED to allocate buffer of size=%d. ret=0x%x",pIonBufferParams->nWidth, ret);
               eError = MEMPLUGIN_ERROR_NORESOURCES;
               goto EXIT;
         }
    }
    else if(pIonBufferParams->eBuffer_type == TILER2D)
    {
        DOMX_ERROR("Tiler 2D not implemented");
        eError = MEMPLUGIN_ERROR_NOTIMPLEMENTED;
        goto EXIT;
    }
    else if(!temp)
    {
        DOMX_ERROR("Undefined option for buffer type");
        eError = MEMPLUGIN_ERROR_UNDEFINED;
        goto EXIT;
    }
    pIonBufferProp->sBuffer_accessor.pBufferHandle = (OMX_PTR)temp;
    if(pIonBufferParams->eBuffer_type != TILER1D)
        pIonBufferProp->nStride = stride;

    if(pIonBufferParams->bMap == OMX_TRUE)
    {
        ret = (OMX_S16) ion_map(nClient,
                                pIonBufferProp->sBuffer_accessor.pBufferHandle,
                                pIonBufferParams->nWidth*pIonBufferParams->nHeight,
                                sIonParams.prot,
                                sIonParams.map_flags,
                                sIonParams.nOffset,
                                (unsigned char **) &(pIonBufferProp->sBuffer_accessor.pBufferMappedAddress),
                                &(pIonBufferProp->sBuffer_accessor.bufferFd));

        if(ret < 0)
        {
                DOMX_ERROR("userspace mapping of ION buffers returned error");
                eError = MEMPLUGIN_ERROR_NORESOURCES;
                goto EXIT;
        }
    }
    else
    {
        ret = (OMX_S16) ion_share(nClient,
                                    pIonBufferProp->sBuffer_accessor.pBufferHandle,
                                    &(pIonBufferProp->sBuffer_accessor.bufferFd));
        if(ret < 0)
        {
                DOMX_ERROR("ION share returned error");
                eError = MEMPLUGIN_ERROR_NORESOURCES;
                goto EXIT;
        }
    }
EXIT:
      if (eError != MEMPLUGIN_ERROR_NONE) {
          DOMX_EXIT("%s exited with error 0x%x",__FUNCTION__,eError);
         return eError;
      }
      else {
          DOMX_EXIT("%s executed successfully",__FUNCTION__);
         return MEMPLUGIN_ERROR_NONE;
      }
}
Example #16
void *halide_hexagon_host_malloc(size_t size) {
    const int heap_id = system_heap_id;
    const int ion_flags = ion_flag_cached;

    // Hexagon can only access a small number of mappings of these
    // sizes. We reduce the number of mappings required by aligning
    // large allocations to these sizes.
    static const size_t alignments[] = { 0x1000, 0x4000, 0x10000, 0x40000, 0x100000 };
    size_t alignment = alignments[0];

    // Align the size up to the minimum alignment.
    size = (size + alignment - 1) & ~(alignment - 1);

    if (heap_id != system_heap_id) {
        for (size_t i = 0; i < sizeof(alignments) / sizeof(alignments[0]); i++) {
            if (size >= alignments[i]) {
                alignment = alignments[i];
            }
        }
    }

    ion_user_handle_t handle = ion_alloc(ion_fd, size, alignment, 1 << heap_id, ion_flags);
    if (handle < 0) {
        __android_log_print(ANDROID_LOG_ERROR, "halide", "ion_alloc(%d, %d, %d, %d, %d) failed",
                            ion_fd, size, alignment, 1 << heap_id, ion_flags);
        return NULL;
    }

    // Map the ion handle to a file buffer.
    int buf_fd = ion_map(ion_fd, handle);
    if (buf_fd < 0) {
        __android_log_print(ANDROID_LOG_ERROR, "halide", "ion_map(%d, %d) failed", ion_fd, handle);
        ion_free(ion_fd, handle);
        return NULL;
    }

    // Map the file buffer to a pointer.
    void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);
    if (buf == MAP_FAILED) {
        __android_log_print(ANDROID_LOG_ERROR, "halide", "mmap(NULL, %d, PROT_READ | PROT_WRITE, MAP_SHARED, %d, 0) failed",
                            size, buf_fd);
        close(buf_fd);
        ion_free(ion_fd, handle);
        return NULL;
    }

    // Register the buffer, so we get zero copy.
    if (remote_register_buf) {
        remote_register_buf(buf, size, buf_fd);
    }

    // Build a record for this allocation.
    allocation_record *rec = (allocation_record *)malloc(sizeof(allocation_record));
    if (!rec) {
        __android_log_print(ANDROID_LOG_ERROR, "halide", "malloc failed");
        munmap(buf, size);
        close(buf_fd);
        ion_free(ion_fd, handle);
        return NULL;
    }

    rec->next = NULL;
    rec->handle = handle;
    rec->buf_fd = buf_fd;
    rec->buf = buf;
    rec->size = size;

    // Insert this record into the list of allocations. Insert it at
    // the front, since it's simpler, and most likely to be freed
    // next.
    pthread_mutex_lock(&allocations_mutex);
    rec->next = allocations.next;
    allocations.next = rec;
    pthread_mutex_unlock(&allocations_mutex);

    return buf;
}
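Halide's runtime pairs this with a free routine; the details below are a sketch reconstructed from the record list above, not the verbatim implementation: deregister the buffer, unmap it, then release the dma-buf fd and the ion handle.

void halide_hexagon_host_free(void *ptr) {
    // Find and unlink the allocation record for this pointer.
    pthread_mutex_lock(&allocations_mutex);
    allocation_record *prev = &allocations;
    allocation_record *rec = allocations.next;
    while (rec && rec->buf != ptr) {
        prev = rec;
        rec = rec->next;
    }
    if (rec) {
        prev->next = rec->next;
    }
    pthread_mutex_unlock(&allocations_mutex);
    if (!rec) {
        __android_log_print(ANDROID_LOG_WARN, "halide", "free of unknown pointer %p", ptr);
        return;
    }

    // Deregister before unmapping so the DSP never sees a stale mapping.
    if (remote_register_buf) {
        remote_register_buf(rec->buf, rec->size, -1);
    }
    munmap(rec->buf, rec->size);
    close(rec->buf_fd);
    ion_free(ion_fd, rec->handle);
    free(rec);
}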