static int gralloc_unlock(gralloc_module_t const* module, buffer_handle_t handle)
{
	MALI_IGNORE(module);
	if (private_handle_t::validate(handle) < 0)
	{
		AERR("Unlocking invalid buffer 0x%p, returning error", handle);
		return -EINVAL;
	}

	private_handle_t *hnd = (private_handle_t *)handle;

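	/* If this process locked the buffer for CPU writes, clean the CPU cache so other users (GPU, display) see the data. */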
	if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP && hnd->writeOwner)
	{
#if GRALLOC_ARM_UMP_MODULE
		ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, (void *)hnd->base, hnd->size);
#else
		AERR("Buffer 0x%p is UMP type but it is not supported", hnd);
#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION && hnd->writeOwner)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		private_module_t *m = (private_module_t*)module;

		ion_sync_fd(m->ion_client, hnd->share_fd);
#endif
	}

	return 0;
}
int gralloc_backend_register(private_handle_t* hnd)
{
	int retval = -EINVAL;

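	/* Map the imported buffer into this process; this backend only supports UMP-backed handles. */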
	switch (hnd->flags & (private_handle_t::PRIV_FLAGS_USES_UMP |
	                      private_handle_t::PRIV_FLAGS_USES_ION))
	{
	case private_handle_t::PRIV_FLAGS_USES_UMP:
		if (!s_ump_is_open)
		{
			ump_result res = ump_open(); // MJOLL-4012: UMP implementation needs a ump_close() for each ump_open
			if (res != UMP_OK)
			{
				AERR("Failed to open UMP library with res=%d", res);
			}
			else
			{
				s_ump_is_open = 1;
			}
		}

		if (s_ump_is_open)
		{
			hnd->ump_mem_handle = ump_handle_create_from_secure_id(hnd->ump_id);
			if (UMP_INVALID_MEMORY_HANDLE != (ump_handle)hnd->ump_mem_handle)
			{
				hnd->base = ump_mapped_pointer_get(hnd->ump_mem_handle);
				if (0 != hnd->base)
				{
					hnd->lockState = private_handle_t::LOCK_STATE_MAPPED;
					hnd->writeOwner = 0;
					hnd->lockState = 0;

					return 0;
				}
				else
				{
					AERR("Failed to map UMP handle %p", hnd->ump_mem_handle );
				}

				ump_reference_release((ump_handle)hnd->ump_mem_handle);
			}
			else
			{
				AERR("Failed to create UMP handle %p", hnd->ump_mem_handle );
			}
		}
		break;
	case private_handle_t::PRIV_FLAGS_USES_ION:
		AERR("Gralloc does not support DMA_BUF. Unable to map memory for handle %p", hnd );
		break;
	}

	return retval;
}
Example #3
int alloc_device_open(hw_module_t const *module, const char */*name*/, hw_device_t **device)
{
	alloc_device_t *dev;

	dev = new alloc_device_t;

	if (NULL == dev)
	{
		return -1;
	}

#if GRALLOC_ARM_UMP_MODULE
	ump_result ump_res = ump_open();

	if (UMP_OK != ump_res)
	{
		AERR("UMP open failed with %d", ump_res);
		delete dev;
		return -1;
	}

#endif

	/* initialize our state here */
	memset(dev, 0, sizeof(*dev));

	/* initialize the procs */
	dev->common.tag = HARDWARE_DEVICE_TAG;
	dev->common.version = 0;
	dev->common.module = const_cast<hw_module_t *>(module);
	dev->common.close = alloc_device_close;
	dev->alloc = alloc_device_alloc;
	dev->free = alloc_device_free;

#if GRALLOC_ARM_DMA_BUF_MODULE
	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
	m->ion_client = ion_open();

	if (m->ion_client < 0)
	{
		AERR("ion_open failed with %s", strerror(errno));
		delete dev;
		return -1;
	}

#endif

	*device = &dev->common;

	return 0;
}
static int alloc_device_free(alloc_device_t* dev, buffer_handle_t handle)
{
	if (private_handle_t::validate(handle) < 0)
	{
		return -EINVAL;
	}

	private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
		// free this buffer
		private_module_t* m = reinterpret_cast<private_module_t*>(dev->common.module);
		const size_t bufferSize = m->finfo.line_length * m->info.yres;
		int index = (hnd->base - m->framebuffer->base) / bufferSize;
		m->bufferMask &= ~(1<<index); 
		close(hnd->fd);

#if GRALLOC_ARM_UMP_MODULE
		if ( (int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle )
		{
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}
#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
	{
#if GRALLOC_ARM_UMP_MODULE
		ump_mapped_pointer_release((ump_handle)hnd->ump_mem_handle);
		ump_reference_release((ump_handle)hnd->ump_mem_handle);
#else
		AERR( "Can't free ump memory for handle:0x%x. Not supported.", (unsigned int)hnd );
#endif
	} 
	else if ( hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION )
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		private_module_t* m = reinterpret_cast<private_module_t*>(dev->common.module);
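		/* Unmap the CPU mapping, close the shared dma-buf fd and release the ION allocation. */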
		if ( 0 != munmap( (void*)hnd->base, hnd->size ) ) AERR( "Failed to munmap handle %p", hnd );
		close( hnd->share_fd );
		if ( 0 != ion_free( m->ion_client, hnd->ion_hnd ) ) AERR( "Failed to ion_free( ion_client: %d ion_hnd: %d )", m->ion_client, hnd->ion_hnd );
		memset( (void*)hnd, 0, sizeof( *hnd ) );
#else 
		AERR( "Can't free dma_buf memory for handle:0x%x. Not supported.", (unsigned int)hnd );
#endif
		
	}

	delete hnd;

	return 0;
}
static int gralloc_lock(gralloc_module_t const *module, buffer_handle_t handle, int usage, int l, int t, int w, int h, void **vaddr)
{
	if (private_handle_t::validate(handle) < 0)
	{
		AERR("Locking invalid buffer 0x%p, returning error", handle);
		return -EINVAL;
	}

	private_handle_t *hnd = (private_handle_t *)handle;

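	/* Remember whether this lock is a CPU write; unlock only performs cache maintenance for write owners. */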
	if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP || hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
		hnd->writeOwner = usage & GRALLOC_USAGE_SW_WRITE_MASK;
	}

	if (usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK))
	{
		*vaddr = (void *)hnd->base;
	}

	MALI_IGNORE(module);
	MALI_IGNORE(l);
	MALI_IGNORE(t);
	MALI_IGNORE(w);
	MALI_IGNORE(h);
	return 0;
}
static int gralloc_unlock(gralloc_module_t const *module, buffer_handle_t handle)
{
	MALI_IGNORE(module);

	if (private_handle_t::validate(handle) < 0)
	{
		AERR("Unlocking invalid buffer 0x%p, returning error", handle);
		return -EINVAL;
	}

	private_handle_t *hnd = (private_handle_t *)handle;

	if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP && hnd->writeOwner)
	{
#if GRALLOC_ARM_UMP_MODULE
		ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, (void *)hnd->base, hnd->size);
#else
		AERR("Buffer 0x%p is UMP type but it is not supported", hnd);
#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION && hnd->writeOwner)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		hw_module_t *pmodule = NULL;
		private_module_t *m = NULL;

		if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, (const hw_module_t **)&pmodule) == 0)
		{
			m = reinterpret_cast<private_module_t *>(pmodule);
			//ion_sync_fd(m->ion_client, hnd->share_fd);
		}
		else
		{
			AERR("Couldnot get gralloc module for handle 0x%p\n", handle);
		}

#endif
	}

	return 0;
}
void gralloc_backend_sync(private_handle_t* hnd)
{
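	/* Clean and invalidate the CPU cache for the whole buffer; only UMP-backed buffers are handled here. */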
	switch (hnd->flags & (private_handle_t::PRIV_FLAGS_USES_UMP |
	                      private_handle_t::PRIV_FLAGS_USES_ION))
	{
	case private_handle_t::PRIV_FLAGS_USES_UMP:
		ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, (void*)hnd->base, hnd->size);
		break;
	case private_handle_t::PRIV_FLAGS_USES_ION:
		AERR( "Buffer %p is DMA_BUF type but it is not supported", hnd );
		break;
	}
}
static int gralloc_lock_ycbcr(struct gralloc_module_t const* module,
            buffer_handle_t handle, int usage,
            int l, int t, int w, int h,
            struct android_ycbcr *ycbcr)
{
	if (private_handle_t::validate(handle) < 0)
	{
		AERR("Locking invalid buffer 0x%p, returning error", handle );
		return -EINVAL;
	}
	private_handle_t* hnd = (private_handle_t*)handle;
	int ystride;
	int err=0;

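	/* Both supported formats are 4:2:0 semi-planar: a full-resolution Y plane followed by one interleaved chroma plane, hence a chroma_step of 2 bytes. */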
	switch (hnd->format) {
		case HAL_PIXEL_FORMAT_YCrCb_420_SP:
			ystride = GRALLOC_ALIGN(hnd->width, 16);
			ycbcr->y  = (void*)hnd->base;
			ycbcr->cr = (void*)((uintptr_t)hnd->base + ystride * hnd->height);
			ycbcr->cb = (void*)((uintptr_t)hnd->base + ystride * hnd->height + 1);
			ycbcr->ystride = ystride;
			ycbcr->cstride = ystride;
			ycbcr->chroma_step = 2;
			memset(ycbcr->reserved, 0, sizeof(ycbcr->reserved));
			break;
		case HAL_PIXEL_FORMAT_YCbCr_420_SP:
			ystride = GRALLOC_ALIGN(hnd->width, 16);
			ycbcr->y  = (void*)hnd->base;
			ycbcr->cb = (void*)((uintptr_t)hnd->base + ystride * hnd->height);
			ycbcr->cr = (void*)((uintptr_t)hnd->base + ystride * hnd->height + 1);
			ycbcr->ystride = ystride;
			ycbcr->cstride = ystride;
			ycbcr->chroma_step = 2;
			memset(ycbcr->reserved, 0, sizeof(ycbcr->reserved));
			break;
		default:
			ALOGD("%s: Invalid format passed: 0x%x", __FUNCTION__,
			       hnd->format);
			err = -EINVAL;
	}

	MALI_IGNORE(module);
	MALI_IGNORE(usage);
	MALI_IGNORE(l);
	MALI_IGNORE(t);
	MALI_IGNORE(w);
	MALI_IGNORE(h);
	return err;
}
void gralloc_backend_unregister(private_handle_t* hnd)
{
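	/* Drop the CPU mapping and the UMP reference taken when the buffer was registered. */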
	switch (hnd->flags & (private_handle_t::PRIV_FLAGS_USES_UMP |
	                      private_handle_t::PRIV_FLAGS_USES_ION))
	{
	case private_handle_t::PRIV_FLAGS_USES_UMP:
		ump_mapped_pointer_release((ump_handle)hnd->ump_mem_handle);
		hnd->base = 0;
		ump_reference_release((ump_handle)hnd->ump_mem_handle);
		hnd->ump_mem_handle = UMP_INVALID_MEMORY_HANDLE;
		break;
	case private_handle_t::PRIV_FLAGS_USES_ION:
		AERR( "Can't unregister DMA_BUF buffer for hnd %p. Not supported", hnd );
		break;
	}
}
static int alloc_device_close(struct hw_device_t *device)
{
	alloc_device_t* dev = reinterpret_cast<alloc_device_t*>(device);
	if (dev)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		private_module_t *m = reinterpret_cast<private_module_t*>(dev->common.module);
		if ( 0 != ion_close(m->ion_client) ) AERR( "Failed to close ion_client: %d", m->ion_client );
#endif
		delete dev;
#if GRALLOC_ARM_UMP_MODULE
		ump_close(); // Our UMP memory refs will be released automatically here...
#endif
	}
	return 0;
}
Example #11
static int __ump_alloc_should_fail()
{
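	/* Test hook: decide whether to fail this allocation on purpose, controlled by system properties. */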

	static unsigned int call_count  = 0;
	unsigned int        first_fail  = 0;
	unsigned int        fail_period = 0;
	int                 fail        = 0;

	++call_count;

	/* read the system properties that control failure simulation */
	{
		char prop_value[PROPERTY_VALUE_MAX];

		if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_FIRST, prop_value, "0") > 0)
		{
			sscanf(prop_value, "%11u", &first_fail);
		}

		if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL, prop_value, "0") > 0)
		{
			sscanf(prop_value, "%11u", &fail_period);
		}
	}

	/* failure simulation is enabled by setting the first_fail property to non-zero */
	if (first_fail > 0)
	{
		LOGI("iteration %u (fail=%u, period=%u)\n", call_count, first_fail, fail_period);

		fail = (call_count == first_fail) ||
		       (call_count > first_fail && fail_period > 0 && 0 == (call_count - first_fail) % fail_period);

		if (fail)
		{
			AERR("failed ump_ref_drv_allocate on iteration #%d\n", call_count);
		}
	}

	return fail;
}
static int gralloc_register_buffer(gralloc_module_t const *module, buffer_handle_t handle)
{
	MALI_IGNORE(module);

	if (private_handle_t::validate(handle) < 0)
	{
		AERR("Registering invalid buffer 0x%p, returning error", handle);
		return -EINVAL;
	}

	// if this handle was created in this process, then we keep it as is.
	private_handle_t *hnd = (private_handle_t *)handle;

	int retval = -EINVAL;

	pthread_mutex_lock(&s_map_lock);

#if GRALLOC_ARM_UMP_MODULE

	if (!s_ump_is_open)
	{
		ump_result res = ump_open(); // MJOLL-4012: UMP implementation needs a ump_close() for each ump_open

		if (res != UMP_OK)
		{
			pthread_mutex_unlock(&s_map_lock);
			AERR("Failed to open UMP library with res=%d", res);
			return retval;
		}

		s_ump_is_open = 1;
	}

#endif

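	/* Record the importing process; unregister refuses to unmap handles registered by another process. */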
	hnd->pid = getpid();

	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
		AERR("Can't register buffer 0x%p as it is a framebuffer", handle);
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
	{
#if GRALLOC_ARM_UMP_MODULE
		hnd->ump_mem_handle = (int)ump_handle_create_from_secure_id(hnd->ump_id);

		if (UMP_INVALID_MEMORY_HANDLE != (ump_handle)hnd->ump_mem_handle)
		{
			hnd->base = ump_mapped_pointer_get((ump_handle)hnd->ump_mem_handle);

			if (0 != hnd->base)
			{
				hnd->lockState = private_handle_t::LOCK_STATE_MAPPED;
				hnd->writeOwner = 0;
				hnd->lockState = 0;

				pthread_mutex_unlock(&s_map_lock);
				return 0;
			}
			else
			{
				AERR("Failed to map UMP handle 0x%x", hnd->ump_mem_handle);
			}

			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}
		else
		{
			AERR("Failed to create UMP handle 0x%x", hnd->ump_mem_handle);
		}

#else
		AERR("Gralloc does not support UMP. Unable to register UMP memory for handle 0x%p", hnd);
#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		int ret;
		unsigned char *mappedAddress;
		size_t size = hnd->size;
		hw_module_t *pmodule = NULL;
		private_module_t *m = NULL;

		if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, (const hw_module_t **)&pmodule) == 0)
		{
			m = reinterpret_cast<private_module_t *>(pmodule);
		}
		else
		{
			AERR("Could not get gralloc module for handle: 0x%p", hnd);
			retval = -errno;
			goto cleanup;
		}

		/* the test condition is set to m->ion_client <= 0 here, because:
		 * 1) module structure are initialized to 0 if no initial value is applied
		 * 2) a second user process should get a ion fd greater than 0.
		 */
		if (m->ion_client <= 0)
		{
			/* a second user process must obtain a client handle first via ion_open before it can obtain the shared ion buffer*/
			m->ion_client = ion_open();

			if (m->ion_client < 0)
			{
				AERR("Could not open ion device for handle: 0x%p", hnd);
				retval = -errno;
				goto cleanup;
			}
		}

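		/* Map the shared dma-buf into this process; the exporter's offset is applied when computing base below. */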
		mappedAddress = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

		if (MAP_FAILED == mappedAddress)
		{
			AERR("mmap( share_fd:%d ) failed with %s",  hnd->share_fd, strerror(errno));
			retval = -errno;
			goto cleanup;
		}

		hnd->base = mappedAddress + hnd->offset;
		pthread_mutex_unlock(&s_map_lock);
		return 0;
#endif
	}
	else
	{
		AERR("registering non-UMP buffer not supported. flags = %d", hnd->flags);
	}

cleanup:
	pthread_mutex_unlock(&s_map_lock);
	return retval;
}
static int gralloc_unregister_buffer(gralloc_module_t const *module, buffer_handle_t handle)
{
	MALI_IGNORE(module);

	if (private_handle_t::validate(handle) < 0)
	{
		AERR("unregistering invalid buffer 0x%p, returning error", handle);
		return -EINVAL;
	}

	private_handle_t *hnd = (private_handle_t *)handle;

	AERR_IF(hnd->lockState & private_handle_t::LOCK_STATE_READ_MASK, "[unregister] handle %p still locked (state=%08x)", hnd, hnd->lockState);

	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
		AERR("Can't unregister buffer 0x%p as it is a framebuffer", handle);
	}
	else if (hnd->pid == getpid()) // never unmap buffers that were not registered in this process
	{
		pthread_mutex_lock(&s_map_lock);

		if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
		{
#if GRALLOC_ARM_UMP_MODULE
			ump_mapped_pointer_release((ump_handle)hnd->ump_mem_handle);
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
			hnd->ump_mem_handle = (int)UMP_INVALID_MEMORY_HANDLE;
#else
			AERR("Can't unregister UMP buffer for handle 0x%p. Not supported", handle);
#endif
		}
		else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
		{
#if GRALLOC_ARM_DMA_BUF_MODULE
			void *base = (void *)hnd->base;
			size_t size = hnd->size;

			if (munmap(base, size) < 0)
			{
				AERR("Could not munmap base:0x%p size:%lu '%s'", base, (unsigned long)size, strerror(errno));
			}

#else
			AERR("Can't unregister DMA_BUF buffer for hnd %p. Not supported", hnd);
#endif

		}
		else
		{
			AERR("Unregistering unknown buffer is not supported. Flags = %d", hnd->flags);
		}

		hnd->base = 0;
		hnd->lockState  = 0;
		hnd->writeOwner = 0;

		pthread_mutex_unlock(&s_map_lock);
	}
	else
	{
		AERR("Trying to unregister buffer 0x%p from process %d that was not created in current process: %d", hnd, hnd->pid, getpid());
	}

	return 0;
}
Example #14
static int gralloc_alloc_buffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
#if GRALLOC_ARM_DMA_BUF_MODULE
	{
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
		ion_user_handle_t ion_hnd;
		unsigned char *cpu_ptr;
		int shared_fd;
		int ret;
		unsigned int ion_flags = 0;

		if( (usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN )
			ion_flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
		if (usage & GRALLOC_USAGE_PRIVATE_1) {
			ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_CARVEOUT_MASK, ion_flags, &ion_hnd);
		} else {
			ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, ion_flags, &ion_hnd);
		}

		if (ret != 0)
		{
			AERR("Failed to ion_alloc from ion_client:%d", m->ion_client);
			return -1;
		}

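		/* Export the ION allocation as a dma-buf fd so it can be shared with other processes. */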
		ret = ion_share(m->ion_client, ion_hnd, &shared_fd);

		if (ret != 0)
		{
			AERR("ion_share( %d ) failed", m->ion_client);

			if (0 != ion_free(m->ion_client, ion_hnd))
			{
				AERR("ion_free( %d ) failed", m->ion_client);
			}

			return -1;
		}

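		/* Map the new allocation for CPU access in the allocating process. */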
		cpu_ptr = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, shared_fd, 0);

		if (MAP_FAILED == cpu_ptr)
		{
			AERR("ion_map( %d ) failed", m->ion_client);

			if (0 != ion_free(m->ion_client, ion_hnd))
			{
				AERR("ion_free( %d ) failed", m->ion_client);
			}

			close(shared_fd);
			return -1;
		}

		private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, (int)cpu_ptr, private_handle_t::LOCK_STATE_MAPPED);

		if (NULL != hnd)
		{
			hnd->share_fd = shared_fd;
			hnd->ion_hnd = ion_hnd;
			*pHandle = hnd;
			return 0;
		}
		else
		{
			AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
		}

		close(shared_fd);
		ret = munmap(cpu_ptr, size);

		if (0 != ret)
		{
			AERR("munmap failed for base:%p size: %d", cpu_ptr, size);
		}

		ret = ion_free(m->ion_client, ion_hnd);

		if (0 != ret)
		{
			AERR("ion_free( %d ) failed", m->ion_client);
		}

		return -1;
	}
#endif

#if GRALLOC_ARM_UMP_MODULE
	{
		ump_handle ump_mem_handle;
		void *cpu_ptr;
		ump_secure_id ump_id;
		ump_alloc_constraints constraints;

		size = round_up_to_page_size(size);

		if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
		{
			constraints =  UMP_REF_DRV_CONSTRAINT_USE_CACHE;
		}
		else
		{
			constraints = UMP_REF_DRV_CONSTRAINT_NONE;
		}

#ifdef GRALLOC_SIMULATE_FAILURES
		/* if the failure condition matches, fail this iteration */
		if (__ump_alloc_should_fail())
		{
			ump_mem_handle = UMP_INVALID_MEMORY_HANDLE;
		}
		else
#endif
		{
			ump_mem_handle = ump_ref_drv_allocate(size, constraints);

			if (UMP_INVALID_MEMORY_HANDLE != ump_mem_handle)
			{
				cpu_ptr = ump_mapped_pointer_get(ump_mem_handle);

				if (NULL != cpu_ptr)
				{
					ump_id = ump_secure_id_get(ump_mem_handle);

					if (UMP_INVALID_SECURE_ID != ump_id)
					{
						private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_UMP, usage, size, (int)cpu_ptr,
						private_handle_t::LOCK_STATE_MAPPED, ump_id, ump_mem_handle);

						if (NULL != hnd)
						{
							*pHandle = hnd;
							return 0;
						}
						else
						{
							AERR("gralloc_alloc_buffer() failed to allocate handle. ump_handle = %p, ump_id = %d", ump_mem_handle, ump_id);
						}
					}
					else
					{
						AERR("gralloc_alloc_buffer() failed to retrieve valid secure id. ump_handle = %p", ump_mem_handle);
					}

					ump_mapped_pointer_release(ump_mem_handle);
				}
				else
				{
					AERR("gralloc_alloc_buffer() failed to map UMP memory. ump_handle = %p", ump_mem_handle);
				}

				ump_reference_release(ump_mem_handle);
			}
			else
			{
				AERR("gralloc_alloc_buffer() failed to allocate UMP memory. size:%d constraints: %d", size, constraints);
			}
		}
		return -1;
	}
#endif

}
Example #15
static int gralloc_alloc_framebuffer_locked(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

	// allocate the framebuffer
	if (m->framebuffer == NULL)
	{
		// initialize the framebuffer, the framebuffer is mapped once and forever.
		int err = init_frame_buffer_locked(m);

		if (err < 0)
		{
			return err;
		}
	}

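	/* The framebuffer is split into numBuffers page-flip slots; bufferMask marks the slots already handed out. */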
	const uint32_t bufferMask = m->bufferMask;
	const uint32_t numBuffers = m->numBuffers;
	const size_t bufferSize = m->finfo.line_length * m->info.yres;

	if (numBuffers == 1)
	{
		// If we have only one buffer, we never use page-flipping. Instead,
		// we return a regular buffer which will be memcpy'ed to the main
		// screen when post is called.
		int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
		AERR("fallback to single buffering. Virtual Y-res too small %d", m->info.yres);
		return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
	}

	if (bufferMask >= ((1LU << numBuffers) - 1))
	{
		// We ran out of buffers.
		return -ENOMEM;
	}

	int vaddr = m->framebuffer->base;

	// find a free slot
	for (uint32_t i = 0 ; i < numBuffers ; i++)
	{
		if ((bufferMask & (1LU << i)) == 0)
		{
			m->bufferMask |= (1LU << i);
			break;
		}

		vaddr += bufferSize;
	}

	// The entire framebuffer memory is already mapped, now create a buffer object for parts of this memory
	private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_FRAMEBUFFER, usage, size, vaddr,
	        0, dup(m->framebuffer->fd), vaddr - m->framebuffer->base);
#if GRALLOC_ARM_UMP_MODULE
	hnd->ump_id = m->framebuffer->ump_id;

	/* create a backing ump memory handle if the framebuffer is exposed as a secure ID */
	if ((int)UMP_INVALID_SECURE_ID != hnd->ump_id)
	{
		hnd->ump_mem_handle = (int)ump_handle_create_from_secure_id(hnd->ump_id);

		if ((int)UMP_INVALID_MEMORY_HANDLE == hnd->ump_mem_handle)
		{
			AINF("warning: unable to create UMP handle from secure ID %i\n", hnd->ump_id);
		}
	}

#endif

#if GRALLOC_ARM_DMA_BUF_MODULE
	{
#ifdef FBIOGET_DMABUF
		struct fb_dmabuf_export fb_dma_buf;

		if (ioctl(m->framebuffer->fd, FBIOGET_DMABUF, &fb_dma_buf) == 0)
		{
			AINF("framebuffer accessed with dma buf (fd 0x%x)\n", (int)fb_dma_buf.fd);
			hnd->share_fd = fb_dma_buf.fd;
		}

#endif
	}
#endif

	*pHandle = hnd;

	return 0;
}
//static int gralloc_alloc_buffer(alloc_device_t* dev, size_t size, int usage, buffer_handle_t* pHandle, bool reserve)
static int gralloc_alloc_buffer(alloc_device_t* dev, size_t size, int usage, buffer_handle_t* pHandle, int reserve)
{
#if GRALLOC_ARM_DMA_BUF_MODULE
	{
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
		ion_user_handle_t ion_hnd;
		unsigned char *cpu_ptr;
		int shared_fd;
		int ret;
		unsigned int heap_mask;
		int Ion_type;
		bool Ishwc = false;
		int Ion_flag = 0;

		if (usage == (GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_RENDER))
			Ishwc = true;

		//ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, 0, &ion_hnd);
        #ifdef USE_X86	
        
        if(usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK))
            Ion_flag = (ION_FLAG_CACHED|ION_FLAG_CACHED_NEEDS_SYNC);

        if(is_out_log())
            ALOGD("usage=%x,protect=%x,ion_flag=%x,mmu=%d",usage,GRALLOC_USAGE_PROTECTED,Ion_flag,g_MMU_stat);
        if (usage & GRALLOC_USAGE_PROTECTED)  // secure memory
        {
            unsigned long phys;
            ret = ion_secure_alloc(m->ion_client, size,&phys);
            //ALOGD("secure_alloc ret=%d,phys=%x",ret,(int)phys);
            if(ret != 0)
            {
                AERR("Failed to ion_alloc from ion_client:%d, size: %d", m->ion_client, size);
                return -1;
            }    
	        private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, 0, 0);

    		if (NULL != hnd)
    		{
    			hnd->share_fd = 0;
    			hnd->ion_hnd = 0;
    			hnd->type = 0;
    			hnd->phy_addr = (int)phys;
    			*pHandle = hnd;
                if(is_out_log())
                    ALOGD("secure_alloc_ok usage=0x%x phy=0x%x", usage, hnd->phy_addr);
    			
    			return 0;
    		}
    		else
    		{
    			AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
    		}


    		return -1;
        }
        #endif
		//ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, 0, &ion_hnd);
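		/* Heap selection: with the MMU enabled use the vmalloc heap, otherwise allocate physically contiguous memory from the CMA heap. */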
        #ifdef USE_X86		
		if(g_MMU_stat
		    && ((usage&GRALLOC_USAGE_HW_CAMERA_WRITE)==0)
		    && !(usage & GRALLOC_USAGE_PRIVATE_2)
		    && !Ishwc)
        #else
		if(g_MMU_stat)
		#endif
		{
		    heap_mask = ION_HEAP(ION_VMALLOC_HEAP_ID);
            #ifdef USE_X86		
		    if (usage & GRALLOC_USAGE_PRIVATE_2)
		    {
		        heap_mask |=  ION_HEAP(ION_SECURE_HEAP_ID);
		    }
            #endif
		    ret = ion_alloc(m->ion_client, size, 0, heap_mask, Ion_flag, &ion_hnd);
		    Ion_type = 1;
		} else {
		    heap_mask = ION_HEAP(ION_CMA_HEAP_ID);
            #ifdef USE_X86		
		    if (usage & GRALLOC_USAGE_PRIVATE_2)
		    {
		        heap_mask |=  ION_HEAP(ION_SECURE_HEAP_ID);
		    }
            #endif

		    if (usage == (GRALLOC_USAGE_HW_CAMERA_WRITE | GRALLOC_USAGE_SW_READ_OFTEN)) {
		        ret = ion_alloc(m->ion_client, size, 0, heap_mask,
		                        (ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC), &ion_hnd);
		    } else {
		        ret = ion_alloc(m->ion_client, size, 0, heap_mask, Ion_flag, &ion_hnd);
		    }
            #ifdef USE_X86		
		    if (g_MMU_stat && Ishwc)
		    {
		        Ion_type = 1;
		    }
		    else
		    #endif
		        Ion_type = 0;
		}

		if (ret != 0)
		{
            if( (heap_mask & ION_HEAP(ION_CMA_HEAP_ID))
#ifdef USE_X86
            && !Ishwc
#endif
            )
            {
#ifdef BOARD_WITH_IOMMU
                heap_mask = ION_HEAP(ION_VMALLOC_HEAP_ID);
#else
                heap_mask = ION_HEAP(ION_CARVEOUT_HEAP_ID);
#endif
                ret = ion_alloc(m->ion_client, size, 0, heap_mask, 0, &ion_hnd);

                if (ret != 0)
                {
                    AERR("Force to VMALLOC fail ion_client:%d", m->ion_client);
                    return -1;
                }
                else
                {
                    ALOGD("Force to VMALLOC success!");
                    Ion_type = 1;
                }
            }
            else
            {
                AERR("Failed to ion_alloc from ion_client:%d, size: %d", m->ion_client, size);
                return -1;
            }
		}

		ret = ion_share(m->ion_client, ion_hnd, &shared_fd);

		if (ret != 0)
		{
			AERR("ion_share( %d ) failed", m->ion_client);

			if (0 != ion_free(m->ion_client, ion_hnd))
			{
				AERR("ion_free( %d ) failed", m->ion_client);
			}

			return -1;
		}
		cpu_ptr = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, shared_fd, 0);
		#ifdef USE_X86
		//memset(cpu_ptr, 0, size);
		#endif
		if (MAP_FAILED == cpu_ptr)
		{
			AERR("ion_map( %d ) failed", m->ion_client);

			if (0 != ion_free(m->ion_client, ion_hnd))
			{
				AERR("ion_free( %d ) failed", m->ion_client);
			}

			close(shared_fd);
			return -1;
		}

		private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, (int)cpu_ptr, private_handle_t::LOCK_STATE_MAPPED);

		if (NULL != hnd)
		{
		    unsigned long cma_phys = 0;
			hnd->share_fd = shared_fd;
			hnd->ion_hnd = ion_hnd;
			hnd->type = Ion_type;
			if(!Ion_type)
			{
			    int pret;
			    pret = ion_get_phys(m->ion_client, ion_hnd, &cma_phys);
			    //ALOGD("ion_get_phy ret=%d,cma_phys=%x",pret,cma_phys);
			}    
			    
			hnd->phy_addr = (int)cma_phys;
			*pHandle = hnd;
            if(is_out_log())
                ALOGD("alloc_info fd[%d],type=%d,phy=%x",hnd->share_fd,hnd->type,hnd->phy_addr);
			
			return 0;
		}
		else
		{
			AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
		}

		close(shared_fd);
		ret = munmap(cpu_ptr, size);

		if (0 != ret)
		{
			AERR("munmap failed for base:%p size: %d", cpu_ptr, size);
		}

		ret = ion_free(m->ion_client, ion_hnd);

		if (0 != ret)
		{
			AERR("ion_free( %d ) failed", m->ion_client);
		}

		return -1;
	}
#endif

#if GRALLOC_ARM_UMP_MODULE
	{
		ump_handle ump_mem_handle;
		void *cpu_ptr;
		ump_secure_id ump_id;
		int constraints;

		size = round_up_to_page_size(size);

		if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
		{
			constraints =  UMP_REF_DRV_CONSTRAINT_USE_CACHE;
		}
		else
		{
			constraints = UMP_REF_DRV_CONSTRAINT_NONE;
		}
		if (reserve & 0x01)
		{
			constraints |= UMP_REF_DRV_CONSTRAINT_PRE_RESERVE;
		}

		if (reserve & 0x02)
		{
			constraints |= UMP_REF_DRV_UK_CONSTRAINT_MEM_SWITCH;
		}
#ifdef GRALLOC_SIMULATE_FAILURES
		/* if the failure condition matches, fail this iteration */
		if (__ump_alloc_should_fail())
		{
			ump_mem_handle = UMP_INVALID_MEMORY_HANDLE;
		}
		else
#endif
		{
			ump_mem_handle = ump_ref_drv_allocate(size, (ump_alloc_constraints)constraints);

			if (UMP_INVALID_MEMORY_HANDLE != ump_mem_handle)
			{
				cpu_ptr = ump_mapped_pointer_get(ump_mem_handle);

				if (NULL != cpu_ptr)
				{
					ump_id = ump_secure_id_get(ump_mem_handle);

					if (UMP_INVALID_SECURE_ID != ump_id)
					{
						private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_UMP, usage, size, (int)cpu_ptr,
						private_handle_t::LOCK_STATE_MAPPED, ump_id, ump_mem_handle);

						if (NULL != hnd)
						{
						#ifdef USE_LCDC_COMPOSER
							if (reserve & 0x02)
							{
								hnd->phy_addr = 0;
							}
							else
							{
								hnd->phy_addr = ump_phy_addr_get(ump_mem_handle);
							}
						#endif
							*pHandle = hnd;
							return 0;
						}
						else
						{
							AERR("gralloc_alloc_buffer() failed to allocate handle. ump_handle = %p, ump_id = %d", ump_mem_handle, ump_id);
						}
					}
					else
					{
						AERR("gralloc_alloc_buffer() failed to retrieve valid secure id. ump_handle = %p", ump_mem_handle);
					}

					ump_mapped_pointer_release(ump_mem_handle);
				}
				else
				{
					AERR("gralloc_alloc_buffer() failed to map UMP memory. ump_handle = %p", ump_mem_handle);
				}

				ump_reference_release(ump_mem_handle);
			}
			else
			{
				AERR("gralloc_alloc_buffer() failed to allocate UMP memory. size:%d constraints: %d", size, constraints);
			}
		}
		return -1;
	}
#endif