unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
{
	/* Initialized to 0 so a failed query reports zero blocks. */
	size_t hwmem_mem_chunk_length = 0;
	struct hwmem_alloc *alloc = (struct hwmem_alloc *)memh;

	/*
	 * Call hwmem_pin() with mem_chunks set to NULL to have the required
	 * chunk count written back through hwmem_mem_chunk_length. Only the
	 * count is needed here, so the return value is deliberately ignored;
	 * the zero initializer above covers the failure case.
	 */
	(void)hwmem_pin(alloc, NULL, &hwmem_mem_chunk_length);

	return hwmem_mem_chunk_length;
}
ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh,
                                          ump_dd_physical_block * blocks,
                                          unsigned long num_blocks)
{
	struct hwmem_mem_chunk *hwmem_mem_chunks;
	size_t hwmem_mem_chunk_length = num_blocks;

	int hwmem_result;
	size_t i;

	struct hwmem_alloc *alloc = (struct hwmem_alloc *)memh;

	if (unlikely(blocks == NULL)) {
		MALI_DEBUG_PRINT(1, ("%s: blocks == NULL\n", __func__));
		return UMP_DD_INVALID;
	}

	/* Allocate after the NULL check so the error path cannot leak it. */
	hwmem_mem_chunks = kmalloc(sizeof(struct hwmem_mem_chunk) * num_blocks,
				   GFP_KERNEL);
	if (unlikely(hwmem_mem_chunks == NULL)) {
		MALI_DEBUG_PRINT(1, ("%s: kmalloc of mem chunk array failed\n",
				     __func__));
		return UMP_DD_INVALID;
	}

	MALI_DEBUG_PRINT(5, ("Returning physical block information. Alloc: 0x%x num_blocks=%lu\n", memh, num_blocks));

	/* It might not look natural to pin here, but it matches the usage by the mali kernel module */
	hwmem_result = hwmem_pin(alloc, hwmem_mem_chunks, &hwmem_mem_chunk_length);

	if (unlikely(hwmem_result < 0)) {
		MALI_DEBUG_PRINT(1, ("%s: Pin failed. Alloc: 0x%x\n",__func__, memh));
		kfree(hwmem_mem_chunks);
		return UMP_DD_INVALID;
	}

	/*
	 * Scattered case: currently every page is one mem chunk. It would
	 * probably be more efficient to build larger mem chunks where the
	 * allocated pages happen to be physically contiguous.
	 */
	for (i = 0; i < hwmem_mem_chunk_length; i++) {
		blocks[i].addr = hwmem_mem_chunks[i].paddr;
		blocks[i].size = hwmem_mem_chunks[i].size;
	}
	kfree(hwmem_mem_chunks);

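	/*
	 * Hand the buffer to the synchronized domain so CPU and hardware
	 * see a coherent view before the blocks are returned to the caller.
	 */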
	hwmem_set_domain(alloc, HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
		HWMEM_DOMAIN_SYNC, NULL);

	return UMP_DD_SUCCESS;
}
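
Taken together, the two functions above form a query-then-fetch pair: a caller first asks how many physical blocks back the allocation, then passes an array of that size. A minimal caller sketch under that assumption follows; dump_phys_blocks is a hypothetical name, not part of the driver above, and it assumes <linux/slab.h> plus the UMP kernel interface header.

/*
 * Hypothetical caller sketch: query the block count, allocate a matching
 * array, fetch the blocks and print them. Error codes are illustrative.
 */
static int dump_phys_blocks(ump_dd_handle memh)
{
	unsigned long i;
	unsigned long count = ump_dd_phys_block_count_get(memh);
	ump_dd_physical_block *blocks;

	if (count == 0)
		return -EINVAL;

	blocks = kmalloc(sizeof(ump_dd_physical_block) * count, GFP_KERNEL);
	if (blocks == NULL)
		return -ENOMEM;

	if (ump_dd_phys_blocks_get(memh, blocks, count) != UMP_DD_SUCCESS) {
		kfree(blocks);
		return -EFAULT;
	}

	for (i = 0; i < count; i++)
		pr_info("block %lu: addr=0x%lx size=%lu\n",
			i, blocks[i].addr, blocks[i].size);

	kfree(blocks);
	return 0;
}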
Example #3
static int copy_memref_to_kernel(struct tee_session *ts,
				 struct tee_session *ku_buffer,
				 struct hwmem_alloc **alloc,
				 int memref)
{
	int ret = -EINVAL;
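	/*
	 * HWMEM_MEM_CONTIGUOUS_SYS memory pins as a single physically
	 * contiguous chunk, so one struct hwmem_mem_chunk is enough.
	 */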
	size_t mem_chunks_length = 1;
	struct hwmem_mem_chunk mem_chunks;

	if (ku_buffer->op->shm[memref].size == 0) {
		pr_err(TEED_PFX "[%s] error, size of memref is zero "
		       "(memref: %d)\n", __func__, memref);
		return ret;
	}

	alloc[memref] = hwmem_alloc(ku_buffer->op->shm[memref].size,
				    (HWMEM_ALLOC_HINT_WRITE_COMBINE |
				     HWMEM_ALLOC_HINT_CACHED |
				     HWMEM_ALLOC_HINT_CACHE_WB |
				     HWMEM_ALLOC_HINT_CACHE_AOW |
				     HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE),
				    (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE |
				     HWMEM_ACCESS_IMPORT),
				    HWMEM_MEM_CONTIGUOUS_SYS);

	if (IS_ERR(alloc[memref])) {
		pr_err(TEED_PFX "[%s] couldn't alloc hwmem_alloc (memref: %d)"
		       "\n", __func__, memref);
		return PTR_ERR(alloc[memref]);
	}

	ret = hwmem_pin(alloc[memref], &mem_chunks, &mem_chunks_length);
	if (ret) {
		pr_err(TEED_PFX "[%s] couldn't pin buffer (memref: %d)\n",
		       __func__, memref);
		return ret;
	}

	/*
	 * Since phys_to_virt() does not work for hwmem memory, the virtual
	 * addresses are stored in a separate array in tee_session, while the
	 * physical addresses are kept in the memref buffers.
	 */
	ts->op->shm[memref].buffer = (void *)mem_chunks.paddr;
	ts->vaddr[memref] = hwmem_kmap(alloc[memref]);

	/* Buffer unmapped/freed in invoke_command if this function fails. */
	if (!ts->op->shm[memref].buffer || !ts->vaddr[memref]) {
		pr_err(TEED_PFX "[%s] out of memory (memref: %d)\n",
		       __func__, memref);
		return -ENOMEM;
	}

	if (ku_buffer->op->shm[memref].flags & TEEC_MEM_INPUT) {
		memcpy(ts->vaddr[memref],
		       ku_buffer->op->shm[memref].buffer,
		       ku_buffer->op->shm[memref].size);
	}

	ts->op->shm[memref].size = ku_buffer->op->shm[memref].size;
	ts->op->shm[memref].flags = ku_buffer->op->shm[memref].flags;

	return 0;
}
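
The comments above defer unmapping and freeing to invoke_command. A sketch of what that release path could look like for one memref is given below; release_memref is a hypothetical helper, while hwmem_kunmap(), hwmem_unpin() and hwmem_release() are the public counterparts of the hwmem calls used in copy_memref_to_kernel.

/*
 * Hypothetical cleanup sketch: undo hwmem_kmap(), hwmem_pin() and
 * hwmem_alloc() in reverse order for a single memref. The real driver
 * performs this in invoke_command, as the comment above notes.
 */
static void release_memref(struct tee_session *ts,
			   struct hwmem_alloc **alloc, int memref)
{
	if (IS_ERR_OR_NULL(alloc[memref]))
		return;

	if (ts->vaddr[memref] != NULL) {
		hwmem_kunmap(alloc[memref]);
		ts->vaddr[memref] = NULL;
	}

	hwmem_unpin(alloc[memref]);
	hwmem_release(alloc[memref]);
	alloc[memref] = NULL;

	ts->op->shm[memref].buffer = NULL;
	ts->op->shm[memref].size = 0;
}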