Example #1
0
int mshci_s3c_dma_map_sg(struct mshci_host *host, struct device *dev,
	struct scatterlist *sg, int nents, enum dma_data_direction dir,
	int flush_type)
{
	struct scatterlist *s;
	int i, j;

	static int count = 0;
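
	/*
	 * flush_type selects the maintenance scope: 2 flushes the inner
	 * (CPU) and outer (L2) caches, 1 only the inner caches. The host
	 * lock is dropped around the global flush, presumably because
	 * flushing all CPU caches involves cross-CPU work that must not
	 * run under the spinlock.
	 */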

	if (flush_type == 2) {
		spin_unlock_irqrestore(&host->lock, host->sl_flags);
		flush_all_cpu_caches();
		outer_flush_all();
		spin_lock_irqsave(&host->lock, host->sl_flags);
	} else if (flush_type == 1) {
		spin_unlock_irqrestore(&host->lock, host->sl_flags);
		flush_all_cpu_caches();
		spin_lock_irqsave(&host->lock, host->sl_flags);
	}

	for_each_sg(sg, s, nents, i) {
		s->dma_address = mshci_s3c_dma_map_page(dev, sg_page(s), 
				s->offset, s->length, dir, flush_type);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}

	return nents;

bad_mapping:
	/*
	 * The listing was truncated here; this error path is a plausible
	 * completion modelled on the stock ARM dma_map_sg() unwind: undo
	 * the mappings created so far and report failure with 0.
	 */
	for_each_sg(sg, s, i, j)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
Example #2
0
static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
{
	struct mm_struct *mm = cmd->ctx->mm;

	fimg2d_calc_dma_size(cmd);

	if (fimg2d_check_address(cmd))
		return -EINVAL;

	if (fimg2d_check_pgd(mm, cmd))
		return -EFAULT;

#ifndef CCI_SNOOP
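	/*
	 * Without CCI snooping, coherency is maintained by hand: a full
	 * CPU-cache flush once the accumulated DMA size crosses the
	 * driver's flush-all threshold, otherwise a ranged flush of the
	 * clipped region only.
	 */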
	fimg2d_debug("cache flush\n");
	perf_start(cmd, PERF_CACHE);
	if (is_inner_flushall(cmd->dma_all)) {
		inner_touch_range(cmd);
		flush_all_cpu_caches();
	} else {
		inner_flush_clip_range(cmd);
	}

#ifdef CONFIG_OUTER_CACHE
	if (is_outer_flushall(cmd->dma_all))
		outer_flush_all();
	else
		outer_flush_clip_range(cmd);
#endif
	perf_end(cmd, PERF_CACHE);
#endif
	return 0;
}
Example #3
0
static int exynos_secure_mem_enable(void)
{
	/* enable secure world mode : TZASC */
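	/*
	 * The caches are cleaned before the SMC call, presumably so no
	 * dirty lines still target memory that is about to be protected;
	 * a failed call is rolled back by repeating the SMC with the
	 * protection bit cleared.
	 */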
	int ret = 0;

	flush_all_cpu_caches();
	ret = exynos_smc(SMC_MEM_PROT_SET, 0, 0, 1);
	if (ret == SMC_CALL_ERROR) {
		exynos_smc(SMC_MEM_PROT_SET, 0, 0, 0);
	}

	return ret;
}
Example #4
0
static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
{
	struct mm_struct *mm = cmd->ctx->mm;
	struct fimg2d_dma *c;
	enum pt_status pt;
	int i;

	fimg2d_calc_dma_size(cmd);

	if (fimg2d_check_address(cmd))
		return -EINVAL;

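	/* Validate the user pagetable for every image taking part in the blit. */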
	for (i = 0; i < MAX_IMAGES; i++) {
		c = &cmd->dma[i].base;
		if (!c->size)
			continue;

		pt = fimg2d_check_pagetable(mm, c->addr, c->size);
		if (pt == PT_FAULT)
			return -EFAULT;
	}

#ifndef CCI_SNOOP
	fimg2d_debug("cache flush\n");
	perf_start(cmd, PERF_CACHE);

	if (is_inner_flushall(cmd->dma_all)) {
		inner_touch_range(cmd);
		flush_all_cpu_caches();
	} else {
		inner_flush_clip_range(cmd);
	}

#ifdef CONFIG_OUTER_CACHE
	if (is_outer_flushall(cmd->dma_all))
		outer_flush_all();
	else
		outer_flush_clip_range(cmd);
#endif
	perf_end(cmd, PERF_CACHE);
#endif
	return 0;
}
Example #5
0
static int exynos_secure_mem_enable(struct kbase_device *kbdev, int ion_fd, u64 flags, struct kbase_va_region *reg)
{
	/* enable secure world mode : TZASC */
	int ret = 0;

	if (!kbdev)
		goto secure_out;

	if (!kbdev->secure_mode_support) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong operation! DDK cannot support Secure Rendering\n", __func__);
		ret = -EINVAL;
		goto secure_out;
	}

	if (!reg) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong input argument, reg %p\n",
			__func__, reg);
		goto secure_out;
	}
#if defined(CONFIG_ION) && defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)
#if MALI_SEC_ASP_SECURE_BUF_CTRL
	{
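		/*
		 * Resolve the ION buffer behind ion_fd to its physical
		 * address, then ask the secure firmware via SMC to protect
		 * that region for the GPU (PROT_G3D).
		 */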
		struct ion_client *client;
		struct ion_handle *ion_handle;
		size_t len = 0;
		ion_phys_addr_t phys = 0;

		flush_all_cpu_caches();

		if ((flags & kbdev->sec_sr_info.secure_flags_crc_asp) == kbdev->sec_sr_info.secure_flags_crc_asp) {
			reg->flags |= KBASE_REG_SECURE_CRC | KBASE_REG_SECURE;
		} else {

			client = ion_client_create(ion_exynos, "G3D");
			if (IS_ERR(client)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get ion_client of G3D\n",
						__func__);
				goto secure_out;
			}

			ion_handle = ion_import_dma_buf(client, ion_fd);

			if (IS_ERR(ion_handle)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get ion_handle of G3D\n",
						__func__);
				ion_client_destroy(client);
				goto secure_out;
			}

			if (ion_phys(client, ion_handle, &phys, &len)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get phys. addr of G3D\n",
						__func__);
				ion_free(client, ion_handle);
				ion_client_destroy(client);
				goto secure_out;
			}

			ion_free(client, ion_handle);
			ion_client_destroy(client);

			ret = exynos_smc(SMC_DRM_SECBUF_CFW_PROT, phys, len, PROT_G3D);
			if (ret != DRMDRV_OK) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: failed to set secure buffer region of G3D buffer, phy 0x%08x, error 0x%x\n",
					__func__, (unsigned int)phys, ret);
				BUG();
			}

			reg->flags |= KBASE_REG_SECURE;
		}

		reg->phys_by_ion = phys;
		reg->len_by_ion = len;
	}
#else
	reg->flags |= KBASE_REG_SECURE;

	reg->phys_by_ion = 0;
	reg->len_by_ion = 0;
#endif
#else
	GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong operation! DDK cannot support Secure Rendering\n", __func__);
	ret = -EINVAL;
#endif // defined(CONFIG_ION) && defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)

	return ret;
secure_out:
	ret = -EINVAL;
	return ret;
}
Example #6
0
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
	struct device *dev = cma_heap->dev;
	struct ion_cma_buffer_info *info;

	dev_dbg(dev, "Request buffer allocation len %ld\n", len);

	if (buffer->flags & ION_FLAG_CACHED)
		return -EINVAL;

	if (align > PAGE_SIZE)
		return -EINVAL;

	if (!ion_is_heap_available(heap, flags, NULL))
		return -EPERM;

	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return ION_CMA_ALLOCATE_FAILED;
	}

	info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
						GFP_HIGHUSER | __GFP_ZERO);

	if (!info->cpu_addr) {
		dev_err(dev, "Fail to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Fail to allocate sg table\n");
		goto free_mem;
	}

	if (ion_cma_get_sgtable(dev, info->table, info->cpu_addr,
				info->handle, len))
		goto free_table;
	/* keep this for memory release */
	buffer->priv_virt = info;

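	/*
	 * Non-cached, unprotected buffers get their cache lines flushed up
	 * front: a full CPU-cache flush when ion_buffer_need_flush_all()
	 * deems the buffer large enough, otherwise a ranged flush of the
	 * allocation. ARM64 and ARM use different maintenance helpers.
	 */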
#ifdef CONFIG_ARM64
	if (!ion_buffer_cached(buffer) && !(buffer->flags & ION_FLAG_PROTECTED)) {
		if (ion_buffer_need_flush_all(buffer))
			flush_all_cpu_caches();
		else
			__flush_dcache_area(page_address(sg_page(info->table->sgl)),
									len);
	}
#else
	if (!ion_buffer_cached(buffer) && !(buffer->flags & ION_FLAG_PROTECTED)) {
		if (ion_buffer_need_flush_all(buffer))
			flush_all_cpu_caches();
		else {
			void *va = page_address(sg_page(info->table->sgl));

			/*
			 * dmac_flush_range() takes a start/end virtual
			 * address pair; the original call had mismatched
			 * parentheses, so the [va, va + len) flush below is
			 * a plausible reconstruction of the intent.
			 */
			dmac_flush_range(va, va + len);
		}
	}
#endif
	if ((buffer->flags & ION_FLAG_PROTECTED) && ion_secure_protect(heap))
		goto free_table;

	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return 0;

free_table:
	kfree(info->table);
free_mem:
	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
err:
	kfree(info);
	return ION_CMA_ALLOCATE_FAILED;
}
Example #7
0
static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
{
	struct mm_struct *mm = cmd->ctx->mm;
	struct fimg2d_cache *csrc, *cdst, *cmsk;
	enum pt_status pt;

	csrc = &cmd->src_cache;
	cdst = &cmd->dst_cache;
	cmsk = &cmd->msk_cache;

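	/*
	 * For each enabled plane (source, mask, destination) compute the
	 * byte range the blit touches, accumulate the cacheable size in
	 * size_all, and verify user-space mappings before starting DMA.
	 */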
	if (cmd->srcen) {
		csrc->addr = cmd->src.addr.start +
				(cmd->src.stride * cmd->src_rect.y1);
		csrc->size = cmd->src.stride *
				(cmd->src_rect.y2 - cmd->src_rect.y1);

		if (cmd->src.addr.cacheable)
			cmd->size_all += csrc->size;

		if (cmd->src.addr.type == ADDR_USER) {
			pt = fimg2d_check_pagetable(mm, csrc->addr, csrc->size);
			if (pt == PT_FAULT)
				return -1;
		}
	}

	if (cmd->msken) {
		cmsk->addr = cmd->msk.addr.start +
				(cmd->msk.stride * cmd->msk_rect.y1);
		cmsk->size = cmd->msk.stride *
				(cmd->msk_rect.y2 - cmd->msk_rect.y1);

		if (cmd->msk.addr.cacheable)
			cmd->size_all += cmsk->size;

		if (cmd->msk.addr.type == ADDR_USER) {
			pt = fimg2d_check_pagetable(mm, cmsk->addr, cmsk->size);
			if (pt == PT_FAULT)
				return -1;
		}
	}

	/* calculate horizontally clipped region */
	if (cmd->dsten) {
		if (cmd->clipping.enable) {
			cdst->addr = cmd->dst.addr.start +
					(cmd->dst.stride * cmd->clipping.y1);
			cdst->size = cmd->dst.stride *
					(cmd->clipping.y2 - cmd->clipping.y1);
		} else {
			cdst->addr = cmd->dst.addr.start +
					(cmd->dst.stride * cmd->dst_rect.y1);
			cdst->size = cmd->dst.stride *
					(cmd->dst_rect.y2 - cmd->dst_rect.y1);
		}

		if (cmd->dst.addr.cacheable)
			cmd->size_all += cdst->size;

		if (cmd->dst.addr.type == ADDR_USER) {
			pt = fimg2d_check_pagetable(mm, cdst->addr, cdst->size);
			if (pt == PT_FAULT)
				return -1;
		}
	}

	fimg2d_debug("cached size all = %d\n", cmd->size_all);

#ifdef PERF_PROFILE
	perf_start(cmd->ctx, PERF_L1CC_FLUSH);
#endif
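	/*
	 * Beyond L1_CACHE_SIZE a full inner-cache flush replaces ranged
	 * maintenance; otherwise only the regions the blit actually
	 * touches are synced, with the destination done bidirectionally.
	 */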
	if (cmd->size_all >= L1_CACHE_SIZE) {
		fimg2d_debug("innercache all\n");
		flush_all_cpu_caches();
	} else {
		fimg2d_debug("innercache range\n");
		if (cmd->srcen && cmd->src.addr.cacheable)
			fimg2d_dma_sync_inner(csrc->addr, csrc->size, DMA_TO_DEVICE);

		if (cmd->msken && cmd->msk.addr.cacheable)
			fimg2d_dma_sync_inner(cmsk->addr, cmsk->size, DMA_TO_DEVICE);

		if (cmd->dsten && cmd->dst.addr.cacheable)
			fimg2d_dma_sync_inner(cdst->addr, cdst->size, DMA_BIDIRECTIONAL);
	}
#ifdef PERF_PROFILE
	perf_end(cmd->ctx, PERF_L1CC_FLUSH);
#endif

	return 0;
}
Example #8
0
static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
{
	struct mm_struct *mm = cmd->ctx->mm;
	struct fimg2d_param *p = &cmd->param;
	struct fimg2d_image *img;
	struct fimg2d_clip *clp;
	struct fimg2d_rect *r;
	struct fimg2d_dma *c;
	enum pt_status pt;
	int clip_x, clip_w, clip_h, y, dir, i;
	unsigned long clip_start;

	clp = &p->clipping;

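	/*
	 * First pass: compute the (possibly clipped) DMA window of every
	 * image, validate user pagetables, and accumulate the total
	 * cacheable size in dma_all.
	 */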
	for (i = 0; i < MAX_IMAGES; i++) {
		img = &cmd->image[i];
		c = &cmd->dma[i];
		r = &img->rect;

		if (!img->addr.type)
			continue;

		/* calculate horizontally clipped region */
		if (i == IMAGE_DST && clp->enable) {
			c->addr = img->addr.start + (img->stride * clp->y1);
			c->size = img->stride * (clp->y2 - clp->y1);
		} else {
			c->addr = img->addr.start + (img->stride * r->y1);
			c->size = img->stride * (r->y2 - r->y1);
		}

		/* check pagetable */
		if (img->addr.type == ADDR_USER) {
			pt = fimg2d_check_pagetable(mm, c->addr, c->size);
			if (pt == PT_FAULT)
				return -1;
		}

		if (img->need_cacheopr && i != IMAGE_TMP) {
			c->cached = c->size;
			cmd->dma_all += c->cached;
		}
	}

#ifdef PERF_PROFILE
	perf_start(cmd->ctx, PERF_INNERCACHE);
#endif

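	/*
	 * Inner caches: flush everything when dma_all is large enough,
	 * otherwise sync each cached image, falling back to a row-by-row
	 * sync when the clipped width is much narrower than the stride.
	 */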
	if (is_inner_flushall(cmd->dma_all))
		flush_all_cpu_caches();
	else {
		for (i = 0; i < MAX_IMAGES; i++) {
			img = &cmd->image[i];
			c = &cmd->dma[i];
			r = &img->rect;

			if (!img->addr.type || !c->cached)
				continue;

			if (i == IMAGE_DST)
				dir = DMA_BIDIRECTIONAL;
			else
				dir = DMA_TO_DEVICE;

			if (i == IDST && clp->enable) {
				clip_w = width2bytes(clp->x2 - clp->x1,
							img->fmt);
				clip_x = pixel2offset(clp->x1, img->fmt);
				clip_h = clp->y2 - clp->y1;
			} else {
				clip_w = width2bytes(r->x2 - r->x1, img->fmt);
				clip_x = pixel2offset(r->x1, img->fmt);
				clip_h = r->y2 - r->y1;
			}

			if (is_inner_flushrange(img->stride - clip_w))
				fimg2d_dma_sync_inner(c->addr, c->cached, dir);
			else {
				for (y = 0; y < clip_h; y++) {
					clip_start = c->addr +
						(img->stride * y) + clip_x;
					fimg2d_dma_sync_inner(clip_start,
								clip_w, dir);
				}
			}
		}
	}
#ifdef PERF_PROFILE
	perf_end(cmd->ctx, PERF_INNERCACHE);
#endif

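	/*
	 * Outer (L2) cache: the same full-versus-ranged decision; user
	 * pagetables are additionally cleaned so the bus master sees
	 * up-to-date entries, destinations are flushed while sources are
	 * only cleaned.
	 */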
#ifdef CONFIG_OUTER_CACHE
#ifdef PERF_PROFILE
	perf_start(cmd->ctx, PERF_OUTERCACHE);
#endif
	if (is_outer_flushall(cmd->dma_all))
		outer_flush_all();
	else {
		for (i = 0; i < MAX_IMAGES; i++) {
			img = &cmd->image[i];
			c = &cmd->dma[i];
			r = &img->rect;

			if (!img->addr.type)
				continue;

			/* clean pagetable */
			if (img->addr.type == ADDR_USER)
				fimg2d_clean_outer_pagetable(mm, c->addr, c->size);

			if (!c->cached)
				continue;

			if (i == IMAGE_DST)
				dir = CACHE_FLUSH;
			else
				dir = CACHE_CLEAN;

			if (i == IDST && clp->enable) {
				clip_w = width2bytes(clp->x2 - clp->x1,
							img->fmt);
				clip_x = pixel2offset(clp->x1, img->fmt);
				clip_h = clp->y2 - clp->y1;
			} else {
				clip_w = width2bytes(r->x2 - r->x1, img->fmt);
				clip_x = pixel2offset(r->x1, img->fmt);
				clip_h = r->y2 - r->y1;
			}

			if (is_outer_flushrange(img->stride - clip_w))
				fimg2d_dma_sync_outer(mm, c->addr,
							c->cached, dir);
			else {
				for (y = 0; y < clip_h; y++) {
					clip_start = c->addr +
						(img->stride * y) + clip_x;
					fimg2d_dma_sync_outer(mm, clip_start,
								clip_w, dir);
				}
			}
		}
	}
#ifdef PERF_PROFILE
	perf_end(cmd->ctx, PERF_OUTERCACHE);
#endif
#endif

	return 0;
}