Example #1
0
/*
 * kgsl_iommu_create_pagetable - Create an IOMMU pagetable
 *
 * Allocate memory to hold a pagetable and allocate the IOMMU
 * domain which is the actual IOMMU pagetable
 * Return - pointer to the new pagetable on success, NULL on failure
 */
void *kgsl_iommu_create_pagetable(void)
{
	struct kgsl_iommu_pt *iommu_pt;

	iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
	if (!iommu_pt) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
				sizeof(struct kgsl_iommu_pt));
		return NULL;
	}
	/* L2 redirect is not stable on IOMMU v2 */
	if (msm_soc_version_supports_iommu_v1())
		iommu_pt->domain = iommu_domain_alloc(&platform_bus_type,
					MSM_IOMMU_DOMAIN_PT_CACHEABLE);
	else
		iommu_pt->domain = iommu_domain_alloc(&platform_bus_type,
					0);
	if (!iommu_pt->domain) {
		KGSL_CORE_ERR("Failed to create iommu domain\n");
		kfree(iommu_pt);
		return NULL;
	} else {
		iommu_set_fault_handler(iommu_pt->domain,
			kgsl_iommu_fault_handler);
	}

	return iommu_pt;
}
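A minimal usage sketch (hypothetical, not from the driver source): the returned wrapper owns the domain, so a caller tearing the pagetable down must release both. The teardown below assumes the standard iommu_domain_free() API.

/* Hedged sketch: pairing creation with a hypothetical teardown path */
static void example_pagetable_lifecycle(void)
{
	struct kgsl_iommu_pt *iommu_pt = kgsl_iommu_create_pagetable();

	if (iommu_pt == NULL)
		return;		/* allocation or domain creation failed */

	/* ... attach IOMMU devices and map buffers here ... */

	iommu_domain_free(iommu_pt->domain);
	kfree(iommu_pt);
}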
Example #2
0
static int _get_iommu_ctxs(struct kgsl_iommu *iommu, struct kgsl_device *device,
	struct kgsl_device_iommu_data *data)
{
	int i;

	for (i = 0; i < data->iommu_ctx_count; i++) {
		if (iommu->dev_count >= KGSL_IOMMU_MAX_DEV) {
			KGSL_CORE_ERR("Tried to attach too many IOMMU "
				"devices\n");
			return -ENOMEM;
		}

		if (!data->iommu_ctx_names[i])
			continue;

		iommu->dev[iommu->dev_count].dev =
			msm_iommu_get_ctx(data->iommu_ctx_names[i]);
		if (iommu->dev[iommu->dev_count].dev == NULL) {
			KGSL_CORE_ERR("Failed to iommu dev handle for "
				"device %s\n", data->iommu_ctx_names[i]);
			return -EINVAL;
		}

		iommu->dev_count++;
	}

	return 0;
}
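For context, a hedged sketch of how the platform data walked above might be laid out; the field names come from the loop itself, while the context names, array shape, and field types are illustrative assumptions.

/* Illustrative platform data; real boards define their own names */
static const char *example_ctx_names[] = {
	"gfx3d_user",
	NULL,			/* skipped by the !iommu_ctx_names[i] check */
	"gfx3d_priv",
};

static struct kgsl_device_iommu_data example_iommu_data = {
	.iommu_ctx_names = example_ctx_names,
	.iommu_ctx_count = ARRAY_SIZE(example_ctx_names),
};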
/* Push a new buffer object onto the list */
static void push_object(int type,
	struct kgsl_process_private *process,
	uint64_t gpuaddr, uint64_t dwords)
{
	int index;
	struct kgsl_mem_entry *entry;

	if (process == NULL)
		return;

	/*
	 * Sometimes IBs can be reused in the same dump.  Because we parse from
	 * oldest to newest, if we come across an IB that has already been used,
	 * assume that it has been reused and update the list with the newest
	 * size.
	 */

	for (index = 0; index < objbufptr; index++) {
		if (objbuf[index].gpuaddr == gpuaddr &&
			objbuf[index].entry->priv == process) {

			objbuf[index].size = max_t(uint64_t,
						objbuf[index].size,
						dwords << 2);
			return;
		}
	}

	if (objbufptr == SNAPSHOT_OBJ_BUFSIZE) {
		KGSL_CORE_ERR("snapshot: too many snapshot objects\n");
		return;
	}

	entry = kgsl_sharedmem_find(process, gpuaddr);
	if (entry == NULL) {
		KGSL_CORE_ERR("snapshot: Can't find entry for 0x%016llX\n",
			gpuaddr);
		return;
	}

	if (!kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, dwords << 2)) {
		KGSL_CORE_ERR("snapshot: Mem entry 0x%016llX is too small\n",
			gpuaddr);
		kgsl_mem_entry_put(entry);
		return;
	}

	/* Put it on the list of things to parse */
	objbuf[objbufptr].type = type;
	objbuf[objbufptr].gpuaddr = gpuaddr;
	objbuf[objbufptr].size = dwords << 2;
	objbuf[objbufptr++].entry = entry;
}
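A hedged call-site sketch: SNAPSHOT_OBJ_TYPE_IB is assumed here as the type tag an indirect buffer would carry, and the size arithmetic mirrors the dwords << 2 conversion above.

/* Hypothetical call site: queue a 2048-dword IB for snapshot parsing */
static void example_push_ib(struct kgsl_process_private *process,
	uint64_t ib_gpuaddr)
{
	/* push_object() stores dwords << 2, so 2048 dwords = 8192 bytes */
	push_object(SNAPSHOT_OBJ_TYPE_IB, process, ib_gpuaddr, 2048);
}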
static int kgsl_iommu_init(struct kgsl_mmu *mmu)
{
	/*
	 * initialize the device MMU
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_iommu *iommu;

	iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
	if (!iommu) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
				sizeof(struct kgsl_iommu));
		return -ENOMEM;
	}
	iommu->asids = kzalloc(BITS_TO_LONGS(KGSL_IOMMU_MAX_ASIDS) *
				sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->asids) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
				sizeof(struct kgsl_iommu));
		status = -ENOMEM;
		goto done;
	}

	mmu->priv = iommu;
	status = kgsl_get_iommu_ctxt(mmu);
	if (status)
		goto done;
	status = kgsl_set_register_map(mmu);
	if (status)
		goto done;

	/* A nop is required in an indirect buffer when switching
	 * pagetables in-stream */
	kgsl_sharedmem_writel(&mmu->setstate_memory,
				KGSL_IOMMU_SETSTATE_NOP_OFFSET,
				cp_nop_packet(1));

	dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n",
			__func__);
done:
	if (status) {
		kfree(iommu->asids);
		kfree(iommu);
		mmu->priv = NULL;
	}
	return status;
}
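The asids field allocated above is a bitmap sized with BITS_TO_LONGS(KGSL_IOMMU_MAX_ASIDS). A hedged sketch of how such a bitmap is typically consumed with the standard bitmap helpers; the helper itself is illustrative, not part of the driver.

/* Illustrative only: reserve the first free ASID from iommu->asids */
static int example_alloc_asid(struct kgsl_iommu *iommu)
{
	int asid = find_first_zero_bit(iommu->asids, KGSL_IOMMU_MAX_ASIDS);

	if (asid >= KGSL_IOMMU_MAX_ASIDS)
		return -ENOSPC;		/* every ASID is already in use */

	set_bit(asid, iommu->asids);
	return asid;
}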
Example #5
0
/*
 * kgsl_set_register_map - Map the IOMMU registers into the memory descriptors
 * of the respective iommu units
 * @mmu - Pointer to mmu structure
 *
 * Return - 0 on success else error code
 */
static int kgsl_set_register_map(struct kgsl_mmu *mmu)
{
	struct platform_device *pdev =
		container_of(mmu->device->parentdev, struct platform_device,
				dev);
	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
	struct kgsl_iommu *iommu = mmu->device->mmu.priv;
	struct kgsl_iommu_unit *iommu_unit;
	int i = 0, ret = 0;

	for (; i < pdata_dev->iommu_count; i++) {
		struct kgsl_device_iommu_data data = pdata_dev->iommu_data[i];
		iommu_unit = &iommu->iommu_units[i];
		/* set up the IOMMU register map for the given IOMMU unit */
		if (!data.physstart || !data.physend) {
			KGSL_CORE_ERR("The register range for IOMMU unit not"
					" specified\n");
			ret = -EINVAL;
			goto err;
		}
		iommu_unit->reg_map.hostptr = ioremap(data.physstart,
					data.physend - data.physstart + 1);
		if (!iommu_unit->reg_map.hostptr) {
			KGSL_CORE_ERR("Failed to map SMMU register address "
				"space from %x to %x\n", data.physstart,
				data.physend - data.physstart + 1);
			ret = -ENOMEM;
			i--;
			goto err;
		}
		iommu_unit->reg_map.size = data.physend - data.physstart + 1;
		iommu_unit->reg_map.physaddr = data.physstart;
		ret = memdesc_sg_phys(&iommu_unit->reg_map, data.physstart,
				iommu_unit->reg_map.size);
		if (ret)
			goto err;
	}
	iommu->unit_count = pdata_dev->iommu_count;
	return ret;
err:
	/* Unmap any mapped IOMMU regions */
	for (; i >= 0; i--) {
		iommu_unit = &iommu->iommu_units[i];
		iounmap(iommu_unit->reg_map.hostptr);
		iommu_unit->reg_map.size = 0;
		iommu_unit->reg_map.physaddr = 0;
	}
	return ret;
}
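Once reg_map.hostptr has been set up by ioremap() above, unit registers are reachable with the usual MMIO accessors. A hedged sketch; the helper name and offset parameter are illustrative.

/* Illustrative MMIO read through the mapping created above */
static unsigned int example_read_iommu_reg(struct kgsl_iommu_unit *iommu_unit,
	unsigned int reg_offset)
{
	return readl_relaxed(iommu_unit->reg_map.hostptr + reg_offset);
}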
Example #6
0
/*
 * _get_iommu_ctxs - Get device pointers to IOMMU contexts
 * @mmu - Pointer to mmu device
 * @data - Pointer to the platform data containing information about
 * iommu devices for one iommu unit
 * @unit_id - The IOMMU unit number. This is not a specific ID but just
 * a serial number. The serial numbers are treated as IDs of the
 * IOMMU units
 *
 * Return - 0 on success else error code
 */
static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
	struct kgsl_device_iommu_data *data, unsigned int unit_id)
{
	struct kgsl_iommu *iommu = mmu->priv;
	struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[unit_id];
	int i, j;
	int found_ctx;

	for (j = 0; j < KGSL_IOMMU_MAX_DEVS_PER_UNIT; j++) {
		found_ctx = 0;
		for (i = 0; i < data->iommu_ctx_count; i++) {
			if (j == data->iommu_ctxs[i].ctx_id) {
				found_ctx = 1;
				break;
			}
		}
		if (!found_ctx)
			break;
		if (!data->iommu_ctxs[i].iommu_ctx_name) {
			KGSL_CORE_ERR("Context name invalid\n");
			return -EINVAL;
		}

		iommu_unit->dev[iommu_unit->dev_count].dev =
			msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
		if (iommu_unit->dev[iommu_unit->dev_count].dev == NULL) {
			KGSL_CORE_ERR("Failed to get iommu dev handle for "
			"device %s\n", data->iommu_ctxs[i].iommu_ctx_name);
			return -EINVAL;
		}
		iommu_unit->dev[iommu_unit->dev_count].ctx_id =
						data->iommu_ctxs[i].ctx_id;
		iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;

		KGSL_DRV_INFO(mmu->device,
				"Obtained dev handle %p for iommu context %s\n",
				iommu_unit->dev[iommu_unit->dev_count].dev,
				data->iommu_ctxs[i].iommu_ctx_name);

		iommu_unit->dev_count++;
	}
	if (!j) {
		KGSL_CORE_ERR("No ctxts initialized, user ctxt absent\n ");
		return -EINVAL;
	}

	return 0;
}
Example #7
0
static int
kgsl_iommu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int *tlb_flags)
{
	int ret;
	unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */
	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	ret = iommu_unmap_range(iommu_pt->domain, gpuaddr, range);
	if (ret)
		KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
			"with err: %d\n", iommu_pt->domain, gpuaddr,
			range, ret);

#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	if (!ret)
		*tlb_flags = UINT_MAX;
#endif
	return 0;
}
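The *tlb_flags out-parameter tells the caller a TLB flush is still pending when per-process pagetables are enabled. A hedged sketch of the consuming side; kgsl_flush_pagetable_tlb() is a hypothetical helper name.

/* Hypothetical caller acting on the flag kgsl_iommu_unmap() sets */
static void example_unmap_and_flush(struct kgsl_pagetable *pt,
	struct kgsl_memdesc *memdesc)
{
	unsigned int tlb_flags = 0;

	kgsl_iommu_unmap(pt->priv, memdesc, &tlb_flags);
	if (tlb_flags == UINT_MAX)
		kgsl_flush_pagetable_tlb(pt);	/* hypothetical flush */
}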
Example #8
0
static int kgsl_iommu_init(struct kgsl_mmu *mmu)
{
	int status = 0;
	struct kgsl_iommu *iommu;

	iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
	if (!iommu) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
				sizeof(struct kgsl_iommu));
		return -ENOMEM;
	}

	mmu->priv = iommu;
	status = kgsl_get_iommu_ctxt(mmu);
	if (status)
		goto done;
	status = kgsl_set_register_map(mmu);
	if (status)
		goto done;

	kgsl_sharedmem_writel(&mmu->setstate_memory,
				KGSL_IOMMU_SETSTATE_NOP_OFFSET,
				cp_nop_packet(1));

	dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n",
			__func__);
done:
	if (status) {
		kfree(iommu);
		mmu->priv = NULL;
	}
	return status;
}
Example #9
0
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags)
{
	struct kgsl_iommu_unit *iommu_unit = get_iommu_unit(dev);
	struct kgsl_iommu_device *iommu_dev = get_iommu_device(iommu_unit, dev);
	unsigned int ptbase, fsr;

	if (!iommu_dev) {
		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
		return -ENOSYS;
	}

	ptbase = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
					iommu_dev->ctx_id, TTBR0);

	fsr = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
		iommu_dev->ctx_id, FSR);

	KGSL_MEM_CRIT(iommu_dev->kgsldev,
		"GPU PAGE FAULT: addr = %lX pid = %d\n",
		addr, kgsl_mmu_get_ptname_from_ptbase(ptbase));
	KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
		iommu_dev->ctx_id, fsr);

	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
			kgsl_mmu_get_ptname_from_ptbase(ptbase), 0);

	return 0;
}
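The signature above matches the fault-handler callback type this kernel's iommu_set_fault_handler() expects; Example #1 shows the driver installing it on a freshly allocated domain:

/* As done at pagetable creation (see Example #1) */
iommu_set_fault_handler(iommu_pt->domain, kgsl_iommu_fault_handler);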
Example #10
0
static int kgsl_iommu_init(struct kgsl_mmu *mmu)
{
	/*
	 * initialize the device MMU
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_iommu *iommu;

	iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
	if (!iommu) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
				sizeof(struct kgsl_iommu));
		return -ENOMEM;
	}

	mmu->priv = iommu;
	status = kgsl_get_iommu_ctxt(mmu);
	if (status)
		goto done;

	dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n",
			__func__);
done:
	if (status) {
		kfree(iommu);
		mmu->priv = NULL;
	}
	return status;
}
static int
kgsl_iommu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc)
{
	int ret;
	unsigned int range = memdesc->size;
	struct iommu_domain *domain = (struct iommu_domain *)
					mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	ret = iommu_unmap_range(domain, gpuaddr, range);
	if (ret)
		KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
			"with err: %d\n", domain, gpuaddr,
			range, ret);

	return 0;
}
Example #12
0
static int
kgsl_iommu_map(void *mmu_specific_pt,
			struct kgsl_memdesc *memdesc,
			unsigned int protflags,
			unsigned int *tlb_flags)
{
	int ret;
	unsigned int iommu_virt_addr;
	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
	int size = kgsl_sg_size(memdesc->sg, memdesc->sglen);

	BUG_ON(NULL == iommu_pt);

	iommu_virt_addr = memdesc->gpuaddr;

	ret = iommu_map_range(iommu_pt->domain, iommu_virt_addr, memdesc->sg,
				size, (IOMMU_READ | IOMMU_WRITE));
	if (ret) {
		KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) "
				"failed with err: %d\n", iommu_pt->domain,
				iommu_virt_addr, memdesc->sg, size,
				(IOMMU_READ | IOMMU_WRITE), ret);
		return ret;
	}

	return ret;
}
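A hedged call-site sketch for the variant above; GSL_PT_PAGE_RV and GSL_PT_PAGE_WV are assumed read/write protection flags, and the wrapper is illustrative.

/* Hypothetical call site: map a buffer read/write into a pagetable */
static int example_map_buffer(struct kgsl_iommu_pt *iommu_pt,
	struct kgsl_memdesc *memdesc, unsigned int *tlb_flags)
{
	return kgsl_iommu_map(iommu_pt, memdesc,
				GSL_PT_PAGE_RV | GSL_PT_PAGE_WV, tlb_flags);
}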
static int kgsl_iommu_init(struct kgsl_device *device)
{
	/*
	 * initialize the device MMU
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;
	struct kgsl_iommu *iommu;

	mmu->device = device;

	iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
	if (!iommu) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
				sizeof(struct kgsl_iommu));
		return -ENOMEM;
	}

	iommu->iommu_priv_dev_attached = 0;
	iommu->iommu_user_dev_attached = 0;
	status = kgsl_get_iommu_ctxt(iommu, device);
	if (status) {
		kfree(iommu);
		iommu = NULL;
	}
	mmu->priv = iommu;

	dev_info(device->dev, "|%s| MMU type set for device is IOMMU\n",
			__func__);
	return status;
}
Example #14
0
/*
 * kgsl_get_iommu_ctxt - Get device pointer to IOMMU contexts
 * @mmu - Pointer to mmu device
 *
 * Get the device pointers for the IOMMU user and priv contexts of the
 * kgsl device
 * Return - 0 on success else error code
 */
static int kgsl_get_iommu_ctxt(struct kgsl_mmu *mmu)
{
	struct platform_device *pdev =
		container_of(mmu->device->parentdev, struct platform_device,
				dev);
	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
	struct kgsl_iommu *iommu = mmu->device->mmu.priv;
	int i, ret = 0;

	/* Go through the IOMMU data and get all the context devices */
	if (KGSL_IOMMU_MAX_UNITS < pdata_dev->iommu_count) {
		KGSL_CORE_ERR("Too many IOMMU units defined\n");
		ret = -EINVAL;
		goto done;
	}

	for (i = 0; i < pdata_dev->iommu_count; i++) {
		ret = _get_iommu_ctxs(mmu, &pdata_dev->iommu_data[i], i);
		if (ret)
			break;
	}
	iommu->unit_count = pdata_dev->iommu_count;
done:
	return ret;
}
Example #15
0
static int
kgsl_iommu_map(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int protflags,
		unsigned int *tlb_flags)
{
	int ret;
	unsigned int iommu_virt_addr;
	struct iommu_domain *domain = mmu_specific_pt;

	BUG_ON(NULL == domain);

	iommu_virt_addr = memdesc->gpuaddr;

	ret = iommu_map_range(domain, iommu_virt_addr, memdesc->sg,
				memdesc->size, MSM_IOMMU_ATTR_NONCACHED);
	if (ret) {
		KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) "
				"failed with err: %d\n", domain,
				iommu_virt_addr, memdesc->sg, memdesc->size,
				MSM_IOMMU_ATTR_NONCACHED, ret);
		return ret;
	}

#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	/*
	 * Flushing only required if per process pagetables are used. With
	 * global case, flushing will happen inside iommu_map function
	 */
	if (!ret)
		*tlb_flags = UINT_MAX;
#endif
	return ret;
}
Example #16
0
/*
 * kgsl_iommu_get_reg_map_desc - Returns an array of pointers to the
 * memory descriptors which map the IOMMU registers
 * @mmu - Pointer to mmu structure
 * @reg_map_desc - Out parameter in which the address of the array containing
 * pointers to register map descriptors is returned. The caller is expected
 * to free this array
 *
 * Return - The number of iommu units, which is also the number of register
 * map descriptor pointers in the out parameter array
 */
static int kgsl_iommu_get_reg_map_desc(struct kgsl_mmu *mmu,
					void **reg_map_desc)
{
	struct kgsl_iommu *iommu = mmu->priv;
	void **reg_desc_ptr;
	int i;

	/*
	 * Allocate an array of pointers that will hold the addresses of the
	 * register map descriptors
	 */
	reg_desc_ptr = kmalloc(iommu->unit_count *
			sizeof(struct kgsl_memdesc *), GFP_KERNEL);
	if (!reg_desc_ptr) {
		KGSL_CORE_ERR("Failed to kmalloc(%d)\n",
			iommu->unit_count * sizeof(struct kgsl_memdesc *));
		return -ENOMEM;
	}

	for (i = 0; i < iommu->unit_count; i++)
		reg_desc_ptr[i] = &(iommu->iommu_units[i].reg_map);

	*reg_map_desc = reg_desc_ptr;
	return i;
}
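As the comment above notes, ownership of the returned array rests with the caller. A hedged sketch of a compliant caller; the debug walk itself is illustrative.

/* Illustrative caller: use the descriptor array, then free it */
static void example_walk_reg_maps(struct kgsl_mmu *mmu)
{
	struct kgsl_memdesc **reg_maps = NULL;
	int num_units, i;

	num_units = kgsl_iommu_get_reg_map_desc(mmu, (void **)&reg_maps);
	if (num_units <= 0)
		return;

	for (i = 0; i < num_units; i++)
		pr_debug("unit %d regs at %x\n", i, reg_maps[i]->physaddr);

	kfree(reg_maps);	/* the caller owns the array */
}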
Example #17
0
static int
kgsl_iommu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int *tlb_flags)
{
	int ret;
	unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	ret = iommu_unmap_range(iommu_pt->domain, gpuaddr, range);
	if (ret)
		KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
			"with err: %d\n", iommu_pt->domain, gpuaddr,
			range, ret);

#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	/*
	 * Flushing only required if per process pagetables are used. With
	 * global case, flushing will happen inside iommu_map function
	 */
	if (!ret && msm_soc_version_supports_iommu_v1())
		*tlb_flags = UINT_MAX;
#endif
	return 0;
}
static int
kgsl_iommu_map(void *mmu_specific_pt,
			struct kgsl_memdesc *memdesc,
			unsigned int protflags)
{
	int ret;
	unsigned int iommu_virt_addr;
	struct iommu_domain *domain = mmu_specific_pt;

	BUG_ON(NULL == domain);

	iommu_virt_addr = memdesc->gpuaddr;

	ret = iommu_map_range(domain, iommu_virt_addr, memdesc->sg,
				memdesc->size, 0);
	if (ret) {
		KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) "
				"failed with err: %d\n", domain,
				iommu_virt_addr, memdesc->sg, memdesc->size,
				0, ret);
		return ret;
	}

	return ret;
}
void *kgsl_iommu_create_pagetable(void)
{
	struct iommu_domain *domain = iommu_domain_alloc(0);
	if (!domain)
		KGSL_CORE_ERR("Failed to create iommu domain\n");

	return domain;
}
Example #20
0
/*
 * _get_iommu_ctxs - Get device pointers to IOMMU contexts
 * @mmu - Pointer to mmu device
 * @data - Pointer to the platform data containing information about
 * iommu devices for one iommu unit
 * @unit_id - The IOMMU unit number. This is not a specific ID but just
 * a serial number. The serial numbers are treated as IDs of the
 * IOMMU units
 *
 * Return - 0 on success else error code
 */
static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
	struct kgsl_device_iommu_data *data, unsigned int unit_id)
{
	struct kgsl_iommu *iommu = mmu->priv;
	struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[unit_id];
	int i;

	if (data->iommu_ctx_count > KGSL_IOMMU_MAX_DEVS_PER_UNIT) {
		KGSL_CORE_ERR("Too many iommu devices defined for an "
				"IOMMU unit\n");
		return -EINVAL;
	}

	for (i = 0; i < data->iommu_ctx_count; i++) {
		if (!data->iommu_ctxs[i].iommu_ctx_name)
			continue;

		iommu_unit->dev[iommu_unit->dev_count].dev =
			msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
		if (iommu_unit->dev[iommu_unit->dev_count].dev == NULL) {
			KGSL_CORE_ERR("Failed to get iommu dev handle for "
			"device %s\n", data->iommu_ctxs[i].iommu_ctx_name);
			return -EINVAL;
		}
		if (KGSL_IOMMU_CONTEXT_USER != data->iommu_ctxs[i].ctx_id &&
			KGSL_IOMMU_CONTEXT_PRIV != data->iommu_ctxs[i].ctx_id) {
			KGSL_CORE_ERR("Invalid context ID defined: %d\n",
					data->iommu_ctxs[i].ctx_id);
			return -EINVAL;
		}
		iommu_unit->dev[iommu_unit->dev_count].ctx_id =
						data->iommu_ctxs[i].ctx_id;
		iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;

		KGSL_DRV_INFO(mmu->device,
				"Obtained dev handle %p for iommu context %s\n",
				iommu_unit->dev[iommu_unit->dev_count].dev,
				data->iommu_ctxs[i].iommu_ctx_name);

		iommu_unit->dev_count++;
	}

	return 0;
}
Example #21
0
static int
kgsl_iommu_map(void *mmu_specific_pt,
			struct kgsl_memdesc *memdesc,
			unsigned int protflags)
{
	int ret = 0;
	unsigned int physaddr;
	unsigned int iommu_virt_addr;
	unsigned int offset = 0;
	int map_order;
	struct iommu_domain *domain = (struct iommu_domain *)
					mmu_specific_pt;

	BUG_ON(NULL == domain);

	map_order = get_order(SZ_4K);

	for (iommu_virt_addr = memdesc->gpuaddr;
		iommu_virt_addr < (memdesc->gpuaddr + memdesc->size);
		iommu_virt_addr += SZ_4K, offset += PAGE_SIZE) {
		physaddr = memdesc->ops->physaddr(memdesc, offset);
		if (!physaddr) {
			KGSL_CORE_ERR("Failed to convert %x address to "
			"physical\n", (unsigned int)memdesc->hostptr + offset);
			kgsl_iommu_unmap(mmu_specific_pt, memdesc);
			return -EFAULT;
		}
		ret = iommu_map(domain, iommu_virt_addr, physaddr,
				map_order, MSM_IOMMU_ATTR_NONCACHED);
		if (ret) {
			KGSL_CORE_ERR("iommu_map(%p, %x, %x, %d, %d) "
			"failed with err: %d\n", domain,
			iommu_virt_addr, physaddr, map_order,
			MSM_IOMMU_ATTR_NONCACHED, ret);
			kgsl_iommu_unmap(mmu_specific_pt, memdesc);
			return ret;
		}
	}

	return ret;
}
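The loop above leans on the memdesc's physaddr() op to turn a byte offset into a physical address, with 0 signalling failure. A hedged sketch of such an op for a physically contiguous allocation; illustrative, not the driver's actual implementation.

/* Illustrative physaddr op for a physically contiguous memdesc */
static unsigned int example_contig_physaddr(struct kgsl_memdesc *memdesc,
	unsigned int offset)
{
	if (offset >= memdesc->size)
		return 0;	/* caller maps this to the -EFAULT path above */

	return memdesc->physaddr + offset;
}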
Example #22
0
static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
{
	struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry));
	else
		kref_init(&entry->refcount);

	return entry;
}
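kref_init() above starts the entry at a refcount of one. A hedged sketch of the matching put path; the release callback and put helper are illustrative assumptions, not the driver's own kgsl_mem_entry_put.

/* Illustrative release pairing for the kref initialized above */
static void example_mem_entry_release(struct kref *kref)
{
	struct kgsl_mem_entry *entry =
		container_of(kref, struct kgsl_mem_entry, refcount);

	kfree(entry);
}

static void example_mem_entry_put(struct kgsl_mem_entry *entry)
{
	if (entry)
		kref_put(&entry->refcount, example_mem_entry_release);
}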
Example #23
0
static int kgsl_iommu_pt_equal(struct kgsl_pagetable *pt,
					unsigned int pt_base)
{
	struct iommu_domain *domain;
	if (!pt) {
		KGSL_CORE_ERR("pt is NULL\n");
		return 0;
	}
	domain = pt->priv;

	return pt_base && ((unsigned int)domain == pt_base);
}
Example #24
0
static int kgsl_iommu_init(struct kgsl_mmu *mmu)
{
	/*
	 * initialize the device MMU
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_iommu *iommu;

	iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
	if (!iommu) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
				sizeof(struct kgsl_iommu));
		return -ENOMEM;
	}

	mmu->priv = iommu;
	status = kgsl_get_iommu_ctxt(mmu);
	if (status)
		goto done;
	status = kgsl_set_register_map(mmu);
	if (status)
		goto done;

	if (msm_soc_version_supports_iommu_v1()) {
		iommu->iommu_reg_list = kgsl_iommuv1_reg;
		iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V1;
	} else {
		iommu->iommu_reg_list = kgsl_iommuv2_reg;
		iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V2;
	}

	/* A nop is required in an indirect buffer when switching
	 * pagetables in-stream */
	kgsl_sharedmem_writel(&mmu->setstate_memory,
				KGSL_IOMMU_SETSTATE_NOP_OFFSET,
				cp_nop_packet(1));

	dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n",
			__func__);
done:
	if (status) {
		kfree(iommu);
		mmu->priv = NULL;
	}
	return status;
}
Example #25
0
/*
 * kgsl_iommu_create_pagetable - Create an IOMMU pagetable
 *
 * Allocate memory to hold a pagetable and allocate the IOMMU
 * domain which is the actual IOMMU pagetable
 * Return - pointer to the new pagetable on success, NULL on failure
 */
void *kgsl_iommu_create_pagetable(void)
{
	struct kgsl_iommu_pt *iommu_pt;

	iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
	if (!iommu_pt) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
				sizeof(struct kgsl_iommu_pt));
		return NULL;
	}
	iommu_pt->domain = iommu_domain_alloc(&platform_bus_type,
				MSM_IOMMU_DOMAIN_PT_CACHEABLE);
	if (!iommu_pt->domain) {
		KGSL_CORE_ERR("Failed to create iommu domain\n");
		kfree(iommu_pt);
		return NULL;
	} else {
		iommu_set_fault_handler(iommu_pt->domain,
			kgsl_iommu_fault_handler);
	}

	return iommu_pt;
}
Example #26
0
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags)
{
	struct kgsl_iommu_unit *iommu_unit = get_iommu_unit(dev);
	struct kgsl_iommu_device *iommu_dev = get_iommu_device(iommu_unit, dev);
	unsigned int ptbase, fsr;
	static unsigned long last_pagefault_jiffies;
	static int last_pid;
	int current_pid;
	unsigned long wait_time_jiff = 0;

	if (!iommu_dev) {
		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
		return -ENOSYS;
	}

	wait_time_jiff = last_pagefault_jiffies + msecs_to_jiffies(500);
	last_pagefault_jiffies = jiffies;

	ptbase = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
			iommu_dev->ctx_id, TTBR0);
	current_pid = kgsl_mmu_get_ptname_from_ptbase(ptbase);

	/*
	 * Lots of pagefaults in the same process that range over a short amount
	 * of time are likely to be part of the same problem, so try to throttle
	 * the number of messages being printed.
	 */
	if ((last_pid != current_pid) ||
	    time_after(jiffies, wait_time_jiff)) {
		fsr = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
			iommu_dev->ctx_id, FSR);

		KGSL_MEM_CRIT(iommu_dev->kgsldev,
			"GPU PAGE FAULT: addr = %lX pid = %d\n",
			addr, kgsl_mmu_get_ptname_from_ptbase(ptbase));
		KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
			iommu_dev->ctx_id, fsr);

		last_pid = current_pid;
	}

	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
			kgsl_mmu_get_ptname_from_ptbase(ptbase), 0);

	return 0;
}
Example #27
0
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags)
{
	struct kgsl_iommu_unit *iommu_unit = get_iommu_unit(dev);
	struct kgsl_iommu_device *iommu_dev = get_iommu_device(iommu_unit, dev);
	unsigned int ptbase, fsr;
	static unsigned long last_pagefault_jiffies;
	static int last_pid;
	int current_pid;
	unsigned long wait_time_jiff = 0;

	if (!iommu_dev) {
		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
		return -ENOSYS;
	}

	wait_time_jiff = last_pagefault_jiffies + msecs_to_jiffies(500);
	last_pagefault_jiffies = jiffies;

	ptbase = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
			iommu_dev->ctx_id, TTBR0);
	current_pid = kgsl_mmu_get_ptname_from_ptbase(ptbase);

	/*
	 * Lots of pagefaults in the same process over a short amount of time
	 * are likely to be part of the same problem, so try to throttle the
	 * number of messages being printed.
	 */
	if ((last_pid != current_pid) ||
	    time_after(jiffies, wait_time_jiff)) {
		fsr = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
			iommu_dev->ctx_id, FSR);

		KGSL_MEM_CRIT(iommu_dev->kgsldev,
			"GPU PAGE FAULT: addr = %lX pid = %d\n",
			addr, kgsl_mmu_get_ptname_from_ptbase(ptbase));
		KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
			iommu_dev->ctx_id, fsr);

		last_pid = current_pid;
	}

	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
			kgsl_mmu_get_ptname_from_ptbase(ptbase), 0);

	return 0;
}
Example #28
0
static int kgsl_iommu_get_reg_map_desc(struct kgsl_mmu *mmu,
					void **reg_map_desc)
{
	struct kgsl_iommu *iommu = mmu->priv;
	void **reg_desc_ptr;
	int i;

	reg_desc_ptr = kmalloc(iommu->unit_count *
			sizeof(struct kgsl_memdesc *), GFP_KERNEL);
	if (!reg_desc_ptr) {
		KGSL_CORE_ERR("Failed to kmalloc(%d)\n",
			iommu->unit_count * sizeof(struct kgsl_memdesc *));
		return -ENOMEM;
	}

	for (i = 0; i < iommu->unit_count; i++)
		reg_desc_ptr[i] = &(iommu->iommu_units[i].reg_map);

	*reg_map_desc = reg_desc_ptr;
	return i;
}
static int kgsl_get_iommu_ctxt(struct kgsl_iommu *iommu,
				struct kgsl_device *device)
{
	int status = 0;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
	if (pdata_dev->iommu_user_ctx_name)
		iommu->iommu_user_dev = msm_iommu_get_ctx(
					pdata_dev->iommu_user_ctx_name);
	if (pdata_dev->iommu_priv_ctx_name)
		iommu->iommu_priv_dev = msm_iommu_get_ctx(
					pdata_dev->iommu_priv_ctx_name);
	if (!iommu->iommu_user_dev) {
		KGSL_CORE_ERR("Failed to get user iommu dev handle for "
				"device %s\n",
				pdata_dev->iommu_user_ctx_name);
		status = -EINVAL;
	}
	return status;
}
Example #30
0
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags)
{
	int ret = 0;
	struct kgsl_mmu *mmu;
	struct kgsl_iommu *iommu;
	struct kgsl_iommu_unit *iommu_unit;
	struct kgsl_iommu_device *iommu_dev;
	unsigned int ptbase, fsr;

	ret = get_iommu_unit(dev, &mmu, &iommu_unit);
	if (ret)
		goto done;
	iommu_dev = get_iommu_device(iommu_unit, dev);
	if (!iommu_dev) {
		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
		ret = -ENOSYS;
		goto done;
	}
	iommu = mmu->priv;

	ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
					iommu_dev->ctx_id, TTBR0);

	fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
		iommu_dev->ctx_id, FSR);

	KGSL_MEM_CRIT(iommu_dev->kgsldev,
		"GPU PAGE FAULT: addr = %lX pid = %d\n",
		addr, kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase));
	KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
		iommu_dev->ctx_id, fsr);

	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase), 0);

done:
	return ret;
}