Example #1
0
/*
 * drm_iommu_attach_device - attach device to iommu mapping
 *
 * @drm_dev: DRM device
 * @subdrv_dev: device to be attached
 *
 * This function should be called by sub drivers to attach themselves to the
 * iommu mapping.
 */
static int drm_iommu_attach_device(struct drm_device *drm_dev,
				struct device *subdrv_dev)
{
	struct exynos_drm_private *priv = drm_dev->dev_private;
	int ret;

	if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
		DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
			  dev_name(subdrv_dev));
		return -EINVAL;
	}

	ret = configure_dma_max_seg_size(subdrv_dev);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		if (to_dma_iommu_mapping(subdrv_dev))
			arm_iommu_detach_device(subdrv_dev);

		ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
	} else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
		ret = iommu_attach_device(priv->mapping, subdrv_dev);
	}

	if (ret)
		clear_dma_max_seg_size(subdrv_dev);

	return ret;
}
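For context, this helper is reached from a sub-device's bind path. A minimal caller sketch under that assumption follows; exynos_sub_bind() and its body are illustrative only, not code from the driver.

/* Hypothetical component bind callback that attaches its device. */
static int exynos_sub_bind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm_dev = data;
	int ret;

	ret = drm_iommu_attach_device(drm_dev, dev);
	if (ret)
		return ret;	/* no usable IOMMU mapping, abort the bind */

	/* ... set up planes/CRTCs for this sub-device ... */
	return 0;
}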
Example #2
0
int iommu_dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->dma_supported_op)
		return ops->dma_supported_op(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		dev_info(dev, "Force SAC with mask %llx\n",
			 (unsigned long long)mask);
		return 0;
	}

	return 1;
}
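This hook sits behind the generic dma_supported() path, so drivers exercise it indirectly through dma_set_mask(). A short illustrative sketch (example_setup_dma_mask() is an assumed name):

static int example_setup_dma_mask(struct device *dev)
{
	/* Prefer 64-bit addressing; dma_set_mask() consults dma_supported(). */
	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
		return 0;

	/* Fall back to 32-bit DMA if the wide mask is rejected. */
	return dma_set_mask(dev, DMA_BIT_MASK(32));
}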
Example #3
0
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t gfp, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

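	/*
	 * Strip zone modifiers from the caller's flags;
	 * dma_alloc_coherent_gfp_flags() re-derives GFP_DMA/GFP_DMA32
	 * from the device's coherent DMA mask below.
	 */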
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc)
		return NULL;

	memory = ops->alloc(dev, size, dma_handle,
			    dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}
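Most callers reach this through dma_alloc_coherent(), i.e. with NULL attrs. A minimal usage sketch (example_alloc_ring() and the PAGE_SIZE buffer size are assumptions):

static void *example_alloc_ring(struct device *dev, dma_addr_t *dma_handle)
{
	/* NULL attrs makes this equivalent to dma_alloc_coherent(). */
	return dma_alloc_attrs(dev, PAGE_SIZE, dma_handle, GFP_KERNEL, NULL);
}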
Example #4
0
/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."
	};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}
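At the call site, the -EINVAL return decides whether DMA-dependent features stay enabled. A hedged sketch of how a load path might consume the result (the function name and fallback policy are assumptions, not vmwgfx code):

static bool example_vmw_setup_dma(struct vmw_private *dev_priv)
{
	if (vmw_dma_select_mode(dev_priv) != 0) {
		/* No usable DMA: caller disables features that require it. */
		return false;
	}
	return true;
}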
Example #5
0
void dma_free_attrs(struct device *dev, size_t size,
		    void *vaddr, dma_addr_t bus,
		    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());       /* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free)
		ops->free(dev, size, vaddr, bus, attrs);
}
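The free side mirrors the allocation in Example #3. An illustrative counterpart to the alloc sketch above (example_free_ring() is an assumed name; the size must match the allocation):

static void example_free_ring(struct device *dev, void *ring,
			      dma_addr_t dma_handle)
{
	/* NULL attrs mirrors dma_free_coherent(); size matches the alloc. */
	dma_free_attrs(dev, PAGE_SIZE, ring, dma_handle, NULL);
}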