/*
 * set_dma_caps() - configure DMA addressing limits for a PCI function.
 *
 * Prefers a 64-bit streaming DMA mask and degrades to 32-bit if the
 * platform rejects it; repeats the same negotiation for the coherent
 * (consistent) mask, then caps scatter/gather segments at 2 GB.
 *
 * Return: 0 on success, negative errno if even the 32-bit masks fail.
 */
static int set_dma_caps(struct pci_dev *pdev)
{
	int err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));

	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
		return err;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev,
			"Can't set consistent PCI DMA mask, aborting.\n");
		return err;
	}

	/* 2 GB max scatter/gather segment, expressed in bytes. */
	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);

	return err;
}
/* * drm_iommu_attach_device- attach device to iommu mapping * * @drm_dev: DRM device * @subdrv_dev: device to be attach * * This function should be called by sub drivers to attach it to iommu * mapping. */ int drm_iommu_attach_device(struct drm_device *drm_dev, struct device *subdrv_dev) { struct device *dev = drm_dev->dev; int ret; if (!dev->archdata.mapping) { DRM_ERROR("iommu_mapping is null.\n"); return -EFAULT; } subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, sizeof(*subdrv_dev->dma_parms), GFP_KERNEL); dma_set_max_seg_size(subdrv_dev, 0xffffffffu); ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); if (ret < 0) { DRM_DEBUG_KMS("failed iommu attach.\n"); return ret; } /* * Set dma_ops to drm_device just one time. * * The dma mapping api needs device object and the api is used * to allocate physial memory and map it with iommu table. * If iommu attach succeeded, the sub driver would have dma_ops * for iommu and also all sub drivers have same dma_ops. */ if (!dev->archdata.dma_ops) dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops; return 0; }
/* * drm_create_iommu_mapping - create a mapping structure * * @drm_dev: DRM device */ int drm_create_iommu_mapping(struct drm_device *drm_dev) { struct dma_iommu_mapping *mapping = NULL; struct rockchip_drm_private *priv = drm_dev->dev_private; struct device *dev = drm_dev->dev; if (!priv->da_start) priv->da_start = ROCKCHIP_DEV_ADDR_START; if (!priv->da_space_size) priv->da_space_size = ROCKCHIP_DEV_ADDR_SIZE; if (!priv->da_space_order) priv->da_space_order = ROCKCHIP_DEV_ADDR_ORDER; mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start, priv->da_space_size, priv->da_space_order); if (IS_ERR(mapping)) return PTR_ERR(mapping); dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); dma_set_max_seg_size(dev, 0xffffffffu); dev->archdata.mapping = mapping; return 0; }
/* * drm_create_iommu_mapping - create a mapping structure * * @drm_dev: DRM device */ int drm_create_iommu_mapping(struct drm_device *drm_dev) { struct dma_iommu_mapping *mapping = NULL; struct exynos_drm_private *priv = drm_dev->dev_private; struct device *dev = drm_dev->dev; if (!priv->da_start) priv->da_start = EXYNOS_DEV_ADDR_START; if (!priv->da_space_size) priv->da_space_size = EXYNOS_DEV_ADDR_SIZE; if (!priv->da_space_order) priv->da_space_order = EXYNOS_DEV_ADDR_ORDER; mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start, priv->da_space_size, priv->da_space_order); if (IS_ERR(mapping)) return PTR_ERR(mapping); dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); if (!dev->dma_parms) goto error; dma_set_max_seg_size(dev, 0xffffffffu); dev->archdata.mapping = mapping; return 0; error: arm_iommu_release_mapping(mapping); return -ENOMEM; }
/*
 * configure_dma_max_seg_size() - ensure @dev has dma_parms and set a
 * 4 GB - 1 maximum DMA segment size.
 *
 * Allocates dev->dma_parms on first use (left in place afterwards, so
 * repeated calls are cheap).  Returns 0 on success, -ENOMEM if the
 * allocation fails.
 */
static inline int configure_dma_max_seg_size(struct device *dev)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	return 0;
}
/*
 * Attach a (component) device to the shared drm dma mapping from master drm
 * device. This is used by the VOPs to map GEM buffers to a common DMA
 * mapping.
 *
 * Returns 0 on success or the error from dma_set_coherent_mask() /
 * arm_iommu_attach_device().
 */
int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
				   struct device *dev)
{
	int ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));

	if (ret)
		return ret;

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));

	/* Share the master device's IOMMU mapping with this component. */
	return arm_iommu_attach_device(dev, drm_dev->dev->archdata.mapping);
}
/*
 * macio_add_one_device() - allocate and register one macio_dev for the
 * device-tree node @np under bus @chip.
 *
 * @chip:       owning macio chip (provides the bus and, on PCI, the parent
 *              pci_dev whose DMA ops/data are inherited)
 * @parent:     parent struct device for the new device
 * @np:         device-tree node being wrapped (NULL is rejected)
 * @in_bay:     media bay this device sits in, or NULL
 * @parent_res: parent resource used when setting up this device's resources
 *
 * Returns the new macio_dev on success, NULL on allocation or
 * registration failure.  Ownership of the allocated structure passes to
 * the driver core once of_device_register() succeeds (released via
 * macio_release_dev).
 */
static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
					       struct device *parent,
					       struct device_node *np,
					       struct macio_dev *in_bay,
					       struct resource *parent_res)
{
	struct macio_dev *dev;
	const u32 *reg;

	if (np == NULL)
		return NULL;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->bus = &chip->lbus;
	dev->media_bay = in_bay;
	dev->ofdev.dev.of_node = np;
	/* 32-bit DMA mask; dma_mask must point at storage we own. */
	dev->ofdev.archdata.dma_mask = 0xffffffffUL;
	dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask;
	dev->ofdev.dev.parent = parent;
	dev->ofdev.dev.bus = &macio_bus_type;
	dev->ofdev.dev.release = macio_release_dev;
	/* dma_parms lives inside macio_dev, so no separate allocation. */
	dev->ofdev.dev.dma_parms = &dev->dma_parms;

	/* Standard DMA paremeters */
	dma_set_max_seg_size(&dev->ofdev.dev, 65536);
	dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff);

#ifdef CONFIG_PCI
	/* Set the DMA ops to the ones from the PCI device, this could be
	 * fishy if we didn't know that on PowerMac it's always direct ops
	 * or iommu ops that will work fine
	 */
	dev->ofdev.dev.archdata.dma_ops =
		chip->lbus.pdev->dev.archdata.dma_ops;
	dev->ofdev.dev.archdata.dma_data =
		chip->lbus.pdev->dev.archdata.dma_data;
#endif /* CONFIG_PCI */

#ifdef DEBUG
	printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",
	       dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj);
#endif

	/* MacIO itself has a different reg, we use it's PCI base */
	if (np == chip->of_node) {
		dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s",
			     chip->lbus.index,
#ifdef CONFIG_PCI
			(unsigned int)pci_resource_start(chip->lbus.pdev, 0),
#else
			0, /* NuBus may want to do something better here */
#endif
			MAX_NODE_NAME_SIZE, np->name);
	} else {
		/* Child nodes are named after their "reg" property (0 if absent). */
		reg = of_get_property(np, "reg", NULL);
		dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s",
			     chip->lbus.index,
			     reg ? *reg : 0, MAX_NODE_NAME_SIZE, np->name);
	}

	/* Setup interrupts & resources */
	macio_setup_interrupts(dev);
	macio_setup_resources(dev, parent_res);
	macio_add_missing_resources(dev);

	/* Register with core */
	if (of_device_register(&dev->ofdev) != 0) {
		printk(KERN_DEBUG"macio: device registration error for %s!\n",
		       dev_name(&dev->ofdev.dev));
		/*
		 * NOTE(review): kfree after a failed registration — device
		 * core convention is normally put_device() once the device
		 * has been initialized; confirm of_device_register() does
		 * not take a reference before failing.
		 */
		kfree(dev);
		return NULL;
	}

	return dev;
}
/*
 * sdma_probe() - probe the i.MX SDMA engine platform device.
 *
 * Maps the controller registers, grabs clock and IRQ, initializes the
 * per-channel state (channel 0 is reserved for internal SDMA use, so
 * dmaengine channel N maps to SDMA channel N+1), loads the script
 * address table and firmware, and registers with the dmaengine core.
 *
 * Returns 0 on success or a negative errno; all acquired resources are
 * unwound on the goto-cleanup ladder at the bottom.
 */
static int __init sdma_probe(struct platform_device *pdev)
{
	int ret;
	int irq;
	struct resource *iores;
	struct sdma_platform_data *pdata = pdev->dev.platform_data;
	int i;
	struct sdma_engine *sdma;

	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	sdma->dev = &pdev->dev;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!iores || irq < 0 || !pdata) {
		ret = -EINVAL;
		goto err_irq;
	}

	if (!request_mem_region(iores->start, resource_size(iores),
				pdev->name)) {
		ret = -EBUSY;
		goto err_request_region;
	}

	sdma->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sdma->clk)) {
		ret = PTR_ERR(sdma->clk);
		goto err_clk;
	}

	sdma->regs = ioremap(iores->start, resource_size(iores));
	if (!sdma->regs) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
	if (ret)
		goto err_request_irq;

	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
	if (!sdma->script_addrs) {
		/*
		 * Fix: ret was not set here before, so an allocation
		 * failure made probe return 0 (the successful request_irq
		 * result) while unwinding everything.
		 */
		ret = -ENOMEM;
		goto err_alloc;
	}

	sdma->version = pdata->sdma_version;

	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);

	INIT_LIST_HEAD(&sdma->dma_device.channels);
	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		sdmac->sdma = sdma;
		spin_lock_init(&sdmac->lock);

		sdmac->chan.device = &sdma->dma_device;
		sdmac->channel = i;

		/*
		 * Add the channel to the DMAC list. Do not add channel 0 though
		 * because we need it internally in the SDMA driver. This also means
		 * that channel 0 in dmaengine counting matches sdma channel 1.
		 */
		if (i)
			list_add_tail(&sdmac->chan.device_node,
				      &sdma->dma_device.channels);
	}

	ret = sdma_init(sdma);
	if (ret)
		goto err_init;

	if (pdata->script_addrs)
		sdma_add_scripts(sdma, pdata->script_addrs);

	/*
	 * NOTE(review): return value deliberately ignored — firmware load
	 * appears to be best-effort; confirm the engine degrades gracefully
	 * without it.
	 */
	sdma_get_firmware(sdma, pdata->cpu_name, pdata->to_version);

	sdma->dma_device.dev = &pdev->dev;

	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
	sdma->dma_device.device_tx_status = sdma_tx_status;
	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
	sdma->dma_device.device_control = sdma_control;
	sdma->dma_device.device_issue_pending = sdma_issue_pending;
	/* dma_parms is embedded in sdma_engine, no separate allocation. */
	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
	dma_set_max_seg_size(sdma->dma_device.dev, 65535);

	ret = dma_async_device_register(&sdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	dev_info(sdma->dev, "initialized\n");

	return 0;

err_init:
	kfree(sdma->script_addrs);
err_alloc:
	free_irq(irq, sdma);
err_request_irq:
	iounmap(sdma->regs);
err_ioremap:
	clk_put(sdma->clk);
err_clk:
	release_mem_region(iores->start, resource_size(iores));
err_request_region:
err_irq:
	kfree(sdma);
	return ret;
}