static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *phb;
	struct cxl_afu *afu;
	struct cxl_context *ctx;

	phb = pci_bus_to_host(dev->bus);
	afu = (struct cxl_afu *)phb->private_data;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&dev->dev,
			 "%s: Device link is down, refusing to enable AFU\n",
			 __func__);
		return false;
	}

	set_dma_ops(&dev->dev, &dma_direct_ops);
	set_dma_offset(&dev->dev, PAGE_OFFSET);

	/*
	 * Allocate a context to do cxl things too. If we eventually do real
	 * DMA ops, we'll need a default context to attach them to.
	 */
	ctx = cxl_dev_context_init(dev);
	if (!ctx)
		return false;
	dev->dev.archdata.cxl_ctx = ctx;

	return (cxl_ops->afu_check_and_enable(afu) == 0);
}
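For context, a minimal sketch of how a hook like this gets wired up on powerpc: the PHB's pci_controller_ops table carries an enable_device_hook callback that the PCI core invokes before enabling a device. The structure name and the set of neighbouring callbacks shown here are assumptions for illustration, not taken from the listing above.

/* Hedged wiring sketch: install the hook in the PHB's controller ops. */
static struct pci_controller_ops cxl_pci_controller_ops = {
	.enable_device_hook	= cxl_pci_enable_device_hook,
	/* other callbacks (.release_device, .dma_dev_setup, ...) as the
	 * PHB requires -- omitted here */
};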
void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
	set_dma_ops(dev, NULL);

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
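As a usage sketch (the driver callback name is hypothetical), the unregister call pairs with dmabounce_register_dev() below and belongs in the device's teardown path:

/* Hypothetical teardown path pairing with dmabounce_register_dev(). */
static void example_remove(struct device *dev)
{
	/*
	 * All outstanding safe buffers must have been unmapped by now,
	 * otherwise dmabounce_unregister_dev() will BUG().
	 */
	dmabounce_unregister_dev(dev);
}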
int dmabounce_register_dev(struct device *dev,
	unsigned long small_buffer_size, unsigned long large_buffer_size,
	int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev, "Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);
	device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev,
						   &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;
	set_dma_ops(dev, &dmabounce_ops);

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
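A hedged registration sketch, loosely modeled on how platform code uses this API; the bounce predicate, the 1 MiB address limit, and the pool sizes are made-up values for illustration, not the real ones:

/*
 * Hypothetical bounce predicate: bounce any buffer the device's DMA
 * window (assumed to end at 1 MiB here) cannot reach directly.
 */
static int example_needs_bounce(struct device *dev, dma_addr_t addr,
				size_t size)
{
	return addr + size > SZ_1M;	/* illustrative limit only */
}

static int example_probe(struct device *dev)
{
	/* 512-byte small pool, no large pool (sizes are assumptions). */
	return dmabounce_register_dev(dev, 512, 0, example_needs_bounce);
}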
static int mvebu_hwcc_platform_notifier(struct notifier_block *nb,
					unsigned long event, void *__dev)
{
	struct device *dev = __dev;

	if (event != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;
	set_dma_ops(dev, &mvebu_hwcc_dma_ops);

	return NOTIFY_OK;
}
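A minimal wiring sketch, assuming the notifier is meant for platform devices; the notifier_block name and the init-function name are assumptions:

static struct notifier_block mvebu_hwcc_nb = {
	.notifier_call = mvebu_hwcc_platform_notifier,
};

/* Typically called from the platform's init code (call site assumed). */
static void __init mvebu_hwcc_init(void)
{
	bus_register_notifier(&platform_bus_type, &mvebu_hwcc_nb);
}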
static int dflt_bus_notify(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;

	set_dma_ops(dev, &dma_direct_ops);

	return NOTIFY_DONE;
}
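A hedged wiring sketch for this default-ops notifier; the high priority here is an assumption, chosen so the defaults are installed before any lower-priority notifier on the same chain runs and can override them:

static struct notifier_block dflt_plat_bus_notifier = {
	.notifier_call = dflt_bus_notify,
	.priority = INT_MAX,	/* run first; later notifiers may override */
};

static int __init setup_dflt_bus_notifier(void)
{
	bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier);
	return 0;
}
arch_initcall(setup_dflt_bus_notifier);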
static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;
	struct dev_archdata *sd;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;

	sd = &dev->archdata;
	sd->max_direct_dma_addr = 0;

	/*
	 * May need to bounce if the device can't address all of DRAM,
	 * e.g. a 32-bit mask gives mask + 1 = 4 GiB; if DRAM extends
	 * past that, install the swiotlb bounce-buffering ops.
	 */
	if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
		set_dma_ops(dev, &swiotlb_dma_ops);

	return NOTIFY_DONE;
}
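For context, a sketch of how a helper might consult max_direct_dma_addr when deciding whether an address is directly reachable. This mirrors the powerpc dma_capable() pattern, but the exact shape and the function name here are assumptions:

/*
 * Sketch of a capability check that honours max_direct_dma_addr
 * (0 means "no extra limit beyond the DMA mask").
 */
static inline bool example_dma_capable(struct device *dev, dma_addr_t addr,
				       size_t size)
{
	struct dev_archdata *sd = &dev->archdata;

	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
		return false;	/* beyond the direct-mapped window: bounce */

	return addr + size <= *dev->dma_mask;
}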