static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
		struct adf_buffer_mapping *mapping)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < buf->n_planes; i++) {
		struct dma_buf_attachment *attachment;
		struct sg_table *sg_table;

		/* attach the plane's dma-buf to the display device... */
		attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev);
		if (IS_ERR(attachment)) {
			ret = PTR_ERR(attachment);
			dev_err(&dev->base.dev, "attaching plane %zu failed: %d\n",
					i, ret);
			goto done;
		}
		mapping->attachments[i] = attachment;

		/* ...and map it for device DMA */
		sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
		if (IS_ERR(sg_table)) {
			ret = PTR_ERR(sg_table);
			dev_err(&dev->base.dev, "mapping plane %zu failed: %d\n",
					i, ret);
			goto done;
		} else if (!sg_table) {
			ret = -ENOMEM;
			dev_err(&dev->base.dev, "mapping plane %zu failed\n",
					i);
			goto done;
		}
		mapping->sg_tables[i] = sg_table;
	}

done:
	if (ret < 0) {
		/* unwind any planes that were already attached or mapped */
		adf_buffer_mapping_cleanup(mapping, buf);
		memset(mapping, 0, sizeof(*mapping));
	}

	return ret;
}
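/*
 * For reference: the error path above relies on adf_buffer_mapping_cleanup()
 * (defined elsewhere in this file) to unwind a partially built mapping.  A
 * minimal sketch of that unwinding is shown below under a hypothetical name;
 * it assumes planes that were never attached or mapped are still NULL in
 * @mapping, which is why adf_buffer_map() works from a zeroed mapping and
 * re-zeroes it on failure.
 */
static void __maybe_unused example_buffer_mapping_unwind(
		struct adf_buffer_mapping *mapping, struct adf_buffer *buf)
{
	size_t i;

	for (i = 0; i < buf->n_planes; i++) {
		if (mapping->sg_tables[i])
			dma_buf_unmap_attachment(mapping->attachments[i],
					mapping->sg_tables[i], DMA_TO_DEVICE);
		if (mapping->attachments[i])
			dma_buf_detach(buf->dma_bufs[i],
					mapping->attachments[i]);
	}
}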
/**
 * adf_device_post_nocopy - flip to a new set of buffers
 *
 * adf_device_post_nocopy() has the same behavior as adf_device_post(),
 * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
 * not take an extra reference on the dma-bufs in @bufs.
 *
 * @intfs, @bufs, and @custom_data must point to buffers allocated by
 * kmalloc().  On success, ADF takes ownership of these buffers and the
 * dma-bufs in @bufs, and will kfree()/dma_buf_put() them when they are no
 * longer needed.  On failure, adf_device_post_nocopy() does NOT take
 * ownership of these buffers or the dma-bufs, and the caller must clean
 * them up.
 *
 * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
 * Clients may find the nocopy variant useful in limited cases, but most
 * should call adf_device_post() instead.
 */
struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
		struct adf_interface **intfs, size_t n_intfs,
		struct adf_buffer *bufs, size_t n_bufs,
		void *custom_data, size_t custom_data_size)
{
	struct adf_pending_post *cfg;
	struct adf_buffer_mapping *mappings;
	struct sync_fence *ret;
	size_t i;
	int err;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return ERR_PTR(-ENOMEM);

	mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL);
	if (!mappings) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	mutex_lock(&dev->client_lock);

	/* validate and pin every buffer before handing it to the driver */
	for (i = 0; i < n_bufs; i++) {
		err = adf_buffer_validate(&bufs[i]);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto err_buf;
		}

		err = adf_buffer_map(dev, &bufs[i], &mappings[i]);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto err_buf;
		}
	}

	INIT_LIST_HEAD(&cfg->head);
	cfg->config.n_bufs = n_bufs;
	cfg->config.bufs = bufs;
	cfg->config.mappings = mappings;
	cfg->config.custom_data = custom_data;
	cfg->config.custom_data_size = custom_data_size;

	err = dev->ops->validate(dev, &cfg->config, &cfg->state);
	if (err < 0) {
		ret = ERR_PTR(err);
		goto err_buf;
	}

	mutex_lock(&dev->post_lock);

	/* get the fence that will signal once these buffers leave the screen */
	if (dev->ops->complete_fence)
		ret = dev->ops->complete_fence(dev, &cfg->config,
				cfg->state);
	else
		ret = adf_sw_complete_fence(dev);

	if (IS_ERR(ret))
		goto err_fence;

	/* queue the validated config for the post worker to flip to */
	list_add_tail(&cfg->head, &dev->post_list);
	queue_kthread_work(&dev->post_worker, &dev->post_work);
	mutex_unlock(&dev->post_lock);
	mutex_unlock(&dev->client_lock);
	kfree(intfs);
	return ret;

err_fence:
	mutex_unlock(&dev->post_lock);

err_buf:
	for (i = 0; i < n_bufs; i++)
		adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]);

	mutex_unlock(&dev->client_lock);
	kfree(mappings);

err_alloc:
	kfree(cfg);
	return ret;
}
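/*
 * Hypothetical usage sketch (not part of the driver): posting a single
 * buffer through adf_device_post_nocopy().  It illustrates the ownership
 * rules from the kernel-doc above: the arrays handed in must come from
 * kmalloc(); on failure the caller keeps ownership and must free them and
 * drop its dma-buf references itself, while on success ADF frees them.
 * The function name example_post_one_buffer() and the way the adf_buffer
 * is filled in are illustrative only.
 */
static int __maybe_unused example_post_one_buffer(struct adf_device *dev,
		struct adf_interface *intf, struct adf_buffer *template)
{
	struct adf_interface **intfs;
	struct adf_buffer *bufs;
	struct sync_fence *fence;

	intfs = kmalloc(sizeof(intfs[0]), GFP_KERNEL);
	bufs = kmalloc(sizeof(bufs[0]), GFP_KERNEL);
	if (!intfs || !bufs) {
		kfree(bufs);
		kfree(intfs);
		return -ENOMEM;
	}

	intfs[0] = intf;
	bufs[0] = *template;	/* caller already holds refs on the dma-bufs */

	fence = adf_device_post_nocopy(dev, intfs, 1, bufs, 1, NULL, 0);
	if (IS_ERR(fence)) {
		/* on failure, ownership stays with the caller */
		size_t i;

		for (i = 0; i < bufs[0].n_planes; i++)
			dma_buf_put(bufs[0].dma_bufs[i]);
		kfree(bufs);
		kfree(intfs);
		return PTR_ERR(fence);
	}

	/*
	 * On success, ADF now owns intfs, bufs, and the dma-buf references.
	 * The caller only has to drop the returned fence once it no longer
	 * needs to wait on it.
	 */
	sync_fence_put(fence);
	return 0;
}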