/*
 * Release handler for the fw cdev: tears down all per-client state when the
 * last reference to the file is dropped.  Returns 0 unconditionally.
 */
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;

	/*
	 * Unlink from the device's client list first, so the device stops
	 * delivering to this client before its resources are torn down.
	 */
	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	/* Release every remaining resource, then dismantle the idr itself. */
	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	/* Safe to walk without the lock: in_shutdown stops new events above. */
	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	/* Drop this file's reference; frees the client when it is the last. */
	client_put(client);

	return 0;
}
/*
 * Allocate the page array of an isochronous buffer and map it for DMA
 * in the given direction.  On DMA-mapping failure, the pages allocated
 * in the first step are released again before the error is returned.
 * Returns 0 on success or a negative error code.
 */
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
		       int page_count, enum dma_data_direction direction)
{
	int err;

	err = fw_iso_buffer_alloc(buffer, page_count);
	if (err >= 0) {
		err = fw_iso_buffer_map_dma(buffer, card, direction);
		if (err < 0)
			fw_iso_buffer_destroy(buffer, card);
	}

	return err;
}
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) { struct client *client = file->private_data; enum dma_data_direction direction; unsigned long size; int page_count, ret; if (fw_device_is_shutdown(client->device)) return -ENODEV; /* FIXME: We could support multiple buffers, but we don't. */ if (client->buffer.pages != NULL) return -EBUSY; if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; if (vma->vm_start & ~PAGE_MASK) return -EINVAL; client->vm_start = vma->vm_start; size = vma->vm_end - vma->vm_start; page_count = size >> PAGE_SHIFT; if (size & ~PAGE_MASK) return -EINVAL; if (vma->vm_flags & VM_WRITE) direction = DMA_TO_DEVICE; else direction = DMA_FROM_DEVICE; ret = fw_iso_buffer_init(&client->buffer, client->device->card, page_count, direction); if (ret < 0) return ret; ret = fw_iso_buffer_map(&client->buffer, vma); if (ret < 0) fw_iso_buffer_destroy(&client->buffer, client->device->card); return ret; }
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count) { int i; buffer->page_count = 0; buffer->page_count_mapped = 0; buffer->pages = kmalloc_array(page_count, sizeof(buffer->pages[0]), GFP_KERNEL); if (buffer->pages == NULL) return -ENOMEM; for (i = 0; i < page_count; i++) { buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); if (buffer->pages[i] == NULL) break; } buffer->page_count = i; if (i < page_count) { fw_iso_buffer_destroy(buffer, NULL); return -ENOMEM; } return 0; }
/**
 * iso_packets_buffer_destroy - frees packet buffer resources
 * @b: the buffer structure to free
 * @unit: the device at the other end of the stream
 */
void iso_packets_buffer_destroy(struct iso_packets_buffer *b,
				struct fw_unit *unit)
{
	struct fw_card *card = fw_parent_device(unit)->card;

	/* Release the DMA buffer first, then the packet descriptor array. */
	fw_iso_buffer_destroy(&b->iso_buffer, card);
	kfree(b->packets);
}