/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	switch (dmab->dev.type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		snd_free_pages(dmab->area, dmab->bytes);
		break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
	case SNDRV_DMA_TYPE_DEV_IRAM:
		snd_free_dev_iram(dmab);
		break;
#endif /* CONFIG_GENERIC_ALLOCATOR */
	case SNDRV_DMA_TYPE_DEV:
		snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area,
				   dmab->addr);
		break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_free_sgbuf_pages(dmab);
		break;
#endif
	default:
		pr_err("snd-malloc: invalid device type %d\n", dmab->dev.type);
	}
}
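/*
 * Usage sketch (not part of the source): a caller pairs
 * snd_dma_alloc_pages() with snd_dma_free_pages() above; the type is
 * recorded in the snd_dma_buffer, so the free path dispatches on it.
 * "my_dev", the helper name, and the 64 KiB size are illustrative
 * placeholders.
 */
static int example_alloc_and_free(struct device *my_dev)
{
	struct snd_dma_buffer dmab;
	int err;

	/* coherent device memory; returns a negative errno on failure */
	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, my_dev,
				  64 * 1024, &dmab);
	if (err < 0)
		return err;

	/* ... DMA via dmab.addr, CPU access via dmab.area ... */

	snd_dma_free_pages(&dmab);	/* dispatches on dmab.dev.type */
	return 0;
}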
void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages;
	struct snd_dma_buffer tmpb;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	sgbuf->table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->table),
			       GFP_KERNEL);
	if (!sgbuf->table)
		goto _failed;
	sgbuf->page_table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->page_table),
				    GFP_KERNEL);
	if (!sgbuf->page_table)
		goto _failed;

	/* allocate each page */
	for (i = 0; i < pages; i++) {
		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, device,
					PAGE_SIZE, &tmpb) < 0) {
			if (res_size == NULL)
				goto _failed;
			*res_size = size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		sgbuf->table[i].buf = tmpb.area;
		sgbuf->table[i].addr = tmpb.addr;
#if (defined(CONFIG_LS2E_DEV_BOARD) || defined(CONFIG_LS2F_DEV_BOARD)) && \
	defined(CONFIG_DMA_NONCOHERENT)
		sgbuf->page_table[i] = virt_to_page(CAC_ADDR(tmpb.area));
#else
		sgbuf->page_table[i] = virt_to_page(tmpb.area);
#endif
		sgbuf->pages++;
	}

	sgbuf->size = size;
#if (defined(CONFIG_LS2E_DEV_BOARD) || defined(CONFIG_LS2F_DEV_BOARD)) && \
	defined(CONFIG_DMA_NONCOHERENT)
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP | VM_IO,
			  pgprot_noncached(PAGE_KERNEL));
#else
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
#endif
	if (!dmab->area)
		goto _failed;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}
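/*
 * Illustrative sketch of the mapping choice above, isolated: on the
 * LS2E/LS2F boards with noncoherent DMA, the kernel-side mapping must
 * be uncached (VM_IO plus pgprot_noncached) so CPU reads observe
 * device writes without explicit cache maintenance, and struct page
 * pointers are taken from the cached alias via CAC_ADDR().  The helper
 * name is hypothetical.
 */
static void *example_map_sg_pages(struct page **pages, unsigned int npages)
{
#if (defined(CONFIG_LS2E_DEV_BOARD) || defined(CONFIG_LS2F_DEV_BOARD)) && \
	defined(CONFIG_DMA_NONCOHERENT)
	return vmap(pages, npages, VM_MAP | VM_IO,
		    pgprot_noncached(PAGE_KERNEL));
#else
	return vmap(pages, npages, VM_MAP, PAGE_KERNEL);
#endif
}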
void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages;
	struct snd_dma_buffer tmpb;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	sgbuf->table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->table),
			       GFP_KERNEL);
	if (!sgbuf->table)
		goto _failed;
	sgbuf->page_table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->page_table),
				    GFP_KERNEL);
	if (!sgbuf->page_table)
		goto _failed;

	/* allocate each page */
	for (i = 0; i < pages; i++) {
		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, device,
					PAGE_SIZE, &tmpb) < 0) {
			if (res_size == NULL)
				goto _failed;
			*res_size = size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		sgbuf->table[i].buf = tmpb.area;
		sgbuf->table[i].addr = tmpb.addr;
		sgbuf->page_table[i] = virt_to_page(tmpb.area);
		sgbuf->pages++;
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
	if (!dmab->area)
		goto _failed;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}
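/*
 * For reference: the two size helpers used above, as defined in the
 * upstream ALSA sources (snd_sgbuf_aligned_pages() in
 * include/sound/memalloc.h, the table alignment in sound/core/sgbuf.c,
 * where SGBUF_TBL_ALIGN is 32).  Reproduced here as a sketch, not part
 * of this file.
 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)

static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
{
	return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}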
void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
						 chunk, &tmpb) < 0) {
			/* clean up instead of a bare return: a bare
			 * "return NULL" here would leak sgbuf and both
			 * tables allocated above
			 */
			if (!sgbuf->pages)
				goto _failed;
			if (!res_size)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark head */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
	if (!dmab->area)
		goto _failed;
	if (res_size)
		*res_size = sgbuf->size;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}
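/*
 * Illustrative sketch: in the chunked variant above, the head entry of
 * each chunk stores the chunk length in pages in the otherwise-zero
 * low bits of its page-aligned DMA address (the "mark head" line), so
 * address lookups must mask those bits off.  Upstream ALSA's
 * snd_sgbuf_get_addr() performs this masking; the helper below is a
 * hypothetical standalone equivalent.
 */
static dma_addr_t example_sgbuf_get_addr(struct snd_sg_buf *sgbuf,
					 size_t offset)
{
	dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;

	addr &= ~((dma_addr_t)PAGE_SIZE - 1);	/* strip the head marker */
	return addr + offset % PAGE_SIZE;
}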