int AndorUnlockDMABuffers(int iCardNo)
{
	unsigned long addr;
	unsigned int sz;

	if (DMA_MODE == 0) {
		if (gpAndorDev[iCardNo].AndorDMABuffer[0].Size != 0) {
			for (addr = (unsigned long)gpAndorDev[iCardNo].AndorDMABuffer[0].VirtualAdd,
			     sz = gpAndorDev[iCardNo].AndorDMABuffer[0].Size;
			     sz > 0;
			     addr += PAGE_SIZE, sz -= PAGE_SIZE) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
				mem_map_unreserve(MAP_NR(addr));
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
				mem_map_unreserve(virt_to_page(addr));
#else
				ClearPageReserved(virt_to_page(addr));
#endif
			}
			free_pages((unsigned long)gpAndorDev[iCardNo].AndorDMABuffer[0].VirtualAdd,
				   DMA_PAGE_ORD);
		}
		if (gpAndorDev[iCardNo].AndorDMABuffer[1].Size != 0) {
			for (addr = (unsigned long)gpAndorDev[iCardNo].AndorDMABuffer[1].VirtualAdd,
			     sz = gpAndorDev[iCardNo].AndorDMABuffer[1].Size;
			     sz > 0;
			     addr += PAGE_SIZE, sz -= PAGE_SIZE) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
				mem_map_unreserve(MAP_NR(addr));
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
				mem_map_unreserve(virt_to_page(addr));
#else
				ClearPageReserved(virt_to_page(addr));
#endif
			}
			free_pages((unsigned long)gpAndorDev[iCardNo].AndorDMABuffer[1].VirtualAdd,
				   DMA_PAGE_ORD);
		}
	} else {
		if (gpAndorDev[iCardNo].AndorDMABuffer[0].VirtualAdd != 0)
			iounmap(gpAndorDev[iCardNo].AndorDMABuffer[0].VirtualAdd);
		if (gpAndorDev[iCardNo].AndorDMABuffer[1].VirtualAdd != 0)
			iounmap(gpAndorDev[iCardNo].AndorDMABuffer[1].VirtualAdd);
	}

	gpAndorDev[iCardNo].AndorDMABuffer[0].VirtualAdd = 0;
	gpAndorDev[iCardNo].AndorDMABuffer[0].Physical = 0;
	gpAndorDev[iCardNo].AndorDMABuffer[0].Size = 0;
	gpAndorDev[iCardNo].AndorDMABuffer[1].VirtualAdd = 0;
	gpAndorDev[iCardNo].AndorDMABuffer[1].Physical = 0;
	gpAndorDev[iCardNo].AndorDMABuffer[1].Size = 0;

	return 0;
}
void sound_free_dmap(int dev, struct dma_buffparms *dmap)
{
	int sz, size, i;
	unsigned long start_addr, end_addr;

	if (dmap->raw_buf == NULL)
		return;

	if (dmap->mapping_flags & DMA_MAP_MAPPED)
		return;		/* Don't free mmapped buffer. Will use it next time */

	/* Find the page order that covers the buffer size. */
	for (sz = 0, size = PAGE_SIZE; size < audio_devs[dev]->buffsize; sz++, size <<= 1)
		;

	start_addr = (unsigned long) dmap->raw_buf;
	end_addr = start_addr + audio_devs[dev]->buffsize;

	/* Unreserve every page of the buffer before releasing it. */
	for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
		mem_map_unreserve(i);

	free_pages((unsigned long) dmap->raw_buf, sz);
	dmap->raw_buf = NULL;
}
static inline void dmafree(void *addr, size_t size)
{
	if (size > 0) {
		struct page *page;

		for (page = virt_to_page((unsigned long)addr);
		     page < virt_to_page((unsigned long)addr + size);
		     page++)
			mem_map_unreserve(page);

		free_pages((unsigned long) addr, get_order(size));
	}
}
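/*
 * For reference only: a minimal sketch of the allocation side that a
 * dmafree() like the one above would undo. The helper name dmaalloc() and
 * the GFP flags are assumptions, not code from the driver quoted here; the
 * point is the pairing of __get_free_pages() with mem_map_reserve() on each
 * page, mirroring the unreserve loop performed on free.
 */
static inline void *dmaalloc(size_t size)
{
	unsigned long addr;
	struct page *page;

	if (size == 0)
		return NULL;

	/* Physically contiguous, DMA-capable allocation. */
	addr = __get_free_pages(GFP_KERNEL | GFP_DMA, get_order(size));
	if (!addr)
		return NULL;

	/* Mark every page reserved, mirroring the loop in dmafree(). */
	for (page = virt_to_page(addr);
	     page < virt_to_page(addr + size);
	     page++)
		mem_map_reserve(page);

	return (void *)addr;
}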
static int alloc_buffer(struct vino_device *v, int size)
{
	int count, i, j, err;

	err = i = 0;
	count = (size / PAGE_SIZE + 4) & ~3;
	v->desc = (unsigned long *) kmalloc(count * sizeof(unsigned long),
					    GFP_KERNEL);
	if (!v->desc)
		return -ENOMEM;

	v->dma_desc.cpu = pci_alloc_consistent(NULL,
				PAGE_RATIO * (count + 4) * sizeof(dma_addr_t),
				&v->dma_desc.dma);
	if (!v->dma_desc.cpu) {
		err = -ENOMEM;
		goto out_free_desc;
	}

	/* Allocate, map and reserve the data pages one at a time. */
	while (i < count) {
		dma_addr_t dma;

		v->desc[i] = get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!v->desc[i])
			break;

		dma = pci_map_single(NULL, (void *)v->desc[i], PAGE_SIZE,
				     PCI_DMA_FROMDEVICE);
		for (j = 0; j < PAGE_RATIO; j++)
			v->dma_desc.cpu[PAGE_RATIO * i + j] =
				dma + VINO_PAGE_SIZE * j;

		mem_map_reserve(virt_to_page(v->desc[i]));
		i++;
	}

	v->dma_desc.cpu[PAGE_RATIO * count] = VINO_DESC_STOP;

	/* Ran out of memory part way through: undo everything done so far. */
	if (i-- < count) {
		while (i >= 0) {
			mem_map_unreserve(virt_to_page(v->desc[i]));
			pci_unmap_single(NULL, v->dma_desc.cpu[PAGE_RATIO * i],
					 PAGE_SIZE, PCI_DMA_FROMDEVICE);
			free_page(v->desc[i]);
			i--;
		}
		pci_free_consistent(NULL,
				    PAGE_RATIO * (count + 4) * sizeof(dma_addr_t),
				    (void *)v->dma_desc.cpu, v->dma_desc.dma);
		err = -ENOBUFS;
		goto out_free_desc;
	}

	v->page_count = count;
	return 0;

out_free_desc:
	kfree(v->desc);
	return err;
}
static inline void dmafree(void *addr, size_t size)
{
	if (size > 0) {
		int i;

		for (i = MAP_NR((unsigned long)addr);
		     i < MAP_NR((unsigned long)addr + size);
		     i++)
			mem_map_unreserve(i);

		free_pages((unsigned long) addr, __get_order(size));
	}
}
static void reserve_pages(struct contiguous_page **array, int nr, int flag)
{
	int i;

	for (i = 0; i < nr; ++i) {
		struct page *page = array[i]->page;

		if (flag) {
			/* Take a reference and mark the page reserved. */
			atomic_inc(&page->count);
			mem_map_reserve(page);
		} else {
			/* Drop the reference and clear the reserved bit. */
			atomic_dec(&page->count);
			mem_map_unreserve(page);
		}
	}
}
void rvfree(void *mem, unsigned long size)
{
	unsigned long adr, page;

	if (mem) {
		adr = (unsigned long) mem;
		while (size > 0) {
#if LINUX_VERSION_CODE < 0x020300
			page = kvirt_to_phys(adr);
			mem_map_unreserve(MAP_NR(phys_to_virt(page)));
#else
			page = kvirt_to_pa(adr);
			mem_map_unreserve(virt_to_page(__va(page)));
#endif
			adr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		vfree(mem);
	}
}
static void free_buffer(struct vino_device *v)
{
	int i;

	for (i = 0; i < v->page_count; i++) {
		mem_map_unreserve(virt_to_page(v->desc[i]));
		pci_unmap_single(NULL, v->dma_desc.cpu[PAGE_RATIO * i],
				 PAGE_SIZE, PCI_DMA_FROMDEVICE);
		free_page(v->desc[i]);
	}
	pci_free_consistent(NULL,
			    PAGE_RATIO * (v->page_count + 4) * sizeof(dma_addr_t),
			    (void *)v->dma_desc.cpu, v->dma_desc.dma);
	kfree(v->desc);
}
static void sscape_free_dma(sscape_info *devc)
{
	int sz, size;
	unsigned long start_addr, end_addr;
	struct page *page;

	if (devc->raw_buf == NULL)
		return;

	/* Find the page order that covers the buffer size. */
	for (sz = 0, size = PAGE_SIZE; size < devc->buffsize; sz++, size <<= 1)
		;

	start_addr = (unsigned long) devc->raw_buf;
	end_addr = start_addr + devc->buffsize;

	for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
		mem_map_unreserve(page);

	free_pages((unsigned long) devc->raw_buf, sz);
	devc->raw_buf = NULL;
}
void rvfree(void *mem, signed long size)
{
	unsigned long adr, page;

	if (!mem)
		return;

	adr = (unsigned long) mem;
	while (size > 0) {
		page = kvirt_to_pa(adr);
		mem_map_unreserve(virt_to_page((unsigned long)__va(page)));
		adr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	vfree(mem);
}
void bfree(void *mem, unsigned long size)
{
	if (mem) {
		unsigned long adr = (unsigned long)mem;
		unsigned long siz = size;

		while (siz > 0) {
			mem_map_unreserve(virt_to_page(phys_to_virt(adr)));
			adr += PAGE_SIZE;
			siz -= PAGE_SIZE;
		}
#ifdef CONFIG_BIGPHYS_AREA
		bigphysarea_free_pages(mem);
#else
		free_pages((unsigned long)mem, get_order(size));
#endif
	}
}
static void rvfree(void *mem, unsigned long size)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	struct page *page;
#endif
	unsigned long adr;

	if (!mem)
		return;

	adr = (unsigned long) mem;
	while ((long) size > 0) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
		page = vmalloc_to_page((void *)adr);
		mem_map_unreserve(page);
#else
		ClearPageReserved(vmalloc_to_page((void *)adr));
#endif
		adr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	vfree(mem);
}
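/*
 * For reference only: a minimal sketch of the allocation side that the
 * rvfree() variants above undo. The name rvmalloc() and the exact flags are
 * assumptions based on the common video4linux helper pattern (vmalloc_32()
 * plus per-page reservation), not code taken from any driver quoted here.
 * The reservation is what later allows the pages to be remapped to user
 * space with remap_page_range()/vm_insert-style mmap handlers.
 */
static void *rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long adr;

	size = PAGE_ALIGN(size);
	mem = vmalloc_32(size);		/* 32-bit addressable, DMA-friendly */
	if (!mem)
		return NULL;

	memset(mem, 0, size);		/* avoid leaking stale data to user space */
	adr = (unsigned long) mem;
	while ((long) size > 0) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
		mem_map_reserve(vmalloc_to_page((void *)adr));
#else
		SetPageReserved(vmalloc_to_page((void *)adr));
#endif
		adr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return mem;
}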
static void dev_nvram_exit(void)
{
	int order = 0;
	struct page *page, *end;

	if (nvram_handle)
		devfs_unregister(nvram_handle);

	if (nvram_major >= 0)
		devfs_unregister_chrdev(nvram_major, "nvram");

	if (nvram_mtd)
		put_mtd_device(nvram_mtd);

	while ((PAGE_SIZE << order) < NVRAM_SPACE)
		order++;

	end = virt_to_page(nvram_buf + (PAGE_SIZE << order) - 1);
	for (page = virt_to_page(nvram_buf); page <= end; page++)
		mem_map_unreserve(page);

	_nvram_exit();
}