static void vperfctr_free(struct vperfctr *perfctr)
{
	debug_free(perfctr);
	ClearPageReserved(virt_to_page(perfctr));
	free_page((unsigned long)perfctr);
	dec_nrctrs();
}
/**
 * Frees the physical pages allocated by the rtR0MemObjLinuxAllocPages() call.
 *
 * This method does NOT free the object.
 *
 * @param   pMemLnx     The object whose physical pages should be freed.
 */
static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx)
{
    size_t iPage = pMemLnx->cPages;
    if (iPage > 0)
    {
        /*
         * Restore the page flags.
         */
        while (iPage-- > 0)
        {
            ClearPageReserved(pMemLnx->apPages[iPage]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
#else
            MY_SET_PAGES_NOEXEC(pMemLnx->apPages[iPage], 1);
#endif
        }

        /*
         * Free the pages.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
        if (!pMemLnx->fContiguous)
        {
            iPage = pMemLnx->cPages;
            while (iPage-- > 0)
                __free_page(pMemLnx->apPages[iPage]);
        }
        else
#endif
            __free_pages(pMemLnx->apPages[0], rtR0MemObjLinuxOrder(pMemLnx->cPages));

        pMemLnx->cPages = 0;
    }
}
static int uninorth_free_gatt_table(void)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = agp_bridge->current_size;
	page_order = A_SIZE_32(temp)->page_order;

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

	table = (char *) agp_bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_pages((unsigned long) agp_bridge->gatt_table_real, page_order);

	return 0;
}
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
				zone_end_pfn);

		/* XEN: init high-mem pages outside initial allocation. */
		if (zone_start_pfn < xen_start_info->nr_pages)
			zone_start_pfn = xen_start_info->nr_pages;
		for (; zone_start_pfn < zone_end_pfn; zone_start_pfn++) {
			ClearPageReserved(pfn_to_page(zone_start_pfn));
			init_page_count(pfn_to_page(zone_start_pfn));
		}
	}
	totalram_pages += totalhigh_pages;
}
void __init prom_free_prom_memory(void)
{
	unsigned long addr, end;

	/*
	 * Free everything below the kernel itself but leave
	 * the first page reserved for the exception handlers.
	 */

#if defined(CONFIG_DECLANCE) || defined(CONFIG_DECLANCE_MODULE)
	/*
	 * Leave 128 KB reserved for Lance memory for
	 * IOASIC DECstations.
	 *
	 * XXX: save this address for use in dec_lance.c?
	 */
	if (IOASIC)
		end = __pa(&_text) - 0x00020000;
	else
#endif
		end = __pa(&_text);

	addr = PAGE_SIZE;
	while (addr < end) {
		ClearPageReserved(virt_to_page(__va(addr)));
		set_page_count(virt_to_page(__va(addr)), 1);
		free_page((unsigned long)__va(addr));
		addr += PAGE_SIZE;
	}

	printk("Freeing unused PROM memory: %ldk freed\n",
	       (end - PAGE_SIZE) >> 10);
}
static int __init exemple_init(void)
{
	int err;
	struct page *pg = NULL;

	exemple_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (exemple_buffer == NULL)
		return -ENOMEM;
	exemple_buffer[0] = '\0';

	pg = virt_to_page(exemple_buffer);
	SetPageReserved(pg);

	err = misc_register(&exemple_misc_driver);
	if (err != 0) {
		ClearPageReserved(pg);
		kfree(exemple_buffer);
		exemple_buffer = NULL;
		return err;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)
	init_timer(&exemple_timer);
	exemple_timer.function = exemple_timer_function;
#else
	timer_setup(&exemple_timer, exemple_timer_function, 0);
#endif
	exemple_timer.expires = jiffies + HZ;
	add_timer(&exemple_timer);

	return 0;
}
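The matching module exit is not shown above. As a minimal sketch, assuming the exemple_buffer, exemple_timer and exemple_misc_driver globals set up by the init routine, the teardown would undo SetPageReserved() before freeing the buffer:

/* Hypothetical exit routine for the module above (not part of the original snippet). */
static void __exit exemple_exit(void)
{
	del_timer_sync(&exemple_timer);			/* stop the periodic timer first */
	misc_deregister(&exemple_misc_driver);		/* remove the misc device */
	ClearPageReserved(virt_to_page(exemple_buffer));	/* undo SetPageReserved() from init */
	kfree(exemple_buffer);
	exemple_buffer = NULL;
}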
static int device_release(struct inode *inode, struct file *filp)
{
	struct mmap_info *info = NULL;
	struct page *page = NULL;
	unsigned i = 0;
	unsigned num_pages = 0;

	pr_info("device_release: %d.%d\n", MAJOR(inode->i_rdev), MINOR(inode->i_rdev));

	IS_DONE_FLAG = 1;

	// Free RX buffer pointer queues
	info = filp->private_data;
	if (info->data) {
		printk("[GNoM_km] on release: (%d) <%p>\n", info->reference_cnt, info->data);
		num_pages = (1 << K_PAGE_ORDER);
		for (i = 0; i < num_pages; ++i) {
			// Unpin all of the allocated pages
			page = virt_to_page((size_t)info->data + (i * CPU_PAGE_SIZE));
			if (PageReserved(page)) {
				ClearPageReserved(page);
			}
		}
		free_pages((unsigned long)info->data, K_PAGE_ORDER);	// Free 2^K_PAGE_ORDER pages
		kfree(info);
		filp->private_data = NULL;
	}

	printk("[GNoM_km]: %u mmapped pages successfully released\n", i);

	return 0;
}
void spu_free_lscsa(struct spu_state *csa)
{
	unsigned char *p;
	int i;

	if (!csa->use_big_pages) {
		spu_free_lscsa_std(csa);
		return;
	}
	csa->use_big_pages = 0;

	if (csa->lscsa == NULL)
		goto free_pages;

	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vunmap(csa->lscsa);
	csa->lscsa = NULL;

free_pages:
	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
		if (csa->lscsa_pages[i])
			__free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
}
static void ati_free_page_map(ati_page_map *page_map)
{
	unmap_page_from_agp(virt_to_page(page_map->real));
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}
/*
 * Free the memory-mapped buffer memory allocated for a
 * videobuf_buffer and the associated scatterlist.
 */
static void omap24xxcam_vbq_free_mmap_buffer(struct videobuf_buffer *vb)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
	size_t alloc_size;
	struct page *page;
	int i;

	if (dma->sglist == NULL)
		return;

	i = dma->sglen;
	while (i) {
		i--;
		alloc_size = sg_dma_len(&dma->sglist[i]);
		page = sg_page(&dma->sglist[i]);
		do {
			ClearPageReserved(page++);
		} while (alloc_size -= PAGE_SIZE);
		__free_pages(sg_page(&dma->sglist[i]),
			     get_order(sg_dma_len(&dma->sglist[i])));
	}

	kfree(dma->sglist);
	dma->sglist = NULL;
}
static void dev_nvram_exit(void)
{
	int order = 0;
	struct page *page, *end;

	if (nvram_class) {
		class_device_destroy(nvram_class, MKDEV(nvram_major, 0));
		class_destroy(nvram_class);
	}

	if (nvram_major >= 0)
		unregister_chrdev(nvram_major, "nvram");

	if (nvram_mtd)
		put_mtd_device(nvram_mtd);

	while ((PAGE_SIZE << order) < nvram_space)
		order++;
	end = virt_to_page(nvram_buf + (PAGE_SIZE << order) - 1);
	for (page = virt_to_page(nvram_buf); page <= end; page++)
		ClearPageReserved(page);

	_nvram_exit();
}
/**
 * Frees memory allocated using RTMemContAlloc().
 *
 * @param   pv      Pointer returned by RTMemContAlloc().
 * @param   cb      The cb parameter passed to RTMemContAlloc().
 */
RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
{
    if (pv)
    {
        int cOrder;
        unsigned cPages;
        unsigned iPage;
        struct page *paPages;

        /* validate */
        AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));
        Assert(cb > 0);

        /* calc order and get pages */
        cb = RT_ALIGN_Z(cb, PAGE_SIZE);
        cPages = cb >> PAGE_SHIFT;
        cOrder = CalcPowerOf2Order(cPages);
        paPages = virt_to_page(pv);

        /*
         * Restore page attributes, then free the pages.
         */
        for (iPage = 0; iPage < cPages; iPage++)
        {
            ClearPageReserved(&paPages[iPage]);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 20) /** @todo find the exact kernel where change_page_attr was introduced. */
            MY_SET_PAGES_NOEXEC(&paPages[iPage], 1);
#endif
        }
        __free_pages(paPages, cOrder);
    }
}
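For context, a minimal usage sketch of the alloc/free pairing. It assumes the RTMemContAlloc() prototype from the same IPRT ring-0 API (returns the virtual address and writes the physical address to *pPhys); the buffer size and the surrounding function are made up for illustration:

/* Hypothetical caller showing how RTMemContFree() pairs with RTMemContAlloc(). */
#include <iprt/mem.h>
#include <iprt/err.h>

static int allocAndFreeDmaBuffer(void)
{
    RTCCPHYS Phys;
    size_t   cb = 4 * 4096;                   /* four pages, contiguous */
    void    *pv = RTMemContAlloc(&Phys, cb);
    if (!pv)
        return VERR_NO_MEMORY;

    /* ... program a device with Phys, access the buffer through pv ... */

    RTMemContFree(pv, cb);                    /* clears the reserved bits and frees the pages */
    return VINF_SUCCESS;
}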
static int amd_create_page_map(struct amd_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL)
		return -ENOMEM;

#ifndef CONFIG_X86
	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
					     PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();
#else
	set_memory_uc((unsigned long)page_map->real, 1);
	page_map->remapped = page_map->real;
#endif

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		writel(agp_bridge->scratch_page, page_map->remapped+i);
		readl(page_map->remapped+i);	/* PCI Posting. */
	}

	return 0;
}
static void Device1Exit(void)
{
	int i;
	UWORD *pTemp = (UWORD *)pUsbSpeed;

	dUsbExit();
	pUsbSpeed = &UsbSpeedDefault;

	for (i = 0; i < NPAGES * PAGE_SIZE; i += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(((unsigned long)pTemp) + i));
//#define DEBUG
#undef DEBUG
#ifdef DEBUG
		printk("  %s memory page %d unmapped\n", DEVICE1_NAME, i);
#endif
	}

	kfree(kmalloc_ptr);

	misc_deregister(&Device1);

//#define DEBUG
#undef DEBUG
#ifdef DEBUG
	printk("  %s device unregistered\n", DEVICE1_NAME);
#endif
}
static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}

	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
					     PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
		writel(agp_bridge->scratch_page, page_map->remapped+i);

	return 0;
}
static void dev_nvram_exit(void)
{
	int order = 0;
	struct page *page, *end;

	if (nvram_class) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
		class_device_destroy(nvram_class, MKDEV(nvram_major, 0));
#else /* 2.6.36 and up */
		device_destroy(nvram_class, MKDEV(nvram_major, 0));
#endif
		class_destroy(nvram_class);
	}

	if (nvram_major >= 0)
		unregister_chrdev(nvram_major, "nvram");

	if (nvram_mtd)
		put_mtd_device(nvram_mtd);

	while ((PAGE_SIZE << order) < MAX_NVRAM_SPACE)
		order++;
	end = virt_to_page(nvram_buf + (PAGE_SIZE << order) - 1);
	for (page = virt_to_page(nvram_buf); page <= end; page++)
		ClearPageReserved(page);

	_nvram_exit();
}
static int ati_create_page_map(ati_page_map *page_map)
{
	int i, err = 0;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL)
		return -ENOMEM;

	SetPageReserved(virt_to_page(page_map->real));
	err = map_page_into_agp(virt_to_page(page_map->real));
	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
					    PAGE_SIZE);
	if (page_map->remapped == NULL || err) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	/*CACHE_FLUSH();*/
	global_cache_flush();

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		writel(agp_bridge->scratch_page, page_map->remapped+i);
		readl(page_map->remapped+i);	/* PCI Posting. */
	}

	return 0;
}
void unreserve_memory(unsigned long base, unsigned long len)
{
	struct page *page, *page_end;

	page_end = virt_to_page(base + len - 1);
	for (page = virt_to_page(base); page <= page_end; page++)
		ClearPageReserved(page);
}
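The reserving counterpart is not shown here. As a minimal sketch, assuming it simply mirrors the loop above over the same page range:

/* Hypothetical counterpart: mark every page backing the buffer as reserved. */
void reserve_memory(unsigned long base, unsigned long len)
{
	struct page *page, *page_end;

	page_end = virt_to_page(base + len - 1);
	for (page = virt_to_page(base); page <= page_end; page++)
		SetPageReserved(page);
}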
void mmap_close(struct vm_area_struct *vma)
{
	struct mmap_info *info = (struct mmap_info *)vma->vm_private_data;
	struct page *page;
	int i;

	printk("[GNoM_km]: Mmap_closed called...\n");

#ifdef DO_GNOM_TX
	for (i = 0; i < 1024; ++i) {	// Unpin all the pages
#else
	for (i = 0; i < 512; ++i) {	// Unpin all the pages
#endif
		page = virt_to_page((size_t)info->data + (i * CPU_PAGE_SIZE));
		if (PageReserved(page)) {
			ClearPageReserved(page);
		}
	}

	info->reference_cnt--;
}

static int device_mmap(struct file *filp, struct vm_area_struct *vma)
{
	printk("[GNoM_km]: device_mmap called...\n");
	vma->vm_ops = &mmap_vm_ops;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = filp->private_data;
	mmap_open(vma);
	return 0;
}
static inline void ipu_buf_free(unsigned int phys_addr)
{
	unsigned char *virt_addr, *addr;
	int cnt, i;

	if (phys_addr == 0)
		return;

	for (cnt = 0; cnt < IPU_BUF_MAX; ++cnt)
		if (phys_addr == ipu_buf[cnt].addr)
			break;

	if (cnt == IPU_BUF_MAX) {
		/* addr not in the ipu buffers */
		printk("Invalid addr:0x%08x\n", (unsigned int)phys_addr);
		return;		/* avoid indexing past the end of ipu_buf[] */
	}

	virt_addr = (unsigned char *)phys_to_virt(ipu_buf[cnt].addr);
	addr = virt_addr;
	for (i = 0; i < (1 << ipu_buf[cnt].page_shift); i++) {
		ClearPageReserved(virt_to_page(addr));
		addr += PAGE_SIZE;
	}

	if (cnt == 0)
		ipu_del_wired_entry();

	free_pages((unsigned long)virt_addr, ipu_buf[cnt].page_shift);

	ipu_buf[cnt].addr = 0;
	ipu_buf[cnt].page_shift = 0;
}
void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}
void online_page(struct page *page)
{
	ClearPageReserved(page);
	set_page_count(page, 0);
	free_cold_page(page);
	totalram_pages++;
	num_physpages++;
}
/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}
static void gnttab_page_free(struct page *page, unsigned int order)
{
	BUG_ON(order);
	ClearPageForeign(page);
	gnttab_reset_grant_page(page);
	ClearPageReserved(page);
	put_page(page);
}
static void unmark_pages(void *res, int order)
{
	struct page *page = virt_to_page(res);
	struct page *last_page = page + (1 << order);

	while (page < last_page)
		ClearPageReserved(page++);
	snd_allocated_pages -= 1 << order;
}
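Presumably the allocation path pairs this with a mark_pages() helper that reserves the same range and bumps the counter; a minimal sketch under that assumption:

/* Hypothetical counterpart: reserve each page of a 2^order allocation. */
static void mark_pages(void *res, int order)
{
	struct page *page = virt_to_page(res);
	struct page *last_page = page + (1 << order);

	while (page < last_page)
		SetPageReserved(page++);
	snd_allocated_pages += 1 << order;
}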
void unreserve_memory(unsigned long base, unsigned long len)
{
	struct page *page, *page_end;

	// if(unlikely(enable_debug)) printk("[DNA] unreserve_memory()\n");

	page_end = virt_to_page(base + len - 1);
	for (page = virt_to_page(base); page <= page_end; page++)
		ClearPageReserved(page);
}
static void __exit dev_exit(void)
{
	/* Unregister the misc device */
	misc_deregister(&misc);
	/* Clear the reserved flag on the buffer's page */
	ClearPageReserved(virt_to_page(buffer));
	/* Free the buffer */
	kfree(buffer);
}
static void __exit demo_exit(void)
{
	struct page *page;

	misc_deregister(&misc);
	for (page = virt_to_page(buffer); page < virt_to_page(buffer + BUFFER_SIZE); page++)
		ClearPageReserved(page);
	kfree(buffer);
	printk(DEVICE_NAME "\texit.\n");
}
static void amd_free_page_map(struct amd_page_map *page_map)
{
#ifndef CONFIG_X86
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
#else
	set_memory_wb((unsigned long)page_map->real, 1);
#endif
	free_page((unsigned long) page_map->real);
}
static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}
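The allocation side this undoes stores the order in page_private() and sets the reserved bit on each page of the group. A rough sketch of that shape (the exact kexec allocator may differ between kernel versions):

/* Sketch of the matching allocator: record the order and reserve each page. */
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages = alloc_pages(gfp_mask, order);

	if (pages) {
		unsigned int count = 1 << order, i;

		pages->mapping = NULL;
		set_page_private(pages, order);		/* read back by kimage_free_pages() */
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}
	return pages;
}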