int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
{
	pt->fl_table = (u32 *)__get_free_pages(GFP_KERNEL,
					       get_order(SZ_16K));
	if (!pt->fl_table)
		return -ENOMEM;

	memset(pt->fl_table, 0, SZ_16K);
	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

	return 0;
}
ssize_t scullp_write(struct file *filp, const char *buf, size_t count,
		     loff_t *f_pos)
{
	ScullP_Dev *dev = filp->private_data;
	ScullP_Dev *dptr;
	int quantum = PAGE_SIZE << dev->order;
	int qset = dev->qset;
	int itemsize = quantum * qset;
	int item, s_pos, q_pos, rest;
	ssize_t retval = -ENOMEM; /* our most likely error */

	if (down_interruptible(&dev->sem))
		return -ERESTARTSYS;

	/* find listitem, qset index and offset in the quantum */
	item = ((long) *f_pos) / itemsize;
	rest = ((long) *f_pos) % itemsize;
	s_pos = rest / quantum;
	q_pos = rest % quantum;

	/* follow the list up to the right position */
	dptr = scullp_follow(dev, item);

	if (!dptr->data) {
		dptr->data = kmalloc(qset * sizeof(void *), GFP_KERNEL);
		if (!dptr->data)
			goto nomem;
		memset(dptr->data, 0, qset * sizeof(char *));
	}
	/* Here's the allocation of a single quantum */
	if (!dptr->data[s_pos]) {
		dptr->data[s_pos] =
		    (void *)__get_free_pages(GFP_KERNEL, dptr->order);
		if (!dptr->data[s_pos])
			goto nomem;
		memset(dptr->data[s_pos], 0, PAGE_SIZE << dptr->order);
	}
	/* write only up to the end of this quantum */
	if (count > quantum - q_pos)
		count = quantum - q_pos;
	if (copy_from_user(dptr->data[s_pos] + q_pos, buf, count)) {
		retval = -EFAULT;
		goto nomem;
	}
	*f_pos += count;

	/* update the size */
	if (dev->size < *f_pos)
		dev->size = *f_pos;
	up(&dev->sem);
	return count;

nomem:
	up(&dev->sem);
	return retval;
}
/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates the physically contiguous pages with the given size.
 *
 * Return: The pointer of the buffer, or %NULL if not enough memory.
 */
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
	int pg;

	if (WARN_ON(!size))
		return NULL;
	if (WARN_ON(!gfp_flags))
		return NULL;
	gfp_flags |= __GFP_COMP;	/* compound page lets parts be mapped */
	pg = get_order(size);
	return (void *) __get_free_pages(gfp_flags, pg);
}
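Pages handed out this way must go back through free_pages() with the same order that get_order(size) produced at allocation time. A minimal sketch of the matching release helper, written by symmetry with the allocator above (ALSA names its counterpart snd_free_pages(); treat this body as a reconstruction rather than a verbatim quote):

void snd_free_pages(void *ptr, size_t size)
{
	if (ptr == NULL)
		return;
	/* recompute the order from the same size used at alloc time */
	free_pages((unsigned long) ptr, get_order(size));
}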
static int kerneladdressspace_init(void)
{
	int retval;

	kernelkmalloc = kmalloc(100, GFP_KERNEL);
	if (!kernelkmalloc) {	/* kmalloc() returns NULL on failure, not ERR_PTR */
		printk("kmalloc failed.\n");
		retval = -ENOMEM;
		goto failure_kmalloc;
	}
	printk("kmalloc address: 0x%lx\n", (unsigned long)kernelkmalloc);
	addr = virt_to_phys((void *)kernelkmalloc);
	paddr = phys_to_virt(addr);
	printk("kernel kmalloc phys address: 0x%lx, virt address: 0x%lx\n",
	       addr, (unsigned long)paddr);

	kernelpage = (unsigned char *)__get_free_pages(GFP_KERNEL, 2);
	if (!kernelpage) {	/* __get_free_pages() returns 0 on failure */
		printk("get free pages failed.\n");
		retval = -ENOMEM;
		goto failure_get_free_pages;
	}
	printk("kernel __get_free_pages address: 0x%lx\n",
	       (unsigned long)kernelpage);
	addr = virt_to_phys((void *)kernelpage);
	paddr = phys_to_virt(addr);
	printk("kernel get free pages phys address: 0x%lx, virt address: 0x%lx\n",
	       addr, (unsigned long)paddr);

	kernelvmalloc = vmalloc(1024 * 1024);
	if (!kernelvmalloc) {	/* vmalloc() also returns NULL on failure */
		printk("vmalloc failed.\n");
		retval = -ENOMEM;
		goto failure_vmalloc;
	}
	printk("vmalloc address: 0x%lx\n", (unsigned long)kernelvmalloc);
	/*
	 * Note: virt_to_phys() is only valid for the linear mapping;
	 * vmalloc memory is not physically contiguous, so the value
	 * printed here is meaningless. Use vmalloc_to_pfn() instead.
	 */
	addr = virt_to_phys((void *)kernelvmalloc);
	paddr = phys_to_virt(addr);
	printk("vmalloc phys address: 0x%lx, virt address: 0x%lx\n",
	       addr, (unsigned long)paddr);
	return 0;

failure_vmalloc:
	free_pages((unsigned long)kernelpage, 2);
	kernelpage = NULL;
failure_get_free_pages:
	kfree(kernelkmalloc);
	kernelkmalloc = NULL;
failure_kmalloc:
	return retval;
}
/*
 * on rk29 dyn desktop:
 * [   63.010000] @CALL@ rk28_memcpy(0xc070148c),argc=0
 * [   63.100000] need 90342667 ns to copy 4096 Kbytes
 * [   63.140000] need 38885333 ns to copy 1024 Kwords
 * [   63.160000] need 13030791 ns to memcpy 4096 Kbytes
 * [   63.160000] @return 0x2c(44)
 * # echo rk28_memcpy > cal
 * [   65.680000] @CALL@ rk28_memcpy(0xc070148c),argc=0
 * [   65.740000] need 57542915 ns to copy 4096 Kbytes
 * [   65.770000] need 30017666 ns to copy 1024 Kwords
 * [   65.810000] need 32929083 ns to memcpy 4096 Kbytes
 * [   65.810000] @return 0x2c(44)
 * # echo rk28_memcpy > cal
 * [   67.400000] @CALL@ rk28_memcpy(0xc070148c),argc=0
 * [   67.460000] need 52001876 ns to copy 4096 Kbytes
 * [   67.490000] need 29809209 ns to copy 1024 Kwords
 * [   67.510000] need 20318709 ns to memcpy 4096 Kbytes
 * [   67.510000] @return 0x2c(44)
 * # echo rk28_memcpy > cal
 * [   71.800000] @CALL@ rk28_memcpy(0xc070148c),argc=0
 * [   71.860000] need 57265835 ns to copy 4096 Kbytes
 * [   71.900000] need 32522127 ns to copy 1024 Kwords
 * [   71.930000] need 27615668 ns to memcpy 4096 Kbytes
 * [   71.930000] @return 0x2c(44)
 *
 * static desktop:
 * [  171.880000] @CALL@ rk28_memcpy(0xc070148c),argc=0
 * [  171.910000] need 27547043 ns to copy 4096 Kbytes
 * [  171.920000] need 8993501 ns to copy 1024 Kwords
 * [  171.930000] need 6791584 ns to memcpy 4096 Kbytes
 * [  171.930000] @return 0x2c(44)
 * # echo rk28_memcpy > cal
 * [  174.050000] @CALL@ rk28_memcpy(0xc070148c),argc=0
 * [  174.080000] need 26437334 ns to copy 4096 Kbytes
 * [  174.090000] need 8701667 ns to copy 1024 Kwords
 * [  174.100000] need 6639375 ns to memcpy 4096 Kbytes
 * [  174.100000] @return 0x2c(44)
 * # echo rk28_memcpy > cal
 * [  176.290000] @CALL@ rk28_memcpy(0xc070148c),argc=0
 * [  176.320000] need 26692502 ns to copy 4096 Kbytes
 * [  176.330000] need 8659126 ns to copy 1024 Kwords
 * [  176.340000] need 6702001 ns to memcpy 4096 Kbytes
 * [  176.340000] @return 0x2c(44)
 * # echo rk28_memcpy > cal
 * [  177.710000] @CALL@ rk28_memcpy(0xc070148c),argc=0
 * [  177.740000] need 27578291 ns to copy 4096 Kbytes
 * [  177.750000] need 8740042 ns to copy 1024 Kwords
 * [  177.760000] need 6727458 ns to memcpy 4096 Kbytes
 * [  177.760000] @return 0x2c(44)
 */
int rkmemcpy(void)
{
#define PAGE_ORDER 7
	ktime_t now0, now1;
	unsigned long pg;
	unsigned long src = 0xc0010000;
	int i = 8, k = 0;
	int bytes = ((1 << PAGE_ORDER) * PAGE_SIZE);

	pg = __get_free_pages(GFP_KERNEL, PAGE_ORDER);
	if (!pg) {
		printk("alloc %d pages total %dK bytes failed\n",
		       (1 << PAGE_ORDER), bytes / 1024);
		return -ENOMEM;
	}

	/* byte-wise copy */
	now0 = ktime_get();
	while (k < i) {
		char *p = (char *)pg;
		char *q = (char *)src;
		char *m = q + bytes;

		while (q < m)
			*p++ = *q++;
		k++;
	}
	now1 = ktime_get();
	printk("need %Ld ns to copy %d Kbytes\n",
	       ktime_to_ns(ktime_sub(now1, now0)), bytes * i / 1024);

	/* word-wise copy */
	now0 = ktime_get();
	k = 0;
	while (k < i) {
		int *p = (int *)pg;
		int *q = (int *)src;
		int *m = q + bytes / sizeof(int);

		while (q < m)
			*p++ = *q++;
		k++;
	}
	now1 = ktime_get();
	printk("need %Ld ns to copy %d Kwords\n",
	       ktime_to_ns(ktime_sub(now1, now0)),
	       bytes * i / sizeof(int) / 1024);

	/* library memcpy */
	now0 = ktime_get();
	for (k = 0; k < i; k++)
		memcpy((void *)pg, (void *)src, bytes);
	now1 = ktime_get();
	printk("need %Ld ns to memcpy %d Kbytes\n",
	       ktime_to_ns(ktime_sub(now1, now0)), bytes * i / 1024);

	free_pages(pg, PAGE_ORDER);
	return 0x2c;
}
static u32 dovefb_ovly_create_surface(struct _sOvlySurface *pOvlySurface)
{
	u16 surfaceWidth;
	u16 surfaceHeight;
	u32 surfaceSize;
	DOVEFBVideoMode vmode;
	u8 *surfVABase;
	u8 *surfPABase;

	surfaceWidth = pOvlySurface->viewPortInfo.srcWidth;
	surfaceHeight = pOvlySurface->viewPortInfo.srcHeight;
	vmode = pOvlySurface->videoMode;

	/* calculate video surface size */
	switch (vmode) {
	case DOVEFB_VMODE_YUV422PACKED:
	case DOVEFB_VMODE_YUV422PACKED_SWAPUV:
	case DOVEFB_VMODE_YUV422PACKED_SWAPYUorV:
	case DOVEFB_VMODE_YUV422PLANAR:
	case DOVEFB_VMODE_YUV422PLANAR_SWAPUV:
	case DOVEFB_VMODE_YUV422PLANAR_SWAPYUorV:
		surfaceSize = surfaceWidth * surfaceHeight * 2;
		break;
	case DOVEFB_VMODE_YUV420PLANAR:
	case DOVEFB_VMODE_YUV420PLANAR_SWAPUV:
	case DOVEFB_VMODE_YUV420PLANAR_SWAPYUorV:
		surfaceSize = surfaceWidth * surfaceHeight * 3 / 2;
		break;
	default:
		pr_debug("Unknown video mode.\n");
		return -ENXIO;
	}

	/* get new video buffer */
	surfVABase = (u_char *)__get_free_pages(GFP_ATOMIC | GFP_DMA,
						get_order(surfaceSize * 2));
	if (surfVABase == NULL) {
		pr_debug("Unable to allocate surface memory\n");
		return -ENOMEM;
	}
	/* pr_debug("\n create surface buffer = 0x%08x \n", (u32)surfVABase); */
	surfPABase = (u8 *)__pa(surfVABase);
	memset(surfVABase, 0x0, surfaceSize);

	pOvlySurface->videoBufferAddr.startAddr = surfPABase;

	return 0;
}
static void *ccio_alloc_consistent(struct pci_dev *dev, size_t size,
				   dma_addr_t *handle)
{
	void *ret;

	ret = (void *)__get_free_pages(GFP_ATOMIC, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*handle = virt_to_phys(ret);
	}
	return ret;
}
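The consistent-DMA teardown mirrors the allocator: the driver recomputes the order from the same size and hands the kernel virtual address back to free_pages(). A hedged sketch of what a matching ccio_free_consistent() looks like, derived by symmetry with the allocation path above rather than quoted from the PA-RISC driver:

static void ccio_free_consistent(struct pci_dev *dev, size_t size,
				 void *vaddr, dma_addr_t handle)
{
	/* the order must match what get_order(size) gave at alloc time */
	free_pages((unsigned long)vaddr, get_order(size));
}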
unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);
	if (page == 0)
		return 0;

	stack_protections(page);
	return page;
}
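Because alloc_stack() returns the raw unsigned long from __get_free_pages(), the release side is a plain free_pages() with the same order. A minimal sketch of the counterpart (UML carries a free_stack() helper of this shape; the body below is assumed by symmetry, not a verified copy):

static inline void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}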
/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates the physically contiguous pages with the given size.
 *
 * Returns the pointer of the buffer, or NULL if not enough memory.
 */
void *snd_malloc_pages(size_t size, unsigned int gfp_flags)
{
	int pg;
	void *res;

	snd_assert(size > 0, return NULL);
	snd_assert(gfp_flags != 0, return NULL);
	for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++)
		;
	if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) {
		mark_pages(res, pg);
	}
	return res;
}
/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates the physically contiguous pages with the given size.
 *
 * Returns the pointer of the buffer, or NULL if not enough memory.
 */
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
	int pg;
	void *res;

	snd_assert(size > 0, return NULL);
	snd_assert(gfp_flags != 0, return NULL);
	gfp_flags |= __GFP_COMP;	/* compound page lets parts be mapped */
	pg = get_order(size);
	if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
		inc_snd_pages(pg);
	return res;
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret;

	/* pgdir take page or two with 4K pages and a page fraction otherwise */
#ifndef CONFIG_PPC_4K_PAGES
	ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO);
#else
	ret = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					PGDIR_ORDER - PAGE_SHIFT);
#endif
	return ret;
}
int __init __get_free_pages_init(void)
{
	/* allocate 8 physical pages (order 3) */
	addr = __get_free_pages(GFP_KERNEL, 3);
	if (!addr)
		return -ENOMEM;

	printk(KERN_INFO "__get_free_pages succeeded, addr = 0x%lx\n", addr);
	return 0;
}
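As written, the demo leaks its eight pages on module unload. A minimal exit-path sketch, assuming addr is the same module-level variable used above and that the module wires the pair up with module_init()/module_exit():

void __exit __get_free_pages_exit(void)
{
	if (addr)
		free_pages(addr, 3);	/* order must match the allocation */
}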
static unsigned long repl___get_free_pages(gfp_t flags, unsigned int order)
{
	unsigned long ret_val;

	ret_val = __get_free_pages(flags, order);
	if (ret_val != 0) {
		klc_add_alloc((const void *)ret_val,
			      (size_t)(PAGE_SIZE << order), stack_depth);
	}
	return ret_val;
}
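A leak checker of this shape also needs the mirror-image hook on the free path, so every recorded allocation is crossed off before the pages go back. A hypothetical sketch — klc_remove_alloc() is an assumed name for the checker's deregistration call, not a function confirmed in the real API:

static void repl_free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0)
		klc_remove_alloc((const void *)addr);	/* hypothetical API */
	free_pages(addr, order);
}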
static unsigned long setup_zero_page(void)
{
	struct page *page;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *) empty_zero_page);
	SetPageReserved(page);

	return 1UL;
}
static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
				 unsigned min_order)
{
	gfp_t flags;
	unsigned long i;

	max_order++;
	do {
		/*
		 * Really try hard to get the needed memory.
		 * We need memory below the first 32MB, so we
		 * add the __GFP_DMA flag that guarantees that we are
		 * below the first 16MB.
		 */
		flags = __GFP_DMA | __GFP_HIGH;
		va->logical = __get_free_pages(flags, --max_order);
	} while (va->logical == 0 && max_order > min_order);

	if (!va->logical)
		return -ENOMEM;

	va->phys = virt_to_phys((void *)va->logical);
	va->size = PAGE_SIZE << max_order;
	va->order = max_order;

	/*
	 * It seems like __get_free_pages only ups the usage count
	 * of the first page. This doesn't work with fault mapping, so
	 * up the usage count once more (XXX: should use split_page or
	 * compound page).
	 */
	memset((void *)va->logical, 0x00, va->size);
	for (i = va->logical; i < va->logical + va->size; i += PAGE_SIZE) {
		get_page(virt_to_page(i));
	}

	/*
	 * Change caching policy of the linear kernel map to avoid
	 * mapping type conflicts with user-space mappings.
	 */
	set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT);

	printk(KERN_DEBUG MODULE_NAME
	       ": Allocated %ld bytes vram area at 0x%08lx\n",
	       va->size, va->phys);

	return 0;
}
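The teardown has to undo all three steps in reverse: restore the write-back caching policy, drop the extra per-page references taken above, and only then release the block. A hedged sketch of a matching vmlfb_free_vram_area(), reconstructed from the allocation logic rather than quoted from the vermilion driver:

static void vmlfb_free_vram_area(struct vram_area *va)
{
	unsigned long j;

	if (!va->logical)
		return;

	/* restore write-back caching on the linear kernel map */
	set_pages_wb(virt_to_page(va->logical), va->size >> PAGE_SHIFT);

	/* drop the per-page references taken at allocation time */
	for (j = va->logical; j < va->logical + va->size; j += PAGE_SIZE)
		put_page(virt_to_page(j));

	free_pages(va->logical, va->order);
	va->logical = 0;
}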
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init(ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}
static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (!dev || *dev->dma_mask >= 0xffffffffUL)
		gfp &= ~GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}
int gnt_init(void)
{
	int mfn;
	int err;
	struct as_sring *sring;
	struct evtchn_alloc_unbound alloc_unbound;

	printk(KERN_INFO "gnt_init\n");

	page = __get_free_pages(GFP_KERNEL, 0);
	if (page == 0) {
		printk(KERN_DEBUG "\nxen:DomU:could not get free page");
		return -ENOMEM;	/* was "return 0", which reported success */
	}

	sring = (struct as_sring *)page;
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&(info.ring), sring, PAGE_SIZE);

	mfn = virt_to_mfn(page);
	printk(KERN_INFO "grant foreign access\n");
	info.gref = gnttab_grant_foreign_access(DOM0_ID, mfn, 0);
	if (info.gref < 0) {
		printk(KERN_DEBUG "\nxen:could not grant foreign access");
		free_page((unsigned long)page);
		info.ring.sring = NULL;
		return info.gref;	/* propagate the error instead of 0 */
	}
	printk(KERN_DEBUG "\n gref = %d", info.gref);

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = DOM0_ID;
	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err) {
		printk(KERN_DEBUG "\nalloc unbound port failure");
		return err;
	}

	err = bind_evtchn_to_irqhandler(alloc_unbound.port, as_int, 0,
					"xen-eg", &info);
	if (err < 0) {
		printk(KERN_DEBUG "\nbind evtchn to irqhandler failure");
		return err;
	}
	info.irq = err;
	info.port = alloc_unbound.port;
	printk(KERN_DEBUG " interrupt = %d, local_port = %d",
	       info.irq, info.port);
	printk("...\n...");
	create_procfs_entry();
	return 0;
}
void *pci_alloc_consistent(void *hwdev, size_t size, dma_addr_t *dma_handle)
{
	void *ret;
	int gfp = GFP_ATOMIC;

	ret = (void *)__get_free_pages(gfp, __get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}
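On platforms of this vintage the free side is the trivial inverse. A sketch of a matching pci_free_consistent(), hedged as a reconstruction by symmetry with the allocator above:

void pci_free_consistent(void *hwdev, size_t size, void *vaddr,
			 dma_addr_t dma_handle)
{
	/* same __get_order(size) that sized the allocation */
	free_pages((unsigned long)vaddr, __get_order(size));
}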
static int __init calibrate_xor_blocks(void)
{
	void *b1, *b2;
	struct xor_block_template *f, *fastest;

	b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
	if (!b1) {
		printk(KERN_WARNING "xor: Yikes! No memory available.\n");
		return -ENOMEM;
	}
	b2 = b1 + 2*PAGE_SIZE + BENCH_SIZE;

	/*
	 * If this arch/cpu has a short-circuited selection, don't loop through
	 * all the possible functions, just test the best one
	 */
	fastest = NULL;

#ifdef XOR_SELECT_TEMPLATE
	fastest = XOR_SELECT_TEMPLATE(fastest);
#endif

#define xor_speed(templ)	do_xor_speed((templ), b1, b2)

	if (fastest) {
		printk(KERN_INFO "xor: automatically using best "
		       "checksumming function: %s\n", fastest->name);
		xor_speed(fastest);
	} else {
		printk(KERN_INFO "xor: measuring software checksum speed\n");
		XOR_TRY_TEMPLATES;
		fastest = template_list;
		for (f = fastest; f; f = f->next)
			if (f->speed > fastest->speed)
				fastest = f;
	}

	printk(KERN_INFO "xor: using function: %s (%d.%03d MB/sec)\n",
	       fastest->name, fastest->speed / 1000, fastest->speed % 1000);

#undef xor_speed

	free_pages((unsigned long)b1, 2);

	active_template = fastest;
	return 0;
}
static void _transfer_frame_init(void)
{
	s_mipc_rx_buf = (u8 *) __get_free_pages(GFP_KERNEL,
						get_order(MAX_MIPC_RX_FRAME_SIZE));
	WARN_ON(NULL == s_mipc_rx_buf);

	if (kfifo_alloc(&s_mipc_rx_cache_kfifo, MAX_MIPC_RX_CACHE_SIZE,
			GFP_KERNEL)) {
		printk("_transfer_frame_init: kfifo rx cache no memory!\r\n");
		panic("%s[%d]kfifo rx cache no memory", __FILE__, __LINE__);
	}

	_TxFreeFrameList_Init(&s_mipc_tx_free_frame_list);
	_TransferInit(&s_mipc_tx_tansfer);
}
static void __init setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:	/* g5 */
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9-109 */
	case 0x2096:	/* z9-109 */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	case 0x2817:	/* z196 */
	case 0x2818:	/* z196 */
		order = 2;
		break;
	case 0x2827:	/* zEC12 */
	case 0x2828:	/* zEC12 */
		order = 5;
		break;
	case 0x2964:	/* z13 */
	default:
		order = 7;
		break;
	}
	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
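The point of allocating 2^order zero pages, splitting them, and computing zero_page_mask is that ZERO_PAGE() can then hand out a different copy depending on the faulting address, spreading read-only zero-page traffic across cache colors. A sketch of how the mask is consumed, modeled on the ZERO_PAGE() definition used by s390 and MIPS (an assumption here; check the arch's pgtable.h for the exact form):

/*
 * zero_page_mask selects one of the replicated zero pages based on the
 * virtual address, so readers at different addresses hit different
 * cache colors.
 */
#define ZERO_PAGE(vaddr)						\
	(virt_to_page((void *)(empty_zero_page +			\
			       (((unsigned long)(vaddr)) & zero_page_mask))))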
int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
{
	pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							 get_order(SZ_16K));
	if (!pt->fl_table)
		return -ENOMEM;

	memset(pt->fl_table, 0, SZ_16K);
	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

	add_meminfo_total_pages(NR_IOMMU_PAGETABLES_PAGES,
				1 << get_order(SZ_16K));
	return 0;
}
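The release path returns the 16K first-level table with the same get_order(SZ_16K) and undoes the meminfo accounting the allocator added. A simplified sketch that only frees the first-level table (the real driver also walks and frees any second-level tables first):

void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
{
	free_pages((unsigned long)pt->fl_table, get_order(SZ_16K));
	pt->fl_table = NULL;

	/* assuming the counter API accepts a negative delta; otherwise a
	 * dedicated subtraction helper would be used here */
	add_meminfo_total_pages(NR_IOMMU_PAGETABLES_PAGES,
				-(1 << get_order(SZ_16K)));
}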
/*
 * DMA memory allocation, derived from pci_alloc_consistent.
 * However, the Au1000 data cache is coherent (when programmed so),
 * therefore we return KSEG0 address, not KSEG1.
 */
static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
{
	void *ret;
	int gfp = GFP_ATOMIC | GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
		ret = (void *)KSEG0ADDR(ret);
	}
	return ret;
}
/**
 * xilinx_pcie_enable_msi - Enable MSI support
 * @port: PCIe port information
 */
static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
{
	phys_addr_t msg_addr;

	port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
	if (!port->msi_pages)
		return -ENOMEM;

	msg_addr = virt_to_phys((void *)port->msi_pages);
	pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
	pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);

	return 0;
}
/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates the physically contiguous pages with the given size.
 *
 * Returns the pointer of the buffer, or NULL if not enough memory.
 */
void *snd_malloc_pages(size_t size, unsigned int gfp_flags)
{
	int pg;
	void *res;

	snd_assert(size > 0, return NULL);
	snd_assert(gfp_flags != 0, return NULL);
	pg = get_order(size);
	if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) {
		mark_pages(virt_to_page(res), pg);
		inc_snd_pages(pg);
	}
	return res;
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-API.txt for
 * more information.
 */
void *sn_dma_alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flags)
{
	void *cpuaddr;
	unsigned long phys_addr;
	int node;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Allocate the memory.
	 */
	node = pcibus_to_node(pdev->bus);
	if (likely(node >= 0)) {
		struct page *p = alloc_pages_node(node, flags,
						  get_order(size));

		if (likely(p))
			cpuaddr = page_address(p);
		else
			return NULL;
	} else
		cpuaddr = (void *)__get_free_pages(flags, get_order(size));

	if (unlikely(!cpuaddr))
		return NULL;

	memset(cpuaddr, 0x0, size);

	/* physical addr. of the memory we just got */
	phys_addr = __pa(cpuaddr);

	/*
	 * 64 bit address translations should never fail.
	 * 32 bit translations can fail if there are insufficient mapping
	 * resources.
	 */
	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
						   SN_DMA_ADDR_PHYS);
	if (!*dma_handle) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		free_pages((unsigned long)cpuaddr, get_order(size));
		return NULL;
	}

	return cpuaddr;
}
// /data/aaa
int my_init(void)
{
	char *buff;
	struct file *filp;
	int ret;

	filp = filp_open("/data/bbb", O_WRONLY | O_TRUNC | O_CREAT, 0666);
	if (IS_ERR(filp))	/* filp_open() returns ERR_PTR, never NULL */
		return 0;

	buff = (char *)__get_free_pages(GFP_KERNEL, 0);
	if (!buff) {	/* check the allocation before writing into it */
		filp_close(filp, 0);
		return -ENOMEM;
	}
	strcpy(buff, "hello world");
	ret = kernel_write(filp, buff, strlen(buff), 0);
	printk("ret=%d\n", ret);

	free_pages((unsigned long)buff, 0);
	filp_close(filp, 0);
	return 0;
}
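For completeness, the boilerplate that turns this demo into a loadable module — a minimal sketch assuming my_init() above is the entry point and nothing needs undoing at exit:

static void __exit my_exit(void)
{
	printk("my_exit\n");
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");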
unsigned long toi_get_free_pages(int fail_num, gfp_t mask,
				 unsigned int order)
{
	unsigned long result;

	if (toi_alloc_ops.enabled)
		MIGHT_FAIL(fail_num, 0);
	result = __get_free_pages(mask, order);
	if (toi_alloc_ops.enabled)
		alloc_update_stats(fail_num, (void *) result,
				   PAGE_SIZE << order);
	if (fail_num == toi_trace_allocs)
		dump_stack();
	return result;
}