static struct Page *
default_alloc_pages(size_t n) {
    // Let's walk through how page allocation is actually implemented;
    // n is the number of pages requested.
    assert(n > 0);
    if (n > nr_free) { // nr_free is the total number of free pages
        return NULL;
    }
    struct Page *page = NULL;
    list_entry_t *le = &free_list;
    list_entry_t *len;
    while ((le = list_next(le)) != &free_list) { // traverse the free list from here
        struct Page *p = le2page(le, page_link);
        if (p->property >= n) { // the block holds at least n free pages -- this is exactly first-fit
            //page = p;
            //break;
            int i;
            for (i = 0; i < n; ++i) {
                len = list_next(le);   // remember the next entry before unlinking
                struct Page *pp = le2page(le, page_link);
                SetPageReserved(pp);   // mark this page as occupied
                ClearPageProperty(pp); // this page is no longer a block header
                list_del(le);
                le = len;
            } // n pages unlinked in total
            if (p->property > n) {
                // the first leftover page becomes the new block header
                (le2page(le, page_link))->property = p->property - n;
            }
            ClearPageProperty(p);
            SetPageReserved(p);
            nr_free -= n;
            return p;
        }
    }
    return NULL;
}
static struct Page *
default_alloc_pages(size_t n) {
    assert(n > 0);
    if (n > nr_free) {
        return NULL;
    }
    list_entry_t *le, *len;
    le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link);
        if (p->property >= n) {
            int i;
            for (i = 0; i < n; i++) {
                len = list_next(le);
                struct Page *pp = le2page(le, page_link);
                SetPageReserved(pp);
                ClearPageProperty(pp);
                list_del(le);
                le = len;
            }
            if (p->property > n) {
                (le2page(le, page_link))->property = p->property - n;
            }
            ClearPageProperty(p);
            SetPageReserved(p);
            nr_free -= n;
            return p;
        }
    }
    return NULL;
}
static int __init jz_proc_init(void)
{
	struct proc_dir_entry *jz_proc;
	unsigned int virt_addr, i;

#ifndef CONFIG_USE_JZ_ROOT_DIR
	jz_proc = jz_proc_mkdir("mem");
#else
	jz_proc = get_jz_proc_root();
#endif

	/*
	 * Reserve a 16MB memory for IPU on JZ.
	 */
#ifdef JZ_PROC_IMEM
	jz_imem_base = (unsigned int)__get_free_pages(GFP_KERNEL, IMEM_MAX_ORDER);
	if (jz_imem_base) {
		/* imem (IPU memory management) */
		jz_proc_create("imem", 0664, jz_proc, &imem_proc_fops);

		/* Set page reserved */
		virt_addr = jz_imem_base;
		for (i = 0; i < (1 << IMEM_MAX_ORDER); i++) {
			SetPageReserved(virt_to_page((void *)virt_addr));
			virt_addr += PAGE_SIZE;
		}

		/* Convert to physical address */
		jz_imem_base = virt_to_phys((void *)jz_imem_base);

		printk("Total %dMB memory at 0x%x was reserved for IPU\n",
		       (unsigned int)((1 << IMEM_MAX_ORDER) * PAGE_SIZE) / 1000000,
		       jz_imem_base);
	} else {
		printk("NOT enough memory for imem\n");
	}
#endif

#ifdef JZ_PROC_IMEM_1
	jz_imem1_base = (unsigned int)__get_free_pages(GFP_KERNEL, IMEM1_MAX_ORDER);
	if (jz_imem1_base) {
		jz_proc_create("imem1", 0644, jz_proc, &imem1_proc_fops);

		/* Set page reserved */
		virt_addr = jz_imem1_base;
		for (i = 0; i < (1 << IMEM1_MAX_ORDER); i++) {
			SetPageReserved(virt_to_page((void *)virt_addr));
			virt_addr += PAGE_SIZE;
		}

		/* Convert to physical address */
		jz_imem1_base = virt_to_phys((void *)jz_imem1_base);

		printk("Total %dMB memory1 at 0x%x was reserved for IPU\n",
		       (unsigned int)((1 << IMEM1_MAX_ORDER) * PAGE_SIZE) / 1000000,
		       jz_imem1_base);
	} else {
		printk("NOT enough memory for imem1\n");
	}
#endif

	return 0;
}
/* initial function, no need to hold ring_lock */
int ringbuf_init(struct seemp_logk_dev *sdev)
{
	char *buf;
	unsigned long virt_addr;

	if (kmalloc_flag) {
		sdev->ring = kmalloc(sdev->ring_sz, GFP_KERNEL);
		if (sdev->ring == NULL) {
			pr_err("kmalloc failed, ring_sz= %d\n", sdev->ring_sz);
			return -ENOMEM;
		}

		buf = (char *)sdev->ring;

		/* reserve kmalloc memory as pages to make them remapable */
		for (virt_addr = (unsigned long)buf;
		     virt_addr < (unsigned long)buf + sdev->ring_sz;
		     virt_addr += PAGE_SIZE) {
			SetPageReserved(virt_to_page((virt_addr)));
		}
	} else {
		sdev->ring = vmalloc(sdev->ring_sz);
		if (sdev->ring == NULL) {
			pr_err("vmalloc failed, ring_sz = %d\n", sdev->ring_sz);
			return -ENOMEM;
		}

		buf = (char *)sdev->ring;

		/* reserve vmalloc memory as pages to make them remapable */
		for (virt_addr = (unsigned long)buf;
		     virt_addr < (unsigned long)buf + sdev->ring_sz;
		     virt_addr += PAGE_SIZE) {
			SetPageReserved(vmalloc_to_page((unsigned long *)virt_addr));
		}
	}

	memset(sdev->ring, 0, sdev->ring_sz);

	sdev->num_tot_blks = (sdev->ring_sz / BLK_SIZE);
	sdev->num_writers = 0;
	sdev->write_idx = 0;
	sdev->read_idx = 0;

	sdev->num_write_avail_blks = sdev->num_tot_blks;
		/* no. of blocks available for write */
	sdev->num_write_in_prog_blks = 0;
		/* no. of blocks held by writers to perform writes */
	sdev->num_read_avail_blks = 0;
		/* no. of blocks ready for read */
	sdev->num_read_in_prog_blks = 0;
		/* no. of blocks held by the reader to perform read */

	return 0;
}
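/*
 * A matching teardown for the init above would clear the Reserved bit on
 * every page before freeing the ring. A minimal sketch, assuming the same
 * kmalloc_flag global and sdev fields; the function name is hypothetical
 * and not taken from the source above:
 */
void ringbuf_cleanup(struct seemp_logk_dev *sdev)
{
	unsigned long virt_addr;

	/* un-reserve every page that ringbuf_init() reserved */
	for (virt_addr = (unsigned long)sdev->ring;
	     virt_addr < (unsigned long)sdev->ring + sdev->ring_sz;
	     virt_addr += PAGE_SIZE) {
		if (kmalloc_flag)
			ClearPageReserved(virt_to_page(virt_addr));
		else
			ClearPageReserved(vmalloc_to_page((void *)virt_addr));
	}

	/* release with the allocator that was used in ringbuf_init() */
	if (kmalloc_flag)
		kfree(sdev->ring);
	else
		vfree(sdev->ring);
	sdev->ring = NULL;
}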
static inline void pages_reserve(void)
{
	unsigned int i;

	/* set the pages as reserved */
	for (i = 0; i < nr_pages; i++)
		SetPageReserved(pages[i]);
}
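/*
 * Hypothetical inverse of pages_reserve() above, assuming the same global
 * pages[] array and nr_pages count; not part of the original source:
 */
static inline void pages_unreserve(void)
{
	unsigned int i;

	/* drop the reservation so the pages can be freed normally */
	for (i = 0; i < nr_pages; i++)
		ClearPageReserved(pages[i]);
}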
unsigned long videoin_dmamalloc_phy(unsigned int u32Buf, unsigned long size)
{
	videoin_priv_t *priv = (videoin_priv_t *)&videoin_priv;
	unsigned long adr;

	DBG_PRINTF("%s\n", __FUNCTION__);
	size = PAGE_ALIGN(size);
	priv->vaddr = dma_alloc_writecombine(NULL /*dev*/, size, &priv->paddr,
					     GFP_KERNEL);
	printk("videoin priv->paddr=%x,priv->vaddr=%x\n", priv->paddr, priv->vaddr);
	if (!priv->vaddr)
		return 0; /* was "return NULL", but the return type is unsigned long */

	adr = (unsigned long)priv->vaddr;
	videoIn_buf[u32Buf].u32PhysAddr = priv->paddr;
	if (u32Buf < 3)
		/* the original statement had no assignment and therefore no
		 * effect; mirroring the physical address appears intended */
		videoIn_buf[u32Buf + 5].u32PhysAddr = priv->paddr;
	videoIn_buf[u32Buf].u32VirtAddr = adr;

	while (size > 0) {
		SetPageReserved(vmalloc_to_page((void *)adr));
		adr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	DBG_PRINTF("SetPageReserved = 0x%x\n", adr);

	return priv->paddr;
}
static struct Page *
default_alloc_pages(size_t n) {
    assert(n > 0);
    if (n > nr_free) {
        return NULL;
    }
    list_entry_t *le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *page = le2page(le, page_link);
        // Finds a free block.
        if (page->property >= n) {
            // Allocate the first n pages of the block.
            ClearPageProperty(page);
            SetPageReserved(page);
            list_del(le);
            if (page->property > n) {
                // Update the remaining block's size; relies on ucore's
                // list_del() leaving le's link pointers intact, so
                // list_next(le) still names the old successor.
                struct Page *new_page = page + n;
                new_page->property = page->property - n;
                list_add_before(list_next(le), &(new_page->page_link));
            }
            page->property = 0;
            nr_free -= n;
            return page;
        }
    }
    return NULL;
}
static isolate_status_t
mca_page_isolate(unsigned long paddr)
{
	int i;
	struct page *p;

	/* whether physical address is valid or not */
	if (!ia64_phys_addr_valid(paddr))
		return ISOLATE_NG;

	/* convert physical address to physical page number */
	p = pfn_to_page(paddr >> PAGE_SHIFT);

	/* check whether this page has already been registered */
	for (i = 0; i < num_page_isolate; i++)
		if (page_isolate[i] == p)
			return ISOLATE_OK; /* already listed */

	/* limitation check */
	if (num_page_isolate == MAX_PAGE_ISOLATE)
		return ISOLATE_NG;

	/* kick pages having attribute 'SLAB' or 'Reserved' */
	if (PageSlab(p) || PageReserved(p))
		return ISOLATE_NG;

	/* add attribute 'Reserved' and register the page */
	SetPageReserved(p);
	page_isolate[num_page_isolate++] = p;

	return ISOLATE_OK;
}
/**********************************************************************
 *
 * Memory management
 *
 **********************************************************************/
static void *rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long adr;

	size = PAGE_ALIGN(size);
	mem = vmalloc_32(size);
	if (!mem)
		return NULL;

	/*
	 * VFB must clear memory to prevent kernel info
	 * leakage into userspace.
	 * VGA-based drivers MUST NOT clear memory if
	 * they want to be able to take over vgacon.
	 */
	memset(mem, 0, size);

	adr = (unsigned long) mem;
	while (size > 0) {
		SetPageReserved(vmalloc_to_page((void *)adr));
		adr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	return mem;
}
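/*
 * Drivers that use this rvmalloc() pattern conventionally pair it with an
 * rvfree() that clears the Reserved bit on each page before vfree(); a
 * sketch of the usual counterpart, not shown in the source above:
 */
static void rvfree(void *mem, unsigned long size)
{
	unsigned long adr;

	if (!mem)
		return;

	adr = (unsigned long) mem;
	while ((long) size > 0) {
		ClearPageReserved(vmalloc_to_page((void *)adr));
		adr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	vfree(mem);
}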
static struct Page *
default_alloc_pages(size_t n) {
    assert(n > 0);
    if (n > nr_free) {
        return NULL;
    }
    struct Page *page = NULL;
    list_entry_t *le = &free_list; // (4.1)
    list_entry_t *le1;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link); // (4.1.1)
        if (p->property >= n) { // (4.1.2) find
            int i;
            for (i = 0, le1 = le; i < n; i++, le1 = list_next(le1)) {
                // allocate pages from p up to p + n
                struct Page *p1 = le2page(le1, page_link);
                SetPageReserved(p1);   // (4.1.2) set PG_reserved
                ClearPageProperty(p1); // (4.1.2) clear PG_property
                list_del(le1);         // (4.1.2) unlink this page from free_list
            }
            page = p;
            break;
        }
    }
    if (page != NULL) {
        if (page->property > n) {
            (le2page(le1, page_link))->property = page->property - n; // (4.1.2.1)
        }
        nr_free -= n; // (4.1.3)
    }
    return page; // (4.1.4) or (4.2)
}
static int __init init(void)
{
	/* build proc dir "memshare" and two proc files, phymem_addr and
	 * phymem_size, inside it */
	proc_memshare_dir = proc_mkdir(PROC_MEMSHARE_DIR, NULL);
	create_proc_read_entry(PROC_MEMSHARE_PHYADDR, 0, proc_memshare_dir,
			       proc_read_phymem_addr, NULL);
	create_proc_read_entry(PROC_MEMSHARE_SIZE, 0, proc_memshare_dir,
			       proc_read_phymem_size, NULL);

	/* alloc one page */
	kernel_memaddr = __get_free_pages(GFP_KERNEL, PAGE_ORDER);
	if (!kernel_memaddr) {
		printk("Allocate memory failure!\n");
	} else {
		/* After getting pages from the kernel, call SetPageReserved to
		 * tell the system the page is occupied; strictly speaking this
		 * should be done for every page of the allocation, not just
		 * the first one. */
		SetPageReserved(virt_to_page(kernel_memaddr));
		kernel_memsize = PAGES_NUMBER * PAGE_SIZE;
		printk("Allocate memory success! The phy mem addr=%08lx, size=%lu\n",
		       __pa(kernel_memaddr), kernel_memsize);
	}
	return 0;
}
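/*
 * A matching __exit for the module above would undo the reservation before
 * freeing the pages, then remove the proc entries. A minimal sketch,
 * assuming the same globals and the legacy procfs API used in init(); the
 * function name is hypothetical:
 */
static void __exit fini(void)
{
	if (kernel_memaddr) {
		/* clear the Reserved bit set in init(), then free the pages */
		ClearPageReserved(virt_to_page(kernel_memaddr));
		free_pages(kernel_memaddr, PAGE_ORDER);
	}
	remove_proc_entry(PROC_MEMSHARE_PHYADDR, proc_memshare_dir);
	remove_proc_entry(PROC_MEMSHARE_SIZE, proc_memshare_dir);
	remove_proc_entry(PROC_MEMSHARE_DIR, NULL);
}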
static void *alloc_framebuffer(unsigned long len)
{
	void *mem;
	unsigned int page_shift;
	unsigned long page;

	/* find the smallest order that covers len */
	for (page_shift = 0; page_shift < 12; page_shift++)
		if ((PAGE_SIZE << page_shift) >= len)
			break;

	mem = (void *)__get_free_pages(GFP_KERNEL, page_shift);
	if (!mem) {
		return NULL;
	}

	for (page = (unsigned long)mem;
	     page < PAGE_ALIGN((unsigned long)mem + (PAGE_SIZE << page_shift));
	     page += PAGE_SIZE) {
		SetPageReserved(virt_to_page((void *)page));
	}

	memset(mem, 0, PAGE_SIZE << page_shift);

	D("Allocate framebuffer: Real Size: 0x%08lx, Virt: 0x%08lx, Phys: 0x%08lx",
	  PAGE_SIZE << page_shift, (unsigned long)mem, virt_to_phys(mem));

	return mem;
}
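/*
 * Hypothetical inverse of alloc_framebuffer(), recomputing the same order
 * and clearing the Reserved bit before handing the pages back; not part of
 * the original driver:
 */
static void free_framebuffer(void *mem, unsigned long len)
{
	unsigned int page_shift;
	unsigned long page;

	/* same order computation as alloc_framebuffer() */
	for (page_shift = 0; page_shift < 12; page_shift++)
		if ((PAGE_SIZE << page_shift) >= len)
			break;

	for (page = (unsigned long)mem;
	     page < PAGE_ALIGN((unsigned long)mem + (PAGE_SIZE << page_shift));
	     page += PAGE_SIZE)
		ClearPageReserved(virt_to_page((void *)page));

	free_pages((unsigned long)mem, page_shift);
}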
static void *rvmalloc(unsigned long size)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	struct page *page;
#endif
	void *mem;
	unsigned long adr;

	size = PAGE_ALIGN(size);
	mem = vmalloc_32(size);
	if (!mem)
		return NULL;

	memset(mem, 0, size); /* Clear the ram out, no junk to the user */

	adr = (unsigned long) mem;
	while (size > 0) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
		page = vmalloc_to_page((void *)adr);
		mem_map_reserve(page);
#else
		SetPageReserved(vmalloc_to_page((void *)adr));
#endif
		adr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	return mem;
}
/*****************************************************************************
 << entry function for MMAP >>
 *****************************************************************************/
static int dm_mmap(struct file *file, struct vm_area_struct *vma)
{
	int i;
	struct page *map, *mapend;
	long size;

	size = vma->vm_end - vma->vm_start;

	down(&sem);
	for (i = 0; i < DM_PAGE_NUM; i++) {
		map = virt_to_page(dm_pages[i]);
		mapend = virt_to_page(dm_pages[i] + DM_PAGE_SIZE - 1);
		while (map <= mapend) {
			/* 2007.12.11: changed mem_map_reserve() to
			 * SetPageReserved() by mitsu */
			SetPageReserved(map);
			map++;
		}
		/* 2007.12.11: changed remap_page_range() to
		 * remap_pfn_range() by mitsu */
		if (remap_pfn_range(vma, vma->vm_start + i * DM_PAGE_SIZE,
				    __pa(dm_pages[i]) >> PAGE_SHIFT,
				    ((DM_PAGE_SIZE < size) ? DM_PAGE_SIZE : size),
				    vma->vm_page_prot)) {
			up(&sem);
			return -EAGAIN;
		}
		size -= DM_PAGE_SIZE;
		if (size <= 0)
			break;
	}
	up(&sem);
	return 0;
}
static inline void ipu_buf_get(unsigned int page_shift)
{
	unsigned char *virt_addr;
	int i;

	for (i = 0; i < IPU_BUF_MAX; ++i) {
		if (ipu_buf[i].addr == 0)
			break;
	}

	if ((ipu_buf_cnt = i) == IPU_BUF_MAX) {
		printk("Error, no free ipu buffer.\n");
		return;
	}

	virt_addr = (unsigned char *)__get_free_pages(GFP_KERNEL, page_shift);
	if (virt_addr) {
		ipu_buf[ipu_buf_cnt].addr = (unsigned int)virt_to_phys((void *)virt_addr);
		ipu_buf[ipu_buf_cnt].page_shift = page_shift;
		for (i = 0; i < (1 << page_shift); i++) {
			SetPageReserved(virt_to_page(virt_addr));
			virt_addr += PAGE_SIZE;
		}
	} else {
		printk("get memory Failed.\n");
	}
}
static int amd_create_page_map(struct amd_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL)
		return -ENOMEM;

	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
					     PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		writel(agp_bridge->scratch_page, page_map->remapped+i);
		readl(page_map->remapped+i);	/* PCI Posting. */
	}

	return 0;
}
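/*
 * The AGP drivers pair this with a destroy routine that unmaps the I/O
 * mapping and clears the Reserved bit before freeing the page; a sketch of
 * the usual counterpart, not shown in the snippet above:
 */
static void amd_free_page_map(struct amd_page_map *page_map)
{
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}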
static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}

	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
					     PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
		page_map->remapped[i] = agp_bridge->scratch_page;

	return 0;
}
int __init init_maps(unsigned long physmem, unsigned long iomem,
		     unsigned long highmem)
{
	struct page *p, *map;
	unsigned long phys_len, phys_pages, highmem_len, highmem_pages;
	unsigned long iomem_len, iomem_pages, total_len, total_pages;
	int i;

	phys_pages = physmem >> PAGE_SHIFT;
	phys_len = phys_pages * sizeof(struct page);

	iomem_pages = iomem >> PAGE_SHIFT;
	iomem_len = iomem_pages * sizeof(struct page);

	highmem_pages = highmem >> PAGE_SHIFT;
	highmem_len = highmem_pages * sizeof(struct page);

	total_pages = phys_pages + iomem_pages + highmem_pages;
	total_len = phys_len + iomem_len + highmem_len;

	map = alloc_bootmem_low_pages(total_len);
	if (map == NULL)
		return -ENOMEM;

	for (i = 0; i < total_pages; i++) {
		p = &map[i];
		memset(p, 0, sizeof(struct page));
		SetPageReserved(p);
		INIT_LIST_HEAD(&p->lru);
	}

	max_mapnr = total_pages;
	return 0;
}
/**
 * Examine the address space, build the page descriptor table for physical
 * memory, and hand the free range to the allocator.
 */
static void page_init(void)
{
	int i;
	int freemem_size = 0;

	/* Construct the page descriptor table.
	 * mem     => memory not reserved or occupied by kernel code
	 * freemem => memory available after the page descriptor table is built
	 */

	/* every page from 0x100000 to the top gets an entry in the page
	 * descriptor table */
	for (i = 0; i < e820map.nr_map; i++) {
		mem_size += (uint32_t)(e820map.map[i].size);
		if (e820map.map[i].type == E820_ARM)
			freemem_size += e820map.map[i].size;
	}

	pages = (struct Page *)(uint32_t)(e820map.map[e820map.nr_map - 1].addr);
	npage = mem_size / PGSIZE;
	for (i = 0; i < npage; i++) {
		SetPageReserved(pages + i);
	}

	uintptr_t freemem = PADDR(ROUNDUP((uintptr_t)pages + sizeof(struct Page) * npage, PGSIZE));
	uint32_t freemem_npage = freemem_size / PGSIZE - npage * sizeof(struct Page) / PGSIZE;

	init_memmap(pa2page(freemem), freemem_npage);
}
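/*
 * init_memmap() hands the free range over to the active pmm_manager. For
 * the first-fit allocators earlier in this collection (where every free
 * page sits on free_list and only the block header carries a non-zero
 * property), the per-page setup looks roughly like the sketch below -- a
 * common ucore variant, reconstructed here rather than taken from the
 * snippet above:
 */
static void
default_init_memmap(struct Page *base, size_t n) {
    assert(n > 0);
    struct Page *p = base;
    for (; p != base + n; p++) {
        assert(PageReserved(p));     // page_init() marked everything reserved
        p->flags = 0;                // clear PG_reserved
        SetPageProperty(p);          // page belongs to a free block
        p->property = 0;             // only the header holds the block size
        set_page_ref(p, 0);
        list_add_before(&free_list, &(p->page_link));
    }
    nr_free += n;
    base->property = n;              // first page is the block header
}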
static int ati_create_page_map(ati_page_map *page_map)
{
	int i, err = 0;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL)
		return -ENOMEM;

	SetPageReserved(virt_to_page(page_map->real));
	err = map_page_into_agp(virt_to_page(page_map->real));
	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
					     PAGE_SIZE);
	if (page_map->remapped == NULL || err) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}

	/*CACHE_FLUSH();*/
	global_cache_flush();

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		writel(agp_bridge->scratch_page, page_map->remapped+i);
		readl(page_map->remapped+i);	/* PCI Posting. */
	}

	return 0;
}
static int __init exemple_init(void)
{
	int err;
	struct page *pg = NULL;

	exemple_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (exemple_buffer == NULL)
		return -ENOMEM;
	exemple_buffer[0] = '\0';

	pg = virt_to_page(exemple_buffer);
	SetPageReserved(pg);

	err = misc_register(&exemple_misc_driver);
	if (err != 0) {
		ClearPageReserved(pg);
		kfree(exemple_buffer);
		exemple_buffer = NULL;
		return err;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)
	init_timer(&exemple_timer);
	exemple_timer.function = exemple_timer_function;
#else
	timer_setup(&exemple_timer, exemple_timer_function, 0);
#endif
	exemple_timer.expires = jiffies + HZ;
	add_timer(&exemple_timer);
	return 0;
}
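/*
 * A matching module exit would stop the timer, deregister the misc device,
 * and release the reserved page. A minimal sketch, assuming the names used
 * in exemple_init(); the exit function itself is not shown in the source:
 */
static void __exit exemple_exit(void)
{
	del_timer_sync(&exemple_timer);
	misc_deregister(&exemple_misc_driver);
	ClearPageReserved(virt_to_page(exemple_buffer));
	kfree(exemple_buffer);
	exemple_buffer = NULL;
}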
void reserve_memory(unsigned long base, unsigned long len)
{
	struct page *page, *page_end;

	page_end = virt_to_page(base + len - 1);
	for (page = virt_to_page(base); page <= page_end; page++)
		SetPageReserved(page);
}
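/*
 * Hypothetical inverse, clearing the Reserved bit over the same page range;
 * not part of the original source:
 */
void unreserve_memory(unsigned long base, unsigned long len)
{
	struct page *page, *page_end;

	page_end = virt_to_page(base + len - 1);
	for (page = virt_to_page(base); page <= page_end; page++)
		ClearPageReserved(page);
}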
static int mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;
	struct mmap_info *info = (struct mmap_info *)vma->vm_private_data;

#ifdef DEBUG_PRINT
	printk("[GNoM_km]: mmap_fault called...(page offset: %lu, total offset: %lu)\n",
	       vmf->pgoff, vmf->pgoff << PAGE_SHIFT);
#endif

	if (!info->data) {
		printk("[GNoM_km]: Error in mmap_fault, no data...\n");
		return -1;
	}

	page = virt_to_page(info->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(page);

	// Reserve the page
	SetPageReserved(page);

	// Return the page
	vmf->page = page;

#ifdef DEBUG_PRINT
	printk("[GNoM_km]: page = %p\n", page);
#endif
	return 0;
}
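/*
 * A fault handler like this is typically wired up from the driver's mmap
 * callback through a vm_operations_struct. A minimal sketch for the legacy
 * (vma, vmf) fault signature shown above; the gnom_* names are hypothetical:
 */
static struct vm_operations_struct gnom_vm_ops = {
	.fault = mmap_fault,
};

static int gnom_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &gnom_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND; /* pages come from the fault handler */
	vma->vm_private_data = filp->private_data;
	return 0;
}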
static struct nvos_pagemap *nv_alloc_pages(unsigned int count,
	pgprot_t prot, bool contiguous, int create_mapping)
{
	struct nvos_pagemap *pm;
	size_t size;
	unsigned int i = 0;

	size = sizeof(struct nvos_pagemap) + sizeof(struct page *)*(count-1);
	pm = kzalloc(size, GFP_KERNEL);
	if (!pm)
		return NULL;

	if (count == 1)
		contiguous = true;

	if (contiguous) {
		size_t order = get_order(count << PAGE_SHIFT);
		struct page *compound_page;

		compound_page = alloc_pages(nv_gfp_pool, order);
		if (!compound_page)
			goto fail;

		split_page(compound_page, order);
		for (i = 0; i < count; i++)
			pm->pages[i] = nth_page(compound_page, i);
		for ( ; i < (1 << order); i++)
			__free_page(nth_page(compound_page, i));
		i = count;
	} else {
		for (i = 0; i < count; i++) {
			pm->pages[i] = alloc_page(nv_gfp_pool);
			if (!pm->pages[i])
				goto fail;
		}
	}

	if (create_mapping) {
		/* since the linear kernel mapping uses sections and super-
		 * sections rather than PTEs, it's not possible to overwrite
		 * it with the correct caching attributes, so use a local
		 * mapping */
		pm->addr = vm_map_ram(pm->pages, count, -1, prot);
		if (!pm->addr) {
			pr_err("nv_alloc_pages fail to vmap contiguous area\n");
			goto fail;
		}
	}

	pm->nr_pages = count;
	for (i = 0; i < count; i++) {
		SetPageReserved(pm->pages[i]);
		pagemap_flush_page(pm->pages[i]);
	}

	return pm;

fail:
	while (i)
		__free_page(pm->pages[--i]);
	if (pm)
		kfree(pm);
	return NULL;
}
static void mark_pages(void *res, int order)
{
	struct page *page = virt_to_page(res);
	struct page *last_page = page + (1 << order);

	while (page < last_page)
		SetPageReserved(page++);
	snd_allocated_pages += 1 << order;
}
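/*
 * ALSA pairs this with an unmark_pages() that reverses the walk before the
 * pages are freed; a sketch of the usual counterpart, not shown above:
 */
static void unmark_pages(void *res, int order)
{
	struct page *page = virt_to_page(res);
	struct page *last_page = page + (1 << order);

	while (page < last_page)
		ClearPageReserved(page++);
	snd_allocated_pages -= 1 << order;
}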
void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}
/*
 * Initialises the sound device, registering the miscdevice entry.
 * Returns 0 on success, non-zero on error.
 */
static int Device1Init(void)
{
	int Result = -1;
	int i = 0;
	UWORD *pTmp;

	// Get pointers to memory-mapped registers
	GetPeripheralBasePtr(0x01C14000, 0x190, (ULONG **)&SYSCFG0);  /* SYSCFG0 pointer */
	GetPeripheralBasePtr(0x01F00000, 0x1042, (ULONG **)&eHRPWM0); /* eHRPWM0 pointer */
	GetPeripheralBasePtr(0x01E27000, 0xA80, (ULONG **)&PSC1);    /* PSC1 pointer  */

	Result = misc_register(&Device1);
	if (Result) {
//#define DEBUG
#undef DEBUG
#ifdef DEBUG
		printk(" %s device register failed\n", DEVICE_NAME);
#endif
	} else {
		// Allocate kernel shared memory for SoundFlags used by Test etc.,
		// showing the state of the sound module; used asynchronously
		// and waited on from the VM.
		if ((kmalloc_ptr = kmalloc((NPAGES + 2) * PAGE_SIZE, GFP_KERNEL)) != NULL) {
			pTmp = (UWORD *)((((unsigned long)kmalloc_ptr) + PAGE_SIZE - 1) & PAGE_MASK);
			for (i = 0; i < NPAGES * PAGE_SIZE; i += PAGE_SIZE) {
				SetPageReserved(virt_to_page(((unsigned long)pTmp) + i));
			}
			pSound = (SOUND *)pTmp;

			SOUNDPwmPoweron;        /* Setup the Sound PWM peripherals */
			SOUNDPwmModuleSetupPcm; /* Setup 125 us timer interrupt    */

			Device1TimerSetTiming(0, 125000); // Default to 8 kHz sample rate
			hrtimer_init(&Device1Timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
			// Timer callback function - does the sequential job
			Device1Timer.function = Device1TimerInterrupt1;
			Device1TimerCancel();

			SOUNDDisable; // Disable the Sound Power Amp
//#define DEBUG
#undef DEBUG
#ifdef DEBUG
			printk(" %s device register success\n", DEVICE_NAME);
#endif
			(*pSound).Status = OK; // We're ready for making "noise"
		}
	}
	return Result;
}
void reserve_memory(unsigned long base, unsigned long len)
{
	struct page *page, *page_end;

	// if (unlikely(enable_debug)) printk("[DNA] reserve_memory()\n");

	page_end = virt_to_page(base + len - 1);
	for (page = virt_to_page(base); page <= page_end; page++)
		SetPageReserved(page);
}
int demo_open_rt(struct rtdm_dev_context *context,
		 rtdm_user_info_t *user_info, int oflags)
{
	struct demodrv_context *my_context;
#ifdef USEMMAP
	unsigned long vaddr;
#endif
	int dev_id = context->device->device_id;
	int ret;

	// get the context for our driver - used to store driver info
	my_context = (struct demodrv_context *)context->dev_private;

#ifdef USEMMAP
	// allocate and prepare memory for our buffer
	my_context->buf = kmalloc(BUFFER_SIZE, GFP_KERNEL);
	/* mark pages reserved so that remap_pfn_range works */
	for (vaddr = (unsigned long)my_context->buf;
	     vaddr < (unsigned long)my_context->buf + BUFFER_SIZE;
	     vaddr += PAGE_SIZE)
		SetPageReserved(virt_to_page(vaddr));
	// write some test value to the start of our buffer
	*(int *)my_context->buf = 1234;
	my_context->mapped_user_addr = NULL;
#endif

	// we also have an interrupt handler:
#ifdef TIMERINT
	ret = rtdm_irq_request(&my_context->irq_handle, TIMER_INT,
			       demo_interrupt, 0, context->device->proc_name,
			       my_context);
#else
	ret = rtdm_irq_request(&my_context->irq_handle, PAR_INT,
			       demo_interrupt, 0, context->device->proc_name,
			       my_context);
#endif
	if (ret < 0)
		return ret;

	/* IPC initialisation - cannot fail with used parameters */
	rtdm_lock_init(&my_context->lock);
	rtdm_event_init(&my_context->irq_event, 0);

	my_context->dev_id = dev_id;
	my_context->irq_events = 0;
	my_context->irq_event_lock = 0;
	my_context->timeout = 0; // wait INFINITE

#ifndef TIMERINT
	// set port to interrupt mode; pins are output
	outb_p(0x10, BASEPORT + 2);
#endif
	// enable interrupt in RTDM
	rtdm_irq_enable(&my_context->irq_handle);

	return 0;
}
static void page_init(void)
{
	int i;

	/* A commented-out e820map probe (printing each region and tracking
	 * the maximum usable address below KMEMSIZE) is omitted here; this
	 * port assumes a flat RAM_SIZE region instead. */

	extern char end[];

	npage = RAM_SIZE / PGSIZE;
	pages = (struct Page *)ROUNDUP((void *)end, PGSIZE);

	for (i = 0; i < npage; i++) {
		SetPageReserved(pages + i);
	}

	uintptr_t freemem = PADDR((uintptr_t)pages + sizeof(struct Page) * npage);
	uint32_t free_begin = ROUNDUP(freemem, PGSIZE), free_end = RAM_SIZE;

	init_memmap(pa2page(free_begin), (free_end - free_begin) / PGSIZE);

	kprintf("free memory: [0x%x, 0x%x)\n", free_begin, free_end);

	/* A second commented-out e820 loop, which clamped each E820_ARM
	 * region to [freemem, KMEMSIZE) and registered it with
	 * init_memmap(), is likewise omitted. */
}