Example #1
void
sound_free_dmap (int dev, struct dma_buffparms *dmap)
{
  int             sz, size, i;
  unsigned long   start_addr, end_addr;

  if (dmap->raw_buf == NULL)
    return;

  if (dmap->mapping_flags & DMA_MAP_MAPPED)
    return;			/* Don't free mmapped buffer. Will use it next time */

  for (sz = 0, size = PAGE_SIZE;
       size < audio_devs[dev]->buffsize;
       sz++, size <<= 1);

  start_addr = (unsigned long) dmap->raw_buf;
  end_addr = start_addr + audio_devs[dev]->buffsize;

  for (i = MAP_NR (start_addr); i <= MAP_NR (end_addr); i++)
    {
      mem_map_unreserve (i);
    }

  free_pages ((unsigned long) dmap->raw_buf, sz);
  dmap->raw_buf = NULL;
}
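
The sz/size loop above (the same idiom appears in sound_alloc_dmap, Example #8) converts a byte count into a page-allocation order. A minimal standalone sketch of that computation, assuming only that PAGE_SIZE is a power of two:

/* Hypothetical helper, equivalent to the sz/size loop: returns the
 * smallest order such that PAGE_SIZE << order >= size. */
static int buffsize_to_order(unsigned long size)
{
	int order = 0;
	unsigned long chunk = PAGE_SIZE;

	while (chunk < size) {
		chunk <<= 1;
		order++;
	}
	return order;
}
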
Example #2
static inline void copy_one_pte(pte_t * old_pte, pte_t * new_pte, int cow)
{
	pte_t pte = *old_pte;
	unsigned long page_nr;

	if (pte_none(pte))
		return;
	if (!pte_present(pte)) {
		swap_duplicate(pte_val(pte));
		set_pte(new_pte, pte);
		return;
	}
	page_nr = MAP_NR(pte_page(pte));
	if (page_nr >= MAP_NR(high_memory) || PageReserved(mem_map+page_nr)) {
		set_pte(new_pte, pte);
		return;
	}
	if (cow)
		pte = pte_wrprotect(pte);
	if (delete_from_swap_cache(page_nr))
		pte = pte_mkdirty(pte);
	set_pte(new_pte, pte_mkold(pte));
	set_pte(old_pte, pte);
	mem_map[page_nr].count++;
}
Example #3
void exit_ringbuf(struct task_struct *tsk)
{
	int i;
	
	if (!tsk->ringbuf) return;
	
	if (tsk->ringbuf->ringbuf) {
		char *rb_ptr = tsk->ringbuf->ringbuf;
		char *shared_ptr = tsk->ringbuf->shared;
		int order = tsk->ringbuf->order;
		int rb_size = PAGE_SIZE * (1<<order);

		for (i=0;i<RBUF_RESERVED;i++)
			if (rb_ptr == reserved_ringbuf[i].rb_ptr) break;
		
		if (i < RBUF_RESERVED) {
			reserved_ringbuf[i].used = 0;
		} else {
			for (i = MAP_NR(rb_ptr); i <= MAP_NR(rb_ptr+rb_size-1); i++) {
				clear_bit(PG_reserved, &mem_map[i].flags);
			}
			free_pages((unsigned)rb_ptr,order);

			i = MAP_NR(shared_ptr);
			clear_bit(PG_reserved, &mem_map[i].flags);
			free_page((unsigned)shared_ptr);
		}
	}

	kfree_s(tsk->ringbuf,sizeof(*(tsk->ringbuf)));
	tsk->ringbuf = NULL;
}
Example #4
static inline unsigned long setup_zero_pages(void)
{
	unsigned long order, size, pg;

	switch (mips_cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		order = 3;
		break;
	default:
		order = 0;
	}

	empty_zero_page = __get_free_pages(GFP_KERNEL, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	pg = MAP_NR(empty_zero_page);
	while(pg < MAP_NR(empty_zero_page) + (1 << order)) {
		set_bit(PG_reserved, &mem_map[pg].flags);
		atomic_set(&mem_map[pg].count, 0);
		pg++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;
	memset((void *)empty_zero_page, 0, size);

	return size;
}
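
For the R4x00 SC/MC case the numbers work out as follows (assuming 4 KiB pages):

/* order = 3              -> 1 << 3 = 8 replicated zero pages
 * size  = PAGE_SIZE << 3 = 32 KiB
 * zero_page_mask = (size - 1) & PAGE_MASK = 0x7000
 */

The replicated copies let a zero page of matching cache colour be handed out, which is presumably how the aliasing-prone secondary caches of those CPUs are worked around; on everything else order is 0 and zero_page_mask is 0.
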
Example #5
/*
 * Trying to stop swapping from a file is fraught with races, so
 * we repeat quite a bit here when we have to pause. swapoff()
 * isn't exactly timing-critical, so who cares (but this is /really/
 * inefficient, ugh).
 *
 * We return 1 after having slept, which makes the process start over
 * from the beginning for this process..
 */
static inline int unuse_pte(struct vm_area_struct * vma, unsigned long address,
	pte_t *dir, unsigned int type, unsigned long page)
{
	pte_t pte = *dir;

	if (pte_none(pte))
		return 0;
	if (pte_present(pte)) {
		unsigned long page_nr = MAP_NR(pte_page(pte));
		if (page_nr >= MAP_NR(high_memory))
			return 0;
		if (!in_swap_cache(page_nr))
			return 0;
		if (SWP_TYPE(in_swap_cache(page_nr)) != type)
			return 0;
		delete_from_swap_cache(page_nr);
		set_pte(dir, pte_mkdirty(pte));
		return 0;
	}
	if (SWP_TYPE(pte_val(pte)) != type)
		return 0;
	read_swap_page(pte_val(pte), (char *) page);
#if 0 /* Is this really needed here, hasn't it been solved elsewhere? */
	flush_page_to_ram(page);
#endif
	if (pte_val(*dir) != pte_val(pte)) {
		free_page(page);
		return 1;
	}
	set_pte(dir, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
	flush_tlb_page(vma, address);
	++vma->vm_mm->rss;
	swap_free(pte_val(pte));
	return 1;
}
Example #6
void ap_ringbuf_init(void)
{
	int i,j;
	char *rb_ptr, *shared_ptr;
	int rb_size = PAGE_SIZE * (1<<RBUF_RESERVED_ORDER);

	/* preallocate some ringbuffers */
	for (i=0;i<RBUF_RESERVED;i++) {
		if (!(rb_ptr = (char *)__get_free_pages(GFP_ATOMIC,RBUF_RESERVED_ORDER))) {
			printk("failed to preallocate ringbuf %d\n",i);
			return;
		}
		for (j = MAP_NR(rb_ptr); j <= MAP_NR(rb_ptr+rb_size-1); j++) {
			set_bit(PG_reserved,&mem_map[j].flags);
		}

		if (!(shared_ptr = (char *)__get_free_page(GFP_ATOMIC))) {
			printk("failed to preallocate shared ptr %d\n",i);
			return;
		}
		set_bit(PG_reserved,&mem_map[MAP_NR(shared_ptr)].flags);

		reserved_ringbuf[i].used = 0;
		reserved_ringbuf[i].rb_ptr = rb_ptr;
		reserved_ringbuf[i].shared_ptr = shared_ptr;
	}
}
Example #7
/* this routine handles present pages, when users try to write
   to a shared page.
   */
void do_wp_page(struct vm_area_struct *vma, unsigned long address, int write_access)
{
    pgd_t *pgd;
    pmd_t *pmd;
    pte_t *page_table,pte;
    unsigned long old_page, new_page;

    new_page = get_free_page(GFP_KERNEL);

    pgd = pgd_offset(vma->vm_task, address);
    if(pgd_none(*pgd))
        goto end_wp_page;
    if(pgd_bad(*pgd))
        goto bad_wp_page;
    pmd = pmd_offset(pgd,address);
    if(pmd_none(*pmd))
        goto end_wp_page;
    if(pmd_bad(*pmd))
        goto bad_wp_page;
    page_table = pte_offset(pmd,address);
    pte = *page_table;
    if(!pte_present(pte))
        goto end_wp_page;
    if(pte_write(pte))
        goto  end_wp_page;
    old_page = pte_page(pte);
    if(old_page >= main_memory_end)
        goto bad_wp_page;

    (vma->vm_task->mm->min_flt)++;

    if(mem_map[MAP_NR(old_page)].flags & PAGE_PRESENT)
    {
        if(new_page)
        {
            if(mem_map[MAP_NR(old_page)].flags & MAP_PAGE_RESERVED)
                ++(vma->vm_task->mm->rss);
            copy_page(old_page, new_page);
            *page_table = pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)));
            free_page(old_page);
            return;
        }
        pte_val(*page_table) &= PAGE_BAD;
        free_page(old_page);
        oom();
        return;
    }
    *page_table = pte_mkdirty(pte_mkwrite(pte));
    if(new_page)
        free_page(new_page);
    return;
bad_wp_page:
    printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
    goto end_wp_page;
end_wp_page:
    if(new_page)
        free_page(new_page);
    return;
}
Example #8
static int sound_alloc_dmap(struct dma_buffparms *dmap)
{
	char *start_addr, *end_addr;
	int i, dma_pagesize;
	int sz, size;

	dmap->mapping_flags &= ~DMA_MAP_MAPPED;

	if (dmap->raw_buf != NULL)
		return 0;	/* Already done */
	if (dma_buffsize < 4096)
		dma_buffsize = 4096;
	dma_pagesize = (dmap->dma < 4) ? (64 * 1024) : (128 * 1024);
	dmap->raw_buf = NULL;
	dmap->buffsize = dma_buffsize;
	if (dmap->buffsize > dma_pagesize)
		dmap->buffsize = dma_pagesize;
	start_addr = NULL;
	/*
	 * Now loop until we get a free buffer. Try to get smaller buffer if
	 * it fails. Don't accept smaller than 8k buffer for performance
	 * reasons.
	 */
	while (start_addr == NULL && dmap->buffsize > PAGE_SIZE) {
		for (sz = 0, size = PAGE_SIZE; size < dmap->buffsize; sz++, size <<= 1);
		dmap->buffsize = PAGE_SIZE * (1 << sz);
		start_addr = (char *) __get_free_pages(GFP_ATOMIC|GFP_DMA, sz);
		if (start_addr == NULL)
			dmap->buffsize /= 2;
	}

	if (start_addr == NULL) {
		printk(KERN_WARNING "Sound error: Couldn't allocate DMA buffer\n");
		return -ENOMEM;
	} else {
		/* make some checks */
		end_addr = start_addr + dmap->buffsize - 1;

		if (debugmem)
			printk(KERN_DEBUG "sound: start 0x%lx, end 0x%lx\n", (long) start_addr, (long) end_addr);
		
		/* now check if it fits into the same dma-pagesize */

		if (((long) start_addr & ~(dma_pagesize - 1)) != ((long) end_addr & ~(dma_pagesize - 1))
		    || end_addr >= (char *) (MAX_DMA_ADDRESS)) {
			printk(KERN_ERR "sound: Got invalid address 0x%lx for %db DMA-buffer\n", (long) start_addr, dmap->buffsize);
			return -EFAULT;
		}
	}
	dmap->raw_buf = start_addr;
	dmap->raw_buf_phys = virt_to_bus(start_addr);

#ifndef OSKIT
	for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
		set_bit(PG_reserved, &mem_map[i].flags);
#endif
	return 0;
}
Example #9
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
	kern_return_t		kr;
	struct vm_statistics	vm_stats;
	struct task_basic_info 	task_basic_info;
	mach_msg_type_number_t	count;
	int			avail_pages, total_pages;
	int			code_size, reserved_size, data_size;
	unsigned long		tmp;
	extern int		_etext, _stext;

	start_mem = PAGE_ALIGN(start_mem);
	kr = vm_deallocate(mach_task_self(), (vm_offset_t) start_mem, 
			   end_mem - start_mem);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr, ("mem_init: vm_deallocate"));
	}
	end_mem = start_mem;

	count = HOST_VM_INFO_COUNT;
	kr = host_statistics(privileged_host_port, HOST_VM_INFO,
			     (host_info_t) &vm_stats, &count);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr, ("mem_init: host_statistics"));
	}

	count = TASK_BASIC_INFO_COUNT;
	kr = task_info(mach_task_self(),
		       TASK_BASIC_INFO,
		       (task_info_t) &task_basic_info,
		       &count);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(2, kr, ("mem_init: task_info"));
	}

	for (tmp = PAGE_OFFSET;
	     tmp < high_memory;
	     tmp += PAGE_SIZE) {
		clear_bit(PG_reserved, &mem_map[MAP_NR(tmp)].flags);
		mem_map[MAP_NR(tmp)].count = 1;
		free_page(tmp);
	}

	avail_pages = (vm_stats.free_count + vm_stats.active_count +
		       vm_stats.inactive_count);
	total_pages = avail_pages + vm_stats.wire_count;
	code_size = ((long) &_etext - (long) &_stext);
	reserved_size = end_mem - initial_start_mem;
	data_size = task_basic_info.virtual_size - code_size - reserved_size;
	
	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
	       avail_pages * (PAGE_SIZE / 1024),
	       total_pages * (PAGE_SIZE / 1024),
	       code_size / 1024,
	       reserved_size / 1024,
	       data_size / 1024);
	       
}
Example #10
int shm_swap (int prio, int gfp_mask)
{
    pte_t page;
    struct shmid_kernel *shp;
    unsigned long swap_nr;
    unsigned long id, idx;
    int loop = 0;
    int counter;

    counter = shm_rss >> prio;
    if (!counter || !(swap_nr = get_swap_page()))
        return 0;

check_id:
    shp = shm_segs[swap_id];
    if (shp == IPC_UNUSED || shp == IPC_NOID || shp->u.shm_perm.mode & SHM_LOCKED ) {
next_id:
        swap_idx = 0;
        if (++swap_id > max_shmid) {
            swap_id = 0;
            if (loop)
                goto failed;
            loop = 1;
        }
        goto check_id;
    }
    id = swap_id;

check_table:
    idx = swap_idx++;
    if (idx >= shp->shm_npages)
        goto next_id;

    page = __pte(shp->shm_pages[idx]);
    if (!pte_present(page))
        goto check_table;
    if ((gfp_mask & __GFP_DMA) && !PageDMA(&mem_map[MAP_NR(pte_page(page))]))
        goto check_table;
    swap_attempts++;

    if (--counter < 0) { /* failed */
failed:
        swap_free (swap_nr);
        return 0;
    }
    if (atomic_read(&mem_map[MAP_NR(pte_page(page))].count) != 1)
        goto check_table;
    shp->shm_pages[idx] = swap_nr;
    rw_swap_page_nocache (WRITE, swap_nr, (char *) pte_page(page));
    free_page(pte_page(page));
    swap_successes++;
    shm_swp++;
    shm_rss--;
    return 1;
}
Example #11
/*
 * set up the free-area data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
static unsigned long init_free_area(unsigned long start_mem, unsigned long end_mem)
{
	page_struct_t *p;
	unsigned long mask = PAGE_MASK;
	unsigned long i;
	unsigned long start_addrspace = KADDRSPACE_START;
	int size;

	/*
	 * Select nr of pages we try to keep free for important stuff
	 * with a minimum of 10 pages and a maximum of 256 pages, so
	 * that we don't waste too much memory on large systems.
	 * This is fairly arbitrary, but based on some behaviour
	 * analysis.
	 */
	INIT_LOG("	init_free_area start_mem: %x endmem:%x   \n",start_mem,end_mem);
	i = (end_mem - start_addrspace) >> (PAGE_SHIFT+7);
	if (i < 10)
		i = 10;
	if (i > 256)
		i = 256;
	/*TODO freepages.min = i;
	  freepages.low = i * 2;
	  freepages.high = i * 3;*/
	g_mem_map = (page_struct_t *) LONG_ALIGN(start_mem+8);
	INIT_LOG("	g_mem_map :%x  size:%x  \n",g_mem_map,MAP_NR(end_mem));
	p = g_mem_map + MAP_NR(end_mem);
	start_mem = LONG_ALIGN((unsigned long) p);
	size=(start_mem -(unsigned long) g_mem_map);
	INIT_LOG(" freearemap setup map: %x diff:%x(%dM)   \n",g_mem_map,(start_mem -(unsigned long) g_mem_map),size/(1024*1024));
	//while(1);
	ut_memset((unsigned char *)g_mem_map, 0, start_mem -(unsigned long) g_mem_map);
	do {
		--p;
		atomic_set(&p->count, 0);
		p->flags = (1 << PG_DMA) | (1 << PG_reserved) ;
	} while (p > g_mem_map);

	for (i = 0 ; i < NR_MEM_LISTS ; i++) {
		unsigned long bitmap_size;
		init_mem_queue(free_mem_area+i);
		mask += mask;
		end_mem = (end_mem + ~mask) & mask;
		bitmap_size = (end_mem - start_addrspace) >> (PAGE_SHIFT + i);
		bitmap_size = (bitmap_size + 7) >> 3;
		bitmap_size = LONG_ALIGN(bitmap_size);
		free_mem_area[i].map = (unsigned int *) start_mem;
		ut_memset((void *) start_mem, 0, bitmap_size);
		start_mem += bitmap_size;
		INIT_LOG("		%d : bitmapsize:%x end_mem:%x \n",i,bitmap_size,end_mem);
	}
	return start_mem;
}
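
The sizing heuristic described in the comment above amounts to "reserve one page out of every 128, clamped to the range [10, 256]". Worked numbers, assuming 4 KiB pages (the memory sizes are hypothetical):

/*  16 MB of managed memory ->  4096 pages ->  4096/128 =  32
 *  64 MB                   -> 16384 pages -> 16384/128 = 128
 * 256 MB                   -> 65536 pages -> 65536/128 = 512 -> clamped to 256
 * very small ranges are raised to the minimum of 10
 */
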
Example #12
static inline void dmafree(void *addr, size_t size)
{
	if (size > 0) {
		int i;

		for (i = MAP_NR((unsigned long)addr);
		     i < MAP_NR((unsigned long)addr+size); i++) {
			mem_map_unreserve (i);
		}
		free_pages((unsigned long) addr, __get_order(size));
	}
}
Example #13
static int read_core(struct inode * inode, struct file * file,char * buf, int count)
{
    unsigned long p = file->f_pos, memsize;
    int read;
    int count1;
    char * pnt;
    struct user dump;
#ifdef __i386__
#	define FIRST_MAPPED	PAGE_SIZE	/* we don't have page 0 mapped on x86.. */
#else
#	define FIRST_MAPPED	0
#endif

    memset(&dump, 0, sizeof(struct user));
    dump.magic = CMAGIC;
    dump.u_dsize = MAP_NR(high_memory);
#ifdef __alpha__
    dump.start_data = PAGE_OFFSET;
#endif

    if (count < 0)
        return -EINVAL;
    memsize = MAP_NR(high_memory + PAGE_SIZE) << PAGE_SHIFT;
    if (p >= memsize)
        return 0;
    if (count > memsize - p)
        count = memsize - p;
    read = 0;

    if (p < sizeof(struct user) && count > 0) {
        count1 = count;
        if (p + count1 > sizeof(struct user))
            count1 = sizeof(struct user)-p;
        pnt = (char *) &dump + p;
        memcpy_tofs(buf,(void *) pnt, count1);
        buf += count1;
        p += count1;
        count -= count1;
        read += count1;
    }

    while (count > 0 && p < PAGE_SIZE + FIRST_MAPPED) {
        put_user(0,buf);
        buf++;
        p++;
        count--;
        read++;
    }
    memcpy_tofs(buf, (void *) (PAGE_OFFSET + p - PAGE_SIZE), count);
    read += count;
    file->f_pos += read;
    return read;
}
Example #14
void kmem_t::init_range(void* vstart, void* vend) {
  vstart = (char*)PGROUNDUP((uint)vstart);
  vend = (char*)PGROUNDUP((uint)vend);
  int startpfn = MAP_NR(vstart);
  int endpfn = MAP_NR(vend);
  int j=0;
  for(int i = startpfn; vstart < vend; i++, j++) {
    page_t* page = pages + i;
    page->vaddr = (void*)vstart;
    free_page(page);
    vstart += PGSIZE;
  }
}
Example #15
static void init_mem(unsigned long start_mem, unsigned long end_mem, unsigned long virt_start_addr){
	int reservedpages = 0;
	unsigned long tmp;

	end_mem &= PAGE_MASK;
	g_max_mapnr  = MAP_NR(end_mem);

	INIT_LOG("	first page : %x :%x :%x\n",MAP_NR(start_mem),MAP_NR(start_mem+PAGE_SIZE),MAP_NR(virt_start_addr));
	start_mem = PAGE_ALIGN(start_mem);
	g_stat_mem_size = end_mem -start_mem;
	while (start_mem < end_mem) {
		clear_bit(PG_reserved, &g_mem_map[MAP_NR(start_mem)].flags);
		start_mem += PAGE_SIZE;
	}
	for (tmp = virt_start_addr ; tmp < (end_mem - 0x2000) ; tmp += PAGE_SIZE) {
		/*if (tmp >= MAX_DMA_ADDRESS)
		  clear_bit(PG_DMA, &g_mem_map[MAP_NR(tmp)].flags);*/
		if (PageReserved(g_mem_map+MAP_NR(tmp))) {
			reservedpages++;
			continue;
		}
		atomic_set(&g_mem_map[MAP_NR(tmp)].count, 1);
		PageSetReferenced(g_mem_map+MAP_NR(tmp));
		mm_putFreePages(tmp,0);
	}
	stat_allocs=0;
	stat_frees =0;
	INIT_LOG("	Reserved pages : %x(%d) \n",reservedpages,reservedpages);
	init_done=1;
	INIT_LOG("	Release to FREEMEM : %x \n",(end_mem - 0x2000));
	return;
}
Example #16
int AndorUnlockDMABuffers(int iCardNo)
{

  unsigned long addr;
  unsigned int sz;

  if (DMA_MODE == 0) {
    if (gpAndorDev[iCardNo].AndorDMABuffer[0].Size != 0) {
      for (addr = (unsigned long)gpAndorDev[iCardNo].AndorDMABuffer[0].VirtualAdd, sz = gpAndorDev[iCardNo].AndorDMABuffer[0].Size;
           sz > 0;
           addr += PAGE_SIZE, sz -= PAGE_SIZE) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
        mem_map_unreserve(MAP_NR(addr));
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
        mem_map_unreserve(virt_to_page(addr));
#else
        ClearPageReserved(virt_to_page(addr));
#endif
      }
      free_pages((unsigned long)gpAndorDev[iCardNo].AndorDMABuffer[0].VirtualAdd, DMA_PAGE_ORD);
    }

    if (gpAndorDev[iCardNo].AndorDMABuffer[1].Size != 0) {
      for (addr = (unsigned long)gpAndorDev[iCardNo].AndorDMABuffer[1].VirtualAdd, sz = gpAndorDev[iCardNo].AndorDMABuffer[1].Size;
           sz > 0;
           addr += PAGE_SIZE, sz -= PAGE_SIZE) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
        mem_map_unreserve(MAP_NR(addr));
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
        mem_map_unreserve(virt_to_page(addr));
#else
        ClearPageReserved(virt_to_page(addr));
#endif
      }
      free_pages((unsigned long)gpAndorDev[iCardNo].AndorDMABuffer[1].VirtualAdd, DMA_PAGE_ORD);
    }
  }
  else {
    if (gpAndorDev[iCardNo].AndorDMABuffer[0].VirtualAdd != 0)
      iounmap(gpAndorDev[iCardNo].AndorDMABuffer[0].VirtualAdd);
    if (gpAndorDev[iCardNo].AndorDMABuffer[1].VirtualAdd != 0)
      iounmap(gpAndorDev[iCardNo].AndorDMABuffer[1].VirtualAdd);
  }

  gpAndorDev[iCardNo].AndorDMABuffer[0].VirtualAdd = 0;
  gpAndorDev[iCardNo].AndorDMABuffer[0].Physical = 0;
  gpAndorDev[iCardNo].AndorDMABuffer[0].Size = 0;
  gpAndorDev[iCardNo].AndorDMABuffer[1].VirtualAdd = 0;
  gpAndorDev[iCardNo].AndorDMABuffer[1].Physical = 0;
  gpAndorDev[iCardNo].AndorDMABuffer[1].Size = 0;

  return 0;
}
Example #17
void free_initmem(void)
{
	unsigned long addr;

	prom_free_prom_memory ();
    
	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
		free_page(addr);
	}
	printk("Freeing unused kernel memory: %dk freed\n",
	       (&__init_end - &__init_begin) >> 10);
}
Example #18
/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 */
unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte;

	if (page >= high_memory)
		printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
	if (mem_map[MAP_NR(page)].count != 1)
		printk("mem_map disagrees with %08lx at %08lx\n",page,address);
	pgd = pgd_offset(tsk->mm,address);
	pmd = pmd_alloc(pgd, address);
	if (!pmd) {
		free_page(page);
		oom(tsk);
		return 0;
	}
	pte = pte_alloc(pmd, address);
	if (!pte) {
		free_page(page);
		oom(tsk);
		return 0;
	}
	if (!pte_none(*pte)) {
		printk("put_dirty_page: page already exists\n");
		free_page(page);
		return 0;
	}
	flush_page_to_ram(page);
	set_pte(pte, pte_mkwrite(pte_mkdirty(mk_pte(page, PAGE_COPY))));
/* no need for invalidate */
	return page;
}
Example #19
void show_mem(void)
{
	struct sysinfo si;
	int i,free = 0,total = 0,reserved = 0;
	int shared = 0;

	si_swapinfo(&si);

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", si.freeswap >> 10);
	i = MAP_NR(high_memory);
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (!mem_map[i].count)
			free++;
		else
			shared += mem_map[i].count-1;
	}
	printk("%d pages of RAM\n",total);
	printk("%d free pages\n",free);
	printk("%d reserved pages\n",reserved);
	printk("%d pages shared\n",shared);
	show_buffers();
#ifdef CONFIG_INET
	show_net_buffers();
#endif
}
Example #20
void si_meminfo(struct sysinfo *val)
{
#if 0
	int i;

	i = MAP_NR(high_memory);
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = buffermem;
	while (i-- > 0)  {
		if (PageReserved(mem_map+i))
			continue;
		val->totalram++;
		if (!mem_map[i].count)
			continue;
		val->sharedram += mem_map[i].count-1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
	return;
#else
	extern struct vm_statistics osfmach3_vm_stats;

	osfmach3_update_vm_info();
	val->totalram = osfmach3_mem_size;
	val->freeram = osfmach3_vm_stats.free_count << PAGE_SHIFT;
	val->bufferram = buffermem;
	val->sharedram = 0;
#endif
}
Example #21
void __init mem_init(void)
{
	int codek = 0, datak = 0, initk = 0;
	unsigned long tmp;
	extern char _etext, _stext, __init_begin, __init_end, _end;
	unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
	unsigned long end_mem   = memory_end; /* DAVIDM - this must not include kernel stack at top */

#ifdef DEBUG
	printk(KERN_DEBUG "Mem_init: start=%lx, end=%lx\n", start_mem, end_mem);
#endif

	end_mem &= PAGE_MASK;
	high_memory = (void *) end_mem;

	start_mem = PAGE_ALIGN(start_mem);
	max_mapnr = num_physpages = MAP_NR(high_memory);

	/* this will put all memory onto the freelists */
	totalram_pages = free_all_bootmem();

	codek = (&_etext - &_stext) >> 10;
	datak = (&_end - &_etext) >> 10;
	initk = (&__init_end - &__init_begin) >> 10;

	tmp = nr_free_pages() << PAGE_SHIFT;
	printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n",
	       tmp >> 10,
	       (&_end - &_stext) >> 10,
	       (rom_length > 0) ? ((rom_length >> 10) - codek) : 0,
	       rom_length >> 10,
	       codek,
	       datak
	       );
}
Example #22
/*
 * Try to free up some pages by shrinking the buffer-cache
 *
 * Priority tells the routine how hard to try to shrink the
 * buffers: 3 means "don't bother too much", while a value
 * of 0 means "we'd better get some free pages now".
 */
int shrink_buffers(unsigned int priority)
{
	struct buffer_head *bh;
	int i;

	if (priority < 2)
		sync_buffers(0,0);
	bh = free_list;
	i = nr_buffers >> priority;
	for ( ; i-- > 0 ; bh = bh->b_next_free) {
		if (bh->b_count ||
		    (priority >= 5 &&
		     mem_map[MAP_NR((unsigned long) bh->b_data)] > 1)) {
			put_last_free(bh);
			continue;
		}
		if (!bh->b_this_page)
			continue;
		if (bh->b_lock)
			if (priority)
				continue;
			else
				wait_on_buffer(bh);
		if (bh->b_dirt) {
			bh->b_count++;
			ll_rw_block(WRITEA, 1, &bh);
			bh->b_count--;
			continue;
		}
		if (try_to_free(bh, &bh))
			return 1;
	}
	return 0;
}
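
The priority argument from the comment maps directly onto how much of the free list gets scanned and whether dirty buffers are synced first; for example (nr_buffers = 1024 is a hypothetical figure):

/* priority 3 -> i = 1024 >> 3 = 128 buffers examined, no sync_buffers()
 * priority 1 -> i = 1024 >> 1 = 512, sync_buffers(0,0) called first
 * priority 0 -> i = 1024 >> 0 = 1024, sync_buffers(0,0) called first
 */
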
Example #23
/*
 * page not present ... go through shm_pages
 */
static unsigned long shm_nopage(struct vm_area_struct * shmd, unsigned long address, int no_share)
{
    pte_t pte;
    struct shmid_kernel *shp;
    unsigned int id, idx;

    id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
    idx = (address - shmd->vm_start + shmd->vm_offset) >> PAGE_SHIFT;

#ifdef DEBUG_SHM
    if (id > max_shmid) {
        printk ("shm_nopage: id=%d too big. proc mem corrupted\n", id);
        return 0;
    }
#endif
    shp = shm_segs[id];

#ifdef DEBUG_SHM
    if (shp == IPC_UNUSED || shp == IPC_NOID) {
        printk ("shm_nopage: id=%d invalid. Race.\n", id);
        return 0;
    }
#endif
    /* This can occur on a remap */

    if (idx >= shp->shm_npages) {
        return 0;
    }

    pte = __pte(shp->shm_pages[idx]);
    if (!pte_present(pte)) {
        unsigned long page = get_free_page(GFP_USER);
        if (!page)
            return -1;
        pte = __pte(shp->shm_pages[idx]);
        if (pte_present(pte)) {
            free_page (page); /* doesn't sleep */
            goto done;
        }
        if (!pte_none(pte)) {
            rw_swap_page_nocache(READ, pte_val(pte), (char *)page);
            pte = __pte(shp->shm_pages[idx]);
            if (pte_present(pte))  {
                free_page (page); /* doesn't sleep */
                goto done;
            }
            swap_free(pte_val(pte));
            shm_swp--;
        }
        shm_rss++;
        pte = pte_mkdirty(mk_pte(page, PAGE_SHARED));
        shp->shm_pages[idx] = pte_val(pte);
    } else
        --current->maj_flt;  /* was incremented in do_no_page */

done:	/* pte_val(pte) == shp->shm_pages[idx] */
    current->min_flt++;
    atomic_inc(&mem_map[MAP_NR(pte_page(pte))].count);
    return pte_page(pte);
}
Example #24
unsigned long scullc_vma_nopage(struct vm_area_struct *vma,
                                unsigned long address, int write)
{
    unsigned long offset = address - vma->vm_start + vma->vm_offset;
    ScullC_Dev *ptr, *dev = scullc_devices + MINOR(vma->vm_inode->i_rdev);
    void *pageptr = NULL; /* default to "missing" */

    if (offset >= dev->size) return 0; /* out of range: send SIGBUS */

    /*
     * Now retrieve the scullc device from the list,then the page.
     * Don't want to allocate: I'm too lazy. If the device has holes,
     * the process receives a SIGBUS when accessing the hole.
     */
    offset >>= PAGE_SHIFT; /* offset is a number of pages */
    for (ptr = dev; ptr && offset >= dev->qset;) {
        ptr = ptr->next;
        offset -= dev->qset;
    }
    if (ptr && ptr->data) pageptr = ptr->data[offset];
    if (!pageptr) return 0; /* hole or end-of-file: SIGBUS */

    /* got it, now increment the count */
    atomic_inc(&mem_map[MAP_NR(pageptr)].count);
    return (unsigned long)pageptr;
}
Example #25
/*
 * try_to_free() checks if all the buffers on this particular page
 * are unused, and free's the page if so.
 */
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	tmp = bh;
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		if (p == *bhp)
			*bhp = p->b_prev_free;
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	free_page(page);
	return !mem_map[MAP_NR(page)];
}
Example #26
int try_to_swap_out(unsigned long * table_ptr)
{
	unsigned long page;
	unsigned long swap_nr;

	page = *table_ptr;
	if (!(PAGE_PRESENT & page))
		return 0;
	if (page - LOW_MEM > PAGING_MEMORY)
		return 0;
	if (PAGE_DIRTY & page) {
		page &= 0xfffff000;
		if (mem_map[MAP_NR(page)] != 1)
			return 0;
		if (!(swap_nr = get_swap_page()))
			return 0;
		*table_ptr = swap_nr<<1;
		invalidate();
		write_swap_page(swap_nr, (char *) page);
		free_page(page);
		return 1;
	}
	*table_ptr = 0;
	invalidate();
	free_page(page);
	return 1;
}
Example #27
/* allocate user space mmapable block of memory in the kernel space */
void * rvmalloc(unsigned long size)
{
	void * mem;
	unsigned long adr, page;
        
#if LINUX_VERSION_CODE < 0x020300
	mem=vmalloc(size);
#else
	mem=vmalloc_32(size);
#endif
	if (mem) 
	{
		memset(mem, 0, size); /* Clear the ram out, no junk to the user */
	        adr=(unsigned long) mem;
		while (size > 0) 
                {
#if LINUX_VERSION_CODE < 0x020300
	                page = kvirt_to_phys(adr);
			mem_map_reserve(MAP_NR(phys_to_virt(page)));
#else
	                page = kvirt_to_pa(adr);
			mem_map_reserve(virt_to_page(__va(page)));
#endif
			adr+=PAGE_SIZE;
			size-=PAGE_SIZE;
		}
	}
	return mem;
}
Example #28
/* A simple wrapper so the base function doesn't need to enforce
 * that all swap pages go through the swap cache!
 */
void rw_swap_page(int rw, unsigned long entry, char *buf, int wait)
{
	struct page *page = mem_map + MAP_NR(buf);

	if (page->inode && page->inode != &swapper_inode)
		panic ("Tried to swap a non-swapper page");

	/*
	 * Make sure that we have a swap cache association for this
	 * page.  We need this to find which swap page to unlock once
	 * the swap IO has completed to the physical page.  If the page
	 * is not already in the cache, just overload the offset entry
	 * as if it were: we are not allowed to manipulate the inode
	 * hashing for locked pages.
	 */
	if (!PageSwapCache(page)) {
		printk("VM: swap page is not in swap cache\n");
		return;
	}
	if (page->offset != entry) {
		printk ("swap entry mismatch");
		return;
	}
	rw_swap_page_base(rw, entry, page, wait);
}
Example #29
/* reserve a range of pages in mem_map[] */
static void reserve_region( unsigned long addr, unsigned long end )
{
	mem_map_t *mapp = &mem_map[MAP_NR(addr)];

	for( ; addr < end; addr += PAGE_SIZE, ++mapp )
		set_bit( PG_reserved, &mapp->flags );
}
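
Several of the examples above (exit_ringbuf, dmafree, AndorUnlockDMABuffers) perform the inverse of this. A matching sketch, hypothetical but following the same mem_map indexing convention:

/* Hypothetical counterpart to reserve_region(): clear PG_reserved for
 * every page in [addr, end). */
static void unreserve_region( unsigned long addr, unsigned long end )
{
	mem_map_t *mapp = &mem_map[MAP_NR(addr)];

	for( ; addr < end; addr += PAGE_SIZE, ++mapp )
		clear_bit( PG_reserved, &mapp->flags );
}
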
Example #30
static inline void *dmaalloc(size_t size)
{
	unsigned long addr;

	if (size == 0) {
		return NULL;
	}
	addr = __get_dma_pages(GFP_KERNEL, __get_order(size));
	if (addr) {
		int i;

		for (i = MAP_NR(addr); i < MAP_NR(addr+size); i++) {
			mem_map_reserve(i);
		}
	}
	return (void *)addr;
}
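
dmaalloc() here and dmafree() in Example #12 form one allocate/release pair. A minimal usage sketch (the driver names and buffer size are made up for illustration):

/* Illustrative only: obtain a reserved, DMA-able buffer at init time
 * and release it on exit, keeping the size around for the free. */
#define MY_DMA_BUFSIZE	(16 * 1024)	/* hypothetical size */

static void *my_dma_buf;

static int my_driver_init(void)
{
	my_dma_buf = dmaalloc(MY_DMA_BUFSIZE);
	if (my_dma_buf == NULL)
		return -ENOMEM;
	return 0;
}

static void my_driver_cleanup(void)
{
	dmafree(my_dma_buf, MY_DMA_BUFSIZE);
	my_dma_buf = NULL;
}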