Example #1
seg_t mm_dup(seg_t base)
{
    register struct malloc_hole *o, *m;
    size_t i;

    debug("MALLOC: mm_dup()\n");
    o = find_hole(&memmap, base);
    if (o->flags != HOLE_USED)
	panic("bad/swapped hole");

#ifdef CONFIG_SWAP

    while ((m = best_fit_hole(&memmap, o->extent)) == NULL) {
	seg_t s = swap_strategy(NULL);
	if (!s || swap_out(s) == -1)
	    return NULL;
    }

#else

    m = best_fit_hole(&memmap, o->extent);
    if (m == NULL)
	return NULL;

#endif

    split_hole(&memmap, m, o->extent);
    m->flags = HOLE_USED;
    m->refcount = 1;
    i = (o->extent << 4);
    fmemcpy(m->page_base, 0, o->page_base, 0, (__u16) i);
    return m->page_base;
}
Example #2
seg_t mm_alloc(segext_t pages)
{
    /*
     *      Which hole fits best ?
     */
    register struct malloc_hole *m;

#ifdef CONFIG_SWAP

    while ((m = best_fit_hole(&memmap, pages)) == NULL) {
	seg_t s = swap_strategy(NULL);
	if (s == NULL || swap_out(s) == -1)
	    return NULL;
    }

#else

    m = best_fit_hole(&memmap, pages);
    if (m == NULL)
	return NULL;

#endif

    /*
     *      The hole is (probably) too big
     */

    split_hole(&memmap, m, pages);
    m->flags = HOLE_USED;
    m->refcount = 1;

    return m->page_base;
}
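
Examples #1 and #2 repeat the same allocate-or-swap retry loop. A minimal sketch of that pattern factored into a single helper is given below; the name alloc_hole_or_swap is hypothetical, and the sketch assumes the ELKS-style best_fit_hole(), swap_strategy() and swap_out() interfaces used above.

#ifdef CONFIG_SWAP
/* Hypothetical helper: retry the best-fit search, swapping one segment out
 * after each failed attempt, until a hole fits or no victim remains. */
static struct malloc_hole *alloc_hole_or_swap(segext_t pages)
{
    register struct malloc_hole *m;

    while ((m = best_fit_hole(&memmap, pages)) == NULL) {
	seg_t s = swap_strategy(NULL);
	if (!s || swap_out(s) == -1)
	    return NULL;	/* nothing left to swap out */
    }
    return m;
}
#endif

With such a helper, both mm_dup() and mm_alloc() would reduce to the split_hole()/HOLE_USED bookkeeping that follows the search.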
Example #3
paddr_t page_alloc()
{
	struct coremap *local_coremap = coremap_list ;

	lock_acquire(coremap_lock);
	while(local_coremap->next != NULL)
	{
		if((local_coremap->status & 0x2) == 2 )
		{
			local_coremap->timestamp = localtime ;
			localtime++ ;
			local_coremap->pages=1;
			local_coremap->status = 1 ;
			local_coremap->as = NULL ;
			bzero((void *)PADDR_TO_KVADDR(local_coremap->pa),PAGE_SIZE);
			lock_release(coremap_lock);
			return local_coremap->pa ;
		}

		local_coremap = local_coremap->next ;
	}
	paddr_t pa = 0 ;
	pa = swap_out(1) ;
	lock_release(coremap_lock);
	return pa ;

}
Example #4
/*
 * Get physical address of first (actually last :-) free page, and mark it
 * used. If no free pages left, return 0.
 */
unsigned long get_free_page(void)
{
	unsigned long result;

repeat:
	__asm__("std ; repne ; scasb\n\t"
		"jne 1f\n\t"
		"movb $1,1(%%edi)\n\t"
		"sall $12,%%ecx\n\t"
		"addl %2,%%ecx\n\t"
		"movl %%ecx,%%edx\n\t"
		"movl $1024,%%ecx\n\t"
		"leal 4092(%%edx),%%edi\n\t"
		"rep ; stosl\n\t"
		"movl %%edx,%%eax\n"
		"1:\tcld"
		:"=a" (result)
		:"0" (0),"i" (LOW_MEM),"c" (PAGING_PAGES),
		"D" (mem_map+PAGING_PAGES-1)
		:"di","cx","dx");
	if (result >= HIGH_MEMORY)
		goto repeat;
	if ((result && result < LOW_MEM) || (result & 0xfff)) {
		printk("weird result: %08x\n",result);
		result = 0;
	}
	if (!result && swap_out())
		goto repeat;
	return result;
}
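
The inline assembly above is compact but hard to read. The following is a rough C rendering of what it does, for illustration only (it assumes the same mem_map, LOW_MEM and PAGING_PAGES globals and is not the kernel's actual code): scan mem_map backwards for a free entry, mark it used, turn the index into a physical address, and zero the 4 KiB page.

/* Illustrative C equivalent of the "std; repne; scasb" / "rep; stosl" sequence. */
static unsigned long get_free_page_c(void)
{
	int i;

	for (i = PAGING_PAGES - 1; i >= 0; i--) {	/* backward scan of mem_map */
		if (mem_map[i] == 0) {
			unsigned long addr = LOW_MEM + ((unsigned long) i << 12);

			mem_map[i] = 1;			/* mark the page used */
			memset((void *) addr, 0, 4096);	/* clear the page */
			return addr;
		}
	}
	return 0;					/* no free page left */
}

The swap_out() retry and the sanity checks on result would stay in the caller exactly as above.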
Example #5
void do_pagefault(struct pf_msg *pmsg)
{
   int frame;
   int page;

   verify_vm_access(pmsg->page_idx, pmsg->word_idx);

   frame = find_pgt_index();
   page = find_vm_index(frame);

   lock_memory();
   lock_vm();
   lock_pagetable();

   //printf("Swapping out frame %d\n", frame);

   swap_out(page, frame);
   usleep(F);

   //printf("Swapping in page %d\n", pmsg->page_idx);
   swap_in(pmsg->page_idx, frame);
   usleep(F);

   page_table[page] = -1;
   page_table[pmsg->page_idx] = frame;

   unlock_memory();
   unlock_vm();
   unlock_pagetable();
}
Example #6
void asignar_marco(pagina_t **pagina, segmento_t **segmento, uint32_t pid){
	marco_t* marco=NULL;
	marco = buscar_marco_libre();

	//If there is no free frame, swap
	//If there is a free frame, assign it to the page
	if((*pagina)->en_disco){
		swap_out(pid, (*segmento)->id, pagina);
	}else{
		if(marco == NULL){
			swap_in(pagina, (*segmento)->id, pid);
		}else{
			(*pagina)->marco=marco->id;
			(*pagina)->tiene_marco= true;
			marco->id_proceso = pid;
			marco->ocupado= true;

			loggear_trace("Se asigno el marco %d a la pagina %d del segmento %d del proceso %d.",
					marco->id, (*pagina)->id, (*segmento)->id, pid);

			if(cantidad_marcos_libre() == 0){
				loggear_info("Espacio de memoria principal lleno");
			}
		}
	}
}
Example #7
int page_fault(page_table_leaf_t *page_entry) {
    frame_t victim = 0;

    stats.page_faults++;

    /*
     * If there are no free frames, then we need to swap something out
     */
    if( next_free_frame < 0 ) {
        /*
         * Pick a victim to swap out
         */
        page_replacement_alg(&victim);

        /*
         * Swap out the page
         */
        swap_out(victim);
    }

    /*
     * Swap-In the page
     */
    swap_in(page_entry);

    return 0;
}
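
page_fault() above delegates victim selection to page_replacement_alg(). A minimal sketch of one such policy (plain FIFO over the physical frames) is shown below; NUM_FRAMES and the static hand are assumptions made for the illustration, not part of the original code.

/* Hypothetical FIFO victim selector for the page_fault() example above. */
static frame_t fifo_hand = 0;

static void page_replacement_alg(frame_t *victim)
{
    *victim = fifo_hand;			/* evict the frame loaded longest ago */
    fifo_hand = (fifo_hand + 1) % NUM_FRAMES;	/* NUM_FRAMES: assumed frame count */
}

Any policy with the same out-parameter signature (LRU, clock, random) could be dropped in without changing page_fault().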
Example #8
/*! Use the clock algorithm to select a frame to evict and execute the
    eviction. */
void * frame_evict(enum palloc_flags flag) {
    struct list_elem *ce;
    struct thread *ct;
    struct frame_table_entry *cf;
    int i;
    
    if (f_table.lock.holder != thread_current())
        lock_acquire(&f_table.lock);    /* Get frame table lock */
    if (list_empty(&f_table.table)) {
        lock_release(&f_table.lock);    /* Nothing to evict */
        return NULL;
    }
    f_table.hand = f_table.hand % list_size(&f_table.table);
    ce = list_begin(&f_table.table);    /* Begin iteration */
    for (i = 0; i < f_table.hand; i++) {
        /* Find where the "hand" points to: that is where the 
           iteration starts */
        ce = list_next(ce);
    }
    while (true) {
        cf = list_entry(ce, struct frame_table_entry, elem);
        ct = cf->owner;
        if (!cf->spt->pinned) {
            /* If pinned, skip this frame */
            if (pagedir_is_accessed(ct->pagedir, cf->spt->upage))
                /* If accessed, clear accessed bit */
                pagedir_set_accessed(ct->pagedir, cf->spt->upage, false);
            else {
                /* Swap out the page! */
                if (cf->spt->type != SPT_MMAP) {
                    cf->spt->fr = NULL;
                    cf->spt->type = SPT_SWAP;
                    cf->spt->swap_index = swap_out(cf->physical_addr);
                }
                /* Write the frame back to the map'd file */
                else if (cf->spt->type == SPT_MMAP &&
                         pagedir_is_dirty(ct->pagedir, cf->spt->upage)) {
                    cf->spt->fr = NULL;
                    file_write_at(cf->spt->file, cf->physical_addr, 
                                  cf->spt->read_bytes, cf->spt->ofs);
                }
                /* Clear the frame */
                list_remove(ce);
                pagedir_clear_page(ct->pagedir, cf->spt->upage);
                palloc_free_page(cf->physical_addr);
                free(cf);
                lock_release(&f_table.lock);
                return palloc_get_page(flag);
            }
        }
        ce = list_next(ce);
        ++f_table.hand;
        if (ce == list_end(&f_table.table)) { /* Iterate circularly */
            ce = list_begin(&f_table.table);
            f_table.hand = 0;
        }
    }
}
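
In Pintos-style designs such as Example #8, frame_evict() is normally the fallback taken when palloc_get_page() returns NULL. A hedged sketch of that wrapper follows; frame_get_page(), struct spt_entry and add_frame_entry() are hypothetical names, not part of the code above.

/* Hypothetical allocation wrapper around frame_evict() from Example #8. */
void *frame_get_page(enum palloc_flags flag, struct spt_entry *spt)
{
    void *kpage = palloc_get_page(flag);

    if (kpage == NULL)
        kpage = frame_evict(flag);	/* evict a victim, which re-allocates for us */
    if (kpage != NULL)
        add_frame_entry(kpage, spt);	/* record the frame/page pair in the frame table */
    return kpage;
}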
Example #9
/*
 *	Do an iteration of the swapper
 */
int do_swapper_run(register struct task_struct *swapin_target)
{
    while (make_runnable(swapin_target) == -1) {
	seg_t t = swap_strategy(swapin_target);
	if (!t || swap_out(t) == -1)
	    return -1;
    }
    return 0;
}
Example #10
void page_replacement(void *vaddr)
{
  uint32_t *paddr,*lru_page;
  
  if ((paddr = palloc_get_page(PAL_USER)) != NULL)
    swap_in(vaddr, paddr);
  else
  {
    lru_page = lru_get_page();

    swap_out(lru_page);
    swap_in(vaddr,lru_page);
  }
}
Example #11
void stack_growth(void *vaddr){
  uint32_t *paddr, *lru_page;

  if ((paddr = palloc_get_page(PAL_ZERO)) != NULL) {
    fte_create(paddr, false);
    install_page_ext(pg_round_down(vaddr), paddr, true);
    set_page_valid(pg_round_down(vaddr), paddr);
  }
  else
  {
    lru_page = lru_get_page();

    swap_out(lru_page);
    set_page_valid(pg_round_down(vaddr), lru_page);
  }
}
Example #12
paddr_t user_page_alloc(){
	struct coremap *local_coremap = coremap_list ;

	while(local_coremap!=NULL){
		if((local_coremap->status & 0x2) == 2  ){
			local_coremap->timestamp = localtime;
			localtime++;
			local_coremap->status = 0 ;
			local_coremap->pages=1;
			local_coremap->as = curthread->t_addrspace ;
			bzero((void *)PADDR_TO_KVADDR(local_coremap->pa),PAGE_SIZE);
			return local_coremap->pa;
		}
		local_coremap = local_coremap->next ;
	}
	paddr_t pa = swap_out(0) ;
	return pa ;
}
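
Examples #3 and #12 stamp every allocated coremap entry with a logical time, which suggests that swap_out() chooses its victim by oldest timestamp. A sketch of that selection is given below as an assumption about the design, not as the actual OS/161 assignment code.

/* Hypothetical LRU victim selection over the coremap used in Examples #3 and #12:
 * return the user-owned entry with the smallest (oldest) timestamp. */
static struct coremap *pick_swap_victim(void)
{
	struct coremap *c = coremap_list;
	struct coremap *victim = NULL;

	while (c != NULL) {
		/* status 0 marks a user page in these examples; kernel pages are skipped */
		if (c->status == 0 && (victim == NULL || c->timestamp < victim->timestamp))
			victim = c;
		c = c->next;
	}
	return victim;
}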
Example #13
/*
 * Swap out an object. Only the program is swapped, not the 'object_t'.
 *
 * marion - the swap seems to corrupt the function table
 */
int swap(object_t *  ob)
{
    /* the simuls[] table uses pointers to the functions so the simul_efun
     * program cannot be relocated.  locate_in() could be changed to
     * correct this or simuls[] could use offsets, but it doesn't seem
     * worth it just to get the simul_efun object to swap.  Maybe later.
     *
     * Ditto the master object and master_applies[].  Mudlibs that have
     * a period TIME_TO_SWAP between successive master applies must be
     * extremely rare ...
     */
    if (ob == simul_efun_ob || ob == master_ob) return 0;
    if (ob->flags & O_DESTRUCTED)
	return 0;
    debug(d_flag, ("Swap object /%s (ref %d)", ob->name, ob->ref));

    if (ob->prog->line_info)
	swap_line_numbers(ob->prog);	/* not always done before we get here */
    if ((ob->flags & O_HEART_BEAT) || (ob->flags & O_CLONE)) {
	debug(d_flag, ("  object not swapped - heart beat or cloned."));
	return 0;
    }
    if (ob->prog->ref > 1 || ob->interactive) {
	debug(d_flag, ("  object not swapped - inherited or interactive or in apply_low() cache."));

	return 0;
    }
    if (ob->prog->func_ref > 0) {
	debug(d_flag, ("  object not swapped - referenced by functions."));

	return 0;
    }
    locate_out(ob->prog);	/* relocate the internal pointers */
    if (swap_out((char *) ob->prog, ob->prog->total_size, (int *) &ob->swap_num)) {
	num_swapped++;
	free_prog(ob->prog, 0);	/* Do not free the strings */
	ob->prog = 0;
	ob->flags |= O_SWAPPED;
	return 1;
    } else {
	locate_in(ob->prog);
	return 0;
    }
}
Example #14
int main(int argc, char *argv[])
{

   do
   {
        prev_ptr = NIL_HOLE;
        hp = hole_head;
        candidate = NIL_HOLE;
        last_hole_prev = NIL_HOLE;

        /* Best-fit scan: pick the smallest hole below swap_base that still fits. */
        while (hp != NIL_HOLE && hp->h_base < swap_base)
        {
            if (hp->h_len >= clicks &&
                (candidate == NIL_HOLE || hp->h_len < candidate->h_len))
            {
                last_hole_prev = prev_ptr;  /* predecessor of the best hole so far */
                candidate = hp;
            }
            prev_ptr = hp;
            hp = hp->h_next;
        }
        if (candidate != NIL_HOLE)
        {
            hp = candidate; prev_ptr = last_hole_prev;
            /* We found a hole that is big enough.  Use it. */
      		old_base = hp->h_base;	/* remember where it started */
      		hp->h_base += clicks;	/* bite a piece off */
      		hp->h_len -= clicks;	/* ditto */

      		// Remember new high watermark of used memory.
      		if(hp->h_base > high_watermark)
      		high_watermark = hp->h_base;

      		// Delete the hole if used up completely.
      		if (hp->h_len == 0) del_slot(prev_ptr, hp);

      		// Return the start address of the acquired block.
      		return(old_base);
         }

    }while(swap_out());     //try to swap out some other process
return(NO_MEM);
}
Example #15
void *
frame_evict (enum palloc_flags flags)
{
    lock_acquire (&frame_table_lock);
    struct list_elem *e = list_begin (&frame_table);

    while (true) {
        struct frame_entry *frame_entry = list_entry (e, struct frame_entry, elem);
        struct SP_entry *page_entry = frame_entry->page_entry;
        if (!page_entry->pinned) {
            struct thread *t = frame_entry->thread;
            if (pagedir_is_accessed (t->pagedir, page_entry->page)) {
                pagedir_set_accessed (t->pagedir, page_entry->page, false);
            } else {
                if (pagedir_is_dirty (t->pagedir, page_entry->page) ||
                    page_entry->type == SP_SWAP) {
                    if (page_entry->type == SP_MMAP) {
                        lock_acquire (&filesys_lock);
                        file_write_at (page_entry->file, frame_entry->frame,
                                       page_entry->read_bytes,
                                       page_entry->offset);
                        lock_release (&filesys_lock);
                    } else {
                        page_entry->type = SP_SWAP;
                        page_entry->swap_index = swap_out (frame_entry->frame);
                    }
                }
                page_entry->is_loaded = false;
                list_remove (&frame_entry->elem);
                pagedir_clear_page (t->pagedir, page_entry->page);
                palloc_free_page (frame_entry->frame);
                free (frame_entry);
                lock_release (&frame_table_lock);
                return palloc_get_page (flags);
            }
        }
        e = list_next (e);
        if (e == list_end (&frame_table)) {
            e = list_begin (&frame_table);
        }
    }
}
Example #16
/*
 * Swap out line number information.
 */
int
swap_line_numbers(program_t *  prog)
{
    int size;

    if (!prog || !prog->line_info)
	return 0;

    debug(d_flag, ("Swap line numbers for /%s", prog->name));

    size = prog->file_info[0];
    if (swap_out((char *) prog->file_info, size,
		 &prog->line_swap_index)) {
	line_num_bytes_swapped += size;
	FREE(prog->file_info);
	prog->file_info = 0;
	prog->line_info = 0;
	return 1;
    }
    return 0;
}
Example #17
void *
swap (void)
{
  struct frame_entry *victim_frame = find_victim();
  
  struct spage_entry *victim_spage_entry = victim_frame->spe;
  uint32_t sectornum = swap_out(victim_frame->frame);

  pagedir_clear_page(victim_frame->t->pagedir,victim_spage_entry->uaddr);
  
  victim_spage_entry->indisk = true;
  victim_spage_entry->sec_no = sectornum;
  
  palloc_free_page(victim_frame->frame);
  falloc_free_frame(victim_frame->frame);

  void *kpage = palloc_get_page(PAL_USER|PAL_ZERO);
  ASSERT(kpage != NULL);
  
  return kpage;
}
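
Example #17 stores the sector number returned by swap_out() in the supplemental page entry. A minimal sketch of the complementary fault-time path is shown below; swap_in(sec_no, kpage) and install_page() are assumed helpers with the obvious meaning, and the writable flag is a guess.

/* Hypothetical swap-in path matching Example #17. */
bool load_from_swap(struct spage_entry *spe)
{
  void *kpage = palloc_get_page(PAL_USER | PAL_ZERO);
  if (kpage == NULL)
    kpage = swap();			/* evict a victim exactly as in Example #17 */

  swap_in(spe->sec_no, kpage);		/* read the page back from its swap slots */
  spe->indisk = false;

  return install_page(spe->uaddr, kpage, true);	/* re-map the user address */
}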
Example #18
/* The implementation of the LRU 'clock' algorithm follows. We make use of the PTE's accessed bit and dirty bit:
 on any read or write to a page  ==> accessed bit = 1;
 on any write ==> dirty bit = 1;

 Step 1: We walk the pages in circular order, i.e., if we hit the end of the frame list,
 we circle back to the start and continue processing.

 Step 2: If the accessed bit is 'accessed', turn it to 'not accessed', skip the page and look up the next one in the list.

 Step 3: If the accessed bit is 'not accessed', we can proceed with our page replacement. */
void evict_page() {
	struct list_elem *e;
	int count = 0;
	lock_acquire(&ftable_lock);
	for (e = list_begin(&frame_list); e != list_end(&frame_list);
			e = list_next(e)) {
		struct frame *frame = list_entry(e, struct frame, frame_list_elem);
		// printf ("\n%d , %d", count, list_size(&frame_list) - 1);
		if (count != list_size(&frame_list) - 1) {
			// get the accessed flag for the current page
			bool accessed_flag = pagedir_is_accessed(
					frame->page->frame_holder_thread->pagedir, frame->page->addr);
			if (accessed_flag)
				pagedir_set_accessed(frame->page->frame_holder_thread->pagedir,
						frame->page->addr, !accessed_flag);
			else {
				// we need to page replace.. i.e.,
				// step 1: remove the existing page from the frame (swap_out call)
				// step 2: remove the existing page from the page directory
				// step 3: set the accessed flag of the page to 'accessed'
				// step 4: free the page and free the frame, for subsequent use
				list_remove(e);
				swap_out(frame->page);
				frame->page->swap_flag = 1;
				pagedir_clear_page(frame->page->frame_holder_thread->pagedir,
						frame->page->addr);
				palloc_free_page(frame->base);
				free(frame);
				// frame->page->swap_flag = 1;
				break;
			}
			count++;
		} else {
			count = 0;
			e = list_begin(&frame_list);
		}
	}
	lock_release(&ftable_lock);
}
Example #19
int try_to_free_pages_zone(zone_t *classzone, unsigned int gfp_mask)
{
	gfp_mask = pf_gfp_mask(gfp_mask);

	for (;;) {
		int tries = vm_passes;
		int failed_swapout = !(gfp_mask & __GFP_IO);
		int nr_pages = SWAP_CLUSTER_MAX;

		do {
			nr_pages = shrink_caches(classzone, gfp_mask, nr_pages, &failed_swapout);
			if (nr_pages <= 0)
				return 1;
			shrink_dcache_memory(vm_vfs_scan_ratio, gfp_mask);
			shrink_icache_memory(vm_vfs_scan_ratio, gfp_mask);
#ifdef CONFIG_QUOTA
			shrink_dqcache_memory(vm_vfs_scan_ratio, gfp_mask);
#endif
			if (!failed_swapout)
				failed_swapout = !swap_out(classzone);
		} while (--tries);

#ifdef	CONFIG_OOM_KILLER
	out_of_memory();
#else
	if (likely(current->pid != 1))
		break;
	if (!check_classzone_need_balance(classzone))
		break;

	__set_current_state(TASK_RUNNING);
	yield();
#endif
	}

	return 0;
}
Example #20
/* Evicts the page held in F. */
void evict(struct frame *f) {
	uint32_t *pd = thread_current()->pagedir;
    
  ASSERT(lock_held_by_current_thread(&f->evicting));
//  lock_acquire(&f->page->moving); /* You should never block here. */
  ASSERT(f->page != NULL);
  ASSERT(lock_held_by_current_thread(&f->page->moving));

	/* Clear page to prevent further writes. */
	pagedir_clear_page(f->page->thread->pagedir, f->page->vaddr);

	/* If the page had been written to, the changes must be saved. */
	if(pagedir_is_dirty(f->page->thread->pagedir, f->page->vaddr)) {
		if(f->page->status == PAGE_MAPPED) {
			/* If the page is mapped, write changes back to file. */
			bool have_lock = lock_held_by_current_thread(&fs_lock);

			if(!have_lock)
				lock_acquire(&fs_lock);
			if(file_write_at(f->page->file, f->addr, f->page->read_bytes, f->page->ofs) != f->page->read_bytes)
				PANIC("Didn't write expected %d bytes.", f->page->read_bytes);
			if(!have_lock)
				lock_release(&fs_lock);

		} else {
			/* Write page to swap. */
			ASSERT(pagedir_is_writeable(f->page->thread->pagedir, f->page->vaddr));
			ASSERT(f->page->writeable);
			swap_out(f);
		}
	}

	/* Unlink the page and frame. */
	f->page->current_frame = NULL;
	lock_release(&f->page->moving);
	f->page = NULL;
}
Example #21
static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int priority)
{
	struct list_head * entry;
	int max_scan = nr_inactive_pages / priority;
	int max_mapped = min((nr_pages << (10 - priority)), max_scan / 10);

	spin_lock(&pagemap_lru_lock);
	while (--max_scan >= 0 && (entry = inactive_list.prev) != &inactive_list) {
		struct page * page;

		/* lock depth is 1 or 2 */
		if (unlikely(current->need_resched)) {
			spin_unlock(&pagemap_lru_lock);
			__set_current_state(TASK_RUNNING);
			schedule();
			spin_lock(&pagemap_lru_lock);
			continue;
		}

		page = list_entry(entry, struct page, lru);

		if (unlikely(!PageLRU(page)))
			BUG();
		if (unlikely(PageActive(page)))
			BUG();

		list_del(entry);
		list_add(entry, &inactive_list);

		/*
		 * Zero page counts can happen because we unlink the pages
		 * _after_ decrementing the usage count..
		 */
		if (unlikely(!page_count(page)))
			continue;

		if (!memclass(page->zone, classzone))
			continue;

		/* Racy check to avoid trylocking when not worthwhile */
		if (!page->buffers && (page_count(page) != 1 || !page->mapping))
			goto page_mapped;

		/*
		 * The page is locked. IO in progress?
		 * Move it to the back of the list.
		 */
		if (unlikely(TryLockPage(page))) {
			if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);
				wait_on_page(page);
				page_cache_release(page);
				spin_lock(&pagemap_lru_lock);
			}
			continue;
		}

		if ((PageDirty(page) || DelallocPage(page)) && is_page_cache_freeable(page) && page->mapping) {
			/*
			 * It is not critical here to write it only if
			 * the page is unmapped because any direct writer
			 * like O_DIRECT would set the PG_dirty bitflag
			 * on the physical page after having successfully
			 * pinned it and after the I/O to the page is finished,
			 * so the direct writes to the page cannot get lost.
			 */
			int (*writepage)(struct page *);

			writepage = page->mapping->a_ops->writepage;
			if ((gfp_mask & __GFP_FS) && writepage) {
				ClearPageDirty(page);
				SetPageLaunder(page);
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);

				writepage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 */
		if (page->buffers) {
			spin_unlock(&pagemap_lru_lock);

			/* avoid freeing a locked page */
			page_cache_get(page);

			if (try_to_release_page(page, gfp_mask)) {
				if (!page->mapping) {
					/*
					 * We must not allow an anon page
					 * with no buffers to be visible on
					 * the LRU, so we unlock the page after
					 * taking the lru lock
					 */
					spin_lock(&pagemap_lru_lock);
					UnlockPage(page);
					__lru_cache_del(page);

					/* effectively free the page here */
					page_cache_release(page);

					if (--nr_pages)
						continue;
					break;
				} else {
					/*
					 * The page is still in pagecache so undo the stuff
					 * before the try_to_release_page since we've not
					 * finished and we can now try the next step.
					 */
					page_cache_release(page);

					spin_lock(&pagemap_lru_lock);
				}
			} else {
				/* failed to drop the buffers so stop here */
				UnlockPage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		spin_lock(&pagecache_lock);

		/*
		 * this is the non-racy check for busy page.
		 */
		if (!page->mapping || !is_page_cache_freeable(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
page_mapped:
			if (--max_mapped >= 0)
				continue;

			/*
			 * Alert! We've found too many mapped pages on the
			 * inactive list, so we start swapping out now!
			 */
			spin_unlock(&pagemap_lru_lock);
			swap_out(priority, gfp_mask, classzone);
			return nr_pages;
		}

		/*
		 * It is critical to check PageDirty _after_ we made sure
		 * the page is freeable, so it is not in use by anybody.
		 */
		if (PageDirty(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
			continue;
		}

		/* point of no return */
		if (likely(!PageSwapCache(page))) {
			__remove_inode_page(page);
			spin_unlock(&pagecache_lock);
		} else {
			swp_entry_t swap;
			swap.val = page->index;
			__delete_from_swap_cache(page);
			spin_unlock(&pagecache_lock);
			swap_free(swap);
		}

		__lru_cache_del(page);
		UnlockPage(page);

		/* effectively free the page here */
		page_cache_release(page);

		if (--nr_pages)
			continue;
		break;
	}
	spin_unlock(&pagemap_lru_lock);

	return nr_pages;
}
Example #22
static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int * failed_swapout)
{
	struct list_head * entry;
	int max_scan = (classzone->nr_inactive_pages + classzone->nr_active_pages) / vm_cache_scan_ratio;
	int max_mapped = vm_mapped_ratio * nr_pages;

	while (max_scan && classzone->nr_inactive_pages && (entry = inactive_list.prev) != &inactive_list) {
		struct page * page;

		if (unlikely(current->need_resched)) {
			spin_unlock(&pagemap_lru_lock);
			__set_current_state(TASK_RUNNING);
			schedule();
			spin_lock(&pagemap_lru_lock);
			continue;
		}

		page = list_entry(entry, struct page, lru);

		BUG_ON(!PageLRU(page));
		BUG_ON(PageActive(page));

		list_del(entry);
		list_add(entry, &inactive_list);

		/*
		 * Zero page counts can happen because we unlink the pages
		 * _after_ decrementing the usage count..
		 */
		if (unlikely(!page_count(page)))
			continue;

		if (!memclass(page_zone(page), classzone))
			continue;

		max_scan--;

		/* Racy check to avoid trylocking when not worthwhile */
		if (!page->buffers && (page_count(page) != 1 || !page->mapping))
			goto page_mapped;

		/*
		 * The page is locked. IO in progress?
		 * Move it to the back of the list.
		 */
		if (unlikely(TryLockPage(page))) {
			if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);
				wait_on_page(page);
				page_cache_release(page);
				spin_lock(&pagemap_lru_lock);
			}
			continue;
		}

		if (PageDirty(page) && is_page_cache_freeable(page) && page->mapping) {
			/*
			 * It is not critical here to write it only if
			 * the page is unmapped because any direct writer
			 * like O_DIRECT would set the PG_dirty bitflag
			 * on the physical page after having successfully
			 * pinned it and after the I/O to the page is finished,
			 * so the direct writes to the page cannot get lost.
			 */
			int (*writepage)(struct page *);

			writepage = page->mapping->a_ops->writepage;
			if ((gfp_mask & __GFP_FS) && writepage) {
				ClearPageDirty(page);
				SetPageLaunder(page);
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);

				writepage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 */
		if (page->buffers) {
			spin_unlock(&pagemap_lru_lock);

			/* avoid freeing a locked page */
			page_cache_get(page);

			if (try_to_release_page(page, gfp_mask)) {
				if (!page->mapping) {
					/*
					 * We must not allow an anon page
					 * with no buffers to be visible on
					 * the LRU, so we unlock the page after
					 * taking the lru lock
					 */
					spin_lock(&pagemap_lru_lock);
					UnlockPage(page);
					__lru_cache_del(page);

					/* effectively free the page here */
					page_cache_release(page);

					if (--nr_pages)
						continue;
					break;
				} else {
					/*
					 * The page is still in pagecache so undo the stuff
					 * before the try_to_release_page since we've not
					 * finished and we can now try the next step.
					 */
					page_cache_release(page);

					spin_lock(&pagemap_lru_lock);
				}
			} else {
				/* failed to drop the buffers so stop here */
				UnlockPage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		spin_lock(&pagecache_lock);

		/*
		 * This is the non-racy check for busy page.
		 * It is critical to check PageDirty _after_ we made sure
		 * the page is freeable so not in use by anybody.
		 * At this point we're guaranteed that page->buffers is NULL,
		 * nobody can refill page->buffers under us because we still
		 * hold the page lock.
		 */
		if (!page->mapping || page_count(page) > 1) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
page_mapped:
			if (--max_mapped < 0) {
				spin_unlock(&pagemap_lru_lock);

				nr_pages -= kmem_cache_reap(gfp_mask);
				if (nr_pages <= 0)
					goto out;

				shrink_dcache_memory(vm_vfs_scan_ratio, gfp_mask);
				shrink_icache_memory(vm_vfs_scan_ratio, gfp_mask);
#ifdef CONFIG_QUOTA
				shrink_dqcache_memory(vm_vfs_scan_ratio, gfp_mask);
#endif

				if (!*failed_swapout)
					*failed_swapout = !swap_out(classzone);

				max_mapped = nr_pages * vm_mapped_ratio;

				spin_lock(&pagemap_lru_lock);
				refill_inactive(nr_pages, classzone);
			}
			continue;
			
		}
		if (PageDirty(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
			continue;
		}

		__lru_cache_del(page);

		/* point of no return */
		if (likely(!PageSwapCache(page))) {
			__remove_inode_page(page);
			spin_unlock(&pagecache_lock);
		} else {
			swp_entry_t swap;
			swap.val = page->index;
			__delete_from_swap_cache(page);
			spin_unlock(&pagecache_lock);
			swap_free(swap);
		}

		UnlockPage(page);

		/* effectively free the page here */
		page_cache_release(page);

		if (--nr_pages)
			continue;
		break;
	}
	spin_unlock(&pagemap_lru_lock);

 out:
	return nr_pages;
}
Example #23
/*
 * We need to make the locks finer granularity, but right
 * now we need this so that we can do page allocations
 * without holding the kernel lock etc.
 *
 * We want to try to free "count" pages, and we want to 
 * cluster them so that we get good swap-out behaviour.
 *
 * OTOH, if we're a user process (and not kswapd), we
 * really care about latency. In that case we don't try
 * to free too many pages.
 */
static int refill_inactive(unsigned int gfp_mask, int user)
{
	int priority, count, start_count, made_progress;
	unsigned long idle_time;

	count = inactive_shortage() + free_shortage();
	if (user)
		count = (1 << page_cluster);
	start_count = count;

	/* Always trim SLAB caches when memory gets low. */
	kmem_cache_reap(gfp_mask);

	/*
	 * Calculate the minimum time (in seconds) a process must
	 * have slept before we consider it for idle swapping.
	 * This must be the number of seconds it takes to go through
	 * all of the cache. Doing this idle swapping makes the VM
	 * smoother once we start hitting swap.
	 */
	idle_time = atomic_read(&page_cache_size);
	idle_time += atomic_read(&buffermem_pages);
	idle_time /= (inactive_target + 1);

	priority = 6;
	do {
		made_progress = 0;

		if (current->need_resched) {
			__set_current_state(TASK_RUNNING);
			schedule();
		}

		while (refill_inactive_scan(priority, 1) ||
				swap_out(priority, gfp_mask, idle_time)) {
			made_progress = 1;
			if (--count <= 0)
				goto done;
		}

		/*
		 * don't be too light against the d/i cache since
		 * refill_inactive() almost never fails when there's
		 * really plenty of memory free.
		 */
		shrink_dcache_memory(priority, gfp_mask);
		shrink_icache_memory(priority, gfp_mask);

		/*
		 * Then, try to page stuff out..
		 */
		while (swap_out(priority, gfp_mask, 0)) {
			made_progress = 1;
			if (--count <= 0)
				goto done;
		}

		/*
		 * If we either have enough free memory, or if
		 * page_launder() will be able to make enough
		 * free memory, then stop.
		 */
		if (!inactive_shortage() || !free_shortage())
			goto done;

		/*
		 * Only switch to a lower "priority" if we
		 * didn't make any useful progress in the
		 * last loop.
		 */
		if (!made_progress)
			priority--;
	} while (priority >= 0);

	/* Always end on a refill_inactive.., may sleep... */
	while (refill_inactive_scan(0, 1)) {
		if (--count <= 0)
			goto done;
	}

done:
	return (count < start_count);
}