Example no. 1
const char* test_page_cache() {
  gc_page_cache cache;
  initialize_page_cache(&cache, 16, 0);
  gc_page pages[32];
  for(int i = 0; i < 32; ++i) {
    /* Fill each page to all but 512 bytes and give it a unique zone. */
    pages[i].fill = GC_PAGE_SIZE - 512;
    pages[i].space = 0;
    pages[i].zone = i;
    pages[i].step = 0;
  }

  for(int i = 0; i < 16; ++i) {
    cache_page(&cache, &pages[i]);
  }

  /* Pages 0..15 are cached; their zones sum to 0 + 1 + ... + 15 = 120. */
  int sum = 0;
  for(int i = 0; i < 16; ++i)
    sum += cache.cache_entries[i]->zone;

  if(sum != 120) return "cache 1 failed";

  /* The cache holds 16 entries, so caching pages 16..31 must evict
     pages 0..15 in insertion order. */
  for(int i = 16; i < 32; ++i) {
    if(cache_page(&cache, &pages[i])->zone != (i - 16))
      return "cache 2 failed";
  }

  /* Pages 16..31 now occupy the cache: 16 + 17 + ... + 31 = 376. */
  sum = 0;
  for(int i = 0; i < 16; ++i) {
    sum += cache.cache_entries[i]->zone;
  }

  if(sum != 376) return "cache 3 failed";

  /* Each cached page has 512 bytes free, so a 200-byte request against
     a cached zone (19) should hit. */
  gc_page* page = lookup_page(&cache, 200, 19, 0);
  if(page->zone != 19) return "lookup 1 failed";

  /* A hit should leave the hint index pointing at the matched entry. */
  if(cache.cache_entries[cache.hint_index]->zone != 19)
    return "lookup 2 failed";

  /* Zone 5 was evicted above, so this lookup must miss. */
  page = lookup_page(&cache, 200, 5, 0);
  if(page != 0) return "lookup 3 failed";

  /* 700 bytes exceeds the 512 bytes free on every page, so this must
     miss even though zone 21 is cached. */
  page = lookup_page(&cache, 700, 21, 0);
  if(page != 0) return "lookup 4 failed";

  page = lookup_page(&cache, 200, 21, 0);
  if(page->zone != 21) return "lookup 5 failed";
  
  if(cache.cache_entries[cache.hint_index]->zone != 21)
    return "lookup 6 failed";

  finalize_page_cache(&cache);

  return "passed";
}
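
For reference, here is a minimal sketch of the cache API this test exercises, reconstructed purely from the assertions above. The struct layouts, the FIFO eviction policy, the fixed capacity, and the GC_PAGE_SIZE value are assumptions, not the original implementation:

#include <stddef.h>

#define GC_PAGE_SIZE 4096                /* assumed page size */
#define GC_CACHE_CAPACITY 16             /* matches the test above */

typedef struct gc_page {
  size_t fill;                           /* bytes already used */
  int space;
  int zone;
  int step;
} gc_page;

typedef struct gc_page_cache {
  gc_page* cache_entries[GC_CACHE_CAPACITY];
  int capacity;
  int count;
  int next_victim;                       /* FIFO eviction cursor */
  int hint_index;                        /* entry hit by the last lookup */
} gc_page_cache;

void initialize_page_cache(gc_page_cache* cache, int capacity, int step) {
  (void)step;
  cache->capacity = capacity;
  cache->count = cache->next_victim = cache->hint_index = 0;
  for (int i = 0; i < GC_CACHE_CAPACITY; ++i)
    cache->cache_entries[i] = NULL;
}

/* Insert PAGE; once the cache is full, evict and return the oldest
   entry, which is what the "cache 2 failed" check above relies on. */
gc_page* cache_page(gc_page_cache* cache, gc_page* page) {
  if (cache->count < cache->capacity) {
    cache->cache_entries[cache->count++] = page;
    return NULL;
  }
  gc_page* evicted = cache->cache_entries[cache->next_victim];
  cache->cache_entries[cache->next_victim] = page;
  cache->next_victim = (cache->next_victim + 1) % cache->capacity;
  return evicted;
}

/* Find a cached page in ZONE at STEP with at least SPACE bytes free,
   remembering the hit in hint_index. */
gc_page* lookup_page(gc_page_cache* cache, size_t space, int zone, int step) {
  for (int i = 0; i < cache->count; ++i) {
    gc_page* p = cache->cache_entries[i];
    if (p->zone == zone && p->step == step &&
        GC_PAGE_SIZE - p->fill >= space) {
      cache->hint_index = i;
      return p;
    }
  }
  return NULL;
}

void finalize_page_cache(gc_page_cache* cache) { (void)cache; }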
Example no. 2
int _FTL_OBJ_COPYBACK(int32_t source, int32_t destination)
{
    page_node *source_p;
    
    source_p = lookup_page(source);
    
    // source_p can be NULL if the GC is working on old pages that belonged to an object that was already deleted
    if (source_p != NULL)
    {
        // invalidate the source page
        UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(source), CALC_BLOCK(source), CALC_PAGE(source), INVALID);
        
        // mark new page as valid and used
        UPDATE_NEW_PAGE_MAPPING_NO_LOGICAL(destination);
        
        // change the object's page mapping to the new page
        HASH_DEL(global_page_table, source_p); 
        source_p->page_id = destination;
        HASH_ADD_INT(global_page_table, page_id, source_p); 
    }
#ifdef FTL_DEBUG
    else
    {
        printf("Warning[%s] %u copyback page not mapped to an object \n", __FUNCTION__, source);
    }
#endif

    return SUCCESS;
}
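
The call sites above imply that lookup_page resolves a physical page id to its page_node through the uthash-keyed global_page_table. A plausible sketch, with the key field and chain layout inferred from the calls (not the original source):

#include <stdint.h>
#include "uthash.h"

typedef struct page_node {
    int page_id;                /* hash key (HASH_ADD_INT keys on an int) */
    unsigned long object_id;    /* owning object */
    struct page_node *next;     /* next page in the object's chain */
    UT_hash_handle hh;          /* uthash bookkeeping */
} page_node;

page_node *global_page_table = NULL;

page_node *lookup_page(int32_t page_id)
{
    page_node *found = NULL;
    int key = (int) page_id;
    HASH_FIND_INT(global_page_table, &key, found);
    return found;               /* NULL when the page maps to no object */
}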
Example no. 3
/* Frees the PAGE_CNT user pages starting at PAGES. */
void palloc_free_umultiple (void *pages, size_t page_cnt)
{
  size_t i;
  for (i = 0; i < page_cnt; i++)
    {
      /* lookup_page returns a pointer to the PTE, not the mapped frame,
         so translate the PTE to its kernel virtual address before
         freeing the page. */
      uint32_t *pte = lookup_page (thread_current ()->pagedir,
                                   (uint8_t *) pages + i * PGSIZE, false);
      if (pte != NULL && (*pte & PTE_P) != 0)
        palloc_free_page (pte_get_page (*pte));
    }
}
Example no. 4
/* Looks up the physical address that corresponds to user virtual
   address UADDR in PD.  Returns the kernel virtual address
   corresponding to that physical address, or a null pointer if
   UADDR is unmapped. */
void * pagedir_get_page (uint32_t *pd, const void *uaddr) 
{
  uint32_t *pte;

  ASSERT (is_user_vaddr (uaddr));
  
  pte = lookup_page (pd, uaddr, false);
  if (pte != NULL && (*pte & PTE_P) != 0)
    return pte_get_page (*pte) + pg_ofs (uaddr);
  else
    return NULL;
}
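
For illustration, a hypothetical caller: a system-call handler can use this to validate a user-supplied pointer before dereferencing it. user_ptr_valid is an invented name; the calls it makes are standard Pintos:

#include "threads/thread.h"
#include "threads/vaddr.h"
#include "userprog/pagedir.h"

static bool
user_ptr_valid (const void *uaddr)
{
  /* Reject NULL, kernel addresses, and unmapped user addresses. */
  return uaddr != NULL
         && is_user_vaddr (uaddr)
         && pagedir_get_page (thread_current ()->pagedir, uaddr) != NULL;
}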
Example no. 5
/* Set the CNT consecutive frame table entries in FT starting at index START
   to the kernel virtual addresses of PTEs pointing to consecutive pages
   starting at PAGE according to page directory PD.
   Create a new page table if not found and CREATE is true. */
void
frame_table_set_multiple (struct frame_table *ft, size_t start, size_t cnt,
                          uint32_t *pd, uint8_t *page, bool create)
{
  ASSERT (ft != NULL);
  ASSERT (start <= ft->page_cnt);
  ASSERT (start + cnt <= ft->page_cnt);
  ASSERT (pg_ofs (page) == 0);

  size_t i;
  for (i = 0; i < cnt; i++)
  {
    uint32_t *pte = lookup_page (pd, page + i * PGSIZE, create);
    /* PTEs live in kernel memory, so a valid pointer lies above
       PHYS_BASE; this also catches a NULL result when CREATE is false. */
    ASSERT ((void *) pte > PHYS_BASE);
    /* If the mapped region is in user space (below PHYS_BASE), tag its
       PTEs. */
    if ((void *) page < PHYS_BASE)
      *pte |= PTE_I;
    ft->frames[start + i].frame = pte;
  }
}
Example no. 6
/* Update the frame table entries from the old PTE addresses to the new PTE
   addresses according to the new PD */
void
frame_table_change_pagedir (struct frame_table *ft, uint32_t *pd)
{
  ASSERT (ft != NULL);
  ASSERT (pd != NULL);

  size_t i;
  for (i = 0; i < ft->page_cnt; i++)
  {
    if (ft->frames[i].frame != NULL)
    {
      uint32_t *old_pte = ft->frames[i].frame;
      /* Recover the frame's physical address from the old PTE, then look
         up the PTE that maps its kernel alias in the new directory. */
      uint32_t paddr = *old_pte & ~PGMASK;
      uint32_t *new_pte = lookup_page (pd, ptov (paddr), false);
      /* Both PTEs must point at the same physical frame. */
      ASSERT ((*old_pte & ~PGMASK) == (*new_pte & ~PGMASK));
      ft->frames[i].frame = new_pte;
    }
  }
}
Example no. 7
/*
 * map_and_copy
 * 
 * Maps a series of physical pages into a process's address space, copying the data
 * from the corresponding pages in another process.
 * 
 * This operates similarly to map_new_pages. The difference is that instead of
 * the pages being empty, their contents are copied from existing pages that are
 * already mapped by another process. In order to obtain the latter, we perform a
 * lookup on the source process's page directory to get the physical address of
 * each page, and then use that to obtain the data to store in the newly-allocated
 * physical pages of the destination process.
 * 
 * This is used by the fork system call, which needs to duplicate all aspects of
 * a process's state. It uses this function to copy the text, data, and stack
 * segments of the parent process.
 */
static void
map_and_copy(page_dir src_dir, page_dir dest_dir,
	     unsigned int start, unsigned int end)
{
	assert(0 == start % PAGE_SIZE);
	assert(0 == end % PAGE_SIZE);
	unsigned int addr;
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		/*
		 * Map new page 
		 */
		unsigned int page = (unsigned int)alloc_page();
		map_page(dest_dir, addr, page, PAGE_USER, PAGE_READ_WRITE);

		/*
		 * Copy from source 
		 */
		unsigned int src_phys;
		int sl = lookup_page(src_dir, addr, &src_phys);
		assert(sl);
		/* The physical addresses are used directly as pointers,
		 * which assumes the kernel identity-maps physical memory. */
		memmove((void *)page, (void *)src_phys, PAGE_SIZE);
	}
}
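
A hypothetical call site, matching the comment's description of fork (the directory handles and segment bounds are invented names):

/* Inside fork(): duplicate one of the parent's segments (text, data,
 * or stack) into the child's fresh page directory. */
map_and_copy(parent_dir, child_dir, SEGMENT_START, SEGMENT_END);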
Example no. 8
/*! Obtains and returns a group of PAGE_CNT contiguous free pages starting at
    START_ADDR.  If PAL_USER is set, the pages are obtained from the user pool,
    otherwise from the kernel pool.  If PAL_ZERO is set in FLAGS, then the pages
    are filled with zeros.  If PAL_PIN is set in FLAGS, then the pages are
    pinned.  If too few pages are available, returns a null pointer, unless 
    PAL_ASSERT is set in FLAGS, in which case the kernel panics.  If LOAD_TYPE
    is ZERO_PAGE, then the page data pointer is set to NULL, otherwise it is set
    to the passed pointer DATA.  If LOAD_TYPE is FILE_PAGE, the page's file
    offset is set to F_OFS. */
void *palloc_make_multiple_addr(void * start_addr,
                                enum palloc_flags flags,
                                size_t page_cnt,
                                enum page_load load_type,
                                void *data,
                                void *f_ofs) {

    struct list *alloc_page_list;
    uint32_t i;
    struct thread *t = thread_current();
    uint32_t *pagedir;
    uint32_t *pte;
    void *vaddr;
    void *curr_f_ofs = f_ofs;

    /* Page data should not be in a frame. */
    if (load_type == FRAME_PAGE) {
        /* TODO: case where it is in a frame. */
        ASSERT(false);
    }

    /* Use the correct pool based on whether this is paging data or not. */
    if (flags & PAL_PAGING) {
        alloc_page_list = init_page_dir_sup;
        pagedir = init_page_dir;
    } else {
        alloc_page_list = &(t->page_entries);
        pagedir = t->pagedir;
    }

    /* If block at specified address is not open, return NULL. */
    if (!palloc_block_open(start_addr, page_cnt)) {
        if (flags & PAL_ASSERT) {
            PANIC("palloc: out of pages");
        }
        return NULL;
    }

    /* Allocate all pages for the block. */
    for (i = 0; i < page_cnt; i++) {

        /* Create a supplemental entry for the page. */
        struct page_entry *page_i = get_page_entry();

        ASSERT (page_i != NULL);

        /* Get the virtual address for the page. */
        vaddr = (uint8_t *) (start_addr + (i * PGSIZE)) + PGSIZE * 1000;

        /* Initialize the page. */
        page_i->vaddr = vaddr;
        page_i->source = load_type;

        if (load_type == ZERO_PAGE) {
            page_i->data = NULL;
        }
        else {
            page_i->data = data;
        }

        if (load_type == FILE_PAGE) {
            /* Get the file offset. */
            curr_f_ofs += PGSIZE;
            page_i->f_ofs = curr_f_ofs;
        }
        else {
            page_i->f_ofs = NULL;
        }
        
        /* Add to list of allocated pages in order by address. */
        list_insert_ordered(alloc_page_list, &(page_i->elem), 
                            palloc_page_less, NULL);

        if (flags & PAL_USER) {
            pagedir_set_page(pagedir, vaddr, 0, !(flags & PAL_READO));
        } else {
            if(!pagedir_set_page_kernel(pagedir, vaddr, 0, !(flags & PAL_READO))) {
                if (flags & PAL_ASSERT) {
                    PANIC("palloc: out of pages");
                }
                return NULL;
            }
        }
        
        pte = lookup_page(pagedir, vaddr, false);

        /* Pin the page if necessary. */
        if (flags & PAL_PIN) {
           *pte = *pte | PTE_PIN;
        }
        
    }
    return start_addr;
}
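
A hypothetical invocation, following the contract documented above (START_VADDR is an invented placeholder): two zeroed, pinned pages, whose data pointer and file offset are therefore NULL:

void *block = palloc_make_multiple_addr(START_VADDR, PAL_ZERO | PAL_PIN,
                                        2, ZERO_PAGE, NULL, NULL);
if (block == NULL)
    PANIC("palloc: could not reserve pages");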
Example no. 9
/* Returns true if the PTE for virtual page VPAGE in PD is dirty,
   that is, if the page has been modified since the PTE was
   installed.
   Returns false if PD contains no PTE for VPAGE. */
bool pagedir_is_dirty (uint32_t *pd, const void *vpage) 
{
  uint32_t *pte = lookup_page (pd, vpage, false);
  return pte != NULL && (*pte & PTE_D) != 0;
}
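
For illustration, the usual pattern during frame eviction, assuming the standard Pintos counterpart pagedir_set_dirty (evict_page is an invented name and the write-back step is elided):

static void
evict_page (uint32_t *pd, const void *vpage)
{
  if (pagedir_is_dirty (pd, vpage))
    {
      /* ... write VPAGE back to its swap slot or file ... */
      pagedir_set_dirty (pd, vpage, false);   /* mark the page clean */
    }
}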
Example no. 10
int _FTL_OBJ_WRITE(object_id_t object_id, unsigned int offset, unsigned int length)
{
    stored_object *object;
    page_node *current_page = NULL,*temp_page;
    uint32_t page_id;
    int io_page_nb;
    int curr_io_page_nb;
    unsigned int ret = FAIL;
    
    object = lookup_object(object_id);
    
    // file not found
    if (object == NULL)
        return FAIL;
    
    // calculate the overhead of allocating the request; io_page_nb will be the total number of pages we're going to write
    io_alloc_overhead = ALLOC_IO_REQUEST(offset, length, WRITE, &io_page_nb);
    
    // if the offset is past the current size of the stored_object we need to append new pages until we can start writing
    while (offset > object->size)
    {
        if (GET_NEW_PAGE(VICTIM_OVERALL, EMPTY_TABLE_ENTRY_NB, &page_id) == FAIL)
        {
            // not enough memory presumably
            printf("ERROR[FTL_WRITE] Get new page fail \n");
            return FAIL;
        }
        if(!add_page(object, page_id))
            return FAIL;
        
        // mark new page as valid and used
        UPDATE_NEW_PAGE_MAPPING_NO_LOGICAL(page_id);
    }

    for (curr_io_page_nb = 0; curr_io_page_nb < io_page_nb; curr_io_page_nb++)
    {
        // if this is the first iteration we need to find the page by offset, otherwise we can go with the page chain
        if (current_page == NULL)
            current_page = page_by_offset(object, offset);
        else
            current_page = current_page->next;
        
        // get the page we'll be writing to
        if (GET_NEW_PAGE(VICTIM_OVERALL, EMPTY_TABLE_ENTRY_NB, &page_id) == FAIL)
        {
            printf("ERROR[FTL_WRITE] Get new page fail \n");
            return FAIL;
        }
        if((temp_page=lookup_page(page_id)))
        {
            printf("ERROR[FTL_WRITE] Object %lu already contains page %d\n",temp_page->object_id,page_id);
            return FAIL;
        }
        
        // mark new page as valid and used
        UPDATE_NEW_PAGE_MAPPING_NO_LOGICAL(page_id);
        
        if (current_page == NULL) // writing at the end of the object and need to allocate more space for it
        {
            current_page = add_page(object, page_id);
            if(!current_page)
                return FAIL;
        }
        else // writing over parts of the object
        {
            // invalidate the old physical page and replace the page_node's page
            UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(current_page->page_id), CALC_BLOCK(current_page->page_id), CALC_PAGE(current_page->page_id), INVALID);
            UPDATE_INVERSE_PAGE_MAPPING(current_page->page_id, -1);            

            HASH_DEL(global_page_table, current_page); 
            current_page->page_id = page_id;
            HASH_ADD_INT(global_page_table, page_id, current_page); 
        }
#ifdef GC_ON
            // TODO: improve this; we may well GC the same flash chip and block
            // multiple times. Probably add an array to hold the unique ones and
            // GC all of them at the end.
            GC_CHECK(CALC_FLASH(current_page->page_id), CALC_BLOCK(current_page->page_id), false);
#endif
        
        ret = SSD_PAGE_WRITE(CALC_FLASH(page_id), CALC_BLOCK(page_id), CALC_PAGE(page_id), curr_io_page_nb, WRITE, io_page_nb);
        
		// send a physical write action being done to the statistics gathering
		if (ret == SUCCESS)
		{
			FTL_STATISTICS_GATHERING(page_id , PHYSICAL_WRITE);
		}
        
#ifdef FTL_DEBUG
        if (ret == FAIL)
        {
            printf("Error[FTL_WRITE] %d page write fail \n", page_id);
        }
#endif

//        page_node *page;
//        printf("Object page map:{");
//        for(page=object->pages; page; page=page->next)
//            printf("%d->",page->page_id);
//        printf("}\n");

    }

    INCREASE_IO_REQUEST_SEQ_NB();

#ifdef MONITOR_ON
	char szTemp[1024];
	sprintf(szTemp, "WRITE PAGE %d ", length);
	WRITE_LOG(szTemp);
	sprintf(szTemp, "WB CORRECT %d", curr_io_page_nb);
	WRITE_LOG(szTemp);
#endif

#ifdef FTL_DEBUG
	printf("[%s] Complete\n",__FUNCTION__);
#endif

	return ret;
}
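
For context, a plausible page_by_offset for the object FTL above, walking the chain that the commented-out debug code prints. The node layout comes from the call sites; OBJ_PAGE_SIZE, the assumed bytes covered per page, is an invented constant:

static page_node *page_by_offset(stored_object *object, unsigned int offset)
{
    page_node *page = object->pages;            /* head of the page chain */
    unsigned int skip = offset / OBJ_PAGE_SIZE; /* pages before OFFSET */

    while (page != NULL && skip-- > 0)
        page = page->next;

    return page;    /* NULL when OFFSET lies past the last allocated page */
}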