Example #1
/*
 * System Call: mapid_t mmap (int fd, void *addr)
 * Maps the file open as fd into the process's virtual address space.
 * The entire file is mapped into consecutive virtual pages starting
 * at addr.
 */
mapid_t mmap_handler (int fd, void *addr){
	if(addr==NULL) return -1;
	if(pg_ofs(addr) != 0) return -1;
	if(fd<2) return -1;

	struct file *f_, *f;
	lock_acquire (&fic_m);
	f_ = findfd (fd);
	if (f_ == NULL) {
		lock_release (&fic_m);
		return -1;
	}
	f = file_reopen (f_);
	if (f == NULL) {
		lock_release (&fic_m);
		return -1;
	}
	int32_t size_f = file_length (f);
	lock_release (&fic_m);

	/* Reject empty files and mappings that would run into the stack. */
	if (size_f == 0 || addr + size_f >= thread_current ()->saved_esp) {
		lock_acquire (&fic_m);
		file_close (f);
		lock_release (&fic_m);
		return -1;
	}
	if (validate_mapping (addr, size_f)) {
		/* Round up: a partial final page still needs a whole page. */
		size_t page_num = (size_f + PGSIZE - 1) / PGSIZE;
		return addfmap (f, addr, page_num, size_f);
	}
	lock_acquire (&fic_m);
	file_close (f);
	lock_release (&fic_m);
	return -1;
}
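Note that the page count above is a ceiling division. Pintos already ships a macro for exactly this in lib/round.h; a minimal sketch of the same computation using it (assuming that header is available in this project):

#include <round.h>               /* DIV_ROUND_UP, from Pintos lib/ */
#include "threads/vaddr.h"       /* PGSIZE */

/* Pages needed to back SIZE bytes of file data: ceiling division,
   so a partial final page still costs a whole page. */
static size_t
pages_for (size_t size)
{
  return DIV_ROUND_UP (size, PGSIZE);
}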
Example #2
File: frame.c Project: chutchUCD/OS
/**
 * Internal helper: deallocates the frame table entry for KPAGE and,
 * if FREE_PAGE is true, returns the page itself to the allocator.
 * MUST BE CALLED with 'frame_lock' held.
 */
void
vm_frame_do_free (void *kpage, bool free_page)
{
  ASSERT (lock_held_by_current_thread (&frame_lock));
  ASSERT (is_kernel_vaddr(kpage));
  ASSERT (pg_ofs (kpage) == 0); // should be aligned

  // hash lookup : a temporary entry
  struct frame_table_entry f_tmp;
  f_tmp.kpage = kpage;

  struct hash_elem *h = hash_find (&frame_map, &(f_tmp.helem));
  if (h == NULL) {
    PANIC ("The page to be freed is not stored in the table");
  }

  struct frame_table_entry *f;
  f = hash_entry(h, struct frame_table_entry, helem);

  hash_delete (&frame_map, &f->helem);
  list_remove (&f->lelem);

  // Free resources
  if(free_page) palloc_free_page(kpage);
  free(f);
}
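Since vm_frame_do_free requires frame_lock to be held, callers presumably reach it through a public wrapper that takes the lock itself. A minimal sketch of such a wrapper (the name vm_frame_free is an assumption, not shown in the snippet):

/* Public entry point: frees KPAGE and its frame table entry.
   Hypothetical wrapper around vm_frame_do_free declared above. */
void
vm_frame_free (void *kpage)
{
  lock_acquire (&frame_lock);
  vm_frame_do_free (kpage, true);   /* also return the page to palloc */
  lock_release (&frame_lock);
}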
Example #3
/* Frees the PAGE_CNT pages starting at PAGES. */
void
palloc_free_multiple (void *pages, size_t page_cnt) 
{
  struct pool *pool;
  size_t page_idx;

  ASSERT (pg_ofs (pages) == 0);
  if (pages == NULL || page_cnt == 0)
    return;

  if (page_from_pool (&kernel_pool, pages))
    pool = &kernel_pool;
  else if (page_from_pool (&user_pool, pages))
    pool = &user_pool;
  else
    NOT_REACHED ();

  page_idx = pg_no (pages) - pg_no (pool->base);

#ifndef NDEBUG
  memset (pages, 0xcc, PGSIZE * page_cnt);
#endif

  ASSERT (bitmap_all (pool->used_map, page_idx, page_cnt));
  bitmap_set_multiple (pool->used_map, page_idx, page_cnt, false);
}
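In the stock Pintos allocator, freeing a single page is just the one-page case of this function:

/* Frees the page at PAGE. */
void
palloc_free_page (void *page)
{
  palloc_free_multiple (page, 1);
}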
Example #4
/* Read system call. */
static int
sys_read (int handle, void *udst_, unsigned size) 
{
  uint8_t *udst = udst_;
  struct file_descriptor *fd;
  int bytes_read = 0;

  /* Look up file descriptor. */
  if (handle != STDIN_FILENO)
    fd = lookup_file_fd (handle);

  while (size > 0) 
    {
      /* How much to read into this page? */
      size_t page_left = PGSIZE - pg_ofs (udst);
      size_t read_amt = size < page_left ? size : page_left;
      off_t retval;

      /* Check that touching this page is okay. */
      if (!page_lock (udst, true)) 
        thread_exit ();

      /* Read from file into page. */
      if (handle != STDIN_FILENO) 
        {
          retval = file_read (fd->file, udst, read_amt);
          if (retval < 0)
            {
              if (bytes_read == 0)
                bytes_read = -1; 
              break;
            }
          bytes_read += retval; 
        }
      else 
        {
          size_t i;
          
          for (i = 0; i < read_amt; i++) 
            udst[i] = input_getc ();
          retval = read_amt;        /* keep the short-read check below valid */
          bytes_read += read_amt;
        }

      /* Release page. */
      page_unlock (udst);

      /* If it was a short read we're done. */
      if (retval != (off_t) read_amt)
        break;

      /* Advance. */
      udst += retval;
      size -= retval;
    }
   
  return bytes_read;
}
Example #5
void
frame_set_pin(void *page, bool pin) {
    lock_acquire_re(&vm_lock);
    // frames MUST always be page aligned
    ASSERT(page != NULL);
    ASSERT(pg_ofs(page) == 0);

    frametable.frametable[page_to_pagenum(page)].pin = pin;
    lock_release_re(&vm_lock);
}
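Pinning is typically used to keep the eviction scan away from a frame while the kernel is actively touching it, e.g. during a disk transfer. A hedged usage sketch (read_into_frame is a hypothetical caller, not part of the snippet):

/* Read NBYTES of F into the frame at KPAGE, holding the pin so the
   frame cannot be evicted mid-transfer. */
static off_t
read_into_frame (struct file *f, void *kpage, off_t nbytes)
{
  off_t got;
  frame_set_pin (kpage, true);    /* block eviction during the read */
  got = file_read (f, kpage, nbytes);
  frame_set_pin (kpage, false);
  return got;
}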
Example #6
/* Write system call. */
static int
sys_write (int handle, void *usrc_, unsigned size) 
{
  uint8_t *usrc = usrc_;
  struct file_descriptor *fd = NULL;
  int bytes_written = 0;

  /* Look up file descriptor. */
  if (handle != STDOUT_FILENO)
    fd = lookup_file_fd (handle);

  while (size > 0) 
    {
      /* How many bytes to write to this page? */
      size_t page_left = PGSIZE - pg_ofs (usrc);
      size_t write_amt = size < page_left ? size : page_left;
      off_t retval;

      /* Check that we can touch this user page. */
      if (!page_lock (usrc, false)) 
        thread_exit ();

      /* Do the write. */
      if (handle == STDOUT_FILENO)
        {
          putbuf ((char *) usrc, write_amt);
          retval = write_amt;
        }
      else
        retval = file_write (fd->file, usrc, write_amt);

      /* Release user page. */
      page_unlock (usrc);

      /* Handle return value. */
      if (retval < 0) 
        {
          if (bytes_written == 0)
            bytes_written = -1;
          break;
        }
      bytes_written += retval;

      /* If it was a short write we're done. */
      if (retval != (off_t) write_amt)
        break;

      /* Advance. */
      usrc += retval;
      size -= retval;
    }
 
  return bytes_written;
}
Example #7
/* Looks up the physical address that corresponds to user virtual
   address UADDR in PD.  Returns the kernel virtual address
   corresponding to that physical address, or a null pointer if
   UADDR is unmapped. */
void * pagedir_get_page (uint32_t *pd, const void *uaddr) 
{
  uint32_t *pte;

  ASSERT (is_user_vaddr (uaddr));
  
  pte = lookup_page (pd, uaddr, false);
  if (pte != NULL && (*pte & PTE_P) != 0)
    return pte_get_page (*pte) + pg_ofs (uaddr);
  else
    return NULL;
}
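A typical use is validating and translating a user buffer pointer before the kernel dereferences it; a small sketch (user_to_kernel is a hypothetical helper):

/* Returns the kernel alias of user pointer UADDR in the current
   process, or NULL if UADDR is not a mapped user address. */
static void *
user_to_kernel (const void *uaddr)
{
  if (!is_user_vaddr (uaddr))
    return NULL;
  return pagedir_get_page (thread_current ()->pagedir, uaddr);
}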
Example #8
bool from_file (struct spage_entry *se)
{
  struct thread *t = thread_current ();

  struct file *file = se->myfile;
  off_t ofs = se->ofs;
  uint8_t *upage = se->upage;
  uint32_t read_bytes = se->read_bytes;
  uint32_t zero_bytes = se->zero_bytes;
  bool writable = se->writable;

  ASSERT ((read_bytes + zero_bytes) % PGSIZE == 0);
  ASSERT (pg_ofs (upage) == 0);
  ASSERT (ofs % PGSIZE == 0);

//  lock_acquire (&filesys_lock);
  file_seek (file, ofs);
//  lock_release (&filesys_lock);

  if (!(read_bytes > 0 || zero_bytes > 0))
    return false;

  /* Calculate how to fill this page.
     We will read PAGE_READ_BYTES bytes from FILE
     and zero the final PAGE_ZERO_BYTES bytes. */
  size_t page_read_bytes = read_bytes < PGSIZE ? read_bytes : PGSIZE;
  size_t page_zero_bytes = PGSIZE - page_read_bytes;

  /* Get a page of memory. */
  uint8_t *kpage = palloc_get_page (PAL_USER);
  if (kpage == NULL)
    return false;

  /* Load this page. */
//  lock_acquire (&filesys_lock);
  if (file_read (file, kpage, page_read_bytes) != (int) page_read_bytes)
    {
//      lock_release (&filesys_lock);
      palloc_free_page (kpage);
      return false;
    }
//  lock_release (&filesys_lock);
  memset (kpage + page_read_bytes, 0, page_zero_bytes);

  /* Add the page to the process's address space. */
  if (!install_page (upage, kpage, writable))
    {
      palloc_free_page (kpage);
      return false;
    }

  return true;
}
Example #9
/*
 * Insert a new frame table entry for address `frame_address`.
 * Needs the corresponding `tid` and `virt_address` to look up the
 * correct supplementary page table.
 * The `pte` is used to access the dirty/accessed bits for the frame.
 */
bool
frame_insert(void *frame_address,
             tid_t tid,
             void *virt_address,
             struct pagetable_entry* pte) {
    lock_acquire_re(&vm_lock);
    // frames MUST always be page aligned
    ASSERT(frame_address != NULL);
    ASSERT(pg_ofs(frame_address) == 0);
    // virtual addresses MUST also be page aligned
    ASSERT(pg_ofs(virt_address) == 0);

    // record the entry for this frame in the frame table
    uint32_t pgnum = page_to_pagenum(frame_address);
    frametable_entry_create(&frametable.frametable[pgnum],
                            pte,
                            tid,
                            virt_address,
                            false);
    lock_release_re(&vm_lock);
    return true;
}
Example #10
File: swap.c Project: kch31411/os
void
swap_in (disk_sector_t disk_no, void *phy_addr)
{
  ASSERT (pg_ofs (phy_addr) == 0);
  lock_acquire (&swap_lock);

  int i;

  for (i = 0; i < SEC_PER_PG; i++)
  {
    disk_read (swap_disk, disk_no+i, (phy_addr + DISK_SECTOR_SIZE * i));
  }
  swap_free_slot (disk_no);

  lock_release (&swap_lock);
}
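SEC_PER_PG is not shown in the snippet; it is presumably the number of disk sectors that hold one page. A sketch of the definition this code relies on (the macro name matches the usage above, but the definition here is an assumption):

#include "devices/disk.h"     /* DISK_SECTOR_SIZE, 512 bytes */
#include "threads/vaddr.h"    /* PGSIZE, 4096 bytes */

/* Sectors needed to hold one page: 4096 / 512 = 8. */
#define SEC_PER_PG (PGSIZE / DISK_SECTOR_SIZE)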
Example #11
mmapid_t sys_mmap(int fd, void *upage) {
  // check arguments
  if (upage == NULL || pg_ofs(upage) != 0) return -1;
  if (fd <= 1) return -1; // 0 and 1 are unmappable
  struct thread *curr = thread_current();

  lock_acquire (&filesys_lock);

  /* 1. Open file */
  struct file *f = NULL;
  struct file_desc* file_d = find_file_desc(thread_current(), fd, FD_FILE);
  if(file_d && file_d->file) {
    // reopen file so that it doesn't interfere with the process itself;
    // it will be stored in the mmap_desc struct (later closed on munmap)
    f = file_reopen (file_d->file);
  }
  if(f == NULL) goto MMAP_FAIL;

  size_t file_size = file_length(f);
  if(file_size == 0) goto MMAP_FAIL;

  /* 2. Mapping memory pages */
  // First, ensure that every page address in the range is NON-EXISTENT.
  size_t offset;
  for (offset = 0; offset < file_size; offset += PGSIZE) {
    void *addr = upage + offset;
    if (vm_supt_has_entry(curr->supt, addr)) goto MMAP_FAIL;
  }

  // Now, map each page to filesystem
  for (offset = 0; offset < file_size; offset += PGSIZE) {
    void *addr = upage + offset;

    size_t read_bytes = (offset + PGSIZE < file_size ? PGSIZE : file_size - offset);
    size_t zero_bytes = PGSIZE - read_bytes;

    vm_supt_install_filesys(curr->supt, addr,
        f, offset, read_bytes, zero_bytes, /*writable*/true);
  }

  /* 3. Assign mmapid */
  mmapid_t mid;
  if (! list_empty(&curr->mmap_list)) {
    mid = list_entry(list_back(&curr->mmap_list), struct mmap_desc, elem)->id + 1;
  }
  else mid = 1;

  /* The original snippet is cut off here; a plausible completion, assuming
     the mmap_desc fields implied above: record the mapping, release the
     lock, and return the id, with MMAP_FAIL as the shared error exit. */
  struct mmap_desc *mmap_d = (struct mmap_desc*) malloc(sizeof(struct mmap_desc));
  mmap_d->id = mid;
  mmap_d->file = f;
  mmap_d->addr = upage;
  mmap_d->size = file_size;
  list_push_back (&curr->mmap_list, &mmap_d->elem);

  lock_release (&filesys_lock);
  return mid;

MMAP_FAIL:
  lock_release (&filesys_lock);
  return -1;
}
Example #12
/* Loads a segment starting at offset OFS in FILE at address
   UPAGE.  In total, READ_BYTES + ZERO_BYTES bytes of virtual
   memory are initialized, as follows:

        - READ_BYTES bytes at UPAGE must be read from FILE
          starting at offset OFS.

        - ZERO_BYTES bytes at UPAGE + READ_BYTES must be zeroed.

   The pages initialized by this function must be writable by the
   user process if WRITABLE is true, read-only otherwise.

   Return true if successful, false if a memory allocation error
   or disk read error occurs. */
static bool
load_segment (struct file *file, off_t ofs, uint8_t *upage,
              uint32_t read_bytes, uint32_t zero_bytes, bool writable) 
{
  ASSERT ((read_bytes + zero_bytes) % PGSIZE == 0);
  ASSERT (pg_ofs (upage) == 0);
  ASSERT (ofs % PGSIZE == 0);

  file_seek (file, ofs);
  while (read_bytes > 0 || zero_bytes > 0) 
    {
      /* Calculate how to fill this page.
         We will read PAGE_READ_BYTES bytes from FILE
         and zero the final PAGE_ZERO_BYTES bytes. */
      size_t page_read_bytes = read_bytes < PGSIZE ? read_bytes : PGSIZE;
      size_t page_zero_bytes = PGSIZE - page_read_bytes;

      /* Get a page of memory. */
      uint8_t *kpage = palloc_get_page (PAL_USER);
      if (kpage == NULL)
        return false;

      /* Load this page. */
      if (file_read (file, kpage, page_read_bytes) != (int) page_read_bytes)
        {
          palloc_free_page (kpage);
          return false; 
        }
      memset (kpage + page_read_bytes, 0, page_zero_bytes);

      /* Add the page to the process's address space. */
      if (!install_page (upage, kpage, writable)) 
        {
          palloc_free_page (kpage);
          return false; 
        }

      /* Advance. */
      read_bytes -= page_read_bytes;
      zero_bytes -= page_zero_bytes;
      upage += PGSIZE;
    }
  return true;
}
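For reference, install_page() used above is the stock helper from the Pintos skeleton's userprog/process.c:

/* Adds a mapping from user virtual address UPAGE to kernel
   virtual address KPAGE to the page table.
   Returns true on success, false if UPAGE is already mapped or
   if memory allocation fails. */
static bool
install_page (void *upage, void *kpage, bool writable)
{
  struct thread *t = thread_current ();

  /* Verify that there's not already a page at that virtual
     address, then map our page there. */
  return (pagedir_get_page (t->pagedir, upage) == NULL
          && pagedir_set_page (t->pagedir, upage, kpage, writable));
}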
Example #13
/*
 * Removes the frame entry for physical address `frame_address` from the
 * frame table. Access to any field of the corresponding entry is invalid
 * afterwards, until `frame_insert` is called again.
 *
 * Only non-pinned entries can be removed.
 */
void
frame_remove(void *frame_address) {
    lock_acquire_re(&vm_lock);
    log_debug("--- frame_remove (used: %d, own used: %d) ---\n", frametable.used, frametable.own_used);
    // frames MUST always be page aligned
    ASSERT(frame_address != NULL);
    ASSERT(pg_ofs(frame_address) == 0);

    uint32_t pgnum = page_to_pagenum(frame_address);
    if (frametable.frametable[pgnum].pin != false) {
        PANIC("Remove of pinned frame!");
    }

    // reset the entry so its stale fields cannot be reused
    frametable.frametable[pgnum].pte = NULL;
    frametable.used--;
    lock_release_re(&vm_lock);
}
Example #14
/* Set the CNT consecutive frame table entries in FT starting at index START
   to the kernel virtual addresses of PTEs pointing to consecutive pages
   starting at PAGE according to page directory PD.
   Create a new page table if not found and CREATE is true. */
void
frame_table_set_multiple (struct frame_table *ft, size_t start, size_t cnt,
                          uint32_t *pd, uint8_t *page, bool create)
{
  ASSERT (ft != NULL);
  ASSERT (start <= ft->page_cnt);
  ASSERT (start + cnt <= ft->page_cnt);
  ASSERT (pg_ofs (page) == 0);

  size_t i;
  for (i = 0; i < cnt; i++)
  {
    uint32_t *pte = lookup_page (pd, page + i * PGSIZE, create);
    ASSERT ((void *) pte > PHYS_BASE);
    if ((void *) page < PHYS_BASE)
      *pte |= PTE_I;
    ft->frames[start + i].frame = pte;
  }
}
Example #15
/* Copies SIZE bytes from kernel address SRC to user address
   UDST.
   Call thread_exit() if any of the user accesses are invalid. */
static void
copy_out (void *udst_, const void *src_, size_t size) 
{
  uint8_t *udst = udst_;
  const uint8_t *src = src_;

  while (size > 0) 
    {
      size_t chunk_size = PGSIZE - pg_ofs (udst);
      if (chunk_size > size)
        chunk_size = size;
      
      if (!page_lock (udst, false))
        thread_exit ();
      memcpy (udst, src, chunk_size);
      page_unlock (udst);

      udst += chunk_size;
      src += chunk_size;
      size -= chunk_size;
    }
}
Example #16
/* Copies SIZE bytes from user address USRC to kernel address
   DST.
   Call thread_exit() if any of the user accesses are invalid. */
static void
copy_in (void *dst_, const void *usrc_, size_t size) 
{
  uint8_t *dst = dst_;
  const uint8_t *usrc = usrc_;

  while (size > 0) 
    {
      size_t chunk_size = PGSIZE - pg_ofs (usrc);
      if (chunk_size > size)
        chunk_size = size;
      
      if (!page_lock (usrc, false))
        thread_exit ();
      memcpy (dst, usrc, chunk_size);
      page_unlock (usrc);

      dst += chunk_size;
      usrc += chunk_size;
      size -= chunk_size;
    }
}
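copy_in() and copy_out() are how the syscall layer touches user memory safely. A hedged sketch of a dispatch site that pulls the syscall number and argument words off the user stack with copy_in (frame layout per the usual Pintos convention; SYS_READ from lib/syscall-nr.h; sys_read as in Examples #4 and #19):

/* Sketch: fetch the syscall number and up to three argument words
   that sit just above ESP, then dispatch.  Error handling and the
   full syscall table are omitted. */
static void
syscall_handler (struct intr_frame *f)
{
  unsigned call_nr;
  int args[3];

  copy_in (&call_nr, f->esp, sizeof call_nr);
  copy_in (args, (uint32_t *) f->esp + 1, sizeof args);

  if (call_nr == SYS_READ)
    f->eax = sys_read (args[0], (void *) args[1], (unsigned) args[2]);
}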
Example #17
File: swap.c Project: kch31411/os
disk_sector_t
swap_out (void* phy_addr)
{
  ASSERT (pg_ofs (phy_addr) == 0);
  lock_acquire (&swap_lock);

  int i;
  disk_sector_t ret = swap_get_slot ();

  if (ret == BITMAP_ERROR)
  {
    PANIC ("swap_out: swap disk is full");
  }

  for (i = 0; i < SEC_PER_PG; i++)
  {
    disk_write (swap_disk, ret+i, (phy_addr + DISK_SECTOR_SIZE * i));
  }

  lock_release (&swap_lock);
  
  return ret;
}
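swap_out and swap_in form a pair: the sector number swap_out returns is recorded (e.g. in a supplemental page table entry) and handed back to swap_in on the next page fault for that page. A minimal round-trip sketch; note that swap_in also frees the slot, so a slot number is single-use:

/* Evict KPAGE to swap, then immediately reload it (illustration only;
   in practice the two halves run at eviction and at page-fault time). */
static void
swap_round_trip (void *kpage)
{
  disk_sector_t slot = swap_out (kpage);  /* page -> swap disk */
  swap_in (slot, kpage);                  /* swap disk -> page, slot freed */
}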
Example #18
/* Maps the file open as FD into the process's virtual address space: the
   entire file is mapped into consecutive virtual pages starting at ADDR.
   Pages in mmap regions are loaded lazily; evicting a page mapped by mmap
   writes it back to the file it was mapped from. Spare bytes on the final
   page are zeroed when that page is faulted in, and ignored when the page
   is written back to disk. Returns a mapid_t for the mapping, or -1 on
   failure. Failure occurs when the file has length 0, ADDR is not
   page-aligned, the range of pages mapped overlaps any existing mapped
   pages (including the stack or pages mapped at executable load time),
   ADDR is 0, or FD is 0 or 1. */
static mapid_t
sys_mmap(int fd, void *addr)
{
  /* Check that fd is a valid file descriptor. */
  check_fd(fd);

  /* Cannot map stdin or stdout. */
  if (fd == STDIN_FILENO || fd == STDOUT_FILENO) {
    return ERROR;
  }

  /* Address to map to cannot be 0, because some Pintos code assumes virtual
     page 0 is not mapped. */
  if (addr == 0) {
    return ERROR;
  }

  int size = sys_filesize(fd);

  /* Cannot map a file of size 0 bytes. */
  if (size == 0) {
    return ERROR;
  }

  /* ADDR must be page-aligned. */
  if (pg_ofs(addr) != 0) {
    return ERROR;
  }

  /* If ADDR is not in user/process address space, we cannot map
     the file there. */
  if (!is_user_vaddr(addr)) {
    sys_exit(ERROR);
  }

  /* Pages is number of pages needed to map file.
     (size % PGSIZE) gives the number of spare bytes on the final page.
     This is necessary because the division is integer division, and so
     will round down, but we want it to round up. */
  int pages = size / PGSIZE;
  if ((size % PGSIZE) != 0) {
    pages++;
  }

  struct thread *cur = thread_current();
  struct hash *mmap_table = &cur->mmap_table;
  struct hash *spt = &cur->supp_pt;

  lock_acquire(&secure_file);
  struct file *old_file = get_file(fd);
  if (!old_file) {
    lock_release(&secure_file);
    sys_exit(ERROR);
  }
  /* Must use file_reopen() to get independent 'struct file *' for same file
     (with same inode) because the file could be being read at different points
     (file->pos could be different) and they could have different
     file->deny_write (file_deny_write() could be called on one struct file but
     not another of same file (inode) but different struct file). */
  struct file *file = file_reopen(old_file);
  lock_release(&secure_file);

  int i;
  int bytes_to_write;
  void *cur_page;
  /* Check that the contiguous range of pages to be mapped doesn't overlap
     any existing set of mapped pages (Not including stack). Can then add
     these pages to the supplementary page table. */
  for (i = 0; i < pages; i++) {
    cur_page = (void *) ((uint8_t *) addr + i * PGSIZE);
    /* Check to see if there is an existing mapped page at what would be the
       i'th page of this mapped file. */
    if (get_spt_entry(spt, cur_page) != NULL) {
      /* Close the reopened file before bailing out, so it is not leaked. */
      lock_acquire(&secure_file);
      file_close(file);
      lock_release(&secure_file);
      return ERROR;
    }
    /* Only on the last page do we potentially not fill up the whole page
       with part of the file; a size that is an exact multiple of PGSIZE
       still fills the final page completely. */
    bytes_to_write = (i == pages - 1 && size % PGSIZE != 0)
                       ? size % PGSIZE : PGSIZE;
    /* Add current page to the supplementary page table. */
    spt_insert_file(cur_page, file, bytes_to_write,
                       PGSIZE - bytes_to_write, i * PGSIZE, true, true, false);
  }

  mapid_t mapid = cur->next_mapid;

  /* Lock must be acquired to call hash_insert() in mmap_table_insert(), and
     since we have thread_current() here already it makes sense to lock here
     rather than in mmap_table_insert() in mmap.c. */
  lock_acquire(&cur->mmap_table_lock);
  bool success = mmap_table_insert(mmap_table, addr, addr + size, pages,
                                     mapid, file);
  lock_release(&cur->mmap_table_lock);

  /* Return -1 if mmap_table_insert wasn't successful (meaning there isn't
     enough space to malloc for a struct mmap_mapping *). */
  if (!success) {
    return ERROR;
  }

  /* Increment next_mapid for this thread, so that the next mmap will have a
     different mapid, ensuring unique mapids for all mappings for a process.
     Increment after checking for mmap_table_insert() success status, because
     in the case of failure, we can reuse the mapid that the failed mapping
     would have had. */
  cur->next_mapid++;

  /* If successful, function returns the mapid that uniquely identifies
     the mapping within the process. */
  return mapid;
}
Example #19
/* Read system call. */
static int
sys_read (int handle, void *udst_, unsigned size) 
{
  uint8_t *udst = udst_;
  struct file_descriptor *fd;
  int bytes_read = 0;

  fd = lookup_fd (handle);
  while (size > 0) 
    {
      /* How much to read into this page? */
      size_t page_left = PGSIZE - pg_ofs (udst);
      size_t read_amt = size < page_left ? size : page_left;
      off_t retval;

      /* Read from file into page. */
      if (handle != STDIN_FILENO) 
        {
          if (!page_lock (udst, true)) 
            thread_exit (); 
          lock_acquire (&fs_lock);
          retval = file_read (fd->file, udst, read_amt);
          lock_release (&fs_lock);
          page_unlock (udst);
        }
      else 
        {
          size_t i;
          
          for (i = 0; i < read_amt; i++) 
            {
              char c = input_getc ();
              if (!page_lock (udst, true)) 
                thread_exit ();
              udst[i] = c;
              page_unlock (udst);
            }
          retval = read_amt;  /* the common accounting below adds it up */
        }
      
      /* Check success. */
      if (retval < 0)
        {
          if (bytes_read == 0)
            bytes_read = -1; 
          break;
        }
      bytes_read += retval; 
      if (retval != (off_t) read_amt) 
        {
          /* Short read, so we're done. */
          break; 
        }

      /* Advance. */
      udst += retval;
      size -= retval;
    }
   
  return bytes_read;
}
Example #20
/*! Memory map from file to user address. */
mapid_t mmap(uint32_t fd, void* addr){
    
    int f_size;
    void* addr_e;
    mapid_t mapid;
    uint32_t read_bytes, zero_bytes;
    uint8_t *upage;
    off_t ofs;
    struct file* file;
    struct f_info* f;
    struct mmap_elem* me;
    struct supp_table* st;
    struct thread* t = thread_current();
    
    if (!checkva(addr))
        return MAP_FAIL;

    /* Check for the invalid conditions:
     * fd is standard io; file size of the given fd is 0; the given user
     * address is not valid; the given address is not page aligned; the given
     * address is 0. If invalid, then return MAP_FAIL. */
    if (fd == STDIN_FILENO ||
        fd == STDOUT_FILENO ||
        filesize(fd) == 0 ||
        pg_ofs(addr) != 0 ||
        addr == 0) {
            return MAP_FAIL;
    }
    
    /* Get the file size, and check that the entire range of the to-be-mapped
     * user addresses does not overlap with any already-allocated pages. */
    f_size = filesize(fd);
    for (addr_e = addr; addr_e < addr + f_size; addr_e += PGSIZE){
            if (find_supp_table(addr_e) != NULL){
                /* If we found a supplemental page entry for this page
                 * address, then it is already allocated. Return MAP_FAIL. */
                return MAP_FAIL;
            }   
    }
    
    /* Increment the thread's max mmapid to give this mapping a unique ID. */
    ++ t->mmapid_max;
    mapid = t->mmapid_max;
    
    /* Allocate the new mmap struct. */
    me = (struct mmap_elem*) malloc(sizeof(struct mmap_elem));
    if (me == NULL)
        return MAP_FAIL;
    
    /* Reopen the file according to the file descriptor. */
    f = findfile(fd);
    if (f == NULL || f->isdir) {
        free(me);
        return MAP_FAIL;
    }
    lock_acquire(&filesys_lock);
    file = file_reopen(f->f);
    lock_release(&filesys_lock);
    
    /* If the file is NULL, then free the struct and return MAP_FAIL. */
    if (file == NULL){
        free(me);
        return MAP_FAIL;
    }
    
    /* Setup the fields of the mmap struct.*/
    me->file = file;
    me->mapid = mapid;
    /* Push the mmap struct to the list of mmap of this process. */
    list_push_back(&(t->mmap_lst), &(me->elem));
    list_init(&(me->s_table));
    
    /* Allocate pages for the read-in file data.*/
    upage = addr;
    ofs = 0;
    read_bytes = f_size;
    
    /* Zero-fill only the slack in the final page (0 if the file size is an
     * exact multiple of PGSIZE). */
    zero_bytes = (read_bytes % PGSIZE == 0) ? 0 : PGSIZE - read_bytes % PGSIZE;
    
    while (read_bytes > 0) {
        /* Calculate how to fill this page.
           We will read PAGE_READ_BYTES bytes from FILE
           and zero the final PAGE_ZERO_BYTES bytes. */
        size_t page_read_bytes = read_bytes < PGSIZE ? read_bytes : PGSIZE;
        size_t page_zero_bytes = PGSIZE - page_read_bytes;
        
        /* Create a new supplemental page entry for this page*/
        st = create_mmap_supp_table(file, ofs, upage, page_read_bytes, 
                                    page_zero_bytes, true);
        /* Push the page entry to the mmap struct's list */
        list_push_back(&(me->s_table), &(st->map_elem));
        
        /* Update the remaining read_bytes, zero_bytes;
         * Update upage, and ofs. This is for the next page to load the file.*/
        read_bytes -= page_read_bytes;
        zero_bytes -= page_zero_bytes;
        upage += PGSIZE;
        ofs += page_read_bytes;
        
    }
    return mapid;
}
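A worked example of the read/zero split in the loop above, assuming PGSIZE is 4096 and a 10,000-byte file: pages 0 and 1 are read in full, and only the last page has zero-filled slack. This standalone program reproduces the arithmetic:

#include <stdio.h>

#define PGSIZE 4096

int
main (void)
{
  unsigned read_bytes = 10000;
  while (read_bytes > 0)
    {
      unsigned page_read = read_bytes < PGSIZE ? read_bytes : PGSIZE;
      printf ("read %u, zero %u\n", page_read, PGSIZE - page_read);
      read_bytes -= page_read;
    }
  return 0;   /* prints 4096/0, 4096/0, then 1808/2288 */
}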
Example #21
void frame_evict_page(){
	struct frame_table_entry* fte;
	fte = clock_kick();
	ASSERT(fte != NULL);
	// the frame should be pinned here to guard against races during eviction
	ASSERT(fte->pg_vaddr != NULL);
	ASSERT(fte->kpg_vaddr != NULL);
	ASSERT(pg_ofs(fte->pg_vaddr) == 0);
	ASSERT(fte->owner != NULL);
	struct spte* spte = spt_lookup(fte->owner, fte->pg_vaddr);
	ASSERT(spte != NULL);
	ASSERT(spte->fte != NULL);
	ASSERT(spte->fte == fte);
	/*
	 * TODO:
	 * Copy the page to storage according to its 'type',
	 * record the missing-page info in the SPT,
	 * then set PTE present = 0,
	 * and finally remove the page from frame control.
	 */
	switch (spte->type) {
	case Swap_Space: {
		int offs = swap_store_pg(fte->kpg_vaddr);
		spte->offs = offs;
		spte->mapped = false;
		spte->fte = NULL;
		break;
	}
	case File_Map: {
		/*
		 * Write the page back to the file, if necessary.
		 * For mmap use only; never write back to an executable!
		 */
		struct file* f = spte->file;
		if (f != NULL && spte->writable) {
			file_seek(f, spte->offs);
			file_write(f, fte->kpg_vaddr, PGSIZE);
		}
		spte->mapped = false;
		spte->fte = NULL;
		break;
	}
	case Data:
	case BSS:
		PANIC("Data or BSS should be transformed to Swap_Space");
		break;
	case Read_Only:
		// for a read-only page, just discard the data
		spte->mapped = false;
		spte->fte = NULL;
		break;
	default:
		PANIC("evict unknown page");
	}
	frame_remove(fte);
}
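clock_kick() is not shown here; presumably it implements the second-chance (clock) policy over the frame table. A hedged sketch under assumed names (next_frame_in_clock_order and the pinned field are hypothetical, and owner is assumed to be the owning thread; the pagedir accessors are standard Pintos):

/* Scan frames in clock order until one is found that is neither pinned
   nor recently accessed; clear accessed bits as we pass. */
static struct frame_table_entry *
clock_kick_sketch (void)
{
  for (;;)
    {
      struct frame_table_entry *fte = next_frame_in_clock_order ();
      if (fte->pinned)
        continue;                 /* never evict a pinned frame */
      if (pagedir_is_accessed (fte->owner->pagedir, fte->pg_vaddr))
        /* Recently used: give it a second chance. */
        pagedir_set_accessed (fte->owner->pagedir, fte->pg_vaddr, false);
      else
        return fte;               /* cold frame: evict this one */
    }
}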