Example #1
int exos_bufcache_unmap64 (u32 dev, u_quad_t blk64, void *ptr)
{
   unsigned int vaddr = (unsigned int) ptr;
   struct bc_entry *bc_entry;
   int ret;

   //assert (size == NBPG);
   /* GROK -- this check should go away after awhile... */
   if (! (((bc_entry = __bc_lookup64(dev, blk64)) != NULL) &&
	  (bc_entry->buf_ppn == BUFCACHE_PGNO(vaddr))) ) {
      kprintf ("exos_bufcache_unmap: not actually mapped (dev %d, blk %x:%x, "
	       "ptr %p, bc_entry %p)\n", dev, QUAD2INT_HIGH(blk64),
	       QUAD2INT_LOW(blk64), ptr, bc_entry);
      return (-1);
      //assert (0);
   }

   if ((vaddr < BUFCACHE_REGION_START) || (vaddr >= BUFCACHE_REGION_END)) {
      kprintf ("exos_bufcache_unmap: ptr (%p) out of range\n", ptr);
      assert (0);
   }

   if ((ret = _exos_self_unmap_page (CAP_ROOT, vaddr)) < 0) {
      kprintf ("exos_bufcache_unmap: _exos_self_unmap_page failed (ret %d)\n",
	       ret);
      assert (0);
   }
   return (0);
}
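The range check and the BUFCACHE_PGNO() conversion in this function (and the BUFCACHE_ADDR() conversion in the next example) assume the buffer-cache region is a linear mapping of physical page numbers starting at BUFCACHE_REGION_START. A minimal sketch of how those macros could be defined under that assumption (illustrative only, not taken from the exopc headers):

/* Illustrative definitions only -- assumes physical page ppn is mapped at
   BUFCACHE_REGION_START + ppn * NBPG; the real exopc definitions may differ. */
#define BUFCACHE_ADDR(ppn)  (BUFCACHE_REGION_START + ((u_int)(ppn) << PGSHIFT))
#define BUFCACHE_PGNO(va)   (((u_int)(va) - BUFCACHE_REGION_START) >> PGSHIFT)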
Example #2
void * exos_bufcache_map64 (struct bc_entry *bc_entry, u32 dev, u_quad_t blk64,
			    u_int writeable)
{
   int ret;
   u_int vaddr;
   struct Xn_name *xn;
   struct Xn_name xn_nfs;

   if (bc_entry == NULL) {
      bc_entry = __bc_lookup64 (dev, blk64);
      if (bc_entry == NULL) {
         return (NULL);
      }
   }

   vaddr = BUFCACHE_ADDR (bc_entry->buf_ppn);

   if (writeable) {
      writeable = PG_W;
   }

   if (bc_entry->buf_dev > MAX_DISKS) {
     xn_nfs.xa_dev = bc_entry->buf_dev;
     xn_nfs.xa_name = 0;
     xn = &xn_nfs;
   } else {
     xn = &__sysinfo.si_pxn[bc_entry->buf_dev];
   }

   ret = _exos_self_insert_pte (CAP_ROOT,
				ppnf2pte(bc_entry->buf_ppn,
					 PG_P | PG_U | writeable | PG_SHARED),
				(u_int)vaddr, ESIP_DONTPAGE, xn);

   if ((bc_entry->buf_ppn != BUFCACHE_PGNO(vaddr)) ||
       (bc_entry->buf_state == BC_EMPTY) || (bc_entry->buf_blk64 != blk64) ||
       (bc_entry->buf_dev != dev) ||
       (bc_entry->buf_ppn != (va2ppn(vaddr)))) {
      kprintf ("buf_state %d, buf_blk %x:%x, diskBlock %x:%x, buf_dev %d, "
	       "dev %d\n", bc_entry->buf_state,
	       QUAD2INT_HIGH(bc_entry->buf_blk64),
	       QUAD2INT_LOW(bc_entry->buf_blk64), QUAD2INT_HIGH(blk64),
	       QUAD2INT_LOW(blk64), bc_entry->buf_dev, dev);
      kprintf ("buf_ppn %d, expected %d\n", bc_entry->buf_ppn,
	       va2ppn(vaddr));
      kprintf ("gotcha: lost race detected (and handled) in "
	       "exos_bufcache_map\n");
      exos_bufcache_unmap64 (dev, blk64, (void *)vaddr);
      vaddr = 0;
   } else if (ret != 0) {
      kprintf ("exos_bufcache_map: _exos_self_insert_pte failed (ret %d, "
	       "vaddr %x, ppn %d)\n", ret, vaddr, bc_entry->buf_ppn);
      assert (ret == 0);
   }

   return ((void *) vaddr);
}
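A minimal usage sketch of the two routines above (placeholder device and block numbers, simplified error handling): look the block up in the buffer cache, map it read-only, copy it out, and unmap it.

/* Sketch only: dev and blk are placeholders supplied by the caller. */
void example_read_cached_block (u32 dev, u_quad_t blk, char *out)
{
   struct bc_entry *bc = __bc_lookup64 (dev, blk);
   void *ptr;

   if (bc == NULL)
      return;			/* block is not in the buffer cache */

   ptr = exos_bufcache_map64 (bc, dev, blk, 0);	/* map read-only */
   if (ptr == NULL)
      return;			/* lost a race; the caller may retry */

   bcopy (ptr, out, NBPG);	/* use the mapped page */
   exos_bufcache_unmap64 (dev, blk, ptr);
}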
Example #3
File: mmap.c Project: aunali1/exopc
/* XXX currently if you mmap a file with a non-page-aligned length, anything
   you write past the length you asked to mmap will be written to disk (though
   not reflected in the size metadata on disk, which is good).  To be correct,
   the remainder of the last page should be zeroed first. */
static int mmap_fault_handler(struct mregion_ustruct *mru, void *faddr,
			      unsigned int errcode) {
  struct Mmap *m = &(((struct mmap_ustruct*)mru)->m);
  u_int va = (u_int)faddr;
  Pte pte = PG_U | PG_P; /* new page should be present and user space */
  struct Xn_name *xn;
  struct Xn_name xn_nfs;

  /* if it's a write to a page that's not mapped writable then return */
  if ((errcode & FEC_WR) && !(m->mmap_prot & PROT_WRITE)) return 0;

  /* if writable requested... */
  if (m->mmap_prot & PROT_WRITE) pte |= PG_W;

  /* if shared requested... */
  if (m->mmap_flags & MAP_SHARED) pte |= PG_SHARED;


  /* if reading a page that's not present but is mapped private from a file
     then mark it copy-on-write so that it will reflect changes as long as
     possible (must be mapped writable as well) */
  if (!(errcode & FEC_WR) && ((m->mmap_flags &
			       (MAP_PRIVATE | MAP_FILE)) ==
			      (MAP_PRIVATE | MAP_FILE)) &&
      (pte & PG_W)) {
    pte |= PG_COW;
    pte &= ~PG_W;
  }

  /* if mapped anonymous... */
  if (m->mmap_flags & MAP_ANON) {
    /* currently maps a free page and zeroes it */
    assert(_exos_self_insert_pte(0, pte, PGROUNDDOWN(va), 0, NULL) == 0);
    bzero((void*)PGROUNDDOWN(va), NBPG);

    return 1;
  }
  else { /* if mapping from a file */
    u_int seq;
    u_quad_t pblock;
    int done = 0, ret, fd;
    struct bc_entry *b;

    /* find a free file descriptor to use with the file pointer during
       the fault */
    for (fd = NR_OPEN - 1; fd >= 0; fd--)
      if (__current->fd[fd] == NULL) {
	__current->fd[fd] = m->mmap_filp;
	break;
      }
    assert (fd >= 0);

    /* if fault is from non-mapped page... */
    if (!(errcode & FEC_PR)) {
      /* map a page from the file */
      ret = bmap(fd, &pblock, m->mmap_offset + PGROUNDDOWN(va) -
		 (u_int)m->mmap_addr, &seq);
      if (ret == -EINVAL && !(m->mmap_flags & MAP_NOEXTEND)) {
	/* XXX File extension not possible for ExOS */
	assert(0);
      } else
	assert(ret == 0);
      assert(seq >= 0);
    mmap_retryMapPage:
      /* check if the block is in the buffer cache */
      while (!(b = __bc_lookup64(m->mmap_dev, pblock))) {
	if ((int)m->mmap_dev >= 0) {
	  /* disk device */
	  int count = 1;
	  /* _exos_bc_read_and_insert returns -E_EXISTS if *any* of the 
	     requested blocks are in the cache... */
	  /* read in up to 64k at a time */
	  while ((count <= seq) && (count < 16) &&
		 (!__bc_lookup64 (m->mmap_dev, (pblock+count)))) {
	    count++;
	  }
	  ret = _exos_bc_read_and_insert(m->mmap_dev, (unsigned int) pblock,
					 count, &done);
	  if (ret == 0)
	    /* sleep until request is completed... */
	    wk_waitfor_value_neq (&done, 0, 0);
	  else if (ret < 0 && ret != -E_EXISTS) {
	    kprintf ("_exos_bc_read_and_insert in mmap returned %d\n", ret);
	    panic ("mmap: error reading in block\n");
	  }
	} else {
	  /* nfs device */
	  if (nfs_bmap_read(fd, pblock) < 0)
	    panic ("mmap: error reading block from nfs\n");
	}
      }
      /* map the page */

      if (b->buf_dev > MAX_DISKS) {
	xn_nfs.xa_dev = b->buf_dev;
	xn_nfs.xa_name = 0;
	xn = &xn_nfs;
      } else {
	xn = &__sysinfo.si_pxn[b->buf_dev];
      }

      ret = _exos_self_insert_pte(0, (b->buf_ppn << PGSHIFT) | pte,
				  ((m->mmap_flags & MAP_PRIVATE) &&
				   (errcode & FEC_WR)) ?
				  MMAP_TEMP_REGION : PGROUNDDOWN(va),
				  ESIP_MMAPED, xn);
      /* make sure the page is completely read in */
      if (b->buf_state & BC_COMING_IN)
	wk_waitfor_value_neq(&b->buf_state, BC_VALID | BC_COMING_IN, 0);
      /* recheck that bc entry is still what we want */
      if (b == __bc_lookup64(m->mmap_dev, pblock)) {
	if (ret < 0) {
	  kprintf ("mmap: ret = %d\n", ret);
	  kprintf ("mmap: b->buf_dev = %d\n", b->buf_dev);
	  assert (0);
	}
      }
      else
	goto mmap_retryMapPage;

      /* if writing to a private page, then make a copy */
      if ((m->mmap_flags & MAP_PRIVATE) && (errcode & FEC_WR)) {
	assert(_exos_self_insert_pte(0, PG_P | PG_U | PG_W,
				     PGROUNDDOWN(va), 0, NULL) == 0);
	bcopy((void*)MMAP_TEMP_REGION, (void*)PGROUNDDOWN(va), NBPG);
	assert(_exos_self_unmap_page(0, MMAP_TEMP_REGION) == 0);
      }
    } else if ((m->mmap_flags & MAP_PRIVATE) && (errcode & FEC_WR) &&
	       (m->mmap_prot & PROT_WRITE)) {
      /* if fault is from a mapped page, but it needs copying... */
      /* perform cow */
      assert(_exos_self_insert_pte(0, PG_P | PG_U | PG_W,
				   MMAP_TEMP_REGION, ESIP_DONTPAGE, NULL) == 0);
      bcopy((void*)PGROUNDDOWN(va), (void*)MMAP_TEMP_REGION, NBPG);
      assert(_exos_self_insert_pte(0, vpt[PGNO(MMAP_TEMP_REGION)],
				   PGROUNDDOWN(va), 0, NULL) == 0);
      assert(_exos_self_unmap_page(0, MMAP_TEMP_REGION) == 0);
    } else { /* trying to write to a page that's mmap'd RO
		or read from system page???... */
      __current->fd[fd] = NULL;
      return 0;
    }

    /* free the file descriptor */
    __current->fd[fd] = NULL;
    return 1;
  }
}
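The first steps of the handler above translate the mapping's protection and flags into PTE bits. Pulled out into a hypothetical helper for illustration (this function is not part of the original file):

/* Illustrative helper, not in the original source: the PTE bits the fault
   handler computes for a mapping with the given prot/flags, for a fault
   described by errcode. */
static Pte mmap_pte_bits (int prot, int flags, unsigned int errcode)
{
  Pte pte = PG_U | PG_P;			/* present, user space */

  if (prot & PROT_WRITE) pte |= PG_W;		/* writable requested */
  if (flags & MAP_SHARED) pte |= PG_SHARED;	/* shared requested */

  /* a read fault on a private file mapping becomes copy-on-write so the
     page keeps reflecting the file for as long as possible */
  if (!(errcode & FEC_WR) &&
      ((flags & (MAP_PRIVATE | MAP_FILE)) == (MAP_PRIVATE | MAP_FILE)) &&
      (pte & PG_W)) {
    pte |= PG_COW;
    pte &= ~PG_W;
  }
  return pte;
}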
Example #4
File: mmap.c Project: aunali1/exopc
caddr_t __mmap (void *addr, size_t len, int prot, int flags, int fd, 
		off_t offset, u_int ke, int envid) {
  Pte pte = PG_U | PG_P;
  u_quad_t pblock;
  struct stat sb;
  u_int vp = 0;
  dev_t dev;
  int ret = 0;
  u_int uaddr = (u_int )addr;
  u_int seq = 0;
  int done;
  struct bc_entry *b;
  off_t block_offset;

  block_offset = (offset & PGMASK);
  offset &= ~PGMASK;
  len += block_offset;

  if ((flags & MAP_PRIVATE) && (flags & MAP_COPY))
    flags &= ~MAP_PRIVATE;
  /* figure out which pte bits we want to set for the pages in this segment */
  if (prot & PROT_WRITE) {
    if (flags & MAP_PRIVATE)
      pte |= PG_COW;
    else
      pte |= PG_W;
  } else {
    pte |= PG_RO;
  }

  if (flags & MAP_SHARED) pte |= PG_SHARED;

  /* XXX -- need to check access on fd */
  
  /* deal with the address they want to map segment at */

  if (uaddr == (uint)NULL) {
    uaddr = (u_int)__malloc(len);
    assert (!(uaddr & PGMASK));
  } else {
    uaddr = uaddr & ~PGMASK;
  }

  /* get the device that this fd refers to */
  if (fstat (fd, &sb) < 0) {
    errno = EINVAL;
    return (caddr_t )-1;
  }
  dev = sb.st_dev;

  /* make sure all the blocks we're mapping are in the cache (if not
     we read them in) and map them in */

  for (vp = 0; vp < len;) {
    struct Xn_name *xn;
    struct Xn_name xn_nfs;

    /* get the largest extent that starts at this offset */
    if (bmap (fd, &pblock, offset, &seq) < 0) {
      errno = EBADF;
      return (caddr_t )-1;
    }

    /* cannot close the race between a bc lookup and attempts to    */
    /* map the associated page (or read it in), so simply do things */
    /* optimistically and repeat them if necessary.                 */
  __mmap_retryMapPage:
    /* check if the block is in the buffer cache */
    while (!(b = __bc_lookup64 (dev, pblock))) {
      if (dev >= 0) {
	/* disk device */
        int count = 1;
	done = 0;
        assert (seq >= 0);
	/* _exos_bc_read_and_insert returns -E_EXISTS if *any* of the */
	/* requested blocks are in the cache...                       */
        while ((count <= seq) && (count < 16) &&
	       (!__bc_lookup64 (dev, (pblock+count)))) {
	  count++;
        }
	ret = _exos_bc_read_and_insert (dev, (unsigned int)pblock, count,
					&done);
        if (ret == -E_EXISTS) {
	  continue;
        }
	if (ret < 0) {
	  kprintf ("_exos_bc_read_and_insert in mmap returned %d\n", ret);
	  panic ("mmap: error reading in block\n");
	}
	/* sleep until request is completed... */
        wk_waitfor_value_neq (&done, 0, 0);
      } else {
	/* nfs device */
	
	if (nfs_bmap_read (fd, pblock) < 0) {
	  panic ("mmap: error reading block from nfs\n");
	}
      }
    }

    if (b->buf_dev > MAX_DISKS) {
      xn_nfs.xa_dev = b->buf_dev;
      xn_nfs.xa_name = 0;
      xn = &xn_nfs;
    } else {
      xn = &__sysinfo.si_pxn[b->buf_dev];
    }

    if (flags & MAP_COPY) {
      int ret;

      ret = _exos_self_insert_pte (0, (b->buf_ppn << PGSHIFT) |
				    PG_P | PG_U | PG_W, MMAP_TEMP_REGION,
				    ESIP_DONTPAGE, xn);
      if (ret < 0) {
	kprintf ("mmap: ret = %d\n", ret);
	assert (0);
      }
      ret = _exos_self_insert_pte (0, PG_P | PG_U | PG_W,
				    MMAP_TEMP_REGION + NBPG,
				    ESIP_DONTPAGE, NULL);
      if (ret < 0) {
	kprintf ("mmap (2nd): ret = %d\n", ret);
	assert (0);
      }      
      if (b->buf_state & BC_COMING_IN)
	wk_waitfor_value_neq(&b->buf_state, BC_VALID | BC_COMING_IN, 0);
      bcopy((void*)MMAP_TEMP_REGION, (void*)(MMAP_TEMP_REGION + NBPG), NBPG);
      assert(_exos_insert_pte (0, (vpt[PGNO(MMAP_TEMP_REGION + NBPG)] & ~PGMASK)
			       | pte | PG_D, uaddr + vp, ke, envid, 0, NULL) >= 0);
      assert(_exos_self_unmap_page (0, MMAP_TEMP_REGION) >= 0);
      assert(_exos_self_unmap_page (0, MMAP_TEMP_REGION + NBPG) >= 0);
    } else {
      ret = sys_bc_buffer_map (xn, CAP_ROOT, (b->buf_ppn << PGSHIFT) | pte, uaddr + vp,
			       ke, envid);
      if (b->buf_state & BC_COMING_IN)
	wk_waitfor_value_neq(&b->buf_state, BC_VALID | BC_COMING_IN, 0);
    }

    /* recheck that bc entry is still what we want */
    if (b == __bc_lookup64 (dev, pblock)) {
      assert (ret >= 0);
    } else {
      goto __mmap_retryMapPage;
    }
    
    offset += NBPG;
    vp += NBPG;
  }
  
  return (caddr_t )uaddr + block_offset;
}
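A minimal usage sketch for __mmap(): map the first page of a file read-only and shared, letting __mmap() pick the address. The ke and envid arguments are simply forwarded here because their exact meaning (a capability/key and a target environment id) is an assumption; real ExOS callers may supply them differently.

/* Sketch only: ke and envid are forwarded unchanged from the caller. */
caddr_t example_map_first_page (const char *path, u_int ke, int envid)
{
  int fd = open (path, O_RDONLY);
  caddr_t p;

  if (fd < 0)
    return (caddr_t)-1;

  /* addr == NULL lets __mmap allocate a page-aligned region itself */
  p = __mmap (NULL, NBPG, PROT_READ, MAP_SHARED, fd, 0, ke, envid);
  close (fd);
  return p;
}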