Example #1
File: main.c Project: aunali1/exopc
int main (void) {
  int ret;
  struct Xn_name xn;
  struct bc_entry *b;

  printf ("Hello\n");

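  /* Xn_name used by the buffer maps below: device PART, filesystem FSID */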
  xn.xa_dev = PART;
  xn.xa_name = FSID;

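  /* flush the micro-partition's first block from the buffer cache */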
  sys_bc_flush (PART, micropart_part_off (MICRO_PART), 1);

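  /* initialize micro-partitioning on the partition */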
  ret = sys_micropart_init (PART, CAP_ROOT);
  printf ("sys_micropart_init returned %d\n", ret);

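  /* attach filesystem FSID to the partition */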
  ret = sys_micropart_bootfs (PART, CAP_ROOT, FSID);
  printf ("sys_micropart_bootfs returned %d\n", ret);

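  /* read the block back into the buffer cache and look up its bc entry */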
  ret = _exos_bc_read_and_insert (PART, micropart_part_off (MICRO_PART), 1,
				  NULL);
  printf ("_exos_bc_read_and_insert returned %d\n", ret);

  b = __bc_lookup (PART, micropart_part_off (MICRO_PART));
  assert (b);

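  /* map the cached buffer at VA, then unmap it (a zero pte unmaps) */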
  ret = sys_self_bc_buffer_map (&xn, CAP_ROOT, (b->buf_ppn << PGSHIFT) | PG_U|PG_P,
				VA);
  printf ("sys_self_bc_buffer_map returned %d\n", ret);
  assert (sys_self_insert_pte (CAP_ROOT, 0, VA) == 0);

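  /* allocate the micro-partition and repeat the map/unmap check */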
  ret = sys_micropart_alloc (PART, MICRO_PART, CAP_ROOT, FSID);
  printf ("sys_micropart_alloc returned %d\n", ret);

  ret = sys_self_bc_buffer_map (&xn, CAP_ROOT, (b->buf_ppn << PGSHIFT) | PG_U|PG_P,
				VA);
  printf ("sys_self_bc_buffer_map returned %d\n", ret);
  assert (sys_self_insert_pte (CAP_ROOT, 0, VA) == 0);

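  /* deallocate it and check again */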
  ret = sys_micropart_dealloc (PART, MICRO_PART, CAP_ROOT, FSID);
  printf ("sys_micropart_dealloc returned %d\n", ret);

  ret = sys_self_bc_buffer_map (&xn, CAP_ROOT, (b->buf_ppn << PGSHIFT) | PG_U|PG_P,
				VA);
  printf ("sys_self_bc_buffer_map returned %d\n", ret);
  assert (sys_self_insert_pte (CAP_ROOT, 0, VA) == 0);

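  /* reallocate and check one last time */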
  ret = sys_micropart_alloc (PART, MICRO_PART, CAP_ROOT, FSID);
  printf ("sys_micropart_alloc returned %d\n", ret);

  ret = sys_self_bc_buffer_map (&xn, CAP_ROOT, (b->buf_ppn << PGSHIFT) | PG_U|PG_P,
				VA);
  printf ("sys_self_bc_buffer_map returned %d\n", ret);
  assert (sys_self_insert_pte (CAP_ROOT, 0, VA) == 0);
  return 0;
}
Example #2
int exos_bufcache_initfill (u32 dev, u32 blk, int blockcount, int *resptr)
{
   int ret;

   if (resptr) {
      *resptr = 0;
   }
/*
   ret = sys_xn_readin (firstBlock, blockcount, (xn_cnt_t*)&entry->resid);
*/
   ret = _exos_bc_read_and_insert (dev, blk, blockcount, resptr);
   /* GROK -- this check and assert need to go away.  This is a     */
   /* perfectly fine place to get bad return codes (e.g., E_EXISTS) */
   if (ret != 0) {
      kprintf ("warning: _exos_bc_read_and_insert failed: ret %d, "
	       "firstBlock %d, blockcount %d, dev %d\n", ret, blk, blockcount,
	       dev);
   }
   //assert (ret == 0);

   return (ret);
}
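
The resptr argument is a completion flag: _exos_bc_read_and_insert starts an
asynchronous read, and the flag becomes nonzero once the data has arrived,
which is how the mmap code below waits on it with wk_waitfor_value_neq.  A
minimal synchronous wrapper might look like this (read_block_sync is a
hypothetical helper, not part of the source):

/* Hypothetical sketch: read one block and sleep until the fill completes.
   -E_EXISTS counts as success, since it means the block is already
   cached (see the GROK comment above). */
static int read_block_sync (u32 dev, u32 blk)
{
   int done;   /* zeroed by exos_bufcache_initfill */
   int ret = exos_bufcache_initfill (dev, blk, 1, &done);

   if (ret == -E_EXISTS)
      return 0;                          /* already in the buffer cache */
   if (ret < 0)
      return ret;                        /* genuine failure */
   wk_waitfor_value_neq (&done, 0, 0);   /* wait for the async read */
   return 0;
}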
Example #3
File: mmap.c Project: aunali1/exopc
/* XXX currently if you mmap a file with a non-page-aligned length, whatever
   you write past the end of the requested mapping will be written to disk
   (though not reflected in the on-disk size metadata, which is good).  To be
   correct, that tail should be zeroed first. */
static int mmap_fault_handler(struct mregion_ustruct *mru, void *faddr,
			      unsigned int errcode) {
  struct Mmap *m = &(((struct mmap_ustruct*)mru)->m);
  u_int va = (u_int)faddr;
  Pte pte = PG_U | PG_P; /* new page should be present and user space */
  struct Xn_name *xn;
  struct Xn_name xn_nfs;

  /* if it's a write to a page that's not mapped writable then return */
  if ((errcode & FEC_WR) && !(m->mmap_prot & PROT_WRITE)) return 0;

  /* if writable requested... */
  if (m->mmap_prot & PROT_WRITE) pte |= PG_W;

  /* if shared requested... */
  if (m->mmap_flags & MAP_SHARED) pte |= PG_SHARED;


  /* if reading a page that's not present but is mapped private from a file
     then mark it copy-on-write so that it will reflect changes as long as
     possible (must be mapped writable as well) */
  if (!(errcode & FEC_WR) && ((m->mmap_flags &
			       (MAP_PRIVATE | MAP_FILE)) ==
			      (MAP_PRIVATE | MAP_FILE)) &&
      (pte & PG_W)) {
    pte |= PG_COW;
    pte &= ~PG_W;
  }

  /* if mapped anonymous... */
  if (m->mmap_flags & MAP_ANON) {
    /* currently maps a free page and zeroes it */
    assert(_exos_self_insert_pte(0, pte, PGROUNDDOWN(va), 0, NULL) == 0);
    bzero((void*)PGROUNDDOWN(va), NBPG);

    return 1;
  }
  else { /* if mapping from a file */
    u_int seq;
    u_quad_t pblock;
    int done = 0, ret, fd;
    struct bc_entry *b;

    /* find a free file descriptor to use with the file pointer during
       the fault */
    for (fd = NR_OPEN - 1; fd >= 0; fd--)
      if (__current->fd[fd] == NULL) {
	__current->fd[fd] = m->mmap_filp;
	break;
      }
    assert (fd >= 0);

    /* if fault is from non-mapped page... */
    if (!(errcode & FEC_PR)) {
      /* map a page from the file */
      ret = bmap(fd, &pblock, m->mmap_offset + PGROUNDDOWN(va) -
		 (u_int)m->mmap_addr, &seq);
      if (ret == -EINVAL && !(m->mmap_flags & MAP_NOEXTEND)) {
	/* XXX File extension not possible for ExOS */
	assert(0);
      } else
	assert(ret == 0);
      assert(seq >= 0);
    mmap_retryMapPage:
      /* check if the block is in the buffer cache */
      while (!(b = __bc_lookup64(m->mmap_dev, pblock))) {
	if ((int)m->mmap_dev >= 0) {
	  /* disk device */
	  int count = 1;
	  /* _exos_bc_read_and_insert returns -E_EXISTS if *any* of the 
	     requested blocks are in the cache... */
	  /* read in up to 64k at a time */
	  while ((count <= seq) && (count < 16) &&
		 (!__bc_lookup64 (m->mmap_dev, (pblock+count)))) {
	    count++;
	  }
	  ret = _exos_bc_read_and_insert(m->mmap_dev, (unsigned int) pblock,
					 count, &done);
	  if (ret == 0)
	    /* sleep until request is completed... */
	    wk_waitfor_value_neq (&done, 0, 0);
	  else if (ret < 0 && ret != -E_EXISTS) {
	    kprintf ("_exos_bc_read_and_insert in mmap returned %d\n", ret);
	    panic ("mmap: error reading in block\n");
	  }
	} else {
	  /* nfs device */
	  if (nfs_bmap_read(fd, pblock) < 0)
	    panic ("mmap: error reading block from nfs\n");
	}
      }
      /* map the page */

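      /* pick the Xn_name for this buffer: the per-disk entry from __sysinfo,
	 or a scratch name for NFS devices (buf_dev > MAX_DISKS) */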
      if (b->buf_dev > MAX_DISKS) {
	xn_nfs.xa_dev = b->buf_dev;
	xn_nfs.xa_name = 0;
	xn = &xn_nfs;
      } else {
	xn = &__sysinfo.si_pxn[b->buf_dev];
      }

      ret = _exos_self_insert_pte(0, (b->buf_ppn << PGSHIFT) | pte,
				  ((m->mmap_flags & MAP_PRIVATE) &&
				   (errcode & FEC_WR)) ?
				  MMAP_TEMP_REGION : PGROUNDDOWN(va),
				  ESIP_MMAPED, xn);
      /* make sure the page is completely read in */
      if (b->buf_state & BC_COMING_IN)
	wk_waitfor_value_neq(&b->buf_state, BC_VALID | BC_COMING_IN, 0);
      /* recheck that bc entry is still what we want */
      if (b == __bc_lookup64(m->mmap_dev, pblock)) {
	if (ret < 0) {
	  kprintf ("mmap: ret = %d\n", ret);
	  kprintf ("mmap: b->buf_dev = %d\n", b->buf_dev);
	  assert (0);
	}
      }
      else
	goto mmap_retryMapPage;

      /* if writing to a private page, then make a copy */
      if ((m->mmap_flags & MAP_PRIVATE) && (errcode & FEC_WR)) {
	assert(_exos_self_insert_pte(0, PG_P | PG_U | PG_W,
				     PGROUNDDOWN(va), 0, NULL) == 0);
	bcopy((void*)MMAP_TEMP_REGION, (void*)PGROUNDDOWN(va), NBPG);
	assert(_exos_self_unmap_page(0, MMAP_TEMP_REGION) == 0);
      }
    } else if ((m->mmap_flags & MAP_PRIVATE) && (errcode & FEC_WR) &&
	       (m->mmap_prot & PROT_WRITE)) {
      /* if fault is from a mapped page, but it needs copying... */
      /* perform cow */
      assert(_exos_self_insert_pte(0, PG_P | PG_U | PG_W,
				   MMAP_TEMP_REGION, ESIP_DONTPAGE, NULL) == 0);
      bcopy((void*)PGROUNDDOWN(va), (void*)MMAP_TEMP_REGION, NBPG);
      assert(_exos_self_insert_pte(0, vpt[PGNO(MMAP_TEMP_REGION)],
				   PGROUNDDOWN(va), 0, NULL) == 0);
      assert(_exos_self_unmap_page(0, MMAP_TEMP_REGION) == 0);
    } else { /* trying to write to a page that's mmap'd read-only, or to
		read from a system page?  decline the fault */
      __current->fd[fd] = NULL;
      return 0;
    }

    /* free the file descriptor */
    __current->fd[fd] = NULL;
    return 1;
  }
}
Example #4
File: mmap.c Project: aunali1/exopc
caddr_t __mmap (void *addr, size_t len, int prot, int flags, int fd, 
		off_t offset, u_int ke, int envid) {
  Pte pte = PG_U | PG_P;
  u_quad_t pblock;
  struct stat sb;
  u_int vp = 0;
  dev_t dev;
  int ret = 0;
  u_int uaddr = (u_int )addr;
  u_int seq = 0;
  int done;
  struct bc_entry *b;
  off_t block_offset;

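  /* page-align the file offset, keeping the in-page remainder so the
     returned pointer still addresses the requested byte */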
  block_offset = (offset & PGMASK);
  offset &= ~PGMASK;
  len += block_offset;

  if ((flags & MAP_PRIVATE) && (flags & MAP_COPY))
    flags &= ~MAP_PRIVATE;
  /* figure out which pte bits we want to set for the pages in this segment */
  if (prot & PROT_WRITE) {
    if (flags & MAP_PRIVATE)
      pte |= PG_COW;
    else
      pte |= PG_W;
  } else {
    pte |= PG_RO;
  }

  if (flags & MAP_SHARED) pte |= PG_SHARED;

  /* XXX -- need to check access on fd */
  
  /* deal with the address they want to map segment at */

  if (uaddr == (uint)NULL) {
    uaddr = (u_int)__malloc(len);
    assert (!(uaddr & PGMASK));
  } else {
    uaddr = uaddr & ~PGMASK;
  }

  /* get the device that this fd refers to */
  if (fstat (fd, &sb) < 0) {
    errno = EINVAL;
    return (caddr_t )-1;
  }
  dev = sb.st_dev;

  /* make sure all the blocks we're mapping are in the cache (if not
     we read them in) and map them in */

  for (vp = 0; vp < len;) {
    struct Xn_name *xn;
    struct Xn_name xn_nfs;

    /* get the largest extent that starts at this offset */
    if (bmap (fd, &pblock, offset, &seq) < 0) {
      errno = EBADF;
      return (caddr_t )-1;
    }

    /* we cannot close the race between a bc lookup and an attempt to  */
    /* map the associated page (or read it in), so simply do things    */
    /* optimistically and repeat them if necessary.                    */
  __mmap_retryMapPage:
    /* check if the block is in the buffer cache */
    while (!(b = __bc_lookup64 (dev, pblock))) {
      if (dev >= 0) {
	/* disk device */
        int count = 1;
	done = 0;
        assert (seq >= 0);
	/* _exos_bc_read_and_insert returns -E_EXISTS if *any* of the */
	/* requested blocks are in the cache...                       */
        while ((count <= seq) && (count < 16) &&
	       (!__bc_lookup64 (dev, (pblock+count)))) {
	  count++;
        }
	ret = _exos_bc_read_and_insert (dev, (unsigned int)pblock, count,
					&done);
        if (ret == -E_EXISTS) {
	  continue;
        }
	if (ret < 0) {
	  kprintf ("_exos_bc_read_and_insert in mmap returned %d\n", ret);
	  panic ("mmap: error reading in block\n");
	}
	/* sleep until request is completed... */
        wk_waitfor_value_neq (&done, 0, 0);
      } else {
	/* nfs device */
	
	if (nfs_bmap_read (fd, pblock) < 0) {
	  panic ("mmap: error reading block from nfs\n");
	}
      }
    }

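    /* as in the fault handler: per-disk Xn_name from __sysinfo, or a
       scratch one for NFS devices */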
    if (b->buf_dev > MAX_DISKS) {
      xn_nfs.xa_dev = b->buf_dev;
      xn_nfs.xa_name = 0;
      xn = &xn_nfs;
    } else {
      xn = &__sysinfo.si_pxn[b->buf_dev];
    }

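    /* MAP_COPY: map the cache buffer and a fresh page side by side in
       the temp region, copy the block, and hand the private copy to the
       target environment */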
    if (flags & MAP_COPY) {
      int ret;

      ret = _exos_self_insert_pte (0, (b->buf_ppn << PGSHIFT) |
				    PG_P | PG_U | PG_W, MMAP_TEMP_REGION,
				    ESIP_DONTPAGE, xn);
      if (ret < 0) {
	kprintf ("mmap: ret = %d\n", ret);
	assert (0);
      }
      ret = _exos_self_insert_pte (0, PG_P | PG_U | PG_W,
				    MMAP_TEMP_REGION + NBPG,
				    ESIP_DONTPAGE, NULL);
      if (ret < 0) {
	kprintf ("mmap (2nd): ret = %d\n", ret);
	assert (0);
      }      
      if (b->buf_state & BC_COMING_IN)
	wk_waitfor_value_neq(&b->buf_state, BC_VALID | BC_COMING_IN, 0);
      bcopy((void*)MMAP_TEMP_REGION, (void*)(MMAP_TEMP_REGION + NBPG), NBPG);
      assert(_exos_insert_pte (0, (vpt[PGNO(MMAP_TEMP_REGION + NBPG)] & ~PGMASK)
			       | pte | PG_D, uaddr + vp, ke, envid, 0, NULL) >= 0);
      assert(_exos_self_unmap_page (0, MMAP_TEMP_REGION) >= 0);
      assert(_exos_self_unmap_page (0, MMAP_TEMP_REGION + NBPG) >= 0);
    } else {
      ret = sys_bc_buffer_map (xn, CAP_ROOT, (b->buf_ppn << PGSHIFT) | pte, uaddr + vp,
			       ke, envid);
      if (b->buf_state & BC_COMING_IN)
	wk_waitfor_value_neq(&b->buf_state, BC_VALID | BC_COMING_IN, 0);
    }

    /* recheck that bc entry is still what we want */
    if (b == __bc_lookup64 (dev, pblock)) {
      assert (ret >= 0);
    } else {
      goto __mmap_retryMapPage;
    }
    
    offset += NBPG;
    vp += NBPG;
  }
  
  return (caddr_t )uaddr + block_offset;
}
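
A hedged usage sketch (all values hypothetical; fd is an open file and
ke/envid identify the target environment, as in the calls above).  Note that
the offset need not be page-aligned: __mmap rounds it down internally and
returns the mapped address advanced by the in-page remainder (block_offset):

/* Hypothetical call: map 100 bytes of a file read-only at an address
   chosen via __malloc; on failure __mmap sets errno and returns
   (caddr_t)-1. */
caddr_t p = __mmap (NULL, 100, PROT_READ, MAP_FILE | MAP_PRIVATE,
		    fd, 0, ke, envid);
if (p == (caddr_t)-1)
  kprintf ("__mmap failed, errno %d\n", errno);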