Code Example #1
File: fault.c  Project: aunali1/exopc
int
__remap_reserved_page(u_int va, u_int pte_flags)
{
  u_int i;

  for (i=0; i < __eea->eea_reserved_pages; i++) {
    if (vpt[PGNO((u_int)__eea->eea_reserved_first) + i] & PG_P) {
      /* remap this reserved frame at va, then drop its old mapping */
      if (_exos_self_insert_pte(CAP_ROOT,
				ppnf2pte(PGNO(vpt[PGNO((u_int)__eea->eea_reserved_first) + i]),
					 pte_flags),
				va, 0, NULL) < 0 ||
	  _exos_self_unmap_page(CAP_ROOT,
				(u_int)__eea->eea_reserved_first +
				i * NBPG) < 0) {
	sys_cputs("__remap_reserved_page: can't remap\n");
	return -1;
      }
      UAREA.u_reserved_pages--;
      return 0;
    }
  }

  sys_cputs("__remap_reserved_page: none left\n");
  return -1;
}
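
The helper above walks the reserved-page window through the self-mapped page table (vpt), remaps the first still-present reserved frame at the requested va, and retires its old mapping. A caller might use it from a fault path roughly as follows; this is a hypothetical sketch (handle_reserved_fault, fault_va, and the flag choice are illustrative, not part of the project):

/* Hypothetical caller sketch: back a faulting address with one of the
 * pre-reserved physical pages.  PG_P/PG_U/PG_W follow the conventions
 * used in the example above. */
static int
handle_reserved_fault(u_int fault_va)
{
  if (__remap_reserved_page(PGROUNDDOWN(fault_va), PG_P | PG_U | PG_W) < 0) {
    sys_cputs("handle_reserved_fault: no reserved pages left\n");
    return -1;
  }
  return 0;
}
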
Code Example #2
void * exos_bufcache_alloc (u32 dev, u32 blk, int zerofill, int writeable,
			    int usexn)
{
   int ret;
   unsigned int vaddr = BUFCACHE_ADDR (__sysinfo.si_nppages);

   if (writeable) {
      writeable = PG_W;
   }

   /* This first call to insert_pte causes a physical page to be allocated. */
   /* Start with the page mapped writeable, since it might be zerofilled.   */

   if (((ret = _exos_self_insert_pte (CAP_ROOT, PG_W | PG_P | PG_U | PG_SHARED,
				      vaddr, ESIP_DONTPAGE, NULL)) < 0) ||
       (vpt[PGNO(vaddr)] == 0)) {
      kprintf ("exos_bufcache_alloc: _exos_self_insert_pte failed (ret %d)\n",
	       ret);
      return (NULL);
   }

   if (zerofill) {
      bzero ((char *)vaddr, NBPG);
   }

   /* do final-location mapping based on "writeable" variable */

   if (((ret = _exos_self_insert_pte (CAP_ROOT,
				      ppnf2pte(va2ppn(vaddr),
					       writeable | PG_P | PG_U |
					       PG_SHARED),
				      BUFCACHE_ADDR (va2ppn(vaddr)),
				      ESIP_DONTPAGE, &__sysinfo.si_pxn[dev])) < 0) ||
       (vpt[PGNO(vaddr)] == 0)) {
      kprintf ("exos_bufcache_alloc: failed to add real mapping (ret %d)\n",
	       ret);
      assert (0);
   }
   vaddr = BUFCACHE_ADDR (va2ppn(vaddr));

   /* Unmap the allocation mapping before inserting into bc, to make sure */
   /* that we never have a non-writeable bc entry mapped writable...      */

   if ((ret = _exos_self_unmap_page (CAP_ROOT,
				     BUFCACHE_ADDR(__sysinfo.si_nppages)))
       < 0) {
      kprintf ("exos_bufcache_alloc: failed to clobber fake mapping "
	       "(ret %d)\n", ret);
      assert (0);
   }

#if 1
   vaddr = (u_int) exos_bufcache_insert (dev, blk, (void *)vaddr, usexn);
   assert (vaddr == BUFCACHE_ADDR (va2ppn(vaddr)));
#else
   vaddr = BUFCACHE_ADDR (va2ppn(vaddr));
#endif

   return ((void *) vaddr);
}
Code Example #3
int exos_bufcache_unmap64 (u32 dev, u_quad_t blk64, void *ptr)
{
   unsigned int vaddr = (unsigned int) ptr;
   struct bc_entry *bc_entry;
   int ret;

   //assert (size == NBPG);
   /* GROK -- this check should go away after awhile... */
   if (! (((bc_entry = __bc_lookup64(dev, blk64)) != NULL) &&
	  (bc_entry->buf_ppn == BUFCACHE_PGNO(vaddr))) ) {
      kprintf ("exos_bufcache_unmap: not actually mapped (dev %d, blk %x:%x, "
	       "ptr %p, bc_entry %p)\n", dev, QUAD2INT_HIGH(blk64),
	       QUAD2INT_LOW(blk64), ptr, bc_entry);
      return (-1);
      //assert (0);
   }

   if ((vaddr < BUFCACHE_REGION_START) || (vaddr >= BUFCACHE_REGION_END)) {
      kprintf ("exos_bufcache_unmap: ptr (%p) out of range\n", ptr);
      assert (0);
   }

   if ((ret = _exos_self_unmap_page (CAP_ROOT, vaddr)) < 0) {
      kprintf ("exos_bufcache_unmap: _exos_self_unmap_page failed (ret %d)\n",
	       ret);
      assert (0);
   }
   return (0);
}
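
Taken together, examples #2 and #3 form an allocate/release pair: exos_bufcache_alloc maps a fresh cache page for (dev, blk), and exos_bufcache_unmap64 drops the caller's mapping once the block is registered. A minimal round trip might look like the following sketch (bufcache_roundtrip is a hypothetical name; dev and blk come from the caller):

/* Hypothetical round-trip sketch over the two calls above: allocate a
 * zero-filled, writeable cache page for (dev, blk), then drop our
 * mapping of it.  Error handling is deliberately minimal. */
static int
bufcache_roundtrip (u32 dev, u32 blk)
{
   void *p = exos_bufcache_alloc (dev, blk, 1 /* zerofill */,
				  1 /* writeable */, 0 /* usexn */);
   if (p == NULL)
      return (-1);
   return (exos_bufcache_unmap64 (dev, (u_quad_t)blk, p));
}
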
Code Example #4
File: procd_ps.c  Project: aunali1/exopc
static u_int read_write_memory (int request, u_int env, u_int addr, int data) {
  u_int write;
  u_int pte;
  u_int offset;
  u_int va = (u_int )addr;
  u_int temp_page;
  int r;

#define PT_WRITE_D 1
#define PT_WRITE_I 1
#define PT_READ_D 0
#define PT_READ_I 0

  if (request == PT_WRITE_D || request == PT_WRITE_I) {
    write = 1;
  } else {
    write = 0;
  }

  assert (env);
  if (!(pte = sys_read_pte (va, 0, env, &r))) {
    printf ("ptrace: couldn't read pte (va %x, env %x)\n", va, env);
    return (0);
  }
  temp_page = (u_int)__malloc(NBPG);
  if (temp_page == 0 ||
      _exos_self_insert_pte (0, write ? pte | PG_W : pte, temp_page,
			     ESIP_DONTPAGE, NULL) < 0) {
    printf ("ptrace: couldn't map page\n");
    __free((void*)temp_page);
    return (0);
  }
  offset = va & PGMASK;
  va = temp_page+offset;

  if (write) {
    *(int *)va = data;
  } else {
    data = *(int *)va;
  }

  if (_exos_self_unmap_page (0, temp_page) < 0) {
    printf ("ptrace: couldn't unmap page\n");
    __free((void*)temp_page);
    assert (0);
  }

  __free((void*)temp_page);
  return (data);
}
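
The pattern in this example — map another environment's pte at a scratch address with _exos_self_insert_pte, touch the memory through it, then _exos_self_unmap_page the scratch slot — recurs throughout these examples. Reduced to its core, it looks like the sketch below (peek_word is a hypothetical name; like the code above, it assumes __malloc(NBPG) returns a page-aligned region):

/* Hypothetical sketch of the scratch-mapping idiom used above: map an
 * existing pte at a temporary page, read one word through it, and tear
 * the mapping down again.  `pte` would come from sys_read_pte as in the
 * example. */
static int
peek_word (u_int pte, u_int va, int *out)
{
  u_int temp_page = (u_int)__malloc(NBPG);

  if (temp_page == 0 ||
      _exos_self_insert_pte (0, pte, temp_page, ESIP_DONTPAGE, NULL) < 0) {
    __free((void*)temp_page);
    return -1;
  }
  *out = *(int *)(temp_page + (va & PGMASK));
  if (_exos_self_unmap_page (0, temp_page) < 0) {
    __free((void*)temp_page);
    return -1;
  }
  __free((void*)temp_page);
  return 0;
}
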
Code Example #5
File: mmap.c  Project: aunali1/exopc
/* XXX currently if you mmap a file with a non page-aligned length, whatever
   you write past the end of the region you asked for will be written to disk
   (though not reflected in the file's size metadata on disk, which is good).
   To be correct it should zero that tail first. */
static int mmap_fault_handler(struct mregion_ustruct *mru, void *faddr,
			      unsigned int errcode) {
  struct Mmap *m = &(((struct mmap_ustruct*)mru)->m);
  u_int va = (u_int)faddr;
  Pte pte = PG_U | PG_P; /* new page should be present and user space */
  struct Xn_name *xn;
  struct Xn_name xn_nfs;

  /* if it's a write to a page that's not mapped writable then return */
  if ((errcode & FEC_WR) && !(m->mmap_prot & PROT_WRITE)) return 0;

  /* if writable requested... */
  if (m->mmap_prot & PROT_WRITE) pte |= PG_W;

  /* if shared requested... */
  if (m->mmap_flags & MAP_SHARED) pte |= PG_SHARED;


  /* if reading a page that's not present but is mapped private from a file
     then mark it copy-on-write so that it will reflect changes as long as
     possible (must be mapped writable as well) */
  if (!(errcode & FEC_WR) && ((m->mmap_flags &
			       (MAP_PRIVATE | MAP_FILE)) ==
			      (MAP_PRIVATE | MAP_FILE)) &&
      (pte & PG_W)) {
    pte |= PG_COW;
    pte &= ~PG_W;
  }

  /* if mapped anonymous... */
  if (m->mmap_flags & MAP_ANON) {
    /* currently maps a free page and zeroes it */
    assert(_exos_self_insert_pte(0, pte, PGROUNDDOWN(va), 0, NULL) == 0);
    bzero((void*)PGROUNDDOWN(va), NBPG);

    return 1;
  }
  else { /* if mapping from a file */
    u_int seq;
    u_quad_t pblock;
    int done = 0, ret, fd;
    struct bc_entry *b;

    /* find a free file descriptor to use with the file pointer during
       the fault */
    for (fd = NR_OPEN - 1; fd >= 0; fd--)
      if (__current->fd[fd] == NULL) {
	__current->fd[fd] = m->mmap_filp;
	break;
      }
    assert (fd >= 0);

    /* if fault is from non-mapped page... */
    if (!(errcode & FEC_PR)) {
      /* map a page from the file */
      ret = bmap(fd, &pblock, m->mmap_offset + PGROUNDDOWN(va) -
		 (u_int)m->mmap_addr, &seq);
      if (ret == -EINVAL && !(m->mmap_flags & MAP_NOEXTEND)) {
	/* XXX File extension not possible for ExOS */
	assert(0);
      } else
	assert(ret == 0);
      assert(seq >= 0);
    mmap_retryMapPage:
      /* check if the block is in the buffer cache */
      while (!(b = __bc_lookup64(m->mmap_dev, pblock))) {
	if ((int)m->mmap_dev >= 0) {
	  /* disk device */
	  int count = 1;
	  /* _exos_bc_read_and_insert returns -E_EXISTS if *any* of the 
	     requested blocks are in the cache... */
	  /* read in up to 64k at a time */
	  while ((count <= seq) && (count < 16) &&
		 (!__bc_lookup64 (m->mmap_dev, (pblock+count)))) {
	    count++;
	  }
	  ret = _exos_bc_read_and_insert(m->mmap_dev, (unsigned int) pblock,
					 count, &done);
	  if (ret == 0)
	    /* sleep until request is completed... */
	    wk_waitfor_value_neq (&done, 0, 0);
	  else if (ret < 0 && ret != -E_EXISTS) {
	    kprintf ("_exos_bc_read_and_insert in mmap returned %d\n", ret);
	    panic ("mmap: error reading in block\n");
	  }
	} else {
	  /* nfs device */
	  if (nfs_bmap_read(fd, pblock) < 0)
	    panic ("mmap: error reading block from nfs\n");
	}
      }
      /* map the page */

      if (b->buf_dev > MAX_DISKS) {
	xn_nfs.xa_dev = b->buf_dev;
	xn_nfs.xa_name = 0;
	xn = &xn_nfs;
      } else {
	xn = &__sysinfo.si_pxn[b->buf_dev];
      }

      ret = _exos_self_insert_pte(0, (b->buf_ppn << PGSHIFT) | pte,
				  ((m->mmap_flags & MAP_PRIVATE) &&
				   (errcode & FEC_WR)) ?
				  MMAP_TEMP_REGION : PGROUNDDOWN(va),
				  ESIP_MMAPED, xn);
      /* make sure the page is completely read in */
      if (b->buf_state & BC_COMING_IN)
	wk_waitfor_value_neq(&b->buf_state, BC_VALID | BC_COMING_IN, 0);
      /* recheck that bc entry is still what we want */
      if (b == __bc_lookup64(m->mmap_dev, pblock)) {
	if (ret < 0) {
	  kprintf ("mmap: ret = %d\n", ret);
	  kprintf ("mmap: b->buf_dev = %d\n", b->buf_dev);
	  assert (0);
	}
      }
      else
	goto mmap_retryMapPage;

      /* if writing to a private page, then make a copy */
      if ((m->mmap_flags & MAP_PRIVATE) && (errcode & FEC_WR)) {
	assert(_exos_self_insert_pte(0, PG_P | PG_U | PG_W,
				     PGROUNDDOWN(va), 0, NULL) == 0);
	bcopy((void*)MMAP_TEMP_REGION, (void*)PGROUNDDOWN(va), NBPG);
	assert(_exos_self_unmap_page(0, MMAP_TEMP_REGION) == 0);
      }
    } else if ((m->mmap_flags & MAP_PRIVATE) && (errcode & FEC_WR) &&
	       (m->mmap_prot & PROT_WRITE)) {
      /* if fault is from a mapped page, but it needs copying... */
      /* perform cow */
      assert(_exos_self_insert_pte(0, PG_P | PG_U | PG_W,
				   MMAP_TEMP_REGION, ESIP_DONTPAGE, NULL) == 0);
      bcopy((void*)PGROUNDDOWN(va), (void*)MMAP_TEMP_REGION, NBPG);
      assert(_exos_self_insert_pte(0, vpt[PGNO(MMAP_TEMP_REGION)],
				   PGROUNDDOWN(va), 0, NULL) == 0);
      assert(_exos_self_unmap_page(0, MMAP_TEMP_REGION) == 0);
    } else { /* trying to write to a page that's mmap'd RO
		or read from system page???... */
      __current->fd[fd] = NULL;
      return 0;
    }

    /* free the file descriptor */
    __current->fd[fd] = NULL;
    return 1;
  }
}
Code Example #6
File: mmap.c  Project: aunali1/exopc
caddr_t __mmap (void *addr, size_t len, int prot, int flags, int fd, 
		off_t offset, u_int ke, int envid) {
  Pte pte = PG_U | PG_P;
  u_quad_t pblock;
  struct stat sb;
  u_int vp = 0;
  dev_t dev;
  int ret = 0;
  u_int uaddr = (u_int )addr;
  u_int seq = 0;
  int done;
  struct bc_entry *b;
  off_t block_offset;

  block_offset = (offset & PGMASK);
  offset &= ~PGMASK;
  len += block_offset;

  if ((flags & MAP_PRIVATE) && (flags & MAP_COPY))
    flags &= ~MAP_PRIVATE;
  /* figure out which pte bits we want to set for the pages in this segment */
  if (prot & PROT_WRITE)
    if (flags & MAP_PRIVATE)
      pte |= PG_COW;
    else
      pte |= PG_W;
  else
    pte |= PG_RO;

  if (flags & MAP_SHARED) pte |= PG_SHARED;

  /* XXX -- need to check access on fd */
  
  /* deal with the address they want to map segment at */

  if (uaddr == (uint)NULL) {
    uaddr = (u_int)__malloc(len);
    assert (!(uaddr & PGMASK));
  } else {
    uaddr = uaddr & ~PGMASK;
  }

  /* get the device that this fd refers to */
  if (fstat (fd, &sb) < 0) {
    errno = EINVAL;
    return (caddr_t )-1;
  }
  dev = sb.st_dev;

  /* make sure all the blocks we're mapping are in the cache (if not
     we read them in) and map them in */

  for (vp = 0; vp < len;) {
    struct Xn_name *xn;
    struct Xn_name xn_nfs;

    /* get the largest extent that starts at this offset */
    if (bmap (fd, &pblock, offset, &seq) < 0) {
      errno = EBADF;
      return (caddr_t )-1;
    }

    /* We cannot close the race between a bc lookup and attempts to   */
    /* map the associated page (or read it in), so simply do things   */
    /* optimistically and repeat them if necessary.                   */
  __mmap_retryMapPage:
    /* check if the block is in the buffer cache */
    while (!(b = __bc_lookup64 (dev, pblock))) {
      if ((int)dev >= 0) {
	/* disk device */
        int count = 1;
	done = 0;
        assert (seq >= 0);
	/* _exos_bc_read_and_insert returns -E_EXISTS if *any* of the */
	/* requested blocks are in the cache...                       */
        while ((count <= seq) && (count < 16) &&
	       (!__bc_lookup64 (dev, (pblock+count)))) {
	  count++;
        }
	ret = _exos_bc_read_and_insert (dev, (unsigned int)pblock, count,
					&done);
        if (ret == -E_EXISTS) {
	  continue;
        }
	if (ret < 0) {
	  kprintf ("_exos_bc_read_and_insert in mmap returned %d\n", ret);
	  panic ("mmap: error reading in block\n");
	}
	/* sleep until request is completed... */
        wk_waitfor_value_neq (&done, 0, 0);
      } else {
	/* nfs device */
	
	if (nfs_bmap_read (fd, pblock) < 0) {
	  panic ("mmap: error reading block from nfs\n");
	}
      }
    }

    if (b->buf_dev > MAX_DISKS) {
      xn_nfs.xa_dev = b->buf_dev;
      xn_nfs.xa_name = 0;
      xn = &xn_nfs;
    } else {
      xn = &__sysinfo.si_pxn[b->buf_dev];
    }

    if (flags & MAP_COPY) {
      int ret;

      ret = _exos_self_insert_pte (0, (b->buf_ppn << PGSHIFT) |
				    PG_P | PG_U | PG_W, MMAP_TEMP_REGION,
				    ESIP_DONTPAGE, xn);
      if (ret < 0) {
	kprintf ("mmap: ret = %d\n", ret);
	assert (0);
      }
      ret = _exos_self_insert_pte (0, PG_P | PG_U | PG_W,
				    MMAP_TEMP_REGION + NBPG,
				    ESIP_DONTPAGE, NULL);
      if (ret < 0) {
	kprintf ("mmap (2nd): ret = %d\n", ret);
	assert (0);
      }      
      if (b->buf_state & BC_COMING_IN)
	wk_waitfor_value_neq(&b->buf_state, BC_VALID | BC_COMING_IN, 0);
      bcopy((void*)MMAP_TEMP_REGION, (void*)(MMAP_TEMP_REGION + NBPG), NBPG);
      assert(_exos_insert_pte (0, (vpt[PGNO(MMAP_TEMP_REGION + NBPG)] & ~PGMASK)
			       | pte | PG_D, uaddr + vp, ke, envid, 0, NULL) >= 0);
      assert(_exos_self_unmap_page (0, MMAP_TEMP_REGION) >= 0);
      assert(_exos_self_unmap_page (0, MMAP_TEMP_REGION + NBPG) >= 0);
    } else {
      ret = sys_bc_buffer_map (xn, CAP_ROOT, (b->buf_ppn << PGSHIFT) | pte, uaddr + vp,
			       ke, envid);
      if (b->buf_state & BC_COMING_IN)
	wk_waitfor_value_neq(&b->buf_state, BC_VALID | BC_COMING_IN, 0);
    }

    /* recheck that bc entry is still what we want */
    if (b == __bc_lookup64 (dev, pblock)) {
      assert (ret >= 0);
    } else {
      goto __mmap_retryMapPage;
    }
    
    offset += NBPG;
    vp += NBPG;
  }
  
  return (caddr_t )uaddr + block_offset;
}
Code Example #7
File: shexec.c  Project: aunali1/exopc
/* load an EXOS_MAGIC binary */
int
__do_simple_load (int fd, struct Env *e)
{
  // struct Uenv cu;

  u_int start_text_addr, start_text_pg;
  struct exec hdr;
  u_int text_size, data_size, bss_size, overlap_size;
  u_int envid = e->env_id;


  /* read a.out headers */
  if (lseek(fd, 0, SEEK_SET) == -1 ||
      read(fd, &hdr, sizeof(hdr)) != sizeof(hdr) ||
      lseek(fd, sizeof(hdr) + hdr.a_text, SEEK_SET) == -1 ||
      read(fd, &start_text_addr, sizeof(start_text_addr)) != 
          sizeof(start_text_addr)) 
  {
    errornf("Invalid executable format.\n");
  }

  start_text_pg = PGROUNDDOWN(start_text_addr);
  text_size = hdr.a_text + sizeof(hdr);
  data_size = hdr.a_data;
  if (text_size % NBPG) {
    data_size += text_size % NBPG;
    text_size = PGROUNDDOWN(text_size);
  }
  bss_size = hdr.a_bss;
  
  
  if (!(data_size % NBPG))
    overlap_size = 0;
  else
  {
    /* read in the page that contains both bss and inited data */
    u_int temp_page;
     
    temp_page = (u_int)__malloc(NBPG);
    overlap_size = NBPG;
   
    if (temp_page == 0 || 
	lseek(fd, text_size + PGROUNDDOWN(data_size), SEEK_SET) == -1 ||
        read(fd, (void*)temp_page, data_size % NBPG) != data_size % NBPG ||
        _exos_insert_pte
	  (0, vpt[PGNO(temp_page)], start_text_pg + text_size +
	   PGROUNDDOWN(data_size), 0, envid, 0, NULL) != 0) 
    {
      _exos_self_unmap_page(0, temp_page);
      __free((void*)temp_page);
      error("Error mmaping data segment\n");
    }
    
    bzero((void*)temp_page + (data_size % NBPG),
          NBPG - (data_size % NBPG));
    _exos_self_unmap_page(0, temp_page);
    __free((void*)temp_page);
    bss_size -= NBPG - (data_size % NBPG);
    bss_size = PGROUNDUP(bss_size);
    data_size = PGROUNDDOWN(data_size);

  }


  /* mmap the text segment readonly */
  if ((u_int)__mmap((void*)start_text_pg, text_size, PROT_READ  | PROT_EXEC, 
		    MAP_FILE | MAP_FIXED | MAP_COPY, fd, (off_t)0, 0, envid)
	!= start_text_pg) 
  {
    errornf("Error mmaping text segment\n");
  }

  /* mmap the data segment read/write */
  if ((u_int)__mmap((void*)(start_text_pg + text_size), data_size,
		    PROT_READ | PROT_WRITE | PROT_EXEC,
		    MAP_FILE | MAP_FIXED | MAP_COPY,
		    fd, (off_t)text_size, 0, envid)
	!= start_text_pg + text_size) 
  {
    errornf("Error mmaping data segment\n");
  }

#if 0 /* we set up a stack page later on when setting up arguments */
  /* allocate a stack page */
  if (_exos_insert_pte (0, PG_U|PG_W|PG_P, USTACKTOP-NBPG, 0, envid, 0,
			NULL) < 0) 
  {
    errornf("could not allocate stack\n");
  }
#endif

  /* set the entry point */
  assert(e->env_id == envid);
  e->env_tf.tf_eip = start_text_addr;

  return 1;
}
Code Example #8
File: shexec.c  Project: aunali1/exopc
u_int
__load_prog_fd(int fd, int _static, u_int envid)
{
  u_int start_text_addr;
  struct exec hdr;
  u_int text_size, data_size, bss_size, overlap_size;
  u_int dynamic, start_text_pg;

  /* read a.out headers */
  if (lseek(fd, 0, SEEK_SET) == -1 ||
      read(fd, &hdr, sizeof(hdr)) != sizeof(hdr) ||
      lseek(fd, sizeof(hdr) + hdr.a_text, SEEK_SET) == -1 ||
      read(fd, &dynamic, sizeof(dynamic)) != sizeof(dynamic) ||
      read(fd, &start_text_addr, sizeof(start_text_addr)) !=
      sizeof(start_text_addr)) {
    fprintf(stderr,"Invalid executable format.\n");
    errno = ENOEXEC;
    goto err;
  }
  start_text_pg = PGROUNDDOWN(start_text_addr);
  text_size = hdr.a_text + sizeof(hdr);
  data_size = hdr.a_data;
  if (text_size % NBPG) {
    data_size += text_size % NBPG;
    text_size = PGROUNDDOWN(text_size);
  }
  bss_size = hdr.a_bss;
  if (_static) {
    if (!(data_size % NBPG))
      overlap_size = 0;
    else
      {
	/* read in the page that contains both bss and inited data */
	u_int temp_page;
	
	temp_page = (u_int)__malloc(NBPG);
	overlap_size = NBPG;
	if (temp_page == 0 ||
	    lseek(fd, text_size + PGROUNDDOWN(data_size),
		  SEEK_SET) == -1 ||
	    read(fd, (void*)temp_page, data_size % NBPG) !=
	    data_size % NBPG ||
	    _exos_insert_pte(0, vpt[PGNO(temp_page)],
			     start_text_pg + text_size +
			     PGROUNDDOWN(data_size), 0, envid, 0, NULL) != 0) {
	  _exos_self_unmap_page(0, temp_page);
	  __free((void*)temp_page);
	  fprintf(stderr,"Error mmaping text segment\n");
	  goto err;
	}
	bzero((void*)temp_page + (data_size % NBPG),
	      NBPG - (data_size % NBPG));
	_exos_self_unmap_page(0, temp_page);
	__free((void*)temp_page);
	bss_size -= NBPG - (data_size % NBPG);
	bss_size = PGROUNDUP(bss_size);
	data_size = PGROUNDDOWN(data_size);
      }
    /* mmap the text segment readonly */
    if ((u_int)__mmap((void*)start_text_pg, text_size,
		      PROT_READ  | PROT_EXEC, 
		      MAP_FILE | MAP_FIXED | MAP_COPY, fd, (off_t)0, 0,
		      envid)
	!= start_text_pg) {
      fprintf(stderr,"Error mmaping text segment\n");
      goto err;
    }
    /* mmap the data segment read/write */
    if ((u_int)__mmap((void*)(start_text_pg + text_size), data_size,
		      PROT_READ | PROT_WRITE | PROT_EXEC,
		      MAP_FILE | MAP_FIXED | MAP_COPY,
		      fd, text_size, (off_t)0, envid)
	!= start_text_pg + text_size) {
      fprintf(stderr,"Error mmaping data segment\n");
      goto err;
    }
  } else {
    /* if dynamic... */
    u_int mflags;
    if (!(data_size % NBPG))
      overlap_size = 0;
    else
      {
	/* read in the page that contains both bss and inited data */
	overlap_size = NBPG;
	if (_exos_self_insert_pte(0, PG_P | PG_W | PG_U,
				  start_text_pg + text_size +
				  PGROUNDDOWN(data_size), 0, NULL) < 0 ||
	    lseek(fd, text_size + PGROUNDDOWN(data_size),
		  SEEK_SET) == -1 ||
	    read(fd, (void*)(start_text_pg + text_size +
			     PGROUNDDOWN(data_size)),
		 data_size % NBPG) != data_size % NBPG) {
	  fprintf(stderr,"Error mmaping text segment\n");
	  goto err;
	}
	bzero((void*)(start_text_pg + text_size + data_size),
	      NBPG - (data_size % NBPG));
	bss_size -= NBPG - (data_size % NBPG);
	bss_size = PGROUNDUP(bss_size);
	data_size = PGROUNDDOWN(data_size);
      }
    /* mmap the text segment readonly */
    mflags = MAP_FILE | MAP_FIXED;
    if (getenv("NO_DEMAND_LOAD"))
      mflags |= MAP_COPY;
    else
      mflags |= MAP_SHARED;
    if ((u_int)mmap((void*)start_text_pg, text_size,
		    PROT_READ | PROT_EXEC, 
		    mflags, fd, (off_t)0) != start_text_pg) {
      fprintf(stderr,"Error mmaping text segment\n");
      goto err;
    }
    /* mmap the data segment read/write */
    if (!(mflags & MAP_COPY)) mflags = MAP_FILE | MAP_FIXED | MAP_PRIVATE;
    if ((u_int)mmap((void*)(start_text_pg + text_size), data_size,
		    PROT_READ | PROT_WRITE | PROT_EXEC, mflags, fd,
		    (off_t)text_size) != start_text_pg + text_size) {
      fprintf(stderr,"Error mmaping data segment: %d\n", errno);
      goto err;
    }
    /* mmap the bss as demand zero'd */
    if ((u_int)mmap((void*)(start_text_pg + text_size + data_size +
			    overlap_size),
		    bss_size, PROT_READ | PROT_WRITE | PROT_EXEC,
		    MAP_ANON | MAP_FIXED | MAP_PRIVATE,
		    -1, (off_t)0) !=
	start_text_pg + text_size + data_size + overlap_size) {
      fprintf(stderr,"Error mmaping bss\n");
      goto err;
    }
  }

  return start_text_addr;

err:
  return 0;
}