Example #1: fetch_indir_spec
/* For logical block number LBN of file NP, look up the block address,
   giving the "path" of indirect blocks to the file, starting 
   with the least indirect.  Fill *INDIRS with information for
   the block.  */
error_t
fetch_indir_spec (struct node *np, volatile daddr_t lbn,
		  struct iblock_spec *indirs)
{
  struct dinode *di = dino (np->dn->number);
  error_t err;
  daddr_t *siblock;
  
  err = diskfs_catch_exception ();
  if (err)
    return err;
  
  /* Start with every level of indirection marked unused; the levels
     that apply to LBN are filled in below.  */
  indirs[0].offset = -2;
  indirs[1].offset = -2;
  indirs[2].offset = -2;
  indirs[3].offset = -2;

  if (lbn < NDADDR)
    {
      if (lbn >= 0)
	{
	  indirs[0].bno = read_disk_entry (di->di_db[lbn]);
	  indirs[0].offset = -1;
	}
  
      diskfs_end_catch_exception ();
      return 0;
    }

  lbn -= NDADDR;

  indirs[0].offset = lbn % NINDIR (sblock);
  
  if (lbn / NINDIR (sblock))
    {
      /* We will use the double indirect block */
      int ibn;
      daddr_t *diblock;

      ibn = lbn / NINDIR (sblock) - 1;

      indirs[1].offset = ibn % NINDIR (sblock);
      
      /* We don't support triple indirect blocks, but this 
	 is where we'd do it. */
      assert (!(ibn / NINDIR (sblock)));
  
      indirs[2].offset = -1;
      indirs[2].bno = read_disk_entry (di->di_ib[INDIR_DOUBLE]);

      if (indirs[2].bno)
	{
	  diblock = indir_block (indirs[2].bno);
	  indirs[1].bno = read_disk_entry (diblock[indirs[1].offset]);
	}
      else
	indirs[1].bno = 0;
    }
  else
    {
      indirs[1].offset = -1;
      indirs[1].bno = read_disk_entry (di->di_ib[INDIR_SINGLE]);
    }

  if (indirs[1].bno)
    {
      siblock = indir_block (indirs[1].bno);
      indirs[0].bno = read_disk_entry (siblock[indirs[0].offset]);
    }
  else
    indirs[0].bno = 0;

  diskfs_end_catch_exception ();
  return 0;
}
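
A minimal usage sketch (not part of the original source): a caller can
resolve a logical block by inspecting the spec that fetch_indir_spec
fills in.  NIADDR is the usual UFS count of indirect-block slots and is
assumed to be available here (it gives the four-entry array the function
writes); read_data_block and fill_hole are placeholder helpers.

/* Hypothetical caller: look up logical block LBN of NP and act on
   what fetch_indir_spec reports.  */
static error_t
resolve_block (struct node *np, daddr_t lbn)
{
  struct iblock_spec indirs[NIADDR + 1];
  error_t err;

  err = fetch_indir_spec (np, lbn, indirs);
  if (err)
    return err;

  if (indirs[0].bno)
    /* The block is allocated; INDIRS[0].BNO is its disk address.  */
    return read_data_block (indirs[0].bno);

  /* The block, or one of its indirect parents, is unallocated.  */
  return fill_hole (np, lbn);
}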
Example #2: diskfs_get_directs
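/* Return NENTRIES directory entries (all of them if NENTRIES is -1)
   starting at index ENTRY from directory node DP.  Fill *DATA with
   the entries; if the *DATACNT bytes it points to are not enough,
   allocate a fresh buffer into *DATA.  Set *DATACNT to the number of
   bytes used and *AMT to the number of entries returned, and never
   use more than BUFSIZ bytes unless BUFSIZ is 0.  */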
error_t
diskfs_get_directs (struct node *dp,
		    int entry,
		    int nentries,
		    char **data,
		    size_t *datacnt,
		    vm_size_t bufsiz,
		    int *amt)
{
  volatile vm_size_t allocsize;
  struct dirrect *ep;
  struct dirent *userp;
  int i;
  void *dirbuf, *bufp;
  char *datap;
  volatile int ouralloc = 0;
  error_t err;

  /* Allocate some space to hold the returned data. */
  allocsize = bufsiz ? round_page (bufsiz) : vm_page_size * 4;
  if (allocsize > *datacnt)
    {
      *data = mmap (0, allocsize, PROT_READ|PROT_WRITE, MAP_ANON, 0, 0);
      ouralloc = 1;
    }

  err = diskfs_catch_exception ();
  if (err)
    {
      if (ouralloc)
	munmap (*data, allocsize);
      return err;
    }

  /* Skip to ENTRY */
  dirbuf = disk_image + (dp->dn->file_start << store->log2_block_size);
  bufp = dirbuf;
  for (i = 0; i < entry; i ++)
    {
      struct rrip_lookup rr;

      ep = (struct dirrect *) bufp;
      rrip_lookup (ep, &rr, 0);

      /* Ignore and skip RE entries */
      if (rr.valid & VALID_RE)
	i--;
      else
	{
	  if (bufp - dirbuf >= dp->dn_stat.st_size)
	    {
	      /* Not that many entries in the directory; return nothing. */
	      release_rrip (&rr);
	      if (allocsize > *datacnt)
		munmap (*data, allocsize);
	      *datacnt = 0;
	      *amt = 0;
	      diskfs_end_catch_exception ();
	      return 0;
	    }
	}
      bufp = bufp + ep->len;
      release_rrip (&rr);

      /* If BUFP points at a null, then we have hit the last
	 record in this logical sector.  In that case, skip up to
	 the next logical sector. */
      if (*(char *)bufp == '\0')
	bufp = (void *) (((long) bufp & ~(logical_sector_size - 1))
			 + logical_sector_size);
    }

  /* Now copy entries one at a time */
  i = 0;
  datap = *data;
  while (((nentries == -1) || (i < nentries))
	 && (!bufsiz || datap - *data < bufsiz)
	 && ((void *) bufp - dirbuf < dp->dn_stat.st_size))
    {
      struct rrip_lookup rr;
      const char *name;
      size_t namlen, reclen;

      ep = (struct dirrect *) bufp;

      /* Fetch Rock-Ridge information for this file */
      rrip_lookup (ep, &rr, 0);

      /* Ignore and skip RE entries */
      if (! (rr.valid & VALID_RE))
	{
	  /* See if there's room to hold this one */
	  name = rr.valid & VALID_NM ? rr.name : (char *) ep->name;
	  namlen = rr.valid & VALID_NM ? strlen (name) : ep->namelen;

	  /* Name frobnication */
	  if (!(rr.valid & VALID_NM))
	    {
	      if (namlen == 1 && name[0] == '\0')
		{
		  name = ".";
		  namlen = 1;
		}
	      else if (namlen == 1 && name[0] == '\1')
		{
		  name = "..";
		  namlen = 2;
		}
	      /* Perhaps downcase it too? */
	    }

	  reclen = sizeof (struct dirent) + namlen;
	  reclen = (reclen + 3) & ~3;

	  /* Expand buffer if necessary */
	  if (datap - *data + reclen > allocsize)
	    {
	      vm_address_t newdata;

	      vm_allocate (mach_task_self (), &newdata,
			   (ouralloc
			    ? (allocsize *= 2)
			    : (allocsize = vm_page_size * 2)), 1);
	      memcpy ((void *) newdata, (void *) *data, datap - *data);

	      if (ouralloc)
		munmap (*data, allocsize / 2);

	      datap = (char *) newdata + (datap - *data);
	      *data = (char *) newdata;
	      ouralloc = 1;
	    }

	  userp = (struct dirent *) datap;

	  /* Fill in entry */

	  if (use_file_start_id (ep, &rr))
	    {
	      off_t file_start;

	      err = calculate_file_start (ep, &file_start, &rr);
	      if (err)
		{
		  release_rrip (&rr);
		  diskfs_end_catch_exception ();
		  if (ouralloc)
		    munmap (*data, allocsize);
		  return err;
		}

	      userp->d_fileno = file_start << store->log2_block_size;
	    }
	  else
	    userp->d_fileno = (ino_t) ((void *) ep - (void *) disk_image);

	  userp->d_type = DT_UNKNOWN;
	  userp->d_reclen = reclen;
	  userp->d_namlen = namlen;
	  memcpy (userp->d_name, name, namlen);
	  userp->d_name[namlen] = '\0';

	  /* And move along */
	  datap = datap + reclen;
	  i++;
	}

      release_rrip (&rr);
      bufp = bufp + ep->len;

      /* If BUFP points at a null, then we have hit the last
	 record in this logical sector.  In that case, skip up to
	 the next logical sector. */
      if (*(char *)bufp == '\0')
	bufp = (void *) (((long) bufp & ~(logical_sector_size - 1))
			 + logical_sector_size);
    }

  diskfs_end_catch_exception ();

  /* If we didn't use all the pages of a buffer we allocated, free
     the excess.  */
  if (ouralloc
      && round_page (datap - *data) < round_page (allocsize))
    munmap ((caddr_t) round_page (datap),
	    round_page (allocsize) - round_page (datap - *data));

  /* Return */
  *amt = i;
  *datacnt = datap - *data;
  return 0;
}
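
For illustration, a consumer of the returned buffer walks the packed
struct dirent records by d_reclen (a sketch assuming <dirent.h> and
<stdio.h>; print_entries is not part of the original source).

/* Hypothetical consumer: print the AMT entry names packed into DATA.  */
static void
print_entries (char *data, int amt)
{
  char *p = data;
  int i;

  for (i = 0; i < amt; i++)
    {
      struct dirent *d = (struct dirent *) p;

      printf ("%.*s\n", (int) d->d_namlen, d->d_name);
      p += d->d_reclen;
    }
}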
Example #3: diskfs_user_read_node
/* The user must define this function if she wants to use the node
   cache.  Read stat information out of the on-disk node.  */
error_t
diskfs_user_read_node (struct node *np, struct lookup_context *ctx)
{
  error_t err;
  struct stat *st = &np->dn_stat;
  struct disknode *dn = diskfs_node_disknode (np);
  struct ext2_inode *di;
  struct ext2_inode_info *info = &dn->info;

  ext2_debug ("(%llu)", np->cache_id);

  err = diskfs_catch_exception ();
  if (err)
    return err;

  di = dino_ref (np->cache_id);

  st->st_fstype = FSTYPE_EXT2FS;
  st->st_fsid = getpid ();	/* This call is very cheap.  */
  st->st_ino = np->cache_id;
  st->st_blksize = vm_page_size * 2;

  st->st_nlink = di->i_links_count;
  st->st_size = di->i_size;
  st->st_gen = di->i_generation;

  st->st_atim.tv_sec = di->i_atime;
#ifdef not_yet
  /* ``struct ext2_inode'' doesn't do better than sec. precision yet.  */
#else
  st->st_atim.tv_nsec = 0;
#endif
  st->st_mtim.tv_sec = di->i_mtime;
#ifdef not_yet
  /* ``struct ext2_inode'' doesn't do better than sec. precision yet.  */
#else
  st->st_mtim.tv_nsec = 0;
#endif
  st->st_ctim.tv_sec = di->i_ctime;
#ifdef not_yet
  /* ``struct ext2_inode'' doesn't do better than sec. precision yet.  */
#else
  st->st_ctim.tv_nsec = 0;
#endif

  st->st_blocks = di->i_blocks;

  st->st_flags = 0;
  if (di->i_flags & EXT2_APPEND_FL)
    st->st_flags |= UF_APPEND;
  if (di->i_flags & EXT2_NODUMP_FL)
    st->st_flags |= UF_NODUMP;
  if (di->i_flags & EXT2_IMMUTABLE_FL)
    st->st_flags |= UF_IMMUTABLE;

  /* Filesystems created by GNU/Hurd keep extra fields in the inode:
     high bits of the mode, uid, and gid, an author, and a passive
     translator record.  */
  if (sblock->s_creator_os == EXT2_OS_HURD)
    {
      st->st_mode = di->i_mode | (di->i_mode_high << 16);
      st->st_mode &= ~S_ITRANS;
      if (di->i_translator)
	st->st_mode |= S_IPTRANS;

      st->st_uid = di->i_uid | (di->i_uid_high << 16);
      st->st_gid = di->i_gid | (di->i_gid_high << 16);

      st->st_author = di->i_author;
      if (st->st_author == -1)
	st->st_author = st->st_uid;
    }
  else
    {
      st->st_mode = di->i_mode & ~S_ITRANS;
      st->st_uid = di->i_uid;
      st->st_gid = di->i_gid;
      st->st_author = st->st_uid;
      np->author_tracks_uid = 1;
    }

  /* Setup the ext2fs auxiliary inode info.  */
  info->i_dtime = di->i_dtime;
  info->i_flags = di->i_flags;
  info->i_faddr = di->i_faddr;
  info->i_frag_no = di->i_frag;
  info->i_frag_size = di->i_fsize;
  info->i_osync = 0;
  info->i_file_acl = di->i_file_acl;
  if (S_ISDIR (st->st_mode))
    info->i_dir_acl = di->i_dir_acl;
  else
    {
      info->i_dir_acl = 0;
      info->i_high_size = di->i_size_high;
      if (info->i_high_size)	/* XXX */
	{
	  dino_deref (di);
	  ext2_warning ("cannot handle large file inode %Ld", np->cache_id);
	  diskfs_end_catch_exception ();
	  return EFBIG;
	}
    }
  info->i_block_group = inode_group_num (np->cache_id);
  info->i_next_alloc_block = 0;
  info->i_next_alloc_goal = 0;
  info->i_prealloc_count = 0;

  /* Set to a conservative value.  */
  dn->last_page_partially_writable = 0;

  if (S_ISCHR (st->st_mode) || S_ISBLK (st->st_mode))
    st->st_rdev = di->i_block[0];
  else
    {
      memcpy (info->i_data, di->i_block,
	      EXT2_N_BLOCKS * sizeof info->i_data[0]);
      st->st_rdev = 0;
    }
  dn->info_i_translator = di->i_translator;

  dino_deref (di);
  diskfs_end_catch_exception ();

  if (S_ISREG (st->st_mode) || S_ISDIR (st->st_mode)
      || (S_ISLNK (st->st_mode) && st->st_blocks))
    {
      unsigned offset;

      np->allocsize = np->dn_stat.st_size;

      /* Round up to a block multiple.  */
      offset = np->allocsize & ((1 << log2_block_size) - 1);
      if (offset > 0)
	np->allocsize += block_size - offset;
    }
  else
    /* Allocsize should be zero for anything except directories, files, and
       long symlinks.  These are the only things allowed to have any blocks
       allocated as well, although st_size may be zero for any type (cases
       where st_blocks=0 and st_size>0 include fast symlinks, and, under
       linux, some devices).  */
    np->allocsize = 0;

  if (!diskfs_check_readonly () && !np->dn_stat.st_gen)
    {
      pthread_spin_lock (&generation_lock);
      if (++next_generation < diskfs_mtime->seconds)
	next_generation = diskfs_mtime->seconds;
      np->dn_stat.st_gen = next_generation;
      pthread_spin_unlock (&generation_lock);
      np->dn_set_ctime = 1;
    }

  return 0;
}
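
As a standalone illustration of the np->allocsize rounding near the end
of the function above (4096 is an assumed block size, not a value taken
from the source):

/* Round SIZE up to a multiple of the block size, the same arithmetic
   used for np->allocsize above.  */
static unsigned long
round_to_block (unsigned long size)
{
  const unsigned long block_size = 4096;	/* Illustrative only.  */
  unsigned long offset = size & (block_size - 1);

  /* E.g. 5000 -> 8192, 4096 -> 4096, 0 -> 0.  */
  return offset ? size + (block_size - offset) : size;
}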