Example 1
/* allocate_sectors
   Takes a disk inode and a sector count and allocates that many
   sectors on disk for the file.  Two cases are handled:
   - a brand-new allocation starting from scratch (cnt == 0), and
   - extending an allocation the file already owns: the first CNT
     sectors are already allocated and are skipped.
*/
static bool
allocate_sectors(struct inode_disk *disk_inode, int sector, int cnt)
{ 
  /* doubly indirect */
  int i, j;
  static char zeros[512];
  if(sector == 0) return true;
  struct sector_table *doubly_indirect_table;
  doubly_indirect_table = calloc(1, sizeof(struct sector_table));
  if(doubly_indirect_table == NULL) return false;

  if(cnt == 0)
  {
    if(!free_map_allocate(1, &disk_inode->doubly_indirect))
    {
      free(doubly_indirect_table);
      return false;
    }
  }
  cache_read(disk_inode->doubly_indirect, doubly_indirect_table, 512, 0);
    
  for(i = 0; i < 128 && sector > 0; i++)
  {
    struct sector_table *indirect_table = calloc(1, sizeof(struct sector_table));
    if(indirect_table == NULL)
    {
      free(doubly_indirect_table);
      return false;
    }

    if(cnt == 0)
    {
      if(!free_map_allocate(1, &doubly_indirect_table->table[i]))
      {
        free(indirect_table);
        free(doubly_indirect_table);
        return false;
      }
    }
  
    cache_read(doubly_indirect_table->table[i], indirect_table, 512, 0);
    
    for(j = 0; j < 128 && sector > 0; j++)
    {
      if(cnt == 0)
      {
        if(free_map_allocate(1, &indirect_table->table[j]))
        {
          cache_write(indirect_table->table[j], zeros, 512, 0);
        }
        else
        {
          /* free only our local tables; the caller owns disk_inode */
          free(indirect_table);
          free(doubly_indirect_table);
          return false;
        }
      }
      else
        cnt--;
      
      sector--;
    }
    cache_write(doubly_indirect_table->table[i], indirect_table, 512, 0);
    free(indirect_table);

  }
  cache_write(disk_inode->doubly_indirect, doubly_indirect_table, 512, 0);
  free(doubly_indirect_table);
  return true;
}
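
Example 1 never shows the definition of struct sector_table; from the arithmetic (128 entries filling one 512-byte sector) it is presumably a bare array of sector numbers. A minimal sketch under that assumption (the disk_sector_t name follows the other Pintos-style examples):

/* Hypothetical definition matching Example 1's usage: one table
   fills exactly one 512-byte sector with 128 sector numbers. */
struct sector_table
  {
    disk_sector_t table[128];    /* 128 entries * 4 bytes = 512 bytes */
  };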
Example 2
/* Initializes an inode with LENGTH bytes of data and
   writes the new inode to sector SECTOR on the file system
   disk.
   Returns true if successful.
   Returns false if memory or disk allocation fails. */
bool
inode_create (disk_sector_t sector, off_t length, bool is_dir)
{
  struct inode_disk *disk_inode = NULL;
  struct inode *inode;
  bool success = false;

  ASSERT (length >= 0);

  /* If this assertion fails, the inode structure is not exactly
     one sector in size, and you should fix that. */
  ASSERT (sizeof *disk_inode == DISK_SECTOR_SIZE);

  disk_inode = calloc (1, sizeof *disk_inode);
  if (disk_inode != NULL)
    {
      disk_inode->length = 0;
      disk_inode->sector_count = 0;
      disk_inode->is_dir = is_dir;
      disk_inode->parent = dir_get_inode (thread_current ()->dir)->sector;
      disk_inode->magic = INODE_MAGIC;

      cache_write (sector, disk_inode, 0, DISK_SECTOR_SIZE);
      inode = inode_open (sector);
      if (inode != NULL)
        {
          success = inode_extend (inode, length);
          inode_close (inode);
        }
      free (disk_inode);
    }
  return success;
}
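
Note that the cache_write signatures differ across the projects sampled here: most of the Pintos-style examples pass (sector, buffer, offset, size), Examples 1 and 13 pass (sector, buffer, size, offset), and Examples 10, 14 and 18 pass the cache object as a first argument. A sketch of the most common form, with hypothetical prototypes:

/* Sketch of the buffer-cache interface most examples below assume;
   the exact prototypes are project-specific. */
void cache_read  (disk_sector_t sector, void *buffer, int sector_ofs, int size);
void cache_write (disk_sector_t sector, const void *buffer, int sector_ofs, int size);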
Example 3
/* Extends the INODE with LENGTH bytes. */
static bool
inode_extend (struct inode *inode, off_t length)
{
  struct inode_disk *disk;
  off_t free_length;
  size_t sectors;
  disk_sector_t sector;
  size_t i;

  lock_acquire (&inode->lock);
  disk = (struct inode_disk *) malloc (sizeof *disk);

  ASSERT (disk != NULL);

  cache_read (inode->sector, disk, 0, DISK_SECTOR_SIZE);

  free_length = disk->sector_count * DISK_SECTOR_SIZE - disk->length;
  sectors = bytes_to_sectors (length - free_length);

  for (i = 0; i < sectors; i++)
    {
      if (!free_map_allocate (1, &sector)
          || !inode_append (inode, sector))
        {
          lock_release (&inode->lock);
          free (disk);
          return false;
        }
    }
  disk->length += length;
  cache_write (inode->sector, &disk->length, INODE_OFFSET_LENGTH, 4);
  lock_release (&inode->lock);

  free (disk);
  return true;
}
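
bytes_to_sectors, used here and in most of the Pintos examples, is the standard Pintos helper that rounds a byte count up to whole sectors:

/* Returns the number of sectors to allocate for SIZE bytes
   (standard Pintos helper). */
static inline size_t
bytes_to_sectors (off_t size)
{
  return DIV_ROUND_UP (size, DISK_SECTOR_SIZE);
}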
Example 4
/**
 * Read from remote host.
 */
static void
do_read_remote (struct command *command, uint8_t *buf, ssize_t *buflen)
{
    ssize_t readbytes;

    /* recv on remote data socket */
    if ((readbytes = read (command->rfd, buf, BUFLEN)) <= 0) {
        /* connection closed or socket error */
        if (readbytes == 0) {
            /* EOF, remote host closed socket */
        } else {
            perror ("data recv error");
        }

        /* close socket to remote host */
        close (command->rfd);
    }

    /* only cache data that was actually read */
    if (readbytes > 0 && cache_write (command->service, buf, readbytes) < 0) {
        warn ("Could not write to cache");
    }

    free (command->service);
    free (command);

    /* "return value" read bytes */
    *buflen = readbytes;
}
Example 5
static void
eventCallback(CFSocketRef s, CFSocketCallBackType type, CFDataRef address, const void *data, void *info)
{
	int			so		= CFSocketGetNative(s);
	int			status;
	char			buf[1024];
	struct kern_event_msg	*ev_msg		= (struct kern_event_msg *)&buf[0];
	int			offset		= 0;

	status = recv(so, &buf, sizeof(buf), 0);
	if (status == -1) {
		SCLog(TRUE, LOG_ERR, CFSTR("recv() failed: %s"), strerror(errno));
		goto error;
	}

	cache_open();

	while (offset < status) {
		if ((offset + ev_msg->total_size) > status) {
			SCLog(TRUE, LOG_NOTICE, CFSTR("missed SYSPROTO_EVENT event, buffer not big enough"));
			break;
		}

		switch (ev_msg->vendor_code) {
			case KEV_VENDOR_APPLE :
				switch (ev_msg->kev_class) {
					case KEV_NETWORK_CLASS :
						processEvent_Apple_Network(ev_msg);
						break;
					case KEV_IOKIT_CLASS :
						processEvent_Apple_IOKit(ev_msg);
						break;
					default :
						/* unrecognized (Apple) event class */
						logEvent(CFSTR("New (Apple) class"), ev_msg);
						break;
				}
				break;
			default :
				/* unrecognized vendor code */
				logEvent(CFSTR("New vendor"), ev_msg);
				break;
		}
		offset += ev_msg->total_size;
		ev_msg = (struct kern_event_msg *)&buf[offset];
	}

	cache_write(store);
	cache_close();

	return;

    error :

	SCLog(TRUE, LOG_ERR, CFSTR("kernel event monitor disabled."));
	CFSocketInvalidate(s);
	return;

}
Example 6
/* Writes SIZE bytes from BUFFER into INODE, starting at OFFSET.
   Returns the number of bytes actually written, which may be
   less than SIZE if an error occurs. */
off_t
inode_write_at (struct inode *inode, const void *buffer_, off_t size,
                off_t offset) 
{
	if(INODE_DEBUG) printf("INODE: writing inode %u @ offset %i from buffer %p. Size: %i bytes\n", inode->sector, offset, buffer_, size);

	const void *buffer = buffer_;
	off_t bytes_written = 0;

	if (inode->deny_write_cnt)
		return 0;

	/* current inode size */
	off_t length = inode_length (inode);

	/* space left in last sector */
	off_t space_left = BLOCK_SECTOR_SIZE - length % BLOCK_SECTOR_SIZE;
	if(space_left == BLOCK_SECTOR_SIZE)
		space_left = 0;

	/* add block sectors if needed */
	if(offset + size > length)
	{
		/* extend file; keep the side effect out of ASSERT so it
		   survives NDEBUG builds */
		bool extended = inode_extend (inode, offset + size - length);
		ASSERT(extended);

		/* update length */
		length = offset + size;
	}

	/* write to file */
	while (size > 0)
	{
		/* Sector to write, starting byte offset within sector. */
		block_sector_t sector_idx = byte_to_sector (inode, offset);
		int sector_ofs = offset % BLOCK_SECTOR_SIZE;
		
		/* Bytes left in inode, bytes left in sector, lesser of the two. */
		off_t inode_left = length - offset;
		int sector_left = BLOCK_SECTOR_SIZE - sector_ofs;
		int min_left = inode_left < sector_left ? inode_left : sector_left;

		/* Number of bytes to actually write into this sector. */
		int chunk_size = size < min_left ? size : min_left;
		if (chunk_size <= 0)
			break;

		/* write chunk to cache */
		cache_write(sector_idx, buffer + bytes_written, sector_ofs, chunk_size);
	
		/* Advance. */
		size -= chunk_size;
		offset += chunk_size;
		bytes_written += chunk_size;
	}

	return bytes_written;
}
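
The min_left/chunk_size arithmetic above splits a write into at most one partial leading sector, whole middle sectors, and a partial trailing sector. A worked example, assuming the inode is long enough:

/* BLOCK_SECTOR_SIZE = 512, offset = 300, size = 1000:
   pass 1: sector_ofs = 300, sector_left = 212 -> chunk_size = 212
   pass 2: sector_ofs = 0,   sector_left = 512 -> chunk_size = 512
   pass 3: sector_ofs = 0,   sector_left = 512 -> chunk_size = 276
   total: 212 + 512 + 276 = 1000 bytes across three sectors. */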
Example 7
static Boolean
eventCallback(int so)
{
	ssize_t			status;
	union {
		char			bytes[1024];
		struct kern_event_msg	ev_msg1;	// first kernel event
	} buf;
	struct kern_event_msg	*ev_msg		= &buf.ev_msg1;
	ssize_t			offset		= 0;

	status = recv(so, &buf, sizeof(buf), 0);
	if (status == -1) {
		SCLog(TRUE, LOG_ERR, CFSTR("recv() failed: %s"), strerror(errno));
		return FALSE;
	}

	cache_open();

	while (offset < status) {
		if ((offset + ev_msg->total_size) > status) {
			SCLog(TRUE, LOG_NOTICE, CFSTR("missed SYSPROTO_EVENT event, buffer not big enough"));
			break;
		}

		switch (ev_msg->vendor_code) {
			case KEV_VENDOR_APPLE :
				switch (ev_msg->kev_class) {
					case KEV_NETWORK_CLASS :
						processEvent_Apple_Network(ev_msg);
						break;
					case KEV_IOKIT_CLASS :
					case KEV_SYSTEM_CLASS :
					case KEV_APPLESHARE_CLASS :
					case KEV_FIREWALL_CLASS :
					case KEV_IEEE80211_CLASS :
						break;
					default :
						/* unrecognized (Apple) event class */
						logEvent(CFSTR("New (Apple) class"), ev_msg);
						break;
				}
				break;
			default :
				/* unrecognized vendor code */
				logEvent(CFSTR("New vendor"), ev_msg);
				break;
		}
		offset += ev_msg->total_size;
		ev_msg = (struct kern_event_msg *)(void *)&buf.bytes[offset];
	}

	cache_write(store);
	cache_close();
	post_network_changed();

	return TRUE;
}
Example 8
/* padding zeros from start_pos (inclusive) to end_pos (exclusive) */
bool zero_padding(struct inode *inode, struct inode_disk *id,
		off_t start_pos, off_t end_pos) {
	ASSERT(lock_held_by_current_thread (&inode->inode_lock));
	static char zeros[BLOCK_SECTOR_SIZE];
	/* padding the first partial sector */
	if (start_pos % BLOCK_SECTOR_SIZE != 0) {
		block_sector_t eof_sector = byte_to_sector(inode, start_pos-1);
		off_t sector_ofs = start_pos % BLOCK_SECTOR_SIZE;
		size_t zero_bytes = BLOCK_SECTOR_SIZE - sector_ofs;
		cache_write(eof_sector, zeros, sector_ofs, zero_bytes);
	}

	/* padding full sectors until end_pos-1 */
	int extra_sectors = (int)bytes_to_sectors(end_pos)-
			(int)bytes_to_sectors(start_pos);
	off_t* record_sectors=malloc(sizeof(off_t) * extra_sectors);
	off_t i,j;
	block_sector_t new_sector=-1;
	for(i=0;i<extra_sectors;i++){
		if (!free_map_allocate (1, &new_sector)) {
			for(j=0;j<i;j++){
				free_map_release(record_sectors[j],1);
			}
			free(record_sectors);
			return false;
		}
		if(!append_sector_to_inode(id,new_sector)){
			free_map_release(new_sector,1);
			for(j=0;j<i;j++){
				free_map_release(record_sectors[j],1);
			}
			free(record_sectors);
			return false;
		}
		cache_write(new_sector, zeros, 0, BLOCK_SECTOR_SIZE);
		record_sectors[i]=new_sector;
		id->length += BLOCK_SECTOR_SIZE;
	}
	/*update the physical length info*/
	id->length=end_pos;
	cache_write(inode->sector, id, 0, BLOCK_SECTOR_SIZE);
	free(record_sectors);
	return true;

}
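
To make the padding arithmetic concrete, a small worked example (values chosen for illustration only):

/* BLOCK_SECTOR_SIZE = 512, start_pos = 700, end_pos = 1500:
   partial pad:   sector_ofs = 700 % 512 = 188,
                  zero_bytes = 512 - 188 = 324
   full sectors:  extra_sectors = bytes_to_sectors(1500)
                                - bytes_to_sectors(700) = 3 - 2 = 1
   so one fresh sector is allocated, zeroed and appended. */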
Example 9
/* Writes SIZE bytes from BUFFER into INODE, starting at OFFSET.
   Returns the number of bytes actually written, which may be
   less than SIZE if an error occurs.  A write past end of file
   first extends the inode with zero padding. */
off_t
inode_write_at (struct inode *inode, const void *buffer_, off_t size_,
                off_t offset_)
{
  const uint8_t *buffer = buffer_;
  int bytes_written = 0;
  int size=(int) size_;
  int offset=(int)offset_;

  if (inode->deny_write_cnt)
    return 0;

  struct inode_disk id;
  lock_acquire(&inode->inode_lock);
  cache_read(inode->sector, INVALID_SECTOR_ID, &id, 0, BLOCK_SECTOR_SIZE);
  int phy_length = (int)id.length;
  if (offset + size > phy_length) {
	  if(!zero_padding(inode, &id, phy_length, offset+size)){
		  lock_release(&inode->inode_lock);
		  return 0;
	  }
  }


  while (size > 0)
    {
      /* Sector to write, starting byte offset within sector. */
      block_sector_t sector_idx = byte_to_sector_no_check (inode, offset);
      int sector_ofs = offset % BLOCK_SECTOR_SIZE;

      /* Bytes left in inode, bytes left in sector, lesser of the two. */
      off_t inode_left = id.length - offset;
      int sector_left = BLOCK_SECTOR_SIZE - sector_ofs;
      int min_left = inode_left < sector_left ? inode_left : sector_left;

      /* Number of bytes to actually write into this sector. */
      int chunk_size = size < min_left ? size : min_left;
      if (chunk_size <= 0)
        break;

      cache_write(sector_idx, (void *)(buffer+bytes_written),
    		  sector_ofs, chunk_size);

      /* Advance. */
      size -= chunk_size;
      offset += chunk_size;
      bytes_written += chunk_size;
    }

  inode->readable_length=id.length;
  lock_release(&inode->inode_lock);
  return bytes_written;
}
Example 10
bool
inode_truncate (struct inode *inode)
{
  struct inode_disk *disk_inode = &inode->data;
  disk_inode->block_count = 0;
  disk_inode->length = 0;
  free_inode_blocks (disk_inode);

  /* update to the inode sector */
  cache_write (fs_cache, inode->sector, &inode->data, 0, BLOCK_SECTOR_SIZE);
  return true;
}
Example 11
/* extend inode at sector sector with length bytes */
static bool
inode_extend (struct inode* inode, off_t ext_length)
{
	if(INODE_DEBUG || FILE_DEBUG) printf("INODE: extending inode %u by %i bytes\n", inode->sector, ext_length);

	lock_acquire(&inode->lock);

	bool success = true;

	/* local copy of disk inode */
	struct inode_disk* id = malloc(sizeof(struct inode_disk));
	cache_read(inode->sector, id, 0, sizeof(struct inode_disk));

	/* free space left in bytes */
	off_t free_space = id->sector_count * BLOCK_SECTOR_SIZE - id->length;

	/* needed sectors */
	size_t sectors = bytes_to_sectors (ext_length - free_space);

	/* add sector to inode */
	unsigned i;
	block_sector_t block_sector;
	for(i = 0; i < sectors; i++)
	{
		/* allocate one sector at a time */
		if(free_map_allocate (1, &block_sector))
		{
			/* add new block to inode */
			inode_add_block(inode, block_sector);
		}
		/* not enough space on disk - abort */
		else
		{
			printf("INODE: that should not happen.\n");
			success = false;
			break;
		}
	}
	
	/* increment length and write back */
	id->length += ext_length;
	cache_write(inode->sector, (void *) &id->length, INODE_OFFSET_LENGTH, 4);
	free(id);

	lock_release(&inode->lock);

	if(INODE_DEBUG || FILE_DEBUG) printf("INODE: completed extending inode %u by %i bytes : %u\n", inode->sector, ext_length, (unsigned)success);

	return success;
}
Example 12
/*! Writes SIZE bytes from BUFFER into INODE, starting at OFFSET.
    Returns the number of bytes actually written, which may be
    less than SIZE if an error occurs. */
off_t inode_write_at(struct inode *inode, const void *buffer_, off_t size, off_t offset) {
    const uint8_t *buffer = buffer_;
    off_t bytes_written = 0;
    uint8_t *bounce = NULL;

    if (inode->deny_write_cnt)
        return 0;
    
    // grow if necessary
    if (offset + size > inode->data.length) {
      bool success = grow(&(inode->data), offset + size);
      if (!success)
          return 0;
      lock_acquire(filesys_lock_list + inode->sector);
      block_write(fs_device, inode->sector, &(inode->data));
      lock_release(filesys_lock_list + inode->sector);
    }

    while (size > 0) {
        /* Sector to write, starting byte offset within sector. */
        block_sector_t sector_idx = byte_to_sector(inode, offset);
        int sector_ofs = offset % BLOCK_SECTOR_SIZE;

        /* Bytes left in inode, bytes left in sector, lesser of the two. */
        off_t inode_left = inode_length(inode) - offset;
        int sector_left = BLOCK_SECTOR_SIZE - sector_ofs;
        int min_left = inode_left < sector_left ? inode_left : sector_left;

        /* Number of bytes to actually write into this sector. */
        int chunk_size = size < min_left ? size : min_left;
        if (chunk_size <= 0)
            break;
        
        if (sector_idx == -1) {
          return bytes_written;
        }
        
        bounce = cache_write(inode, sector_idx);
        if(bounce == NULL) { break; }
        if (sector_ofs == 0 && chunk_size == sector_left)
            memset(bounce, 0, BLOCK_SECTOR_SIZE);
        memcpy(bounce + sector_ofs, buffer + bytes_written, chunk_size);

        /* Advance. */
        size -= chunk_size;
        offset += chunk_size;
        bytes_written += chunk_size;
    }

    return bytes_written;
}
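
Example 12 assumes a different cache interface from the other Pintos samples: its cache_write pins a sector in the cache and hands back a pointer to the cached data, which the caller then fills in place. A hypothetical prototype consistent with the call site:

/* Hypothetical prototype matching Example 12's usage: returns a
   pointer to the cached copy of SECTOR's data, or NULL on failure. */
uint8_t *cache_write (struct inode *inode, block_sector_t sector);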
Example 13
/* Initializes an inode with LENGTH bytes of data and
   writes the new inode to sector SECTOR on the file system
   disk.
   Returns true if successful.
   Returns false if memory or disk allocation fails. */
bool
inode_create (disk_sector_t sector, off_t length, bool isdir)
{
  //printf("@@@@@@@@@@@@@@@@@@ inode_create start sector : %d length : %d @@@@@@@@@@@@\n",sector,length);
  struct inode_disk *disk_inode = NULL;
  bool success = false;

  ASSERT (length >= 0);

  /* If this assertion fails, the inode structure is not exactly
     one sector in size, and you should fix that. */
  ASSERT (sizeof *disk_inode == DISK_SECTOR_SIZE);

  disk_inode = calloc (1, sizeof *disk_inode);
  if (disk_inode != NULL)
    {
      size_t sectors = bytes_to_sectors (length);
      disk_inode->length = length;
      disk_inode->isdir = isdir;
      disk_inode->magic = INODE_MAGIC;
      /*
      if (free_map_allocate (sectors, &disk_inode->start))
        {
          disk_write (filesys_disk, sector, disk_inode);
          if (sectors > 0) 
            {
              static char zeros[DISK_SECTOR_SIZE];
              size_t i;
              
              for (i = 0; i < sectors; i++) 
                disk_write (filesys_disk, disk_inode->start + i, zeros); 
            }
          success = true; 
        }*/
      if(allocate_sectors(disk_inode, sectors, 0))
      {
        //disk_write(filesys_disk, sector, disk_inode);
        cache_write(sector, disk_inode, 512, 0);
        //printf("@@@@@@@@ disk_inode -> disk @@@@@@@\n");
        success = true;
      }
      free (disk_inode);
    }
  //printf("@@@@@@@@@@ inode_create finish @@@@@@@@@\n");
  return success;
}
Example 14
static bool allocate_zero_block (block_sector_t *res)
{
  char *zero_block = calloc (1, BLOCK_SECTOR_SIZE);
  bool success = false;

  if (zero_block == NULL)
    return false;

  if (free_map_allocate (1, res))
  {
    cache_write (fs_cache, *res, zero_block, 0, BLOCK_SECTOR_SIZE);
    success = true;
  }

  free (zero_block);
  return success;
}
Example 15
/* Initializes an inode with LENGTH bytes of data and
   writes the new inode to sector SECTOR on the file system
   device.
   Returns true if successful.
   Returns false if memory or disk allocation fails. */
bool
inode_create (block_sector_t sector, off_t length, enum file_t file_type)
{
	if(INODE_DEBUG || FILE_DEBUG) printf("INODE: creating inode for sector %u with initial size %i\n", sector, length);

	struct inode_disk *disk_inode = NULL;
	bool success = false;

	ASSERT (length >= 0);

	/* If this assertion fails, the inode structure is not exactly
	 one sector in size, and you should fix that. */
	ASSERT (sizeof *disk_inode == BLOCK_SECTOR_SIZE);

	disk_inode = calloc (1, sizeof *disk_inode);
	if (disk_inode != NULL)
	{

		disk_inode->length = 0;
		disk_inode->sector_count = 0;
		disk_inode->magic = INODE_MAGIC;
		disk_inode->type = file_type;

		/* create empty file */
		cache_write(sector, disk_inode, 0, BLOCK_SECTOR_SIZE);

		/* open the file */
		struct inode* inode = inode_open(sector);

		/* extend file by length bytes */
		success = inode_extend(inode, length);

		/* close file */
		inode_close(inode);

		/* free space */
		free (disk_inode);
    }

	return success;
}
Example 16
/* Writes SIZE bytes from BUFFER into INODE, starting at OFFSET.
   Returns the number of bytes actually written, which may be
   less than SIZE if an error occurs. */
off_t
inode_write_at (struct inode *inode, const void *buffer_, off_t size,
                off_t offset)
{
  const uint8_t *buffer = buffer_;
  off_t bytes_written = 0;
  off_t length;

  if (inode->deny_write_cnt)
    return 0;

  /* Extend file; bail out if allocation fails. */
  length = inode_length (inode);
  if (offset + size > length
      && !inode_extend (inode, offset + size - length))
    return 0;

  while (size > 0)
    {
      /* Sector to write, starting byte offset within sector. */
      disk_sector_t sector_idx = byte_to_sector (inode, offset);
      int sector_ofs = offset % DISK_SECTOR_SIZE;

      /* Bytes left in inode, bytes left in sector, lesser of the two. */
      off_t inode_left = inode_length (inode) - offset;
      int sector_left = DISK_SECTOR_SIZE - sector_ofs;
      int min_left = inode_left < sector_left ? inode_left : sector_left;

      /* Number of bytes to actually write into this sector. */
      int chunk_size = size < min_left ? size : min_left;
      if (chunk_size <= 0)
        break;

      /* Write sector through buffer cache. */
      cache_write (sector_idx, buffer + bytes_written, sector_ofs, chunk_size);

      /* Advance. */
      size -= chunk_size;
      offset += chunk_size;
      bytes_written += chunk_size;
    }

  return bytes_written;
}
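
A minimal usage sketch for this style of inode_write_at (an append at end of file; names follow the examples above, the inode is assumed open, and <string.h> plus the inode headers are assumed included):

/* Append 100 bytes of 'x'; a short count means extension or
   allocation failed part way through. */
char buf[100];
memset (buf, 'x', sizeof buf);
off_t written = inode_write_at (inode, buf, sizeof buf, inode_length (inode));
if (written < (off_t) sizeof buf)
  printf ("short write: %d bytes\n", (int) written);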
Example 17
/* Writes SIZE bytes from BUFFER into INODE, starting at OFFSET.
   Returns the number of bytes actually written, which may be
   less than SIZE if an error occurs.  A write at end of file
   extends the inode. */
off_t
inode_write_at (struct inode *inode, const void *buffer_, off_t size,
                off_t offset) 
{
  const uint8_t *buffer = buffer_;
  off_t bytes_written = 0;
  if (inode->deny_write_cnt)
    return 0;
  if(offset + size > inode->data.length){ //growth
    if(!inode_growth(&inode->data, inode->sector, offset + size, INODE_MAX_LEVEL, inode->data.is_dir))
      return 0;
    inode->data.length = offset + size;
  }

  while (size > 0) 
    {
      /* Sector to write, starting byte offset within sector. */
      disk_sector_t sector_idx = byte_to_sector (inode, offset);
      int sector_ofs = offset % DISK_SECTOR_SIZE;

      /* Bytes left in inode, bytes left in sector, lesser of the two. */
      off_t inode_left = inode_length (inode) - offset;
      int sector_left = DISK_SECTOR_SIZE - sector_ofs;
      int min_left = inode_left < sector_left ? inode_left : sector_left;

      /* Number of bytes to actually write into this sector. */
      int chunk_size = size < min_left ? size : min_left;
      if (chunk_size <= 0)
        break;
      
      cache_write (sector_idx, buffer + bytes_written, sector_ofs, chunk_size); 

      /* Advance. */
      size -= chunk_size;
      offset += chunk_size;
      bytes_written += chunk_size;
    }

  return bytes_written;
}
Example 18
/* Initializes an inode with LENGTH bytes of data and
   writes the new inode to sector SECTOR on the file system
   device.
   Returns true if successful.
   Returns false if memory or disk allocation fails. */
bool
inode_create (block_sector_t sector, off_t length)
{
  struct inode_disk *disk_inode = NULL;
  bool success = true;

  ASSERT (length >= 0);

  /* If this assertion fails, the inode structure is not exactly
     one sector in size, and you should fix that. */
  ASSERT (sizeof *disk_inode == BLOCK_SECTOR_SIZE);

  disk_inode = calloc (1, sizeof *disk_inode);
  if (disk_inode != NULL)
    {
      size_t sectors = bytes_to_sectors (length);
      disk_inode->block_count = 0;
      disk_inode->length = length;
      disk_inode->magic = INODE_MAGIC;
      for (; disk_inode->block_count < sectors; disk_inode->block_count++)
      {
        if (!allocate_block (disk_inode, disk_inode->block_count))
        {
          success = false;
          break;
        }
      }
      if (!success)
      {
        free_inode_blocks (disk_inode);
      }
      else
      {
        cache_write (fs_cache, sector, disk_inode, 0, BLOCK_SECTOR_SIZE);
      }
      free (disk_inode);
    }
  else
    success = false;        /* memory allocation failed */
  return success;
}
Example 19
static void test_regsave0(void) {
	// arrange things so that a save fails
	{
		reg_setSpecial(rL,0);
		reg_setSpecial(rS,STACK_ADDR);
		reg_setSpecial(rO,STACK_ADDR);

		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			test_assertOcta(reg_getSpecial(rL),0);
			test_assertOcta(reg_getSpecial(rS),STACK_ADDR);
			test_assertOcta(reg_getSpecial(rO),STACK_ADDR);
		}
		else {
			ex_push(&env);
			reg_save(255,false);
			test_assertFalse(true);
		}
		ex_pop();
	}

	// arrange things so that when doing a save, the first stores succeed, but not all
	{
		reg_setSpecial(rS,0x100000000 - 48);
		reg_setSpecial(rO,0x100000000 - 48);
		// PTE 1 for 0x100000000 .. 0x1FFFFFFFF (---)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000000,0);

		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			test_assertOcta(reg_getSpecial(rL),0);
			test_assertOcta(reg_getSpecial(rS),0x100000000 - 48);
			test_assertOcta(reg_getSpecial(rO),0x100000000 - 48);
		}
		else {
			ex_push(&env);
			reg_save(255,false);
			test_assertFalse(true);
		}
		ex_pop();
	}

	// arrange things so that when doing a save, the first stores succeed, but not all
	// and push a few registers down first
	{
		reg_setSpecial(rS,0x100000000 - 258 * sizeof(octa));
		reg_setSpecial(rO,0x100000000 - 258 * sizeof(octa));
		// PTE 1 for 0x100000000 .. 0x1FFFFFFFF (---)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000000,0);

		reg_set(254,0x1234);
		reg_push(254);
		reg_set(10,0x5678);
		test_assertOcta(reg_getSpecial(rL),11);
		test_assertOcta(reg_getSpecial(rS),0xfffff848);
		test_assertOcta(reg_getSpecial(rO),0xffffffe8);

		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			test_assertOcta(reg_getSpecial(rL),11);
			test_assertOcta(reg_getSpecial(rS),0xfffff848);
			test_assertOcta(reg_getSpecial(rO),0xffffffe8);
		}
		else {
			ex_push(&env);
			reg_save(255,false);
			test_assertFalse(true);
		}
		ex_pop();
	}

	// arrange things so that the save works, but one write more would fail
	{
		// 13 special, rL, 60 locals, 56 globals; 200 regs in the caller
		reg_setSpecial(rS,0x100000000 - (13 + 1 + 60 + 56 + 200 + 1) * sizeof(octa));
		reg_setSpecial(rO,0x100000000 - (13 + 1 + 60 + 56 + 200 + 1) * sizeof(octa));
		// PTE 1 for 0x100000000 .. 0x1FFFFFFFF (---)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000000,0);

		reg_setSpecial(rG,200);
		reg_set(199,0x1234);
		reg_push(199);
		reg_set(60,0x5678);
		test_assertOcta(reg_getSpecial(rL),61);
		// 6 have already been saved because of $60 = 0x5678
		test_assertOcta(reg_getSpecial(rS),0x100000000 - (13 + 1 + 60 + 56 + 200 + 1 - 6) * sizeof(octa));
		// 200 have been pushed down
		test_assertOcta(reg_getSpecial(rO),0x100000000 - (13 + 1 + 60 + 56 + 1) * sizeof(octa));
		reg_save(255,false);
		test_assertOcta(reg_getSpecial(rL),0);
		// this proves that one write more would have failed
		test_assertOcta(reg_getSpecial(rS),0x100000000);
		test_assertOcta(reg_getSpecial(rO),0x100000000);
	}

	// undo mapping
	cache_write(CACHE_DATA,0x400000008,0,0);
	cache_write(CACHE_DATA,0x400000010,0,0);
	tc_removeAll(TC_DATA);
}
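
In the MMIX emulator tests (Examples 19, 20 and 26), cache_write(CACHE_DATA, addr, value, 0) pokes an octa directly into the data cache, which the tests use to rewrite page-table entries. Judging only by the two PTE constants used, the low bits carry the rwx permissions; this is an inferred reading, not the emulator's documented layout:

/* Inferred from the constants in the tests above:
   0x0000000400000007  ->  frame at 0x400000000, permissions rwx
   0x0000000400000000  ->  same frame, no permissions (---)
   so toggling the low three bits flips the accessibility of
   0x100000000 .. 0x1FFFFFFFF without remapping it. */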
Example 20
static void test_regpop(void) {
	reg_setSpecial(rL,0);

	// arrange things so that a pop(0) needs to read a register from memory first
	{
		reg_setSpecial(rS,STACK_ADDR);
		reg_setSpecial(rO,STACK_ADDR);

		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			test_assertOcta(reg_getSpecial(rL),0);
			test_assertOcta(reg_getSpecial(rS),STACK_ADDR);
			test_assertOcta(reg_getSpecial(rO),STACK_ADDR);
		}
		else {
			ex_push(&env);
			reg_pop(0);
			test_assertFalse(true);
		}
		ex_pop();
	}

	// arrange things so that when doing a pop(1), the first loads succeed, but not all
	{
		reg_setSpecial(rS,0x200000000 - 16);
		reg_setSpecial(rO,0x200000000 - 16);
		// PTE 1 for 0x100000000 .. 0x1FFFFFFFF (rwx)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000007,0);
		// PTE 2 for 0x200000000 .. 0x2FFFFFFFF (rwx)
		cache_write(CACHE_DATA,0x400000010,0x0000000500000007,0);

		reg_set(254,0x1234);
		reg_push(254);
		reg_set(253,0x1234);
		test_assertOcta(reg_getSpecial(rL),254);
		test_assertOcta(reg_getSpecial(rS),(0x200000000 - 16) + 0x7F0);
		test_assertOcta(reg_getSpecial(rO),(0x200000000 - 16) + 0x7F8);

		// make it unreadable (0x100000000 .. 0x1FFFFFFFF)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000000,0);
		tc_removeAll(TC_DATA);

		// try a pop(1)
		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			// the last two are left
			test_assertOcta(reg_getSpecial(rL),2);
			// rS has walked back, but stopped at 0x200000000 because the page isn't readable
			test_assertOcta(reg_getSpecial(rS),0x200000000);
			test_assertOcta(reg_getSpecial(rO),(0x200000000 - 16) + 0x7F8);
		}
		else {
			ex_push(&env);
			reg_pop(1);
			test_assertFalse(true);
		}
		ex_pop();

		// call a subroutine with the state left by the unfinished pop
		reg_push(255);
		reg_pop(0);
		test_assertOcta(reg_getSpecial(rL),2);
		test_assertOcta(reg_getSpecial(rS),0x200000008);
		test_assertOcta(reg_getSpecial(rO),(0x200000000 - 16) + 0x7F8);

		// now, make it writable again and retry the pop (0x100000000 .. 0x1FFFFFFFF)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000007,0);
		tc_removeAll(TC_DATA);

		// repeat pop, which should succeed
		reg_pop(1);
		test_assertOcta(reg_getSpecial(rL),255);
		test_assertOcta(reg_getSpecial(rS),0x200000000 - 16);
		test_assertOcta(reg_getSpecial(rO),0x200000000 - 16);

		// undo mapping
		cache_write(CACHE_DATA,0x400000008,0,0);
		cache_write(CACHE_DATA,0x400000010,0,0);
		tc_removeAll(TC_DATA);
	}

	// arrange things so that when doing a pop(1), only the first load succeeds
	{
		reg_setSpecial(rL,0);
		reg_setSpecial(rS,0x200000000 - 0x7F0);
		reg_setSpecial(rO,0x200000000 - 0x7F0);
		// PTE 1 for 0x100000000 .. 0x1FFFFFFFF (rwx)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000007,0);
		// PTE 2 for 0x200000000 .. 0x2FFFFFFFF (rwx)
		cache_write(CACHE_DATA,0x400000010,0x0000000500000007,0);

		reg_push(0);
		reg_set(253,0x1234);
		reg_push(253);
		reg_set(254,0x1234);
		test_assertOcta(reg_getSpecial(rL),255);
		test_assertOcta(reg_getSpecial(rS),0x200000008);
		test_assertOcta(reg_getSpecial(rO),0x200000008);

		// make it unreadable (0x100000000 .. 0x1FFFFFFFF)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000000,0);
		tc_removeAll(TC_DATA);

		// try a pop(1)
		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			test_assertOcta(reg_getSpecial(rL),254);
			// rS has walked back, but stopped at 0x200000000 because the page isn't readable
			test_assertOcta(reg_getSpecial(rS),0x200000000);
			test_assertOcta(reg_getSpecial(rO),0x200000008);
		}
		else {
			ex_push(&env);
			reg_pop(1);
			test_assertFalse(true);
		}
		ex_pop();

		// call a subroutine with the state left by the unfinished pop
		reg_push(255);
		reg_pop(0);
		test_assertOcta(reg_getSpecial(rL),254);
		// note that rS has been increased, because the value loaded by the previous pop has been
		// saved again by the push and will be loaded again by the next pop.
		test_assertOcta(reg_getSpecial(rS),0x200000008);
		test_assertOcta(reg_getSpecial(rO),0x200000008);

		// now, make it writable again and retry the pop (0x100000000 .. 0x1FFFFFFFF)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000007,0);
		tc_removeAll(TC_DATA);

		// repeat pop, which should succeed
		reg_pop(1);
		test_assertOcta(reg_getSpecial(rL),254);
		test_assertOcta(reg_getSpecial(rS),0x200000000 - 0x7E8);
		test_assertOcta(reg_getSpecial(rO),0x200000000 - 0x7E8);
		// restore the original state
		reg_pop(0);
		test_assertOcta(reg_getSpecial(rL),0);
		test_assertOcta(reg_getSpecial(rS),0x200000000 - 0x7F0);
		test_assertOcta(reg_getSpecial(rO),0x200000000 - 0x7F0);

		// undo mapping
		cache_write(CACHE_DATA,0x400000008,0,0);
		cache_write(CACHE_DATA,0x400000010,0,0);
		tc_removeAll(TC_DATA);
	}

	// arrange things so that when doing a pop(1), only the first two loads succeed
	{
		reg_setSpecial(rL,0);
		reg_setSpecial(rS,0x200000000 - 0x7E8);
		reg_setSpecial(rO,0x200000000 - 0x7E8);
		// PTE 1 for 0x100000000 .. 0x1FFFFFFFF (rwx)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000007,0);
		// PTE 2 for 0x200000000 .. 0x2FFFFFFFF (rwx)
		cache_write(CACHE_DATA,0x400000010,0x0000000500000007,0);

		reg_push(0);
		reg_set(253,0x1234);
		reg_push(253);
		reg_set(254,0x1234);
		test_assertOcta(reg_getSpecial(rL),255);
		test_assertOcta(reg_getSpecial(rS),0x200000010);
		test_assertOcta(reg_getSpecial(rO),0x200000010);

		// make it unreadable (0x100000000 .. 0x1FFFFFFFF)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000000,0);
		tc_removeAll(TC_DATA);

		// try a pop(1)
		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			// two locals less
			test_assertOcta(reg_getSpecial(rL),253);
			// rS has walked back, but stopped at 0x200000000 because the page isn't readable
			test_assertOcta(reg_getSpecial(rS),0x200000000);
			test_assertOcta(reg_getSpecial(rO),0x200000010);
		}
		else {
			ex_push(&env);
			reg_pop(1);
			test_assertFalse(true);
		}
		ex_pop();

		// call a subroutine with the state left by the unfinished pop
		reg_push(255);
		reg_pop(0);
		test_assertOcta(reg_getSpecial(rL),253);
		// as in the previous test: rS has been increased
		test_assertOcta(reg_getSpecial(rS),0x200000008);
		test_assertOcta(reg_getSpecial(rO),0x200000010);

		// now, make it writable again and retry the pop (0x100000000 .. 0x1FFFFFFFF)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000007,0);
		tc_removeAll(TC_DATA);

		// repeat pop, which should succeed
		reg_pop(1);
		test_assertOcta(reg_getSpecial(rL),254);
		test_assertOcta(reg_getSpecial(rS),0x200000000 - 0x7E0);
		test_assertOcta(reg_getSpecial(rO),0x200000000 - 0x7E0);
		// restore the original state
		reg_pop(0);
		test_assertOcta(reg_getSpecial(rL),0);
		test_assertOcta(reg_getSpecial(rS),0x200000000 - 0x7E8);
		test_assertOcta(reg_getSpecial(rO),0x200000000 - 0x7E8);

		// undo mapping
		cache_write(CACHE_DATA,0x400000008,0,0);
		cache_write(CACHE_DATA,0x400000010,0,0);
		tc_removeAll(TC_DATA);
	}
}
Example 21
static int
on_read_request_process(struct query_state *qstate)
{
	struct cache_read_request *read_request;
	struct cache_read_response *read_response;
	cache_entry	c_entry, neg_c_entry;

	struct agent	*lookup_agent;
	struct common_agent *c_agent;
	int res;

	TRACE_IN(on_read_request_process);
	init_comm_element(&qstate->response, CET_READ_RESPONSE);
	read_response = get_cache_read_response(&qstate->response);
	read_request = get_cache_read_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, read_request->entry);
	if (qstate->config_entry == NULL) {
		read_response->error_code = ENOENT;

		LOG_ERR_2("read_request",
			"can't find configuration "
			"entry '%s'. aborting request", read_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		read_response->error_code = EACCES;

		LOG_ERR_2("read_request",
			"configuration entry '%s' is disabled",
			read_request->entry);
		goto fin;
	}

	/*
	 * if we perform lookups by ourselves, then we don't need to separate
	 * cache entries by euid and egid
	 */
	if (qstate->config_entry->perform_actual_lookups != 0)
		memset(read_request->cache_key, 0, qstate->eid_str_length);
	else {
#ifdef NS_NSCD_EID_CHECKING
		if (check_query_eids(qstate) != 0) {
		/* if the lookup is not self-performing, we check for clients euid/egid */
			read_response->error_code = EPERM;
			goto fin;
		}
#endif
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->positive_cache_params.entry_name);
	neg_c_entry = find_cache_entry(s_cache,
		qstate->config_entry->negative_cache_params.entry_name);
	configuration_unlock(s_configuration);
	if ((c_entry != NULL) && (neg_c_entry != NULL)) {
		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
		qstate->config_entry->positive_cache_entry = c_entry;
		read_response->error_code = cache_read(c_entry,
			read_request->cache_key,
			read_request->cache_key_size, NULL,
			&read_response->data_size);

		if (read_response->error_code == -2) {
			read_response->data = (char *)malloc(
				read_response->data_size);
			assert(read_response->data != NULL);
			read_response->error_code = cache_read(c_entry,
				read_request->cache_key,
				read_request->cache_key_size,
				read_response->data,
				&read_response->data_size);
		}
		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);

		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
		qstate->config_entry->negative_cache_entry = neg_c_entry;
		if (read_response->error_code == -1) {
			read_response->error_code = cache_read(neg_c_entry,
				read_request->cache_key,
				read_request->cache_key_size, NULL,
				&read_response->data_size);

			if (read_response->error_code == -2) {
				read_response->error_code = 0;
				read_response->data = NULL;
				read_response->data_size = 0;
			}
		}
		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);

		if ((read_response->error_code == -1) &&
			(qstate->config_entry->perform_actual_lookups != 0)) {
			free(read_response->data);
			read_response->data = NULL;
			read_response->data_size = 0;

			lookup_agent = find_agent(s_agent_table,
				read_request->entry, COMMON_AGENT);

			if ((lookup_agent != NULL) &&
			(lookup_agent->type == COMMON_AGENT)) {
				c_agent = (struct common_agent *)lookup_agent;
				res = c_agent->lookup_func(
					read_request->cache_key +
						qstate->eid_str_length,
					read_request->cache_key_size -
						qstate->eid_str_length,
					&read_response->data,
					&read_response->data_size);

				if (res == NS_SUCCESS) {
					read_response->error_code = 0;
					configuration_lock_entry(
						qstate->config_entry,
						CELT_POSITIVE);
					cache_write(c_entry,
						read_request->cache_key,
						read_request->cache_key_size,
						read_response->data,
						read_response->data_size);
					configuration_unlock_entry(
						qstate->config_entry,
						CELT_POSITIVE);
				} else if ((res == NS_NOTFOUND) ||
					  (res == NS_RETURN)) {
					configuration_lock_entry(
						  qstate->config_entry,
						  CELT_NEGATIVE);
					cache_write(neg_c_entry,
						read_request->cache_key,
						read_request->cache_key_size,
						negative_data,
						sizeof(negative_data));
					configuration_unlock_entry(
						  qstate->config_entry,
						  CELT_NEGATIVE);

					read_response->error_code = 0;
					read_response->data = NULL;
					read_response->data_size = 0;
				}
			}
		}

		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));
	} else
		read_response->error_code = -1;

fin:
	qstate->kevent_filter = EVFILT_WRITE;
	if (read_response->error_code == 0)
		qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
	else
		qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_read_response_write1;

	TRACE_OUT(on_read_request_process);
	return (0);
}
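
Example 21 leans on an unstated convention for nscd's cache_read return codes; as the call sites use them (inferred from this code, not from a header):

/* cache_read() return codes as used above (inferred):
 *    0  hit: the value was copied into the supplied buffer
 *   -1  miss: the key is not present in this cache entry
 *   -2  hit, but the buffer was too small; the required size has
 *       been stored through the size pointer, so the caller
 *       mallocs a buffer of that size and calls again. */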
Example 22
static int
on_negative_write_request_process(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	struct cache_write_response	*write_response;
	cache_entry c_entry;

	TRACE_IN(on_negative_write_request_process);
	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
	write_response = get_cache_write_response(&qstate->response);
	write_request = get_cache_write_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, write_request->entry);

	if (qstate->config_entry == NULL) {
		write_response->error_code = ENOENT;

		LOG_ERR_2("negative_write_request",
			"can't find configuration"
			" entry '%s'. aborting request", write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		write_response->error_code = EACCES;

		LOG_ERR_2("negative_write_request",
			"configuration entry '%s' is disabled",
			write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->perform_actual_lookups != 0) {
		write_response->error_code = EOPNOTSUPP;

		LOG_ERR_2("negative_write_request",
			"entry '%s' performs lookups by itself: "
			"can't write to it", write_request->entry);
		goto fin;
	} else {
#ifdef NS_NSCD_EID_CHECKING
		if (check_query_eids(qstate) != 0) {
			write_response->error_code = EPERM;
			goto fin;
		}
#endif
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->negative_cache_params.entry_name);
	configuration_unlock(s_configuration);
	if (c_entry != NULL) {
		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
		qstate->config_entry->negative_cache_entry = c_entry;
		write_response->error_code = cache_write(c_entry,
			write_request->cache_key,
			write_request->cache_key_size,
			negative_data,
			sizeof(negative_data));
		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);

		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));
	} else
		write_response->error_code = -1;

fin:
	qstate->kevent_filter = EVFILT_WRITE;
	qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_write_response_write1;

	TRACE_OUT(on_negative_write_request_process);
	return (0);
}
Example 23
/* append new_sector as the inode's next data sector (the first
 * not-yet-allocated slot); the caller must hold the inode lock */
bool append_sector_to_inode(struct inode_disk *id,
		block_sector_t new_sector) {
	int sectors = (int)bytes_to_sectors(id->length);
	static struct indirect_block ib;
	static struct indirect_block db;

	if (sectors <= DIRECT_INDEX_NUM) {
		if (sectors < DIRECT_INDEX_NUM) {
			/*within direct index part*/
			id->direct_idx[sectors] = new_sector;
		} else {
			/*use up direct index part, start using single indirect index*/
			if (!free_map_allocate (1, &id->single_idx)) {
				return false;
			}
			ib.sectors[0] = new_sector;
			cache_write(id->single_idx, &ib, 0, BLOCK_SECTOR_SIZE);
		}
	} else if (sectors <= DIRECT_INDEX_NUM+INDEX_PER_SECTOR) {
		if (sectors < DIRECT_INDEX_NUM+INDEX_PER_SECTOR) {
			/*within single indirect index part*/
			cache_read(id->single_idx, INVALID_SECTOR_ID,
					&ib, 0, BLOCK_SECTOR_SIZE);
			ib.sectors[sectors-DIRECT_INDEX_NUM] = new_sector;
			cache_write(id->single_idx, &ib, 0, BLOCK_SECTOR_SIZE);
		} else {
			/*use up single indirect index part, start using
			 * double indirect index*/
			if (!free_map_allocate (1, &id->double_idx)) {
				return false;
			}
			static struct indirect_block single_ib;
			if (!free_map_allocate (1, &db.sectors[0])) {
				free_map_release (id->double_idx, 1);
				return false;
			}
			single_ib.sectors[0] = new_sector;
			cache_write(db.sectors[0], &single_ib, 0, BLOCK_SECTOR_SIZE);
			cache_write(id->double_idx, &db, 0, BLOCK_SECTOR_SIZE);
		}
	} else {
		size_t sectors_left=sectors - DIRECT_INDEX_NUM - INDEX_PER_SECTOR;
		if(sectors_left%INDEX_PER_SECTOR ==0){
			/*on the edge of one double indirect index, need
			 *  to allocate another single indirect index in
			 *  the double indirect index*/
			cache_read(id->double_idx, INVALID_SECTOR_ID,
					&db, 0, BLOCK_SECTOR_SIZE);
			if (!free_map_allocate (1,
					&db.sectors[sectors_left/INDEX_PER_SECTOR])) {
				return false;
			}
			ib.sectors[0]=new_sector;
			cache_write(db.sectors[sectors_left/INDEX_PER_SECTOR],
					&ib, 0, BLOCK_SECTOR_SIZE);
			cache_write(id->double_idx, &db, 0, BLOCK_SECTOR_SIZE);
		}else{
			cache_read(id->double_idx, INVALID_SECTOR_ID,
					&db, 0, BLOCK_SECTOR_SIZE);
			cache_read(db.sectors[sectors_left/INDEX_PER_SECTOR],
					INVALID_SECTOR_ID, &ib, 0, BLOCK_SECTOR_SIZE);
			ib.sectors[sectors_left%INDEX_PER_SECTOR]=new_sector;
			cache_write(db.sectors[sectors_left/INDEX_PER_SECTOR],
					&ib, 0, BLOCK_SECTOR_SIZE);
		}

	}
	return true;
}
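
struct indirect_block, shared by Examples 23 and 25, is never shown; from the usage it is a sector-sized array of sector numbers. A sketch under that assumption:

/* Hypothetical definition consistent with Examples 23 and 25:
   INDEX_PER_SECTOR sector numbers fill one 512-byte sector. */
struct indirect_block
  {
    block_sector_t sectors[INDEX_PER_SECTOR];   /* 512 / 4 = 128 */
  };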
Example 24
/* truncates the inode to length 0 */
static void
inode_truncate (struct inode *inode)
{
	if(INODE_DEBUG) printf("INODE: inode %u is being truncated\n", inode->sector);

	ASSERT(lock_held_by_current_thread(&inode->lock));

	/* local copy of disks inode */
	struct inode_disk* id = malloc(BLOCK_SECTOR_SIZE);
	cache_read(inode->sector, id, 0, BLOCK_SECTOR_SIZE);

	/* if there is a block left */
	if(id->sector_count > 0)
	{
		/* delete all doubly indirect block sectors */
		if(id->sector_count > INODE_DIRECT_BLOCKS + INODE_INDIRECT_BLOCKS)
		{
			ASSERT(id->sector_count <= INODE_DIRECT_BLOCKS + INODE_INDIRECT_BLOCKS + INODE_DOUBLY_DIRECT_BLOCKS);

			/* fetch doubly indirect block sector */
			struct indirect_block_sector *doubly_indirect_bs = malloc(BLOCK_SECTOR_SIZE);
			cache_read(id->doubly_indirect_block_sector, doubly_indirect_bs, 0, BLOCK_SECTOR_SIZE);

			/* doubly indirect blocks */
			unsigned di_length = id->sector_count - (INODE_DIRECT_BLOCKS + INODE_INDIRECT_BLOCKS);
			off_t di_offset;

			/* delete every indirect block listed */
			while(di_length > 0)
			{
				/* doubly indirect block offset (last block still occupied) */
				di_offset = (di_length - 1) / INODE_INDIRECT_BLOCKS;

				/* fetch indirect block sector */
				struct indirect_block_sector *indirect_bs = malloc(BLOCK_SECTOR_SIZE);
				cache_read(doubly_indirect_bs->direct_block_sectors[di_offset], indirect_bs,
						0, BLOCK_SECTOR_SIZE);

				/* number of entries in the last indirect block sector;
				   a full block holds INODE_INDIRECT_BLOCKS entries */
				unsigned entry_cnt = di_length;
				if(entry_cnt > INODE_INDIRECT_BLOCKS) { entry_cnt = ((entry_cnt - 1) % INODE_INDIRECT_BLOCKS) + 1; }

				/* release every block sector registered */
				unsigned i;
				block_sector_t entry;
				for(i = 0; i < entry_cnt; i++)
				{
					/* fetch block sector number */
					entry = indirect_bs->direct_block_sectors[i];

					/* release block sector */
					free_map_release (entry, 1);
				}

				/* decrement length */
				di_length -= entry_cnt;

				/* done with this indirect block */
				free(indirect_bs);
			}

			ASSERT(di_length == 0);

			free(doubly_indirect_bs);

			/* only indirect and direct block sectors are left */
			id->sector_count = INODE_DIRECT_BLOCKS + INODE_INDIRECT_BLOCKS;
		}

		/* delete all indirect block sectors */
		if(id->sector_count > INODE_DIRECT_BLOCKS)
		{
			ASSERT(id->sector_count <= INODE_DIRECT_BLOCKS + INODE_INDIRECT_BLOCKS);

			/* internal count */
			unsigned cnt = id->sector_count - INODE_DIRECT_BLOCKS;

			/* fetch indirect block sector */
			struct indirect_block_sector *indirect_bs = malloc(BLOCK_SECTOR_SIZE);
			cache_read(id->indirect_block_sector, indirect_bs, 0, BLOCK_SECTOR_SIZE);

			/* release every block sector registered */
			unsigned i;
			block_sector_t entry;
			for(i = 0; i < cnt; i++)
			{
				/* fetch block sector number */
				entry = indirect_bs->direct_block_sectors[i];

				/* release block sector */
				free_map_release (entry, 1);
			}

			free(indirect_bs);
		}

		ASSERT(id->sector_count <= INODE_DIRECT_BLOCKS);

		/* delete direct block sectors */
		unsigned i;
		block_sector_t entry;
		for(i = 0; i < id->sector_count; i++)
		{
			/* fetch block sector number */
			entry = id->direct_block_sectors[i];

			/* release block sector */
			free_map_release (entry, 1);
		}

		/* save counter to inode */
		id->sector_count = 0;
		id->length = 0;

		/* writeback inode */
		cache_write(inode->sector, (void *) id, 0, BLOCK_SECTOR_SIZE);
	}

	free(id);
}
Example 25
/* Initializes an inode with LENGTH bytes of data and
   writes the new inode to sector SECTOR on the file system
   device.
   Returns true if successful.
   Returns false if memory or disk allocation fails. */
bool
inode_create (block_sector_t sector, off_t length, bool is_dir)
{
  struct inode_disk *disk_inode = NULL;

  ASSERT (length >= 0);
  /* If this assertion fails, the inode structure is not exactly
     one sector in size, and you should fix that. */
  ASSERT (sizeof *disk_inode == BLOCK_SECTOR_SIZE);

  disk_inode = calloc (1, sizeof *disk_inode);
  if (disk_inode != NULL)
    {
      int sectors = (int)bytes_to_sectors (length);
      disk_inode->length = length;
      disk_inode->magic = INODE_MAGIC;
      disk_inode->is_dir = is_dir;

      int i;
      block_sector_t sector_idx = 0;
      static char zeros[BLOCK_SECTOR_SIZE];
      bool allocate_failed = false;

      /* allocate sectors for data and write all zeros to sectors */
      int direct_sector_num = sectors < DIRECT_INDEX_NUM
              ? sectors : DIRECT_INDEX_NUM;
      int indirect_sector_num = (sectors - DIRECT_INDEX_NUM) < INDEX_PER_SECTOR
              ? (sectors - DIRECT_INDEX_NUM) : INDEX_PER_SECTOR;
      int double_indirect_sector_num = sectors - DIRECT_INDEX_NUM
              - INDEX_PER_SECTOR;

      /* allocate direct sectors */
      /* allocate direct sectors */
      for (i = 0; i < direct_sector_num; i++) {
          if (free_map_allocate (1, &sector_idx)) {
              disk_inode->direct_idx[i] = sector_idx;
              cache_write(sector_idx, zeros, 0, BLOCK_SECTOR_SIZE);
          } else {
              allocate_failed = true;
              break;
          }
      }
      /* release allocated direct sectors when failed to allocate */
      if (allocate_failed) {
          free_map_release_direct(disk_inode, i);
          free (disk_inode);
          return false;
      }

      static struct indirect_block ib;
      /* allocate single indirect sectors */
      if (indirect_sector_num > 0) {
          if (!free_map_allocate (1, &disk_inode->single_idx)) {
              free_map_release_all_direct(disk_inode);
              free (disk_inode);
              return false;
          }

          for (i = 0; i < indirect_sector_num; i++) {
              if (free_map_allocate (1, &sector_idx)) {
                  ib.sectors[i] = sector_idx;
                  cache_write(sector_idx, zeros, 0, BLOCK_SECTOR_SIZE);
              } else {
                  allocate_failed = true;
                  break;
              }
          }

          /* release all direct sectors and allocated single indirect
           * sectors when failed to allocate */
          if (allocate_failed) {
              free_map_release_all_direct(disk_inode);
              free_map_release_single_indirect(&ib, i);
              free_map_release(disk_inode->single_idx, 1);
              free (disk_inode);
              return false;
          }

          cache_write(disk_inode->single_idx, &ib, 0, BLOCK_SECTOR_SIZE);
      }



      /* allocate double indirect sectors */
      if (double_indirect_sector_num > 0) {
          if (!free_map_allocate (1, &disk_inode->double_idx)) {
              free_map_release_all_direct(disk_inode);
              free_map_release_all_single_indirect(&ib);
              free_map_release (disk_inode->single_idx, 1);
              free (disk_inode);
              return false;
          }

          off_t double_level_end_idx =
                  (double_indirect_sector_num-1) / INDEX_PER_SECTOR;
          off_t single_level_end_idx =
                  (double_indirect_sector_num-1) % INDEX_PER_SECTOR;
          int i, j;
          /* double indirect index block */
          static struct indirect_block db;
          /* buffer for the single indirect index blocks referenced
           * by the double indirect index block */
          static struct indirect_block single_ib;
          /* allocate all full single indirect blocks */
          for (i = 0; i < double_level_end_idx; i++) {
              if (!free_map_allocate (1, &db.sectors[i])) {
                  free_map_release_all_direct(disk_inode);
                  free_map_release_all_single_indirect(&ib);
                  free_map_release (disk_inode->single_idx, 1);
                  free_map_release_double_indirect (&db, i, 0);
                  free_map_release (disk_inode->double_idx, 1);
                  free (disk_inode);
                  return false;
              }

              /* fully allocate the whole single indirect block */
              for (j = 0; j < INDEX_PER_SECTOR; j++) {
                  if (free_map_allocate (1, &sector_idx)) {
                      single_ib.sectors[j] = sector_idx;
                      cache_write(sector_idx, zeros, 0, BLOCK_SECTOR_SIZE);
                  } else {
                      allocate_failed = true;
                      break;
                  }
              }

              if (allocate_failed) {
                  free_map_release_all_direct(disk_inode);
                  free_map_release_all_single_indirect(&ib);
                  free_map_release (disk_inode->single_idx, 1);
                  free_map_release_double_indirect (&db, i, j);
                  free_map_release (disk_inode->double_idx, 1);
                  free (disk_inode);
                  return false;
              }

              cache_write(db.sectors[i], &single_ib, 0, BLOCK_SECTOR_SIZE);
          }

          /* allocate the last partial/full single indirect block */
          if (!free_map_allocate (1, &db.sectors[double_level_end_idx])) {
              free_map_release_all_direct(disk_inode);
              free_map_release_all_single_indirect(&ib);
              free_map_release (disk_inode->single_idx, 1);
              free_map_release_double_indirect (&db, double_level_end_idx, 0);
              free_map_release (disk_inode->double_idx, 1);
              free (disk_inode);
              return false;
          }
          /* partially or fully (depending on single_level_end_idx)
           * allocate the last single indirect block */
          for (j = 0; j <= single_level_end_idx; j++) {
              if (free_map_allocate (1, &sector_idx)) {
                  single_ib.sectors[j] = sector_idx;
                  cache_write(sector_idx, zeros, 0, BLOCK_SECTOR_SIZE);
              } else {
                  allocate_failed = true;
                  break;
              }
          }

          if (allocate_failed) {
              free_map_release_all_direct(disk_inode);
              free_map_release_all_single_indirect(&ib);
              free_map_release (disk_inode->single_idx, 1);
              free_map_release_double_indirect (&db, double_level_end_idx, j);
              free_map_release (disk_inode->double_idx, 1);
              free (disk_inode);
              return false;
          }

          cache_write(db.sectors[double_level_end_idx],
                  &single_ib, 0, BLOCK_SECTOR_SIZE);
          /* update inode_disk (metadata) after successfully
           * allocating all necessary sectors */
          cache_write(disk_inode->double_idx, &db, 0, BLOCK_SECTOR_SIZE);
      }


      /* write inode_disk(metadata) to sector */
      cache_write(sector, disk_inode, 0, BLOCK_SECTOR_SIZE);
      free (disk_inode);
      return true;
    }
  return false;
}
Example 26
static void test_regunsave0(void) {
	static octa specials[SPECIAL_NUM];
	static octa locals[LREG_NUM];
	static octa globals[GREG_NUM];

	// set some state
	reg_setSpecial(rG,100);
	reg_setSpecial(rS,0x1000);
	reg_setSpecial(rO,0x1000);
	for(size_t i = 100; i < GREG_NUM; i++)
		reg_setGlobal(i,i);
	for(size_t i = 0; i < 10; i++)
		reg_set(i,i);

	// backup
	for(size_t i = 0; i < LREG_NUM; i++)
		locals[i] = reg_get(i);
	for(size_t i = 100; i < GREG_NUM; i++)
		globals[i] = reg_getGlobal(i);
	for(size_t i = 0; i < SPECIAL_NUM; i++)
		specials[i] = reg_getSpecial(i);

	// arrange things so that an unsave fails at the beginning
	{
		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			for(size_t i = 0; i < LREG_NUM; i++)
				test_assertOcta(reg_get(i),locals[i]);
			for(size_t i = 100; i < GREG_NUM; i++)
				test_assertOcta(reg_getGlobal(i),globals[i]);
			for(size_t i = 0; i < SPECIAL_NUM; i++)
				test_assertOcta(reg_getSpecial(i),specials[i]);
		}
		else {
			ex_push(&env);
			reg_unsave(STACK_ADDR,false);
			test_assertFalse(true);
		}
		ex_pop();
	}

	// PTE 1 for 0x100000000 .. 0x1FFFFFFFF (---)
	cache_write(CACHE_DATA,0x400000008,0x0000000400000000,0);
	// PTE 2 for 0x200000000 .. 0x2FFFFFFFF (rwx)
	cache_write(CACHE_DATA,0x400000010,0x0000000500000007,0);

	// arrange things so that an unsave fails when reading rL
	{
		// set rG|rA
		mmu_writeOcta(0x200000020,0xFE00000000000000,MEM_SIDE_EFFECTS);

		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			for(size_t i = 0; i < LREG_NUM; i++)
				test_assertOcta(reg_get(i),locals[i]);
			for(size_t i = 100; i < GREG_NUM; i++)
				test_assertOcta(reg_getGlobal(i),globals[i]);
			for(size_t i = 0; i < SPECIAL_NUM; i++)
				test_assertOcta(reg_getSpecial(i),specials[i]);
		}
		else {
			ex_push(&env);
			reg_unsave(0x200000020,false);
			test_assertFalse(true);
		}
		ex_pop();
	}

	// arrange things so that an unsave fails when testing the range
	{
		// set rG|rA
		mmu_writeOcta(0x200000080,0xFE00000000000000,MEM_SIDE_EFFECTS);

		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			for(size_t i = 0; i < LREG_NUM; i++)
				test_assertOcta(reg_get(i),locals[i]);
			for(size_t i = 100; i < GREG_NUM; i++)
				test_assertOcta(reg_getGlobal(i),globals[i]);
			for(size_t i = 0; i < SPECIAL_NUM; i++)
				test_assertOcta(reg_getSpecial(i),specials[i]);
		}
		else {
			ex_push(&env);
			reg_unsave(0x200000080,false);
			test_assertFalse(true);
		}
		ex_pop();
	}

	// arrange things so that it works, but one value more would fail
	{
		// we have 13 specials, rL, rL locals and rG globals
		octa off = (13 + 1 + 10 + (256 - 254) - 1) * sizeof(octa);
		// set rG|rA
		mmu_writeOcta(0x200000000 + off,0xFE00000000000000,MEM_SIDE_EFFECTS);
		// rL is saved 13+rG positions further
		mmu_writeOcta(0x200000000 + off - (13 + (256 - 254)) * sizeof(octa),10,MEM_SIDE_EFFECTS);

		reg_unsave(0x200000000 + off,false);
		test_assertTrue(true);
		test_assertOcta(reg_getSpecial(rG),254);
		test_assertOcta(reg_getSpecial(rL),10);
		// this proves that one read more would have failed
		test_assertOcta(reg_getSpecial(rS),0x200000000);
		test_assertOcta(reg_getSpecial(rO),0x200000000);
	}

	// undo mapping
	cache_write(CACHE_DATA,0x400000008,0,0);
	cache_write(CACHE_DATA,0x400000010,0,0);
	tc_removeAll(TC_DATA);
}
Example 27
/* add sector block_sector to inode inode */
static void
inode_add_block(struct inode* inode, block_sector_t block_sector)
{
	ASSERT(lock_held_by_current_thread(&inode->lock));

	//if(INODE_DEBUG && INODE_PRINT) inode_print(inode);
	
	/* local copy of the on-disk inode */
	struct inode_disk* id = malloc(sizeof(struct inode_disk));
	ASSERT(id != NULL);
	cache_read(inode->sector, id, 0, sizeof(struct inode_disk));

	/* direct block sectors */
	if(id->sector_count < INODE_DIRECT_BLOCKS)
	{
		if(INODE_DEBUG) printf("INODE: adding block %u to direct block sectors of %u @ position %u\n", block_sector, inode->sector, id->sector_count);

		/* add to direct blocks */
		id->direct_block_sectors[id->sector_count] = block_sector;
	}

	/* indirect block sector */
	else if (id->sector_count < INODE_DIRECT_BLOCKS + INODE_INDIRECT_BLOCKS)
	{
		if(INODE_DEBUG) printf("INODE: adding block %u to indirect block sector of %u\n", block_sector, inode->sector);

		/* offset in indirect list */
		off_t offset = id->sector_count - INODE_DIRECT_BLOCKS;

		/* create indirect block if this is
		 * the first element in the indirect list */
		if(offset == 0)
		{
			/* zero-filled block (malloc would hand back uninitialized memory) */
			void *zero = calloc(1, BLOCK_SECTOR_SIZE);
			ASSERT(zero != NULL);

			/* create indirect block */
			block_sector_t indirect_bs;
			free_map_allocate (1, &indirect_bs);

			/* save to the on-disk inode */
			id->indirect_block_sector = indirect_bs;

			/* write back empty indirect sector */
			cache_write(indirect_bs, zero, 0, BLOCK_SECTOR_SIZE);
			free(zero);
		}

		ASSERT(id->indirect_block_sector != 0);

		/* add sector to indirect sector list */
		cache_write(id->indirect_block_sector, (void *) &block_sector,
				offset * sizeof(block_sector_t), sizeof(block_sector_t));
	}

	/* doubly indirect sector */
	else if (id->sector_count < INODE_DIRECT_BLOCKS + INODE_INDIRECT_BLOCKS + INODE_DOUBLY_DIRECT_BLOCKS)
	{
		if(INODE_DEBUG) printf("INODE: adding block %u to doubly indirect block sector of %u\n", block_sector, inode->sector);

		/* count of all doubly indirect block sectors */
		off_t entry_cnt = id->sector_count - (INODE_DIRECT_BLOCKS + INODE_INDIRECT_BLOCKS);

		/* offset in the doubly indirect list */
		off_t offset_doubly_indirect = entry_cnt / INODE_INDIRECT_BLOCKS;

		/* offset in the indirect list */
		off_t offset_indirect = entry_cnt % INODE_INDIRECT_BLOCKS;

		//printf("INODE DIB: entries {%i} offset dib {%i} offset ib {%i}\n", entry_cnt, offset_doubly_indirect, offset_indirect);

		/* create doubly indirect block if this is
		 * the first element in the doubly indirect list */
		if(entry_cnt == 0)
		{
			/* zero-filled block */
			void *zero = calloc(1, BLOCK_SECTOR_SIZE);
			ASSERT(zero != NULL);

			/* create doubly indirect block sector */
			block_sector_t di_block_sector;
			free_map_allocate (1, &di_block_sector);

			/* save to the on-disk inode */
			id->doubly_indirect_block_sector = di_block_sector;

			/* write back empty doubly indirect sector */
			cache_write(di_block_sector, zero, 0, BLOCK_SECTOR_SIZE);
			free(zero);
		}

		/* create indirect block sector if this is the first entry */
		if(offset_indirect == 0)
		{
			/* zero-filled block */
			void *zero = calloc(1, BLOCK_SECTOR_SIZE);
			ASSERT(zero != NULL);

			/* create indirect block sector */
			block_sector_t indirect_block_sector;
			free_map_allocate (1, &indirect_block_sector);

			/* initialize empty indirect block sector */
			cache_write(indirect_block_sector, zero, 0, BLOCK_SECTOR_SIZE);
			free(zero);

			/* save indirect block sector to doubly indirect block sector */
			cache_write(id->doubly_indirect_block_sector, (void *) &indirect_block_sector,
					offset_doubly_indirect * sizeof(block_sector_t), sizeof(block_sector_t));

			//printf("added indirect block %u @ sector %u offset %i\n", indirect_block_sector, id->doubly_indirect_block_sector, offset_doubly_indirect * sizeof(block_sector_t));
		}
		
		ASSERT(id->doubly_indirect_block_sector != 0);

		/* fetch indirect block sector number */
		block_sector_t indirect_block_sector;
		cache_read(id->doubly_indirect_block_sector, (void *) &indirect_block_sector,
				offset_doubly_indirect * sizeof(block_sector_t), sizeof(block_sector_t));

		//printf("INODE-IDS: %u\n", indirect_block_sector);
		ASSERT(indirect_block_sector != 0);


		/* add block sector number to indirect block sector */
		cache_write(indirect_block_sector, (void *) &block_sector, 
				offset_indirect * sizeof(block_sector_t), sizeof(block_sector_t));

		//printf("done\n");

	}
	else
	{
		/* something went horribly wrong. */
		ASSERT(false);
	}

	/* increment sector count and write back */
	id->sector_count++;
	cache_write(inode->sector, (void *) id, 0, BLOCK_SECTOR_SIZE);

	free(id);

	//if(INODE_DEBUG && INODE_PRINT) inode_print(inode);
}
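The slot arithmetic in the doubly indirect branch is the part most easily gotten wrong, so here is a standalone check of the decomposition used above. The entry count per indirect sector is an assumption (512-byte sectors holding 128 sector numbers); the real value comes from the inode layout.

#include <assert.h>

#define ENTRIES_PER_INDIRECT 128	/* assumed: BLOCK_SECTOR_SIZE / sizeof(block_sector_t) */

int main(void) {
	/* doubly indirect entry 300 lands in indirect block 2, slot 44 */
	int entry_cnt = 300;
	assert(entry_cnt / ENTRIES_PER_INDIRECT == 2);	/* offset_doubly_indirect */
	assert(entry_cnt % ENTRIES_PER_INDIRECT == 44);	/* offset_indirect */
	return 0;
}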
Example no. 28
/* Writes SIZE bytes from BUFFER into INODE, starting at OFFSET.
   Returns the number of bytes actually written, which may be
   less than SIZE if an error occurs or the write would pass
   MAX_FILE_LENGTH.  A write beyond end of file extends the
   inode first. */
off_t
inode_write_at (struct inode *inode, const void *buffer_, off_t size,
                off_t offset)
{
  const uint8_t *buffer = buffer_;
  off_t bytes_written = 0;

  if (inode->deny_write_cnt)
    return 0;

  if (offset >= MAX_FILE_LENGTH)
    return 0;

  if (size > MAX_FILE_LENGTH - offset)
    size = MAX_FILE_LENGTH - offset;

  /* the clamp above guarantees size + offset <= MAX_FILE_LENGTH */
  if (size + offset > inode->data.length)
  {
    /* extend file */
    off_t length = size + offset;
    size_t sectors = bytes_to_sectors (length);
    for (;inode->data.block_count < sectors; inode->data.block_count++)
    {
      if (!allocate_block (&inode->data, inode->data.block_count))
      {
        /* update to the inode sector */
        cache_write (fs_cache, inode->sector, &inode->data, 0, BLOCK_SECTOR_SIZE);
        return 0;
      }
    }
    inode->data.length = length;

    /* update to the inode sector */
    cache_write (fs_cache, inode->sector, &inode->data, 0, BLOCK_SECTOR_SIZE);
  }

  while (size > 0)
    {
      /* Sector to write, starting byte offset within sector. */
      block_sector_t sector_idx = byte_to_sector (inode, offset);
      int sector_ofs = offset % BLOCK_SECTOR_SIZE;

      /* Bytes left in inode, bytes left in sector, lesser of the two. */
      off_t inode_left = inode_length (inode) - offset;
      int sector_left = BLOCK_SECTOR_SIZE - sector_ofs;
      int min_left = inode_left < sector_left ? inode_left : sector_left;

      /* Number of bytes to actually write into this sector. */
      int chunk_size = size < min_left ? size : min_left;
      if (chunk_size <= 0)
        break;

      cache_write (fs_cache, sector_idx, buffer + bytes_written,
        sector_ofs, chunk_size);

      /* Advance. */
      size -= chunk_size;
      offset += chunk_size;
      bytes_written += chunk_size;
    }

  return bytes_written;
}
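The clamping at the top is subtle enough to merit a standalone model. MAX_FILE_LENGTH is assumed to be 8 MiB here purely for illustration; the real constant comes from the inode layout.

#include <assert.h>

#define MAX_FILE_LENGTH (8 * 1024 * 1024L)  /* assumed value for this sketch */

/* Model of the size clamp at the top of inode_write_at. */
static long clamp_write(long size, long offset) {
  if (offset >= MAX_FILE_LENGTH)
    return 0;
  if (size > MAX_FILE_LENGTH - offset)
    return MAX_FILE_LENGTH - offset;
  return size;
}

int main(void) {
  assert(clamp_write(100, MAX_FILE_LENGTH - 40) == 40);  /* clipped at the limit */
  assert(clamp_write(100, MAX_FILE_LENGTH) == 0);        /* nothing can be written */
  assert(clamp_write(100, 0) == 100);                    /* unaffected */
  return 0;
}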
Example no. 29
static void test_cswap(void) {
	sInstrArgs iargs;
	reg_setSpecial(rL,0);
	reg_setSpecial(rG,255);

	// arrange things so that reading the memory location fails
	{
		// PTE 1 for 0x100000000 .. 0x1FFFFFFFF (-wx)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000003,0);
		tc_removeAll(TC_DATA);

		reg_setSpecial(rP,0x1234567890ABCDEF);
		reg_set(10,0x1234);
		mmu_writeOcta(0x100000000,0,MEM_SIDE_EFFECTS);

		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			test_assertOcta(reg_getSpecial(rP),0x1234567890ABCDEF);
			test_assertOcta(reg_get(10),0x1234);
			test_assertOcta(cache_read(CACHE_DATA,0x400000000,MEM_SIDE_EFFECTS),0);
		}
		else {
			ex_push(&env);
			iargs.x = 10;
			iargs.y = 0x100000000;
			iargs.z = 0;
			cpu_instr_cswap(&iargs);
			test_assertFalse(true);
		}
		ex_pop();
	}

	// arrange things so that writing the memory location fails
	{
		reg_setSpecial(rP,0x1234567890ABCDEF);
		reg_set(10,0x1234);
		mmu_writeOcta(0x100000000,0x1234567890ABCDEF,MEM_SIDE_EFFECTS);

		// PTE 1 for 0x100000000 .. 0x1FFFFFFFF (r-x)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000005,0);
		tc_removeAll(TC_DATA);

		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			test_assertOcta(reg_getSpecial(rP),0x1234567890ABCDEF);
			test_assertOcta(reg_get(10),0x1234);
			test_assertOcta(mmu_readOcta(0x100000000,MEM_SIDE_EFFECTS),0x1234567890ABCDEF);
		}
		else {
			ex_push(&env);
			iargs.x = 10;
			iargs.y = 0x100000000;
			iargs.z = 0;
			cpu_instr_cswap(&iargs);
			test_assertFalse(true);
		}
		ex_pop();
	}

	// arrange things so that setting register X fails
	{
		// PTE 1 for 0x100000000 .. 0x1FFFFFFFF (rwx)
		cache_write(CACHE_DATA,0x400000008,0x0000000400000007,0);
		tc_removeAll(TC_DATA);

		reg_setSpecial(rP,0x1234567890ABCDEF);
		mmu_writeOcta(0x100000000,0x1234567890ABCDEF,MEM_SIDE_EFFECTS);

		// push some registers down
		reg_setSpecial(rO,STACK_ADDR);
		reg_setSpecial(rS,STACK_ADDR);
		reg_set(254,0x1234);
		reg_push(254);

		jmp_buf env;
		int ex = setjmp(env);
		if(ex != EX_NONE) {
			test_assertOcta(reg_getSpecial(rP),0x1234567890ABCDEF);
			test_assertOcta(reg_getSpecial(rL),0);
			test_assertOcta(mmu_readOcta(0x100000000,MEM_SIDE_EFFECTS),0x1234567890ABCDEF);
		}
		else {
			ex_push(&env);
			iargs.x = 10;
			iargs.y = 0x100000000;
			iargs.z = 0;
			cpu_instr_cswap(&iargs);
			test_assertFalse(true);
		}
		ex_pop();
	}
}
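For reference, the behavior these tests exercise, per the MMIX specification: CSWAP compares the octa at the target address with rP; on a match it stores $X there and sets $X to 1, otherwise it loads the octa into rP and sets $X to 0. A standalone model (not the emulator's code):

#include <assert.h>
#include <stdint.h>

/* mem stands in for M8[A], rP for the prediction register,
   and the return value for the new contents of $X. */
static uint64_t cswap_model(uint64_t *mem, uint64_t *rP, uint64_t x) {
	if (*mem == *rP) {
		*mem = x;	/* the swap took place */
		return 1;
	}
	*rP = *mem;		/* remember the value found instead */
	return 0;
}

int main(void) {
	uint64_t cell = 0x1234567890ABCDEF, rP = 0x1234567890ABCDEF;
	assert(cswap_model(&cell, &rP, 42) == 1 && cell == 42);
	assert(cswap_model(&cell, &rP, 7) == 0 && rP == 42);
	return 0;
}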
Example no. 30
static bool allocate_block (struct inode_disk *disk_inode, off_t block_index)
{
  int level;
  off_t block;
  off_t indirect_index[3];
  if (block_index < indirect_begin)
  {
    ASSERT (disk_inode->direct[block_index] == 0);
    return allocate_zero_block (&disk_inode->direct[block_index]);
  }
  else if (block_index < dbl_indirect_begin)
  {
    off_t ind = block_index - indirect_begin;
    off_t head_index = ind / DIRECT_ENTRY_CNT;
    level = 1;
    if (disk_inode->indirect[head_index] == 0)
    {
      if (!allocate_zero_block (&disk_inode->indirect[head_index]))
      {
        return false;
      }
    }
    block = disk_inode->indirect[head_index];
    indirect_index[0] = ind % DIRECT_ENTRY_CNT;
  }
  else
  {
    off_t ind = block_index - dbl_indirect_begin;
    off_t head_index = ind / DIRECT_ENTRY_CNT / DIRECT_ENTRY_CNT;
    level = 2;
    if (disk_inode->dbl_indirect[head_index] == 0)
    {
      if (!allocate_zero_block (&disk_inode->dbl_indirect[head_index]))
      {
        return false;
      }
    }
    block = disk_inode->dbl_indirect[head_index];
    indirect_index[0] = ind % DIRECT_ENTRY_CNT;
    indirect_index[1] = ind / DIRECT_ENTRY_CNT % DIRECT_ENTRY_CNT;
  }

  while (level-- > 0)
  {
    block_sector_t next_block;
    off_t offset = sizeof(block_sector_t) * indirect_index[level];
    size_t size = sizeof(block_sector_t);

    cache_read (fs_cache, block, &next_block, offset, size);

    /* already there */
    if (next_block != 0)
    {
      block = next_block;
      continue;
    }

    if (!allocate_zero_block (&next_block))
    {
      return false;
    }

    cache_write (fs_cache, block, &next_block, offset, size);
    block = next_block;
  }

  return true;
}
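allocate_zero_block itself is not shown in this example. A plausible sketch, consistent with the call sites above but not necessarily identical to the real helper: allocate one sector from the free map and zero it through the cache.

/* Sketch only; matches how allocate_block uses the helper above. */
static bool
allocate_zero_block (block_sector_t *sector)
{
  static char zeros[BLOCK_SECTOR_SIZE];

  if (!free_map_allocate (1, sector))
    return false;
  cache_write (fs_cache, *sector, zeros, 0, BLOCK_SECTOR_SIZE);
  return true;
}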