Example #1
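/* .put_super callback: runs while the superblock is being torn down on
 * unmount; destroy the jbd2 journal that was set up at mount time, if any. */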
static void simplefs_put_super(struct super_block *sb)
{
    struct simplefs_super_block *sfs_sb = SIMPLEFS_SB(sb);
    if (sfs_sb->journal)
        WARN_ON(jbd2_journal_destroy(sfs_sb->journal) < 0);
    sfs_sb->journal = NULL;
}
Example #2
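/* Append the given on-disk inode at the end of the inode store block and bump
 * the superblock's inodes_count; the inode store buffer is marked dirty and
 * the superblock is written back via simplefs_sb_sync(). */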
void simplefs_inode_add(struct super_block *vsb, struct simplefs_inode *inode)
{
    struct simplefs_super_block *sb = SIMPLEFS_SB(vsb);
    struct buffer_head *bh;
    struct simplefs_inode *inode_iterator;

    if (mutex_lock_interruptible(&simplefs_inodes_mgmt_lock)) {
        sfs_trace("Failed to acquire mutex lock\n");
        return;
    }

    bh = sb_bread(vsb, SIMPLEFS_INODESTORE_BLOCK_NUMBER);
    BUG_ON(!bh);

    inode_iterator = (struct simplefs_inode *)bh->b_data;

    if (mutex_lock_interruptible(&simplefs_sb_lock)) {
        sfs_trace("Failed to acquire mutex lock\n");
        brelse(bh);
        mutex_unlock(&simplefs_inodes_mgmt_lock);
        return;
    }

    /* Append the new inode at the end of the inode store */
    inode_iterator += sb->inodes_count;

    memcpy(inode_iterator, inode, sizeof(struct simplefs_inode));
    sb->inodes_count++;

    mark_buffer_dirty(bh);
    simplefs_sb_sync(vsb);
    brelse(bh);

    mutex_unlock(&simplefs_sb_lock);
    mutex_unlock(&simplefs_inodes_mgmt_lock);
}
Example #3
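/* Mount-time helper: open the block device encoded in @devnum, initialise a
 * jbd2 journal on it and remember the journal in the in-memory superblock.
 * Returns 0 on success and 1 on failure. */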
static int simplefs_load_journal(struct super_block *sb, int devnum)
{
    struct journal_s *journal;
    char b[BDEVNAME_SIZE];
    dev_t dev;
    struct block_device *bdev;
    int hblock, blocksize, len;
    struct simplefs_super_block *sfs_sb = SIMPLEFS_SB(sb);

    dev = new_decode_dev(devnum);
    printk(KERN_INFO "Journal device is: %s\n", __bdevname(dev, b));

    bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
    if (IS_ERR(bdev))
        return 1;
    blocksize = sb->s_blocksize;
    hblock = bdev_logical_block_size(bdev);
    len = SIMPLEFS_MAX_FILESYSTEM_OBJECTS_SUPPORTED;

    journal = jbd2_journal_init_dev(bdev, sb->s_bdev, 1, -1, blocksize);
    if (!journal) {
        printk(KERN_ERR "Can't load journal\n");
        return 1;
    }
    journal->j_private = sb;

    sfs_sb->journal = journal;

    return 0;
}
Example #4
/* This function returns a simplefs_inode with the given inode_no
 * from the inode store, if it exists. */
struct simplefs_inode *simplefs_get_inode(struct super_block *sb,
					  uint64_t inode_no)
{
	struct simplefs_super_block *sfs_sb = SIMPLEFS_SB(sb);
	struct simplefs_inode *sfs_inode = NULL;

	int i;
	struct buffer_head *bh;

	/* The inode store could be read once at mount time and kept in memory
	 * permanently, but such a model would not scale to a filesystem with
	 * millions or billions of files (inodes). */
	bh = (struct buffer_head *)sb_bread(sb,
					    SIMPLEFS_INODESTORE_BLOCK_NUMBER);
	sfs_inode = (struct simplefs_inode *)bh->b_data;

#if 0
	if (mutex_lock_interruptible(&simplefs_inodes_mgmt_lock)) {
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		return NULL;
	}
#endif
	for (i = 0; i < sfs_sb->inodes_count; i++) {
		if (sfs_inode->inode_no == inode_no) {
			/* FIXME: bh->b_data is probably leaking */
			return sfs_inode;
		}
		sfs_inode++;
	}
//      mutex_unlock(&simplefs_inodes_mgmt_lock);

	return NULL;
}
Example #5
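/* Tear down the in-memory superblock state on unmount: destroy the inode slab
 * cache and free the private superblock info. */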
static void simplefs_put_super(struct super_block *sb) 
{
	struct simple_fs_sb_i *msblk = SIMPLEFS_SB(sb);
	if(msblk->inode_cachep)
		kmem_cache_destroy(msblk->inode_cachep);	
	kfree(msblk);
	sb->s_private = NULL;
}
Example #6
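/* .alloc_inode callback: allocate the filesystem-private inode wrapper from
 * the per-mount slab cache and hand the embedded VFS inode back to the VFS. */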
static struct inode* simplefs_alloc_inode(struct super_block *sb) 
{
	struct simple_fs_sb_i *msblk = SIMPLEFS_SB(sb);
	struct simple_fs_inode_i *inode = 
			kmem_cache_alloc(msblk->inode_cachep,GFP_KERNEL);
	if(!inode)
		return NULL;
	return &inode->vfs_inode;
}
Example #7
void simplefs_sync_metadata(struct super_block *sb)
{
	struct simple_fs_sb_i *msblk = SIMPLEFS_SB(sb);
	/*
	 * Start with inodes.
	 */
	simplefs_sync_metadata_buffer(msblk->inode_table);
	simplefs_sync_metadata_buffer(msblk->inode_bitmap);
	simplefs_sync_metadata_buffer(msblk->block_bitmap);
}
Example #8
static void simplefs_destroy_inode(struct inode *vfs_inode) 
{
	struct simple_fs_inode_i *inode = SIMPLEFS_INODE(vfs_inode);
	struct simple_fs_sb_i *sb = SIMPLEFS_SB(vfs_inode->i_sb);
	if (inode->indirect_block) {
		if(!buffer_uptodate(inode->indirect_block))
			sync_dirty_buffer(inode->indirect_block);
		bforget(inode->indirect_block);
	}
	kmem_cache_free(sb->inode_cachep,inode);
}
Example #9
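/* Copy the in-memory superblock into its on-disk block and wait for the
 * write to complete. */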
void simplefs_sb_sync(struct super_block *vsb)
{
	struct buffer_head *bh;
	struct simplefs_super_block *sb = SIMPLEFS_SB(vsb);

	bh = (struct buffer_head *)sb_bread(vsb,
					    SIMPLEFS_SUPERBLOCK_BLOCK_NUMBER);
	/* Copy the in-memory superblock into the buffer; reassigning b_data
	 * would corrupt the buffer_head's mapping of its page. */
	memcpy(bh->b_data, sb, sizeof(*sb));
	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);
}
Example #10
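/* Read the number of filesystem objects (inodes) recorded in the superblock,
 * taking the inode-management lock around the read. */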
static int simplefs_sb_get_objects_count(struct super_block *vsb,
        uint64_t * out)
{
    struct simplefs_super_block *sb = SIMPLEFS_SB(vsb);

    if (mutex_lock_interruptible(&simplefs_inodes_mgmt_lock)) {
        sfs_trace("Failed to acquire mutex lock\n");
        return -EINTR;
    }
    *out = sb->inodes_count;
    mutex_unlock(&simplefs_inodes_mgmt_lock);

    return 0;
}
Example #11
static int simplefs_sb_get_objects_count(struct super_block *vsb,
					 uint64_t * out)
{
	struct simplefs_super_block *sb = SIMPLEFS_SB(vsb);

	if (mutex_lock_interruptible(&simplefs_inodes_mgmt_lock)) {
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		return -EINTR;
	}
	*out = sb->inodes_count;
	mutex_unlock(&simplefs_inodes_mgmt_lock);

	return 0;
}
Example #12
static int simplefs_sb_load_journal(struct super_block *sb, struct inode *inode)
{
    struct journal_s *journal;
    struct simplefs_super_block *sfs_sb = SIMPLEFS_SB(sb);

    journal = jbd2_journal_init_inode(inode);
    if (!journal) {
        printk(KERN_ERR "Can't load journal\n");
        return 1;
    }
    journal->j_private = sb;

    sfs_sb->journal = journal;

    return 0;
}
Example #13
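/* Linearly scan the inode store from @start for an entry whose inode_no
 * matches @search; returns the matching slot, or NULL if no match is found
 * within inodes_count entries. */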
struct simplefs_inode *simplefs_inode_search(struct super_block *sb,
        struct simplefs_inode *start,
        struct simplefs_inode *search)
{
    uint64_t count = 0;
    /* Stop at the first match, but never read past the last in-use slot
     * of the inode store. */
    while (count < SIMPLEFS_SB(sb)->inodes_count
            && start->inode_no != search->inode_no) {
        count++;
        start++;
    }

    if (count < SIMPLEFS_SB(sb)->inodes_count) {
        return start;
    }

    return NULL;
}
Example #14
/* This function returns a simplefs_inode with the given inode_no
 * from the inode store, if it exists. */
struct simplefs_inode *simplefs_get_inode(struct super_block *sb,
        uint64_t inode_no)
{
    struct simplefs_super_block *sfs_sb = SIMPLEFS_SB(sb);
    struct simplefs_inode *sfs_inode = NULL;
    struct simplefs_inode *inode_buffer = NULL;

    int i;
    struct buffer_head *bh;

    /* The inode store could be read once at mount time and kept in memory
     * permanently, but such a model would not scale to a filesystem with
     * millions or billions of files (inodes). */
    bh = sb_bread(sb, SIMPLEFS_INODESTORE_BLOCK_NUMBER);
    BUG_ON(!bh);

    sfs_inode = (struct simplefs_inode *)bh->b_data;

#if 0
    if (mutex_lock_interruptible(&simplefs_inodes_mgmt_lock)) {
        printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
               __FILE__, __LINE__);
        return NULL;
    }
#endif
    for (i = 0; i < sfs_sb->inodes_count; i++) {
        if (sfs_inode->inode_no == inode_no) {
            inode_buffer = kmem_cache_alloc(sfs_inode_cachep, GFP_KERNEL);
            memcpy(inode_buffer, sfs_inode, sizeof(*inode_buffer));

            break;
        }
        sfs_inode++;
    }
//      mutex_unlock(&simplefs_inodes_mgmt_lock);

    brelse(bh);
    return inode_buffer;
}
Example #15
/* This function returns a blocknumber which is free.
 * The block will be removed from the freeblock list.
 *
 * In an ideal, production-ready filesystem we would not be dealing with raw
 * blocks; we would be using extents instead.
 *
 * If for some reason, the file creation/deletion failed, the block number
 * will still be marked as non-free. You need fsck to fix this.*/
int simplefs_sb_get_a_freeblock(struct super_block *vsb, uint64_t * out)
{
	struct simplefs_super_block *sb = SIMPLEFS_SB(vsb);
	int i;
	int ret = 0;

	if (mutex_lock_interruptible(&simplefs_sb_lock)) {
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		/* The lock is not held here, so jumping to the common exit
		 * path would unlock a mutex we never took. */
		return -EINTR;
	}

	/* Loop until we find a free block. We start the loop from 3,
	 * as all prior blocks will always be in use */
	for (i = 3; i < SIMPLEFS_MAX_FILESYSTEM_OBJECTS_SUPPORTED; i++) {
		if (sb->free_blocks & (1ULL << i)) {
			break;
		}
	}

	if (unlikely(i == SIMPLEFS_MAX_FILESYSTEM_OBJECTS_SUPPORTED)) {
		printk(KERN_ERR "No more free blocks available");
		ret = -ENOSPC;
		goto end;
	}

	*out = i;

	/* Remove the identified block from the free list */
	sb->free_blocks &= ~(1ULL << i);

	simplefs_sb_sync(vsb);

end:
	mutex_unlock(&simplefs_sb_lock);
	return ret;
}
Example #16
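/* .write_inode callback: copy the in-memory inode into its slot of the cached
 * inode-table buffer and mark that buffer dirty; the buffer is only written
 * out synchronously for WB_SYNC_ALL writeback. */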
static int simplefs_write_inode(struct inode *vfs_inode, struct writeback_control *wbc) 
{
	/*
	 * We only need to write the inode itself here, not its pages.
	 */
	struct simple_fs_inode_i *minode = SIMPLEFS_INODE(vfs_inode);
	struct simple_fs_sb_i *msblk = SIMPLEFS_SB(vfs_inode->i_sb);
	int inodes_per_block = msblk->sb.block_size / SIMPLEFS_INODE_SIZE;

	/*
	 * Find the inode table where we need to write this inode.
	 */
	struct buffer_head *inode_table = msblk->inode_table[(minode->inode.inode_no - 1)/inodes_per_block];
	struct simplefs_inode *disk_inode =
			(struct simplefs_inode *)(inode_table->b_data +
				((minode->inode.inode_no - 1) % inodes_per_block) * SIMPLEFS_INODE_SIZE);
	
	minode->inode.m_time = timespec_to_ns(&vfs_inode->i_mtime);
	minode->inode.m_time = cpu_to_le64(minode->inode.m_time);
	
	if (!S_ISDIR(vfs_inode->i_mode)) {
		minode->inode.file_size = i_size_read(vfs_inode);
		minode->inode.file_size = cpu_to_le64(minode->inode.file_size);
	}
	
	memcpy(disk_inode,&minode->inode,sizeof(struct simplefs_inode));
	mark_buffer_dirty(inode_table);
	if(wbc->sync_mode == WB_SYNC_ALL) {
		SFSDBG("[SFS] Writeback control was to sync all in %s \n",__FUNCTION__);		
		sync_dirty_buffer(inode_table);
	}
	/*
	 * Perhaps we should sync dirty buffer here,
	 * but let's see how far we can go. The inode
	 * may actually not be written if we don't force
	 * sync and let the flush thread do it for us.
	 */
	SFSDBG("Not syncing in %s\n",__FUNCTION__);
	return 0;
}
Example #17
/* FIXME: The write support is rudimentary. I have not figured out an efficient way to do
 * writes at particular offsets (even though I have written some untested code for this below). */
ssize_t simplefs_write(struct file * filp, const char __user * buf, size_t len,
		       loff_t * ppos)
{
	/* After the commit dd37978c5 in the upstream linux kernel,
	 * we can use just filp->f_inode instead of the
	 * f->f_path.dentry->d_inode redirection */
	struct inode *inode;
	struct simplefs_inode *sfs_inode;
	struct simplefs_inode *inode_iterator;
	struct buffer_head *bh;
	struct super_block *sb;

	char *buffer;
	int count;

	inode = filp->f_path.dentry->d_inode;
	sfs_inode = SIMPLEFS_INODE(inode);
	sb = inode->i_sb;

	if (*ppos + len > SIMPLEFS_DEFAULT_BLOCK_SIZE) {
		printk(KERN_ERR "File size write will exceed a block");
		return -ENOSPC;
	}

	bh = (struct buffer_head *)sb_bread(filp->f_path.dentry->d_inode->i_sb,
					    sfs_inode->data_block_number);

	if (!bh) {
		printk(KERN_ERR "Reading the block number [%llu] failed.",
		       sfs_inode->data_block_number);
		return 0;
	}
	buffer = (char *)bh->b_data;

	/* Move the pointer until the required byte offset */
	buffer += *ppos;

	if (copy_from_user(buffer, buf, len)) {
		brelse(bh);
		printk(KERN_ERR
		       "Error copying file contents from the userspace buffer to the kernel space\n");
		return -EFAULT;
	}
	*ppos += len;

	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);

	/* Set new size
	 * sfs_inode->file_size = max(sfs_inode->file_size, *ppos);
	 *
	 * FIXME: What to do if someone writes only some parts in between ?
	 * The above code will also fail in case a file is overwritten with
	 * a shorter buffer */

	if (mutex_lock_interruptible(&simplefs_inodes_mgmt_lock)) {
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		return -EINTR;
	}
	/* Save the modified inode */
	bh = (struct buffer_head *)sb_bread(sb,
					    SIMPLEFS_INODESTORE_BLOCK_NUMBER);

	sfs_inode->file_size = *ppos;

	inode_iterator = (struct simplefs_inode *)bh->b_data;

	if (mutex_lock_interruptible(&simplefs_sb_lock)) {
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		brelse(bh);
		mutex_unlock(&simplefs_inodes_mgmt_lock);
		return -EINTR;
	}

	count = 0;
	while (count < SIMPLEFS_SB(sb)->inodes_count
	       && inode_iterator->inode_no != sfs_inode->inode_no) {
		count++;
		inode_iterator++;
	}

	if (likely(count < SIMPLEFS_SB(sb)->inodes_count)) {
		inode_iterator->file_size = sfs_inode->file_size;
		printk(KERN_INFO
		       "The new filesize that is written is: [%llu] and len was: [%lu]\n",
		       sfs_inode->file_size, len);

		mark_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	} else {
		printk(KERN_ERR
		       "The new filesize could not be stored to the inode.");
		len = -EIO;
	}

	brelse(bh);

	mutex_unlock(&simplefs_sb_lock);
	mutex_unlock(&simplefs_inodes_mgmt_lock);

	return len;
}
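As the comment at the top of this function notes, kernels that contain commit dd37978c5 let the inode be taken straight from the file pointer instead of going through f->f_path.dentry->d_inode. A minimal sketch of that shortcut, assuming such a kernel (the helper name is hypothetical, not part of the module):

static inline struct inode *simplefs_file_inode(const struct file *filp)
{
	/* f_inode is cached in struct file on kernels with commit dd37978c5 */
	return filp->f_inode;
}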
Example #18
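/* Common backend for file and directory creation: pick an unused inode
 * number, grab a free data block, append the new inode to the inode store,
 * then record the new child in the parent directory's data block and update
 * the parent's child count. */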
static int simplefs_create_fs_object(struct inode *dir, struct dentry *dentry,
				     umode_t mode)
{
	struct inode *inode;
	struct simplefs_inode *sfs_inode;
	struct simplefs_inode *inode_iterator;
	struct super_block *sb;
	struct simplefs_dir_record *record;
	struct simplefs_inode *parent_dir_inode;
	struct buffer_head *bh;
	struct simplefs_dir_record *dir_contents_datablock;
	uint64_t count;
	int ret;

	if (mutex_lock_interruptible(&simplefs_directory_children_update_lock)) {
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		return -EINTR;
	}
	sb = dir->i_sb;

	ret = simplefs_sb_get_objects_count(sb, &count);
	if (ret < 0) {
		mutex_unlock(&simplefs_directory_children_update_lock);
		return ret;
	}

	if (unlikely(count >= SIMPLEFS_MAX_FILESYSTEM_OBJECTS_SUPPORTED)) {
		/* The above condition can be just == instead of the >= */
		printk(KERN_ERR
		       "Maximum number of objects supported by simplefs is already reached");
		mutex_unlock(&simplefs_directory_children_update_lock);
		return -ENOSPC;
	}

	if (!S_ISDIR(mode) && !S_ISREG(mode)) {
		printk(KERN_ERR
		       "Creation request but for neither a file nor a directory");
		mutex_unlock(&simplefs_directory_children_update_lock);
		return -EINVAL;
	}

	inode = new_inode(sb);
	if (!inode) {
		mutex_unlock(&simplefs_directory_children_update_lock);
		return -ENOMEM;
	}

	inode->i_sb = sb;
	inode->i_op = &simplefs_inode_ops;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inode->i_ino = 10;

	/* Loop until we get a unique inode number */
	while (simplefs_get_inode(sb, inode->i_ino)) {
		/* inode inode->i_ino already exists */
		inode->i_ino++;
	}

	/* FIXME: This is leaking. We need to free all in-memory inodes sometime */
	sfs_inode = kmalloc(sizeof(struct simplefs_inode), GFP_KERNEL);
	sfs_inode->inode_no = inode->i_ino;
	inode->i_private = sfs_inode;
	sfs_inode->mode = mode;

	if (S_ISDIR(mode)) {
		printk(KERN_INFO "New directory creation request\n");
		sfs_inode->dir_children_count = 0;
		inode->i_fop = &simplefs_dir_operations;
	} else if (S_ISREG(mode)) {
		printk(KERN_INFO "New file creation request\n");
		sfs_inode->file_size = 0;
		inode->i_fop = &simplefs_file_operations;
	}

	/* First get a free block and update the free map,
	 * Then add inode to the inode store and update the sb inodes_count,
	 * Then update the parent directory's inode with the new child.
	 *
	 * The above ordering helps us to maintain fs consistency
	 * even in most crashes
	 */
	ret = simplefs_sb_get_a_freeblock(sb, &sfs_inode->data_block_number);
	if (ret < 0) {
		printk(KERN_ERR "simplefs could not get a freeblock");
		mutex_unlock(&simplefs_directory_children_update_lock);
		return ret;
	}

	simplefs_inode_add(sb, sfs_inode);

	record = kmalloc(sizeof(struct simplefs_dir_record), GFP_KERNEL);
	record->inode_no = sfs_inode->inode_no;
	strcpy(record->filename, dentry->d_name.name);

	parent_dir_inode = SIMPLEFS_INODE(dir);
	bh = sb_bread(sb, parent_dir_inode->data_block_number);
	dir_contents_datablock = (struct simplefs_dir_record *)bh->b_data;

	/* Navigate to the free slot after the last record in the directory contents */
	dir_contents_datablock += parent_dir_inode->dir_children_count;

	memcpy(dir_contents_datablock, record,
	       sizeof(struct simplefs_dir_record));

	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);

	if (mutex_lock_interruptible(&simplefs_inodes_mgmt_lock)) {
		mutex_unlock(&simplefs_directory_children_update_lock);
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		return -EINTR;
	}

	bh = (struct buffer_head *)sb_bread(sb,
					    SIMPLEFS_INODESTORE_BLOCK_NUMBER);

	inode_iterator = (struct simplefs_inode *)bh->b_data;

	if (mutex_lock_interruptible(&simplefs_sb_lock)) {
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		brelse(bh);
		mutex_unlock(&simplefs_inodes_mgmt_lock);
		mutex_unlock(&simplefs_directory_children_update_lock);
		return -EINTR;
	}

	count = 0;
	while (count < SIMPLEFS_SB(sb)->inodes_count
	       && inode_iterator->inode_no != parent_dir_inode->inode_no) {
		count++;
		inode_iterator++;
	}

	if (likely(count < SIMPLEFS_SB(sb)->inodes_count)) {
		parent_dir_inode->dir_children_count++;
		inode_iterator->dir_children_count =
		    parent_dir_inode->dir_children_count;
		/* Updated the parent inode's dir count to reflect the new child too */

		mark_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	} else {
		printk(KERN_ERR
		       "The updated childcount could not be stored to the dir inode.");
		/* TODO: Remove the newly created inode from the disk and in-memory inode store
		 * and also update the superblock, freemaps etc. to reflect the same.
		 * Basically, Undo all actions done during this create call */
	}

	brelse(bh);

	mutex_unlock(&simplefs_sb_lock);
	mutex_unlock(&simplefs_inodes_mgmt_lock);
	mutex_unlock(&simplefs_directory_children_update_lock);

	inode_init_owner(inode, dir, mode);
	d_add(dentry, inode);

	return 0;
}
Example #19
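/* Journalled variant of simplefs_write(): the data-block update is wrapped in
 * a single-credit jbd2 handle (journal_start, get_write_access,
 * dirty_metadata, journal_stop) before the inode's new file_size is saved. */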
/* FIXME: The write support is rudimentary. I have not figured out an efficient way to do
 * writes at particular offsets (even though I have written some untested code for this below). */
ssize_t simplefs_write(struct file * filp, const char __user * buf, size_t len,
                       loff_t * ppos)
{
    /* After the commit dd37978c5 in the upstream linux kernel,
     * we can use just filp->f_inode instead of the
     * f->f_path.dentry->d_inode redirection */
    struct inode *inode;
    struct simplefs_inode *sfs_inode;
    struct buffer_head *bh;
    struct super_block *sb;
    struct simplefs_super_block *sfs_sb;
    handle_t *handle;

    char *buffer;

    int retval;

    sb = filp->f_path.dentry->d_inode->i_sb;
    sfs_sb = SIMPLEFS_SB(sb);

    handle = jbd2_journal_start(sfs_sb->journal, 1);
    if (IS_ERR(handle))
        return PTR_ERR(handle);
    retval = generic_write_checks(filp, ppos, &len, 0);
    if (retval) {
        jbd2_journal_stop(handle);
        return retval;
    }

    inode = filp->f_path.dentry->d_inode;
    sfs_inode = SIMPLEFS_INODE(inode);

    bh = sb_bread(filp->f_path.dentry->d_inode->i_sb,
                  sfs_inode->data_block_number);

    if (!bh) {
        printk(KERN_ERR "Reading the block number [%llu] failed.",
               sfs_inode->data_block_number);
        jbd2_journal_stop(handle);
        return 0;
    }
    buffer = (char *)bh->b_data;

    /* Move the pointer until the required byte offset */
    buffer += *ppos;

    retval = jbd2_journal_get_write_access(handle, bh);
    if (WARN_ON(retval)) {
        brelse(bh);
        jbd2_journal_stop(handle);
        sfs_trace("Can't get write access for bh\n");
        return retval;
    }

    if (copy_from_user(buffer, buf, len)) {
        brelse(bh);
        jbd2_journal_stop(handle);
        printk(KERN_ERR
               "Error copying file contents from the userspace buffer to the kernel space\n");
        return -EFAULT;
    }
    *ppos += len;

    retval = jbd2_journal_dirty_metadata(handle, bh);
    if (WARN_ON(retval)) {
        brelse(bh);
        jbd2_journal_stop(handle);
        return retval;
    }
    handle->h_sync = 1;
    retval = jbd2_journal_stop(handle);
    if (WARN_ON(retval)) {
        brelse(bh);
        return retval;
    }

    mark_buffer_dirty(bh);
    sync_dirty_buffer(bh);
    brelse(bh);

    /* Set new size
     * sfs_inode->file_size = max(sfs_inode->file_size, *ppos);
     *
     * FIXME: What to do if someone writes only some parts in between ?
     * The above code will also fail in case a file is overwritten with
     * a shorter buffer */
    if (mutex_lock_interruptible(&simplefs_inodes_mgmt_lock)) {
        sfs_trace("Failed to acquire mutex lock\n");
        return -EINTR;
    }
    sfs_inode->file_size = *ppos;
    retval = simplefs_inode_save(sb, sfs_inode);
    if (retval) {
        len = retval;
    }
    mutex_unlock(&simplefs_inodes_mgmt_lock);

    return len;
}
Example #20
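/* Map a logical file block to an on-disk block: logical block 0 uses the
 * inode's direct data_block_number, while logical blocks 1..N each go through
 * one 64-bit entry in a single indirect block. With create != 0, missing
 * blocks (including the indirect block itself) are allocated on demand. */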
static int simplefs_get_block(struct inode *vfs_inode, sector_t iblock,
				struct buffer_head *bh_result, int create)
{
	struct simple_fs_sb_i *msblk = SIMPLEFS_SB(vfs_inode->i_sb);
	struct simple_fs_inode_i *minode = SIMPLEFS_INODE(vfs_inode);
	uint64_t mapped_block = -1;
	
	if(iblock > msblk->sb.block_size/sizeof(uint64_t))
		goto fail_get_block;
	
	if(create) {
		/* Have we already allocated the indirect block?
		 * If yes, all we need to do is check whether the slot for this
		 * logical block within it is still zero.
		 */
		if(iblock) {
allocate_indirect_block:
			if(minode->inode.indirect_block_number) {
				if(!minode->indirect_block) {
					minode->indirect_block =
						sb_bread(vfs_inode->i_sb,
							minode->inode.indirect_block_number);
					if(!minode->indirect_block)
						goto fail_get_block;
				}
				uint64_t *block_offset = 
					((uint64_t*)(minode->indirect_block->b_data) + (iblock-1));
				mapped_block = le64_to_cpu(*block_offset);
				if(!mapped_block) {
					mapped_block = allocate_data_blocks(vfs_inode,1);
					if (!mapped_block) {
						SFSDBG(KERN_INFO "Error allocating indirect data block %s %d\n"
							,__FUNCTION__,__LINE__);
						goto fail_get_block;
					}
				}
				*block_offset = cpu_to_le64(mapped_block);
				mark_buffer_dirty(minode->indirect_block);
			}
			else { /*Allocate that indirect block and the block within*/
				minode->inode.indirect_block_number = allocate_data_blocks(vfs_inode,1);
				if(!minode->inode.indirect_block_number) {
					SFSDBG(KERN_INFO "Error allocating indirect block %s %d\n"
							,__FUNCTION__,__LINE__);
					goto fail_get_block;
				}
				else
					goto allocate_indirect_block;
			}
		}
		else { /*This is the first block for the file*/
			if( minode->inode.data_block_number ){
				mapped_block = le64_to_cpu(minode->inode.data_block_number);
			}
			else
			{
				minode->inode.data_block_number = allocate_data_blocks(vfs_inode,1);
				if(!minode->inode.data_block_number) {
					SFSDBG(KERN_INFO "Error allocating direct block %s %d\n"
							,__FUNCTION__,__LINE__);
					goto fail_get_block;
				}
				mapped_block = minode->inode.data_block_number;
				minode->inode.data_block_number = cpu_to_le64(
						minode->inode.data_block_number);
			}
		}
	}
	else {
	
		/*
		 * Find the mapping but don't create it.
		 */
		if(iblock) {
			if(minode->inode.indirect_block_number) {
				if(!minode->indirect_block) {
					minode->indirect_block =
						sb_bread(vfs_inode->i_sb,
							minode->inode.indirect_block_number);
					if(!minode->indirect_block)
						goto fail_get_block;
				}
				uint64_t *block_offset = 
					((uint64_t*)(minode->indirect_block->b_data) + (iblock-1));
				mapped_block = le64_to_cpu(*block_offset);
				if(!mapped_block)
					goto fail_get_block;
			}
			else
				goto fail_get_block;
		}
		else {
			if(!minode->inode.data_block_number)
				goto fail_get_block;
			mapped_block = le64_to_cpu(minode->inode.data_block_number);
		}
	}
	set_buffer_new(bh_result);
	map_bh(bh_result,vfs_inode->i_sb,mapped_block);
	return 0;
fail_get_block:
	return -EIO;
}
Example #21
static int allocate_data_blocks(struct inode *vfs_inode,int nr_blocks)
{
	struct simple_fs_sb_i *msblk = SIMPLEFS_SB(vfs_inode->i_sb);
	struct simple_fs_inode_i *minode = SIMPLEFS_INODE(vfs_inode);
	struct buffer_head *sb_buffer_bitmap = NULL;
	char *bitmap_buffer = NULL;
	int block_start = 0,block_no = 0;
	int bitmap_index = 0;
	int blocks_alloced = 0;
	int buffer_offset = 0; /* how many b_this_page steps have been taken within this page's buffers */
	int block_start_buffer_offset = 0;
	int block_start_bitmap_index = 0;
	if(!nr_blocks)
		return 0;
	mutex_lock(&msblk->sb_mutex);
new_bitmap_buffer:
		sb_buffer_bitmap = msblk->block_bitmap[bitmap_index];
		if(!sb_buffer_bitmap)
				goto out_failed;
allocate_block:
		bitmap_buffer = sb_buffer_bitmap->b_data;
		if( nr_blocks && 
				(block_no = 
					alloc_bmap(bitmap_buffer,sb_buffer_bitmap->b_size)) < 0) {
			sb_buffer_bitmap = sb_buffer_bitmap->b_this_page;
			if(sb_buffer_bitmap == msblk->block_bitmap[bitmap_index]) {
				bitmap_index++; /*Move to next buffer head in the array*/
				buffer_offset = 0;
				goto new_bitmap_buffer;
			}
			else 
				buffer_offset++;
			goto allocate_block;
		}
		else if(block_no >= 0) {
			nr_blocks--;
			blocks_alloced++;
			if(!block_start) {
				block_start = block_no + 
						(((sb_buffer_bitmap->b_size * buffer_offset) 
										+ (PAGE_SIZE*bitmap_index)) << 3);
				block_start_buffer_offset = buffer_offset;
				block_start_bitmap_index = bitmap_index;
			}
			if(buffer_uptodate(sb_buffer_bitmap))
				mark_buffer_dirty(sb_buffer_bitmap);
			block_no = -1;
			if(nr_blocks)
				goto allocate_block;
		}
	simplefs_sync_metadata(vfs_inode->i_sb);
	mutex_unlock(&msblk->sb_mutex);
	return block_start; /*Return starting block number of the allocated blocks*/
out_failed:
	if(blocks_alloced) {
		/*
		 * Get the starting buffer head from where allocations
		 * were started.
		 */
		sb_buffer_bitmap = msblk->block_bitmap[block_start_bitmap_index];
		/*
		 * Get the starting block number relative to the buffer head.
		 */
		block_no = block_start - ((
				(sb_buffer_bitmap->b_size * block_start_buffer_offset)
					+(PAGE_SIZE * block_start_bitmap_index))) * 8;		
		/*
		 * Move to the correct buffer head within the page.
		 */
		while(block_start_buffer_offset) {
			sb_buffer_bitmap = sb_buffer_bitmap->b_this_page;
			block_start_buffer_offset--;
		}
		
		bitmap_buffer = sb_buffer_bitmap->b_data;
		while(blocks_alloced) {
			if(free_bmap(bitmap_buffer,sb_buffer_bitmap->b_size,block_no++)){
					blocks_alloced--;
					if(buffer_uptodate(sb_buffer_bitmap))
						mark_buffer_dirty(sb_buffer_bitmap);
			}
			/*
			 * Nothing was freed because this block_no does not belong
			 * to the current buffer head. Move on to the next buffer
			 * head in the page, or, once we wrap around, to the next
			 * msblk->block_bitmap entry.
			 */
			else {
				sb_buffer_bitmap = sb_buffer_bitmap->b_this_page;
				if( sb_buffer_bitmap == msblk->block_bitmap[block_start_bitmap_index] )
					sb_buffer_bitmap = msblk->block_bitmap[++block_start_bitmap_index];
				block_no = 0; /*This is relative to the buffer head*/
				bitmap_buffer = sb_buffer_bitmap->b_data;
			}
		}
	}
	simplefs_sync_metadata(vfs_inode->i_sb);
	mutex_unlock(&msblk->sb_mutex);
	return 0;
}
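alloc_bmap() and free_bmap() are used above but are not shown in these examples. Below is a minimal sketch of what such helpers could look like, built on the kernel bitmap API from <linux/bitops.h>; the signatures and contract (alloc_bmap returns a claimed bit index or a negative value when the buffer is full, free_bmap returns non-zero only when the bit belongs to this buffer and was cleared) are assumptions inferred from the call sites, not the module's actual implementation.

static int alloc_bmap(char *bitmap_buffer, size_t size_bytes)
{
	unsigned long nbits = size_bytes * 8;
	unsigned long bit;

	/* Find the first unused block bit in this buffer and claim it. */
	bit = find_first_zero_bit((unsigned long *)bitmap_buffer, nbits);
	if (bit >= nbits)
		return -1;	/* no free block tracked by this buffer */
	set_bit(bit, (unsigned long *)bitmap_buffer);
	return bit;
}

static int free_bmap(char *bitmap_buffer, size_t size_bytes, int block_no)
{
	if (block_no < 0 || (size_t)block_no >= size_bytes * 8)
		return 0;	/* the bit lives in some other bitmap buffer */

	/* Release the bit so the block can be handed out again. */
	clear_bit(block_no, (unsigned long *)bitmap_buffer);
	return 1;
}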