Example #1
0
/*
 * NAME:	dbMount()
 *
 * FUNCTION:	initialize the block allocation map.
 *
 *		memory is allocated for the in-core bmap descriptor and
 *		the in-core descriptor is initialized from disk.
 *
 * PARAMETERS:
 *	ipbmap	- pointer to in-core inode for the block map.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOMEM	- insufficient memory
 *	-EIO	- i/o error
 */
int dbMount(struct inode *ipbmap)
{
	struct bmap *bmp;
	struct dbmap_disk *dbmp_le;
	struct metapage *mp;
	int i;

	/*
	 * allocate/initialize the in-memory bmap descriptor
	 */
	/* allocate memory for the in-memory bmap descriptor */
	bmp = kmalloc(sizeof(struct bmap), GFP_KERNEL);
	if (bmp == NULL)
		return -ENOMEM;

	/* read the on-disk bmap descriptor. */
	mp = read_metapage(ipbmap,
			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
			   PSIZE, 0);
	if (mp == NULL) {
		kfree(bmp);
		return -EIO;
	}

	/* copy the on-disk bmap descriptor to its in-memory version. */
	dbmp_le = (struct dbmap_disk *) mp->data;
	bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
	bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
	bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
	bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
	bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
	bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
	bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
	bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
	bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
	bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
	bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
	for (i = 0; i < MAXAG; i++)
		bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
	bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
	bmp->db_maxfreebud = dbmp_le->dn_maxfreebud;

	/* release the buffer. */
	release_metapage(mp);

	/* bind the bmap inode and the bmap descriptor to each other. */
	bmp->db_ipbmap = ipbmap;
	JFS_SBI(ipbmap->i_sb)->bmap = bmp;

	memset(bmp->db_active, 0, sizeof(bmp->db_active));

	/*
	 * allocate/initialize the bmap lock
	 */
	BMAP_LOCK_INIT(bmp);

	return (0);
}
/*
 * NAME:	jfs_mount_rw(sb, remount)
 *
 * FUNCTION:	Completes read-write mount, or remounts read-only volume
 *		as read-write
 */
int jfs_mount_rw(struct super_block *sb, int remount)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);  
	struct jfs_log *log;
	int rc;

	/*
	 * If we are re-mounting a previously read-only volume, we want to
	 * re-read the inode and block maps, since fsck.jfs may have updated
	 * them.
	 */
	if (remount) {
		if (chkSuper(sb) || (sbi->state != FM_CLEAN))
			return -EINVAL;

		truncate_inode_pages(sbi->ipimap->i_mapping, 0);
		truncate_inode_pages(sbi->ipbmap->i_mapping, 0);
		diUnmount(sbi->ipimap, 1);
		if ((rc = diMount(sbi->ipimap))) {
			jfs_err("jfs_mount_rw: diMount failed!");
			return rc;
		}

		dbUnmount(sbi->ipbmap, 1);
		if ((rc = dbMount(sbi->ipbmap))) {
			jfs_err("jfs_mount_rw: dbMount failed!");
			return rc;
		}
	}

	/*
	 * open/initialize log
	 */
	if ((rc = lmLogOpen(sb, &log)))
		return rc;

	JFS_SBI(sb)->log = log;

	/*
	 * update file system superblock;
	 */
	if ((rc = updateSuper(sb, FM_MOUNT))) {
		jfs_err("jfs_mount: updateSuper failed w/rc = %d", rc);
		lmLogClose(sb, log);
		JFS_SBI(sb)->log = 0;
		return rc;
	}

	/*
	 * write MOUNT log record of the file system
	 */
	logMOUNT(sb);

	return rc;
}
Example #3
0
/*
 *	dbSync()
 */
int dbSync(struct inode *ipbmap)
{
	struct dbmap_disk *dbmp_le;
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
	struct metapage *mp;
	int i;

	/*
	 * write bmap global control page
	 */
	/* get the buffer for the on-disk bmap descriptor. */
	mp = read_metapage(ipbmap,
			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
			   PSIZE, 0);
	if (mp == NULL) {
		jfs_err("dbSync: read_metapage failed!");
		return -EIO;
	}
	/* copy the in-memory version of the bmap to the on-disk version */
	dbmp_le = (struct dbmap_disk *) mp->data;
	dbmp_le->dn_mapsize = cpu_to_le64(bmp->db_mapsize);
	dbmp_le->dn_nfree = cpu_to_le64(bmp->db_nfree);
	dbmp_le->dn_l2nbperpage = cpu_to_le32(bmp->db_l2nbperpage);
	dbmp_le->dn_numag = cpu_to_le32(bmp->db_numag);
	dbmp_le->dn_maxlevel = cpu_to_le32(bmp->db_maxlevel);
	dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
	dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
	dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
	dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
	dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
	dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
	dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
	for (i = 0; i < MAXAG; i++)
		dbmp_le->dn_agfree[i] = cpu_to_le64(bmp->db_agfree[i]);
	dbmp_le->dn_agsize = cpu_to_le64(bmp->db_agsize);
	dbmp_le->dn_maxfreebud = bmp->db_maxfreebud;

	/* write the buffer */
	write_metapage(mp);

	/*
	 * write out dirty pages of bmap
	 */
	filemap_write_and_wait(ipbmap->i_mapping);

	diWriteSpecial(ipbmap, 0);

	return (0);
}
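dbSync() is the write-side mirror of dbMount() in Example #1: the dbmap_disk fields stay little-endian on disk, so the read path converts with le64_to_cpu()/le32_to_cpu() and the write path with cpu_to_le64()/cpu_to_le32(). A minimal user-space sketch of that round trip, using glibc's <endian.h> helpers in place of the kernel macros:

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t db_nfree = 123456;		/* in-core value, CPU byte order */
	uint64_t dn_nfree = htole64(db_nfree);	/* on-disk form, little-endian   */

	assert(le64toh(dn_nfree) == db_nfree);	/* the dbMount() direction       */
	return 0;
}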
Example #4
0
static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struct nameidata *nd)
{
	struct btstack btstack;
	ino_t inum;
	struct inode *ip;
	struct component_name key;
	const char *name = dentry->d_name.name;
	int len = dentry->d_name.len;
	int rc;

	jfs_info("jfs_lookup: name = %s", name);


	if ((name[0] == '.') && (len == 1))
		inum = dip->i_ino;
	else if (strcmp(name, "..") == 0)
		inum = PARENT(dip);
	else {
		if ((rc = get_UCSname(&key, dentry)))
			return ERR_PTR(rc);
		rc = dtSearch(dip, &key, &inum, &btstack, JFS_LOOKUP);
		free_UCSname(&key);
		if (rc == -ENOENT) {
			d_add(dentry, NULL);
			return ERR_PTR(0);
		} else if (rc) {
			jfs_err("jfs_lookup: dtSearch returned %d", rc);
			return ERR_PTR(rc);
		}
	}

	ip = iget(dip->i_sb, inum);
	if (ip == NULL || is_bad_inode(ip)) {
		jfs_err("jfs_lookup: iget failed on inum %d", (uint) inum);
		if (ip)
			iput(ip);
		return ERR_PTR(-EACCES);
	}

	if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)
		dentry->d_op = &jfs_ci_dentry_operations;

	dentry = d_splice_alias(ip, dentry);

	if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2))
		dentry->d_op = &jfs_ci_dentry_operations;

	return dentry;
}
Example #5
0
void jfs_clear_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	if (is_bad_inode(inode))
		/*
		 * We free the fs-dependent structure before making the
		 * inode bad
		 */
		return;

	jfs_info("jfs_clear_inode called ip = 0x%p", inode);

	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
	}

	ASSERT(list_empty(&ji->anon_inode_list));

	if (ji->atlhead) {
		jfs_err("jfs_clear_inode: inode %p has anonymous tlocks",
			inode);
		jfs_err("i_state = 0x%lx, cflag = 0x%lx", inode->i_state,
			ji->cflag);
	}

	free_jfs_inode(inode);
}
Example #6
0
static int jfs_open(struct inode *inode, struct file *file)
{
	int rc;

	if ((rc = dquot_file_open(inode, file)))
		return rc;

	/*
	 * We attempt to allow only one "active" file open per aggregate
	 * group.  Otherwise, appending to files in parallel can cause
	 * fragmentation within the files.
	 *
	 * If the file is empty, it was probably just created and going
	 * to be written to.  If it has a size, we'll hold off until the
	 * file is actually grown.
	 */
	if (S_ISREG(inode->i_mode) && file->f_mode & FMODE_WRITE &&
	    (inode->i_size == 0)) {
		struct jfs_inode_info *ji = JFS_IP(inode);
		spin_lock_irq(&ji->ag_lock);
		if (ji->active_ag == -1) {
			struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);
			ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
			atomic_inc( &jfs_sb->bmap->db_active[ji->active_ag]);
		}
		spin_unlock_irq(&ji->ag_lock);
	}

	return 0;
}
Example #7
0
int jfs_umount_rw(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;

	if (!log)
		return 0;

	/*
	 * close log: 
	 *
	 * remove file system from log active file system list.
	 */
	jfs_flush_journal(log, 2);

	/*
	 * Make sure all metadata makes it to disk
	 */
	dbSync(sbi->ipbmap);
	diSync(sbi->ipimap);
	fsync_inode_data_buffers(sb->s_bdev->bd_inode);

	updateSuper(sb, FM_CLEAN);
	sbi->log = NULL;

	return lmLogClose(sb, log);
}
Example #8
0
int jfs_umount_rw(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;

	if (!log)
		return 0;

	/*
	 * close log:
	 *
	 * remove file system from log active file system list.
	 */
	jfs_flush_journal(log, 1);

	/*
	 * Make sure all metadata makes it to disk
	 */
	dbSync(sbi->ipbmap);
	diSync(sbi->ipimap);

	/*
	 * Note that we have to do this even if sync_blockdev() will
	 * do exactly the same a few instructions later:  We can't
	 * mark the superblock clean before everything is flushed to
	 * disk.
	 */
	filemap_write_and_wait(sbi->direct_inode->i_mapping);

	updateSuper(sb, FM_CLEAN);

	return lmLogClose(sb);
}
Example #9
0
static int jfs_release(struct inode *inode, struct file *file)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}

	return 0;
}
int jfs_umount(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct inode *ipbmap = sbi->ipbmap;
	struct inode *ipimap = sbi->ipimap;
	struct inode *ipaimap = sbi->ipaimap;
	struct inode *ipaimap2 = sbi->ipaimap2;
	struct jfs_log *log;
	int rc = 0;

	jfs_info("UnMount JFS: sb:0x%p", sb);

	if ((log = sbi->log))
		/*
		 * Wait for outstanding transactions to be written to log:
		 */
		jfs_flush_journal(log, 2);

	diUnmount(ipimap, 0);

	diFreeSpecial(ipimap);
	sbi->ipimap = NULL;

	ipaimap2 = sbi->ipaimap2;
	if (ipaimap2) {
		diUnmount(ipaimap2, 0);
		diFreeSpecial(ipaimap2);
		sbi->ipaimap2 = NULL;
	}

	ipaimap = sbi->ipaimap;
	diUnmount(ipaimap, 0);
	diFreeSpecial(ipaimap);
	sbi->ipaimap = NULL;

	dbUnmount(ipbmap, 0);

	diFreeSpecial(ipbmap);
	sbi->ipbmap = NULL;

	filemap_write_and_wait(sbi->direct_inode->i_mapping);

	if (log) {		
		updateSuper(sb, FM_CLEAN);

		rc = lmLogClose(sb);
	}
	jfs_info("UnMount JFS Complete: rc = %d", rc);
	return rc;
}
/*
 *	updateSuper()
 *
 * update synchronously superblock if it is mounted read-write.
 */
int updateSuper(struct super_block *sb, uint state)
{
	struct jfs_superblock *j_sb;
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct buffer_head *bh;
	int rc;

	if (sbi->flag & JFS_NOINTEGRITY) {
		if (state == FM_DIRTY) {
			/* only remember the dirty state; don't touch the disk */
			sbi->p_state = state;
			return 0;
		} else if (state == FM_MOUNT) {
			/* save the pre-mount state; keep the disk marked dirty */
			sbi->p_state = sbi->state;
			state = FM_DIRTY;
		} else if (state == FM_CLEAN) {
			/* restore the state remembered at mount time */
			state = sbi->p_state;
		} else
			jfs_err("updateSuper: bad state");
	} else if (sbi->state == FM_DIRTY)
		return 0;
	
	if ((rc = readSuper(sb, &bh)))
		return rc;

	j_sb = (struct jfs_superblock *)bh->b_data;

	j_sb->s_state = cpu_to_le32(state);
	sbi->state = state;

	if (state == FM_MOUNT) {
		/* record log's dev_t and mount serial number */
		j_sb->s_logdev = cpu_to_le32(sbi->log->bdev->bd_dev);
		j_sb->s_logserial = cpu_to_le32(sbi->log->serial);
	} else if (state == FM_CLEAN) {
		/*
		 * If this volume is shared with OS/2, OS/2 will need to
		 * recalculate DASD usage, since we don't deal with it.
		 */
		if (j_sb->s_flag & cpu_to_le32(JFS_DASD_ENABLED))
			j_sb->s_flag |= cpu_to_le32(JFS_DASD_PRIME);
	}

	mark_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
	wait_on_buffer(bh);
	brelse(bh);

	return 0;
}
Example #12
0
static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry)
{
	struct btstack btstack;
	ino_t inum;
	struct inode *ip;
	struct component_name key;
	const char *name = dentry->d_name.name;
	int len = dentry->d_name.len;
	int rc;

	jfs_info("jfs_lookup: name = %s", name);


	if ((name[0] == '.') && (len == 1))
		inum = dip->i_ino;
	else if (strcmp(name, "..") == 0)
		inum = PARENT(dip);
	else {
		if ((rc =
		     get_UCSname(&key, dentry, JFS_SBI(dip->i_sb)->nls_tab)))
			return ERR_PTR(rc);
		rc = dtSearch(dip, &key, &inum, &btstack, JFS_LOOKUP);
		free_UCSname(&key);
		if (rc == -ENOENT) {
			d_add(dentry, NULL);
			return ERR_PTR(0);
		} else if (rc) {
			jfs_err("jfs_lookup: dtSearch returned %d", rc);
			return ERR_PTR(rc);
		}
	}

	ip = iget(dip->i_sb, inum);
	if (ip == NULL) {
		jfs_err("jfs_lookup: iget failed on inum %d", (uint) inum);
		return ERR_PTR(-EACCES);
	}
	if (is_bad_inode(ip)) {
		jfs_err("jfs_lookup: iget returned bad inode, inum = %d",
			(uint) inum);
		iput(ip);
		return ERR_PTR(-EACCES);
	}

	d_add(dentry, ip);

	return ERR_PTR(0);
}
Example #13
0
int jfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	int rc = 0;

	if (!(inode->i_state & I_DIRTY) ||
	    (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) {
		/* Make sure committed changes hit the disk */
		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
		return rc;
	}

	rc |= jfs_commit_inode(inode, 1);

	return rc ? -EIO : 0;
}
Example #14
0
/*
 * NAME:	dbUnmount()
 *
 * FUNCTION:	terminate the block allocation map in preparation for
 *		file system unmount.
 *
 *		the in-core bmap descriptor is written to disk and
 *		the memory for this descriptor is freed.
 *
 * PARAMETERS:
 *	ipbmap	- pointer to in-core inode for the block map.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 */
int dbUnmount(struct inode *ipbmap, int mounterror)
{
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;

	if (!(mounterror || isReadOnly(ipbmap)))
		dbSync(ipbmap);

	/*
	 * Invalidate the page cache buffers
	 */
	truncate_inode_pages(ipbmap->i_mapping, 0);

	/* free the memory for the in-memory bmap. */
	kfree(bmp);

	return (0);
}
Example #15
0
int updateSuper(struct super_block *sb, uint state)
{
	struct jfs_superblock *j_sb;
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct buffer_head *bh;
	int rc;

	if (sbi->flag & JFS_NOINTEGRITY) {
		if (state == FM_DIRTY) {
			sbi->p_state = state;
			return 0;
		} else if (state == FM_MOUNT) {
			sbi->p_state = sbi->state;
			state = FM_DIRTY;
		} else if (state == FM_CLEAN) {
			state = sbi->p_state;
		} else
			jfs_err("updateSuper: bad state");
	} else if (sbi->state == FM_DIRTY)
		return 0;

	if ((rc = readSuper(sb, &bh)))
		return rc;

	j_sb = (struct jfs_superblock *)bh->b_data;

	j_sb->s_state = cpu_to_le32(state);
	sbi->state = state;

	if (state == FM_MOUNT) {
		
		j_sb->s_logdev = cpu_to_le32(new_encode_dev(sbi->log->bdev->bd_dev));
		j_sb->s_logserial = cpu_to_le32(sbi->log->serial);
	} else if (state == FM_CLEAN) {
		if (j_sb->s_flag & cpu_to_le32(JFS_DASD_ENABLED))
			j_sb->s_flag |= cpu_to_le32(JFS_DASD_PRIME);
	}

	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);

	return 0;
}
int jfs_umount_rw(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;

	if (!log)
		return 0;

	jfs_flush_journal(log, 2);

	dbSync(sbi->ipbmap);
	diSync(sbi->ipimap);

	filemap_write_and_wait(sbi->direct_inode->i_mapping);

	updateSuper(sb, FM_CLEAN);

	return lmLogClose(sb);
}
Example #17
0
void jfs_write_inode(struct inode *inode, int wait)
{
	if (test_cflag(COMMIT_Nolink, inode))
		return;
	/*
	 * If COMMIT_DIRTY is not set, the inode isn't really dirty.
	 * It has been committed since the last change, but was still
	 * on the dirty inode list.
	 */
	 if (!test_cflag(COMMIT_Dirty, inode)) {
		/* Make sure committed changes hit the disk */
		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
		return;
	 }

	if (jfs_commit_inode(inode, wait)) {
		jfs_err("jfs_write_inode: jfs_commit_inode failed!");
	}
}
Example #18
0
void jfs_clear_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	if (is_bad_inode(inode))
		/*
		 * We free the fs-dependent structure before making the
		 * inode bad
		 */
		return;

	jfs_info("jfs_clear_inode called ip = 0x%p", inode);

	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
	}

	ASSERT(list_empty(&ji->anon_inode_list));

#ifdef CONFIG_JFS_POSIX_ACL
	if (ji->i_acl != JFS_ACL_NOT_CACHED) {
		posix_acl_release(ji->i_acl);
		ji->i_acl = JFS_ACL_NOT_CACHED;
	}
	if (ji->i_default_acl != JFS_ACL_NOT_CACHED) {
		posix_acl_release(ji->i_default_acl);
		ji->i_default_acl = JFS_ACL_NOT_CACHED;
	}
#endif

	if (ji->atlhead) {
		jfs_err("jfs_clear_inode: inode %p has anonymous tlocks",
			inode);
		jfs_err("i_state = 0x%lx, cflag = 0x%lx", inode->i_state,
			ji->cflag);
	}

	free_jfs_inode(inode);
}
Example #19
0
int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int rc = 0;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;

	mutex_lock(&inode->i_mutex);
	if (!(inode->i_state & I_DIRTY_ALL) ||
	    (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) {
		/* Make sure committed changes hit the disk */
		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
		mutex_unlock(&inode->i_mutex);
		return rc;
	}

	rc |= jfs_commit_inode(inode, 1);
	mutex_unlock(&inode->i_mutex);

	return rc ? -EIO : 0;
}
Example #20
0
int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int wait = 1; /* XXX fix fsync and use wbc->sync_mode == WB_SYNC_ALL; */

	if (test_cflag(COMMIT_Nolink, inode))
		return 0;
	/*
	 * If COMMIT_DIRTY is not set, the inode isn't really dirty.
	 * It has been committed since the last change, but was still
	 * on the dirty inode list.
	 */
	 if (!test_cflag(COMMIT_Dirty, inode)) {
		/* Make sure committed changes hit the disk */
		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
		return 0;
	 }

	if (jfs_commit_inode(inode, wait)) {
		jfs_err("jfs_write_inode: jfs_commit_inode failed!");
		return -EIO;
	} else
		return 0;
}
Example #21
0
int jfs_mount_rw(struct super_block *sb, int remount)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	if (remount) {
		if (chkSuper(sb) || (sbi->state != FM_CLEAN))
			return -EINVAL;

		truncate_inode_pages(sbi->ipimap->i_mapping, 0);
		truncate_inode_pages(sbi->ipbmap->i_mapping, 0);
		diUnmount(sbi->ipimap, 1);
		if ((rc = diMount(sbi->ipimap))) {
			jfs_err("jfs_mount_rw: diMount failed!");
			return rc;
		}

		dbUnmount(sbi->ipbmap, 1);
		if ((rc = dbMount(sbi->ipbmap))) {
			jfs_err("jfs_mount_rw: dbMount failed!");
			return rc;
		}
	}

	if ((rc = lmLogOpen(sb)))
		return rc;

	if ((rc = updateSuper(sb, FM_MOUNT))) {
		jfs_err("jfs_mount: updateSuper failed w/rc = %d", rc);
		lmLogClose(sb);
		return rc;
	}

	logMOUNT(sb);

	return rc;
}
Example #22
0
/*
 * NAME:    get_UCSname()
 *
 * FUNCTION:    Allocate and translate to unicode string
 *
 */
int get_UCSname(struct component_name * uniName, struct dentry *dentry)
{
    struct nls_table *nls_tab = JFS_SBI(dentry->d_sb)->nls_tab;
    int length = dentry->d_name.len;

    if (length > JFS_NAME_MAX)
        return -ENAMETOOLONG;

    uniName->name =
        kmalloc((length + 1) * sizeof(wchar_t), GFP_NOFS);

    if (uniName->name == NULL)
        return -ENOMEM;

    uniName->namlen = jfs_strtoUCS(uniName->name, dentry->d_name.name,
                       length, nls_tab);

    if (uniName->namlen < 0) {
        kfree(uniName->name);
        return uniName->namlen;
    }

    return 0;
}
Example #23
0
/*
 * NAME:	ialloc()
 *
 * FUNCTION:	Allocate a new inode
 *
 */
struct inode *ialloc(struct inode *parent, umode_t mode)
{
	struct super_block *sb = parent->i_sb;
	struct inode *inode;
	struct jfs_inode_info *jfs_inode;
	int rc;

	inode = new_inode(sb);
	if (!inode) {
		jfs_warn("ialloc: new_inode returned NULL!");
		return ERR_PTR(-ENOMEM);
	}

	jfs_inode = JFS_IP(inode);

	rc = diAlloc(parent, S_ISDIR(mode), inode);
	if (rc) {
		jfs_warn("ialloc: diAlloc returned %d!", rc);
		if (rc == -EIO)
			make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(rc);
	}

	inode->i_uid = current_fsuid();
	if (parent->i_mode & S_ISGID) {
		inode->i_gid = parent->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();

	/*
	 * New inodes need to save sane values on disk when
	 * uid & gid mount options are used
	 */
	jfs_inode->saved_uid = inode->i_uid;
	jfs_inode->saved_gid = inode->i_gid;

	/*
	 * Allocate inode to quota.
	 */
	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}

	inode->i_mode = mode;
	/* inherit flags from parent */
	jfs_inode->mode2 = JFS_IP(parent)->mode2 & JFS_FL_INHERIT;

	if (S_ISDIR(mode)) {
		jfs_inode->mode2 |= IDIRECTORY;
		jfs_inode->mode2 &= ~JFS_DIRSYNC_FL;
	}
	else {
		jfs_inode->mode2 |= INLINEEA | ISPARSE;
		if (S_ISLNK(mode))
			jfs_inode->mode2 &= ~(JFS_IMMUTABLE_FL|JFS_APPEND_FL);
	}
	jfs_inode->mode2 |= mode;

	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	jfs_inode->otime = inode->i_ctime.tv_sec;
	inode->i_generation = JFS_SBI(sb)->gengen++;

	jfs_inode->cflag = 0;

	/* Zero remaining fields */
	memset(&jfs_inode->acl, 0, sizeof(dxd_t));
	memset(&jfs_inode->ea, 0, sizeof(dxd_t));
	jfs_inode->next_index = 0;
	jfs_inode->acltype = 0;
	jfs_inode->btorder = 0;
	jfs_inode->btindex = 0;
	jfs_inode->bxflag = 0;
	jfs_inode->blid = 0;
	jfs_inode->atlhead = 0;
	jfs_inode->atltail = 0;
	jfs_inode->xtlid = 0;
	jfs_set_inode_flags(inode);

	jfs_info("ialloc returns inode = 0x%p\n", inode);

	return inode;
}
Example #24
0
/*
 * NAME:	jfs_mount(sb)
 *
 * FUNCTION:	vfs_mount()
 *
 * PARAMETER:	sb	- super block
 *
 * RETURN:	-EBUSY	- device already mounted or open for write
 *		-EBUSY	- cvrdvp already mounted;
 *		-EBUSY	- mount table full
 *		-ENOTDIR- cvrdvp not directory on a device mount
 *		-ENXIO	- device open failure
 */
int jfs_mount(struct super_block *sb)
{
	int rc = 0;		/* Return code          */
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct inode *ipaimap = NULL;
	struct inode *ipaimap2 = NULL;
	struct inode *ipimap = NULL;
	struct inode *ipbmap = NULL;

	/*
	 * read/validate superblock 
	 * (initialize mount inode from the superblock)
	 */
	if ((rc = chkSuper(sb))) {
		goto errout20;
	}

	ipaimap = diReadSpecial(sb, AGGREGATE_I, 0);
	if (ipaimap == NULL) {
		jfs_err("jfs_mount: Faild to read AGGREGATE_I");
		rc = -EIO;
		goto errout20;
	}
	sbi->ipaimap = ipaimap;

	jfs_info("jfs_mount: ipaimap:0x%p", ipaimap);

	/*
	 * initialize aggregate inode allocation map
	 */
	if ((rc = diMount(ipaimap))) {
		jfs_err("jfs_mount: diMount(ipaimap) failed w/rc = %d", rc);
		goto errout21;
	}

	/*
	 * open aggregate block allocation map
	 */
	ipbmap = diReadSpecial(sb, BMAP_I, 0);
	if (ipbmap == NULL) {
		rc = -EIO;
		goto errout22;
	}

	jfs_info("jfs_mount: ipbmap:0x%p", ipbmap);

	sbi->ipbmap = ipbmap;

	/*
	 * initialize aggregate block allocation map
	 */
	if ((rc = dbMount(ipbmap))) {
		jfs_err("jfs_mount: dbMount failed w/rc = %d", rc);
		goto errout22;
	}

	/*
	 * open the secondary aggregate inode allocation map
	 *
	 * This is a duplicate of the aggregate inode allocation map.
	 *
	 * hand craft a vfs in the same fashion as we did to read ipaimap.
	 * By adding INOSPEREXT (32) to the inode number, we are telling
	 * diReadSpecial that we are reading from the secondary aggregate
	 * inode table.  This also creates a unique entry in the inode hash
	 * table.
	 */
	if ((sbi->mntflag & JFS_BAD_SAIT) == 0) {
		ipaimap2 = diReadSpecial(sb, AGGREGATE_I, 1);
		if (ipaimap2 == 0) {
			jfs_err("jfs_mount: Faild to read AGGREGATE_I");
			rc = -EIO;
			goto errout35;
		}
		sbi->ipaimap2 = ipaimap2;

		jfs_info("jfs_mount: ipaimap2:0x%p", ipaimap2);

		/*
		 * initialize secondary aggregate inode allocation map
		 */
		if ((rc = diMount(ipaimap2))) {
			jfs_err("jfs_mount: diMount(ipaimap2) failed, rc = %d",
				rc);
			goto errout35;
		}
	} else
		/* Secondary aggregate inode table is not valid */
		sbi->ipaimap2 = NULL;

	/*
	 *      mount (the only/single) fileset
	 */
	/*
	 * open fileset inode allocation map (aka fileset inode)
	 */
	ipimap = diReadSpecial(sb, FILESYSTEM_I, 0);
	if (ipimap == NULL) {
		jfs_err("jfs_mount: Failed to read FILESYSTEM_I");
		/* open fileset secondary inode allocation map */
		rc = -EIO;
		goto errout40;
	}
	jfs_info("jfs_mount: ipimap:0x%p", ipimap);

	/* map further access of per fileset inodes by the fileset inode */
	sbi->ipimap = ipimap;

	/* initialize fileset inode allocation map */
	if ((rc = diMount(ipimap))) {
		jfs_err("jfs_mount: diMount failed w/rc = %d", rc);
		goto errout41;
	}

	goto out;

	/*
	 *      unwind on error
	 */
      errout41:		/* close fileset inode allocation map inode */
	diFreeSpecial(ipimap);

      errout40:		/* fileset closed */

	/* close secondary aggregate inode allocation map */
	if (ipaimap2) {
		diUnmount(ipaimap2, 1);
		diFreeSpecial(ipaimap2);
	}

      errout35:

	/* close aggregate block allocation map */
	dbUnmount(ipbmap, 1);
	diFreeSpecial(ipbmap);

      errout22:		/* close aggregate inode allocation map */

	diUnmount(ipaimap, 1);

      errout21:		/* close aggregate inodes */
	diFreeSpecial(ipaimap);
      errout20:		/* aggregate closed */

      out:

	if (rc)
		jfs_err("Mount JFS Failure: %d", rc);

	return rc;
}
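jfs_mount() unwinds a partially completed mount through the staged errout labels above: each label releases only what had already been set up before the failing step, and the success path jumps straight to out. A minimal stand-alone sketch of the same idiom, with made-up acquire()/release() placeholders rather than JFS calls:

#include <stdlib.h>

struct res { int unused; };

static struct res *acquire(void) { return malloc(sizeof(struct res)); }
static void release(struct res *r) { free(r); }

/* acquire two resources; on failure, undo only what already succeeded */
static int setup(struct res **pa, struct res **pb)
{
	struct res *a, *b;
	int rc = 0;

	a = acquire();
	if (a == NULL) {
		rc = -1;
		goto errout1;		/* nothing to undo yet */
	}

	b = acquire();
	if (b == NULL) {
		rc = -1;
		goto errout2;		/* undo the first stage only */
	}

	*pa = a;
	*pb = b;
	goto out;			/* success: skip the unwind */

      errout2:
	release(a);
      errout1:
      out:
	return rc;
}

int main(void)
{
	struct res *a = NULL, *b = NULL;
	int rc = setup(&a, &b);

	release(b);			/* release(NULL) is harmless here */
	release(a);
	return rc ? 1 : 0;
}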
Example #25
0
/*
 *	chkSuper()
 *
 * validate the superblock of the file system to be mounted and 
 * get the file system parameters.
 *
 * returns
 *	0 with fragsize set if check successful
 *	error code if not successful
 */
static int chkSuper(struct super_block *sb)
{
	int rc = 0;
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_superblock *j_sb;
	struct buffer_head *bh;
	int AIM_bytesize, AIT_bytesize;
	int expected_AIM_bytesize, expected_AIT_bytesize;
	s64 AIM_byte_addr, AIT_byte_addr, fsckwsp_addr;
	s64 byte_addr_diff0, byte_addr_diff1;
	s32 bsize;

	if ((rc = readSuper(sb, &bh)))
		return rc;
	j_sb = (struct jfs_superblock *)bh->b_data;

	/*
	 * validate superblock
	 */
	/* validate fs signature */
	if (strncmp(j_sb->s_magic, JFS_MAGIC, 4) ||
	    le32_to_cpu(j_sb->s_version) > JFS_VERSION) {
		rc = -EINVAL;
		goto out;
	}

	bsize = le32_to_cpu(j_sb->s_bsize);
#ifdef _JFS_4K
	if (bsize != PSIZE) {
		jfs_err("Currently only 4K block size supported!");
		rc = -EINVAL;
		goto out;
	}
#endif				/* _JFS_4K */

	jfs_info("superblock: flag:0x%08x state:0x%08x size:0x%Lx",
		 le32_to_cpu(j_sb->s_flag), le32_to_cpu(j_sb->s_state),
		 (unsigned long long) le64_to_cpu(j_sb->s_size));

	/* validate the descriptors for Secondary AIM and AIT */
	if ((j_sb->s_flag & cpu_to_le32(JFS_BAD_SAIT)) !=
	    cpu_to_le32(JFS_BAD_SAIT)) {
		expected_AIM_bytesize = 2 * PSIZE;
		AIM_bytesize = lengthPXD(&(j_sb->s_aim2)) * bsize;
		expected_AIT_bytesize = 4 * PSIZE;
		AIT_bytesize = lengthPXD(&(j_sb->s_ait2)) * bsize;
		AIM_byte_addr = addressPXD(&(j_sb->s_aim2)) * bsize;
		AIT_byte_addr = addressPXD(&(j_sb->s_ait2)) * bsize;
		byte_addr_diff0 = AIT_byte_addr - AIM_byte_addr;
		fsckwsp_addr = addressPXD(&(j_sb->s_fsckpxd)) * bsize;
		byte_addr_diff1 = fsckwsp_addr - AIT_byte_addr;
		if ((AIM_bytesize != expected_AIM_bytesize) ||
		    (AIT_bytesize != expected_AIT_bytesize) ||
		    (byte_addr_diff0 != AIM_bytesize) ||
		    (byte_addr_diff1 <= AIT_bytesize))
			j_sb->s_flag |= cpu_to_le32(JFS_BAD_SAIT);
	}

	if ((j_sb->s_flag & cpu_to_le32(JFS_GROUPCOMMIT)) !=
	    cpu_to_le32(JFS_GROUPCOMMIT))
		j_sb->s_flag |= cpu_to_le32(JFS_GROUPCOMMIT);

	/* validate fs state */
	if (j_sb->s_state != cpu_to_le32(FM_CLEAN) &&
	    !(sb->s_flags & MS_RDONLY)) {
		jfs_err("jfs_mount: Mount Failure: File System Dirty.");
		rc = -EINVAL;
		goto out;
	}

	sbi->state = le32_to_cpu(j_sb->s_state);
	sbi->mntflag = le32_to_cpu(j_sb->s_flag);

	/*
	 * JFS always does I/O by 4K pages.  Don't tell the buffer cache
	 * that we use anything else (leave s_blocksize alone).
	 */
	sbi->bsize = bsize;
	sbi->l2bsize = le16_to_cpu(j_sb->s_l2bsize);

	/*
	 * For now, ignore s_pbsize, l2bfactor.  All I/O going through buffer
	 * cache.
	 */
	sbi->nbperpage = PSIZE >> sbi->l2bsize;
	sbi->l2nbperpage = L2PSIZE - sbi->l2bsize;
	sbi->l2niperblk = sbi->l2bsize - L2DISIZE;
	if (sbi->mntflag & JFS_INLINELOG)
		sbi->logpxd = j_sb->s_logpxd;
	else {
		sbi->logdev = new_decode_dev(le32_to_cpu(j_sb->s_logdev));
		memcpy(sbi->uuid, j_sb->s_uuid, sizeof(sbi->uuid));
		memcpy(sbi->loguuid, j_sb->s_loguuid, sizeof(sbi->uuid));
	}
	sbi->fsckpxd = j_sb->s_fsckpxd;
	sbi->ait2 = j_sb->s_ait2;

      out:
	brelse(bh);
	return rc;
}
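chkSuper() derives the page/block geometry used throughout the other examples. As a concrete check of those formulas, a small stand-alone sketch for the usual 4K-block aggregate (PSIZE == 4096, L2PSIZE == 12), assuming the common 512-byte on-disk inode, i.e. L2DISIZE == 9:

#include <stdio.h>

int main(void)
{
	int l2bsize = 12;			/* log2(4096), from j_sb->s_l2bsize */
	int nbperpage = 4096 >> l2bsize;	/* fs blocks per 4K page   -> 1     */
	int l2nbperpage = 12 - l2bsize;		/* log2 of the above       -> 0     */
	int l2niperblk = l2bsize - 9;		/* log2(inodes per block)  -> 3 (8) */

	printf("nbperpage=%d l2nbperpage=%d l2niperblk=%d\n",
	       nbperpage, l2nbperpage, l2niperblk);
	return 0;
}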
Example #26
0
static int jfs_symlink(struct inode *dip, struct dentry *dentry,
                       const char *name)
{
    int rc;
    tid_t tid;
    ino_t ino = 0;
    struct component_name dname;
    int ssize;		/* source pathname size */
    struct btstack btstack;
    struct inode *ip = dentry->d_inode;
    unchar *i_fastsymlink;
    s64 xlen = 0;
    int bmask = 0, xsize;
    s64 extent = 0, xaddr;
    struct metapage *mp;
    struct super_block *sb;
    struct tblock *tblk;

    struct inode *iplist[2];

    jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name);

    dquot_initialize(dip);

    ssize = strlen(name) + 1;

    /*
     * search parent directory for entry/freespace
     * (dtSearch() returns parent directory page pinned)
     */

    if ((rc = get_UCSname(&dname, dentry)))
        goto out1;

    /*
     * allocate on-disk/in-memory inode for symbolic link:
     * (iAlloc() returns new, locked inode)
     */
    ip = ialloc(dip, S_IFLNK | 0777);
    if (IS_ERR(ip)) {
        rc = PTR_ERR(ip);
        goto out2;
    }

    tid = txBegin(dip->i_sb, 0);

    mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
    mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);

    rc = jfs_init_security(tid, ip, dip, &dentry->d_name);
    if (rc)
        goto out3;

    tblk = tid_to_tblock(tid);
    tblk->xflag |= COMMIT_CREATE;
    tblk->ino = ip->i_ino;
    tblk->u.ixpxd = JFS_IP(ip)->ixpxd;

    /* fix symlink access permission
     * (dir_create() ANDs in the u.u_cmask,
     * but symlinks really need to be 777 access)
     */
    ip->i_mode |= 0777;

    /*
     * write symbolic link target path name
     */
    xtInitRoot(tid, ip);

    /*
     * write source path name inline in on-disk inode (fast symbolic link)
     */

    if (ssize <= IDATASIZE) {
        ip->i_op = &jfs_fast_symlink_inode_operations;

        i_fastsymlink = JFS_IP(ip)->i_inline;
        memcpy(i_fastsymlink, name, ssize);
        ip->i_size = ssize - 1;

        /*
         * if symlink is > 128 bytes, we don't have the space to
         * store inline extended attributes
         */
        if (ssize > sizeof (JFS_IP(ip)->i_inline))
            JFS_IP(ip)->mode2 &= ~INLINEEA;

        jfs_info("jfs_symlink: fast symlink added  ssize:%d name:%s ",
                 ssize, name);
    }
    /*
     * write source path name in a single extent
     */
    else {
        jfs_info("jfs_symlink: allocate extent ip:0x%p", ip);

        ip->i_op = &jfs_symlink_inode_operations;
        ip->i_mapping->a_ops = &jfs_aops;

        /*
         * even though the data of symlink object (source
         * path name) is treated as non-journaled user data,
         * it is read/written thru buffer cache for performance.
         */
        sb = ip->i_sb;
        bmask = JFS_SBI(sb)->bsize - 1;
        xsize = (ssize + bmask) & ~bmask;
        xaddr = 0;
        xlen = xsize >> JFS_SBI(sb)->l2bsize;
        if ((rc = xtInsert(tid, ip, 0, 0, xlen, &xaddr, 0))) {
            txAbort(tid, 0);
            goto out3;
        }
        extent = xaddr;
        ip->i_size = ssize - 1;
        while (ssize) {
            /* This is kind of silly since PATH_MAX == 4K */
            int copy_size = min(ssize, PSIZE);

            mp = get_metapage(ip, xaddr, PSIZE, 1);

            if (mp == NULL) {
                xtTruncate(tid, ip, 0, COMMIT_PWMAP);
                rc = -EIO;
                txAbort(tid, 0);
                goto out3;
            }
            memcpy(mp->data, name, copy_size);
            flush_metapage(mp);
            ssize -= copy_size;
            name += copy_size;
            xaddr += JFS_SBI(sb)->nbperpage;
        }
    }

    /*
     * create entry for symbolic link in parent directory
     */
    rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE);
    if (rc == 0) {
        ino = ip->i_ino;
        rc = dtInsert(tid, dip, &dname, &ino, &btstack);
    }
    if (rc) {
        if (xlen)
            xtTruncate(tid, ip, 0, COMMIT_PWMAP);
        txAbort(tid, 0);
        /* discard new inode */
        goto out3;
    }

    mark_inode_dirty(ip);

    dip->i_ctime = dip->i_mtime = CURRENT_TIME;
    mark_inode_dirty(dip);
    /*
     * commit update of parent directory and link object
     */

    iplist[0] = dip;
    iplist[1] = ip;
    rc = txCommit(tid, 2, &iplist[0], 0);

out3:
    txEnd(tid);
    mutex_unlock(&JFS_IP(ip)->commit_mutex);
    mutex_unlock(&JFS_IP(dip)->commit_mutex);
    if (rc) {
        free_ea_wmap(ip);
        ip->i_nlink = 0;
        unlock_new_inode(ip);
        iput(ip);
    } else {
        d_instantiate(dentry, ip);
        unlock_new_inode(ip);
    }

out2:
    free_UCSname(&dname);

out1:
    jfs_info("jfs_symlink: rc:%d", rc);
    return rc;
}
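When the target is too long for a fast (inline) symlink, jfs_symlink() rounds the path length up to a whole number of file-system blocks before xtInsert() allocates the extent. A small stand-alone sketch of that rounding, assuming the usual 4K block size (bsize, bmask, ssize and xsize mirror the locals above):

#include <stdio.h>

int main(void)
{
	int bsize = 4096;			/* JFS_SBI(sb)->bsize     */
	int bmask = bsize - 1;
	int ssize = 5000;			/* strlen(target) + 1     */
	int xsize = (ssize + bmask) & ~bmask;	/* -> 8192, two 4K blocks */

	printf("xsize=%d xlen=%d\n", xsize, xsize / bsize);
	return 0;
}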
Example #27
0
/*
 * NAME:	ialloc()
 *
 * FUNCTION:	Allocate a new inode
 *
 */
struct inode *ialloc(struct inode *parent, umode_t mode)
{
	struct super_block *sb = parent->i_sb;
	struct inode *inode;
	struct jfs_inode_info *jfs_inode;
	int rc;

	inode = new_inode(sb);
	if (!inode) {
		jfs_warn("ialloc: new_inode returned NULL!");
		rc = -ENOMEM;
		goto fail;
	}

	jfs_inode = JFS_IP(inode);

	rc = diAlloc(parent, S_ISDIR(mode), inode);
	if (rc) {
		jfs_warn("ialloc: diAlloc returned %d!", rc);
		if (rc == -EIO)
			make_bad_inode(inode);
		goto fail_put;
	}

	if (insert_inode_locked(inode) < 0) {
		rc = -EINVAL;
		goto fail_unlock;
	}

	inode_init_owner(inode, parent, mode);
	/*
	 * New inodes need to save sane values on disk when
	 * uid & gid mount options are used
	 */
	jfs_inode->saved_uid = inode->i_uid;
	jfs_inode->saved_gid = inode->i_gid;

	/*
	 * Allocate inode to quota.
	 */
	dquot_initialize(inode);
	rc = dquot_alloc_inode(inode);
	if (rc)
		goto fail_drop;

	/* inherit flags from parent */
	jfs_inode->mode2 = JFS_IP(parent)->mode2 & JFS_FL_INHERIT;

	if (S_ISDIR(mode)) {
		jfs_inode->mode2 |= IDIRECTORY;
		jfs_inode->mode2 &= ~JFS_DIRSYNC_FL;
	}
	else {
		jfs_inode->mode2 |= INLINEEA | ISPARSE;
		if (S_ISLNK(mode))
			jfs_inode->mode2 &= ~(JFS_IMMUTABLE_FL|JFS_APPEND_FL);
	}
	jfs_inode->mode2 |= inode->i_mode;

	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	jfs_inode->otime = inode->i_ctime.tv_sec;
	inode->i_generation = JFS_SBI(sb)->gengen++;

	jfs_inode->cflag = 0;

	/* Zero remaining fields */
	memset(&jfs_inode->acl, 0, sizeof(dxd_t));
	memset(&jfs_inode->ea, 0, sizeof(dxd_t));
	jfs_inode->next_index = 0;
	jfs_inode->acltype = 0;
	jfs_inode->btorder = 0;
	jfs_inode->btindex = 0;
	jfs_inode->bxflag = 0;
	jfs_inode->blid = 0;
	jfs_inode->atlhead = 0;
	jfs_inode->atltail = 0;
	jfs_inode->xtlid = 0;
	jfs_set_inode_flags(inode);

	jfs_info("ialloc returns inode = 0x%p\n", inode);

	return inode;

fail_drop:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
fail_unlock:
	inode->i_nlink = 0;
	unlock_new_inode(inode);
fail_put:
	iput(inode);
fail:
	return ERR_PTR(rc);
}
int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
{
	int rc = 0;
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct inode *ipbmap = sbi->ipbmap;
	struct inode *ipbmap2;
	struct inode *ipimap = sbi->ipimap;
	struct jfs_log *log = sbi->log;
	struct bmap *bmp = sbi->bmap;
	s64 newLogAddress, newFSCKAddress;
	int newFSCKSize;
	s64 newMapSize = 0, mapSize;
	s64 XAddress, XSize, nblocks, xoff, xaddr, t64;
	s64 oldLVSize;
	s64 newFSSize;
	s64 VolumeSize;
	int newNpages = 0, nPages, newPage, xlen, t32;
	int tid;
	int log_formatted = 0;
	struct inode *iplist[1];
	struct jfs_superblock *j_sb, *j_sb2;
	s64 old_agsize;
	int agsizechanged = 0;
	struct buffer_head *bh, *bh2;

	

	if (sbi->mntflag & JFS_INLINELOG)
		oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd);
	else
		oldLVSize = addressPXD(&sbi->fsckpxd) +
		    lengthPXD(&sbi->fsckpxd);

	if (oldLVSize >= newLVSize) {
		printk(KERN_WARNING
		       "jfs_extendfs: volume hasn't grown, returning\n");
		goto out;
	}

	VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;

	if (VolumeSize) {
		if (newLVSize > VolumeSize) {
			printk(KERN_WARNING "jfs_extendfs: invalid size\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		
		bh = sb_bread(sb, newLVSize - 1);
		if (!bh) {
			printk(KERN_WARNING "jfs_extendfs: invalid size\n");
			rc = -EINVAL;
			goto out;
		}
		bforget(bh);
	}

	

	if (isReadOnly(ipbmap)) {
		printk(KERN_WARNING "jfs_extendfs: read-only file system\n");
		rc = -EROFS;
		goto out;
	}


	if ((sbi->mntflag & JFS_INLINELOG)) {
		if (newLogSize == 0) {
			newLogSize = newLVSize >> 8;
			t32 = (1 << (20 - sbi->l2bsize)) - 1;
			newLogSize = (newLogSize + t32) & ~t32;
			newLogSize =
			    min(newLogSize, MEGABYTE32 >> sbi->l2bsize);
		} else {
Example #29
0
/*
 *	jfs_extendfs()
 *
 * function: extend file system;
 *
 *   |-------------------------------|----------|----------|
 *   file system space               fsck       inline log
 *                                   workspace  space
 *
 * input:
 *	new LVSize: in LV blocks (required)
 *	new LogSize: in LV blocks (optional)
 *	new FSSize: in LV blocks (optional)
 *
 * new configuration:
 * 1. set new LogSize as specified or default from new LVSize;
 * 2. compute new FSCKSize from new LVSize;
 * 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
 *    assert(new FSSize >= old FSSize),
 *    i.e., the file system must not be shrunk;
 */
int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
{
	int rc = 0;
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct inode *ipbmap = sbi->ipbmap;
	struct inode *ipbmap2;
	struct inode *ipimap = sbi->ipimap;
	struct jfs_log *log = sbi->log;
	struct bmap *bmp = sbi->bmap;
	s64 newLogAddress, newFSCKAddress;
	int newFSCKSize;
	s64 newMapSize = 0, mapSize;
	s64 XAddress, XSize, nblocks, xoff, xaddr, t64;
	s64 oldLVSize;
	s64 newFSSize;
	s64 VolumeSize;
	int newNpages = 0, nPages, newPage, xlen, t32;
	int tid;
	int log_formatted = 0;
	struct inode *iplist[1];
	struct jfs_superblock *j_sb, *j_sb2;
	uint old_agsize;
	struct buffer_head *bh, *bh2;

	/* If the volume hasn't grown, get out now */

	if (sbi->mntflag & JFS_INLINELOG)
		oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd);
	else
		oldLVSize = addressPXD(&sbi->fsckpxd) +
		    lengthPXD(&sbi->fsckpxd);

	if (oldLVSize >= newLVSize) {
		printk(KERN_WARNING
		       "jfs_extendfs: volume hasn't grown, returning\n");
		goto out;
	}

	VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;

	if (VolumeSize) {
		if (newLVSize > VolumeSize) {
			printk(KERN_WARNING "jfs_extendfs: invalid size\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		/* check the device */
		bh = sb_bread(sb, newLVSize - 1);
		if (!bh) {
			printk(KERN_WARNING "jfs_extendfs: invalid size\n");
			rc = -EINVAL;
			goto out;
		}
		bforget(bh);
	}

	/* Can't extend write-protected drive */

	if (isReadOnly(ipbmap)) {
		printk(KERN_WARNING "jfs_extendfs: read-only file system\n");
		rc = -EROFS;
		goto out;
	}

	/*
	 *	reconfigure LV spaces
	 *	---------------------
	 *
	 * validate new size, or, if not specified, determine new size
	 */

	/*
	 * reconfigure inline log space:
	 */
	if ((sbi->mntflag & JFS_INLINELOG)) {
		if (newLogSize == 0) {
			/*
			 * no size specified: default to 1/256 of aggregate
			 * size; rounded up to a megabyte boundary;
			 */
			newLogSize = newLVSize >> 8;
			t32 = (1 << (20 - sbi->l2bsize)) - 1;
			newLogSize = (newLogSize + t32) & ~t32;
			newLogSize =
			    min(newLogSize, MEGABYTE32 >> sbi->l2bsize);
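			/*
			 * Worked example, assuming 4K blocks (l2bsize == 12)
			 * and MEGABYTE32 == 32 << 20: t32 is 255, so the size
			 * is rounded up to a 1 MB (256-block) boundary.  A
			 * 40 GB volume (~10,485,760 blocks) gives 40,960
			 * blocks after the shift, and the min() then caps it
			 * at MEGABYTE32 >> 12 == 8192 blocks, i.e. a 32 MB
			 * inline log.
			 */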
		} else {
Example #30
0
/*
 * NAME:	jfs_umount(vfsp, flags, crp)
 *
 * FUNCTION:	vfs_umount()
 *
 * PARAMETERS:	vfsp	- virtual file system pointer
 *		flags	- unmount for shutdown
 *		crp	- credential
 *
 * RETURN :	EBUSY	- device has open files
 */
int jfs_umount(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct inode *ipbmap = sbi->ipbmap;
	struct inode *ipimap = sbi->ipimap;
	struct inode *ipaimap = sbi->ipaimap;
	struct inode *ipaimap2 = sbi->ipaimap2;
	struct jfs_log *log;
	int rc = 0;

	jfs_info("UnMount JFS: sb:0x%p", sb);

	/*
	 *	update superblock and close log
	 *
	 * if mounted read-write and log based recovery was enabled
	 */
	if ((log = sbi->log))
		/*
		 * Wait for outstanding transactions to be written to log:
		 */
		jfs_flush_journal(log, 1);

	/*
	 * close fileset inode allocation map (aka fileset inode)
	 */
	diUnmount(ipimap, 0);

	diFreeSpecial(ipimap);
	sbi->ipimap = NULL;

	/*
	 * close secondary aggregate inode allocation map
	 */
	ipaimap2 = sbi->ipaimap2;
	if (ipaimap2) {
		diUnmount(ipaimap2, 0);
		diFreeSpecial(ipaimap2);
		sbi->ipaimap2 = NULL;
	}

	/*
	 * close aggregate inode allocation map
	 */
	ipaimap = sbi->ipaimap;
	diUnmount(ipaimap, 0);
	diFreeSpecial(ipaimap);
	sbi->ipaimap = NULL;

	/*
	 * close aggregate block allocation map
	 */
	dbUnmount(ipbmap, 0);

	diFreeSpecial(ipbmap);
	sbi->ipbmap = NULL;

	/*
	 * Make sure all metadata makes it to disk before we mark
	 * the superblock as clean
	 */
	filemap_write_and_wait(sbi->direct_inode->i_mapping);

	/*
	 * ensure all file system file pages are propagated to their
	 * home blocks on disk (and their in-memory buffer pages are
	 * invalidated) BEFORE updating file system superblock state
	 * (to signify file system is unmounted cleanly, and thus in
	 * consistent state) and log superblock active file system
	 * list (to signify skip logredo()).
	 */
	if (log) {		/* log = NULL if read-only mount */
		updateSuper(sb, FM_CLEAN);

		/*
		 * close log:
		 *
		 * remove file system from log active file system list.
		 */
		rc = lmLogClose(sb);
	}
	jfs_info("UnMount JFS Complete: rc = %d", rc);
	return rc;
}