Example #1
File: super.c Project: nhanh0/hah
//
// a portion of this function, particularly the VFS interface portion,
// was derived from minix or ext2's analog and evolved as the
// prototype did. You should be able to tell which portion by looking
// at the ext2 code and comparing. Its subfunctions contain no code
// used as a template unless they are so labeled.
//
static int reiserfs_remount (struct super_block * s, int * flags, char * data)
{
  struct reiserfs_super_block * rs;
  struct reiserfs_transaction_handle th ;
  unsigned long blocks;
  unsigned long mount_options;

  rs = SB_DISK_SUPER_BLOCK (s);

  if (!parse_options(data, &mount_options, &blocks))
  	return 0;

  if(blocks) {
      int rc = reiserfs_resize(s, blocks);
      if (rc != 0)
	  return rc;
  }

  if ((unsigned long)(*flags & MS_RDONLY) == (s->s_flags & MS_RDONLY)) {
    /* there is nothing to do to remount read-only fs as read-only fs */
    return 0;
  }
  
  if (*flags & MS_RDONLY) {
    /* try to remount file system with read-only permissions */
    if (sb_state(rs) == REISERFS_VALID_FS || s->u.reiserfs_sb.s_mount_state != REISERFS_VALID_FS) {
      return 0;
    }

    journal_begin(&th, s, 10) ;
    /* Mounting a rw partition read-only. */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    set_sb_state( rs, s->u.reiserfs_sb.s_mount_state );
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
  } else {
    s->u.reiserfs_sb.s_mount_state = sb_state(rs) ;
    s->s_flags &= ~MS_RDONLY ; /* now it is safe to call journal_begin */
    journal_begin(&th, s, 10) ;

    /* Mount a partition which is read-only, read-write */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    s->u.reiserfs_sb.s_mount_state = sb_state(rs);
    s->s_flags &= ~MS_RDONLY;
    set_sb_state( rs, REISERFS_ERROR_FS );
    /* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
    s->u.reiserfs_sb.s_mount_state = REISERFS_VALID_FS ;
  }
  /* this will force a full flush of all journal lists */
  SB_JOURNAL(s)->j_must_wait = 1 ;
  journal_end(&th, s, 10) ;

  if (!( *flags & MS_RDONLY ) )
    finish_unfinished( s );

  return 0;
}
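
Every example on this page follows the same journaling protocol around reiserfs_prepare_for_journal(): open a transaction with journal_begin(), prepare each buffer that is about to be modified, make the change, log the buffer with journal_mark_dirty(), and close the transaction with journal_end(). The fragment below is a minimal sketch of that sequence for a single super-block update, distilled from the examples here; it assumes the kernel-internal reiserfs declarations seen above (struct reiserfs_transaction_handle, SB_BUFFER_WITH_SB, SB_DISK_SUPER_BLOCK, set_sb_state and the journal calls) and is illustrative only, not a drop-in kernel function.

/* Minimal sketch (assumes the in-kernel reiserfs headers used above). */
static void example_set_sb_state(struct super_block *s, int new_state)
{
    struct reiserfs_transaction_handle th;

    journal_begin(&th, s, 1);                                 /* open a transaction */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); /* pin the buffer before editing it */
    set_sb_state(SB_DISK_SUPER_BLOCK(s), new_state);          /* modify the on-disk super block */
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));         /* log the modified buffer */
    journal_end(&th, s, 1);                                   /* close the transaction */
}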
Example #2
static void _reiserfs_free_block (struct reiserfs_transaction_handle *th,
                                  struct inode *inode, b_blocknr_t block,
                                  int for_unformatted)
{
    struct super_block * s = th->t_super;
    struct reiserfs_super_block * rs;
    struct buffer_head * sbh;
    struct reiserfs_bitmap_info *apbi;
    int nr, offset;

    BUG_ON (!th->t_trans_id);

    PROC_INFO_INC( s, free_block );

    rs = SB_DISK_SUPER_BLOCK (s);
    sbh = SB_BUFFER_WITH_SB (s);
    apbi = SB_AP_BITMAP(s);

    get_bit_address (s, block, &nr, &offset);

    if (nr >= sb_bmap_nr (rs)) {
        reiserfs_warning (s, "vs-4075: reiserfs_free_block: "
                          "block %lu is out of range on %s",
                          block, reiserfs_bdevname (s));
        return;
    }

    reiserfs_prepare_for_journal(s, apbi[nr].bh, 1 ) ;

    /* clear bit for the given block in bit map */
    if (!reiserfs_test_and_clear_le_bit (offset, apbi[nr].bh->b_data)) {
        reiserfs_warning (s, "vs-4080: reiserfs_free_block: "
                          "free_block (%s:%lu)[dev:blocknr]: bit already cleared",
                          reiserfs_bdevname (s), block);
    }
    apbi[nr].free_count ++;
    journal_mark_dirty (th, s, apbi[nr].bh);

    reiserfs_prepare_for_journal(s, sbh, 1) ;
    /* update super block */
    set_sb_free_blocks( rs, sb_free_blocks(rs) + 1 );

    journal_mark_dirty (th, s, sbh);
    if (for_unformatted)
        DQUOT_FREE_BLOCK_NODIRTY(inode, 1);
}
Example #3
static void _reiserfs_free_block (struct reiserfs_transaction_handle *th,
                                  b_blocknr_t block)
{
    struct super_block * s = th->t_super;
    struct reiserfs_super_block * rs;
    struct buffer_head * sbh;
    struct reiserfs_bitmap_info *apbi;
    int nr, offset;

    PROC_INFO_INC( s, free_block );

    rs = SB_DISK_SUPER_BLOCK (s);
    sbh = SB_BUFFER_WITH_SB (s);
    apbi = SB_AP_BITMAP(s);

    get_bit_address (s, block, &nr, &offset);

    if (nr >= sb_bmap_nr (rs)) {
        reiserfs_warning ("vs-4075: reiserfs_free_block: "
                          "block %lu is out of range on %s\n",
                          block, bdevname(s->s_dev));
        return;
    }

    reiserfs_prepare_for_journal(s, apbi[nr].bh, 1 ) ;

    /* clear bit for the given block in bit map */
    if (!reiserfs_test_and_clear_le_bit (offset, apbi[nr].bh->b_data)) {
        reiserfs_warning ("vs-4080: reiserfs_free_block: "
                          "free_block (%04x:%lu)[dev:blocknr]: bit already cleared\n",
                          s->s_dev, block);
    }
    if (offset < apbi[nr].first_zero_hint) {
        apbi[nr].first_zero_hint = offset;
    }
    apbi[nr].free_count ++;
    journal_mark_dirty (th, s, apbi[nr].bh);

    reiserfs_prepare_for_journal(s, sbh, 1) ;
    /* update super block */
    set_sb_free_blocks( rs, sb_free_blocks(rs) + 1 );

    journal_mark_dirty (th, s, sbh);
}
Example #4
				/* I wonder if it would be less modest
                                   now that we use journaling. -Hans */
void reiserfs_free_block (struct reiserfs_transaction_handle *th, unsigned long block)
{
    struct super_block * s = th->t_super;
    struct reiserfs_super_block * rs;
    struct buffer_head * sbh;
    struct buffer_head ** apbh;
    int nr, offset;

  RFALSE(!s, "vs-4060: trying to free block on nonexistent device");
  RFALSE(is_reusable (s, block, 1) == 0, "vs-4070: can not free such block");

  PROC_INFO_INC( s, free_block );

  rs = SB_DISK_SUPER_BLOCK (s);
  sbh = SB_BUFFER_WITH_SB (s);
  apbh = SB_AP_BITMAP (s);

  get_bit_address (s, block, &nr, &offset);

  /* mark it before we clear it, just in case */
  journal_mark_freed(th, s, block) ;

  reiserfs_prepare_for_journal(s, apbh[nr], 1 ) ;

  /* clear bit for the given block in bit map */
  if (!reiserfs_test_and_clear_le_bit (offset, apbh[nr]->b_data)) {
      reiserfs_warning ("vs-4080: reiserfs_free_block: "
			"free_block (%04x:%lu)[dev:blocknr]: bit already cleared\n", 
	    s->s_dev, block);
  }
  journal_mark_dirty (th, s, apbh[nr]);

  reiserfs_prepare_for_journal(s, sbh, 1) ;
  /* update super block */
  set_sb_free_blocks( rs, sb_free_blocks(rs) + 1 );

  journal_mark_dirty (th, s, sbh);
  s->s_dirt = 1;
}
Example #5
//
// a portion of this function, particularly the VFS interface portion,
// was derived from minix or ext2's analog and evolved as the
// prototype did. You should be able to tell which portion by looking
// at the ext2 code and comparing. Its subfunctions contain no code
// used as a template unless they are so labeled.
//
static void reiserfs_write_super_lockfs (struct super_block * s)
{

  int dirty = 0 ;
  struct reiserfs_transaction_handle th ;
  lock_kernel() ;
  if (!(s->s_flags & MS_RDONLY)) {
    journal_begin(&th, s, 1) ;
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    reiserfs_block_writes(&th) ;
    journal_end(&th, s, 1) ;
  }
  s->s_dirt = dirty;
  unlock_kernel() ;
}
Example #6
static void reiserfs_put_super (struct super_block * s)
{
  int i;
  struct reiserfs_transaction_handle th ;
  
  /* change file system state to current state if it was mounted with read-write permissions */
  if (!(s->s_flags & MS_RDONLY)) {
    journal_begin(&th, s, 10) ;
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    set_sb_state( SB_DISK_SUPER_BLOCK(s), s->u.reiserfs_sb.s_mount_state );
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
  }

  /* note, journal_release checks for readonly mount, and can decide not
  ** to do a journal_end
  */
  journal_release(&th, s) ;

  for (i = 0; i < SB_BMAP_NR (s); i ++)
    brelse (SB_AP_BITMAP (s)[i].bh);

  vfree (SB_AP_BITMAP (s));

  brelse (SB_BUFFER_WITH_SB (s));

  print_statistics (s);

  if (s->u.reiserfs_sb.s_kmallocs != 0) {
    reiserfs_warning ("vs-2004: reiserfs_put_super: allocated memory left %d\n",
		      s->u.reiserfs_sb.s_kmallocs);
  }

  if (s->u.reiserfs_sb.reserved_blocks != 0) {
    reiserfs_warning ("green-2005: reiserfs_put_super: reserved blocks left %d\n",
		      s->u.reiserfs_sb.reserved_blocks);
  }

  reiserfs_proc_unregister( s, "journal" );
  reiserfs_proc_unregister( s, "oidmap" );
  reiserfs_proc_unregister( s, "on-disk-super" );
  reiserfs_proc_unregister( s, "bitmap" );
  reiserfs_proc_unregister( s, "per-level" );
  reiserfs_proc_unregister( s, "super" );
  reiserfs_proc_unregister( s, "version" );
  reiserfs_proc_info_done( s );
  return;
}
Example #7
/* get unique object identifier */
__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	__u32 unused_objectid;

	BUG_ON(!th->t_trans_id);

	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	/* comment needed -Hans */
	unused_objectid = le32_to_cpu(map[1]);
	if (unused_objectid == U32_MAX) {
		reiserfs_warning(s, "reiserfs-15100", "no more object ids");
		reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s));
		return 0;
	}

	/* This incrementation allocates the first unused objectid. That
	   is to say, the first entry on the objectid map is the first
	   unused objectid, and by incrementing it we use it.  See below
	   where we check to see if we eliminated a sequence of unused
	   objectids.... */
	map[1] = cpu_to_le32(unused_objectid + 1);

	/* Now we check to see if we eliminated the last remaining member of
	   the first even sequence (and can eliminate the sequence by
	   eliminating its last objectid from oids), and can collapse the
	   first two odd sequences into one sequence.  If so, then the net
	   result is to eliminate a pair of objectids from oids.  We do this
	   by shifting the entire map to the left. */
	if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
		memmove(map + 1, map + 3,
			(sb_oid_cursize(rs) - 3) * sizeof(__u32));
		set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
	}

	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
	return unused_objectid;
}
Example #8
/* it searches for a window of zero bits with given minimum and maximum lengths in one bitmap
 * block; */
static int scan_bitmap_block (struct reiserfs_transaction_handle *th,
                              int bmap_n, int *beg, int boundary, int min, int max, int unfm)
{
    struct super_block *s = th->t_super;
    struct reiserfs_bitmap_info *bi=&SB_AP_BITMAP(s)[bmap_n];
    int end, next;
    int org = *beg;

    BUG_ON (!th->t_trans_id);

    RFALSE(bmap_n >= SB_BMAP_NR (s), "Bitmap %d is out of range (0..%d)",bmap_n, SB_BMAP_NR (s) - 1);
    PROC_INFO_INC( s, scan_bitmap.bmap );
    /* this is unclear and lacks comments, explain how journal bitmaps
       work here for the reader.  Convey a sense of the design here. What
       is a window? */
    /* - I mean `a window of zero bits' as in description of this function - Zam. */

    if ( !bi ) {
        reiserfs_warning (s, "NULL bitmap info pointer for bitmap %d", bmap_n);
        return 0;
    }
    if (buffer_locked (bi->bh)) {
        PROC_INFO_INC( s, scan_bitmap.wait );
        __wait_on_buffer (bi->bh);
    }

    while (1) {
cont:
        if (bi->free_count < min)
            return 0; // No free blocks in this bitmap

        /* search for a first zero bit -- beginning of a window */
        *beg = reiserfs_find_next_zero_le_bit
               ((unsigned long*)(bi->bh->b_data), boundary, *beg);

        if (*beg + min > boundary) {
            /* the search for a zero bit failed, or the rest of the bitmap
             * block cannot contain a zero window of minimum size */
            return 0;
        }

        if (unfm && is_block_in_journal(s,bmap_n, *beg, beg))
            continue;
        /* first zero bit found; we check next bits */
        for (end = *beg + 1;; end ++) {
            if (end >= *beg + max || end >= boundary || reiserfs_test_le_bit (end, bi->bh->b_data)) {
                next = end;
                break;
            }
            /* finding the other end of zero bit window requires looking into journal structures (in
             * case of searching for free blocks for unformatted nodes) */
            if (unfm && is_block_in_journal(s, bmap_n, end, &next))
                break;
        }

        /* now (*beg) points to beginning of zero bits window,
         * (end) points to one bit after the window end */
        if (end - *beg >= min) { /* it seems we have found window of proper size */
            int i;
            reiserfs_prepare_for_journal (s, bi->bh, 1);
            /* try to mark all the blocks as used, checking whether they are still free */
            for (i = *beg; i < end; i++) {
                /* It seems that we should not check in journal again. */
                if (reiserfs_test_and_set_le_bit (i, bi->bh->b_data)) {
                    /* bit was set by another process
                     * while we slept in prepare_for_journal() */
                    PROC_INFO_INC( s, scan_bitmap.stolen );
                    if (i >= *beg + min)	{
                        /* we can continue with a smaller set of allocated blocks,
                         * as long as its length is at least `min' */
                        end = i;
                        break;
                    }
                    /* otherwise we clear all the bits that were set ... */
                    while (--i >= *beg)
                        reiserfs_test_and_clear_le_bit (i, bi->bh->b_data);
                    reiserfs_restore_prepared_buffer (s, bi->bh);
                    *beg = org;
                    /* ... and search again in current block from beginning */
                    goto cont;
                }
            }
            bi->free_count -= (end - *beg);
            journal_mark_dirty (th, s, bi->bh);

            /* free block count calculation */
            reiserfs_prepare_for_journal (s, SB_BUFFER_WITH_SB(s), 1);
            PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (end - *beg));
            journal_mark_dirty (th, s, SB_BUFFER_WITH_SB(s));

            return end - (*beg);
        } else {
            *beg = next;
        }
    }
}
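
One detail the loop above handles explicitly: reiserfs_prepare_for_journal() may sleep, so after it returns the bit is claimed with an atomic test-and-set, and if another process took the block in the meantime the prepared buffer is released again with reiserfs_restore_prepared_buffer() (the same pattern appears in Example #16 below). A hedged sketch of just that race handling, again assuming the kernel-internal reiserfs bitmap and journal helpers shown in these examples:

/* Sketch only: claim one bitmap bit, coping with a race while we slept
 * in reiserfs_prepare_for_journal(). Returns 1 on success, 0 if the bit
 * was stolen by another process. */
static int example_claim_bit(struct reiserfs_transaction_handle *th,
                             struct super_block *s,
                             struct buffer_head *bitmap_bh, int bit)
{
    reiserfs_prepare_for_journal(s, bitmap_bh, 1);      /* may schedule */
    if (reiserfs_test_and_set_le_bit(bit, bitmap_bh->b_data)) {
        /* lost the race: undo the preparation and report failure */
        reiserfs_restore_prepared_buffer(s, bitmap_bh);
        return 0;
    }
    journal_mark_dirty(th, s, bitmap_bh);               /* log the bitmap change */
    return 1;
}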
Example #9
/*
 * process, that is going to call fix_nodes/do_balance must hold only
 * one path. If it holds 2 or more, it can get into endless waiting in
 * get_empty_nodes or its clones
 */
static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			   struct inode *new_dir, struct dentry *new_dentry)
{
	int retval;
	INITIALIZE_PATH(old_entry_path);
	INITIALIZE_PATH(new_entry_path);
	INITIALIZE_PATH(dot_dot_entry_path);
	struct item_head new_entry_ih, old_entry_ih, dot_dot_ih;
	struct reiserfs_dir_entry old_de, new_de, dot_dot_de;
	struct inode *old_inode, *new_dentry_inode;
	struct reiserfs_transaction_handle th;
	int jbegin_count;
	umode_t old_inode_mode;
	unsigned long savelink = 1;
	struct timespec ctime;

	/* three balancings: (1) old name removal, (2) new name insertion
	   and (3) maybe "save" link insertion
	   stat data updates: (1) old directory,
	   (2) new directory and (3) maybe old object stat data (when it is
	   directory) and (4) maybe stat data of object to which new entry
	   pointed initially and (5) maybe block containing ".." of
	   renamed directory
	   quota updates: two parent directories */
	jbegin_count =
	    JOURNAL_PER_BALANCE_CNT * 3 + 5 +
	    4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb);

	dquot_initialize(old_dir);
	dquot_initialize(new_dir);

	old_inode = old_dentry->d_inode;
	new_dentry_inode = new_dentry->d_inode;

	// make sure, that oldname still exists and points to an object we
	// are going to rename
	old_de.de_gen_number_bit_string = NULL;
	reiserfs_write_lock(old_dir->i_sb);
	retval =
	    reiserfs_find_entry(old_dir, old_dentry->d_name.name,
				old_dentry->d_name.len, &old_entry_path,
				&old_de);
	pathrelse(&old_entry_path);
	if (retval == IO_ERROR) {
		reiserfs_write_unlock(old_dir->i_sb);
		return -EIO;
	}

	if (retval != NAME_FOUND || old_de.de_objectid != old_inode->i_ino) {
		reiserfs_write_unlock(old_dir->i_sb);
		return -ENOENT;
	}

	old_inode_mode = old_inode->i_mode;
	if (S_ISDIR(old_inode_mode)) {
		// make sure, that directory being renamed has correct ".."
		// and that its new parent directory has not too many links
		// already

		if (new_dentry_inode) {
			if (!reiserfs_empty_dir(new_dentry_inode)) {
				reiserfs_write_unlock(old_dir->i_sb);
				return -ENOTEMPTY;
			}
		}

		/* directory is renamed, its parent directory will be changed,
		 ** so find ".." entry
		 */
		dot_dot_de.de_gen_number_bit_string = NULL;
		retval =
		    reiserfs_find_entry(old_inode, "..", 2, &dot_dot_entry_path,
					&dot_dot_de);
		pathrelse(&dot_dot_entry_path);
		if (retval != NAME_FOUND) {
			reiserfs_write_unlock(old_dir->i_sb);
			return -EIO;
		}

		/* inode number of .. must equal old_dir->i_ino */
		if (dot_dot_de.de_objectid != old_dir->i_ino) {
			reiserfs_write_unlock(old_dir->i_sb);
			return -EIO;
		}
	}

	retval = journal_begin(&th, old_dir->i_sb, jbegin_count);
	if (retval) {
		reiserfs_write_unlock(old_dir->i_sb);
		return retval;
	}

	/* add new entry (or find the existing one) */
	retval =
	    reiserfs_add_entry(&th, new_dir, new_dentry->d_name.name,
			       new_dentry->d_name.len, old_inode, 0);
	if (retval == -EEXIST) {
		if (!new_dentry_inode) {
			reiserfs_panic(old_dir->i_sb, "vs-7050",
				       "new entry is found, new inode == 0");
		}
	} else if (retval) {
		int err = journal_end(&th, old_dir->i_sb, jbegin_count);
		reiserfs_write_unlock(old_dir->i_sb);
		return err ? err : retval;
	}

	reiserfs_update_inode_transaction(old_dir);
	reiserfs_update_inode_transaction(new_dir);

	/* this makes it so an fsync on an open fd for the old name will
	 ** commit the rename operation
	 */
	reiserfs_update_inode_transaction(old_inode);

	if (new_dentry_inode)
		reiserfs_update_inode_transaction(new_dentry_inode);

	while (1) {
		// look for old name using corresponding entry key (found by reiserfs_find_entry)
		if ((retval =
		     search_by_entry_key(new_dir->i_sb, &old_de.de_entry_key,
					 &old_entry_path,
					 &old_de)) != NAME_FOUND) {
			pathrelse(&old_entry_path);
			journal_end(&th, old_dir->i_sb, jbegin_count);
			reiserfs_write_unlock(old_dir->i_sb);
			return -EIO;
		}

		copy_item_head(&old_entry_ih, get_ih(&old_entry_path));

		reiserfs_prepare_for_journal(old_inode->i_sb, old_de.de_bh, 1);

		// look for new name by reiserfs_find_entry
		new_de.de_gen_number_bit_string = NULL;
		retval =
		    reiserfs_find_entry(new_dir, new_dentry->d_name.name,
					new_dentry->d_name.len, &new_entry_path,
					&new_de);
		// reiserfs_add_entry should not return IO_ERROR, because it is called with essentially same parameters from
		// reiserfs_add_entry above, and we'll catch any i/o errors before we get here.
		if (retval != NAME_FOUND_INVISIBLE && retval != NAME_FOUND) {
			pathrelse(&new_entry_path);
			pathrelse(&old_entry_path);
			journal_end(&th, old_dir->i_sb, jbegin_count);
			reiserfs_write_unlock(old_dir->i_sb);
			return -EIO;
		}

		copy_item_head(&new_entry_ih, get_ih(&new_entry_path));

		reiserfs_prepare_for_journal(old_inode->i_sb, new_de.de_bh, 1);

		if (S_ISDIR(old_inode->i_mode)) {
			if ((retval =
			     search_by_entry_key(new_dir->i_sb,
						 &dot_dot_de.de_entry_key,
						 &dot_dot_entry_path,
						 &dot_dot_de)) != NAME_FOUND) {
				pathrelse(&dot_dot_entry_path);
				pathrelse(&new_entry_path);
				pathrelse(&old_entry_path);
				journal_end(&th, old_dir->i_sb, jbegin_count);
				reiserfs_write_unlock(old_dir->i_sb);
				return -EIO;
			}
			copy_item_head(&dot_dot_ih,
				       get_ih(&dot_dot_entry_path));
			// node containing ".." gets into transaction
			reiserfs_prepare_for_journal(old_inode->i_sb,
						     dot_dot_de.de_bh, 1);
		}
		/* we should check seals here, not do
		   this stuff, yes? Then, having
		   gathered everything into RAM we
		   should lock the buffers, yes?  -Hans */
		/* probably.  our rename needs to hold more
		 ** than one path at once.  The seals would
		 ** have to be written to deal with multi-path
		 ** issues -chris
		 */
		/* sanity checking before doing the rename - avoid races many
		 ** of the above checks could have scheduled.  We have to be
		 ** sure our items haven't been shifted by another process.
		 */
		if (item_moved(&new_entry_ih, &new_entry_path) ||
		    !entry_points_to_object(new_dentry->d_name.name,
					    new_dentry->d_name.len,
					    &new_de, new_dentry_inode) ||
		    item_moved(&old_entry_ih, &old_entry_path) ||
		    !entry_points_to_object(old_dentry->d_name.name,
					    old_dentry->d_name.len,
					    &old_de, old_inode)) {
			reiserfs_restore_prepared_buffer(old_inode->i_sb,
							 new_de.de_bh);
			reiserfs_restore_prepared_buffer(old_inode->i_sb,
							 old_de.de_bh);
			if (S_ISDIR(old_inode_mode))
				reiserfs_restore_prepared_buffer(old_inode->
								 i_sb,
								 dot_dot_de.
								 de_bh);
			continue;
		}
		if (S_ISDIR(old_inode_mode)) {
			if (item_moved(&dot_dot_ih, &dot_dot_entry_path) ||
			    !entry_points_to_object("..", 2, &dot_dot_de,
						    old_dir)) {
				reiserfs_restore_prepared_buffer(old_inode->
								 i_sb,
								 old_de.de_bh);
				reiserfs_restore_prepared_buffer(old_inode->
								 i_sb,
								 new_de.de_bh);
				reiserfs_restore_prepared_buffer(old_inode->
								 i_sb,
								 dot_dot_de.
								 de_bh);
				continue;
			}
		}

		RFALSE(S_ISDIR(old_inode_mode) &&
		       !buffer_journal_prepared(dot_dot_de.de_bh), "");

		break;
	}

	/* ok, all the changes can be done in one fell swoop when we
	   have claimed all the buffers needed. */

	mark_de_visible(new_de.de_deh + new_de.de_entry_num);
	set_ino_in_dir_entry(&new_de, INODE_PKEY(old_inode));
	journal_mark_dirty(&th, old_dir->i_sb, new_de.de_bh);

	mark_de_hidden(old_de.de_deh + old_de.de_entry_num);
	journal_mark_dirty(&th, old_dir->i_sb, old_de.de_bh);
	ctime = CURRENT_TIME_SEC;
	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	/* thanks to Alex Adriaanse <*****@*****.**> for patch which adds ctime update of
	   renamed object */
	old_inode->i_ctime = ctime;

	if (new_dentry_inode) {
		// adjust link number of the victim
		if (S_ISDIR(new_dentry_inode->i_mode)) {
			clear_nlink(new_dentry_inode);
		} else {
			drop_nlink(new_dentry_inode);
		}
		new_dentry_inode->i_ctime = ctime;
		savelink = new_dentry_inode->i_nlink;
	}

	if (S_ISDIR(old_inode_mode)) {
		/* adjust ".." of renamed directory */
		set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir));
		journal_mark_dirty(&th, new_dir->i_sb, dot_dot_de.de_bh);

		if (!new_dentry_inode)
			/* there (in new_dir) was no directory, so it got new link
			   (".."  of renamed directory) */
			INC_DIR_INODE_NLINK(new_dir);

		/* old directory lost one link - ".. " of renamed directory */
		DEC_DIR_INODE_NLINK(old_dir);
	}
	// looks like in 2.3.99pre3 brelse is atomic. so we can use pathrelse
	pathrelse(&new_entry_path);
	pathrelse(&dot_dot_entry_path);

	// FIXME: this reiserfs_cut_from_item's return value may screw up
	// anybody, but it will panic if will not be able to find the
	// entry. This needs one more clean up
	if (reiserfs_cut_from_item
	    (&th, &old_entry_path, &(old_de.de_entry_key), old_dir, NULL,
	     0) < 0)
		reiserfs_error(old_dir->i_sb, "vs-7060",
			       "couldn't not cut old name. Fsck later?");

	old_dir->i_size -= DEH_SIZE + old_de.de_entrylen;

	reiserfs_update_sd(&th, old_dir);
	reiserfs_update_sd(&th, new_dir);
	reiserfs_update_sd(&th, old_inode);

	if (new_dentry_inode) {
		if (savelink == 0)
			add_save_link(&th, new_dentry_inode,
				      0 /* not truncate */ );
		reiserfs_update_sd(&th, new_dentry_inode);
	}

	retval = journal_end(&th, old_dir->i_sb, jbegin_count);
	reiserfs_write_unlock(old_dir->i_sb);
	return retval;
}
Example #10
int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
{
	struct reiserfs_super_block * sb;
        struct reiserfs_bitmap_info *bitmap;
	struct buffer_head * bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;
	
	struct reiserfs_list_bitmap * jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];
	
	unsigned long int block_count, free_blocks;
	int i;
	int copy_size ;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}	
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr *	SB_BUFFER_WITH_SB(s)->b_size 
		!= REISERFS_DISK_OFFSET_IN_BYTES ) {
		printk("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}
       
	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) -
	        (SB_BMAP_NR(s) - 1) * s->s_blocksize * 8;
	
	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / ( s->s_blocksize * 8 );
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new) 
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;

	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr     = SB_BMAP_NR(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {	    
	    /* reallocate journal bitmaps */
	    if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
		printk("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
		unlock_super(s) ;
		return -ENOMEM ;
	    }
	    /* the new journal bitmaps are zero filled, now we copy in the bitmap
	    ** node pointers from the old journal bitmap structs, and then
	    ** transfer the new data structures into the journal struct.
	    **
	    ** using the copy_size var below allows this code to work for
	    ** both shrinking and expanding the FS.
	    */
	    copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr ;
	    copy_size = copy_size * sizeof(struct reiserfs_list_bitmap_node *) ;
	    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
		struct reiserfs_bitmap_node **node_tmp ;
		jb = SB_JOURNAL(s)->j_list_bitmap + i ;
		memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size) ;

		/* just in case vfree schedules on us, copy the new
		** pointer into the journal struct before freeing the 
		** old one
		*/
		node_tmp = jb->bitmaps ;
		jb->bitmaps = jbitmap[i].bitmaps ;
		vfree(node_tmp) ;
	    }	
	
	    /* allocate additional bitmap blocks, reallocate array of bitmap
	     * block pointers */
	    bitmap = vmalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
	    if (!bitmap) {
		printk("reiserfs_resize: unable to allocate memory.\n");
		return -ENOMEM;
	    }
	    memset (bitmap, 0, sizeof (struct reiserfs_bitmap_info) * SB_BMAP_NR(s));
	    for (i = 0; i < bmap_nr; i++)
		bitmap[i] = SB_AP_BITMAP(s)[i];
	    for (i = bmap_nr; i < bmap_nr_new; i++) {
		bitmap[i].bh = sb_getblk(s, i * s->s_blocksize * 8);
		memset(bitmap[i].bh->b_data, 0, sb_blocksize(sb));
		reiserfs_test_and_set_le_bit(0, bitmap[i].bh->b_data);

		set_buffer_uptodate(bitmap[i].bh);
		mark_buffer_dirty(bitmap[i].bh) ;
		sync_dirty_buffer(bitmap[i].bh);
		// update bitmap_info stuff
		bitmap[i].first_zero_hint=1;
		bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
	    }	
	    /* free old bitmap blocks array */
	    vfree(SB_AP_BITMAP(s));
	    SB_AP_BITMAP(s) = bitmap;
	}
	
	/* begin transaction */
	journal_begin(&th, s, 10);

	/* correct last bitmap blocks in old and new disk layout */
	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr - 1].bh, 1);
	for (i = block_r; i < s->s_blocksize * 8; i++)
	    reiserfs_test_and_clear_le_bit(i, 
					   SB_AP_BITMAP(s)[bmap_nr - 1].bh->b_data);
	SB_AP_BITMAP(s)[bmap_nr - 1].free_count += s->s_blocksize * 8 - block_r;
	if ( !SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint)
	    SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint = block_r;

	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr - 1].bh);

	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh, 1);
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
	    reiserfs_test_and_set_le_bit(i,
					 SB_AP_BITMAP(s)[bmap_nr_new - 1].bh->b_data);
	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh);
 
	SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count -= s->s_blocksize * 8 - block_r_new;
	/* Extreme case where last bitmap is the only valid block in itself. */
	if ( !SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count )
	    SB_AP_BITMAP(s)[bmap_nr_new - 1].first_zero_hint = 0;
 	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
	free_blocks = SB_FREE_BLOCKS(s);
	PUT_SB_FREE_BLOCKS(s, free_blocks + (block_count_new - block_count - (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_nr_new);
	s->s_dirt = 1;

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
	
	SB_JOURNAL(s)->j_must_wait = 1;
	journal_end(&th, s, 10);

	return 0;
}
Example #11
/* makes object identifier unused */
void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
			       __u32 objectid_to_release)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	int i = 0;

	BUG_ON(!th->t_trans_id);
	//return;
	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));

	/* start at the beginning of the objectid map (i = 0) and go to
	   the end of it (i = disk_sb->s_oid_cursize).  Linear search is
	   what we use, though it is possible that binary search would be
	   more efficient after performing lots of deletions (which is
	   when oids is large.)  We only check even i's. */
	while (i < sb_oid_cursize(rs)) {
		if (objectid_to_release == le32_to_cpu(map[i])) {
			/* This incrementation unallocates the objectid. */
			//map[i]++;
			le32_add_cpu(&map[i], 1);

			/* Did we unallocate the last member of an odd sequence, and can shrink oids? */
			if (map[i] == map[i + 1]) {
				/* shrink objectid map */
				memmove(map + i, map + i + 2,
					(sb_oid_cursize(rs) - i -
					 2) * sizeof(__u32));
				//disk_sb->s_oid_cursize -= 2;
				set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);

				RFALSE(sb_oid_cursize(rs) < 2 ||
				       sb_oid_cursize(rs) > sb_oid_maxsize(rs),
				       "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
				       sb_oid_cursize(rs), sb_oid_maxsize(rs));
			}
			return;
		}

		if (objectid_to_release > le32_to_cpu(map[i]) &&
		    objectid_to_release < le32_to_cpu(map[i + 1])) {
			/* size of objectid map is not changed */
			if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
				//objectid_map[i+1]--;
				le32_add_cpu(&map[i + 1], -1);
				return;
			}

			/* JDM comparing two little-endian values for equality -- safe */
			if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
				/* objectid map must be expanded, but there is no space */
				PROC_INFO_INC(s, leaked_oid);
				return;
			}

			/* expand the objectid map */
			memmove(map + i + 3, map + i + 1,
				(sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
			map[i + 1] = cpu_to_le32(objectid_to_release);
			map[i + 2] = cpu_to_le32(objectid_to_release + 1);
			set_sb_oid_cursize(rs, sb_oid_cursize(rs) + 2);
			return;
		}
		i += 2;
	}

	reiserfs_error(s, "vs-15011", "tried to free free object id (%lu)",
		       (long unsigned)objectid_to_release);
}
Example #12
//
// a portion of this function, particularly the VFS interface portion,
// was derived from minix or ext2's analog and evolved as the
// prototype did. You should be able to tell which portion by looking
// at the ext2 code and comparing. Its subfunctions contain no code
// used as a template unless they are so labeled.
//
static struct super_block * reiserfs_read_super (struct super_block * s, void * data, int silent)
{
    int size;
    struct inode *root_inode;
    kdev_t dev = s->s_dev;
    int j;
    extern int *blksize_size[];
    struct reiserfs_transaction_handle th ;
    int old_format = 0;
    unsigned long blocks;
    int jinit_done = 0 ;
    struct reiserfs_iget4_args args ;
    int old_magic;
    struct reiserfs_super_block * rs;


    memset (&s->u.reiserfs_sb, 0, sizeof (struct reiserfs_sb_info));

    if (parse_options ((char *) data, &(s->u.reiserfs_sb.s_mount_opt), &blocks) == 0) {
	return NULL;
    }

    if (blocks) {
  	printk("reserfs: resize option for remount only\n");
	return NULL;
    }	

    if (blksize_size[MAJOR(dev)] && blksize_size[MAJOR(dev)][MINOR(dev)] != 0) {
	/* as blocksize is set for partition we use it */
	size = blksize_size[MAJOR(dev)][MINOR(dev)];
    } else {
	size = BLOCK_SIZE;
	set_blocksize (s->s_dev, BLOCK_SIZE);
    }

    /* read block (64-th 1k block), which can contain reiserfs super block */
    if (read_super_block (s, size, REISERFS_DISK_OFFSET_IN_BYTES)) {
	// try old format (undistributed bitmap, super block in 8-th 1k block of a device)
	if (read_super_block (s, size, REISERFS_OLD_DISK_OFFSET_IN_BYTES)) 
	    goto error;
	else
	    old_format = 1;
    }

    rs = SB_DISK_SUPER_BLOCK (s);

    s->u.reiserfs_sb.s_mount_state = SB_REISERFS_STATE(s);
    s->u.reiserfs_sb.s_mount_state = REISERFS_VALID_FS ;

    if (old_format ? read_old_bitmaps(s) : read_bitmaps(s)) { 
	printk ("reiserfs_read_super: unable to read bitmap\n");
	goto error;
    }
#ifdef CONFIG_REISERFS_CHECK
    printk("reiserfs:warning: CONFIG_REISERFS_CHECK is set ON\n");
    printk("reiserfs:warning: - it is slow mode for debugging.\n");
#endif

    // set_device_ro(s->s_dev, 1) ;
    if (journal_init(s)) {
	printk("reiserfs_read_super: unable to initialize journal space\n") ;
	goto error ;
    } else {
	jinit_done = 1 ; /* once this is set, journal_release must be called
			 ** if we error out of the mount 
			 */
    }
    if (reread_meta_blocks(s)) {
	printk("reiserfs_read_super: unable to reread meta blocks after journal init\n") ;
	goto error ;
    }

    if (replay_only (s))
	goto error;

    if (is_read_only(s->s_dev) && !(s->s_flags & MS_RDONLY)) {
        printk("clm-7000: Detected readonly device, marking FS readonly\n") ;
	s->s_flags |= MS_RDONLY ;
    }
    args.objectid = REISERFS_ROOT_PARENT_OBJECTID ;
    root_inode = iget4 (s, REISERFS_ROOT_OBJECTID, 0, (void *)(&args));
    if (!root_inode) {
	printk ("reiserfs_read_super: get root inode failed\n");
	goto error;
    }

    s->s_root = d_alloc_root(root_inode);  
    if (!s->s_root) {
	iput(root_inode);
	goto error;
    }

    // define and initialize hash function
    s->u.reiserfs_sb.s_hash_function = hash_function (s);
    if (s->u.reiserfs_sb.s_hash_function == NULL) {
      dput(s->s_root) ;
      s->s_root = NULL ;
      goto error ;
    }

    rs = SB_DISK_SUPER_BLOCK (s);
    old_magic = strncmp (rs->s_magic,  REISER2FS_SUPER_MAGIC_STRING,
                           strlen ( REISER2FS_SUPER_MAGIC_STRING));
    if (!old_magic)
	set_bit(REISERFS_3_6, &(s->u.reiserfs_sb.s_properties));
    else
	set_bit(REISERFS_3_5, &(s->u.reiserfs_sb.s_properties));

    if (!(s->s_flags & MS_RDONLY)) {

	journal_begin(&th, s, 1) ;
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;

        set_sb_state( rs, REISERFS_ERROR_FS );

        if ( old_magic ) {
	    // filesystem created under 3.5.x found
	    if (convert_reiserfs (s)) {
		reiserfs_warning("reiserfs: converting 3.5.x filesystem to the new format\n") ;
		// after this 3.5.x will not be able to mount this partition
		memcpy (rs->s_magic, REISER2FS_SUPER_MAGIC_STRING, 
			sizeof (REISER2FS_SUPER_MAGIC_STRING));

		reiserfs_convert_objectid_map_v1(s) ;
		set_bit(REISERFS_3_6, &(s->u.reiserfs_sb.s_properties));
		clear_bit(REISERFS_3_5, &(s->u.reiserfs_sb.s_properties));
	    } else {
		reiserfs_warning("reiserfs: using 3.5.x disk format\n") ;
	    }
	}

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
	journal_end(&th, s, 1) ;
	
	/* look for files which were to be removed in previous session */
	finish_unfinished (s);

	s->s_dirt = 0;
    } else {
	if ( old_magic ) {
	    reiserfs_warning("reiserfs: using 3.5.x disk format\n") ;
	}
    }
    // mark hash in super block: it could be unset. overwrite should be ok
    set_sb_hash_function_code( rs, function2code(s->u.reiserfs_sb.s_hash_function ) );

    handle_attrs( s );

    reiserfs_proc_info_init( s );
    reiserfs_proc_register( s, "version", reiserfs_version_in_proc );
    reiserfs_proc_register( s, "super", reiserfs_super_in_proc );
    reiserfs_proc_register( s, "per-level", reiserfs_per_level_in_proc );
    reiserfs_proc_register( s, "bitmap", reiserfs_bitmap_in_proc );
    reiserfs_proc_register( s, "on-disk-super", reiserfs_on_disk_super_in_proc );
    reiserfs_proc_register( s, "oidmap", reiserfs_oidmap_in_proc );
    reiserfs_proc_register( s, "journal", reiserfs_journal_in_proc );
    init_waitqueue_head (&(s->u.reiserfs_sb.s_wait));

    printk("%s\n", reiserfs_get_version_string()) ;
    return s;

 error:
    if (jinit_done) { /* kill the commit thread, free journal ram */
	journal_release_error(NULL, s) ;
    }
    if (SB_DISK_SUPER_BLOCK (s)) {
	for (j = 0; j < SB_BMAP_NR (s); j ++) {
	    if (SB_AP_BITMAP (s))
		brelse (SB_AP_BITMAP (s)[j]);
	}
	if (SB_AP_BITMAP (s))
	    reiserfs_kfree (SB_AP_BITMAP (s), sizeof (struct buffer_head *) * SB_BMAP_NR (s), s);
    }
    if (SB_BUFFER_WITH_SB (s))
	brelse(SB_BUFFER_WITH_SB (s));

    return NULL;
}
Example #13
//
// a portion of this function, particularly the VFS interface portion,
// was derived from minix or ext2's analog and evolved as the
// prototype did. You should be able to tell which portion by looking
// at the ext2 code and comparing. It's subfunctions contain no code
// used as a template unless they are so labeled.
//
static int reiserfs_remount (struct super_block * s, int * flags, char * data)
{
  struct reiserfs_super_block * rs;
  struct reiserfs_transaction_handle th ;
  unsigned long blocks;
  unsigned long mount_options = 0;
#ifdef CONFIG_REISERFS_IMMUTABLE_HACK
  int do_update_suidimmu;
#endif

  rs = SB_DISK_SUPER_BLOCK (s);

  if (!parse_options(data, &mount_options, &blocks))
  	return 0;

#ifdef CONFIG_REISERFS_IMMUTABLE_HACK
    if (reiserfs_suid_immutable(s) && !capable(CAP_LINUX_IMMUTABLE))
	return -EPERM;
    do_update_suidimmu = 0;
    if (test_bit(SUID_IMMUTABLE, &mount_options)) {
      if (!capable(CAP_LINUX_IMMUTABLE))
	return -EPERM;
#ifdef CONFIG_REISERFS_IMMUTABLE_HACK_DEBUG
	printk("reiserfs: suidimmu ON\n");
#endif /* CONFIG_REISERFS_IMMUTABLE_HACK_DEBUG */
      if (!reiserfs_suid_immutable(s))
	do_update_suidimmu = 1;
      set_bit(SUID_IMMUTABLE, &(s->u.reiserfs_sb.s_mount_opt));
    } else {
#ifdef CONFIG_REISERFS_IMMUTABLE_HACK_DEBUG
	printk("reiserfs: suidimmu OFF\n");
#endif /* CONFIG_REISERFS_IMMUTABLE_HACK_DEBUG */
      if (reiserfs_suid_immutable(s))
	do_update_suidimmu = 1;
      clear_bit(SUID_IMMUTABLE, &(s->u.reiserfs_sb.s_mount_opt));
    }

    /*
     * update S_IMMUTABLE bit on inode->i_flags
     */
    if (do_update_suidimmu)
      update_suidimmu(s, reiserfs_suid_immutable(s));
#endif /* CONFIG_REISERFS_IMMUTABLE_HACK */

#define SET_OPT( opt, bits, super )					\
    if( ( bits ) & ( 1 << ( opt ) ) )					\
	    ( super ) -> u.reiserfs_sb.s_mount_opt |= ( 1 << ( opt ) )

  /* set options in the super-block bitmask */
  SET_OPT( NOTAIL, mount_options, s );
  SET_OPT( REISERFS_NO_BORDER, mount_options, s );
  SET_OPT( REISERFS_NO_UNHASHED_RELOCATION, mount_options, s );
  SET_OPT( REISERFS_HASHED_RELOCATION, mount_options, s );
  SET_OPT( REISERFS_TEST4, mount_options, s );
#undef SET_OPT

  if(blocks) {
      int rc = reiserfs_resize(s, blocks);
      if (rc != 0)
	  return rc;
  }

  if ((unsigned long)(*flags & MS_RDONLY) == (s->s_flags & MS_RDONLY)) {
    /* there is nothing to do to remount read-only fs as read-only fs */
    return 0;
  }
  
  if (*flags & MS_RDONLY) {
    /* try to remount file system with read-only permissions */
    if (sb_state(rs) == REISERFS_VALID_FS || s->u.reiserfs_sb.s_mount_state != REISERFS_VALID_FS) {
      return 0;
    }

    journal_begin(&th, s, 10) ;
    /* Mounting a rw partition read-only. */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    set_sb_state( rs, s->u.reiserfs_sb.s_mount_state );
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
  } else {
    s->u.reiserfs_sb.s_mount_state = sb_state(rs) ;
    s->s_flags &= ~MS_RDONLY ; /* now it is safe to call journal_begin */
    journal_begin(&th, s, 10) ;

    /* Mount a partition which is read-only, read-write */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    s->u.reiserfs_sb.s_mount_state = sb_state(rs);
    s->s_flags &= ~MS_RDONLY;
    set_sb_state( rs, REISERFS_ERROR_FS );
    /* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
    s->u.reiserfs_sb.s_mount_state = REISERFS_VALID_FS ;
  }
  /* this will force a full flush of all journal lists */
  SB_JOURNAL(s)->j_must_wait = 1 ;
  journal_end(&th, s, 10) ;

  if (!( *flags & MS_RDONLY ) )
    finish_unfinished( s );

  return 0;
}
Example #14
static int reiserfs_remount (struct super_block * s, int * mount_flags, char * data)
{
  struct reiserfs_super_block * rs;
  struct reiserfs_transaction_handle th ;
  unsigned long blocks;
  unsigned long mount_options = 0;

  rs = SB_DISK_SUPER_BLOCK (s);

  if (!reiserfs_parse_options(s, data, &mount_options, &blocks))
  	return 0;

#define SET_OPT( opt, bits, super )					\
    if( ( bits ) & ( 1 << ( opt ) ) )					\
	    ( super ) -> u.reiserfs_sb.s_mount_opt |= ( 1 << ( opt ) )

  /* set options in the super-block bitmask */
  SET_OPT( REISERFS_SMALLTAIL, mount_options, s );
  SET_OPT( REISERFS_LARGETAIL, mount_options, s );
  SET_OPT( REISERFS_NO_BORDER, mount_options, s );
  SET_OPT( REISERFS_NO_UNHASHED_RELOCATION, mount_options, s );
  SET_OPT( REISERFS_HASHED_RELOCATION, mount_options, s );
  SET_OPT( REISERFS_TEST4, mount_options, s );
  SET_OPT( REISERFS_ATTRS, mount_options, s );
#undef SET_OPT

  handle_attrs( s );

  if(blocks) {
      int rc = reiserfs_resize(s, blocks);
      if (rc != 0)
	  return rc;
  }

  if (*mount_flags & MS_RDONLY) {
    /* remount read-only */
    if (s->s_flags & MS_RDONLY)
      /* it is read-only already */
      return 0;
    /* try to remount file system with read-only permissions */
    if (sb_state(rs) == REISERFS_VALID_FS || s->u.reiserfs_sb.s_mount_state != REISERFS_VALID_FS) {
      return 0;
    }

    journal_begin(&th, s, 10) ;
    /* Mounting a rw partition read-only. */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    set_sb_state( rs, s->u.reiserfs_sb.s_mount_state );
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
  } else {
    /* remount read-write */
    if (!(s->s_flags & MS_RDONLY))
	return 0; /* We are read-write already */

    s->u.reiserfs_sb.s_mount_state = sb_state(rs) ;
    s->s_flags &= ~MS_RDONLY ; /* now it is safe to call journal_begin */
    journal_begin(&th, s, 10) ;
    
    /* Mount a partition which is read-only, read-write */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    s->u.reiserfs_sb.s_mount_state = sb_state(rs);
    s->s_flags &= ~MS_RDONLY;
    set_sb_state( rs, REISERFS_ERROR_FS );
    /* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
    s->u.reiserfs_sb.s_mount_state = REISERFS_VALID_FS ;
  }
  /* this will force a full flush of all journal lists */
  SB_JOURNAL(s)->j_must_wait = 1 ;
  journal_end(&th, s, 10) ;

  if (!( *mount_flags & MS_RDONLY ) )
    finish_unfinished( s );

  return 0;
}
Example #15
int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
{
	int err = 0;
	struct reiserfs_super_block *sb;
	struct reiserfs_bitmap_info *bitmap;
	struct reiserfs_bitmap_info *info;
	struct reiserfs_bitmap_info *old_bitmap = SB_AP_BITMAP(s);
	struct buffer_head *bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;

	struct reiserfs_list_bitmap *jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];

	unsigned long int block_count, free_blocks;
	int i;
	int copy_size;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
	    != REISERFS_DISK_OFFSET_IN_BYTES) {
		printk
		    ("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}

	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) -
			(reiserfs_bmap_count(s) - 1) * s->s_blocksize * 8;

	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / (s->s_blocksize * 8);
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new)
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;

	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr = reiserfs_bmap_count(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {
		/* reallocate journal bitmaps */
		if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
			printk
			    ("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
			return -ENOMEM;
		}
		/* the new journal bitmaps are zero filled, now we copy in the bitmap
		 ** node pointers from the old journal bitmap structs, and then
		 ** transfer the new data structures into the journal struct.
		 **
		 ** using the copy_size var below allows this code to work for
		 ** both shrinking and expanding the FS.
		 */
		copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
		copy_size =
		    copy_size * sizeof(struct reiserfs_list_bitmap_node *);
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			struct reiserfs_bitmap_node **node_tmp;
			jb = SB_JOURNAL(s)->j_list_bitmap + i;
			memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);

			/* just in case vfree schedules on us, copy the new
			 ** pointer into the journal struct before freeing the
			 ** old one
			 */
			node_tmp = jb->bitmaps;
			jb->bitmaps = jbitmap[i].bitmaps;
			vfree(node_tmp);
		}

		/* allocate additional bitmap blocks, reallocate array of bitmap
		 * block pointers */
		bitmap =
		    vzalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
		if (!bitmap) {
			/* Journal bitmaps are still supersized, but the memory isn't
			 * leaked, so I guess it's ok */
			printk("reiserfs_resize: unable to allocate memory.\n");
			return -ENOMEM;
		}
		for (i = 0; i < bmap_nr; i++)
			bitmap[i] = old_bitmap[i];

		/* This doesn't go through the journal, but it doesn't have to.
		 * The changes are still atomic: We're synced up when the journal
		 * transaction begins, and the new bitmaps don't matter if the
		 * transaction fails. */
		for (i = bmap_nr; i < bmap_nr_new; i++) {
			/* don't use read_bitmap_block since it will cache
			 * the uninitialized bitmap */
			bh = sb_bread(s, i * s->s_blocksize * 8);
			if (!bh) {
				vfree(bitmap);
				return -EIO;
			}
			memset(bh->b_data, 0, sb_blocksize(sb));
			reiserfs_set_le_bit(0, bh->b_data);
			reiserfs_cache_bitmap_metadata(s, bh, bitmap + i);

			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			reiserfs_write_unlock(s);
			sync_dirty_buffer(bh);
			reiserfs_write_lock(s);
			// update bitmap_info stuff
			bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
			brelse(bh);
		}
		/* free old bitmap blocks array */
		SB_AP_BITMAP(s) = bitmap;
		vfree(old_bitmap);
	}

	/* begin transaction, if there was an error, it's fine. Yes, we have
	 * incorrect bitmaps now, but none of it is ever going to touch the
	 * disk anyway. */
	err = journal_begin(&th, s, 10);
	if (err)
		return err;

	/* Extend old last bitmap block - new blocks have been made available */
	info = SB_AP_BITMAP(s) + bmap_nr - 1;
	bh = reiserfs_read_bitmap_block(s, bmap_nr - 1);
	if (!bh) {
		int jerr = journal_end(&th, s, 10);
		if (jerr)
			return jerr;
		return -EIO;
	}

	reiserfs_prepare_for_journal(s, bh, 1);
	for (i = block_r; i < s->s_blocksize * 8; i++)
		reiserfs_clear_le_bit(i, bh->b_data);
	info->free_count += s->s_blocksize * 8 - block_r;

	journal_mark_dirty(&th, s, bh);
	brelse(bh);

	/* Correct new last bitmap block - It may not be full */
	info = SB_AP_BITMAP(s) + bmap_nr_new - 1;
	bh = reiserfs_read_bitmap_block(s, bmap_nr_new - 1);
	if (!bh) {
		int jerr = journal_end(&th, s, 10);
		if (jerr)
			return jerr;
		return -EIO;
	}

	reiserfs_prepare_for_journal(s, bh, 1);
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
		reiserfs_set_le_bit(i, bh->b_data);
	journal_mark_dirty(&th, s, bh);
	brelse(bh);

	info->free_count -= s->s_blocksize * 8 - block_r_new;
	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	free_blocks = SB_FREE_BLOCKS(s);
	PUT_SB_FREE_BLOCKS(s,
			   free_blocks + (block_count_new - block_count -
					  (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new);

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));

	SB_JOURNAL(s)->j_must_wait = 1;
	return journal_end(&th, s, 10);
}
Example #16
static int do_reiserfs_new_blocknrs (struct reiserfs_transaction_handle *th,
                                     unsigned long * free_blocknrs, 
				     unsigned long search_start, 
				     int amount_needed, int priority, 
				     int for_unformatted,
				     int for_prealloc)
{
  struct super_block * s = th->t_super;
  int i, j;
  unsigned long * block_list_start = free_blocknrs;
  int init_amount_needed = amount_needed;
  unsigned long new_block = 0 ; 

    if (SB_FREE_BLOCKS (s) < SPARE_SPACE && !priority)
	/* we can answer NO_DISK_SPACE being asked for new block with
	   priority 0 */
	return NO_DISK_SPACE;

  RFALSE( !s, "vs-4090: trying to get new block from nonexistent device");
  RFALSE( search_start == MAX_B_NUM,
	  "vs-4100: we are optimizing location based on "
	  "the bogus location of a temp buffer (%lu).", search_start);
  RFALSE( amount_needed < 1 || amount_needed > 2,
	  "vs-4110: amount_needed parameter incorrect (%d)", amount_needed);

  /* We continue the while loop if another process snatches our found
   * free block from us after we find it but before we successfully
   * mark it as in use */

  while (amount_needed--) {
    /* skip over any blocknrs already gotten last time. */
    if (*(free_blocknrs) != 0) {
      RFALSE( is_reusable (s, *free_blocknrs, 1) == 0, 
	      "vs-4120: bad blocknr on free_blocknrs list");
      free_blocknrs++;
      continue;
    }
    /* look for zero bits in bitmap */
    if (find_zero_bit_in_bitmap(s,search_start, &i, &j,for_unformatted) == 0) {
      if (find_zero_bit_in_bitmap(s,search_start,&i,&j, for_unformatted) == 0) {
				/* recode without the goto and without
				   the if.  It will require a
				   duplicate for.  This is worth the
				   code clarity.  Your way was
				   admirable, and just a bit too
				   clever in saving instructions.:-)
				   I'd say create a new function, but
				   that would slow things also, yes?
				   -Hans */
free_and_return:
	for ( ; block_list_start != free_blocknrs; block_list_start++) {
	  reiserfs_free_block (th, *block_list_start);
	  *block_list_start = 0;
	}
	if (for_prealloc) 
	    return NO_MORE_UNUSED_CONTIGUOUS_BLOCKS;
	else
	    return NO_DISK_SPACE;
      }
    }
    
    /* i and j now contain the results of the search. i = bitmap block
       number containing free block, j = offset in this block.  we
       compute the blocknr which is our result, store it in
       free_blocknrs, and increment the pointer so that on the next
       loop we will insert into the next location in the array.  Also
       in preparation for the next loop, search_start is changed so
       that the next search will not rescan the same range but will
       start where this search finished.  Note that while it is
       possible that schedule has occurred and blocks have been freed
       in that range, it is perhaps more important that the blocks
       returned be near each other than that they be near their other
       neighbors, and it also simplifies and speeds the code this way.  */

    /* journal: we need to make sure the block we are giving out is not
    ** a log block, horrible things would happen there.
    */
    new_block = (i * (s->s_blocksize << 3)) + j; 
    if (for_prealloc && (new_block - 1) != search_start) {
      /* preallocated blocks must be contiguous, bail if we didn't find one.
      ** this is not a bug.  We want to do the check here, before the
      ** bitmap block is prepared, and before we set the bit and log the
      ** bitmap. 
      **
      ** If we do the check after this function returns, we have to 
      ** call reiserfs_free_block for new_block, which would be pure
      ** overhead.
      **
      ** for_prealloc should only be set if the caller can deal with the
      ** NO_MORE_UNUSED_CONTIGUOUS_BLOCKS return value.  This can be
      ** returned before the disk is actually full
      */
      goto free_and_return ;
    }
    search_start = new_block ;
    if (search_start >= reiserfs_get_journal_block(s) &&
        search_start < (reiserfs_get_journal_block(s) + JOURNAL_BLOCK_COUNT)) {
	reiserfs_warning("vs-4130: reiserfs_new_blocknrs: trying to allocate log block %lu\n",
			 search_start) ;
	search_start++ ;
	amount_needed++ ;
	continue ;
    }
       

    reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[i], 1) ;

    RFALSE( buffer_locked (SB_AP_BITMAP (s)[i]) || 
	    is_reusable (s, search_start, 0) == 0,
	    "vs-4140: bitmap block is locked or bad block number found");

    /* if this bit was already set, we've scheduled, and someone else
    ** has allocated it.  loop around and try again
    */
    if (reiserfs_test_and_set_le_bit (j, SB_AP_BITMAP (s)[i]->b_data)) {
	reiserfs_warning("vs-4150: reiserfs_new_blocknrs, block not free");
	reiserfs_restore_prepared_buffer(s, SB_AP_BITMAP(s)[i]) ;
	amount_needed++ ;
	continue ;
    }    
    journal_mark_dirty (th, s, SB_AP_BITMAP (s)[i]); 
    *free_blocknrs = search_start ;
    free_blocknrs ++;
  }

  reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
  /* update free block count in super block */
  PUT_SB_FREE_BLOCKS( s, SB_FREE_BLOCKS(s) - init_amount_needed );
  journal_mark_dirty (th, s, SB_BUFFER_WITH_SB (s));
  s->s_dirt = 1;

  return CARRY_ON;
}