Example No. 1
/* XXX I know it could be merged with the upper-level function;
   but maybe the resulting function would be too complex. */
static inline int allocate_without_wrapping_disk (reiserfs_blocknr_hint_t * hint,
					 b_blocknr_t * new_blocknrs,
					 b_blocknr_t start, b_blocknr_t finish,
					 int amount_needed, int prealloc_size)
{
    int rest = amount_needed;
    int nr_allocated;
  
    while (rest > 0 && start <= finish) {
	nr_allocated = scan_bitmap (hint->th, &start, finish, 1,
				    rest + prealloc_size, !hint->formatted_node,
				    hint->block);

	if (nr_allocated == 0)	/* no new blocks allocated, return */
	    break;
	
	/* fill free_blocknrs array first */
	while (rest > 0 && nr_allocated > 0) {
	    * new_blocknrs ++ = start ++;
	    rest --; nr_allocated --;
	}

	/* do we have something to fill prealloc. array also ? */
	if (nr_allocated > 0) {
	    /* it means prealloc_size was greater than 0 and we do preallocation */
	    list_add(&REISERFS_I(hint->inode)->i_prealloc_list,
		     &SB_JOURNAL(hint->th->t_super)->j_prealloc_list);
	    REISERFS_I(hint->inode)->i_prealloc_block = start;
	    REISERFS_I(hint->inode)->i_prealloc_count = nr_allocated;
	    break;
	}
    }

    return (amount_needed - rest);
}
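The control flow above is easier to see in isolation: scan_bitmap() returns a run of consecutive block numbers starting at start; the first rest of them are handed back through new_blocknrs, and whatever is left of the run becomes the inode's preallocation window. A minimal userspace sketch of that fill-then-spill split (plain C; split_run() and the fixed-size arrays are hypothetical stand-ins, not kernel code):

#include <stdio.h>

/* Hypothetical stand-in for the fill-then-spill logic of
 * allocate_without_wrapping_disk(): hand out the first 'rest' blocks
 * of a contiguous run, keep the remainder as a prealloc window. */
static void split_run(unsigned long run_start, int run_len,
                      unsigned long *out, int *rest,
                      unsigned long *prealloc_block, int *prealloc_count)
{
    /* fill the caller's array first */
    while (*rest > 0 && run_len > 0) {
        *out++ = run_start++;
        (*rest)--; run_len--;
    }
    /* leftover blocks of the run become the preallocation window */
    if (run_len > 0) {
        *prealloc_block = run_start;
        *prealloc_count = run_len;
    }
}

int main(void)
{
    unsigned long out[4]; int rest = 4;
    unsigned long pb = 0; int pc = 0;

    /* pretend the bitmap scan found 7 consecutive blocks at 1000 */
    split_run(1000, 7, out, &rest, &pb, &pc);
    printf("allocated %lu..%lu, prealloc window %lu x%d\n",
           out[0], out[3], pb, pc);   /* 1000..1003, 1004 x3 */
    return 0;
}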
Example No. 2
/* stolen from fs/buffer.c */
void reiserfs_unmap_buffer(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
		BUG();
	}
	clear_buffer_dirty(bh);
	/*
	 * Remove the buffer from whatever list it belongs to. We are mostly
	 * interested in removing it from the per-sb j_dirty_buffers list, to avoid
	 * a BUG() on an attempt to write an unmapped buffer
	 */
	if ((!list_empty(&bh->b_assoc_buffers) || bh->b_private) && bh->b_page) {
		struct inode *inode = bh->b_page->mapping->host;
		struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
		spin_lock(&j->j_dirty_buffers_lock);
		list_del_init(&bh->b_assoc_buffers);
		reiserfs_free_jh(bh);
		spin_unlock(&j->j_dirty_buffers_lock);
	}
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	bh->b_bdev = NULL;
	unlock_buffer(bh);
}
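One detail worth calling out is list_del_init() rather than plain list_del(): the entry is unlinked and then re-pointed at itself, so a later list_empty(&bh->b_assoc_buffers) test on the same buffer head stays well defined. A self-contained userspace sketch of the difference, using a local re-creation of the kernel's circular list (these are not the real <linux/list.h> definitions, just the same shape):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_del_init(struct list_head *e)
{
    /* unlink from whatever list e is on... */
    e->prev->next = e->next;
    e->next->prev = e->prev;
    /* ...then point it at itself so list_empty(e) is true afterwards */
    INIT_LIST_HEAD(e);
}

int main(void)
{
    struct list_head head, a;
    INIT_LIST_HEAD(&head);

    /* list_add: insert a right after head */
    a.next = head.next; a.prev = &head;
    head.next->prev = &a; head.next = &a;

    list_del_init(&a);
    /* with a plain list_del, a.next/a.prev would be stale here */
    printf("a empty after del_init: %d\n", list_empty(&a)); /* prints 1 */
    return 0;
}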
Example No. 3
File: super.c Project: nhanh0/hah
//
// a portion of this function, particularly the VFS interface portion,
// was derived from minix or ext2's analog and evolved as the
// prototype did. You should be able to tell which portion by looking
// at the ext2 code and comparing. Its subfunctions contain no code
// used as a template unless they are so labeled.
//
static int reiserfs_remount (struct super_block * s, int * flags, char * data)
{
  struct reiserfs_super_block * rs;
  struct reiserfs_transaction_handle th ;
  unsigned long blocks;
  unsigned long mount_options;

  rs = SB_DISK_SUPER_BLOCK (s);

  if (!parse_options(data, &mount_options, &blocks))
  	return 0;

  if(blocks) {
      int rc = reiserfs_resize(s, blocks);
      if (rc != 0)
	  return rc;
  }

  if ((unsigned long)(*flags & MS_RDONLY) == (s->s_flags & MS_RDONLY)) {
    /* there is nothing to do to remount read-only fs as read-only fs */
    return 0;
  }
  
  if (*flags & MS_RDONLY) {
    /* try to remount file system with read-only permissions */
    if (sb_state(rs) == REISERFS_VALID_FS || s->u.reiserfs_sb.s_mount_state != REISERFS_VALID_FS) {
      return 0;
    }

    journal_begin(&th, s, 10) ;
    /* Mounting a rw partition read-only. */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    set_sb_state( rs, s->u.reiserfs_sb.s_mount_state );
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
  } else {
    s->u.reiserfs_sb.s_mount_state = sb_state(rs) ;
    s->s_flags &= ~MS_RDONLY ; /* now it is safe to call journal_begin */
    journal_begin(&th, s, 10) ;

    /* Remounting a read-only partition read-write. */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    s->u.reiserfs_sb.s_mount_state = sb_state(rs);
    s->s_flags &= ~MS_RDONLY;
    set_sb_state( rs, REISERFS_ERROR_FS );
    /* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
    s->u.reiserfs_sb.s_mount_state = REISERFS_VALID_FS ;
  }
  /* this will force a full flush of all journal lists */
  SB_JOURNAL(s)->j_must_wait = 1 ;
  journal_end(&th, s, 10) ;

  if (!( *flags & MS_RDONLY ) )
    finish_unfinished( s );

  return 0;
}
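The two branches above implement a small crash-detection protocol: while the filesystem is writable, the on-disk state is pinned to REISERFS_ERROR_FS and the real state lives only in memory (s_mount_state); a clean remount to read-only writes the saved state back. If the machine dies while read-write, the next mount finds ERROR_FS on disk and knows a check is needed. A compressed sketch of just those transitions (hypothetical enum values, no journaling):

#include <stdio.h>

enum fs_state { VALID_FS = 1, ERROR_FS = 2 };

struct sb {
    enum fs_state on_disk;   /* what set_sb_state() persists */
    enum fs_state in_memory; /* the role of s_mount_state    */
};

static void remount_rw(struct sb *s)
{
    s->in_memory = s->on_disk; /* remember the state we mounted with  */
    s->on_disk = ERROR_FS;     /* pessimistic until a clean ro remount */
}

static void remount_ro(struct sb *s)
{
    s->on_disk = s->in_memory; /* clean transition: restore saved state */
}

int main(void)
{
    struct sb s = { VALID_FS, VALID_FS };

    remount_rw(&s);
    printf("while rw, disk says: %s\n",
           s.on_disk == ERROR_FS ? "ERROR_FS (crash => fsck)" : "VALID_FS");

    remount_ro(&s);
    printf("after clean ro remount: %s\n",
           s.on_disk == VALID_FS ? "VALID_FS" : "ERROR_FS");
    return 0;
}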
Example No. 4
int reiserfs_remount (struct super_block * s, int * flags, char * data)
{
  struct reiserfs_super_block * rs;
  struct reiserfs_transaction_handle th ;
  unsigned long blocks;
  unsigned long mount_options;

  rs = SB_DISK_SUPER_BLOCK (s);

  if (!parse_options(data, &mount_options, &blocks))
  	return 0;

  if(blocks) 
  	reiserfs_resize(s, blocks);
	
  journal_begin(&th, s, 10) ;
  if ((unsigned long)(*flags & MS_RDONLY) == (s->s_flags & MS_RDONLY)) {
    /* there is nothing to do to remount read-only fs as read-only fs */
    journal_end(&th, s, 10) ;
    return 0;
  }
  if (*flags & MS_RDONLY) {
    /* try to remount file system with read-only permissions */
    if (le16_to_cpu (rs->s_state) == REISERFS_VALID_FS ||
	s->u.reiserfs_sb.s_mount_state != REISERFS_VALID_FS) {
      journal_end(&th, s, 10) ;
      return 0;
    }
    /* Mounting a rw partition read-only. */
    rs->s_state = cpu_to_le16 (s->u.reiserfs_sb.s_mount_state);
    /* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); journal victim */
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
  } else {
    /* Remounting a read-only partition read-write. */
    s->u.reiserfs_sb.s_mount_state = le16_to_cpu (rs->s_state);
    s->s_flags &= ~MS_RDONLY;
    rs->s_state = cpu_to_le16 (REISERFS_ERROR_FS);
    /* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
    s->u.reiserfs_sb.s_mount_state = REISERFS_VALID_FS ;
    if (test_bit(NOTAIL, &mount_options)) {
      set_bit(NOTAIL, &(s->u.reiserfs_sb.s_mount_opt)) ;
    }
    
    /* check the state the file system was in before remounting read-write */
#if 0 /* journal victim */    
    if (s->u.reiserfs_sb.s_mount_state != REISERFS_VALID_FS)
      printk ("REISERFS: remounting unchecked fs, "
	      "running reiserfsck is recommended\n");
#endif
  }
  /* this will force a full flush of all journal lists */
  SB_JOURNAL(s)->j_must_wait = 1 ;
  journal_end(&th, s, 10) ;
  return 0;
}
Example No. 5
void reiserfs_discard_all_prealloc (struct reiserfs_transaction_handle *th)
{
    struct list_head * plist = &SB_JOURNAL(th->t_super)->j_prealloc_list;
    struct inode * inode;

    while (!list_empty(plist)) {
        inode = list_entry(plist->next, struct inode, u.reiserfs_i.i_prealloc_list);
#ifdef CONFIG_REISERFS_CHECK
        if (!inode->u.reiserfs_i.i_prealloc_count) {
            reiserfs_warning("zam-4001:%s: inode is in prealloc list but has no preallocated blocks.\n", __FUNCTION__ );
        }
#endif
        __discard_prealloc(th, inode);
    }
}
Example No. 6
void reiserfs_discard_all_prealloc (struct reiserfs_transaction_handle *th)
{
    struct list_head * plist = &SB_JOURNAL(th->t_super)->j_prealloc_list;

    BUG_ON (!th->t_trans_id);

    while (!list_empty(plist)) {
        struct reiserfs_inode_info *ei;
        ei = list_entry(plist->next, struct reiserfs_inode_info, i_prealloc_list);
#ifdef CONFIG_REISERFS_CHECK
        if (!ei->i_prealloc_count) {
            reiserfs_warning (th->t_super, "zam-4001:%s: inode is in prealloc list but has no preallocated blocks.", __FUNCTION__);
        }
#endif
        __discard_prealloc(th, ei);
    }
}
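Both versions drain the list with the same idiom: list_entry() is container_of(), recovering the enclosing object from the address of the list_head embedded in it, and __discard_prealloc() is expected to unlink the entry, which is what makes the while (!list_empty()) loop terminate. A self-contained userspace sketch of that pattern (local re-creations of the macros; struct pinfo is a hypothetical stand-in for reiserfs_inode_info):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_del_init(struct list_head *e)
{
    e->prev->next = e->next; e->next->prev = e->prev;
    e->next = e->prev = e;
}

/* hypothetical per-inode info with an embedded list node */
struct pinfo { int prealloc_count; struct list_head node; };

int main(void)
{
    struct list_head plist = { &plist, &plist };
    struct pinfo a = { 3 }, b = { 5 };

    /* list_add both entries at the front of plist */
    a.node.next = plist.next; a.node.prev = &plist;
    plist.next->prev = &a.node; plist.next = &a.node;
    b.node.next = plist.next; b.node.prev = &plist;
    plist.next->prev = &b.node; plist.next = &b.node;

    while (!list_empty(&plist)) {
        struct pinfo *p = list_entry(plist.next, struct pinfo, node);
        printf("discarding %d preallocated blocks\n", p->prealloc_count);
        list_del_init(&p->node); /* the "discard" must unlink, or we loop */
    }
    return 0;
}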
Example No. 7
File: prints.c Project: 7799/linux
void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...)
{
	do_reiserfs_warning(fmt);

	if (reiserfs_error_panic(sb)) {
		panic(KERN_CRIT "REISERFS panic (device %s): %s\n", sb->s_id,
		      error_buf);
	}

	if (reiserfs_is_journal_aborted(SB_JOURNAL(sb)))
		return;

	printk(KERN_CRIT "REISERFS abort (device %s): %s\n", sb->s_id,
	       error_buf);

	sb->s_flags |= MS_RDONLY;
	reiserfs_abort_journal(sb, errno);
}
Example No. 9
/* The function is NOT SCHEDULE-SAFE!  
** because the bitmap block we want to change could be locked, and on its
** way to the disk when we want to read it, and because of the 
** flush_async_commits.  Per bitmap block locks won't help much, and 
** really aren't needed, as we retry later on if we try to set the bit
** and it is already set.
*/
static int find_zero_bit_in_bitmap (struct super_block * s, 
                                    unsigned long search_start, 
				    int * bmap_nr, int * offset, 
				    int for_unformatted)
{
  int retry_count = 0 ;
  /* get bit location (bitmap number and bit offset) of search_start block */
  get_bit_address (s, search_start, bmap_nr, offset);

    /* note that we search forward in the bitmap; benchmarks have shown that it is better to allocate in increasing
       sequence, which is probably due to the disk spinning in the forward direction. */
    if (find_forward (s, bmap_nr, offset, for_unformatted) == 0) {
      /* there wasn't a free block with number greater than our
         starting point, so we are going to go to the beginning of the disk */

retry:
      search_start = 0; /* caller will reset search_start for itself also. */
      get_bit_address (s, search_start, bmap_nr, offset);
      if (find_forward (s, bmap_nr,offset,for_unformatted) == 0) {
	if (for_unformatted) {	/* why only unformatted nodes? -Hans */
	  if (retry_count == 0) {
	    /* we've got a chance that flushing async commits will free up
	    ** some space.  Sync then retry
	    */
	    flush_async_commits(s) ;
	    retry_count++ ;
	    goto retry ;
	  } else if (retry_count > 0) {
	    /* nothing more we can do.  Make the others wait, flush
	    ** all log blocks to disk, and flush to their home locations.
	    ** this will free up any blocks held by the journal
	    */
	    SB_JOURNAL(s)->j_must_wait = 1 ;
	  }
	}
        return 0;
      }
    }
  return 1;
}
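The search shape is: scan forward from the hint; on failure wrap to block 0 and scan again; if that also fails, flush async commits once and retry; on a second failure the caller forces a full journal flush and reports no space. A condensed userspace sketch of the same control flow over a byte array standing in for the bitmap (all names here are hypothetical):

#include <stdio.h>
#include <string.h>

#define NBLOCKS 16
static unsigned char used[NBLOCKS];

static int find_forward(int start) /* returns a free block nr, or -1 */
{
    for (int i = start; i < NBLOCKS; i++)
        if (!used[i]) return i;
    return -1;
}

static void flush_async_commits(void)
{
    used[2] = 0; /* pretend the flush freed block 2 */
}

static int find_zero_bit(int search_start)
{
    int retry_count = 0, n;

    if ((n = find_forward(search_start)) >= 0) return n;
retry:
    /* nothing past the hint: wrap to the start of the disk */
    if ((n = find_forward(0)) >= 0) return n;
    if (retry_count++ == 0) { flush_async_commits(); goto retry; }
    /* second failure: the caller would set j_must_wait and give up */
    return -1;
}

int main(void)
{
    memset(used, 1, sizeof(used));                /* completely full disk */
    printf("found block %d\n", find_zero_bit(8)); /* prints 2: the flush freed it */
    return 0;
}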
Example No. 10
/* Allocates blocks for a file to fulfil a write request.
   Maps all unmapped but prepared pages from the list.
   Updates metadata with newly allocated block numbers as needed. */
int reiserfs_allocate_blocks_for_region(
				struct reiserfs_transaction_handle *th,
				struct inode *inode, /* Inode we work with */
				loff_t pos, /* Writing position */
				int num_pages, /* number of pages write going
						  to touch */
				int write_bytes, /* amount of bytes to write */
				struct page **prepared_pages, /* array of
							         prepared pages
							       */
				int blocks_to_allocate /* Amount of blocks we
							  need to allocate to
							  fit the data into file
							 */
				)
{
    struct cpu_key key; // cpu key of item that we are going to deal with
    struct item_head *ih; // pointer to item head that we are going to deal with
    struct buffer_head *bh; // Buffer head that contains items that we are going to deal with
    __u32 * item; // pointer to item we are going to deal with
    INITIALIZE_PATH(path); // path to item, that we are going to deal with.
    b_blocknr_t *allocated_blocks; // Pointer to a place where allocated blocknumbers would be stored.
    reiserfs_blocknr_hint_t hint; // hint structure for block allocator.
    size_t res; // return value of various functions that we call.
    int curr_block; // current block used to keep track of unmapped blocks.
    int i; // loop counter
    int itempos; // position in item
    unsigned int from = (pos & (PAGE_CACHE_SIZE - 1)); // writing position in
						       // first page
    unsigned int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1; /* last modified byte offset in last page */
    __u64 hole_size ; // amount of blocks for a file hole, if it needed to be created.
    int modifying_this_item = 0; // Flag for items traversal code to keep track
				 // of the fact that we already prepared
				 // current block for journal
    int will_prealloc = 0;

    RFALSE(!blocks_to_allocate, "green-9004: tried to allocate zero blocks?");

    /* only preallocate if this is a small write */
    if (REISERFS_I(inode)->i_prealloc_count ||
       (!(write_bytes & (inode->i_sb->s_blocksize -1)) &&
        blocks_to_allocate <
        REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize))
        will_prealloc = REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize;

    allocated_blocks = kmalloc((blocks_to_allocate + will_prealloc) *
    					sizeof(b_blocknr_t), GFP_NOFS);

    /* First we compose a key to point at the writing position, we want to do
       that outside of any locking region. */
    make_cpu_key (&key, inode, pos+1, TYPE_ANY, 3/*key length*/);

    /* If we came here, it means we absolutely need to open a transaction,
       since we need to allocate some blocks */
    reiserfs_write_lock(inode->i_sb); // Journaling stuff and we need that.
    journal_begin(th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3 + 1); // Wish I knew whether this number is enough
    reiserfs_update_inode_transaction(inode) ;

    /* Look for the in-tree position of our write, need path for block allocator */
    res = search_for_position_by_key(inode->i_sb, &key, &path);
    if ( res == IO_ERROR ) {
	res = -EIO;
	goto error_exit;
    }
   
    /* Allocate blocks */
    /* First fill in "hint" structure for block allocator */
    hint.th = th; // transaction handle.
    hint.path = &path; // Path, so that block allocator can determine packing locality or whatever it needs to determine.
    hint.inode = inode; // Inode is needed by block allocator too.
    hint.search_start = 0; // We have no hint on where to search free blocks for block allocator.
    hint.key = key.on_disk_key; // on disk key of file.
    hint.block = inode->i_blocks>>(inode->i_sb->s_blocksize_bits-9); // Number of disk blocks this file occupies already.
    hint.formatted_node = 0; // We are allocating blocks for unformatted node.
    hint.preallocate = will_prealloc;

    /* Call block allocator to allocate blocks */
    res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate);
    if ( res != CARRY_ON ) {
	if ( res == NO_DISK_SPACE ) {
	    /* We flush the transaction in case of no space. This way some
	       blocks might become free */
	    SB_JOURNAL(inode->i_sb)->j_must_wait = 1;
	    restart_transaction(th, inode, &path);

	    /* We might have scheduled, so search again */
	    res = search_for_position_by_key(inode->i_sb, &key, &path);
	    if ( res == IO_ERROR ) {
		res = -EIO;
		goto error_exit;
	    }

	    /* update changed info for hint structure. */
	    res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate);
	    if ( res != CARRY_ON ) {
		res = -ENOSPC; 
		pathrelse(&path);
		goto error_exit;
	    }
	} else {
	    res = -ENOSPC;
	    pathrelse(&path);
	    goto error_exit;
	}
    }

#ifdef __BIG_ENDIAN
        // Too bad, I have not found any way to convert a given region from
        // cpu format to little endian format
    {
        int i;
        for ( i = 0; i < blocks_to_allocate ; i++)
            allocated_blocks[i]=cpu_to_le32(allocated_blocks[i]);
    }
#endif

    /* Block allocation might well have scheduled and the tree might have
       changed; let's search the tree again */
    /* find where in the tree our write should go */
    res = search_for_position_by_key(inode->i_sb, &key, &path);
    if ( res == IO_ERROR ) {
	res = -EIO;
	goto error_exit_free_blocks;
    }

    bh = get_last_bh( &path ); // Get a bufferhead for last element in path.
    ih = get_ih( &path );      // Get a pointer to last item head in path.
    item = get_item( &path );  // Get a pointer to last item in path

    /* Let's see what we have found */
    if ( res != POSITION_FOUND ) { /* position not found, this means that we
				      might need to append the file with holes
				      first */
	// Since we are writing past the file's end, we need to find out if
	// there is a hole that needs to be inserted before our writing
	// position, and how many blocks it is going to cover (we need to
	//  populate pointers to file blocks representing the hole with zeros)

	{
	    int item_offset = 1;
	    /*
	     * if ih is stat data, its offset is 0 and we don't want to
	     * add 1 to pos in the hole_size calculation
	     */
	    if (is_statdata_le_ih(ih))
	        item_offset = 0;
	    hole_size = (pos + item_offset -
	            (le_key_k_offset( get_inode_item_key_version(inode),
		    &(ih->ih_key)) +
		    op_bytes_number(ih, inode->i_sb->s_blocksize))) >>
		    inode->i_sb->s_blocksize_bits;
	}
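One piece of this function worth a worked example is the from/to arithmetic near the top: from is the byte offset of the write inside the first page, and to is one past the last modified byte inside the last page. For 4 KB pages, a 100-byte write at pos = 3*4096 + 50 gives from = 50 and to = 150. A tiny standalone check of that arithmetic (PAGE_CACHE_SIZE assumed to be 4096):

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL

int main(void)
{
    unsigned long pos = 3 * PAGE_CACHE_SIZE + 50;
    int write_bytes = 100;

    /* same expressions as in reiserfs_allocate_blocks_for_region() */
    unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
    unsigned int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1;

    printf("from=%u to=%u\n", from, to); /* from=50 to=150 */
    return 0;
}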
Example No. 11
int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
{
	struct reiserfs_super_block * sb;
        struct reiserfs_bitmap_info *bitmap;
	struct buffer_head * bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;
	
	struct reiserfs_list_bitmap * jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];
	
	unsigned long int block_count, free_blocks;
	int i;
	int copy_size ;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}	
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr *	SB_BUFFER_WITH_SB(s)->b_size 
		!= REISERFS_DISK_OFFSET_IN_BYTES ) {
		printk("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}
       
	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) -
	        (SB_BMAP_NR(s) - 1) * s->s_blocksize * 8;
	
	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / ( s->s_blocksize * 8 );
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new) 
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;

	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr     = SB_BMAP_NR(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {	    
	    /* reallocate journal bitmaps */
	    if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
		printk("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
		unlock_super(s) ;
		return -ENOMEM ;
	    }
	    /* the new journal bitmaps are zero filled, now we copy in the bitmap
	    ** node pointers from the old journal bitmap structs, and then
	    ** transfer the new data structures into the journal struct.
	    **
	    ** using the copy_size var below allows this code to work for
	    ** both shrinking and expanding the FS.
	    */
	    copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr ;
	    copy_size = copy_size * sizeof(struct reiserfs_list_bitmap_node *) ;
	    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
		struct reiserfs_bitmap_node **node_tmp ;
		jb = SB_JOURNAL(s)->j_list_bitmap + i ;
		memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size) ;

		/* just in case vfree schedules on us, copy the new
		** pointer into the journal struct before freeing the 
		** old one
		*/
		node_tmp = jb->bitmaps ;
		jb->bitmaps = jbitmap[i].bitmaps ;
		vfree(node_tmp) ;
	    }	
	
	    /* allocate additional bitmap blocks, reallocate array of bitmap
	     * block pointers */
	    bitmap = vmalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
	    if (!bitmap) {
		printk("reiserfs_resize: unable to allocate memory.\n");
		return -ENOMEM;
	    }
	    memset (bitmap, 0, sizeof (struct reiserfs_bitmap_info) * SB_BMAP_NR(s));
	    for (i = 0; i < bmap_nr; i++)
		bitmap[i] = SB_AP_BITMAP(s)[i];
	    for (i = bmap_nr; i < bmap_nr_new; i++) {
		bitmap[i].bh = sb_getblk(s, i * s->s_blocksize * 8);
		memset(bitmap[i].bh->b_data, 0, sb_blocksize(sb));
		reiserfs_test_and_set_le_bit(0, bitmap[i].bh->b_data);

		set_buffer_uptodate(bitmap[i].bh);
		mark_buffer_dirty(bitmap[i].bh) ;
		sync_dirty_buffer(bitmap[i].bh);
		// update bitmap_info stuff
		bitmap[i].first_zero_hint=1;
		bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
	    }	
	    /* free old bitmap blocks array */
	    vfree(SB_AP_BITMAP(s));
	    SB_AP_BITMAP(s) = bitmap;
	}
	
	/* begin transaction */
	journal_begin(&th, s, 10);

	/* correct last bitmap blocks in old and new disk layout */
	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr - 1].bh, 1);
	for (i = block_r; i < s->s_blocksize * 8; i++)
	    reiserfs_test_and_clear_le_bit(i, 
					   SB_AP_BITMAP(s)[bmap_nr - 1].bh->b_data);
	SB_AP_BITMAP(s)[bmap_nr - 1].free_count += s->s_blocksize * 8 - block_r;
	if ( !SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint)
	    SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint = block_r;

	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr - 1].bh);

	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh, 1);
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
	    reiserfs_test_and_set_le_bit(i,
					 SB_AP_BITMAP(s)[bmap_nr_new - 1].bh->b_data);
	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh);
 
	SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count -= s->s_blocksize * 8 - block_r_new;
	/* Extreme case: the new last bitmap block has no free blocks at all. */
	if ( !SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count )
	    SB_AP_BITMAP(s)[bmap_nr_new - 1].first_zero_hint = 0;
 	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
	free_blocks = SB_FREE_BLOCKS(s);
	PUT_SB_FREE_BLOCKS(s, free_blocks + (block_count_new - block_count - (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_nr_new);
	s->s_dirt = 1;

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
	
	SB_JOURNAL(s)->j_must_wait = 1;
	journal_end(&th, s, 10);

	return 0;
}
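The bitmap sizing at the top of reiserfs_resize() is a ceiling division: each bitmap block tracks blocksize * 8 filesystem blocks (32768 for a 4096-byte block size), and block_r_new is how many bits of the last bitmap block are actually in use. A quick standalone check of that computation:

#include <stdio.h>

int main(void)
{
    unsigned long blocksize = 4096, bits = blocksize * 8; /* 32768 */
    unsigned long block_count_new = 100000;

    unsigned int bmap_nr_new = block_count_new / bits;
    unsigned int block_r_new = block_count_new - bmap_nr_new * bits;
    if (block_r_new)
        bmap_nr_new++;          /* partial last bitmap block */
    else
        block_r_new = bits;     /* last bitmap block exactly full */

    printf("%u bitmap blocks, %u bits used in the last one\n",
           bmap_nr_new, block_r_new); /* 4 bitmap blocks, 1696 bits */
    return 0;
}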
Example No. 12
//
// a portion of this function, particularly the VFS interface portion,
// was derived from minix or ext2's analog and evolved as the
// prototype did. You should be able to tell which portion by looking
// at the ext2 code and comparing. Its subfunctions contain no code
// used as a template unless they are so labeled.
//
static int reiserfs_remount (struct super_block * s, int * flags, char * data)
{
  struct reiserfs_super_block * rs;
  struct reiserfs_transaction_handle th ;
  unsigned long blocks;
  unsigned long mount_options = 0;
#ifdef CONFIG_REISERFS_IMMUTABLE_HACK
  int do_update_suidimmu;
#endif

  rs = SB_DISK_SUPER_BLOCK (s);

  if (!parse_options(data, &mount_options, &blocks))
  	return 0;

#ifdef CONFIG_REISERFS_IMMUTABLE_HACK
    if (reiserfs_suid_immutable(s) && !capable(CAP_LINUX_IMMUTABLE))
	return -EPERM;
    do_update_suidimmu = 0;
    if (test_bit(SUID_IMMUTABLE, &mount_options)) {
      if (!capable(CAP_LINUX_IMMUTABLE))
	return -EPERM;
#ifdef CONFIG_REISERFS_IMMUTABLE_HACK_DEBUG
	printk("reiserfs: suidimmu ON\n");
#endif /* CONFIG_REISERFS_IMMUTABLE_HACK_DEBUG */
      if (!reiserfs_suid_immutable(s))
	do_update_suidimmu = 1;
      set_bit(SUID_IMMUTABLE, &(s->u.reiserfs_sb.s_mount_opt));
    } else {
#ifdef CONFIG_REISERFS_IMMUTABLE_HACK_DEBUG
	printk("reiserfs: suidimmu OFF\n");
#endif /* CONFIG_REISERFS_IMMUTABLE_HACK_DEBUG */
      if (reiserfs_suid_immutable(s))
	do_update_suidimmu = 1;
      clear_bit(SUID_IMMUTABLE, &(s->u.reiserfs_sb.s_mount_opt));
    }

    /*
     * update S_IMMUTABLE bit on inode->i_flags
     */
    if (do_update_suidimmu)
      update_suidimmu(s, reiserfs_suid_immutable(s));
#endif /* CONFIG_REISERFS_IMMUTABLE_HACK */

#define SET_OPT( opt, bits, super )					\
    if( ( bits ) & ( 1 << ( opt ) ) )					\
	    ( super ) -> u.reiserfs_sb.s_mount_opt |= ( 1 << ( opt ) )

  /* set options in the super-block bitmask */
  SET_OPT( NOTAIL, mount_options, s );
  SET_OPT( REISERFS_NO_BORDER, mount_options, s );
  SET_OPT( REISERFS_NO_UNHASHED_RELOCATION, mount_options, s );
  SET_OPT( REISERFS_HASHED_RELOCATION, mount_options, s );
  SET_OPT( REISERFS_TEST4, mount_options, s );
#undef SET_OPT

  if(blocks) {
      int rc = reiserfs_resize(s, blocks);
      if (rc != 0)
	  return rc;
  }

  if ((unsigned long)(*flags & MS_RDONLY) == (s->s_flags & MS_RDONLY)) {
    /* there is nothing to do to remount read-only fs as read-only fs */
    return 0;
  }
  
  if (*flags & MS_RDONLY) {
    /* try to remount file system with read-only permissions */
    if (sb_state(rs) == REISERFS_VALID_FS || s->u.reiserfs_sb.s_mount_state != REISERFS_VALID_FS) {
      return 0;
    }

    journal_begin(&th, s, 10) ;
    /* Mounting a rw partition read-only. */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    set_sb_state( rs, s->u.reiserfs_sb.s_mount_state );
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
  } else {
    s->u.reiserfs_sb.s_mount_state = sb_state(rs) ;
    s->s_flags &= ~MS_RDONLY ; /* now it is safe to call journal_begin */
    journal_begin(&th, s, 10) ;

    /* Remounting a read-only partition read-write. */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    s->u.reiserfs_sb.s_mount_state = sb_state(rs);
    s->s_flags &= ~MS_RDONLY;
    set_sb_state( rs, REISERFS_ERROR_FS );
    /* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
    s->u.reiserfs_sb.s_mount_state = REISERFS_VALID_FS ;
  }
  /* this will force a full flush of all journal lists */
  SB_JOURNAL(s)->j_must_wait = 1 ;
  journal_end(&th, s, 10) ;

  if (!( *flags & MS_RDONLY ) )
    finish_unfinished( s );

  return 0;
}
Example No. 13
static int show_journal(struct seq_file *m, void *unused)
{
	struct super_block *sb = m->private;
	struct reiserfs_sb_info *r = REISERFS_SB(sb);
	struct reiserfs_super_block *rs = r->s_rs;
	struct journal_params *jp = &rs->s_v1.s_journal;
	char b[BDEVNAME_SIZE];

	seq_printf(m,		/* on-disk fields */
		   "jp_journal_1st_block: \t%i\n"
		   "jp_journal_dev: \t%s[%x]\n"
		   "jp_journal_size: \t%i\n"
		   "jp_journal_trans_max: \t%i\n"
		   "jp_journal_magic: \t%i\n"
		   "jp_journal_max_batch: \t%i\n"
		   "jp_journal_max_commit_age: \t%i\n"
		   "jp_journal_max_trans_age: \t%i\n"
		   /* incore fields */
		   "j_1st_reserved_block: \t%i\n"
		   "j_state: \t%li\n"
		   "j_trans_id: \t%u\n"
		   "j_mount_id: \t%lu\n"
		   "j_start: \t%lu\n"
		   "j_len: \t%lu\n"
		   "j_len_alloc: \t%lu\n"
		   "j_wcount: \t%i\n"
		   "j_bcount: \t%lu\n"
		   "j_first_unflushed_offset: \t%lu\n"
		   "j_last_flush_trans_id: \t%u\n"
		   "j_trans_start_time: \t%li\n"
		   "j_list_bitmap_index: \t%i\n"
		   "j_must_wait: \t%i\n"
		   "j_next_full_flush: \t%i\n"
		   "j_next_async_flush: \t%i\n"
		   "j_cnode_used: \t%i\n" "j_cnode_free: \t%i\n" "\n"
		   /* reiserfs_proc_info_data_t.journal fields */
		   "in_journal: \t%12lu\n"
		   "in_journal_bitmap: \t%12lu\n"
		   "in_journal_reusable: \t%12lu\n"
		   "lock_journal: \t%12lu\n"
		   "lock_journal_wait: \t%12lu\n"
		   "journal_begin: \t%12lu\n"
		   "journal_relock_writers: \t%12lu\n"
		   "journal_relock_wcount: \t%12lu\n"
		   "mark_dirty: \t%12lu\n"
		   "mark_dirty_already: \t%12lu\n"
		   "mark_dirty_notjournal: \t%12lu\n"
		   "restore_prepared: \t%12lu\n"
		   "prepare: \t%12lu\n"
		   "prepare_retry: \t%12lu\n",
		   DJP(jp_journal_1st_block),
		   bdevname(SB_JOURNAL(sb)->j_dev_bd, b),
		   DJP(jp_journal_dev),
		   DJP(jp_journal_size),
		   DJP(jp_journal_trans_max),
		   DJP(jp_journal_magic),
		   DJP(jp_journal_max_batch),
		   SB_JOURNAL(sb)->j_max_commit_age,
		   DJP(jp_journal_max_trans_age),
		   JF(j_1st_reserved_block),
		   JF(j_state),
		   JF(j_trans_id),
		   JF(j_mount_id),
		   JF(j_start),
		   JF(j_len),
		   JF(j_len_alloc),
		   atomic_read(&r->s_journal->j_wcount),
		   JF(j_bcount),
		   JF(j_first_unflushed_offset),
		   JF(j_last_flush_trans_id),
		   JF(j_trans_start_time),
		   JF(j_list_bitmap_index),
		   JF(j_must_wait),
		   JF(j_next_full_flush),
		   JF(j_next_async_flush),
		   JF(j_cnode_used),
		   JF(j_cnode_free),
		   SFPJ(in_journal),
		   SFPJ(in_journal_bitmap),
		   SFPJ(in_journal_reusable),
		   SFPJ(lock_journal),
		   SFPJ(lock_journal_wait),
		   SFPJ(journal_being),
		   SFPJ(journal_relock_writers),
		   SFPJ(journal_relock_wcount),
		   SFPJ(mark_dirty),
		   SFPJ(mark_dirty_already),
		   SFPJ(mark_dirty_notjournal),
		   SFPJ(restore_prepared), SFPJ(prepare), SFPJ(prepare_retry)
	    );
	return 0;
}
Example No. 14
static int reiserfs_remount (struct super_block * s, int * mount_flags, char * data)
{
  struct reiserfs_super_block * rs;
  struct reiserfs_transaction_handle th ;
  unsigned long blocks;
  unsigned long mount_options = 0;

  rs = SB_DISK_SUPER_BLOCK (s);

  if (!reiserfs_parse_options(s, data, &mount_options, &blocks))
  	return 0;

#define SET_OPT( opt, bits, super )					\
    if( ( bits ) & ( 1 << ( opt ) ) )					\
	    ( super ) -> u.reiserfs_sb.s_mount_opt |= ( 1 << ( opt ) )

  /* set options in the super-block bitmask */
  SET_OPT( REISERFS_SMALLTAIL, mount_options, s );
  SET_OPT( REISERFS_LARGETAIL, mount_options, s );
  SET_OPT( REISERFS_NO_BORDER, mount_options, s );
  SET_OPT( REISERFS_NO_UNHASHED_RELOCATION, mount_options, s );
  SET_OPT( REISERFS_HASHED_RELOCATION, mount_options, s );
  SET_OPT( REISERFS_TEST4, mount_options, s );
  SET_OPT( REISERFS_ATTRS, mount_options, s );
#undef SET_OPT

  handle_attrs( s );

  if(blocks) {
      int rc = reiserfs_resize(s, blocks);
      if (rc != 0)
	  return rc;
  }

  if (*mount_flags & MS_RDONLY) {
    /* remount read-only */
    if (s->s_flags & MS_RDONLY)
      /* it is read-only already */
      return 0;
    /* try to remount file system with read-only permissions */
    if (sb_state(rs) == REISERFS_VALID_FS || s->u.reiserfs_sb.s_mount_state != REISERFS_VALID_FS) {
      return 0;
    }

    journal_begin(&th, s, 10) ;
    /* Mounting a rw partition read-only. */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    set_sb_state( rs, s->u.reiserfs_sb.s_mount_state );
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
  } else {
    /* remount read-write */
    if (!(s->s_flags & MS_RDONLY))
	return 0; /* We are read-write already */

    s->u.reiserfs_sb.s_mount_state = sb_state(rs) ;
    s->s_flags &= ~MS_RDONLY ; /* now it is safe to call journal_begin */
    journal_begin(&th, s, 10) ;
    
    /* Remounting a read-only partition read-write. */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    s->u.reiserfs_sb.s_mount_state = sb_state(rs);
    s->s_flags &= ~MS_RDONLY;
    set_sb_state( rs, REISERFS_ERROR_FS );
    /* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
    s->s_dirt = 0;
    s->u.reiserfs_sb.s_mount_state = REISERFS_VALID_FS ;
  }
  /* this will force a full flush of all journal lists */
  SB_JOURNAL(s)->j_must_wait = 1 ;
  journal_end(&th, s, 10) ;

  if (!( *mount_flags & MS_RDONLY ) )
    finish_unfinished( s );

  return 0;
}
Example No. 15
int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
{
	int err = 0;
	struct reiserfs_super_block *sb;
	struct reiserfs_bitmap_info *bitmap;
	struct reiserfs_bitmap_info *info;
	struct reiserfs_bitmap_info *old_bitmap = SB_AP_BITMAP(s);
	struct buffer_head *bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;

	struct reiserfs_list_bitmap *jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];

	unsigned long int block_count, free_blocks;
	int i;
	int copy_size;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
	    != REISERFS_DISK_OFFSET_IN_BYTES) {
		printk
		    ("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}

	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) -
			(reiserfs_bmap_count(s) - 1) * s->s_blocksize * 8;

	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / (s->s_blocksize * 8);
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new)
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;

	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr = reiserfs_bmap_count(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {
		/* reallocate journal bitmaps */
		if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
			printk
			    ("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
			return -ENOMEM;
		}
		/* the new journal bitmaps are zero filled, now we copy in the bitmap
		 ** node pointers from the old journal bitmap structs, and then
		 ** transfer the new data structures into the journal struct.
		 **
		 ** using the copy_size var below allows this code to work for
		 ** both shrinking and expanding the FS.
		 */
		copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
		copy_size =
		    copy_size * sizeof(struct reiserfs_list_bitmap_node *);
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			struct reiserfs_bitmap_node **node_tmp;
			jb = SB_JOURNAL(s)->j_list_bitmap + i;
			memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);

			/* just in case vfree schedules on us, copy the new
			 ** pointer into the journal struct before freeing the
			 ** old one
			 */
			node_tmp = jb->bitmaps;
			jb->bitmaps = jbitmap[i].bitmaps;
			vfree(node_tmp);
		}

		/* allocate additional bitmap blocks, reallocate array of bitmap
		 * block pointers */
		bitmap =
		    vzalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
		if (!bitmap) {
			/* Journal bitmaps are still supersized, but the memory isn't
			 * leaked, so I guess it's ok */
			printk("reiserfs_resize: unable to allocate memory.\n");
			return -ENOMEM;
		}
		for (i = 0; i < bmap_nr; i++)
			bitmap[i] = old_bitmap[i];

		/* This doesn't go through the journal, but it doesn't have to.
		 * The changes are still atomic: We're synced up when the journal
		 * transaction begins, and the new bitmaps don't matter if the
		 * transaction fails. */
		for (i = bmap_nr; i < bmap_nr_new; i++) {
			/* don't use read_bitmap_block since it will cache
			 * the uninitialized bitmap */
			bh = sb_bread(s, i * s->s_blocksize * 8);
			if (!bh) {
				vfree(bitmap);
				return -EIO;
			}
			memset(bh->b_data, 0, sb_blocksize(sb));
			reiserfs_set_le_bit(0, bh->b_data);
			reiserfs_cache_bitmap_metadata(s, bh, bitmap + i);

			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			reiserfs_write_unlock(s);
			sync_dirty_buffer(bh);
			reiserfs_write_lock(s);
			// update bitmap_info stuff
			bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
			brelse(bh);
		}
		/* free old bitmap blocks array */
		SB_AP_BITMAP(s) = bitmap;
		vfree(old_bitmap);
	}

	/* begin transaction, if there was an error, it's fine. Yes, we have
	 * incorrect bitmaps now, but none of it is ever going to touch the
	 * disk anyway. */
	err = journal_begin(&th, s, 10);
	if (err)
		return err;

	/* Extend old last bitmap block - new blocks have been made available */
	info = SB_AP_BITMAP(s) + bmap_nr - 1;
	bh = reiserfs_read_bitmap_block(s, bmap_nr - 1);
	if (!bh) {
		int jerr = journal_end(&th, s, 10);
		if (jerr)
			return jerr;
		return -EIO;
	}

	reiserfs_prepare_for_journal(s, bh, 1);
	for (i = block_r; i < s->s_blocksize * 8; i++)
		reiserfs_clear_le_bit(i, bh->b_data);
	info->free_count += s->s_blocksize * 8 - block_r;

	journal_mark_dirty(&th, s, bh);
	brelse(bh);

	/* Correct new last bitmap block - It may not be full */
	info = SB_AP_BITMAP(s) + bmap_nr_new - 1;
	bh = reiserfs_read_bitmap_block(s, bmap_nr_new - 1);
	if (!bh) {
		int jerr = journal_end(&th, s, 10);
		if (jerr)
			return jerr;
		return -EIO;
	}

	reiserfs_prepare_for_journal(s, bh, 1);
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
		reiserfs_set_le_bit(i, bh->b_data);
	journal_mark_dirty(&th, s, bh);
	brelse(bh);

	info->free_count -= s->s_blocksize * 8 - block_r_new;
	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	free_blocks = SB_FREE_BLOCKS(s);
	PUT_SB_FREE_BLOCKS(s,
			   free_blocks + (block_count_new - block_count -
					  (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new);

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));

	SB_JOURNAL(s)->j_must_wait = 1;
	return journal_end(&th, s, 10);
}
Example No. 16
/* 
** We pre-allocate 8 blocks.  Pre-allocation is used for files > 16 KB only.
** This lowers fragmentation on large files by grabbing a contiguous set of
** blocks at once.  It also limits the number of times the bitmap block is
** logged by making X number of allocation changes in a single transaction.
**
** We are using a border to divide the disk into two parts.  The first part
** is used for tree blocks, which have a very high turnover rate (they
** are constantly allocated then freed)
**
** The second part of the disk is for the unformatted nodes of larger files.
** Putting them away from the tree blocks lowers fragmentation, and makes
** it easier to group files together.  There are a number of different
** allocation schemes being tried right now, each is documented below.
**
** A great deal of the allocator's speed comes because reiserfs_get_block
** sends us the block number of the last unformatted node in the file.  Once
** a given block is allocated past the border, we don't collide with the
** blocks near the search_start again.
** 
*/
int reiserfs_new_unf_blocknrs2 (struct reiserfs_transaction_handle *th, 
				struct inode       * p_s_inode,
				unsigned long      * free_blocknrs,
				unsigned long        search_start)
{
  int ret=0, blks_gotten=0;
  unsigned long border = 0;
  unsigned long bstart = 0;
  unsigned long hash_in, hash_out;
  unsigned long saved_search_start=search_start;
  int allocated[PREALLOCATION_SIZE];
  int blks;

  if (!reiserfs_no_border(th->t_super)) {
    /* we default to having the border at the 10% mark of the disk.  This
    ** is an arbitrary decision and it needs tuning.  It also needs a limit
    ** to prevent it from taking too much space on huge drives.
    */
    bstart = (SB_BLOCK_COUNT(th->t_super) / 10); 
  }
  if (!reiserfs_no_unhashed_relocation(th->t_super)) {
    /* this is a very simple first attempt at preventing too much grouping
    ** around the border value.  Since k_dir_id is never larger than the
    ** highest allocated oid, it is far from perfect, and files will tend
    ** to be grouped towards the start of the border
    */
    border = le32_to_cpu(INODE_PKEY(p_s_inode)->k_dir_id) % (SB_BLOCK_COUNT(th->t_super) - bstart - 1) ;
  } else {
    /* why would we want to declare a local variable to this if statement
    ** name border????? -chris
    ** unsigned long border = 0;
    */
    if (!reiserfs_hashed_relocation(th->t_super)) {
      hash_in = le32_to_cpu((INODE_PKEY(p_s_inode))->k_dir_id);
				/* I wonder if the CPU cost of the
                                   hash will obscure the layout
                                   effect? Of course, whether that
                                   effect is good or bad we don't
                                   know.... :-) */
      
      hash_out = keyed_hash(((char *) (&hash_in)), 4);
      border = hash_out % (SB_BLOCK_COUNT(th->t_super) - bstart - 1) ;
    }
  }
  border += bstart ;
  allocated[0] = 0 ; /* important.  Allows a check later on to see if at
                      * least one block was allocated.  This prevents false
		      * out-of-disk-space returns
		      */

  if ( (p_s_inode->i_size < 4 * 4096) || 
       !(S_ISREG(p_s_inode->i_mode)) )
    {
      if ( search_start < border 
	   || (
				/* allow us to test whether it is a
                                   good idea to prevent files from
                                   getting too far away from their
                                   packing locality by some unexpected
                                   means.  This might be poor code for
                                   directories whose files total
                                   larger than 1/10th of the disk, and
                                   it might be good code for
                                   suffering from old insertions when the disk
                                   was almost full. */
               /* changed from !reiserfs_test3(th->t_super), which doesn't
               ** seem like a good idea.  Think about adding blocks to
               ** a large file.  If you've allocated 10% of the disk
               ** in contiguous blocks, you start over at the border value
               ** for every new allocation.  This throws away all the
               ** information sent in about the last block that was allocated
               ** in the file.  Not a good general case at all.
               ** -chris
               */
	       reiserfs_test4(th->t_super) && 
	       (search_start > border + (SB_BLOCK_COUNT(th->t_super) / 10))
	       )
	   )
	search_start=border;
  
      ret = do_reiserfs_new_blocknrs(th, free_blocknrs, search_start, 
				     1/*amount_needed*/, 
				     0/*use reserved blocks for root */,
				     1/*for_formatted*/,
				     0/*for prealloc */) ;  
      return ret;
    }

  /* take a block off the prealloc list and return it -Hans */
  if (p_s_inode->u.reiserfs_i.i_prealloc_count > 0) {
    p_s_inode->u.reiserfs_i.i_prealloc_count--;
    *free_blocknrs = p_s_inode->u.reiserfs_i.i_prealloc_block++;

    /* if no more preallocated blocks, remove inode from list */
    if (! p_s_inode->u.reiserfs_i.i_prealloc_count) {
      list_del(&p_s_inode->u.reiserfs_i.i_prealloc_list);
    }
    
    return ret;
  }

				/* else get a new preallocation for the file */
  reiserfs_discard_prealloc (th, p_s_inode);
  /* this uses the last preallocated block as the search_start.  discard
  ** prealloc does not zero out this number.
  */
  if (search_start <= p_s_inode->u.reiserfs_i.i_prealloc_block) {
    search_start = p_s_inode->u.reiserfs_i.i_prealloc_block;
  }
  
  /* doing the compare again forces search_start to be >= the border,
  ** even if the file already had prealloction done.  This seems extra,
  ** and should probably be removed
  */
  if ( search_start < border ) search_start=border; 

  /* If the disk free space is already below 10% we should 
  ** start looking for the free blocks from the beginning 
  ** of the partition, before the border line.
  */
  if ( SB_FREE_BLOCKS(th->t_super) <= (SB_BLOCK_COUNT(th->t_super) / 10) ) {
    search_start=saved_search_start;
  }

  *free_blocknrs = 0;
  blks = PREALLOCATION_SIZE-1;
  for (blks_gotten=0; blks_gotten<PREALLOCATION_SIZE; blks_gotten++) {
    ret = do_reiserfs_new_blocknrs(th, free_blocknrs, search_start, 
				   1/*amount_needed*/, 
				   0/*for root reserved*/,
				   1/*for_formatted*/,
				   (blks_gotten > 0)/*must_be_contiguous*/) ;
    /* if we didn't find a block this time, adjust blks to reflect
    ** the actual number of blocks allocated
    */ 
    if (ret != CARRY_ON) {
      blks = blks_gotten > 0 ? (blks_gotten - 1) : 0 ;
      break ;
    }
    allocated[blks_gotten]= *free_blocknrs;
#ifdef CONFIG_REISERFS_CHECK
    if ( (blks_gotten>0) && (allocated[blks_gotten] - allocated[blks_gotten-1]) != 1 ) {
      /* this should be caught by new_blocknrs now, checking code */
      reiserfs_warning("yura-1, reiserfs_new_unf_blocknrs2: pre-allocated not contiguous set of blocks!\n") ;
      reiserfs_free_block(th, allocated[blks_gotten]);
      blks = blks_gotten-1; 
      break;
    }
#endif
    if (blks_gotten==0) {
      p_s_inode->u.reiserfs_i.i_prealloc_block = *free_blocknrs;
    }
    search_start = *free_blocknrs; 
    *free_blocknrs = 0;
  }
  p_s_inode->u.reiserfs_i.i_prealloc_count = blks;
  *free_blocknrs = p_s_inode->u.reiserfs_i.i_prealloc_block;
  p_s_inode->u.reiserfs_i.i_prealloc_block++;

  /* if the inode has preallocated blocks, link it to the list */
  if (p_s_inode->u.reiserfs_i.i_prealloc_count) {
    list_add(&p_s_inode->u.reiserfs_i.i_prealloc_list,
	     &SB_JOURNAL(th->t_super)->j_prealloc_list);
  } 
  /* we did actually manage to get 1 block */
  if (ret != CARRY_ON && allocated[0] > 0) {
    return CARRY_ON ;
  }
  /* NO_MORE_UNUSED_CONTIGUOUS_BLOCKS should only mean something to
  ** the preallocation code.  The rest of the filesystem asks for a block
  ** and should either get it, or know the disk is full.  The code
  ** above should never allow ret == NO_MORE_UNUSED_CONTIGUOUS_BLOCKS,
  ** as it doesn't send for_prealloc = 1 to do_reiserfs_new_blocknrs
  ** unless it has already successfully allocated at least one block.
  ** Just in case, we translate into a return value the rest of the
  ** filesystem can understand.
  **
  ** It is an error to change this without making the
  ** rest of the filesystem understand NO_MORE_UNUSED_CONTIGUOUS_BLOCKS
  ** If you consider it a bug to return NO_DISK_SPACE here, fix the rest
  ** of the fs first.
  */
  if (ret == NO_MORE_UNUSED_CONTIGUOUS_BLOCKS) {
#ifdef CONFIG_REISERFS_CHECK
    reiserfs_warning("reiser-2015: this shouldn't happen, may cause false out of disk space error");
#endif
     return NO_DISK_SPACE; 
  }
  return ret;
}
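The border placement at the top of this function reduces to: reserve roughly the first 10% of the disk for tree blocks (bstart), then map the directory id into the remaining region so files from different directories get different starting points. A small sketch of that computation (toy_hash() is an arbitrary stand-in, not the real keyed_hash()):

#include <stdio.h>

static unsigned long toy_hash(unsigned long x) /* stand-in for keyed_hash() */
{
    return x * 2654435761UL; /* Knuth multiplicative hash */
}

int main(void)
{
    unsigned long block_count = 1000000;
    unsigned long bstart = block_count / 10; /* tree-block region */
    unsigned long dir_id = 1234;

    /* same shape as the border computation above */
    unsigned long border = toy_hash(dir_id) % (block_count - bstart - 1);
    border += bstart;

    printf("unformatted nodes for dir %lu start near block %lu\n",
           dir_id, border);
    return 0;
}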