Example #1
static void determine_search_start(reiserfs_blocknr_hint_t *hint,
                                   int amount_needed)
{
    struct super_block *s = hint->th->t_super;
    int unfm_hint;

    hint->beg = 0;
    hint->end = SB_BLOCK_COUNT(s) - 1;

    /* This is the former border algorithm, now with a tunable border offset */
    if (concentrating_formatted_nodes(s))
        set_border_in_hint(s, hint);

#ifdef DISPLACE_NEW_PACKING_LOCALITIES
    /* whenever we create a new directory, we displace it.  At first we
       hash for a location; later we might look for a moderately empty
       place for it */
    if (displacing_new_packing_localities(s)
            && hint->th->displace_new_blocks) {
        displace_new_packing_locality(hint);

        /* we do not continue determine_search_start,
         * if new packing locality is being displaced */
        return;
    }
#endif

    /* all persons should feel encouraged to add more special cases here and
     * test them */

    if (displacing_large_files(s) && !hint->formatted_node
            && this_blocknr_allocation_would_make_it_a_large_file(hint)) {
        displace_large_file(hint);
        return;
    }

    /* if none of our special cases is relevant, use the left neighbor in the
       tree order of the new node we are allocating for */
    if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes,s)) {
        hash_formatted_node(hint);
        return;
    }

    unfm_hint = get_left_neighbor(hint);

    /* Mimic the old block allocator behaviour: if the VFS allowed preallocation,
       new blocks are displaced based on the directory ID. Also, if the suggested
       search_start is less than the last preallocated block, we start searching
       from it, assuming that HDD data flow is faster in the forward direction */
    if ( TEST_OPTION(old_way, s)) {
        if (!hint->formatted_node) {
            if ( !reiserfs_hashed_relocation(s))
                old_way(hint);
            else if (!reiserfs_no_unhashed_relocation(s))
                old_hashed_relocation(hint);

            if ( hint->inode && hint->search_start < REISERFS_I(hint->inode)->i_prealloc_block)
                hint->search_start = REISERFS_I(hint->inode)->i_prealloc_block;
        }
        return;
    }

    /* This is an approach proposed by Hans */
    if ( TEST_OPTION(hundredth_slices, s) && ! (displacing_large_files(s) && !hint->formatted_node)) {
        hundredth_slices(hint);
        return;
    }

    /* old_hashed_relocation only works on unformatted */
    if (!unfm_hint && !hint->formatted_node &&
            TEST_OPTION(old_hashed_relocation, s))
    {
        old_hashed_relocation(hint);
    }
    /* new_hashed_relocation works with both formatted/unformatted nodes */
    if ((!unfm_hint || hint->formatted_node) &&
            TEST_OPTION(new_hashed_relocation, s))
    {
        new_hashed_relocation(hint);
    }
    /* dirid grouping works only on unformatted nodes */
    if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups,s))
    {
        dirid_groups(hint);
    }

#ifdef DISPLACE_NEW_PACKING_LOCALITIES
    if (hint->formatted_node && TEST_OPTION(dirid_groups,s))
    {
        dirid_groups(hint);
    }
#endif

    /* oid grouping works only on unformatted nodes */
    if (!unfm_hint && !hint->formatted_node && TEST_OPTION(oid_groups,s))
    {
        oid_groups(hint);
    }
    return;
}
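
For orientation, here is a minimal, hypothetical caller sketch. It only fills in the hint fields that determine_search_start() above actually reads (th, inode, search_start, formatted_node); the helper name example_prepare_hint and the surrounding call chain are assumptions, not the real reiserfs allocation path.

/* Hypothetical caller sketch -- not the actual reiserfs allocation path.
 * Only the fields read by determine_search_start() above are set; the real
 * reiserfs_blocknr_hint_t has more members. */
static void example_prepare_hint(struct reiserfs_transaction_handle *th,
                                 struct inode *inode,
                                 unsigned long last_allocated_block)
{
    reiserfs_blocknr_hint_t hint = {0};

    hint.th = th;                 /* supplies t_super and displace_new_blocks */
    hint.inode = inode;           /* may be NULL for some allocations */
    hint.search_start = last_allocated_block; /* e.g. the file's last block */
    hint.formatted_node = 0;      /* 0: unformatted data, 1: formatted tree node */

    /* picks the final hint.search_start according to the mount options */
    determine_search_start(&hint, 1 /* amount_needed */);
}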
Example #2
static int show_super(struct seq_file *m, void *unused)
{
	struct super_block *sb = m->private;
	struct reiserfs_sb_info *r = REISERFS_SB(sb);

	seq_printf(m, "state: \t%s\n"
		   "mount options: \t%s%s%s%s%s%s%s%s%s%s%s\n"
		   "gen. counter: \t%i\n"
		   "s_disk_reads: \t%i\n"
		   "s_disk_writes: \t%i\n"
		   "s_fix_nodes: \t%i\n"
		   "s_do_balance: \t%i\n"
		   "s_unneeded_left_neighbor: \t%i\n"
		   "s_good_search_by_key_reada: \t%i\n"
		   "s_bmaps: \t%i\n"
		   "s_bmaps_without_search: \t%i\n"
		   "s_direct2indirect: \t%i\n"
		   "s_indirect2direct: \t%i\n"
		   "\n"
		   "max_hash_collisions: \t%i\n"
		   "breads: \t%lu\n"
		   "bread_misses: \t%lu\n"
		   "search_by_key: \t%lu\n"
		   "search_by_key_fs_changed: \t%lu\n"
		   "search_by_key_restarted: \t%lu\n"
		   "insert_item_restarted: \t%lu\n"
		   "paste_into_item_restarted: \t%lu\n"
		   "cut_from_item_restarted: \t%lu\n"
		   "delete_solid_item_restarted: \t%lu\n"
		   "delete_item_restarted: \t%lu\n"
		   "leaked_oid: \t%lu\n"
		   "leaves_removable: \t%lu\n",
		   SF(s_mount_state) == REISERFS_VALID_FS ?
		   "REISERFS_VALID_FS" : "REISERFS_ERROR_FS",
		   reiserfs_r5_hash(sb) ? "FORCE_R5 " : "",
		   reiserfs_rupasov_hash(sb) ? "FORCE_RUPASOV " : "",
		   reiserfs_tea_hash(sb) ? "FORCE_TEA " : "",
		   reiserfs_hash_detect(sb) ? "DETECT_HASH " : "",
		   reiserfs_no_border(sb) ? "NO_BORDER " : "BORDER ",
		   reiserfs_no_unhashed_relocation(sb) ?
		   "NO_UNHASHED_RELOCATION " : "",
		   reiserfs_hashed_relocation(sb) ? "UNHASHED_RELOCATION " : "",
		   reiserfs_test4(sb) ? "TEST4 " : "",
		   have_large_tails(sb) ? "TAILS " : have_small_tails(sb) ?
		   "SMALL_TAILS " : "NO_TAILS ",
		   replay_only(sb) ? "REPLAY_ONLY " : "",
		   convert_reiserfs(sb) ? "CONV " : "",
		   atomic_read(&r->s_generation_counter),
		   SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
		   SF(s_do_balance), SF(s_unneeded_left_neighbor),
		   SF(s_good_search_by_key_reada), SF(s_bmaps),
		   SF(s_bmaps_without_search), SF(s_direct2indirect),
		   SF(s_indirect2direct), SFP(max_hash_collisions), SFP(breads),
		   SFP(bread_miss), SFP(search_by_key),
		   SFP(search_by_key_fs_changed), SFP(search_by_key_restarted),
		   SFP(insert_item_restarted), SFP(paste_into_item_restarted),
		   SFP(cut_from_item_restarted),
		   SFP(delete_solid_item_restarted), SFP(delete_item_restarted),
		   SFP(leaked_oid), SFP(leaves_removable));

	return 0;
}
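
For context, here is a minimal sketch of how a seq_file show routine like show_super() is commonly exposed through procfs. The real reiserfs code registers its entries through its own table-driven helpers in fs/reiserfs/procfs.c, and the proc API shown (single_open, proc_create_data, PDE_DATA) varies across kernel versions; the open helper, the fops struct, and the "super" entry name here are assumptions.

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Hypothetical wiring sketch; not the actual reiserfs registration code. */
static int show_super_open(struct inode *inode, struct file *file)
{
	/* single_open() stores its third argument in m->private,
	 * which is where show_super() expects the super_block */
	return single_open(file, show_super, PDE_DATA(inode));
}

static const struct file_operations show_super_fops = {
	.open    = show_super_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* register "super" under a per-filesystem /proc directory */
static void example_register(struct proc_dir_entry *parent,
			     struct super_block *sb)
{
	proc_create_data("super", 0, parent, &show_super_fops, sb);
}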
Example #3
int reiserfs_super_in_proc( char *buffer, char **start, off_t offset,
			    int count, int *eof, void *data )
{
	struct super_block *sb;
	struct reiserfs_sb_info *r;
	int len = 0;
    
	sb = procinfo_prologue( ( kdev_t ) ( long ) data );
	if( sb == NULL )
		return -ENOENT;
	r = &sb->u.reiserfs_sb;
	len += sprintf( &buffer[ len ], 
			"state: \t%s\n"
			"mount options: \t%s%s%s%s%s%s%s%s%s%s%s%s\n"
			"gen. counter: \t%i\n"
			"s_kmallocs: \t%i\n"
			"s_disk_reads: \t%i\n"
			"s_disk_writes: \t%i\n"
			"s_fix_nodes: \t%i\n"
			"s_do_balance: \t%i\n"
			"s_unneeded_left_neighbor: \t%i\n"
			"s_good_search_by_key_reada: \t%i\n"
			"s_bmaps: \t%i\n"
			"s_bmaps_without_search: \t%i\n"
			"s_direct2indirect: \t%i\n"
			"s_indirect2direct: \t%i\n"
			"\n"
			"max_hash_collisions: \t%i\n"

			"breads: \t%lu\n"
			"bread_misses: \t%lu\n"

			"search_by_key: \t%lu\n"
			"search_by_key_fs_changed: \t%lu\n"
			"search_by_key_restarted: \t%lu\n"
			
			"insert_item_restarted: \t%lu\n"
			"paste_into_item_restarted: \t%lu\n"
			"cut_from_item_restarted: \t%lu\n"
			"delete_solid_item_restarted: \t%lu\n"
			"delete_item_restarted: \t%lu\n"

			"leaked_oid: \t%lu\n"
			"leaves_removable: \t%lu\n",

			SF( s_mount_state ) == REISERFS_VALID_FS ?
			"REISERFS_VALID_FS" : "REISERFS_ERROR_FS",
			reiserfs_r5_hash( sb ) ? "FORCE_R5 " : "",
			reiserfs_rupasov_hash( sb ) ? "FORCE_RUPASOV " : "",
			reiserfs_tea_hash( sb ) ? "FORCE_TEA " : "",
			reiserfs_hash_detect( sb ) ? "DETECT_HASH " : "",
			reiserfs_no_border( sb ) ? "NO_BORDER " : "BORDER ",
			reiserfs_no_unhashed_relocation( sb ) ? "NO_UNHASHED_RELOCATION " : "",
			reiserfs_hashed_relocation( sb ) ? "UNHASHED_RELOCATION " : "",
			reiserfs_test4( sb ) ? "TEST4 " : "",
			have_large_tails( sb ) ? "TAILS " : have_small_tails(sb)?"SMALL_TAILS ":"NO_TAILS ",
			replay_only( sb ) ? "REPLAY_ONLY " : "",
			reiserfs_dont_log( sb ) ? "DONT_LOG " : "LOG ",
			convert_reiserfs( sb ) ? "CONV " : "",

			atomic_read( &r -> s_generation_counter ),
			SF( s_kmallocs ),
			SF( s_disk_reads ),
			SF( s_disk_writes ),
			SF( s_fix_nodes ),
			SF( s_do_balance ),
			SF( s_unneeded_left_neighbor ),
			SF( s_good_search_by_key_reada ),
			SF( s_bmaps ),
			SF( s_bmaps_without_search ),
			SF( s_direct2indirect ),
			SF( s_indirect2direct ),
			SFP( max_hash_collisions ),
			SFP( breads ),
			SFP( bread_miss ),
			SFP( search_by_key ),
			SFP( search_by_key_fs_changed ),
			SFP( search_by_key_restarted ),

			SFP( insert_item_restarted ),
			SFP( paste_into_item_restarted ),
			SFP( cut_from_item_restarted ),
			SFP( delete_solid_item_restarted ),
			SFP( delete_item_restarted ),

			SFP( leaked_oid ),
			SFP( leaves_removable ) );

	procinfo_epilogue( sb );
	return reiserfs_proc_tail( len, buffer, start, offset, count, eof );
}
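
For context, a sketch of how a 2.4-era read_proc handler such as this one would typically be registered. The real reiserfs code goes through its own registration helpers; the parent directory argument, the "super" entry name, and the device-number encoding below are assumptions.

#include <linux/proc_fs.h>

/* Hypothetical registration sketch for the 2.4-era read_proc interface;
 * not the actual reiserfs registration code. */
static void example_register_super(struct proc_dir_entry *parent,
				   struct super_block *sb)
{
	struct proc_dir_entry *e;

	e = create_proc_entry("super", 0, parent);
	if (e) {
		e->read_proc = reiserfs_super_in_proc;
		/* matches the (kdev_t)(long)data cast done in the handler */
		e->data = (void *)(long)sb->s_dev;
	}
}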
Example #4
/*
** We pre-allocate 8 blocks.  Pre-allocation is used for files > 16 KB only.
** This lowers fragmentation on large files by grabbing a contiguous set of
** blocks at once.  It also limits the number of times the bitmap block is
** logged, by batching several allocation changes into a single transaction.
**
** We are using a border to divide the disk into two parts.  The first part
** is used for tree blocks, which have a very high turnover rate (they
** are constantly allocated and then freed).
**
** The second part of the disk is for the unformatted nodes of larger files.
** Putting them away from the tree blocks lowers fragmentation and makes
** it easier to group files together.  There are a number of different
** allocation schemes being tried right now; each is documented below.
**
** A great deal of the allocator's speed comes from the fact that
** reiserfs_get_block sends us the block number of the last unformatted node
** in the file.  Once a given block is allocated past the border, we don't
** collide with the blocks near the search_start again.
**
*/
int reiserfs_new_unf_blocknrs2 (struct reiserfs_transaction_handle *th, 
				struct inode       * p_s_inode,
				unsigned long      * free_blocknrs,
				unsigned long        search_start)
{
  int ret=0, blks_gotten=0;
  unsigned long border = 0;
  unsigned long bstart = 0;
  unsigned long hash_in, hash_out;
  unsigned long saved_search_start=search_start;
  int allocated[PREALLOCATION_SIZE];
  int blks;

  if (!reiserfs_no_border(th->t_super)) {
    /* we default to having the border at the 10% mark of the disk.  This
    ** is an arbitrary decision and it needs tuning.  It also needs a limit
    ** to prevent it from taking too much space on huge drives.
    */
    bstart = (SB_BLOCK_COUNT(th->t_super) / 10); 
  }
  if (!reiserfs_no_unhashed_relocation(th->t_super)) {
    /* this is a very simple first attempt at preventing too much grouping
    ** around the border value.  Since k_dir_id is never larger than the
    ** highest allocated oid, it is far from perfect, and files will tend
    ** to be grouped towards the start of the border
    */
    border = le32_to_cpu(INODE_PKEY(p_s_inode)->k_dir_id) % (SB_BLOCK_COUNT(th->t_super) - bstart - 1) ;
  } else {
    /* why would we want to declare a local variable named border for this
    ** if statement????? -chris
    ** unsigned long border = 0;
    */
    if (!reiserfs_hashed_relocation(th->t_super)) {
      hash_in = le32_to_cpu((INODE_PKEY(p_s_inode))->k_dir_id);
				/* I wonder if the CPU cost of the
                                   hash will obscure the layout
                                   effect? Of course, whether that
                                   effect is good or bad we don't
                                   know.... :-) */
      
      hash_out = keyed_hash(((char *) (&hash_in)), 4);
      border = hash_out % (SB_BLOCK_COUNT(th->t_super) - bstart - 1) ;
    }
  }
  border += bstart ;
  allocated[0] = 0 ; /* important.  Allows a check later on to see if at
                      * least one block was allocated.  This prevents false
                      * out-of-disk-space returns.
                      */

  if ( (p_s_inode->i_size < 4 * 4096) || 
       !(S_ISREG(p_s_inode->i_mode)) )
    {
      if ( search_start < border 
	   || (
				/* allow us to test whether it is a
                                   good idea to prevent files from
                                   getting too far away from their
                                   packing locality by some unexpected
                                   means.  This might be poor code for
                                   directories whose files total more
                                   than 1/10th of the disk, and it
                                   might be good code for a filesystem
                                   suffering from old insertions made
                                   when the disk was almost full. */
               /* changed from !reiserfs_test3(th->t_super), which doesn't
               ** seem like a good idea.  Think about adding blocks to
               ** a large file.  If you've allocated 10% of the disk
               ** in contiguous blocks, you start over at the border value
               ** for every new allocation.  This throws away all the
               ** information sent in about the last block that was allocated
               ** in the file.  Not a good general case at all.
               ** -chris
               */
	       reiserfs_test4(th->t_super) && 
	       (search_start > border + (SB_BLOCK_COUNT(th->t_super) / 10))
	       )
	   )
	search_start=border;
  
      ret = do_reiserfs_new_blocknrs(th, free_blocknrs, search_start, 
				     1/*amount_needed*/, 
				     0/*use reserved blocks for root */,
				     1/*for_formatted*/,
				     0/*for prealloc */) ;  
      return ret;
    }

  /* take a block off the prealloc list and return it -Hans */
  if (p_s_inode->u.reiserfs_i.i_prealloc_count > 0) {
    p_s_inode->u.reiserfs_i.i_prealloc_count--;
    *free_blocknrs = p_s_inode->u.reiserfs_i.i_prealloc_block++;

    /* if no more preallocated blocks, remove inode from list */
    if (! p_s_inode->u.reiserfs_i.i_prealloc_count) {
      list_del(&p_s_inode->u.reiserfs_i.i_prealloc_list);
    }
    
    return ret;
  }

				/* else get a new preallocation for the file */
  reiserfs_discard_prealloc (th, p_s_inode);
  /* this uses the last preallocated block as the search_start.  discard
  ** prealloc does not zero out this number.
  */
  if (search_start <= p_s_inode->u.reiserfs_i.i_prealloc_block) {
    search_start = p_s_inode->u.reiserfs_i.i_prealloc_block;
  }
  
  /* doing the compare again forces search_start to be >= the border,
  ** even if the file already had preallocation done.  This seems extra,
  ** and should probably be removed
  */
  if ( search_start < border ) search_start=border; 

  /* If the disk free space is already below 10% we should 
  ** start looking for the free blocks from the beginning 
  ** of the partition, before the border line.
  */
  if ( SB_FREE_BLOCKS(th->t_super) <= (SB_BLOCK_COUNT(th->t_super) / 10) ) {
    search_start=saved_search_start;
  }

  *free_blocknrs = 0;
  blks = PREALLOCATION_SIZE-1;
  for (blks_gotten=0; blks_gotten<PREALLOCATION_SIZE; blks_gotten++) {
    ret = do_reiserfs_new_blocknrs(th, free_blocknrs, search_start, 
				   1/*amount_needed*/, 
				   0/*for root reserved*/,
				   1/*for_formatted*/,
				   (blks_gotten > 0)/*must_be_contiguous*/) ;
    /* if we didn't find a block this time, adjust blks to reflect
    ** the actual number of blocks allocated
    */ 
    if (ret != CARRY_ON) {
      blks = blks_gotten > 0 ? (blks_gotten - 1) : 0 ;
      break ;
    }
    allocated[blks_gotten]= *free_blocknrs;
#ifdef CONFIG_REISERFS_CHECK
    if ( (blks_gotten>0) && (allocated[blks_gotten] - allocated[blks_gotten-1]) != 1 ) {
      /* this should be caught by new_blocknrs now; this is just a sanity check */
      reiserfs_warning("yura-1, reiserfs_new_unf_blocknrs2: pre-allocated set of blocks is not contiguous!\n") ;
      reiserfs_free_block(th, allocated[blks_gotten]);
      blks = blks_gotten-1; 
      break;
    }
#endif
    if (blks_gotten==0) {
      p_s_inode->u.reiserfs_i.i_prealloc_block = *free_blocknrs;
    }
    search_start = *free_blocknrs; 
    *free_blocknrs = 0;
  }
  p_s_inode->u.reiserfs_i.i_prealloc_count = blks;
  *free_blocknrs = p_s_inode->u.reiserfs_i.i_prealloc_block;
  p_s_inode->u.reiserfs_i.i_prealloc_block++;

  /* if the inode has preallocated blocks, link it to the list */
  if (p_s_inode->u.reiserfs_i.i_prealloc_count) {
    list_add(&p_s_inode->u.reiserfs_i.i_prealloc_list,
	     &SB_JOURNAL(th->t_super)->j_prealloc_list);
  } 
  /* we did actually manage to get at least 1 block */
  if (ret != CARRY_ON && allocated[0] > 0) {
    return CARRY_ON ;
  }
  /* NO_MORE_UNUSED_CONTIGUOUS_BLOCKS should only mean something to
  ** the preallocation code.  The rest of the filesystem asks for a block
  ** and should either get it, or know the disk is full.  The code
  ** above should never allow ret == NO_MORE_UNUSED_CONTIGUOUS_BLOCKS,
  ** as it doesn't send for_prealloc = 1 to do_reiserfs_new_blocknrs
  ** unless it has already successfully allocated at least one block.
  ** Just in case, we translate into a return value the rest of the
  ** filesystem can understand.
  **
  ** It is an error to change this without making the
  ** rest of the filesystem understand NO_MORE_UNUSED_CONTIGUOUS_BLOCKS.
  ** If you consider it a bug to return NO_DISK_SPACE here, fix the rest
  ** of the fs first.
  */
  if (ret == NO_MORE_UNUSED_CONTIGUOUS_BLOCKS) {
#ifdef CONFIG_REISERFS_CHECK
    reiserfs_warning("reiser-2015: this shouldn't happen, may cause false out of disk space error");
#endif
     return NO_DISK_SPACE; 
  }
  return ret;
}
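
Finally, a standalone sketch of the border arithmetic described in the header comment of reiserfs_new_unf_blocknrs2(): roughly the first 10% of the disk (bstart) is left to formatted tree nodes, and the directory id is reduced modulo the remaining span to pick a per-directory border. The block count and k_dir_id values are made up for illustration.

#include <stdio.h>

/* Userspace sketch that only mirrors the border computation above;
 * it is not part of the filesystem. */
int main(void)
{
	unsigned long block_count = 1000000; /* hypothetical fs size in blocks */
	unsigned long dir_id = 12345;        /* hypothetical k_dir_id          */
	unsigned long bstart, border;

	/* first 10% of the disk is kept for formatted (tree) nodes */
	bstart = block_count / 10;

	/* spread directories over the remaining 90% using the directory id */
	border = dir_id % (block_count - bstart - 1);
	border += bstart;

	printf("bstart = %lu, border = %lu\n", bstart, border);
	return 0;
}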