Example #1
/* We need to take a copy of the mount flags since things like
 * MS_RDONLY don't get set until *after* we're called.
 * mount_flags != mount_options */
int reiserfs_xattr_init(struct super_block *s, int mount_flags)
{
	int err = 0;
	struct dentry *privroot = REISERFS_SB(s)->priv_root;

	err = xattr_mount_check(s);
	if (err)
		goto error;

	if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) {
		reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
		err = create_privroot(REISERFS_SB(s)->priv_root);
		mutex_unlock(&s->s_root->d_inode->i_mutex);
	}

	if (privroot->d_inode) {
		s->s_xattr = reiserfs_xattr_handlers;
		reiserfs_mutex_lock_safe(&privroot->d_inode->i_mutex, s);
		if (!REISERFS_SB(s)->xattr_root) {
			struct dentry *dentry;
			dentry = lookup_one_len(XAROOT_NAME, privroot,
						strlen(XAROOT_NAME));
			if (!IS_ERR(dentry))
				REISERFS_SB(s)->xattr_root = dentry;
			else
				err = PTR_ERR(dentry);
		}
		mutex_unlock(&privroot->d_inode->i_mutex);
	}

error:
	if (err) {
		clear_bit(REISERFS_XATTRS_USER, &(REISERFS_SB(s)->s_mount_opt));
		clear_bit(REISERFS_POSIXACL, &(REISERFS_SB(s)->s_mount_opt));
	}

	/* The super_block MS_POSIXACL must mirror the (no)acl mount option. */
	if (reiserfs_posixacl(s))
		s->s_flags |= MS_POSIXACL;
	else
		s->s_flags &= ~MS_POSIXACL;

	return err;
}
static uint32_t
get_third_component(struct reiserfs_sb_info *sbi, const char *name, int len)
{
	uint32_t res;

	if (!len || (len == 1 && name[0] == '.'))
		return (DOT_OFFSET);

	if (len == 2 && name[0] == '.' && name[1] == '.')
		return (DOT_DOT_OFFSET);

	res = REISERFS_SB(sbi)->s_hash_function(name, len);

	/* Take bits from 7-th to 30-th including both bounds */
	res = GET_HASH_VALUE(res);
	if (res == 0)
		/*
		 * Needed so that no names sort before "." and "..", which
		 * have hash value == 0 and generation counters 1 and 2
		 * respectively
		 */
		res = 128;

	return (res + MAX_GENERATION_NUMBER);
}
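/*
 * Illustrative sketch (not part of the reiserfs sources): how a directory
 * offset can pack a hash into bits 7..30 next to a small generation counter
 * in bits 0..6, as the comments in get_third_component() above describe.
 * The mask constants and names below are assumptions chosen to match that
 * description, not the kernel macros themselves.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_HASH_MASK 0x7fffff80u	/* bits 7..30: hash value         */
#define TOY_GEN_MASK  0x0000007fu	/* bits 0..6:  generation counter */

static uint32_t toy_pack_offset(uint32_t hash, uint32_t gen)
{
	return (hash & TOY_HASH_MASK) | (gen & TOY_GEN_MASK);
}

int main(void)
{
	uint32_t off = toy_pack_offset(0xdeadbeefu, 3u);

	printf("hash bits: %#x, generation: %u\n",
	       off & TOY_HASH_MASK, off & TOY_GEN_MASK);
	return 0;
}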
int
reiserfs_lookup(struct vop_cachedlookup_args *ap)
{
	int error, retval;
	struct vnode *vdp         = ap->a_dvp;
	struct vnode **vpp        = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;

	int flags         = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;
	struct cpu_key *saved_ino;

	struct vnode *vp;
	struct vnode *pdp;  /* Saved dp during symlink work */
	struct reiserfs_node *dp;
	struct reiserfs_dir_entry de;
	INITIALIZE_PATH(path_to_entry);

	char c = cnp->cn_nameptr[cnp->cn_namelen];
	cnp->cn_nameptr[cnp->cn_namelen] = '\0';
	reiserfs_log(LOG_DEBUG, "looking for `%s', %ld (%s)\n",
	    cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_pnbuf);
	cnp->cn_nameptr[cnp->cn_namelen] = c;

	vp = NULL;
	dp = VTOI(vdp);

	if (REISERFS_MAX_NAME(dp->i_reiserfs->s_blocksize) < cnp->cn_namelen)
		return (ENAMETOOLONG);

	reiserfs_log(LOG_DEBUG, "searching entry\n");
	de.de_gen_number_bit_string = 0;
	retval = reiserfs_find_entry(dp, cnp->cn_nameptr, cnp->cn_namelen,
	    &path_to_entry, &de);
	pathrelse(&path_to_entry);

	if (retval == NAME_FOUND) {
		reiserfs_log(LOG_DEBUG, "found\n");
	} else {
		reiserfs_log(LOG_DEBUG, "not found\n");
	}

	if (retval == NAME_FOUND) {
#if 0
		/* Hide the .reiserfs_priv directory */
		if (reiserfs_xattrs(dp->i_reiserfs) &&
		    !old_format_only(dp->i_reiserfs) &&
		    REISERFS_SB(dp->i_reiserfs)->priv_root &&
		    REISERFS_SB(dp->i_reiserfs)->priv_root->d_inode &&
		    de.de_objectid == le32toh(INODE_PKEY(REISERFS_SB(
		    dp->i_reiserfs)->priv_root->d_inode)->k_objectid)) {
			return (EACCES);
		}
#endif

		reiserfs_log(LOG_DEBUG, "reading vnode\n");
		pdp = vdp;
		if (flags & ISDOTDOT) {
			saved_ino = (struct cpu_key *)&(de.de_dir_id);
			VOP_UNLOCK(pdp, 0);
			error = reiserfs_iget(vdp->v_mount,
			    saved_ino, &vp, td);
			vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY);
			if (error != 0)
				return (error);
			*vpp = vp;
		} else if (de.de_objectid == dp->i_number &&
		    de.de_dir_id == dp->i_ino) {
			VREF(vdp); /* We want ourself, ie "." */
			*vpp = vdp;
		} else {
			if ((error = reiserfs_iget(vdp->v_mount,
			    (struct cpu_key *)&(de.de_dir_id), &vp, td)) != 0)
				return (error);
			*vpp = vp;
		}

		/*
		 * Propagate the priv_object flag so we know we're in the
		 * priv tree
		 */
		/*if (is_reiserfs_priv_object(dir))
			REISERFS_I(inode)->i_flags |= i_priv_object;*/
	} else {
		if (retval == IO_ERROR) {
			reiserfs_log(LOG_DEBUG, "IO error\n");
			return (EIO);
		}

		return (ENOENT);
	}

	/* Insert name into cache if appropriate. */
	if (cnp->cn_flags & MAKEENTRY)
		cache_enter(vdp, *vpp, cnp);

	reiserfs_log(LOG_DEBUG, "done\n");
	return (0);
}
int
reiserfs_readdir(struct vop_readdir_args  /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		int *a_ncookies;
		u_long **a_cookies;
	} */*ap)
{
	int error = 0;
	struct dirent dstdp;
	struct uio *uio = ap->a_uio;

	off_t next_pos;
	struct buf *bp;
	struct item_head *ih;
	struct cpu_key pos_key;
	const struct key *rkey;
	struct reiserfs_node *ip;
	struct reiserfs_dir_entry de;
	INITIALIZE_PATH(path_to_entry);
	int entry_num, item_num, search_res;

	/* The NFS part */
	int ncookies = 0;
	u_long *cookies = NULL;

	/*
	 * Form key for search the next directory entry using f_pos field of
	 * file structure
	 */
	ip = VTOI(ap->a_vp);
	make_cpu_key(&pos_key,
	    ip, uio->uio_offset ? uio->uio_offset : DOT_OFFSET,
	    TYPE_DIRENTRY, 3);
	next_pos = cpu_key_k_offset(&pos_key);

	reiserfs_log(LOG_DEBUG, "listing entries for "
	    "(objectid=%d, dirid=%d)\n",
	    pos_key.on_disk_key.k_objectid, pos_key.on_disk_key.k_dir_id);
	reiserfs_log(LOG_DEBUG, "uio_offset = %jd, uio_resid = %d\n",
	    (intmax_t)uio->uio_offset, uio->uio_resid);

	if (ap->a_ncookies && ap->a_cookies) {
		cookies = (u_long *)malloc(
		    uio->uio_resid / 16 * sizeof(u_long),
		    M_REISERFSCOOKIES, M_WAITOK);
	}

	while (1) {
		//research:
		/*
		 * Search the directory item, containing entry with
		 * specified key
		 */
		reiserfs_log(LOG_DEBUG, "search directory to read\n");
		search_res = search_by_entry_key(ip->i_reiserfs, &pos_key,
		    &path_to_entry, &de);
		if (search_res == IO_ERROR) {
			error = EIO;
			goto out;
		}

		entry_num = de.de_entry_num;
		item_num  = de.de_item_num;
		bp = de.de_bp;
		ih = de.de_ih;

		if (search_res == POSITION_FOUND ||
		    entry_num < I_ENTRY_COUNT(ih)) {
			/*
			 * Go through all entries in the directory item
			 * beginning from the entry, that has been found.
			 */
			struct reiserfs_de_head *deh = B_I_DEH(bp, ih) +
			    entry_num;

			if (ap->a_ncookies == NULL) {
				cookies = NULL;
			} else {
				//ncookies = 
			}

			reiserfs_log(LOG_DEBUG,
			    "walking through directory entries\n");
			for (; entry_num < I_ENTRY_COUNT(ih);
			    entry_num++, deh++) {
				int d_namlen;
				char *d_name;
				off_t d_off;
				ino_t d_ino;

				if (!de_visible(deh)) {
					/* It is hidden entry */
					continue;
				}

				d_namlen = entry_length(bp, ih, entry_num);
				d_name   = B_I_DEH_ENTRY_FILE_NAME(bp, ih, deh);
				if (!d_name[d_namlen - 1])
					d_namlen = strlen(d_name);
				reiserfs_log(LOG_DEBUG, "  - `%s' (len=%d)\n",
				    d_name, d_namlen);

				if (d_namlen > REISERFS_MAX_NAME(
				    ip->i_reiserfs->s_blocksize)) {
					/* Too big to send back to VFS */
					continue;
				}

#if 0
				/* Ignore the .reiserfs_priv entry */
				if (reiserfs_xattrs(ip->i_reiserfs) &&
				    !old_format_only(ip->i_reiserfs) &&
				    filp->f_dentry == ip->i_reiserfs->s_root &&
				    REISERFS_SB(ip->i_reiserfs)->priv_root &&
				    REISERFS_SB(ip->i_reiserfs)->priv_root->d_inode &&
				    deh_objectid(deh) ==
				    le32toh(INODE_PKEY(REISERFS_SB(
				    ip->i_reiserfs)->priv_root->d_inode)->k_objectid)) {
					continue;
				}
#endif

				d_off = deh_offset(deh);
				d_ino = deh_objectid(deh);
				uio->uio_offset = d_off;

				/* Copy to user land */
				dstdp.d_fileno = d_ino;
				dstdp.d_type   = DT_UNKNOWN;
				dstdp.d_namlen = d_namlen;
				dstdp.d_reclen = GENERIC_DIRSIZ(&dstdp);
				bcopy(d_name, dstdp.d_name, dstdp.d_namlen);
				bzero(dstdp.d_name + dstdp.d_namlen,
				    dstdp.d_reclen -
				    offsetof(struct dirent, d_name) -
				    dstdp.d_namlen);

				if (d_namlen > 0) {
					if (dstdp.d_reclen <= uio->uio_resid) {
						reiserfs_log(LOG_DEBUG, "     copying to user land\n");
						error = uiomove(&dstdp,
						    dstdp.d_reclen, uio);
						if (error)
							goto end;
						if (cookies != NULL) {
							cookies[ncookies] =
							    d_off;
							ncookies++;
						}
					} else
						break;
				} else {
					error = EIO;
					break;
				}

				next_pos = deh_offset(deh) + 1;
			}
			reiserfs_log(LOG_DEBUG, "...done\n");
		}

		reiserfs_log(LOG_DEBUG, "checking item num (%d == %d ?)\n",
		    item_num, B_NR_ITEMS(bp) - 1);
		if (item_num != B_NR_ITEMS(bp) - 1) {
			/* End of directory has been reached */
			reiserfs_log(LOG_DEBUG, "end reached\n");
			if (ap->a_eofflag)
				*ap->a_eofflag = 1;
			goto end;
		}

		/*
		 * The item we went through is the last item of the node.
		 * Use the right delimiting key to check whether this is the
		 * end of the directory.
		 */
		reiserfs_log(LOG_DEBUG, "get right key\n");
		rkey = get_rkey(&path_to_entry, ip->i_reiserfs);
		reiserfs_log(LOG_DEBUG, "right key = (objectid=%d, dirid=%d)\n",
		    rkey->k_objectid, rkey->k_dir_id);

		reiserfs_log(LOG_DEBUG, "compare it to MIN_KEY\n");
		reiserfs_log(LOG_DEBUG, "MIN KEY = (objectid=%d, dirid=%d)\n",
		    MIN_KEY.k_objectid, MIN_KEY.k_dir_id);
		if (comp_le_keys(rkey, &MIN_KEY) == 0) {
			/* Set pos_key to the smallest key greater than the
			 * key of the last entry in the item */
			reiserfs_log(LOG_DEBUG, "continuing on the right\n");
			set_cpu_key_k_offset(&pos_key, next_pos);
			continue;
		}

		reiserfs_log(LOG_DEBUG, "compare it to pos_key\n");
		reiserfs_log(LOG_DEBUG, "pos key = (objectid=%d, dirid=%d)\n",
		    pos_key.on_disk_key.k_objectid,
		    pos_key.on_disk_key.k_dir_id);
		if (COMP_SHORT_KEYS(rkey, &pos_key)) {
			/* End of directory has been reached */
			reiserfs_log(LOG_DEBUG, "end reached (right)\n");
			if (ap->a_eofflag)
				*ap->a_eofflag = 1;
			goto end;
		}

		/* Directory continues in the right neighboring block */
		reiserfs_log(LOG_DEBUG, "continuing with a new offset\n");
		set_cpu_key_k_offset(&pos_key,
		    le_key_k_offset(KEY_FORMAT_3_5, rkey));
		reiserfs_log(LOG_DEBUG,
		    "new pos key = (objectid=%d, dirid=%d)\n",
		    pos_key.on_disk_key.k_objectid,
		    pos_key.on_disk_key.k_dir_id);
	}

end:
	uio->uio_offset = next_pos;
	pathrelse(&path_to_entry);
	reiserfs_check_path(&path_to_entry);
out:
	if (error && cookies != NULL) {
		free(cookies, M_REISERFSCOOKIES);
	} else if (ap->a_ncookies != NULL && ap->a_cookies != NULL) {
		*ap->a_ncookies = ncookies;
		*ap->a_cookies  = cookies;
	}
	return (error);
}
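/*
 * Minimal userland sketch (assumptions: an 8-byte record alignment and a toy
 * dirent layout) of the record-length computation that GENERIC_DIRSIZ() and
 * the bzero() padding above rely on: the fixed dirent header plus the name
 * and its terminating NUL, rounded up to an alignment boundary.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_dirent {
	uint64_t d_fileno;
	uint16_t d_reclen;
	uint8_t  d_type;
	uint8_t  d_namlen;
	char     d_name[256];
};

static size_t toy_dirsiz(size_t namlen)
{
	size_t len = offsetof(struct toy_dirent, d_name) + namlen + 1;

	return (len + 7) & ~(size_t)7;	/* round up to 8 bytes (assumed) */
}

int main(void)
{
	printf("reclen for a 5-char name: %zu\n", toy_dirsiz(5));
	return 0;
}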
/* Allocates blocks for a file to fulfil write request.
   Maps all unmapped but prepared pages from the list.
   Updates metadata with newly allocated blocknumbers as needed */
int reiserfs_allocate_blocks_for_region(
				struct reiserfs_transaction_handle *th,
				struct inode *inode, /* Inode we work with */
				loff_t pos, /* Writing position */
				int num_pages, /* number of pages write going
						  to touch */
				int write_bytes, /* amount of bytes to write */
				struct page **prepared_pages, /* array of
							         prepared pages
							       */
				int blocks_to_allocate /* Amount of blocks we
							  need to allocate to
							  fit the data into file
							 */
				)
{
    struct cpu_key key; // cpu key of item that we are going to deal with
    struct item_head *ih; // pointer to item head that we are going to deal with
    struct buffer_head *bh; // Buffer head that contains items that we are going to deal with
    __u32 * item; // pointer to item we are going to deal with
    INITIALIZE_PATH(path); // path to item, that we are going to deal with.
    b_blocknr_t *allocated_blocks; // Pointer to a place where allocated blocknumbers would be stored.
    reiserfs_blocknr_hint_t hint; // hint structure for block allocator.
    size_t res; // return value of various functions that we call.
    int curr_block; // current block used to keep track of unmapped blocks.
    int i; // loop counter
    int itempos; // position in item
    unsigned int from = (pos & (PAGE_CACHE_SIZE - 1)); // writing position in
						       // first page
    unsigned int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1; /* last modified byte offset in last page */
    __u64 hole_size ; // amount of blocks for a file hole, if it needed to be created.
    int modifying_this_item = 0; // Flag for items traversal code to keep track
				 // of the fact that we already prepared
				 // current block for journal
    int will_prealloc = 0;

    RFALSE(!blocks_to_allocate, "green-9004: tried to allocate zero blocks?");

    /* only preallocate if this is a small write */
    if (REISERFS_I(inode)->i_prealloc_count ||
       (!(write_bytes & (inode->i_sb->s_blocksize -1)) &&
        blocks_to_allocate <
        REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize))
        will_prealloc = REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize;

    allocated_blocks = kmalloc((blocks_to_allocate + will_prealloc) *
    					sizeof(b_blocknr_t), GFP_NOFS);

    /* First we compose a key to point at the writing position, we want to do
       that outside of any locking region. */
    make_cpu_key (&key, inode, pos+1, TYPE_ANY, 3/*key length*/);

    /* If we came here, it means we absolutely need to open a transaction,
       since we need to allocate some blocks */
    reiserfs_write_lock(inode->i_sb); // Journaling stuff and we need that.
    journal_begin(th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3 + 1); // Wish I knew whether this number is enough
    reiserfs_update_inode_transaction(inode) ;

    /* Look for the in-tree position of our write, need path for block allocator */
    res = search_for_position_by_key(inode->i_sb, &key, &path);
    if ( res == IO_ERROR ) {
	res = -EIO;
	goto error_exit;
    }
   
    /* Allocate blocks */
    /* First fill in "hint" structure for block allocator */
    hint.th = th; // transaction handle.
    hint.path = &path; // Path, so that block allocator can determine packing locality or whatever it needs to determine.
    hint.inode = inode; // Inode is needed by block allocator too.
    hint.search_start = 0; // We have no hint on where to search free blocks for block allocator.
    hint.key = key.on_disk_key; // on disk key of file.
    hint.block = inode->i_blocks>>(inode->i_sb->s_blocksize_bits-9); // Number of disk blocks this file occupies already.
    hint.formatted_node = 0; // We are allocating blocks for unformatted node.
    hint.preallocate = will_prealloc;

    /* Call block allocator to allocate blocks */
    res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate);
    if ( res != CARRY_ON ) {
	if ( res == NO_DISK_SPACE ) {
	    /* We flush the transaction in case of no space. This way some
	       blocks might become free */
	    SB_JOURNAL(inode->i_sb)->j_must_wait = 1;
	    restart_transaction(th, inode, &path);

	    /* We might have scheduled, so search again */
	    res = search_for_position_by_key(inode->i_sb, &key, &path);
	    if ( res == IO_ERROR ) {
		res = -EIO;
		goto error_exit;
	    }

	    /* update changed info for hint structure. */
	    res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate);
	    if ( res != CARRY_ON ) {
		res = -ENOSPC; 
		pathrelse(&path);
		goto error_exit;
	    }
	} else {
	    res = -ENOSPC;
	    pathrelse(&path);
	    goto error_exit;
	}
    }

#ifdef __BIG_ENDIAN
        // Too bad, I have not found any way to convert a given region from
        // cpu format to little endian format
    {
        int i;
        for ( i = 0; i < blocks_to_allocate ; i++)
            allocated_blocks[i]=cpu_to_le32(allocated_blocks[i]);
    }
#endif

    /* Block allocation may well have scheduled and the tree may have changed,
       so let's search the tree again */
    /* find where in the tree our write should go */
    res = search_for_position_by_key(inode->i_sb, &key, &path);
    if ( res == IO_ERROR ) {
	res = -EIO;
	goto error_exit_free_blocks;
    }

    bh = get_last_bh( &path ); // Get a bufferhead for last element in path.
    ih = get_ih( &path );      // Get a pointer to last item head in path.
    item = get_item( &path );  // Get a pointer to last item in path

    /* Let's see what we have found */
    if ( res != POSITION_FOUND ) { /* position not found, this means that we
				      might need to append file with holes
				      first */
	// Since we are writing past the file's end, we need to find out if
	// there is a hole that needs to be inserted before our writing
	// position, and how many blocks it is going to cover (we need to
	//  populate pointers to file blocks representing the hole with zeros)

	{
	    int item_offset = 1;
	    /*
	     * if ih is stat data, its offset is 0 and we don't want to
	     * add 1 to pos in the hole_size calculation
	     */
	    if (is_statdata_le_ih(ih))
	        item_offset = 0;
	    hole_size = (pos + item_offset -
	            (le_key_k_offset( get_inode_item_key_version(inode),
		    &(ih->ih_key)) +
		    op_bytes_number(ih, inode->i_sb->s_blocksize))) >>
		    inode->i_sb->s_blocksize_bits;
	}
static int show_journal(struct seq_file *m, void *unused)
{
	struct super_block *sb = m->private;
	struct reiserfs_sb_info *r = REISERFS_SB(sb);
	struct reiserfs_super_block *rs = r->s_rs;
	struct journal_params *jp = &rs->s_v1.s_journal;
	char b[BDEVNAME_SIZE];

	seq_printf(m,		/* on-disk fields */
		   "jp_journal_1st_block: \t%i\n"
		   "jp_journal_dev: \t%s[%x]\n"
		   "jp_journal_size: \t%i\n"
		   "jp_journal_trans_max: \t%i\n"
		   "jp_journal_magic: \t%i\n"
		   "jp_journal_max_batch: \t%i\n"
		   "jp_journal_max_commit_age: \t%i\n"
		   "jp_journal_max_trans_age: \t%i\n"
		   /* incore fields */
		   "j_1st_reserved_block: \t%i\n"
		   "j_state: \t%li\n"
		   "j_trans_id: \t%u\n"
		   "j_mount_id: \t%lu\n"
		   "j_start: \t%lu\n"
		   "j_len: \t%lu\n"
		   "j_len_alloc: \t%lu\n"
		   "j_wcount: \t%i\n"
		   "j_bcount: \t%lu\n"
		   "j_first_unflushed_offset: \t%lu\n"
		   "j_last_flush_trans_id: \t%u\n"
		   "j_trans_start_time: \t%li\n"
		   "j_list_bitmap_index: \t%i\n"
		   "j_must_wait: \t%i\n"
		   "j_next_full_flush: \t%i\n"
		   "j_next_async_flush: \t%i\n"
		   "j_cnode_used: \t%i\n" "j_cnode_free: \t%i\n" "\n"
		   /* reiserfs_proc_info_data_t.journal fields */
		   "in_journal: \t%12lu\n"
		   "in_journal_bitmap: \t%12lu\n"
		   "in_journal_reusable: \t%12lu\n"
		   "lock_journal: \t%12lu\n"
		   "lock_journal_wait: \t%12lu\n"
		   "journal_begin: \t%12lu\n"
		   "journal_relock_writers: \t%12lu\n"
		   "journal_relock_wcount: \t%12lu\n"
		   "mark_dirty: \t%12lu\n"
		   "mark_dirty_already: \t%12lu\n"
		   "mark_dirty_notjournal: \t%12lu\n"
		   "restore_prepared: \t%12lu\n"
		   "prepare: \t%12lu\n"
		   "prepare_retry: \t%12lu\n",
		   DJP(jp_journal_1st_block),
		   bdevname(SB_JOURNAL(sb)->j_dev_bd, b),
		   DJP(jp_journal_dev),
		   DJP(jp_journal_size),
		   DJP(jp_journal_trans_max),
		   DJP(jp_journal_magic),
		   DJP(jp_journal_max_batch),
		   SB_JOURNAL(sb)->j_max_commit_age,
		   DJP(jp_journal_max_trans_age),
		   JF(j_1st_reserved_block),
		   JF(j_state),
		   JF(j_trans_id),
		   JF(j_mount_id),
		   JF(j_start),
		   JF(j_len),
		   JF(j_len_alloc),
		   atomic_read(&r->s_journal->j_wcount),
		   JF(j_bcount),
		   JF(j_first_unflushed_offset),
		   JF(j_last_flush_trans_id),
		   JF(j_trans_start_time),
		   JF(j_list_bitmap_index),
		   JF(j_must_wait),
		   JF(j_next_full_flush),
		   JF(j_next_async_flush),
		   JF(j_cnode_used),
		   JF(j_cnode_free),
		   SFPJ(in_journal),
		   SFPJ(in_journal_bitmap),
		   SFPJ(in_journal_reusable),
		   SFPJ(lock_journal),
		   SFPJ(lock_journal_wait),
		   SFPJ(journal_being),
		   SFPJ(journal_relock_writers),
		   SFPJ(journal_relock_wcount),
		   SFPJ(mark_dirty),
		   SFPJ(mark_dirty_already),
		   SFPJ(mark_dirty_notjournal),
		   SFPJ(restore_prepared), SFPJ(prepare), SFPJ(prepare_retry)
	    );
	return 0;
}
Example #7
static inline int this_blocknr_allocation_would_make_it_a_large_file(reiserfs_blocknr_hint_t *hint)
{
    return hint->block == REISERFS_SB(hint->th->t_super)->s_alloc_options.large_file_size;
}
int balance_internal(struct tree_balance *tb,	/* tree_balance structure               */
		     int h,	/* level of the tree                    */
		     int child_pos, struct item_head *insert_key,	/* key for insertion on higher level    */
		     struct buffer_head **insert_ptr	/* node for insertion on higher level */
    )
    /* if inserting/pasting
       {
       child_pos is the position of the node-pointer in S[h] that        *
       pointed to S[h-1] before balancing of the h-1 level;              *
       this means that new pointers and items must be inserted AFTER *
       child_pos
       }
       else
       {
       it is the position of the leftmost pointer that must be deleted (together with
       its corresponding key to the left of the pointer)
       as a result of the previous level's balancing.
       }
     */
{
	struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h);
	struct buffer_info bi;
	int order;		/* we return this: it is 0 if there is no S[h], else it is tb->S[h]->b_item_order */
	int insert_num, n, k;
	struct buffer_head *S_new;
	struct item_head new_insert_key;
	struct buffer_head *new_insert_ptr = NULL;
	struct item_head *new_insert_key_addr = insert_key;

	RFALSE(h < 1, "h (%d) can not be < 1 on internal level", h);

	PROC_INFO_INC(tb->tb_sb, balance_at[h]);

	order =
	    (tbSh) ? PATH_H_POSITION(tb->tb_path,
				     h + 1) /*tb->S[h]->b_item_order */ : 0;

	/* Using insert_size[h] calculate the number insert_num of items
	   that must be inserted to or deleted from S[h]. */
	insert_num = tb->insert_size[h] / ((int)(KEY_SIZE + DC_SIZE));

	/* Check whether insert_num is proper * */
	RFALSE(insert_num < -2 || insert_num > 2,
	       "incorrect number of items inserted to the internal node (%d)",
	       insert_num);
	RFALSE(h > 1 && (insert_num > 1 || insert_num < -1),
	       "incorrect number of items (%d) inserted to the internal node on a level (h=%d) higher than last internal level",
	       insert_num, h);

	/* Make balance in case insert_num < 0 */
	if (insert_num < 0) {
		balance_internal_when_delete(tb, h, child_pos);
		return order;
	}

	k = 0;
	if (tb->lnum[h] > 0) {
		/* shift lnum[h] items from S[h] to the left neighbor L[h].
		   check how many of new items fall into L[h] or CFL[h] after
		   shifting */
		n = B_NR_ITEMS(tb->L[h]);	/* number of items in L[h] */
		if (tb->lnum[h] <= child_pos) {
			/* new items don't fall into L[h] or CFL[h] */
			internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
					    tb->lnum[h]);
			/*internal_shift_left (tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,tb->lnum[h]); */
			child_pos -= tb->lnum[h];
		} else if (tb->lnum[h] > child_pos + insert_num) {
			/* all new items fall into L[h] */
			internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
					    tb->lnum[h] - insert_num);
			/*                  internal_shift_left(tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,
			   tb->lnum[h]-insert_num);
			 */
			/* insert insert_num keys and node-pointers into L[h] */
			bi.tb = tb;
			bi.bi_bh = tb->L[h];
			bi.bi_parent = tb->FL[h];
			bi.bi_position = get_left_neighbor_position(tb, h);
			internal_insert_childs(&bi,
					       /*tb->L[h], tb->S[h-1]->b_next */
					       n + child_pos + 1,
					       insert_num, insert_key,
					       insert_ptr);

			insert_num = 0;
		} else {
			struct disk_child *dc;

			/* some items fall into L[h] or CFL[h], but some don't fall */
			internal_shift1_left(tb, h, child_pos + 1);
			/* calculate number of new items that fall into L[h] */
			k = tb->lnum[h] - child_pos - 1;
			bi.tb = tb;
			bi.bi_bh = tb->L[h];
			bi.bi_parent = tb->FL[h];
			bi.bi_position = get_left_neighbor_position(tb, h);
			internal_insert_childs(&bi,
					       /*tb->L[h], tb->S[h-1]->b_next, */
					       n + child_pos + 1, k,
					       insert_key, insert_ptr);

			replace_lkey(tb, h, insert_key + k);

			/* replace the first node-ptr in S[h] by node-ptr to insert_ptr[k] */
			dc = B_N_CHILD(tbSh, 0);
			put_dc_size(dc,
				    MAX_CHILD_SIZE(insert_ptr[k]) -
				    B_FREE_SPACE(insert_ptr[k]));
			put_dc_block_number(dc, insert_ptr[k]->b_blocknr);

			do_balance_mark_internal_dirty(tb, tbSh, 0);

			k++;
			insert_key += k;
			insert_ptr += k;
			insert_num -= k;
			child_pos = 0;
		}
	}
	/* tb->lnum[h] > 0 */
	if (tb->rnum[h] > 0) {
		/*shift rnum[h] items from S[h] to the right neighbor R[h] */
		/* check how many of new items fall into R or CFR after shifting */
		n = B_NR_ITEMS(tbSh);	/* number of items in S[h] */
		if (n - tb->rnum[h] >= child_pos)
			/* new items fall into S[h] */
			/*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],tb->rnum[h]); */
			internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
					     tb->rnum[h]);
		else if (n + insert_num - tb->rnum[h] < child_pos) {
			/* all new items fall into R[h] */
			/*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],
			   tb->rnum[h] - insert_num); */
			internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
					     tb->rnum[h] - insert_num);

			/* insert insert_num keys and node-pointers into R[h] */
			bi.tb = tb;
			bi.bi_bh = tb->R[h];
			bi.bi_parent = tb->FR[h];
			bi.bi_position = get_right_neighbor_position(tb, h);
			internal_insert_childs(&bi,
					       /*tb->R[h],tb->S[h-1]->b_next */
					       child_pos - n - insert_num +
					       tb->rnum[h] - 1,
					       insert_num, insert_key,
					       insert_ptr);
			insert_num = 0;
		} else {
			struct disk_child *dc;

			/* one of the items falls into CFR[h] */
			internal_shift1_right(tb, h, n - child_pos + 1);
			/* calculate number of new items that fall into R[h] */
			k = tb->rnum[h] - n + child_pos - 1;
			bi.tb = tb;
			bi.bi_bh = tb->R[h];
			bi.bi_parent = tb->FR[h];
			bi.bi_position = get_right_neighbor_position(tb, h);
			internal_insert_childs(&bi,
					       /*tb->R[h], tb->R[h]->b_child, */
					       0, k, insert_key + 1,
					       insert_ptr + 1);

			replace_rkey(tb, h, insert_key + insert_num - k - 1);

			/* replace the first node-ptr in R[h] by node-ptr insert_ptr[insert_num-k-1] */
			dc = B_N_CHILD(tb->R[h], 0);
			put_dc_size(dc,
				    MAX_CHILD_SIZE(insert_ptr
						   [insert_num - k - 1]) -
				    B_FREE_SPACE(insert_ptr
						 [insert_num - k - 1]));
			put_dc_block_number(dc,
					    insert_ptr[insert_num - k -
						       1]->b_blocknr);

			do_balance_mark_internal_dirty(tb, tb->R[h], 0);

			insert_num -= (k + 1);
		}
	}

    /** Fill new node that appears instead of S[h] **/
	RFALSE(tb->blknum[h] > 2, "blknum can not be > 2 for internal level");
	RFALSE(tb->blknum[h] < 0, "blknum can not be < 0");

	if (!tb->blknum[h]) {	/* node S[h] is empty now */
		RFALSE(!tbSh, "S[h] is equal NULL");

		/* do what is needed for buffer thrown from tree */
		reiserfs_invalidate_buffer(tb, tbSh);
		return order;
	}

	if (!tbSh) {
		/* create new root */
		struct disk_child *dc;
		struct buffer_head *tbSh_1 = PATH_H_PBUFFER(tb->tb_path, h - 1);
		struct block_head *blkh;

		if (tb->blknum[h] != 1)
			reiserfs_panic(NULL, "ibalance-3", "One new node "
				       "required for creating the new root");
		/* S[h] = empty buffer from the list FEB. */
		tbSh = get_FEB(tb);
		blkh = B_BLK_HEAD(tbSh);
		set_blkh_level(blkh, h + 1);

		/* Put the unique node-pointer to S[h] that points to S[h-1]. */

		dc = B_N_CHILD(tbSh, 0);
		put_dc_block_number(dc, tbSh_1->b_blocknr);
		put_dc_size(dc,
			    (MAX_CHILD_SIZE(tbSh_1) - B_FREE_SPACE(tbSh_1)));

		tb->insert_size[h] -= DC_SIZE;
		set_blkh_free_space(blkh, blkh_free_space(blkh) - DC_SIZE);

		do_balance_mark_internal_dirty(tb, tbSh, 0);

		/*&&&&&&&&&&&&&&&&&&&&&&&& */
		check_internal(tbSh);
		/*&&&&&&&&&&&&&&&&&&&&&&&& */

		/* put new root into path structure */
		PATH_OFFSET_PBUFFER(tb->tb_path, ILLEGAL_PATH_ELEMENT_OFFSET) =
		    tbSh;

		/* Change root in structure super block. */
		PUT_SB_ROOT_BLOCK(tb->tb_sb, tbSh->b_blocknr);
		PUT_SB_TREE_HEIGHT(tb->tb_sb, SB_TREE_HEIGHT(tb->tb_sb) + 1);
		do_balance_mark_sb_dirty(tb, REISERFS_SB(tb->tb_sb)->s_sbh, 1);
	}

	if (tb->blknum[h] == 2) {
		int snum;
		struct buffer_info dest_bi, src_bi;

		/* S_new = free buffer from list FEB */
		S_new = get_FEB(tb);

		set_blkh_level(B_BLK_HEAD(S_new), h + 1);

		dest_bi.tb = tb;
		dest_bi.bi_bh = S_new;
		dest_bi.bi_parent = NULL;
		dest_bi.bi_position = 0;
		src_bi.tb = tb;
		src_bi.bi_bh = tbSh;
		src_bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
		src_bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);

		n = B_NR_ITEMS(tbSh);	/* number of items in S[h] */
		snum = (insert_num + n + 1) / 2;
		if (n - snum >= child_pos) {
			/* new items don't fall into S_new */
			/*  store the delimiting key for the next level */
			/* new_insert_key = (n - snum)'th key in S[h] */
			memcpy(&new_insert_key, B_N_PDELIM_KEY(tbSh, n - snum),
			       KEY_SIZE);
			/* last parameter is del_par */
			internal_move_pointers_items(&dest_bi, &src_bi,
						     LAST_TO_FIRST, snum, 0);
			/*            internal_move_pointers_items(S_new, tbSh, LAST_TO_FIRST, snum, 0); */
		} else if (n + insert_num - snum < child_pos) {
			/* all new items fall into S_new */
			/*  store the delimiting key for the next level */
			/* new_insert_key = (n + insert_item - snum)'th key in S[h] */
			memcpy(&new_insert_key,
			       B_N_PDELIM_KEY(tbSh, n + insert_num - snum),
			       KEY_SIZE);
			/* last parameter is del_par */
			internal_move_pointers_items(&dest_bi, &src_bi,
						     LAST_TO_FIRST,
						     snum - insert_num, 0);
			/*                  internal_move_pointers_items(S_new,tbSh,1,snum - insert_num,0); */

			/* insert insert_num keys and node-pointers into S_new */
			internal_insert_childs(&dest_bi,
					       /*S_new,tb->S[h-1]->b_next, */
					       child_pos - n - insert_num +
					       snum - 1,
					       insert_num, insert_key,
					       insert_ptr);

			insert_num = 0;
		} else {
			struct disk_child *dc;

			/* some items fall into S_new, but some don't fall */
			/* last parameter is del_par */
			internal_move_pointers_items(&dest_bi, &src_bi,
						     LAST_TO_FIRST,
						     n - child_pos + 1, 1);
			/*                  internal_move_pointers_items(S_new,tbSh,1,n - child_pos + 1,1); */
			/* calculate number of new items that fall into S_new */
			k = snum - n + child_pos - 1;

			internal_insert_childs(&dest_bi, /*S_new, */ 0, k,
					       insert_key + 1, insert_ptr + 1);

			/* new_insert_key = insert_key[insert_num - k - 1] */
			memcpy(&new_insert_key, insert_key + insert_num - k - 1,
			       KEY_SIZE);
			/* replace first node-ptr in S_new by node-ptr to insert_ptr[insert_num-k-1] */

			dc = B_N_CHILD(S_new, 0);
			put_dc_size(dc,
				    (MAX_CHILD_SIZE
				     (insert_ptr[insert_num - k - 1]) -
				     B_FREE_SPACE(insert_ptr
						  [insert_num - k - 1])));
			put_dc_block_number(dc,
					    insert_ptr[insert_num - k -
						       1]->b_blocknr);

			do_balance_mark_internal_dirty(tb, S_new, 0);

			insert_num -= (k + 1);
		}
		/* new_insert_ptr = node_pointer to S_new */
		new_insert_ptr = S_new;

		RFALSE(!buffer_journaled(S_new) || buffer_journal_dirty(S_new)
		       || buffer_dirty(S_new), "cm-00001: bad S_new (%b)",
		       S_new);

		// S_new is released in unfix_nodes
	}

	n = B_NR_ITEMS(tbSh);	/*number of items in S[h] */

	if (0 <= child_pos && child_pos <= n && insert_num > 0) {
		bi.tb = tb;
		bi.bi_bh = tbSh;
		bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
		bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
		internal_insert_childs(&bi,	/*tbSh, */
				       /*          ( tb->S[h-1]->b_parent == tb->S[h] ) ? tb->S[h-1]->b_next :  tb->S[h]->b_child->b_next, */
				       child_pos, insert_num, insert_key,
				       insert_ptr);
	}

	memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
	insert_ptr[0] = new_insert_ptr;

	return order;
}
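/*
 * Side note (sketch, not from the sources): the insert_num computation in
 * balance_internal() above converts a byte count into an item count because
 * every item added to an internal node costs one delimiting key plus one
 * disk_child pointer.  The 16- and 8-byte sizes below are assumptions
 * matching the usual reiserfs on-disk layout; only the arithmetic is the
 * point.
 */
#include <stdio.h>

#define TOY_KEY_SIZE 16	/* assumed sizeof(struct reiserfs_key) */
#define TOY_DC_SIZE   8	/* assumed sizeof(struct disk_child)   */

static int toy_insert_num(int insert_size)
{
	return insert_size / (TOY_KEY_SIZE + TOY_DC_SIZE);
}

int main(void)
{
	/* one key/pointer pair: insert_size == 24 -> insert_num == 1 */
	printf("%d\n", toy_insert_num(TOY_KEY_SIZE + TOY_DC_SIZE));
	return 0;
}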
int indirect2direct(struct reiserfs_transaction_handle *th,
		    struct inode *inode, struct page *page,
		    struct treepath *path,	/* path to the indirect item */
		    const struct cpu_key *item_key,	/* key to look for the unformatted node pointer to be cut */
		    loff_t n_new_file_size,	/* new file size */
		    char *mode)
{
	struct super_block *sb = inode->i_sb;
	struct item_head s_ih;
	unsigned long block_size = sb->s_blocksize;
	char *tail;
	int tail_len, round_tail_len;
	loff_t pos, pos1;	/* position of first byte of the tail */
	struct cpu_key key;

	BUG_ON(!th->t_trans_id);

	REISERFS_SB(sb)->s_indirect2direct++;

	*mode = M_SKIP_BALANCING;

	/* store item head path points to */
	copy_item_head(&s_ih, PATH_PITEM_HEAD(path));

	tail_len = (n_new_file_size & (block_size - 1));
	if (get_inode_sd_version(inode) == STAT_DATA_V2)
		round_tail_len = ROUND_UP(tail_len);
	else
		round_tail_len = tail_len;

	pos =
	    le_ih_k_offset(&s_ih) - 1 + (ih_item_len(&s_ih) / UNFM_P_SIZE -
					 1) * sb->s_blocksize;
	pos1 = pos;

	/*
	 * We are protected by the inode lock: the tail cannot disappear, nor
	 * can an append be done; we are in truncate or packing the tail in
	 * file_release.
	 */

	tail = (char *)kmap(page);	/* this can schedule */

	if (path_changed(&s_ih, path)) {
		/* re-search indirect item */
		if (search_for_position_by_key(sb, item_key, path)
		    == POSITION_NOT_FOUND)
			reiserfs_panic(sb, "PAP-5520",
				       "item to be converted %K does not exist",
				       item_key);
		copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
#ifdef CONFIG_REISERFS_CHECK
		pos = le_ih_k_offset(&s_ih) - 1 +
		    (ih_item_len(&s_ih) / UNFM_P_SIZE -
		     1) * sb->s_blocksize;
		if (pos != pos1)
			reiserfs_panic(sb, "vs-5530", "tail position "
				       "changed while we were reading it");
#endif
	}

	/* Set direct item header to insert. */
	make_le_item_head(&s_ih, NULL, get_inode_item_key_version(inode),
			  pos1 + 1, TYPE_DIRECT, round_tail_len,
			  0xffff /*ih_free_space*/);

	tail = tail + (pos & (PAGE_CACHE_SIZE - 1));

	PATH_LAST_POSITION(path)++;

	key = *item_key;
	set_cpu_key_k_type(&key, TYPE_DIRECT);
	key.key_length = 4;
	/* Insert tail as new direct item in the tree */
	if (reiserfs_insert_item(th, path, &key, &s_ih, inode,
				 tail ? tail : NULL) < 0) {
		kunmap(page);
		return block_size - round_tail_len;
	}
	kunmap(page);

	/* make sure to get the i_blocks changes from reiserfs_insert_item */
	reiserfs_update_sd(th, inode);

	/*
	 * note: we now have the same situation as in the direct2indirect
	 * conversion above: there are two keys which have matching first
	 * three key components; they only differ by the fourth one.
	 */

	/*
	 * We have inserted a new direct item and must remove the last
	 * unformatted node.
	 */
	*mode = M_CUT;

	/* we store the position of the first direct item in the in-core inode */
	REISERFS_I(inode)->i_first_direct_byte = pos1 + 1;

	return block_size - round_tail_len;
}
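/*
 * Sketch of the `pos` arithmetic used in indirect2direct() above, in plain
 * userland C.  Assumptions: reiserfs item offsets are 1-based and each
 * UNFM_P_SIZE-byte pointer of an indirect item maps one block, so the first
 * byte covered by the LAST pointer sits at
 * offset - 1 + (nr_pointers - 1) * blocksize.
 */
#include <stdio.h>

#define TOY_UNFM_P_SIZE 4	/* assumed size of one unformatted-node pointer */

static long long toy_tail_start(long long item_offset, int item_len,
				int blocksize)
{
	int nr_ptrs = item_len / TOY_UNFM_P_SIZE;

	return item_offset - 1 + (long long)(nr_ptrs - 1) * blocksize;
}

int main(void)
{
	/* an indirect item at offset 1 with 3 pointers on a 4096-byte fs
	 * maps blocks starting at byte positions 0, 4096 and 8192 */
	printf("%lld\n", toy_tail_start(1, 3 * TOY_UNFM_P_SIZE, 4096));
	return 0;
}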
int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
		    struct treepath *path, struct buffer_head *unbh,
		    loff_t tail_offset)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *up_to_date_bh;
	struct item_head *p_le_ih = PATH_PITEM_HEAD(path);
	unsigned long total_tail = 0;
	struct cpu_key end_key;	/* key to search for the last byte of the converted item */
	struct item_head ind_ih;	/* new indirect item to be inserted or key of unfm pointer to be pasted */
	int blk_size, retval;	/* returned value of reiserfs_insert_item and clones */
	unp_t unfm_ptr;		/* handle on an unformatted node that will be inserted in the tree */

	BUG_ON(!th->t_trans_id);

	REISERFS_SB(sb)->s_direct2indirect++;

	blk_size = sb->s_blocksize;

	copy_item_head(&ind_ih, p_le_ih);
	set_le_ih_k_offset(&ind_ih, tail_offset);
	set_le_ih_k_type(&ind_ih, TYPE_INDIRECT);

	/* Set the key to search for the place of the new unfm pointer */
	make_cpu_key(&end_key, inode, tail_offset, TYPE_INDIRECT, 4);

	/* FIXME: we could avoid this */
	if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) {
		reiserfs_error(sb, "PAP-14030",
			       "pasted or inserted byte exists in "
			       "the tree %K. Use fsck to repair.", &end_key);
		pathrelse(path);
		return -EIO;
	}

	p_le_ih = PATH_PITEM_HEAD(path);

	unfm_ptr = cpu_to_le32(unbh->b_blocknr);

	if (is_statdata_le_ih(p_le_ih)) {
		/* Insert new indirect item. */
		set_ih_free_space(&ind_ih, 0);	/* delete at nearest future */
		put_ih_item_len(&ind_ih, UNFM_P_SIZE);
		PATH_LAST_POSITION(path)++;
		retval =
		    reiserfs_insert_item(th, path, &end_key, &ind_ih, inode,
					 (char *)&unfm_ptr);
	} else {
		/* Paste into last indirect item of an object. */
		retval = reiserfs_paste_into_item(th, path, &end_key, inode,
						    (char *)&unfm_ptr,
						    UNFM_P_SIZE);
	}
	if (retval) {
		return retval;
	}

	/*
	 * note: from here there are two keys which have matching first
	 * three key components; they only differ by the fourth one.
	 */

	/* Set the key to search for the direct items of the file */
	make_cpu_key(&end_key, inode, max_reiserfs_offset(inode), TYPE_DIRECT,
		     4);

	/*
	 * Move bytes from the direct items to the new unformatted node and
	 * delete them.
	 */
	while (1) {
		int tail_size;

		if (search_for_position_by_key(sb, &end_key, path) ==
		    POSITION_FOUND)
			reiserfs_panic(sb, "PAP-14050",
				       "direct item (%K) not found", &end_key);
		p_le_ih = PATH_PITEM_HEAD(path);
		RFALSE(!is_direct_le_ih(p_le_ih),
		       "vs-14055: direct item expected(%K), found %h",
		       &end_key, p_le_ih);
		tail_size = (le_ih_k_offset(p_le_ih) & (blk_size - 1))
		    + ih_item_len(p_le_ih) - 1;

		if (!unbh->b_page || buffer_uptodate(unbh)
		    || PageUptodate(unbh->b_page)) {
			up_to_date_bh = NULL;
		} else {
			up_to_date_bh = unbh;
		}
		retval = reiserfs_delete_item(th, path, &end_key, inode,
						up_to_date_bh);

		total_tail += retval;
		if (tail_size == retval)
			/* done: file does not have direct items anymore */
			break;

	}
	if (up_to_date_bh) {
		unsigned pgoff =
		    (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
		char *kaddr = kmap_atomic(up_to_date_bh->b_page);
		memset(kaddr + pgoff, 0, blk_size - total_tail);
		kunmap_atomic(kaddr);
	}

	REISERFS_I(inode)->i_first_direct_byte = U32_MAX;

	return 0;
}
/* path points to the first direct item of the file regardless of how many of
   them there are */
int direct2indirect (struct reiserfs_transaction_handle *th, struct inode * inode, 
		     struct path * path, struct buffer_head * unbh,
		     loff_t tail_offset)
{
    struct super_block * sb = inode->i_sb;
    struct buffer_head *up_to_date_bh ;
    struct item_head * p_le_ih = PATH_PITEM_HEAD (path);
    unsigned long total_tail = 0 ;
    struct cpu_key end_key;  /* Key to search for the last byte of the
				converted item. */
    struct item_head ind_ih; /* new indirect item to be inserted or
                                key of unfm pointer to be pasted */
    int	n_blk_size,
      n_retval;	  /* returned value for reiserfs_insert_item and clones */
    unp_t unfm_ptr;  /* Handle on an unformatted node
				       that will be inserted in the
				       tree. */


    REISERFS_SB(sb)->s_direct2indirect ++;

    n_blk_size = sb->s_blocksize;

    /* and key to search for append or insert pointer to the new
       unformatted node. */
    copy_item_head (&ind_ih, p_le_ih);
    set_le_ih_k_offset (&ind_ih, tail_offset);
    set_le_ih_k_type (&ind_ih, TYPE_INDIRECT);

    /* Set the key to search for the place for new unfm pointer */
    make_cpu_key (&end_key, inode, tail_offset, TYPE_INDIRECT, 4);

    // FIXME: we could avoid this 
    if ( search_for_position_by_key (sb, &end_key, path) == POSITION_FOUND ) {
	reiserfs_warning (sb, "PAP-14030: direct2indirect: "
			"pasted or inserted byte exists in the tree %K. "
			"Use fsck to repair.", &end_key);
	pathrelse(path);
	return -EIO;
    }
    
    p_le_ih = PATH_PITEM_HEAD (path);

    unfm_ptr = cpu_to_le32 (unbh->b_blocknr);
    
    if ( is_statdata_le_ih (p_le_ih) )  {
	/* Insert new indirect item. */
	set_ih_free_space (&ind_ih, 0); /* delete at nearest future */
        put_ih_item_len( &ind_ih, UNFM_P_SIZE );
	PATH_LAST_POSITION (path)++;
	n_retval = reiserfs_insert_item (th, path, &end_key, &ind_ih, inode,
					 (char *)&unfm_ptr);
    } else {
	/* Paste into last indirect item of an object. */
	n_retval = reiserfs_paste_into_item(th, path, &end_key, inode,
					    (char *)&unfm_ptr, UNFM_P_SIZE);
    }
    if ( n_retval ) {
	return n_retval;
    }

    // note: from here there are two keys which have matching first
    // three key components. They only differ by the fourth one.


    /* Set the key to search for the direct items of the file */
    make_cpu_key (&end_key, inode, max_reiserfs_offset (inode), TYPE_DIRECT, 4);

    /* Move bytes from the direct items to the new unformatted node
       and delete them. */
    while (1)  {
	int tail_size;

	/* end_key.k_offset is set so, that we will always have found
           last item of the file */
	if ( search_for_position_by_key (sb, &end_key, path) == POSITION_FOUND )
	    reiserfs_panic (sb, "PAP-14050: direct2indirect: "
			    "direct item (%K) not found", &end_key);
	p_le_ih = PATH_PITEM_HEAD (path);
	RFALSE( !is_direct_le_ih (p_le_ih),
	        "vs-14055: direct item expected(%K), found %h",
                &end_key, p_le_ih);
        tail_size = (le_ih_k_offset (p_le_ih) & (n_blk_size - 1))
            + ih_item_len(p_le_ih) - 1;

	/* we only send the unbh pointer if the buffer is not up to date.
	** this avoids overwriting good data from writepage() with old data
	** from the disk or buffer cache
	** Special case: unbh->b_page will be NULL if we are coming through
	** DIRECT_IO handler here.
	*/
	if (!unbh->b_page || buffer_uptodate(unbh) || PageUptodate(unbh->b_page)) {
	    up_to_date_bh = NULL ;
	} else {
	    up_to_date_bh = unbh ;
	}
	n_retval = reiserfs_delete_item (th, path, &end_key, inode, 
	                                 up_to_date_bh) ;

	total_tail += n_retval ;
	if (tail_size == n_retval)
	    // done: file does not have direct items anymore
	    break;

    }
    /* if we've copied bytes from disk into the page, we need to zero
    ** out the unused part of the block (it was not up to date before)
    */
    if (up_to_date_bh) {
        unsigned pgoff = (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
	char *kaddr=kmap_atomic(up_to_date_bh->b_page, KM_USER0);
	memset(kaddr + pgoff, 0, n_blk_size - total_tail) ;
	kunmap_atomic(kaddr, KM_USER0);
    }

    REISERFS_I(inode)->i_first_direct_byte = U32_MAX;

    return 0;
}
/* This first locks the inode (neither reads nor sync are permitted), reads
   the tail through the page cache, and inserts a direct item. When the
   direct item has been inserted successfully, the inode is left locked. The
   return value is always what we expect from it (the number of cut bytes).
   But when the tail remains in the unformatted node, we set mode to
   SKIP_BALANCING and unlock the inode */
int indirect2direct (struct reiserfs_transaction_handle *th, 
		     struct inode * p_s_inode,
		     struct page *page, 
		     struct path * p_s_path, /* path to the indirect item. */
		     const struct cpu_key * p_s_item_key, /* Key to look for unformatted node pointer to be cut. */
		     loff_t n_new_file_size, /* New file size. */
		     char * p_c_mode)
{
    struct super_block * p_s_sb = p_s_inode->i_sb;
    struct item_head      s_ih;
    unsigned long n_block_size = p_s_sb->s_blocksize;
    char * tail;
    int tail_len, round_tail_len;
    loff_t pos, pos1; /* position of first byte of the tail */
    struct cpu_key key;

    REISERFS_SB(p_s_sb)->s_indirect2direct ++;

    *p_c_mode = M_SKIP_BALANCING;

    /* store item head path points to. */
    copy_item_head (&s_ih, PATH_PITEM_HEAD(p_s_path));

    tail_len = (n_new_file_size & (n_block_size - 1));
    if (get_inode_sd_version (p_s_inode) == STAT_DATA_V2)
	round_tail_len = ROUND_UP (tail_len);
    else
	round_tail_len = tail_len;

    pos = le_ih_k_offset (&s_ih) - 1 + (ih_item_len(&s_ih) / UNFM_P_SIZE - 1) * p_s_sb->s_blocksize;
    pos1 = pos;

    // we are protected by i_sem. The tail cannot disappear, nor can an
    // append be done either
    // we are in truncate or packing tail in file_release

    tail = (char *)kmap(page) ; /* this can schedule */

    if (path_changed (&s_ih, p_s_path)) {
	/* re-search indirect item */
	if ( search_for_position_by_key (p_s_sb, p_s_item_key, p_s_path) == POSITION_NOT_FOUND )
	    reiserfs_panic(p_s_sb, "PAP-5520: indirect2direct: "
			   "item to be converted %K does not exist", p_s_item_key);
	copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
#ifdef CONFIG_REISERFS_CHECK
	pos = le_ih_k_offset (&s_ih) - 1 + 
	    (ih_item_len(&s_ih) / UNFM_P_SIZE - 1) * p_s_sb->s_blocksize;
	if (pos != pos1)
	    reiserfs_panic (p_s_sb, "vs-5530: indirect2direct: "
			    "tail position changed while we were reading it");
#endif
    }


    /* Set direct item header to insert. */
    make_le_item_head (&s_ih, NULL, get_inode_item_key_version (p_s_inode), pos1 + 1,
		       TYPE_DIRECT, round_tail_len, 0xffff/*ih_free_space*/);

    /* we want a pointer to the first byte of the tail in the page.
    ** the page was locked and this part of the page was up to date when
    ** indirect2direct was called, so we know the bytes are still valid
    */
    tail = tail + (pos & (PAGE_CACHE_SIZE - 1)) ;

    PATH_LAST_POSITION(p_s_path)++;

    key = *p_s_item_key;
    set_cpu_key_k_type (&key, TYPE_DIRECT);
    key.key_length = 4;
    /* Insert tail as new direct item in the tree */
    if ( reiserfs_insert_item(th, p_s_path, &key, &s_ih, p_s_inode,
			      tail ? tail : NULL) < 0 ) {
	/* No disk memory. So we can not convert last unformatted node
	   to the direct item.  In this case we used to adjust
	   indirect items's ih_free_space. Now ih_free_space is not
	   used, it would be ideal to write zeros to corresponding
	   unformatted node. For now i_size is considered as guard for
	   going out of file size */
	kunmap(page) ;
	return n_block_size - round_tail_len;
    }
    kunmap(page) ;

    /* make sure to get the i_blocks changes from reiserfs_insert_item */
    reiserfs_update_sd(th, p_s_inode);

    // note: we have now the same as in above direct2indirect
    // conversion: there are two keys which have matching first three
    // key components. They only differ by the fourth one.

    /* We have inserted new direct item and must remove last
       unformatted node. */
    *p_c_mode = M_CUT;

    /* we store position of first direct item in the in-core inode */
    //mark_file_with_tail (p_s_inode, pos1 + 1);
    REISERFS_I(p_s_inode)->i_first_direct_byte = pos1 + 1;

    return n_block_size - round_tail_len;
}
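/*
 * Sketch of the tail-length computation above: tail_len is just the
 * remainder of the new file size within the last block, and for new-format
 * (STAT_DATA_V2) files it is rounded up before the direct item is inserted.
 * The 8-byte rounding below is an assumption about what ROUND_UP() does; the
 * masking itself is exactly what the code above performs.
 */
#include <stdio.h>

static unsigned long toy_tail_len(unsigned long long new_file_size,
				  unsigned long block_size)
{
	return (unsigned long)(new_file_size & (block_size - 1));
}

static unsigned long toy_round_up8(unsigned long len)
{
	return (len + 7) & ~7UL;	/* assumed ROUND_UP granularity */
}

int main(void)
{
	unsigned long tail = toy_tail_len(10001, 4096);	/* 10001 % 4096 == 1809 */

	printf("tail_len=%lu rounded=%lu\n", tail, toy_round_up8(tail));
	return 0;
}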
static int show_super(struct seq_file *m, void *unused)
{
	struct super_block *sb = m->private;
	struct reiserfs_sb_info *r = REISERFS_SB(sb);

	seq_printf(m, "state: \t%s\n"
		   "mount options: \t%s%s%s%s%s%s%s%s%s%s%s\n"
		   "gen. counter: \t%i\n"
		   "s_disk_reads: \t%i\n"
		   "s_disk_writes: \t%i\n"
		   "s_fix_nodes: \t%i\n"
		   "s_do_balance: \t%i\n"
		   "s_unneeded_left_neighbor: \t%i\n"
		   "s_good_search_by_key_reada: \t%i\n"
		   "s_bmaps: \t%i\n"
		   "s_bmaps_without_search: \t%i\n"
		   "s_direct2indirect: \t%i\n"
		   "s_indirect2direct: \t%i\n"
		   "\n"
		   "max_hash_collisions: \t%i\n"
		   "breads: \t%lu\n"
		   "bread_misses: \t%lu\n"
		   "search_by_key: \t%lu\n"
		   "search_by_key_fs_changed: \t%lu\n"
		   "search_by_key_restarted: \t%lu\n"
		   "insert_item_restarted: \t%lu\n"
		   "paste_into_item_restarted: \t%lu\n"
		   "cut_from_item_restarted: \t%lu\n"
		   "delete_solid_item_restarted: \t%lu\n"
		   "delete_item_restarted: \t%lu\n"
		   "leaked_oid: \t%lu\n"
		   "leaves_removable: \t%lu\n",
		   SF(s_mount_state) == REISERFS_VALID_FS ?
		   "REISERFS_VALID_FS" : "REISERFS_ERROR_FS",
		   reiserfs_r5_hash(sb) ? "FORCE_R5 " : "",
		   reiserfs_rupasov_hash(sb) ? "FORCE_RUPASOV " : "",
		   reiserfs_tea_hash(sb) ? "FORCE_TEA " : "",
		   reiserfs_hash_detect(sb) ? "DETECT_HASH " : "",
		   reiserfs_no_border(sb) ? "NO_BORDER " : "BORDER ",
		   reiserfs_no_unhashed_relocation(sb) ?
		   "NO_UNHASHED_RELOCATION " : "",
		   reiserfs_hashed_relocation(sb) ? "UNHASHED_RELOCATION " : "",
		   reiserfs_test4(sb) ? "TEST4 " : "",
		   have_large_tails(sb) ? "TAILS " : have_small_tails(sb) ?
		   "SMALL_TAILS " : "NO_TAILS ",
		   replay_only(sb) ? "REPLAY_ONLY " : "",
		   convert_reiserfs(sb) ? "CONV " : "",
		   atomic_read(&r->s_generation_counter),
		   SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
		   SF(s_do_balance), SF(s_unneeded_left_neighbor),
		   SF(s_good_search_by_key_reada), SF(s_bmaps),
		   SF(s_bmaps_without_search), SF(s_direct2indirect),
		   SF(s_indirect2direct), SFP(max_hash_collisions), SFP(breads),
		   SFP(bread_miss), SFP(search_by_key),
		   SFP(search_by_key_fs_changed), SFP(search_by_key_restarted),
		   SFP(insert_item_restarted), SFP(paste_into_item_restarted),
		   SFP(cut_from_item_restarted),
		   SFP(delete_solid_item_restarted), SFP(delete_item_restarted),
		   SFP(leaked_oid), SFP(leaves_removable));

	return 0;
}
static void add_file(struct super_block *sb, char *name,
		     int (*func) (struct seq_file *, void *))
{
	proc_create_data(name, 0, REISERFS_SB(sb)->procdir,
			 &r_file_operations, func);
}
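/*
 * Hypothetical usage sketch (kernel context assumed): with the helper above,
 * the /proc entries whose show functions appear in this file would be
 * registered at mount time roughly like this.  The function name and entry
 * names are illustrative assumptions, not the actual registration code.
 */
static void example_reiserfs_proc_register(struct super_block *sb)
{
	add_file(sb, "super", show_super);	/* per-superblock statistics */
	add_file(sb, "journal", show_journal);	/* journal parameters        */
}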
Example #15
static int reiserfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct cpu_key pos_key;	/* key of current position in the directory (key of directory entry) */
	INITIALIZE_PATH(path_to_entry);
	struct buffer_head *bh;
	int item_num, entry_num;
	const struct reiserfs_key *rkey;
	struct item_head *ih, tmp_ih;
	int search_res;
	char *local_buf;
	loff_t next_pos;
	char small_buf[32];	/* avoid kmalloc if we can */
	struct reiserfs_dir_entry de;
	int ret = 0;

	reiserfs_write_lock(inode->i_sb);

	reiserfs_check_lock_depth(inode->i_sb, "readdir");

	/* form key for search the next directory entry using f_pos field of
	   file structure */
	make_cpu_key(&pos_key, inode,
		     (filp->f_pos) ? (filp->f_pos) : DOT_OFFSET, TYPE_DIRENTRY,
		     3);
	next_pos = cpu_key_k_offset(&pos_key);

	/*  reiserfs_warning (inode->i_sb, "reiserfs_readdir 1: f_pos = %Ld", filp->f_pos); */

	path_to_entry.reada = PATH_READA;
	while (1) {
	      research:
		/* search the directory item, containing entry with specified key */
		search_res =
		    search_by_entry_key(inode->i_sb, &pos_key, &path_to_entry,
					&de);
		if (search_res == IO_ERROR) {
			// FIXME: we could just skip part of directory which could
			// not be read
			ret = -EIO;
			goto out;
		}
		entry_num = de.de_entry_num;
		bh = de.de_bh;
		item_num = de.de_item_num;
		ih = de.de_ih;
		store_ih(&tmp_ih, ih);

		/* we must have found item, that is item of this directory, */
		RFALSE(COMP_SHORT_KEYS(&(ih->ih_key), &pos_key),
		       "vs-9000: found item %h does not match to dir we readdir %K",
		       ih, &pos_key);
		RFALSE(item_num > B_NR_ITEMS(bh) - 1,
		       "vs-9005 item_num == %d, item amount == %d",
		       item_num, B_NR_ITEMS(bh));

		/* and entry must be not more than number of entries in the item */
		RFALSE(I_ENTRY_COUNT(ih) < entry_num,
		       "vs-9010: entry number is too big %d (%d)",
		       entry_num, I_ENTRY_COUNT(ih));

		if (search_res == POSITION_FOUND
		    || entry_num < I_ENTRY_COUNT(ih)) {
			/* go through all entries in the directory item beginning from the entry, that has been found */
			struct reiserfs_de_head *deh =
			    B_I_DEH(bh, ih) + entry_num;

			for (; entry_num < I_ENTRY_COUNT(ih);
			     entry_num++, deh++) {
				int d_reclen;
				char *d_name;
				off_t d_off;
				ino_t d_ino;

				if (!de_visible(deh))
					/* it is hidden entry */
					continue;
				d_reclen = entry_length(bh, ih, entry_num);
				d_name = B_I_DEH_ENTRY_FILE_NAME(bh, ih, deh);
				if (!d_name[d_reclen - 1])
					d_reclen = strlen(d_name);

				if (d_reclen >
				    REISERFS_MAX_NAME(inode->i_sb->
						      s_blocksize)) {
					/* too big to send back to VFS */
					continue;
				}

				/* Ignore the .reiserfs_priv entry */
				if (reiserfs_xattrs(inode->i_sb) &&
				    !old_format_only(inode->i_sb) &&
				    filp->f_path.dentry == inode->i_sb->s_root &&
				    REISERFS_SB(inode->i_sb)->priv_root &&
				    REISERFS_SB(inode->i_sb)->priv_root->d_inode
				    && deh_objectid(deh) ==
				    le32_to_cpu(INODE_PKEY
						(REISERFS_SB(inode->i_sb)->
						 priv_root->d_inode)->
						k_objectid)) {
					continue;
				}

				d_off = deh_offset(deh);
				filp->f_pos = d_off;
				d_ino = deh_objectid(deh);
				if (d_reclen <= 32) {
					local_buf = small_buf;
				} else {
					local_buf = kmalloc(d_reclen,
							    GFP_NOFS);
					if (!local_buf) {
						pathrelse(&path_to_entry);
						ret = -ENOMEM;
						goto out;
					}
					if (item_moved(&tmp_ih, &path_to_entry)) {
						kfree(local_buf);
						goto research;
					}
				}
				// Note, that we copy name to user space via temporary
				// buffer (local_buf) because filldir will block if
				// user space buffer is swapped out. At that time
				// entry can move to somewhere else
				memcpy(local_buf, d_name, d_reclen);
				if (filldir
				    (dirent, local_buf, d_reclen, d_off, d_ino,
				     DT_UNKNOWN) < 0) {
					if (local_buf != small_buf) {
						kfree(local_buf);
					}
					goto end;
				}
				if (local_buf != small_buf) {
					kfree(local_buf);
				}
				// next entry should be looked for with such offset
				next_pos = deh_offset(deh) + 1;

				if (item_moved(&tmp_ih, &path_to_entry)) {
					goto research;
				}
			}	/* for */
		}

		if (item_num != B_NR_ITEMS(bh) - 1)
			// end of directory has been reached
			goto end;

		/* the item we went through is the last item of the node.
		   Use the right delimiting key to check whether this is the
		   end of the directory */
		rkey = get_rkey(&path_to_entry, inode->i_sb);
		if (!comp_le_keys(rkey, &MIN_KEY)) {
			/* set pos_key to the smallest key greater than the
			   key of the last entry in the item */
			set_cpu_key_k_offset(&pos_key, next_pos);
			continue;
		}

		if (COMP_SHORT_KEYS(rkey, &pos_key)) {
			// end of directory has been reached
			goto end;
		}

		/* directory continues in the right neighboring block */
		set_cpu_key_k_offset(&pos_key,
				     le_key_k_offset(KEY_FORMAT_3_5, rkey));

	}			/* while */

      end:
	filp->f_pos = next_pos;
	pathrelse(&path_to_entry);
	reiserfs_check_path(&path_to_entry);
      out:
	reiserfs_write_unlock(inode->i_sb);
	return ret;
}
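/*
 * Userland sketch of the small_buf/local_buf pattern used above: short names
 * are copied through a small stack buffer and only long ones pay for a heap
 * allocation.  The 32-byte threshold mirrors the code above; everything else
 * (names, the printf stand-in for filldir) is illustrative.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int emit_name(const char *d_name, size_t d_reclen)
{
	char small_buf[32];
	char *local_buf;

	if (d_reclen <= sizeof(small_buf)) {
		local_buf = small_buf;
	} else {
		local_buf = malloc(d_reclen);
		if (!local_buf)
			return -1;
	}
	memcpy(local_buf, d_name, d_reclen);
	printf("%.*s\n", (int)d_reclen, local_buf);	/* stand-in for filldir() */

	if (local_buf != small_buf)
		free(local_buf);
	return 0;
}

int main(void)
{
	return emit_name("example", 7);
}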
/* Delete insert_num node pointers together with their left items
 * and balance current node.*/
static void balance_internal_when_delete(struct tree_balance *tb,
					 int h, int child_pos)
{
	int insert_num;
	int n;
	struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h);
	struct buffer_info bi;

	insert_num = tb->insert_size[h] / ((int)(DC_SIZE + KEY_SIZE));

	/* delete child-node-pointer(s) together with their left item(s) */
	bi.tb = tb;
	bi.bi_bh = tbSh;
	bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
	bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);

	internal_delete_childs(&bi, child_pos, -insert_num);

	RFALSE(tb->blknum[h] > 1,
	       "tb->blknum[%d]=%d when insert_size < 0", h, tb->blknum[h]);

	n = B_NR_ITEMS(tbSh);

	if (tb->lnum[h] == 0 && tb->rnum[h] == 0) {
		if (tb->blknum[h] == 0) {
			/* node S[h] (root of the tree) is empty now */
			struct buffer_head *new_root;

			RFALSE(n
			       || B_FREE_SPACE(tbSh) !=
			       MAX_CHILD_SIZE(tbSh) - DC_SIZE,
			       "buffer must have only 0 keys (%d)", n);
			RFALSE(bi.bi_parent, "root has parent (%p)",
			       bi.bi_parent);

			/* choose a new root */
			if (!tb->L[h - 1] || !B_NR_ITEMS(tb->L[h - 1]))
				new_root = tb->R[h - 1];
			else
				new_root = tb->L[h - 1];
			/* switch super block's tree root block number to the new value */
			PUT_SB_ROOT_BLOCK(tb->tb_sb, new_root->b_blocknr);
			//REISERFS_SB(tb->tb_sb)->s_rs->s_tree_height --;
			PUT_SB_TREE_HEIGHT(tb->tb_sb,
					   SB_TREE_HEIGHT(tb->tb_sb) - 1);

			do_balance_mark_sb_dirty(tb,
						 REISERFS_SB(tb->tb_sb)->s_sbh,
						 1);
			/*&&&&&&&&&&&&&&&&&&&&&& */
			if (h > 1)
				/* use check_internal if new root is an internal node */
				check_internal(new_root);
			/*&&&&&&&&&&&&&&&&&&&&&& */

			/* do what is needed for buffer thrown from tree */
			reiserfs_invalidate_buffer(tb, tbSh);
			return;
		}
		return;
	}

	if (tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1) {	/* join S[h] with L[h] */

		RFALSE(tb->rnum[h] != 0,
		       "invalid tb->rnum[%d]==%d when joining S[h] with L[h]",
		       h, tb->rnum[h]);

		internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, n + 1);
		reiserfs_invalidate_buffer(tb, tbSh);

		return;
	}

	if (tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1) {	/* join S[h] with R[h] */
		RFALSE(tb->lnum[h] != 0,
		       "invalid tb->lnum[%d]==%d when joining S[h] with R[h]",
		       h, tb->lnum[h]);

		internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, n + 1);

		reiserfs_invalidate_buffer(tb, tbSh);
		return;
	}

	if (tb->lnum[h] < 0) {	/* borrow from left neighbor L[h] */
		RFALSE(tb->rnum[h] != 0,
		       "wrong tb->rnum[%d]==%d when borrow from L[h]", h,
		       tb->rnum[h]);
		/*internal_shift_right (tb, h, tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], -tb->lnum[h]); */
		internal_shift_right(INTERNAL_SHIFT_FROM_L_TO_S, tb, h,
				     -tb->lnum[h]);
		return;
	}

	if (tb->rnum[h] < 0) {	/* borrow from right neighbor R[h] */
		RFALSE(tb->lnum[h] != 0,
		       "invalid tb->lnum[%d]==%d when borrow from R[h]",
		       h, tb->lnum[h]);
		internal_shift_left(INTERNAL_SHIFT_FROM_R_TO_S, tb, h, -tb->rnum[h]);	/*tb->S[h], tb->CFR[h], tb->rkey[h], tb->R[h], -tb->rnum[h]); */
		return;
	}

	if (tb->lnum[h] > 0) {	/* split S[h] into two parts and put them into neighbors */
		RFALSE(tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1,
		       "invalid tb->lnum[%d]==%d or tb->rnum[%d]==%d when S[h](item number == %d) is split between them",
		       h, tb->lnum[h], h, tb->rnum[h], n);

		internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]);	/*tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], tb->lnum[h]); */
		internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
				     tb->rnum[h]);

		reiserfs_invalidate_buffer(tb, tbSh);

		return;
	}
	reiserfs_panic(tb->tb_sb, "ibalance-2",
		       "unexpected tb->lnum[%d]==%d or tb->rnum[%d]==%d",
		       h, tb->lnum[h], h, tb->rnum[h]);
}
Exemple #17
0
/*
 * Common code for mount and mountroot
 */ 
static int
reiserfs_mountfs(struct vnode *devvp, struct mount *mp, struct thread *td)
{
	int error, old_format = 0;
	struct reiserfs_mount *rmp;
	struct reiserfs_sb_info *sbi;
	struct reiserfs_super_block *rs;
	struct cdev *dev;

	struct g_consumer *cp;
	struct bufobj *bo;

	//ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	dev = devvp->v_rdev;
	dev_ref(dev);
	DROP_GIANT();
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "reiserfs", /* read-only */ 0);
	g_topology_unlock();
	PICKUP_GIANT();
	VOP_UNLOCK(devvp, 0);
	if (error) {
		dev_rel(dev);
		return (error);
	}

	/* Route buffer cache I/O for the device vnode through the GEOM
	 * consumer opened above. */
	bo = &devvp->v_bufobj;
	bo->bo_private = cp;
	bo->bo_ops = g_vfs_bufops;

	if (devvp->v_rdev->si_iosize_max != 0)
		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	rmp = NULL;
	sbi = NULL;

	/* rmp holds all the information specific to this mount */
	rmp = malloc(sizeof *rmp, M_REISERFSMNT, M_WAITOK | M_ZERO);
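	/* malloc(9) with M_WAITOK sleeps rather than fail, so the NULL
	 * checks below are defensive only. */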
	if (!rmp) {
		error = (ENOMEM);
		goto out;
	}
	sbi = malloc(sizeof *sbi, M_REISERFSMNT, M_WAITOK | M_ZERO);
	if (!sbi) {
		error = (ENOMEM);
		goto out;
	}
	rmp->rm_reiserfs = sbi;
	rmp->rm_mountp   = mp;
	rmp->rm_devvp    = devvp;
	rmp->rm_dev      = dev;
	rmp->rm_bo       = &devvp->v_bufobj;
	rmp->rm_cp       = cp;

	/* Set default values for options: non-aggressive tails */
	REISERFS_SB(sbi)->s_mount_opt = (1 << REISERFS_SMALLTAIL);
	REISERFS_SB(sbi)->s_rd_only   = 1;
	REISERFS_SB(sbi)->s_devvp     = devvp;

	/* Read the super block */
	if ((error = read_super_block(rmp, REISERFS_OLD_DISK_OFFSET)) == 0) {
		/* The read process succeeded, it's an old format */
		old_format = 1;
	} else if ((error = read_super_block(rmp, REISERFS_DISK_OFFSET)) != 0) {
		reiserfs_log(LOG_ERR, "cannot find a ReiserFS filesystem\n");
		goto out;
	}

	rs = SB_DISK_SUPER_BLOCK(sbi);

	/*
	 * Let's do basic sanity check to verify that underlying device is
	 * not smaller than the filesystem. If the check fails then abort and
	 * scream, because bad stuff will happen otherwise.
	 */
#if 0
	if (s->s_bdev && s->s_bdev->bd_inode &&
	    i_size_read(s->s_bdev->bd_inode) <
	    sb_block_count(rs) * sb_blocksize(rs)) {
		reiserfs_log(LOG_ERR,
		    "reiserfs: filesystem cannot be mounted because it is "
		    "bigger than the device.\n");
		reiserfs_log(LOG_ERR, "reiserfs: you may need to run fsck, "
		    "or maybe you forgot to reboot after fdisk when it "
		    "told you to.\n");
		goto out;
	}
#endif

	/*
	 * XXX This is from the original Linux code, but why assign two values
	 * to the same variable in a row? The second store overwrites the first.
	 */
	sbi->s_mount_state = SB_REISERFS_STATE(sbi);
	sbi->s_mount_state = REISERFS_VALID_FS;

	if ((error = (old_format ?
	    read_old_bitmaps(rmp) : read_bitmaps(rmp)))) {
		reiserfs_log(LOG_ERR, "unable to read bitmap\n");
		goto out;
	}

	/* Make data=ordered the default */
	if (!reiserfs_data_log(sbi) && !reiserfs_data_ordered(sbi) &&
	    !reiserfs_data_writeback(sbi)) {
		REISERFS_SB(sbi)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
	}

	if (reiserfs_data_log(sbi)) {
		reiserfs_log(LOG_INFO, "using journaled data mode\n");
	} else if (reiserfs_data_ordered(sbi)) {
		reiserfs_log(LOG_INFO, "using ordered data mode\n");
	} else {
		reiserfs_log(LOG_INFO, "using writeback data mode\n");
	}

	/* TODO Not yet supported */
#if 0
	if(journal_init(sbi, jdev_name, old_format, commit_max_age)) {
		reiserfs_log(LOG_ERR, "unable to initialize journal space\n");
		goto out;
	} else {
		jinit_done = 1 ; /* once this is set, journal_release must
				    be called if we error out of the mount */
	}

	if (reread_meta_blocks(sbi)) {
		reiserfs_log(LOG_ERR,
		    "unable to reread meta blocks after journal init\n");
		goto out;
	}
#endif

	/* Define and initialize hash function */
	sbi->s_hash_function = hash_function(rmp);

	if (sbi->s_hash_function == NULL) {
		reiserfs_log(LOG_ERR, "couldn't determine the hash function\n");
		error = (EINVAL);
		goto out;
	}

	if (is_reiserfs_3_5(rs) ||
	    (is_reiserfs_jr(rs) && SB_VERSION(sbi) == REISERFS_VERSION_1))
		bit_set(&(sbi->s_properties), REISERFS_3_5);
	else
		bit_set(&(sbi->s_properties), REISERFS_3_6);

	mp->mnt_data = rmp;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(dev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE;
	MNT_IUNLOCK(mp);
#if defined(si_mountpoint)
	devvp->v_rdev->si_mountpoint = mp;
#endif

	return (0);

out:
	reiserfs_log(LOG_INFO, "*** error during mount ***\n");
	if (sbi) {
		if (SB_AP_BITMAP(sbi)) {
			int i;
			for (i = 0; i < SB_BMAP_NR(sbi); i++) {
				if (!SB_AP_BITMAP(sbi)[i].bp_data)
					break;
				free(SB_AP_BITMAP(sbi)[i].bp_data,
				    M_REISERFSMNT);
			}
			free(SB_AP_BITMAP(sbi), M_REISERFSMNT);
		}

		if (sbi->s_rs) {
			free(sbi->s_rs, M_REISERFSMNT);
			sbi->s_rs = NULL;
		}
	}

	if (cp != NULL) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
	}

	if (sbi)
		free(sbi, M_REISERFSMNT);
	if (rmp)
		free(rmp, M_REISERFSMNT);
	dev_rel(dev);
	return (error);
}
Exemple #18
0
/* block allocator related options are parsed here */
int reiserfs_parse_alloc_options(struct super_block * s, char * options)
{
    char * this_char, * value;

    REISERFS_SB(s)->s_alloc_options.bits = 0; /* clear default settings */

    while ( (this_char = strsep (&options, ":")) != NULL ) {
        if ((value = strchr (this_char, '=')) != NULL)
            *value++ = 0;

        if (!strcmp(this_char, "concentrating_formatted_nodes")) {
            int temp;
            SET_OPTION(concentrating_formatted_nodes);
            temp = (value && *value) ? simple_strtoul (value, &value, 0) : 10;
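            /*
             * The value is a percentage of the disk; the allocator stores it
             * as a divisor (border = 100 / percent), so the default of 10
             * means the first 10% of the disk.
             */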
            if (temp <= 0 || temp > 100) {
                REISERFS_SB(s)->s_alloc_options.border = 10;
            } else {
                REISERFS_SB(s)->s_alloc_options.border = 100 / temp;
            }
            continue;
        }
        if (!strcmp(this_char, "displacing_large_files")) {
            SET_OPTION(displacing_large_files);
            REISERFS_SB(s)->s_alloc_options.large_file_size =
                (value && *value) ? simple_strtoul (value, &value, 0) : 16;
            continue;
        }
        if (!strcmp(this_char, "displacing_new_packing_localities")) {
            SET_OPTION(displacing_new_packing_localities);
            continue;
        }

        if (!strcmp(this_char, "old_hashed_relocation")) {
            SET_OPTION(old_hashed_relocation);
            continue;
        }

        if (!strcmp(this_char, "new_hashed_relocation")) {
            SET_OPTION(new_hashed_relocation);
            continue;
        }

        if (!strcmp(this_char, "dirid_groups")) {
            SET_OPTION(dirid_groups);
            continue;
        }
        if (!strcmp(this_char, "oid_groups")) {
            SET_OPTION(oid_groups);
            continue;
        }
        if (!strcmp(this_char, "packing_groups")) {
            SET_OPTION(packing_groups);
            continue;
        }
        if (!strcmp(this_char, "hashed_formatted_nodes")) {
            SET_OPTION(hashed_formatted_nodes);
            continue;
        }

        if (!strcmp(this_char, "skip_busy")) {
            SET_OPTION(skip_busy);
            continue;
        }

        if (!strcmp(this_char, "hundredth_slices")) {
            SET_OPTION(hundredth_slices);
            continue;
        }

        if (!strcmp(this_char, "old_way")) {
            SET_OPTION(old_way);
            continue;
        }

        if (!strcmp(this_char, "displace_based_on_dirid")) {
            SET_OPTION(displace_based_on_dirid);
            continue;
        }

        if (!strcmp(this_char, "preallocmin")) {
            REISERFS_SB(s)->s_alloc_options.preallocmin =
                (value && *value) ? simple_strtoul (value, &value, 0) : 4;
            continue;
        }

        if (!strcmp(this_char, "preallocsize")) {
            REISERFS_SB(s)->s_alloc_options.preallocsize =
                (value && *value) ? simple_strtoul (value, &value, 0) : PREALLOCATION_SIZE;
            continue;
        }

        reiserfs_warning (s, "zam-4001: %s : unknown option - %s",
                          __FUNCTION__ , this_char);
        return 1;
    }

    reiserfs_warning (s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s));
    return 0;
}
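The loop above is plain strsep() tokenizing on ':' with an optional "=value" split per token. Below is a minimal user-space sketch of that same pattern; the option names, defaults and printf/fprintf calls are stand-ins for SET_OPTION() and reiserfs_warning(), not the kernel code itself.

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Standalone illustration of the strsep()/strchr() option-parsing loop. */
static int parse_alloc_options(char *options)
{
    char *this_char, *value;

    while ((this_char = strsep(&options, ":")) != NULL) {
        if ((value = strchr(this_char, '=')) != NULL)
            *value++ = '\0';

        if (strcmp(this_char, "skip_busy") == 0) {
            printf("skip_busy set\n");
            continue;
        }
        if (strcmp(this_char, "preallocsize") == 0) {
            /* placeholder default; the kernel uses PREALLOCATION_SIZE */
            long blocks = (value && *value) ? strtol(value, NULL, 0) : 8;
            printf("preallocsize = %ld blocks\n", blocks);
            continue;
        }
        fprintf(stderr, "unknown option - %s\n", this_char);
        return 1;
    }
    return 0;
}

int main(void)
{
    char opts[] = "skip_busy:preallocsize=16";	/* must be writable for strsep() */

    return parse_alloc_options(opts);
}

Note that an empty token (for example from a trailing ':') matches none of the names and falls through to the unknown-option branch, in the sketch as in the function above.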
Exemple #19
0
void reiserfs_lock_check_recursive(struct super_block *sb)
{
    struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);

    WARN_ONCE((sb_i->lock_depth > 0), "Unwanted recursive reiserfs lock!\n");
}
Exemple #20
0
/* this stores initial state of tree balance in the print_tb_buf */
void store_print_tb(struct tree_balance *tb)
{
	int h = 0;
	int i;
	struct buffer_head *tbSh, *tbFh;

	if (!tb)
		return;

	sprintf(print_tb_buf, "\n"
		"BALANCING %d\n"
		"MODE=%c, ITEM_POS=%d POS_IN_ITEM=%d\n"
		"=====================================================================\n"
		"* h *    S    *    L    *    R    *   F   *   FL  *   FR  *  CFL  *  CFR  *\n",
		REISERFS_SB(tb->tb_sb)->s_do_balance,
		tb->tb_mode, PATH_LAST_POSITION(tb->tb_path),
		tb->tb_path->pos_in_item);

	for (h = 0; h < ARRAY_SIZE(tb->insert_size); h++) {
		if (PATH_H_PATH_OFFSET(tb->tb_path, h) <=
		    tb->tb_path->path_length
		    && PATH_H_PATH_OFFSET(tb->tb_path,
					  h) > ILLEGAL_PATH_ELEMENT_OFFSET) {
			tbSh = PATH_H_PBUFFER(tb->tb_path, h);
			tbFh = PATH_H_PPARENT(tb->tb_path, h);
		} else {
			tbSh = NULL;
			tbFh = NULL;
		}
		sprintf(print_tb_buf + strlen(print_tb_buf),
			"* %d * %3lld(%2d) * %3lld(%2d) * %3lld(%2d) * %5lld * %5lld * %5lld * %5lld * %5lld *\n",
			h,
			(tbSh) ? (long long)(tbSh->b_blocknr) : (-1LL),
			(tbSh) ? atomic_read(&(tbSh->b_count)) : -1,
			(tb->L[h]) ? (long long)(tb->L[h]->b_blocknr) : (-1LL),
			(tb->L[h]) ? atomic_read(&(tb->L[h]->b_count)) : -1,
			(tb->R[h]) ? (long long)(tb->R[h]->b_blocknr) : (-1LL),
			(tb->R[h]) ? atomic_read(&(tb->R[h]->b_count)) : -1,
			(tbFh) ? (long long)(tbFh->b_blocknr) : (-1LL),
			(tb->FL[h]) ? (long long)(tb->FL[h]->
						  b_blocknr) : (-1LL),
			(tb->FR[h]) ? (long long)(tb->FR[h]->
						  b_blocknr) : (-1LL),
			(tb->CFL[h]) ? (long long)(tb->CFL[h]->
						   b_blocknr) : (-1LL),
			(tb->CFR[h]) ? (long long)(tb->CFR[h]->
						   b_blocknr) : (-1LL));
	}

	sprintf(print_tb_buf + strlen(print_tb_buf),
		"=====================================================================\n"
		"* h * size * ln * lb * rn * rb * blkn * s0 * s1 * s1b * s2 * s2b * curb * lk * rk *\n"
		"* 0 * %4d * %2d * %2d * %2d * %2d * %4d * %2d * %2d * %3d * %2d * %3d * %4d * %2d * %2d *\n",
		tb->insert_size[0], tb->lnum[0], tb->lbytes, tb->rnum[0],
		tb->rbytes, tb->blknum[0], tb->s0num, tb->s1num, tb->s1bytes,
		tb->s2num, tb->s2bytes, tb->cur_blknum, tb->lkey[0],
		tb->rkey[0]);

	/* this prints balance parameters for non-leaf levels */
	h = 0;
	do {
		h++;
		sprintf(print_tb_buf + strlen(print_tb_buf),
			"* %d * %4d * %2d *    * %2d *    * %2d *\n",
			h, tb->insert_size[h], tb->lnum[h], tb->rnum[h],
			tb->blknum[h]);
	} while (tb->insert_size[h]);

	sprintf(print_tb_buf + strlen(print_tb_buf),
		"=====================================================================\n"
		"FEB list: ");

	/*
	 * print the FEB list: buffers (bh, b_blocknr, b_count) that will be
	 * used for new nodes
	 */
	h = 0;
	for (i = 0; i < ARRAY_SIZE(tb->FEB); i++)
		sprintf(print_tb_buf + strlen(print_tb_buf),
			"%p (%llu %d)%s", tb->FEB[i],
			tb->FEB[i] ? (unsigned long long)tb->FEB[i]->
			b_blocknr : 0ULL,
			tb->FEB[i] ? atomic_read(&(tb->FEB[i]->b_count)) : 0,
			(i == ARRAY_SIZE(tb->FEB) - 1) ? "\n" : ", ");

	sprintf(print_tb_buf + strlen(print_tb_buf),
		"======================== the end ====================================\n");
}
Exemple #21
0
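/*
 * True when the directory entry head refers to the hidden .reiserfs_priv
 * directory (the per-superblock xattr private root).
 */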
static inline bool is_privroot_deh(struct inode *dir, struct reiserfs_de_head *deh)
{
	struct dentry *privroot = REISERFS_SB(dir->i_sb)->priv_root;
	return (d_really_is_positive(privroot) &&
	        deh->deh_objectid == INODE_PKEY(d_inode(privroot))->k_objectid);
}