Example #1
File: llog.c Project: 7799/linux
static int llog_read_header(const struct lu_env *env,
			    struct llog_handle *handle,
			    struct obd_uuid *uuid)
{
	struct llog_operations *lop;
	int rc;

	rc = llog_handle2ops(handle, &lop);
	if (rc)
		return rc;

	if (lop->lop_read_header == NULL)
		return -EOPNOTSUPP;

	rc = lop->lop_read_header(env, handle);
	if (rc == LLOG_EEMPTY) {
		struct llog_log_hdr *llh = handle->lgh_hdr;

		handle->lgh_last_idx = 0; /* header is record with index 0 */
		llh->llh_count = 1;	 /* for the header record */
		llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
		llh->llh_hdr.lrh_len = llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
		llh->llh_hdr.lrh_index = llh->llh_tail.lrt_index = 0;
		llh->llh_timestamp = cfs_time_current_sec();
		if (uuid)
			memcpy(&llh->llh_tgtuuid, uuid,
			       sizeof(llh->llh_tgtuuid));
		llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);
		ext2_set_bit(0, llh->llh_bitmap);
		rc = 0;
	}
	return rc;
}
Example #2
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);

	if (!bitmap->filemap) {
		return;
	}

	page = filemap_get_page(bitmap, chunk);
	if (!page)
		return;
	bit = file_page_offset(chunk);

 	/* set the bit */
	kaddr = kmap_atomic(page, KM_USER0);
	if (bitmap->flags & BITMAP_HOSTENDIAN)
		set_bit(bit, kaddr);
	else
		ext2_set_bit(bit, kaddr);
	kunmap_atomic(kaddr, KM_USER0);
	PRINTK("set file bit %lu page %lu\n", bit, page->index);

	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);

}
Example #3
/* alloc a new block */
int ext2_alloc_block ( unsigned int goal)
{
        unsigned int block;
        unsigned int block_group;
        unsigned int bit;
        struct ext2_group_desc *desc;
        struct ext2_sb_info * sbi = EXT2_SBI();
        void *bitmap;

        block_group = ext2_get_group_num (goal, BLOCK);
        bit = ext2_get_group_offset (goal, BLOCK);
        
        bitmap = ext2_read_block_bitmap (block_group);
        block = ext2_grab_block (bitmap, bit);

        if ( !block)
                ext2_error ("no free blocks any more");

        desc = ext2_get_group_desc (block_group);
        desc->bg_free_blocks_count --;
        sbi->s_free_blocks_count --;
        ext2_set_bit (bitmap, block);

        return block;
}
Example #4
File: log.c Project: ddn-lixi/mtfs
/* returns negative on error; 0 if success; 1 if success & log destroyed */
int mlog_cancel_rec(struct mlog_handle *loghandle, int index)
{
	struct mlog_log_hdr *mlh = loghandle->mgh_hdr;
	int ret = 0;
	MENTRY();

	MDEBUG("Canceling %d in log %llx\n",
	       index, loghandle->mgh_id.mgl_oid);

	if (index == 0) {
		MERROR("Can't cancel index 0 which is header\n");
		ret = -EINVAL;
		goto out;
	}

	if (!ext2_clear_bit(index, mlh->mlh_bitmap)) {
		MDEBUG("Catalog index %u already clear?\n", index);
		ret = -ENOENT;
		goto out;
	}

	mlh->mlh_count--;

	if ((mlh->mlh_flags & MLOG_F_ZAP_WHEN_EMPTY) &&
	    (mlh->mlh_count == 1) &&
	    (loghandle->mgh_last_idx == (MLOG_BITMAP_BYTES * 8) - 1)) {
		ret = mlog_destroy(loghandle);
		if (ret) {
			MERROR("Failure destroying log after last cancel: %d\n",
			       ret);
			ext2_set_bit(index, mlh->mlh_bitmap);
			mlh->mlh_count++;
		} else {
			ret = 1;
		}
		goto out;
	}

	ret = mlog_write_rec(loghandle, &mlh->mlh_hdr, NULL, 0, NULL, 0);
	if (ret) {
		MERROR("Failure re-writing header %d\n", ret);
		ext2_set_bit(index, mlh->mlh_bitmap);
		mlh->mlh_count++;
	}
out:
	MRETURN(ret);
}
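
Note: the examples here rely on the test-and-set/test-and-clear semantics of ext2_set_bit() and ext2_clear_bit(): each operates on a little-endian bitmap and returns the previous value of the bit, which is why Example #4 treats a zero return from ext2_clear_bit() as "already clear" and restores state with ext2_set_bit() on error, and later examples treat a non-zero return from ext2_set_bit() as corruption. The snippet below is a minimal user-space sketch of that contract only; the demo_* helpers are hypothetical stand-ins, not the kernel implementation.

/*
 * Minimal user-space sketch (not the kernel implementation) of the
 * contract the examples depend on: setting a bit in a little-endian
 * bitmap returns its previous value, and so does clearing it.
 */
#include <stdio.h>

#define BITS_PER_BYTE 8

/* hypothetical stand-in: set bit nr in a byte-addressed bitmap, return old value */
static int demo_set_bit_le(int nr, unsigned char *addr)
{
	unsigned char mask = 1u << (nr % BITS_PER_BYTE);
	unsigned char *p = addr + nr / BITS_PER_BYTE;
	int old = (*p & mask) != 0;

	*p |= mask;
	return old;
}

/* hypothetical stand-in: clear bit nr, return old value */
static int demo_clear_bit_le(int nr, unsigned char *addr)
{
	unsigned char mask = 1u << (nr % BITS_PER_BYTE);
	unsigned char *p = addr + nr / BITS_PER_BYTE;
	int old = (*p & mask) != 0;

	*p &= ~mask;
	return old;
}

int main(void)
{
	unsigned char bitmap[8] = { 0 };
	int index = 13;

	/* append path: a second set of the same index would signal corruption */
	if (demo_set_bit_le(index, bitmap))
		printf("index %d already set in bitmap\n", index);

	/* cancel path: a clear that returns 0 means the record was never set */
	if (!demo_clear_bit_le(index, bitmap))
		printf("index %d already clear\n", index);
	else
		printf("index %d cancelled\n", index);

	return 0;
}
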
Example #5
File: llog.c Project: Lezval/lustre
/* returns negative on error; 0 if success; 1 if success & log destroyed */
int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle,
		    int index)
{
        struct llog_log_hdr *llh = loghandle->lgh_hdr;
        int rc = 0;
        ENTRY;

        CDEBUG(D_RPCTRACE, "Canceling %d in log "DOSTID"\n",
               index, POSTID(&loghandle->lgh_id.lgl_oi));

        if (index == 0) {
                CERROR("Can't cancel index 0 which is header\n");
                RETURN(-EINVAL);
        }

	spin_lock(&loghandle->lgh_hdr_lock);
	if (!ext2_clear_bit(index, llh->llh_bitmap)) {
		spin_unlock(&loghandle->lgh_hdr_lock);
		CDEBUG(D_RPCTRACE, "Catalog index %u already clear?\n", index);
		RETURN(-ENOENT);
	}

	llh->llh_count--;

	if ((llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
	    (llh->llh_count == 1) &&
	    (loghandle->lgh_last_idx == (LLOG_BITMAP_BYTES * 8) - 1)) {
		spin_unlock(&loghandle->lgh_hdr_lock);
		rc = llog_destroy(env, loghandle);
		if (rc < 0) {
			CERROR("%s: can't destroy empty llog #"DOSTID
			       "#%08x: rc = %d\n",
			       loghandle->lgh_ctxt->loc_obd->obd_name,
			       POSTID(&loghandle->lgh_id.lgl_oi),
			       loghandle->lgh_id.lgl_ogen, rc);
			GOTO(out_err, rc);
		}
		RETURN(1);
	}
	spin_unlock(&loghandle->lgh_hdr_lock);

	rc = llog_write(env, loghandle, &llh->llh_hdr, NULL, 0, NULL, 0);
	if (rc < 0) {
		CERROR("%s: fail to write header for llog #"DOSTID
		       "#%08x: rc = %d\n",
		       loghandle->lgh_ctxt->loc_obd->obd_name,
		       POSTID(&loghandle->lgh_id.lgl_oi),
		       loghandle->lgh_id.lgl_ogen, rc);
		GOTO(out_err, rc);
	}
	RETURN(0);
out_err:
	spin_lock(&loghandle->lgh_hdr_lock);
	ext2_set_bit(index, llh->llh_bitmap);
	llh->llh_count++;
	spin_unlock(&loghandle->lgh_hdr_lock);
	return rc;
}
Example #6
int llog_read_header(const struct lu_env *env, struct llog_handle *handle,
		     const struct obd_uuid *uuid)
{
	struct llog_operations *lop;
	int rc;
	ENTRY;

	rc = llog_handle2ops(handle, &lop);
	if (rc)
		RETURN(rc);

	if (lop->lop_read_header == NULL)
		RETURN(-EOPNOTSUPP);

	rc = lop->lop_read_header(env, handle);
	if (rc == LLOG_EEMPTY) {
		struct llog_log_hdr *llh = handle->lgh_hdr;

		/* lrh_len should be initialized in llog_init_handle */
		handle->lgh_last_idx = 0; /* header is record with index 0 */
		handle->lgh_write_offset = 0;
		llh->llh_count = 1;         /* for the header record */
		llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
		LASSERT(handle->lgh_ctxt->loc_chunk_size >=
						LLOG_MIN_CHUNK_SIZE);
		llh->llh_hdr.lrh_len = handle->lgh_ctxt->loc_chunk_size;
		llh->llh_hdr.lrh_index = 0;
		llh->llh_timestamp = cfs_time_current_sec();
		if (uuid)
			memcpy(&llh->llh_tgtuuid, uuid,
			       sizeof(llh->llh_tgtuuid));
		llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);
		/* Since updating the llog header might also call this
		 * function, reset the bitmap to 0 here */
		memset(LLOG_HDR_BITMAP(llh), 0, llh->llh_hdr.lrh_len -
						llh->llh_bitmap_offset -
						sizeof(llh->llh_tail));
		ext2_set_bit(0, LLOG_HDR_BITMAP(llh));
		LLOG_HDR_TAIL(llh)->lrt_len = llh->llh_hdr.lrh_len;
		LLOG_HDR_TAIL(llh)->lrt_index = llh->llh_hdr.lrh_index;
		rc = 0;
	}
	RETURN(rc);
}
Example #7
static __inline__ void
store_block (unsigned block, uchar * src, unsigned len)
{
	ulong offset = block * TftpBlkSize + TftpBlockWrapOffset;
	ulong newsize = offset + len;
#ifdef CONFIG_SYS_DIRECT_FLASH_TFTP
	int i, rc = 0;

	for (i=0; i<CONFIG_SYS_MAX_FLASH_BANKS; i++) {
		/* start address in flash? */
		if (flash_info[i].flash_id == FLASH_UNKNOWN)
			continue;
		if (load_addr + offset >= flash_info[i].start[0]) {
			rc = 1;
			break;
		}
	}

	if (rc) { /* Flash is destination for this packet */
		rc = flash_write ((char *)src, (ulong)(load_addr+offset), len);
		if (rc) {
			flash_perror (rc);
			NetState = NETLOOP_FAIL;
			return;
		}
	}
	else
#endif /* CONFIG_SYS_DIRECT_FLASH_TFTP */
	{
		(void)memcpy((void *)(load_addr + offset), src, len);
	}
#ifdef CONFIG_MCAST_TFTP
	if (Multicast)
		ext2_set_bit(block, Bitmap);
#endif

	if (NetBootFileXferSize < newsize)
		NetBootFileXferSize = newsize;
}
Example #8
struct inode * ext2_new_inode (const struct inode * dir, int mode)
{
	struct super_block * sb;
	struct buffer_head * bh;
	struct buffer_head * bh2;
	int group, i;
	ino_t ino;
	struct inode * inode;
	struct ext2_group_desc * desc;
	struct ext2_super_block * es;
	int err;

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	lock_super (sb);
	es = sb->u.ext2_sb.s_es;
repeat:
	if (S_ISDIR(mode))
		group = find_group_dir(sb, dir->u.ext2_i.i_block_group);
	else 
		group = find_group_other(sb, dir->u.ext2_i.i_block_group);

	err = -ENOSPC;
	if (group == -1)
		goto fail;

	err = -EIO;
	bh = load_inode_bitmap (sb, group);
	if (IS_ERR(bh))
		goto fail2;

	i = ext2_find_first_zero_bit ((unsigned long *) bh->b_data,
				      EXT2_INODES_PER_GROUP(sb));
	if (i >= EXT2_INODES_PER_GROUP(sb))
		goto bad_count;
	ext2_set_bit (i, bh->b_data);

	mark_buffer_dirty(bh);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer (bh);
	}

	ino = group * EXT2_INODES_PER_GROUP(sb) + i + 1;
	if (ino < EXT2_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext2_error (sb, "ext2_new_inode",
			    "reserved inode or inode > inodes count - "
			    "block_group = %d,inode=%ld", group, ino);
		err = -EIO;
		goto fail2;
	}

	es->s_free_inodes_count =
		cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) - 1);
	mark_buffer_dirty(sb->u.ext2_sb.s_sbh);
	sb->s_dirt = 1;
	inode->i_uid = current->fsuid;
	if (test_opt (sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino;
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->u.ext2_i.i_new_inode = 1;
	inode->u.ext2_i.i_flags = dir->u.ext2_i.i_flags & ~EXT2_BTREE_FL;
	if (S_ISLNK(mode))
		inode->u.ext2_i.i_flags &= ~(EXT2_IMMUTABLE_FL|EXT2_APPEND_FL);
	inode->u.ext2_i.i_block_group = group;
	if (inode->u.ext2_i.i_flags & EXT2_SYNC_FL)
		inode->i_flags |= S_SYNC;
	insert_inode_hash(inode);
	inode->i_generation = event++;
	mark_inode_dirty(inode);

	unlock_super (sb);
	if(DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}
	ext2_debug ("allocating inode %lu\n", inode->i_ino);
	return inode;

fail2:
	desc = ext2_get_group_desc (sb, group, &bh2);
	desc->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(desc->bg_free_inodes_count) + 1);
	if (S_ISDIR(mode))
		desc->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(desc->bg_used_dirs_count) - 1);
	mark_buffer_dirty(bh2);
fail:
	unlock_super(sb);
	make_bad_inode(inode);
	iput(inode);
	return ERR_PTR(err);

bad_count:
	ext2_error (sb, "ext2_new_inode",
		    "Free inodes count corrupted in group %d",
		    group);
	/* Is it really ENOSPC? */
	err = -ENOSPC;
	if (sb->s_flags & MS_RDONLY)
		goto fail;

	desc = ext2_get_group_desc (sb, group, &bh2);
	desc->bg_free_inodes_count = 0;
	mark_buffer_dirty(bh2);
	goto repeat;
}
Example #9
/**
 * Implementation of the llog_operations::lop_write
 *
 * This function writes a new record into the llog or modifies an existing one.
 *
 * \param[in]  env		execution environment
 * \param[in]  loghandle	llog handle of the current llog
 * \param[in]  rec		llog record header. This is a real header of
 *				the full llog record to write. This is
 *				the beginning of the buffer to write; the
 *				length of the buffer is stored in
 *				\a rec::lrh_len
 * \param[out] reccookie	pointer to the cookie to return back if needed.
 *				It is used for further cancellation of this
 *				llog record.
 * \param[in]  idx		index of the llog record. If \a idx == -1 then
 *				this is the append case, otherwise \a idx is
 *				the index of the record to modify
 * \param[in]  th		current transaction handle
 *
 * \retval			0 on successful write && \a reccookie == NULL
 *				1 on successful write && \a reccookie != NULL
 * \retval			negative error if write failed
 */
static int llog_osd_write_rec(const struct lu_env *env,
			      struct llog_handle *loghandle,
			      struct llog_rec_hdr *rec,
			      struct llog_cookie *reccookie,
			      int idx, struct thandle *th)
{
	struct llog_thread_info	*lgi = llog_info(env);
	struct llog_log_hdr	*llh;
	int			 reclen = rec->lrh_len;
	int			 index, rc;
	struct llog_rec_tail	*lrt;
	struct dt_object	*o;
	size_t			 left;
	bool			 header_is_updated = false;

	ENTRY;

	LASSERT(env);
	llh = loghandle->lgh_hdr;
	LASSERT(llh);
	o = loghandle->lgh_obj;
	LASSERT(o);
	LASSERT(th);

	CDEBUG(D_OTHER, "new record %x to "DFID"\n",
	       rec->lrh_type, PFID(lu_object_fid(&o->do_lu)));

	/* record length should not be bigger than LLOG_CHUNK_SIZE */
	if (reclen > LLOG_CHUNK_SIZE)
		RETURN(-E2BIG);

	rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
	if (rc)
		RETURN(rc);

	/**
	 * The modification case.
	 * If \a idx is set then the record with that index must be modified.
	 * There are three possible cases:
	 * 1) the common case is the llog header update (idx == 0)
	 * 2) the llog record modification during llog processing.
	 *    This is indicated by \a loghandle::lgh_cur_idx > 0.
	 *    In that case \a loghandle::lgh_cur_offset is used as the
	 *    record offset.
	 * 3) otherwise the llog is assumed to consist of fixed-size
	 *    records, i.e. a catalog. The llog header must have its
	 *    llh_size field equal to the record size, and the record
	 *    offset is calculated from the \a idx value alone.
	 *
	 * During modification we don't need an extra header update because
	 * the bitmap and record count do not change. The record header
	 * and tail remain the same too.
	 */
	if (idx != LLOG_NEXT_IDX) {
		/* llog can be empty only when first record is being written */
		LASSERT(ergo(idx > 0, lgi->lgi_attr.la_size > 0));

		if (!ext2_test_bit(idx, llh->llh_bitmap)) {
			CERROR("%s: modify unset record %u\n",
			       o->do_lu.lo_dev->ld_obd->obd_name, idx);
			RETURN(-ENOENT);
		}

		if (idx != rec->lrh_index) {
			CERROR("%s: modify index mismatch %d %u\n",
			       o->do_lu.lo_dev->ld_obd->obd_name, idx,
			       rec->lrh_index);
			RETURN(-EFAULT);
		}

		if (idx == LLOG_HEADER_IDX) {
			/* llog header update */
			LASSERT(reclen == sizeof(struct llog_log_hdr));
			LASSERT(rec == &llh->llh_hdr);

			lgi->lgi_off = 0;
			lgi->lgi_buf.lb_len = reclen;
			lgi->lgi_buf.lb_buf = rec;
			rc = dt_record_write(env, o, &lgi->lgi_buf,
					     &lgi->lgi_off, th);
			RETURN(rc);
		} else if (loghandle->lgh_cur_idx > 0) {
			/**
			 * The lgh_cur_offset can be used only if index is
			 * the same.
			 */
			if (idx != loghandle->lgh_cur_idx) {
				CERROR("%s: modify index mismatch %d %d\n",
				       o->do_lu.lo_dev->ld_obd->obd_name, idx,
				       loghandle->lgh_cur_idx);
				RETURN(-EFAULT);
			}

			lgi->lgi_off = loghandle->lgh_cur_offset;
			CDEBUG(D_OTHER, "modify record "DOSTID": idx:%d, "
			       "len:%u offset %llu\n",
			       POSTID(&loghandle->lgh_id.lgl_oi), idx,
			       rec->lrh_len, (long long)lgi->lgi_off);
		} else if (llh->llh_size > 0) {
			if (llh->llh_size != rec->lrh_len) {
				CERROR("%s: wrong record size, llh_size is %u"
				       " but record size is %u\n",
				       o->do_lu.lo_dev->ld_obd->obd_name,
				       llh->llh_size, rec->lrh_len);
				RETURN(-EINVAL);
			}
			lgi->lgi_off = sizeof(*llh) + (idx - 1) * reclen;
		} else {
			/* This can happen when lgh_cur_idx was not set during
			 * llog processing, or llh_size was not set to the
			 * proper record size for a fixed-record llog, so it
			 * is impossible to compute the record offset. */
			CERROR("%s: can't get record offset, idx:%d, "
			       "len:%u.\n", o->do_lu.lo_dev->ld_obd->obd_name,
			       idx, rec->lrh_len);
			RETURN(-EFAULT);
		}

		/* update only data, header and tail remain the same */
		lgi->lgi_off += sizeof(struct llog_rec_hdr);
		lgi->lgi_buf.lb_len = REC_DATA_LEN(rec);
		lgi->lgi_buf.lb_buf = REC_DATA(rec);
		rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
		if (rc == 0 && reccookie) {
			reccookie->lgc_lgl = loghandle->lgh_id;
			reccookie->lgc_index = idx;
			rc = 1;
		}
		RETURN(rc);
	}

	/**
	 * The append case.
	 * The most common case of using llog. The new index is assigned to
	 * the new record, new bit is set in llog bitmap and llog count is
	 * incremented.
	 *
	 * Make sure that records don't cross a chunk boundary, so we can
	 * process them page-at-a-time if needed.  If it will cross a chunk
	 * boundary, write in a fake (but referenced) entry to pad the chunk.
	 */
	LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
	lgi->lgi_off = lgi->lgi_attr.la_size;
	left = LLOG_CHUNK_SIZE - (lgi->lgi_off & (LLOG_CHUNK_SIZE - 1));
	/* NOTE: padding is a record, but no bit is set */
	if (left != 0 && left != reclen &&
	    left < (reclen + LLOG_MIN_REC_SIZE)) {
		index = loghandle->lgh_last_idx + 1;
		rc = llog_osd_pad(env, o, &lgi->lgi_off, left, index, th);
		if (rc)
			RETURN(rc);
		loghandle->lgh_last_idx++; /* for pad rec */
	}
	/* if it's the last idx in log file, then return -ENOSPC */
	if (loghandle->lgh_last_idx >= LLOG_BITMAP_SIZE(llh) - 1)
		RETURN(-ENOSPC);

	/* increment last_idx along with the llh_tail index; they should
	 * stay equal for the llog's lifetime */
	loghandle->lgh_last_idx++;
	index = loghandle->lgh_last_idx;
	llh->llh_tail.lrt_index = index;
	/**
	 * NB: the caller should make sure only one process accesses
	 * lgh_last_idx, e.g. appends should be exclusive.
	 * Otherwise it might hit the assert.
	 */
	LASSERT(index < LLOG_BITMAP_SIZE(llh));
	rec->lrh_index = index;
	lrt = rec_tail(rec);
	lrt->lrt_len = rec->lrh_len;
	lrt->lrt_index = rec->lrh_index;

	/* the lgh_hdr_lock protects llog header data from concurrent
	 * update/cancel, the llh_count and llh_bitmap are protected */
	spin_lock(&loghandle->lgh_hdr_lock);
	if (ext2_set_bit(index, llh->llh_bitmap)) {
		CERROR("%s: index %u already set in log bitmap\n",
		       o->do_lu.lo_dev->ld_obd->obd_name, index);
		spin_unlock(&loghandle->lgh_hdr_lock);
		LBUG(); /* should never happen */
	}
	llh->llh_count++;
	spin_unlock(&loghandle->lgh_hdr_lock);

	lgi->lgi_off = 0;
	lgi->lgi_buf.lb_len = llh->llh_hdr.lrh_len;
	lgi->lgi_buf.lb_buf = &llh->llh_hdr;
	rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
	if (rc)
		GOTO(out, rc);

	header_is_updated = true;
	rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
	if (rc)
		GOTO(out, rc);

	LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
	lgi->lgi_off = lgi->lgi_attr.la_size;
	lgi->lgi_buf.lb_len = reclen;
	lgi->lgi_buf.lb_buf = rec;
	rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
	if (rc < 0)
		GOTO(out, rc);

	CDEBUG(D_OTHER, "added record "DOSTID": idx: %u, %u\n",
	       POSTID(&loghandle->lgh_id.lgl_oi), index, rec->lrh_len);
	if (reccookie != NULL) {
		reccookie->lgc_lgl = loghandle->lgh_id;
		reccookie->lgc_index = index;
		if ((rec->lrh_type == MDS_UNLINK_REC) ||
		    (rec->lrh_type == MDS_SETATTR64_REC))
			reccookie->lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
		else if (rec->lrh_type == OST_SZ_REC)
			reccookie->lgc_subsys = LLOG_SIZE_ORIG_CTXT;
		else
			reccookie->lgc_subsys = -1;
		rc = 1;
	}
	RETURN(rc);
out:
	/* cleanup llog for error case */
	spin_lock(&loghandle->lgh_hdr_lock);
	ext2_clear_bit(index, llh->llh_bitmap);
	llh->llh_count--;
	spin_unlock(&loghandle->lgh_hdr_lock);

	/* restore llog last_idx */
	loghandle->lgh_last_idx--;
	llh->llh_tail.lrt_index = loghandle->lgh_last_idx;

	/* restore the header on disk if it was written */
	if (header_is_updated) {
		lgi->lgi_off = 0;
		lgi->lgi_buf.lb_len = llh->llh_hdr.lrh_len;
		lgi->lgi_buf.lb_buf = &llh->llh_hdr;
		dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
	}

	RETURN(rc);
}
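
Note: the append path above (and the llog_lvfs/mlog variants in later examples) keeps every record inside a single chunk: if the space left in the current chunk cannot hold the record exactly, nor the record plus at least one minimal record, a pad record is written first. The snippet below is a small standalone sketch of just that boundary decision; CHUNK_SIZE and MIN_REC_SIZE are illustrative constants, not the Lustre values.

/*
 * Standalone sketch of the chunk-padding decision from the append path
 * above.  CHUNK_SIZE and MIN_REC_SIZE are illustrative, not the real
 * LLOG_CHUNK_SIZE/LLOG_MIN_REC_SIZE values.
 */
#include <stdio.h>

#define CHUNK_SIZE   8192u
#define MIN_REC_SIZE 32u

/* return the number of pad bytes to write before a record of reclen bytes */
static unsigned int pad_needed(unsigned long long offset, unsigned int reclen)
{
	unsigned int left = CHUNK_SIZE - (offset & (CHUNK_SIZE - 1));

	/* a record may end exactly at the boundary (left == reclen);
	 * otherwise it must fit together with at least a minimal record */
	if (left != 0 && left != reclen && left < reclen + MIN_REC_SIZE)
		return left;
	return 0;
}

int main(void)
{
	printf("%u\n", pad_needed(8100, 128)); /* 92: pad to the chunk boundary */
	printf("%u\n", pad_needed(4096, 128)); /* 0: plenty of room left        */
	return 0;
}
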
Example #10
/* appends if idx == -1, otherwise overwrites record idx. */
static int llog_osd_write_rec(const struct lu_env *env,
			      struct llog_handle *loghandle,
			      struct llog_rec_hdr *rec,
			      struct llog_cookie *reccookie, int cookiecount,
			      void *buf, int idx, struct thandle *th)
{
	struct llog_thread_info	*lgi = llog_info(env);
	struct llog_log_hdr	*llh;
	int			 reclen = rec->lrh_len;
	int			 index, rc;
	struct llog_rec_tail	*lrt;
	struct dt_object	*o;
	size_t			 left;

	ENTRY;

	LASSERT(env);
	llh = loghandle->lgh_hdr;
	LASSERT(llh);
	o = loghandle->lgh_obj;
	LASSERT(o);
	LASSERT(th);

	CDEBUG(D_OTHER, "new record %x to "DFID"\n",
	       rec->lrh_type, PFID(lu_object_fid(&o->do_lu)));

	/* record length should not be bigger than LLOG_CHUNK_SIZE */
	if (buf)
		rc = (reclen > LLOG_CHUNK_SIZE - sizeof(struct llog_rec_hdr) -
		      sizeof(struct llog_rec_tail)) ? -E2BIG : 0;
	else
		rc = (reclen > LLOG_CHUNK_SIZE) ? -E2BIG : 0;
	if (rc)
		RETURN(rc);

	rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
	if (rc)
		RETURN(rc);

	if (buf)
		/* write_blob adds header and tail to lrh_len. */
		reclen = sizeof(*rec) + rec->lrh_len +
			 sizeof(struct llog_rec_tail);

	if (idx != -1) {
		/* no header: only allowed to insert record 1 */
		if (idx != 1 && lgi->lgi_attr.la_size == 0)
			LBUG();

		if (idx && llh->llh_size && llh->llh_size != rec->lrh_len)
			RETURN(-EINVAL);

		if (!ext2_test_bit(idx, llh->llh_bitmap))
			CERROR("%s: modify unset record %u\n",
			       o->do_lu.lo_dev->ld_obd->obd_name, idx);
		if (idx != rec->lrh_index)
			CERROR("%s: index mismatch %d %u\n",
			       o->do_lu.lo_dev->ld_obd->obd_name, idx,
			       rec->lrh_index);

		lgi->lgi_off = 0;
		rc = llog_osd_write_blob(env, o, &llh->llh_hdr, NULL,
					 &lgi->lgi_off, th);
		/* we are done if we only write the header or on error */
		if (rc || idx == 0)
			RETURN(rc);

		if (buf) {
			/* We assume that caller has set lgh_cur_* */
			lgi->lgi_off = loghandle->lgh_cur_offset;
			CDEBUG(D_OTHER,
			       "modify record "LPX64": idx:%d/%u/%d, len:%u "
			       "offset %llu\n",
			       loghandle->lgh_id.lgl_oid, idx, rec->lrh_index,
			       loghandle->lgh_cur_idx, rec->lrh_len,
			       (long long)(lgi->lgi_off - sizeof(*llh)));
			if (rec->lrh_index != loghandle->lgh_cur_idx) {
				CERROR("%s: modify idx mismatch %u/%d\n",
				       o->do_lu.lo_dev->ld_obd->obd_name, idx,
				       loghandle->lgh_cur_idx);
				RETURN(-EFAULT);
			}
		} else {
			/* Assumes constant lrh_len */
			lgi->lgi_off = sizeof(*llh) + (idx - 1) * reclen;
		}

		rc = llog_osd_write_blob(env, o, rec, buf, &lgi->lgi_off, th);
		if (rc == 0 && reccookie) {
			reccookie->lgc_lgl = loghandle->lgh_id;
			reccookie->lgc_index = idx;
			rc = 1;
		}
		RETURN(rc);
	}

	/* Make sure that records don't cross a chunk boundary, so we can
	 * process them page-at-a-time if needed.  If it will cross a chunk
	 * boundary, write in a fake (but referenced) entry to pad the chunk.
	 *
	 * We know that llog_current_log() will return a loghandle that is
	 * big enough to hold reclen, so all we care about is padding here.
	 */
	LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
	lgi->lgi_off = lgi->lgi_attr.la_size;
	left = LLOG_CHUNK_SIZE - (lgi->lgi_off & (LLOG_CHUNK_SIZE - 1));
	/* NOTE: padding is a record, but no bit is set */
	if (left != 0 && left != reclen &&
	    left < (reclen + LLOG_MIN_REC_SIZE)) {
		index = loghandle->lgh_last_idx + 1;
		rc = llog_osd_pad(env, o, &lgi->lgi_off, left, index, th);
		if (rc)
			RETURN(rc);
		loghandle->lgh_last_idx++; /*for pad rec*/
	}
	/* if it's the last idx in log file, then return -ENOSPC */
	if (loghandle->lgh_last_idx >= LLOG_BITMAP_SIZE(llh) - 1)
		RETURN(-ENOSPC);

	loghandle->lgh_last_idx++;
	index = loghandle->lgh_last_idx;
	LASSERT(index < LLOG_BITMAP_SIZE(llh));
	rec->lrh_index = index;
	if (buf == NULL) {
		lrt = (struct llog_rec_tail *)((char *)rec + rec->lrh_len -
					       sizeof(*lrt));
		lrt->lrt_len = rec->lrh_len;
		lrt->lrt_index = rec->lrh_index;
	}
	/* The caller should make sure only one process accesses lgh_last_idx,
	 * otherwise it might hit the assert. */
	LASSERT(index < LLOG_BITMAP_SIZE(llh));
	spin_lock(&loghandle->lgh_hdr_lock);
	if (ext2_set_bit(index, llh->llh_bitmap)) {
		CERROR("%s: index %u already set in log bitmap\n",
		       o->do_lu.lo_dev->ld_obd->obd_name, index);
		spin_unlock(&loghandle->lgh_hdr_lock);
		LBUG(); /* should never happen */
	}
	llh->llh_count++;
	spin_unlock(&loghandle->lgh_hdr_lock);
	llh->llh_tail.lrt_index = index;

	lgi->lgi_off = 0;
	rc = llog_osd_write_blob(env, o, &llh->llh_hdr, NULL, &lgi->lgi_off,
				 th);
	if (rc)
		RETURN(rc);

	rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
	if (rc)
		RETURN(rc);
	LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
	lgi->lgi_off = lgi->lgi_attr.la_size;

	rc = llog_osd_write_blob(env, o, rec, buf, &lgi->lgi_off, th);
	if (rc)
		RETURN(rc);

	CDEBUG(D_RPCTRACE, "added record "LPX64": idx: %u, %u\n",
	       loghandle->lgh_id.lgl_oid, index, rec->lrh_len);
	if (rc == 0 && reccookie) {
		reccookie->lgc_lgl = loghandle->lgh_id;
		reccookie->lgc_index = index;
		if ((rec->lrh_type == MDS_UNLINK_REC) ||
		    (rec->lrh_type == MDS_SETATTR64_REC))
			reccookie->lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
		else if (rec->lrh_type == OST_SZ_REC)
			reccookie->lgc_subsys = LLOG_SIZE_ORIG_CTXT;
		else
			reccookie->lgc_subsys = -1;
		rc = 1;
	}
	RETURN(rc);
}
Example #11
static inline void log_set_bit(struct log_c *l,
                               uint32_t *bs, unsigned bit)
{
    ext2_set_bit(bit, (unsigned long *) bs);
    l->touched_cleaned = 1;
}
Example #12
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ext2_new_inode (const struct inode * dir, int mode, int * err)
{
	struct super_block * sb;
	struct buffer_head * bh;
	struct buffer_head * bh2;
	int i, j, avefreei;
	struct inode * inode;
	int bitmap_nr;
	struct ext2_group_desc * gdp;
	struct ext2_group_desc * tmp;
	struct ext2_super_block * es;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink) {
		*err = -EPERM;
		return NULL;
	}

	inode = get_empty_inode ();
	if (!inode) {
		*err = -ENOMEM;
		return NULL;
	}

	sb = dir->i_sb;
	inode->i_sb = sb;
	inode->i_flags = 0;
	lock_super (sb);
	es = sb->u.ext2_sb.s_es;
repeat:
	gdp = NULL; i=0;
	
	*err = -ENOSPC;
	if (S_ISDIR(mode)) {
		avefreei = le32_to_cpu(es->s_free_inodes_count) /
			sb->u.ext2_sb.s_groups_count;
/* I am not yet convinced that this next bit is necessary.
		i = dir->u.ext2_i.i_block_group;
		for (j = 0; j < sb->u.ext2_sb.s_groups_count; j++) {
			tmp = ext2_get_group_desc (sb, i, &bh2);
			if (tmp &&
			    (le16_to_cpu(tmp->bg_used_dirs_count) << 8) < 
			     le16_to_cpu(tmp->bg_free_inodes_count)) {
				gdp = tmp;
				break;
			}
			else
			i = ++i % sb->u.ext2_sb.s_groups_count;
		}
*/
		if (!gdp) {
			for (j = 0; j < sb->u.ext2_sb.s_groups_count; j++) {
				tmp = ext2_get_group_desc (sb, j, &bh2);
				if (tmp &&
				    le16_to_cpu(tmp->bg_free_inodes_count) &&
				    le16_to_cpu(tmp->bg_free_inodes_count) >= avefreei) {
					if (!gdp || 
					    (le16_to_cpu(tmp->bg_free_blocks_count) >
					     le16_to_cpu(gdp->bg_free_blocks_count))) {
						i = j;
						gdp = tmp;
					}
				}
			}
		}
	}
	else 
	{
		/*
		 * Try to place the inode in its parent directory
		 */
		i = dir->u.ext2_i.i_block_group;
		tmp = ext2_get_group_desc (sb, i, &bh2);
		if (tmp && le16_to_cpu(tmp->bg_free_inodes_count))
			gdp = tmp;
		else
		{
			/*
			 * Use a quadratic hash to find a group with a
			 * free inode
			 */
			for (j = 1; j < sb->u.ext2_sb.s_groups_count; j <<= 1) {
				i += j;
				if (i >= sb->u.ext2_sb.s_groups_count)
					i -= sb->u.ext2_sb.s_groups_count;
				tmp = ext2_get_group_desc (sb, i, &bh2);
				if (tmp &&
				    le16_to_cpu(tmp->bg_free_inodes_count)) {
					gdp = tmp;
					break;
				}
			}
		}
		if (!gdp) {
			/*
			 * That failed: try linear search for a free inode
			 */
			i = dir->u.ext2_i.i_block_group + 1;
			for (j = 2; j < sb->u.ext2_sb.s_groups_count; j++) {
				if (++i >= sb->u.ext2_sb.s_groups_count)
					i = 0;
				tmp = ext2_get_group_desc (sb, i, &bh2);
				if (tmp &&
				    le16_to_cpu(tmp->bg_free_inodes_count)) {
					gdp = tmp;
					break;
				}
			}
		}
	}

	if (!gdp) {
		unlock_super (sb);
		iput(inode);
		return NULL;
	}
	bitmap_nr = load_inode_bitmap (sb, i);
	if (bitmap_nr < 0) {
		unlock_super (sb);
		iput(inode);
		*err = -EIO;
		return NULL;
	}

	bh = sb->u.ext2_sb.s_inode_bitmap[bitmap_nr];
	if ((j = ext2_find_first_zero_bit ((unsigned long *) bh->b_data,
				      EXT2_INODES_PER_GROUP(sb))) <
	    EXT2_INODES_PER_GROUP(sb)) {
		if (ext2_set_bit (j, bh->b_data)) {
			ext2_warning (sb, "ext2_new_inode",
				      "bit already set for inode %d", j);
			goto repeat;
		}
		mark_buffer_dirty(bh, 1);
		if (sb->s_flags & MS_SYNCHRONOUS) {
			ll_rw_block (WRITE, 1, &bh);
			wait_on_buffer (bh);
		}
	} else {
		if (le16_to_cpu(gdp->bg_free_inodes_count) != 0) {
			ext2_error (sb, "ext2_new_inode",
				    "Free inodes count corrupted in group %d",
				    i);
			unlock_super (sb);
			iput (inode);
			return NULL;
		}
		goto repeat;
	}
	j += i * EXT2_INODES_PER_GROUP(sb) + 1;
	if (j < EXT2_FIRST_INO(sb) || j > le32_to_cpu(es->s_inodes_count)) {
		ext2_error (sb, "ext2_new_inode",
			    "reserved inode or inode > inodes count - "
			    "block_group = %d,inode=%d", i, j);
		unlock_super (sb);
		iput (inode);
		return NULL;
	}
	gdp->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
	if (S_ISDIR(mode))
		gdp->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
	mark_buffer_dirty(bh2, 1);
	es->s_free_inodes_count =
		cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) - 1);
	mark_buffer_dirty(sb->u.ext2_sb.s_sbh, 1);
	sb->s_dirt = 1;
	inode->i_mode = mode;
	inode->i_sb = sb;
	inode->i_nlink = 1;
	inode->i_dev = sb->s_dev;
	inode->i_uid = current->fsuid;
	if (test_opt (sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;

	inode->i_ino = j;
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->u.ext2_i.i_new_inode = 1;
	inode->u.ext2_i.i_flags = dir->u.ext2_i.i_flags;
	if (S_ISLNK(mode))
		inode->u.ext2_i.i_flags &= ~(EXT2_IMMUTABLE_FL | EXT2_APPEND_FL);
	inode->u.ext2_i.i_faddr = 0;
	inode->u.ext2_i.i_frag_no = 0;
	inode->u.ext2_i.i_frag_size = 0;
	inode->u.ext2_i.i_file_acl = 0;
	inode->u.ext2_i.i_dir_acl = 0;
	inode->u.ext2_i.i_dtime = 0;
	inode->u.ext2_i.i_block_group = i;
	inode->i_op = NULL;
	if (inode->u.ext2_i.i_flags & EXT2_SYNC_FL)
		inode->i_flags |= MS_SYNCHRONOUS;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	inc_inode_version (inode, gdp, mode);

	unlock_super (sb);
	if(DQUOT_ALLOC_INODE(sb, inode)) {
		sb->dq_op->drop(inode);
		inode->i_nlink = 0;
		iput(inode);
		*err = -EDQUOT;
		return NULL;
	}
	ext2_debug ("allocating inode %lu\n", inode->i_ino);

	*err = 0;
	return inode;
}
Example #13
/* returns negative on error; 0 if success; 1 if success & log destroyed */
int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle,
		    int index)
{
	struct llog_thread_info *lgi = llog_info(env);
	struct dt_device	*dt;
	struct llog_log_hdr	*llh = loghandle->lgh_hdr;
	struct thandle		*th;
	int			 rc;
	int rc1;
	bool subtract_count = false;

	ENTRY;

	CDEBUG(D_RPCTRACE, "Canceling %d in log "DOSTID"\n", index,
	       POSTID(&loghandle->lgh_id.lgl_oi));

	if (index == 0) {
		CERROR("Can't cancel index 0 which is header\n");
		RETURN(-EINVAL);
	}

	LASSERT(loghandle != NULL);
	LASSERT(loghandle->lgh_ctxt != NULL);
	LASSERT(loghandle->lgh_obj != NULL);

	dt = lu2dt_dev(loghandle->lgh_obj->do_lu.lo_dev);

	th = dt_trans_create(env, dt);
	if (IS_ERR(th))
		RETURN(PTR_ERR(th));

	rc = llog_declare_write_rec(env, loghandle, &llh->llh_hdr, index, th);
	if (rc < 0)
		GOTO(out_trans, rc);

	if ((llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY))
		rc = llog_declare_destroy(env, loghandle, th);

	th->th_wait_submit = 1;
	rc = dt_trans_start_local(env, dt, th);
	if (rc < 0)
		GOTO(out_trans, rc);

	down_write(&loghandle->lgh_lock);
	/* clear bitmap */
	mutex_lock(&loghandle->lgh_hdr_mutex);
	if (!ext2_clear_bit(index, LLOG_HDR_BITMAP(llh))) {
		CDEBUG(D_RPCTRACE, "Catalog index %u already clear?\n", index);
		GOTO(out_unlock, rc);
	}

	loghandle->lgh_hdr->llh_count--;
	subtract_count = true;
	/* Pass this index to llog_osd_write_rec(), which will use it
	 * to update only the necessary bitmap. */
	lgi->lgi_cookie.lgc_index = index;
	/* update header */
	rc = llog_write_rec(env, loghandle, &llh->llh_hdr, &lgi->lgi_cookie,
			    LLOG_HEADER_IDX, th);
	if (rc != 0)
		GOTO(out_unlock, rc);

	if ((llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
	    (llh->llh_count == 1) &&
	    ((loghandle->lgh_last_idx == LLOG_HDR_BITMAP_SIZE(llh) - 1) ||
	     (loghandle->u.phd.phd_cat_handle != NULL &&
	      loghandle->u.phd.phd_cat_handle->u.chd.chd_current_log !=
		loghandle))) {
		/* never try to destroy it again */
		llh->llh_flags &= ~LLOG_F_ZAP_WHEN_EMPTY;
		rc = llog_trans_destroy(env, loghandle, th);
		if (rc < 0) {
			/* Sigh, we cannot destroy the final plain llog, but
			 * the bitmap bit has already been cleared, so the
			 * record cannot be accessed anymore; return 0 for
			 * now and let LFSCK handle the orphan. */
			CERROR("%s: can't destroy empty llog #"DOSTID
			       "#%08x: rc = %d\n",
			       loghandle->lgh_ctxt->loc_obd->obd_name,
			       POSTID(&loghandle->lgh_id.lgl_oi),
			       loghandle->lgh_id.lgl_ogen, rc);
			GOTO(out_unlock, rc);
		}
		rc = LLOG_DEL_PLAIN;
	}

out_unlock:
	mutex_unlock(&loghandle->lgh_hdr_mutex);
	up_write(&loghandle->lgh_lock);
out_trans:
	rc1 = dt_trans_stop(env, dt, th);
	if (rc == 0)
		rc = rc1;
	if (rc < 0 && subtract_count) {
		mutex_lock(&loghandle->lgh_hdr_mutex);
		loghandle->lgh_hdr->llh_count++;
		ext2_set_bit(index, LLOG_HDR_BITMAP(llh));
		mutex_unlock(&loghandle->lgh_hdr_mutex);
	}
	RETURN(rc);
}
Example #14
/* appends if idx == -1, otherwise overwrites record idx. */
static int llog_lvfs_write_rec(struct llog_handle *loghandle,
                               struct llog_rec_hdr *rec,
                               struct llog_cookie *reccookie, int cookiecount,
                               void *buf, int idx)
{
        struct llog_log_hdr *llh;
        int reclen = rec->lrh_len, index, rc;
        struct llog_rec_tail *lrt;
        struct obd_device *obd;
        struct file *file;
        size_t left;
        ENTRY;

        llh = loghandle->lgh_hdr;
        file = loghandle->lgh_file;
        obd = loghandle->lgh_ctxt->loc_exp->exp_obd;

        /* record length should not be bigger than LLOG_CHUNK_SIZE */
        if (buf)
                rc = (reclen > LLOG_CHUNK_SIZE - sizeof(struct llog_rec_hdr) -
                      sizeof(struct llog_rec_tail)) ? -E2BIG : 0;
        else
                rc = (reclen > LLOG_CHUNK_SIZE) ? -E2BIG : 0;
        if (rc)
                RETURN(rc);

        if (buf)
                /* write_blob adds header and tail to lrh_len. */
                reclen = sizeof(*rec) + rec->lrh_len +
                         sizeof(struct llog_rec_tail);

        if (idx != -1) {
                loff_t saved_offset;

                /* no header: only allowed to insert record 1 */
                if (idx != 1 && !i_size_read(file->f_dentry->d_inode)) {
                        CERROR("idx != -1 in empty log\n");
                        LBUG();
                }

                if (idx && llh->llh_size && llh->llh_size != rec->lrh_len)
                        RETURN(-EINVAL);

                if (!ext2_test_bit(idx, llh->llh_bitmap))
                        CERROR("Modify unset record %u\n", idx);
                if (idx != rec->lrh_index)
                        CERROR("Index mismatch %d %u\n", idx, rec->lrh_index);

                rc = llog_lvfs_write_blob(obd, file, &llh->llh_hdr, NULL, 0);
                /* we are done if we only write the header or on error */
                if (rc || idx == 0)
                        RETURN(rc);

                /* Assumes constant lrh_len */
                saved_offset = sizeof(*llh) + (idx - 1) * reclen;

                if (buf) {
                        struct llog_rec_hdr check;

                        /* We assume that caller has set lgh_cur_* */
                        saved_offset = loghandle->lgh_cur_offset;
                        CDEBUG(D_OTHER,
                               "modify record "LPX64": idx:%d/%u/%d, len:%u "
                               "offset %llu\n",
                               loghandle->lgh_id.lgl_oid, idx, rec->lrh_index,
                               loghandle->lgh_cur_idx, rec->lrh_len,
                               (long long)(saved_offset - sizeof(*llh)));
                        if (rec->lrh_index != loghandle->lgh_cur_idx) {
                                CERROR("modify idx mismatch %u/%d\n",
                                       idx, loghandle->lgh_cur_idx);
                                RETURN(-EFAULT);
                        }
#if 1  /* FIXME remove this safety check at some point */
                        /* Verify that the record we're modifying is the
                           right one. */
                        rc = llog_lvfs_read_blob(obd, file, &check,
                                                 sizeof(check), saved_offset);
                        if (check.lrh_index != idx || check.lrh_len != reclen) {
                                CERROR("Bad modify idx %u/%u size %u/%u (%d)\n",
                                       idx, check.lrh_index, reclen,
                                       check.lrh_len, rc);
                                RETURN(-EFAULT);
                        }
#endif
                }

                rc = llog_lvfs_write_blob(obd, file, rec, buf, saved_offset);
                if (rc == 0 && reccookie) {
                        reccookie->lgc_lgl = loghandle->lgh_id;
                        reccookie->lgc_index = idx;
                        rc = 1;
                }
                RETURN(rc);
        }

        /* Make sure that records don't cross a chunk boundary, so we can
         * process them page-at-a-time if needed.  If it will cross a chunk
         * boundary, write in a fake (but referenced) entry to pad the chunk.
         *
         * We know that llog_current_log() will return a loghandle that is
         * big enough to hold reclen, so all we care about is padding here.
         */
        left = LLOG_CHUNK_SIZE - (file->f_pos & (LLOG_CHUNK_SIZE - 1));

        /* NOTE: padding is a record, but no bit is set */
        if (left != 0 && left != reclen &&
            left < (reclen + LLOG_MIN_REC_SIZE)) {
                 index = loghandle->lgh_last_idx + 1;
                 rc = llog_lvfs_pad(obd, file, left, index);
                 if (rc)
                         RETURN(rc);
                 loghandle->lgh_last_idx++; /*for pad rec*/
         }
         /* if it's the last idx in log file, then return -ENOSPC */
         if (loghandle->lgh_last_idx >= LLOG_BITMAP_SIZE(llh) - 1)
                 RETURN(-ENOSPC);
        loghandle->lgh_last_idx++;
        index = loghandle->lgh_last_idx;
        LASSERT(index < LLOG_BITMAP_SIZE(llh));
        rec->lrh_index = index;
        if (buf == NULL) {
                lrt = (struct llog_rec_tail *)
                        ((char *)rec + rec->lrh_len - sizeof(*lrt));
                lrt->lrt_len = rec->lrh_len;
                lrt->lrt_index = rec->lrh_index;
        }
        /* The caller should make sure only one process accesses lgh_last_idx,
         * otherwise it might hit the assert. */
        LASSERT(index < LLOG_BITMAP_SIZE(llh));
        if (ext2_set_bit(index, llh->llh_bitmap)) {
                CERROR("argh, index %u already set in log bitmap?\n", index);
                LBUG(); /* should never happen */
        }
        llh->llh_count++;
        llh->llh_tail.lrt_index = index;

        rc = llog_lvfs_write_blob(obd, file, &llh->llh_hdr, NULL, 0);
        if (rc)
                RETURN(rc);

        rc = llog_lvfs_write_blob(obd, file, rec, buf, file->f_pos);
        if (rc)
                RETURN(rc);

        CDEBUG(D_RPCTRACE, "added record "LPX64": idx: %u, %u \n",
               loghandle->lgh_id.lgl_oid, index, rec->lrh_len);
        if (rc == 0 && reccookie) {
                reccookie->lgc_lgl = loghandle->lgh_id;
                reccookie->lgc_index = index;
                if ((rec->lrh_type == MDS_UNLINK_REC) ||
                    (rec->lrh_type == MDS_SETATTR_REC) ||
                    (rec->lrh_type == MDS_SETATTR64_REC))
                        reccookie->lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
                else if (rec->lrh_type == OST_SZ_REC)
                        reccookie->lgc_subsys = LLOG_SIZE_ORIG_CTXT;
                else if (rec->lrh_type == OST_RAID1_REC)
                        reccookie->lgc_subsys = LLOG_RD1_ORIG_CTXT;
                else
                        reccookie->lgc_subsys = -1;
                rc = 1;
        }
        if (rc == 0 && rec->lrh_type == LLOG_GEN_REC)
                rc = 1;

        RETURN(rc);
}
Example #15
File: log.c Project: ddn-lixi/mtfs
/* appends if idx == -1, otherwise overwrites record idx. */
static int mlog_vfs_write_rec(struct mlog_handle *loghandle,
			      struct mlog_rec_hdr *rec,
			      struct mlog_cookie *reccookie,
			      int cookiecount,
			      void *buf, int idx)
{
	struct mlog_log_hdr *mlh;
	int reclen = rec->mrh_len;
	int index = 0;
	int ret = 0;
	struct mlog_rec_tail *mrt;
	struct file *file;
	size_t left;
	struct mtfs_lowerfs *lowerfs = NULL;
	MENTRY();

	mlh = loghandle->mgh_hdr;
	file = loghandle->mgh_file;
	lowerfs = loghandle->mgh_ctxt->moc_lowerfs;

	/* record length should not be bigger than MLOG_CHUNK_SIZE */
	if (buf){
		ret = (reclen > MLOG_CHUNK_SIZE - sizeof(struct mlog_rec_hdr) -
		      sizeof(struct mlog_rec_tail)) ? -E2BIG : 0;
	} else {
		ret = (reclen > MLOG_CHUNK_SIZE) ? -E2BIG : 0;
	}

	if (ret) {
		goto out;
	}
	if (buf) {
		/* write_blob adds header and tail to mrh_len. */ 
		reclen = sizeof(*rec) + rec->mrh_len + 
			 sizeof(struct mlog_rec_tail);
	}

	if (idx != -1) {
		loff_t saved_offset;

		/* no header: only allowed to insert record 1 */
		if (idx != 1 && !i_size_read(file->f_dentry->d_inode)) {
			MERROR("idx != -1 in empty log\n");
			MBUG();
		}

		if (idx && mlh->mlh_size && mlh->mlh_size != rec->mrh_len) {
			ret = -EINVAL;
			goto out;
		}

		if (!ext2_test_bit(idx, mlh->mlh_bitmap)) {
			MERROR("Modify unset record %u\n", idx);
		}

		if (idx != rec->mrh_index) {
			MERROR("Index mismatch %d %u\n", idx, rec->mrh_index);
		}

		ret = mlog_vfs_write_blob(lowerfs, file, &mlh->mlh_hdr, NULL, 0);
		/* we are done if we only write the header or on error */
		if (ret || idx == 0) {
			goto out;
		}

		/* Assumes constant mrh_len */
		saved_offset = sizeof(*mlh) + (idx - 1) * reclen;

		if (buf) {
			struct mlog_rec_hdr check;

			/* We assume that caller has set mgh_cur_* */
			saved_offset = loghandle->mgh_cur_offset;

			MDEBUG("modify record %I64x: idx:%d/%u/%d, len:%u "
			       "offset %llu\n",
			       loghandle->mgh_id.mgl_oid, idx, rec->mrh_index,
			       loghandle->mgh_cur_idx, rec->mrh_len,
			       (long long)(saved_offset - sizeof(*mlh)));
			if (rec->mrh_index != loghandle->mgh_cur_idx) {
				MERROR("modify idx mismatch %u/%d\n",
				       idx, loghandle->mgh_cur_idx);
				ret = -EFAULT;
				goto out;
			}
#if 1  /* FIXME remove this safety check at some point */
			/* Verify that the record we're modifying is the 
			   right one. */
			ret = mlog_vfs_read_blob(lowerfs, file, &check,
					    sizeof(check), saved_offset);
			if (check.mrh_index != idx || check.mrh_len != reclen) {
				MERROR("bad modify idx %u/%u size %u/%u (%d)\n",
				       idx, check.mrh_index, reclen, 
				       check.mrh_len, ret);
				ret = -EFAULT;
				goto out;
			}
#endif
		}

		ret = mlog_vfs_write_blob(lowerfs, file, rec, buf, saved_offset);
		if (ret == 0 && reccookie) {
			reccookie->mgc_mgl = loghandle->mgh_id;
			reccookie->mgc_index = idx;
			ret = 1;
		}
		goto out;
	}

	/* Make sure that records don't cross a chunk boundary, so we can
	 * process them page-at-a-time if needed.  If it will cross a chunk
	 * boundary, write in a fake (but referenced) entry to pad the chunk.
	 *
	 * We know that mlog_current_log() will return a loghandle that is
	 * big enough to hold reclen, so all we care about is padding here.
	 */
	left = MLOG_CHUNK_SIZE - (file->f_pos & (MLOG_CHUNK_SIZE - 1));
	/* NOTE: padding is a record, but no bit is set */
	if (left != 0 && left != reclen &&
	    left < (reclen + MLOG_MIN_REC_SIZE)) {
		index = loghandle->mgh_last_idx + 1;
		ret = mlog_vfs_pad(lowerfs, file, left, index);
		if (ret) {
			goto out;
		}
		loghandle->mgh_last_idx++; /*for pad rec*/
	}

	/* if it's the last idx in log file, then return -ENOSPC */
	if (loghandle->mgh_last_idx >= MLOG_BITMAP_SIZE(mlh) - 1) {
		ret = -ENOSPC;
		goto out;
	}
	index = ++loghandle->mgh_last_idx;
	rec->mrh_index = index;
	if (buf == NULL) {
		mrt = (struct mlog_rec_tail *)
		       ((char *)rec + rec->mrh_len - sizeof(*mrt));
		mrt->mrt_len = rec->mrh_len;
		mrt->mrt_index = rec->mrh_index;
	}
	/* The caller should make sure only one process accesses mgh_last_idx,
	 * otherwise it might hit the assert. */
	MASSERT(index < MLOG_BITMAP_SIZE(mlh));
	if (ext2_set_bit(index, mlh->mlh_bitmap)) {
		MERROR("argh, index %u already set in log bitmap?\n", index);
		MBUG(); /* should never happen */
	}
	mlh->mlh_count++;
	mlh->mlh_tail.mrt_index = index;

	ret = mlog_vfs_write_blob(lowerfs, file, &mlh->mlh_hdr, NULL, 0);
	if (ret) {
		goto out;
	}

	ret = mlog_vfs_write_blob(lowerfs, file, rec, buf, file->f_pos);
	if (ret) {
		goto out;
	}

	MDEBUG("added record %I64x: idx: %u, %u bytes\n",
	       loghandle->mgh_id.mgl_oid, index, rec->mrh_len);
	if (ret == 0 && reccookie) {
		reccookie->mgc_mgl = loghandle->mgh_id;
		reccookie->mgc_index = index;
		ret = 1;
	}
	if (ret == 0 && rec->mrh_type == MLOG_GEN_REC) {
		ret = 1;
	}

out:
	MRETURN(ret);
}
Example #16
File: log.c Project: ddn-lixi/mtfs
int mlog_init_handle(struct mlog_handle *handle, int flags,
		     struct mlog_uuid *uuid)
{
	int ret = 0;
	struct mlog_log_hdr *mlh = NULL;
	MENTRY();

	MASSERT(handle->mgh_hdr == NULL);

	MTFS_ALLOC(mlh, sizeof(*mlh));
	if (mlh == NULL) {
		MERROR("not enough memory\n");
		ret = -ENOMEM;
		goto out;
	}

	handle->mgh_hdr = mlh;
	/* first assign flags to use mlog_client_ops */
	mlh->mlh_flags = flags;
	ret = mlog_read_header(handle);
	if (ret == 0) {
		flags = mlh->mlh_flags;
		if (uuid && !mlog_uuid_equals(uuid, &mlh->mlh_tgtuuid)) {
			MERROR("uuid mismatch: %s/%s\n", (char *)uuid->uuid,
			       (char *)mlh->mlh_tgtuuid.uuid);
			ret = -EEXIST;
		}
		goto out;
	} else if (ret != MLOG_EEMPTY || !flags) {
		/* set a pseudo flag for initialization */
		flags = MLOG_F_IS_CAT;
		goto out;
	}
	ret = 0;

	handle->mgh_last_idx = 0; /* header is record with index 0 */
	mlh->mlh_count = 1;	 /* for the header record */
	mlh->mlh_hdr.mrh_type = MLOG_HDR_MAGIC;
	mlh->mlh_hdr.mrh_len = mlh->mlh_tail.mrt_len = MLOG_CHUNK_SIZE;
	mlh->mlh_hdr.mrh_index = mlh->mlh_tail.mrt_index = 0;
	mlh->mlh_timestamp = get_seconds();
	if (uuid)
		memcpy(&mlh->mlh_tgtuuid, uuid, sizeof(mlh->mlh_tgtuuid));
	mlh->mlh_bitmap_offset = offsetof(typeof(*mlh), mlh_bitmap);
	ext2_set_bit(0, mlh->mlh_bitmap);

out:
	if (flags & MLOG_F_IS_CAT) {
		MTFS_INIT_LIST_HEAD(&handle->u.chd.chd_head);
		mlh->mlh_size = sizeof(struct mlog_logid_rec);
	} else if (flags & MLOG_F_IS_PLAIN) {
		MTFS_INIT_LIST_HEAD(&handle->u.phd.phd_entry);
	} else {
		MERROR("Unknown flags: %#x (Expected %#x or %#x\n",
		       flags, MLOG_F_IS_CAT, MLOG_F_IS_PLAIN);
		MBUG();
	}

	if (ret) {
		MTFS_FREE(mlh, sizeof(*mlh));
		handle->mgh_hdr = NULL;
	}

	MRETURN(ret);
}