Example no. 1
/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
		    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	hammer_buffer_t data_buffer = NULL;
	hammer_off_t odata_offset;
	hammer_off_t ndata_offset;
	int error;
	void *ndata;

	error = hammer_btree_extract_data(cursor);
	if (error)
		return (error);
	ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
				  elm->leaf.base.rec_type,
				  &ndata_offset, &data_buffer,
				  0, &error);
	if (error)
		goto done;
	hammer_io_notmeta(data_buffer);

	/*
	 * Move the data.  Note that we must invalidate any cached
	 * data buffer in the cursor before calling blockmap_free.
	 * The blockmap_free may free up the entire big-block and
	 * will not be able to invalidate it if the cursor is holding
	 * a data buffer cached in that big-block.
	 */
	hammer_modify_buffer_noundo(cursor->trans, data_buffer);
	bcopy(cursor->data, ndata, elm->leaf.data_len);
	hammer_modify_buffer_done(data_buffer);
	hammer_cursor_invalidate_cache(cursor);

	hammer_blockmap_free(cursor->trans,
			     elm->leaf.data_offset, elm->leaf.data_len);

	hammer_modify_node(cursor->trans, cursor->node,
			   &elm->leaf.data_offset, sizeof(hammer_off_t));
	odata_offset = elm->leaf.data_offset;
	elm->leaf.data_offset = ndata_offset;
	hammer_modify_node_done(cursor->node);

	if (hammer_debug_general & 0x4000) {
		hdkprintf("%08x %016jx -> %016jx\n",
			(elm ? elm->base.localization : -1),
			(intmax_t)odata_offset,
			(intmax_t)ndata_offset);
	}
done:
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return (error);
}
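
The routine above follows a simple move-and-repoint pattern: allocate new storage, copy the record's data into it, free the old allocation, and only then overwrite the data offset stored in the B-Tree element. Below is a minimal userspace sketch of that ordering only; the names (struct record, reblock_record) are invented for illustration and are not HAMMER APIs, and none of the on-disk or undo details are modeled.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical record: data/data_len stand in for the element's
 * data_offset/data_len fields.  Not a HAMMER structure.
 */
struct record {
	void	*data;
	size_t	data_len;
};

/*
 * Move the record's data to freshly allocated storage and repoint the
 * record at it.  The old storage is released before the pointer is
 * updated, mirroring the blockmap_free-then-modify-node ordering above.
 */
static int
reblock_record(struct record *rec)
{
	void *ndata;

	if (rec->data == NULL || rec->data_len == 0)
		return (0);
	ndata = malloc(rec->data_len);		/* analogue of hammer_alloc_data() */
	if (ndata == NULL)
		return (ENOMEM);
	memcpy(ndata, rec->data, rec->data_len);	/* move the data */
	free(rec->data);			/* analogue of hammer_blockmap_free() */
	rec->data = ndata;			/* repoint the element last */
	return (0);
}

int
main(void)
{
	struct record rec;

	rec.data_len = 16;
	rec.data = malloc(rec.data_len);
	if (rec.data == NULL)
		return (1);
	memset(rec.data, 0x5a, rec.data_len);
	printf("reblock_record: %d\n", reblock_record(&rec));
	free(rec.data);
	return (0);
}
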
Example no. 2
/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
		    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	struct hammer_buffer *data_buffer = NULL;
	hammer_off_t ndata_offset;
	int error;
	void *ndata;

	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA |
					     HAMMER_CURSOR_GET_LEAF);
	if (error)
		return (error);
	ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
				  elm->leaf.base.rec_type,
				  &ndata_offset, &data_buffer,
				  0, &error);
	if (error)
		goto done;
	hammer_io_notmeta(data_buffer);

	/*
	 * Move the data.  Note that we must invalidate any cached
	 * data buffer in the cursor before calling blockmap_free.
	 * The blockmap_free may free up the entire large-block and
	 * will not be able to invalidate it if the cursor is holding
 * a data buffer cached in that large-block.
	 */
	hammer_modify_buffer(cursor->trans, data_buffer, NULL, 0);
	bcopy(cursor->data, ndata, elm->leaf.data_len);
	hammer_modify_buffer_done(data_buffer);
	hammer_cursor_invalidate_cache(cursor);

	hammer_blockmap_free(cursor->trans,
			     elm->leaf.data_offset, elm->leaf.data_len);

	hammer_modify_node(cursor->trans, cursor->node,
			   &elm->leaf.data_offset, sizeof(hammer_off_t));
	elm->leaf.data_offset = ndata_offset;
	hammer_modify_node_done(cursor->node);

done:
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return (error);
}
Example no. 3
/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
		    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	struct hammer_buffer *data_buffer = NULL;
	hammer_off_t ndata_offset;
	int error;
	void *ndata;

	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA |
					     HAMMER_CURSOR_GET_LEAF);
	if (error)
		return (error);
	ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
				  elm->leaf.base.rec_type,
				  &ndata_offset, &data_buffer, &error);
	if (error)
		goto done;

	/*
	 * Move the data
	 */
	hammer_modify_buffer(cursor->trans, data_buffer, NULL, 0);
	bcopy(cursor->data, ndata, elm->leaf.data_len);
	hammer_modify_buffer_done(data_buffer);

	hammer_blockmap_free(cursor->trans,
			     elm->leaf.data_offset, elm->leaf.data_len);

	hammer_modify_node(cursor->trans, cursor->node,
			   &elm->leaf.data_offset, sizeof(hammer_off_t));
	elm->leaf.data_offset = ndata_offset;
	hammer_modify_node_done(cursor->node);

done:
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return (error);
}
Example no. 4
/*
 * Write out a new record.
 */
static
int
hammer_mirror_write(hammer_cursor_t cursor,
		    struct hammer_ioc_mrecord_rec *mrec,
		    char *udata)
{
	hammer_transaction_t trans;
	hammer_buffer_t data_buffer;
	hammer_off_t ndata_offset;
	hammer_tid_t high_tid;
	void *ndata;
	int error;
	int doprop;

	trans = cursor->trans;
	data_buffer = NULL;

	/*
	 * Get the sync lock so the whole mess is atomic
	 */
	hammer_sync_lock_sh(trans);

	/*
	 * Allocate and adjust data
	 */
	if (mrec->leaf.data_len && mrec->leaf.data_offset) {
		ndata = hammer_alloc_data(trans, mrec->leaf.data_len,
					  mrec->leaf.base.rec_type,
					  &ndata_offset, &data_buffer,
					  0, &error);
		if (ndata == NULL) {
			/* drop the sync lock acquired above before bailing out */
			hammer_sync_unlock(trans);
			return(error);
		}
		mrec->leaf.data_offset = ndata_offset;
		hammer_modify_buffer(trans, data_buffer, NULL, 0);
		error = copyin(udata, ndata, mrec->leaf.data_len);
		if (error == 0) {
			if (hammer_crc_test_leaf(ndata, &mrec->leaf) == 0) {
				kprintf("data crc mismatch on pipe\n");
				error = EINVAL;
			} else {
				error = hammer_mirror_localize_data(
							ndata, &mrec->leaf);
			}
		}
		hammer_modify_buffer_done(data_buffer);
	} else {
		mrec->leaf.data_offset = 0;
		error = 0;
		ndata = NULL;
	}
	if (error)
		goto failed;

	/*
	 * Do the insertion.  This can fail with a EDEADLK or EALREADY
	 */
	cursor->flags |= HAMMER_CURSOR_INSERT;
	error = hammer_btree_lookup(cursor);
	if (error != ENOENT) {
		if (error == 0)
			error = EALREADY;
		goto failed;
	}

	error = hammer_btree_insert(cursor, &mrec->leaf, &doprop);

	/*
	 * Cursor is left on the current element, we want to skip it now.
	 */
	cursor->flags |= HAMMER_CURSOR_ATEDISK;
	cursor->flags &= ~HAMMER_CURSOR_INSERT;

	/*
	 * Track a count of active inodes.
	 */
	if (error == 0 &&
	    mrec->leaf.base.rec_type == HAMMER_RECTYPE_INODE &&
	    mrec->leaf.base.delete_tid == 0) {
		hammer_modify_volume_field(trans,
					   trans->rootvol,
					   vol0_stat_inodes);
		++trans->hmp->rootvol->ondisk->vol0_stat_inodes;
		hammer_modify_volume_done(trans->rootvol);
	}

	/*
	 * vol0_next_tid must track the highest TID stored in the filesystem.
	 * We do not need to generate undo for this update.
	 */
	high_tid = mrec->leaf.base.create_tid;
	if (high_tid < mrec->leaf.base.delete_tid)
		high_tid = mrec->leaf.base.delete_tid;
	if (trans->rootvol->ondisk->vol0_next_tid < high_tid) {
		hammer_modify_volume(trans, trans->rootvol, NULL, 0);
		trans->rootvol->ondisk->vol0_next_tid = high_tid;
		hammer_modify_volume_done(trans->rootvol);
	}

	/*
	 * WARNING!  cursor's leaf pointer may have changed after
	 *	     do_propagation returns.
	 */
	if (error == 0 && doprop)
		hammer_btree_do_propagation(cursor, NULL, &mrec->leaf);

failed:
	/*
	 * Cleanup
	 */
	if (error && mrec->leaf.data_offset) {
		hammer_blockmap_free(cursor->trans,
				     mrec->leaf.data_offset,
				     mrec->leaf.data_len);
	}
	hammer_sync_unlock(trans);
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return(error);
}
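
The insertion step above relies on a lookup-before-insert convention: hammer_btree_lookup() must come back ENOENT for the write to proceed, a return of 0 means the record already exists and is reported as EALREADY, and any other error aborts the write. Below is a minimal, self-contained sketch of that convention against an invented array-backed table; table_lookup, table_insert, and write_record are made-up stand-ins, not HAMMER APIs.

#include <errno.h>
#include <stdio.h>

#define TABLE_SIZE	8

static long table[TABLE_SIZE];
static int table_count;

/* Return 0 if key is present, ENOENT if absent. */
static int
table_lookup(long key)
{
	int i;

	for (i = 0; i < table_count; ++i) {
		if (table[i] == key)
			return (0);
	}
	return (ENOENT);
}

static int
table_insert(long key)
{
	if (table_count == TABLE_SIZE)
		return (ENOSPC);
	table[table_count++] = key;
	return (0);
}

/*
 * Insert a new record: the lookup must come back ENOENT, a successful
 * lookup means the key already exists (EALREADY), and any other error
 * aborts the write.
 */
static int
write_record(long key)
{
	int error;

	error = table_lookup(key);
	if (error != ENOENT) {
		if (error == 0)
			error = EALREADY;
		return (error);
	}
	return (table_insert(key));
}

int
main(void)
{
	printf("first insert:  %d\n", write_record(42));	/* 0 */
	printf("second insert: %d\n", write_record(42));	/* EALREADY */
	return (0);
}
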