void MyCollisionListener::contactPointAddedCallback( hkpContactPointAddedEvent& event )
{
	//
	// Draw the contact point as a little red star.
	// We obtain a handle to the contact point through the hkpContactPointAddedEvent
	// structure, and from this we can extract its position in world space.
	//
	{
		const hkVector4& start = event.m_contactPoint->getPosition();
		for ( int i = 0; i < 20; i++ )
		{
			hkVector4 dir( hkMath::sin( i * 1.0f ), hkMath::cos( i * 1.0f ), hkMath::sin(i * 5.0f ) );
			dir.setMul4(0.3f, dir);
			hkVector4 end;		end.setAdd4(start, dir);
			HK_DISPLAY_LINE(start, end, hkColor::RED);
		}
	}

	//
	//	Collect all information in our own data structure.
	//  Since the Havok memory manager is very fast, allocating
	//  many small structures is acceptable.
	//
	if ( event.m_contactPointProperties->getUserData() == HK_NULL )
	{
		ContactPointInfo* info = new ContactPointInfo;
		info->m_uniqueId = m_uniqueIdCounter++;
		event.m_contactPointProperties->setUserData( reinterpret_cast<hkUlong>(info) );

		//
		//	printf some information 
		//
		if (m_reportLevel >= hkDemoEnvironment::REPORT_INFO)
		{
			if ( event.isToi() )
			{
 				hkprintf("Toi userId=%i created\n", info->m_uniqueId );
			}
			else
			{
				int cpId = event.asManifoldEvent().m_contactPointId;
				hkprintf("Contact Point userId=%i created: contactId=%i\n", info->m_uniqueId, cpId );
			}
		}
	}

	// By setting the ProcessContactCallbackDelay to 0 we will receive callbacks for
	// any collisions processed for this body every frame (simulation step), i.e. the delay
	// between any such callbacks is 0 frames.

	// If you wish to be notified only every N frames, simply set the delay to N-1.
	// The default is 65536, i.e. (for practical purposes) only once for the first collision,
	// until the bodies separate to outside the collision tolerance.
	event.m_callbackFiredFrom->setProcessContactCallbackDelay(0);
}
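The listener above tags every new contact point by stashing a heap-allocated ContactPointInfo pointer in the contact point properties' user data (and, per the comment, setProcessContactCallbackDelay(N-1) would deliver process callbacks every N frames). Below is a minimal stand-alone sketch of that user-data round trip; ContactPointInfo's full definition is not shown in the excerpt, so the struct here is an assumption, and FakeContactPointProperties/uintptr_t merely stand in for hkpContactPointProperties and hkUlong.

// Minimal sketch (not the demo's actual header): the same fetch/delete/clear
// steps appear in the confirmed and removed callbacks further down.
#include <cstdint>
#include <cstdio>

struct ContactPointInfo              // assumed layout; only m_uniqueId is visible above
{
	int m_uniqueId;
};

struct FakeContactPointProperties    // stand-in for hkpContactPointProperties' user-data slot
{
	std::uintptr_t m_userData = 0;
	std::uintptr_t getUserData() const           { return m_userData; }
	void           setUserData(std::uintptr_t v) { m_userData = v; }
};

int main()
{
	static int uniqueIdCounter = 0;
	FakeContactPointProperties props;

	// contactPointAddedCallback: allocate once per new contact point
	if (props.getUserData() == 0)
	{
		ContactPointInfo* info = new ContactPointInfo{ uniqueIdCounter++ };
		props.setUserData(reinterpret_cast<std::uintptr_t>(info));
	}

	// contactProcessCallback / confirmedCallback: fetch it back
	ContactPointInfo* info = reinterpret_cast<ContactPointInfo*>(props.getUserData());
	std::printf("userId=%i\n", info->m_uniqueId);

	// contactPointRemovedCallback: free and clear, mirroring the listener above
	delete info;
	props.setUserData(0);
	return 0;
}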
//
// contactProcessCallback
//
void MyCollisionListener::contactProcessCallback( hkpContactProcessEvent& event )
{
	hkpProcessCollisionData& result = *event.m_collisionData;
	int size = result.getNumContactPoints();

	for (int i = 0; i < size; i++ )
	{
		hkpProcessCdPoint& cp = result.m_contactPoints[i];
		{
			ContactPointInfo* info = reinterpret_cast<ContactPointInfo*>( event.m_contactPointProperties[i]->getUserData() );
			if ( (info) && (m_reportLevel >= hkDemoEnvironment::REPORT_INFO) )
			{
				int contactId = cp.m_contactPointId;
				hkprintf("Contact userId=%i processed. Impulse %f. Contact Point Id=%i\n", info->m_uniqueId, event.m_contactPointProperties[i]->getImpulseApplied(), contactId );
			}
		}

		// draw the contact points and normals in white
		{
			const hkVector4& start = result.m_contactPoints[i].m_contact.getPosition();
			hkVector4 normal       = result.m_contactPoints[i].m_contact.getNormal();

			// For ease of display only, we'll always draw the normal "up" (it points from entity 'B'
			// to entity 'A', but the order of A,B is arbitrary) so that we can see it. Thus, if it's 
			// pointing "down", flip its direction (and scale), only for display.
			normal.mul4(5.0f * normal(1));
			HK_DISPLAY_ARROW(start, normal, hkColor::WHITE);
		}
	}
}
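For the display-normal trick above, the one-liner normal.mul4(5.0f * normal(1)) both scales the arrow and flips it when the normal's y component is negative. A more explicit formulation might look like the sketch below; it is equivalent in intent, though not bit-for-bit, since the original also scales by |y|, and Vec3 is only a stand-in for hkVector4.

// Sketch only: an explicit version of the "always draw the normal up" display trick.
struct Vec3 { float x, y, z; };

static Vec3 displayNormal(Vec3 n)
{
	const float s = (n.y < 0.0f) ? -5.0f : 5.0f;   // flip if pointing "down", then scale
	return Vec3{ n.x * s, n.y * s, n.z * s };
}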
Example #3
/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
int
hammer_flusher_flush_inode(hammer_inode_t ip, void *data)
{
	hammer_flusher_info_t info = data;
	hammer_mount_t hmp = info->hmp;
	hammer_transaction_t trans = &info->trans;
	int error;

	/*
	 * Several slaves are operating on the same flush group concurrently.
	 * The SLAVEFLUSH flag prevents them from tripping over each other.
	 *
 * NOTE: It is possible for an EWOULDBLOCK'd ip returned by one slave
	 *	 to be resynced by another, but normally such inodes are not
	 *	 revisited until the master loop gets to them.
	 */
	if (ip->flags & HAMMER_INODE_SLAVEFLUSH)
		return(0);
	ip->flags |= HAMMER_INODE_SLAVEFLUSH;
	++hammer_stats_inode_flushes;

	hammer_flusher_clean_loose_ios(hmp);
	vm_wait_nominal();
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation, all other errors
	 * are considered extremely serious.  We must set WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	/* ip invalid */

	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		hkprintf("Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
	return (0);
}
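The error handling above boils down to one rule: EWOULDBLOCK is expected, everything else is treated as extremely serious. A condensed, stand-alone sketch of that decision (stand-in flag and types, not HAMMER code):

// EWOULDBLOCK marks the inode for WOULDBLOCK recovery and is then treated as success;
// any other error is propagated and, per the comment at the top, ends up forcing the
// mount read-only.
#include <cerrno>

enum { INODE_WOULDBLOCK = 0x1 };

static int classify_flush_error(int error, int* inode_flags)
{
	if (error) {
		*inode_flags |= INODE_WOULDBLOCK;   // set WOULDBLOCK mechanics for the aborted flush
		if (error == EWOULDBLOCK)
			error = 0;                      // expected under normal operation
	}
	return error;                           // non-zero here is considered extremely serious
}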
void MyCollisionListener::contactPointConfirmedCallback( hkpContactPointConfirmedEvent& event)
{
	ContactPointInfo* info = reinterpret_cast<ContactPointInfo*>( event.m_contactPointProperties->getUserData() );
	if (!info )
	{
		return;
	}

	if (m_reportLevel >= hkDemoEnvironment::REPORT_INFO)
	{
		if ( event.isToi() )
		{
			hkprintf("Toi userId=%i confirmed\n", info->m_uniqueId );
		}
		else
		{
			hkContactPointId id = event.getContactPointId();

			// you can get the contactMgr here by calling:  event.getContactMgr();
			hkprintf("Contact userId=%i confirmed. contactId=%i\n", info->m_uniqueId, id );
		}
	}
}
void MyCollisionListener::contactPointRemovedCallback( hkpContactPointRemovedEvent& event )
{
	ContactPointInfo* info = reinterpret_cast<ContactPointInfo*>( event.m_contactPointProperties->getUserData() );
	if ( !info )
	{
		return;
	}

	if (m_reportLevel >= hkDemoEnvironment::REPORT_INFO)
	{
		if ( event.isToi() )
		{
			hkprintf("Toi userId=%i deleted\n", info->m_uniqueId );
		}
		else
		{
			int cpId = event.m_contactPointId;
			hkprintf("Contact Point userId=%i deleted. contactId=%i\n", info->m_uniqueId, cpId );
		}
	}
	delete info;
	event.m_contactPointProperties->setUserData( HK_NULL );
}
Example #6
void WindChimesCollisionListener::contactPointAddedCallback( hkpContactPointAddedEvent& event )
{
	hkReal noteStrength = hkMath::fabs(event.m_projectedVelocity);

	hkString bodyA = static_cast<hkpRigidBody*>(event.m_bodyA->getRootCollidable()->getOwner())->getName();
	hkString bodyB = static_cast<hkpRigidBody*>(event.m_bodyB->getRootCollidable()->getOwner())->getName();

	if( hkString::strCmp(bodyA.cString(),"Gong") == 0 || hkString::strCmp(bodyB.cString(),"Gong") == 0 )
	{
		noteStrength *= 3;
	}

	hkprintf( "%s %f (%s, %s)\n", noteNames[m_noteIndex], noteStrength, bodyA.cString(), bodyB.cString() );

}
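The chime listener maps the magnitude of the projected contact velocity to a note strength and triples it when either body is the "Gong". A small sketch of that mapping; the [0,1] clamp at the end is an assumption (e.g. for feeding an audio engine) and is not in the original.

// Sketch of the note-strength rule above; the factor 3 and the "Gong" name come from
// the listener, the clamp is an added assumption.
#include <algorithm>
#include <cmath>
#include <cstring>

static float noteStrength(float projectedVelocity, const char* bodyA, const char* bodyB)
{
	float strength = std::fabs(projectedVelocity);
	if (std::strcmp(bodyA, "Gong") == 0 || std::strcmp(bodyB, "Gong") == 0)
		strength *= 3.0f;                       // the gong rings louder
	return std::min(strength, 1.0f);            // assumed clamp, not in the original
}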
Example #7
/*
 * Retrieve as many snapshot ids as possible or until the array is
 * full, starting after the last transaction id passed in.  If count
 * is 0 we retrieve starting at the beginning.
 *
 * NOTE: Because the B-Tree key field is signed but transaction ids
 *       are unsigned, the returned list will be signed-sorted instead
 *       of unsigned-sorted.  The caller must still sort the aggregate
 *       results.
 */
static
int
hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_snapshot *snap)
{
	struct hammer_cursor cursor;
	int error;

	/*
	 * Validate structure
	 */
	if (snap->index != 0)
		return (EINVAL);
	if (snap->count > HAMMER_SNAPS_PER_IOCTL)
		return (EINVAL);

	/*
	 * Look for keys starting after the previous iteration, or at
	 * the beginning if snap->count is 0.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT;
	cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	if (snap->count == 0)
		cursor.key_beg.key = HAMMER_MIN_KEY;
	else
		cursor.key_beg.key = (int64_t)snap->snaps[snap->count - 1].tid + 1;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE | HAMMER_CURSOR_ASOF;

	snap->count = 0;

	error = hammer_btree_first(&cursor);
	while (error == 0 && snap->count < HAMMER_SNAPS_PER_IOCTL) {
		error = hammer_btree_extract_leaf(&cursor);
		if (error)
			break;
		if (cursor.leaf->base.rec_type == HAMMER_RECTYPE_SNAPSHOT) {
			error = hammer_btree_extract_data(&cursor);
			snap->snaps[snap->count] = cursor.data->snap;

			/*
			 * The snap data tid should match the key but might
			 * not due to a bug in the HAMMER v3 conversion code.
			 *
			 * This error will work itself out over time but we
			 * have to force a match or the snapshot will not
			 * be deletable.
			 */
			if (cursor.data->snap.tid !=
			    (hammer_tid_t)cursor.leaf->base.key) {
				hkprintf("lo=%08x snapshot key "
					"0x%016jx data mismatch 0x%016jx\n",
					cursor.key_beg.localization,
					(uintmax_t)cursor.data->snap.tid,
					cursor.leaf->base.key);
				hkprintf("Probably left over from the "
					"original v3 conversion, hammer "
					"cleanup should get it eventually\n");
				snap->snaps[snap->count].tid =
					cursor.leaf->base.key;
			}
			++snap->count;
		}
		error = hammer_btree_iterate(&cursor);
	}

	if (error == ENOENT) {
		snap->head.flags |= HAMMER_IOC_SNAPSHOT_EOF;
		error = 0;
	}
	snap->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}
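As the NOTE above says, the returned snapshot tids are signed-sorted because the B-Tree key is signed, so the caller has to re-sort the aggregate in unsigned order. A caller-side sketch of that re-sort:

// tids come back signed-sorted (int64_t key order); the caller wants unsigned
// (uint64_t) order.
#include <algorithm>
#include <cstdint>
#include <vector>

static void sort_tids_unsigned(std::vector<std::uint64_t>& tids)
{
	std::sort(tids.begin(), tids.end());   // uint64_t comparison, i.e. unsigned order
}

// Example: signed order would put 0xFFFFFFFFFFFFFFF0 (negative as int64_t) first;
// unsigned order puts it last.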
Example #8
int
hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
	     struct ucred *cred)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp;
	int error;

	error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);
	hmp = ip->hmp;

	hammer_start_transaction(&trans, hmp);

	switch(com) {
	case HAMMERIOC_PRUNE:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_prune(&trans, ip,
					(struct hammer_ioc_prune *)data);
		}
		break;
	case HAMMERIOC_GETHISTORY:
		error = hammer_ioc_gethistory(&trans, ip,
					(struct hammer_ioc_history *)data);
		break;
	case HAMMERIOC_REBLOCK:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_reblock(&trans, ip,
					(struct hammer_ioc_reblock *)data);
		}
		break;
	case HAMMERIOC_REBALANCE:
		/*
		 * Rebalancing needs to lock a lot of B-Tree nodes: the
		 * children and the children's children.  Systems with very
		 * little memory will not be able to do it.
		 */
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0 && nbuf < HAMMER_REBALANCE_MIN_BUFS) {
			hkprintf("System has insufficient buffers "
				"to rebalance the tree.  nbuf < %d\n",
				HAMMER_REBALANCE_MIN_BUFS);
			error = ENOSPC;
		}
		if (error == 0) {
			error = hammer_ioc_rebalance(&trans, ip,
					(struct hammer_ioc_rebalance *)data);
		}
		break;
	case HAMMERIOC_SYNCTID:
		error = hammer_ioc_synctid(&trans, ip,
					(struct hammer_ioc_synctid *)data);
		break;
	case HAMMERIOC_GET_PSEUDOFS:
		error = hammer_ioc_get_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		break;
	case HAMMERIOC_SET_PSEUDOFS:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_set_pseudofs(&trans, ip, cred,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_UPG_PSEUDOFS:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_upgrade_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_DGD_PSEUDOFS:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_downgrade_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_RMR_PSEUDOFS:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_destroy_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_WAI_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_wait_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_MIRROR_READ:
		if (error == 0) {
			error = hammer_ioc_mirror_read(&trans, ip,
				    (struct hammer_ioc_mirror_rw *)data);
		}
		break;
	case HAMMERIOC_MIRROR_WRITE:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_mirror_write(&trans, ip,
				    (struct hammer_ioc_mirror_rw *)data);
		}
		break;
	case HAMMERIOC_GET_VERSION:
		error = hammer_ioc_get_version(&trans, ip,
				    (struct hammer_ioc_version *)data);
		break;
	case HAMMERIOC_GET_INFO:
		error = hammer_ioc_get_info(&trans,
				    (struct hammer_ioc_info *)data);
		break;
	case HAMMERIOC_SET_VERSION:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_set_version(&trans, ip,
					    (struct hammer_ioc_version *)data);
		}
		break;
	case HAMMERIOC_ADD_VOLUME:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
			if (error == 0)
				error = hammer_ioc_volume_add(&trans, ip,
					    (struct hammer_ioc_volume *)data);
		}
		break;
	case HAMMERIOC_DEL_VOLUME:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
			if (error == 0)
				error = hammer_ioc_volume_del(&trans, ip,
					    (struct hammer_ioc_volume *)data);
		}
		break;
	case HAMMERIOC_LIST_VOLUMES:
		error = hammer_ioc_volume_list(&trans, ip,
		    (struct hammer_ioc_volume_list *)data);
		break;
	case HAMMERIOC_ADD_SNAPSHOT:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_add_snapshot(
					&trans, ip, (struct hammer_ioc_snapshot *)data);
		}
		break;
	case HAMMERIOC_DEL_SNAPSHOT:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_del_snapshot(
					&trans, ip, (struct hammer_ioc_snapshot *)data);
		}
		break;
	case HAMMERIOC_GET_SNAPSHOT:
		error = hammer_ioc_get_snapshot(
					&trans, ip, (struct hammer_ioc_snapshot *)data);
		break;
	case HAMMERIOC_GET_CONFIG:
		error = hammer_ioc_get_config(
					&trans, ip, (struct hammer_ioc_config *)data);
		break;
	case HAMMERIOC_SET_CONFIG:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_set_config(
					&trans, ip, (struct hammer_ioc_config *)data);
		}
		break;
	case HAMMERIOC_DEDUP:
		if (error == 0 && hmp->ronly)
			error = EROFS;
		if (error == 0) {
			error = hammer_ioc_dedup(
					&trans, ip, (struct hammer_ioc_dedup *)data);
		}
		break;
	case HAMMERIOC_GET_DATA:
		if (error == 0) {
			error = hammer_ioc_get_data(
					&trans, ip, (struct hammer_ioc_data *)data);
		}
		break;
	case HAMMERIOC_SCAN_PSEUDOFS:
		error = hammer_ioc_scan_pseudofs(
				&trans, ip, (struct hammer_ioc_pseudofs_rw *)data);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	hammer_done_transaction(&trans);
	return (error);
}
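Most branches of the switch above repeat the same gate: keep the priv_check_cred() result in 'error', add EROFS for write ioctls on a read-only mount, and dispatch only if nothing has failed yet. A condensed sketch of that gating pattern (stand-in code, not the kernel's):

// 'error' starts as the priv_check_cred() result; write ioctls add an EROFS check,
// and the handler runs only if no error has accumulated.
#include <cerrno>

static int gated_dispatch(int error, bool is_write_op, bool mount_ronly,
                          int (*handler)(void*), void* data)
{
	if (error == 0 && is_write_op && mount_ronly)
		error = EROFS;                 // refuse writes on a read-only mount
	if (error == 0)
		error = handler(data);         // dispatch to the per-ioctl implementation
	return error;
}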
Example #9
/*
 * Set version info
 */
static
int
hammer_ioc_set_version(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_version *ver)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_cursor cursor;
	hammer_volume_t volume;
	int error;
	int over = hmp->version;

	/*
	 * Generally do not allow downgrades.  However, version 4 can
	 * be downgraded to version 3.
	 */
	if (ver->cur_version < hmp->version) {
		if (!(ver->cur_version == 3 && hmp->version == 4))
			return(EINVAL);
	}
	if (ver->cur_version == hmp->version)
		return(0);
	if (ver->cur_version > HAMMER_VOL_VERSION_MAX)
		return(EINVAL);
	if (hmp->ronly)
		return(EROFS);

	/*
	 * Update the root volume header and the version cached in
	 * the hammer_mount structure.
	 */
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error)
		goto failed;
	hammer_lock_ex(&hmp->flusher.finalize_lock);
	hammer_sync_lock_ex(trans);
	hmp->version = ver->cur_version;

	/*
	 * If upgrading from version < 4 to version >= 4 the UNDO FIFO
	 * must be reinitialized.
	 */
	if (over < HAMMER_VOL_VERSION_FOUR &&
	    ver->cur_version >= HAMMER_VOL_VERSION_FOUR) {
		hkprintf("upgrade undo to version 4\n");
		error = hammer_upgrade_undo_4(trans);
		if (error)
			goto failed;
	}

	/*
	 * Adjust the version in the volume header
	 */
	volume = hammer_get_root_volume(hmp, &error);
	KKASSERT(error == 0);
	hammer_modify_volume_field(cursor.trans, volume, vol_version);
	volume->ondisk->vol_version = ver->cur_version;
	hammer_modify_volume_done(volume);
	hammer_rel_volume(volume, 0);

	hammer_sync_unlock(trans);
	hammer_unlock(&hmp->flusher.finalize_lock);
failed:
	ver->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}
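The checks at the top of hammer_ioc_set_version encode a small policy: downgrades are refused except 4 to 3, an unchanged version is a no-op, anything above the maximum is invalid, and read-only mounts are refused. A sketch of that predicate; VOL_VERSION_MAX here is an illustrative stand-in for HAMMER_VOL_VERSION_MAX.

// Sketch of the version policy checked above (stand-in constant, illustrative only).
#include <cerrno>

enum { VOL_VERSION_MAX = 6 };   // assumed value for illustration only

static int check_set_version(int cur, int requested, bool ronly)
{
	if (requested < cur && !(requested == 3 && cur == 4))
		return EINVAL;          // downgrades allowed only from 4 to 3
	if (requested == cur)
		return 0;               // nothing to do
	if (requested > VOL_VERSION_MAX)
		return EINVAL;
	if (ronly)
		return EROFS;
	return 0;                   // OK to proceed with the header/UNDO updates
}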
Example #10
/*
 * Reblock the B-Tree (leaf) node, record, and/or data if necessary.
 *
 * XXX We have no visibility into internal B-Tree nodes at the moment,
 * only leaf nodes.
 */
static int
hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
		      hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	hammer_mount_t hmp;
	hammer_off_t tmp_offset;
	hammer_node_ondisk_t ondisk;
	struct hammer_btree_leaf_elm leaf;
	int error;
	int bytes;
	int cur;
	int iocflags;

	error = 0;
	hmp = cursor->trans->hmp;

	/*
	 * Reblock data.  Note that data embedded in a record is reblocked
	 * by the record reblock code.  Data processing only occurs at leaf
	 * nodes and for RECORD element types.
	 */
	if (cursor->node->ondisk->type != HAMMER_BTREE_TYPE_LEAF)
		goto skip;
	if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD)
		return(EINVAL);
	tmp_offset = elm->leaf.data_offset;
	if (tmp_offset == 0)
		goto skip;

	/*
	 * If reblock->vol_no is specified we only want to reblock data
	 * in that volume, but ignore everything else.
	 */
	if (reblock->vol_no != -1 &&
	    reblock->vol_no != HAMMER_VOL_DECODE(tmp_offset))
		goto skip;

	/*
	 * NOTE: Localization restrictions may also have been set up; we can't
	 *	 just set the match flags willy-nilly here.
	 */
	switch(elm->leaf.base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		iocflags = HAMMER_IOC_DO_INODES;
		break;
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_DIRENTRY:
		iocflags = HAMMER_IOC_DO_DIRS;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		iocflags = HAMMER_IOC_DO_DATA;
		break;
	default:
		iocflags = 0;
		break;
	}
	if (reblock->head.flags & iocflags) {
		++reblock->data_count;
		reblock->data_byte_count += elm->leaf.data_len;
		bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
		if (hammer_debug_general & 0x4000)
			hdkprintf("D %6d/%d\n", bytes, reblock->free_level);
		/*
		 * Start data reblock if
		 * 1. there is no error
		 * 2. the data and allocator offset are not in the same
		 *    big-block, or the free level threshold is 0
		 * 3. the free bytes in the data's big-block are at least
		 *    the free level threshold (i.e. if the threshold is 0,
		 *    reblock no matter what).
		 */
		if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
		    bytes >= reblock->free_level) {
			/*
			 * This is nasty, the uncache code may have to get
			 * vnode locks and because of that we can't hold
			 * the cursor locked.
			 *
			 * WARNING: See warnings in hammer_unlock_cursor()
			 *	    function.
			 */
			leaf = elm->leaf;
			hammer_unlock_cursor(cursor);
			hammer_io_direct_uncache(hmp, &leaf);
			hammer_lock_cursor(cursor);

			/*
			 * elm may have become stale or invalid; reload it.
			 * The ondisk variable is temporary only.  Note that
			 * cursor->node, and thus cursor->node->ondisk, may
			 * also have changed.
			 */
			ondisk = cursor->node->ondisk;
			elm = &ondisk->elms[cursor->index];
			if (cursor->flags & HAMMER_CURSOR_RETEST) {
				hkprintf("debug: retest on reblocker uncache\n");
				error = EDEADLK;
			} else if (ondisk->type != HAMMER_BTREE_TYPE_LEAF ||
				   cursor->index >= ondisk->count) {
				hkprintf("debug: shifted on reblocker uncache\n");
				error = EDEADLK;
			} else if (bcmp(&elm->leaf, &leaf, sizeof(leaf))) {
				hkprintf("debug: changed on reblocker uncache\n");
				error = EDEADLK;
			}
			if (error == 0)
				error = hammer_cursor_upgrade(cursor);
			if (error == 0) {
				KKASSERT(cursor->index < ondisk->count);
				error = hammer_reblock_data(reblock,
							    cursor, elm);
			}
			if (error == 0) {
				++reblock->data_moves;
				reblock->data_byte_moves += elm->leaf.data_len;
			}
		}
	}

skip:
	/*
	 * Reblock a B-Tree internal or leaf node.  A leaf node is reblocked
	 * on initial entry only (element 0).  An internal node is reblocked
	 * when entered upward from its first leaf node only (also element 0,
	 * see hammer_btree_iterate() where cursor moves up and may return).
	 * Further revisits of the internal node (index > 0) are ignored.
	 */
	tmp_offset = cursor->node->node_offset;

	/*
	 * If reblock->vol_no is specified we only want to reblock nodes
	 * in that volume, but ignore everything else.
	 */
	if (reblock->vol_no != -1 &&
	    reblock->vol_no != HAMMER_VOL_DECODE(tmp_offset))
		goto end;

	if (cursor->index == 0 &&
	    error == 0 && (reblock->head.flags & HAMMER_IOC_DO_BTREE)) {
		++reblock->btree_count;
		bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
		if (hammer_debug_general & 0x4000)
			hdkprintf("B %6d/%d\n", bytes, reblock->free_level);
		/*
		 * Start node reblock if
		 * 1. there is no error
		 * 2. the node and allocator offset are not in the same
		 *    big-block, or the free level threshold is 0
		 * 3. the free bytes in the node's big-block are at least
		 *    the free level threshold (i.e. if the threshold is 0,
		 *    reblock no matter what).
		 */
		if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
		    bytes >= reblock->free_level) {
			error = hammer_cursor_upgrade(cursor);
			if (error == 0) {
				if (cursor->parent) {
					KKASSERT(cursor->parent_index <
						 cursor->parent->ondisk->count);
					elm = &cursor->parent->ondisk->elms[cursor->parent_index];
				} else {
					elm = NULL;
				}
				switch(cursor->node->ondisk->type) {
				case HAMMER_BTREE_TYPE_LEAF:
					error = hammer_reblock_leaf_node(
							reblock, cursor, elm);
					break;
				case HAMMER_BTREE_TYPE_INTERNAL:
					error = hammer_reblock_int_node(
							reblock, cursor, elm);
					break;
				default:
					hpanic("Illegal B-Tree node type");
				}
			}
			if (error == 0) {
				++reblock->btree_moves;
			}
		}
	}
end:
	hammer_cursor_downgrade(cursor);
	return(error);
}
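Both the data pass and the node pass above gate the actual reblock on the same three conditions (no error; a different big-block than the allocator, or threshold 0; enough free bytes). As a stand-alone predicate, the test might be sketched as:

// The same three-part test appears in both the data and the node paths above
// (sketch, stand-in parameter names).
static bool should_reblock(int error, int same_big_block, int free_bytes, int free_level)
{
	return error == 0 &&
	       (same_big_block == 0 || free_level == 0) &&   // different big-block, or threshold 0
	       free_bytes >= free_level;                     // enough free space in the big-block
}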
Example #11
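/*
 * Validate a dedup candidate against the front-end buffer cache.  Returns 1
 * only if a clean, cached buffer exists for dcp->file_offset, maps to
 * dcp->data_offset, and its contents compare equal to 'data'; otherwise
 * returns 0 and records the reason in *errorp (1: inode not found, 2: no
 * vnode, 3: no buffer, 5: buffer not cached or dirty, 6: VOP_BMAP failed,
 * 7: bmap offset mismatch; 4 is reserved, see the XXX below).
 */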
static __inline int
_vnode_validate(hammer_dedup_cache_t dcp, void *data, int *errorp)
{
	struct hammer_transaction trans;
	hammer_inode_t ip;
	struct vnode *vp;
	struct buf *bp;
	off_t dooffset;
	int result, error;

	result = error = 0;
	*errorp = 0;

	hammer_simple_transaction(&trans, dcp->hmp);

	ip = hammer_get_inode(&trans, NULL, dcp->obj_id, HAMMER_MAX_TID,
	    dcp->localization, 0, &error);
	if (ip == NULL) {
		hkprintf("dedup: unable to find objid %016jx:%08x\n",
		    (intmax_t)dcp->obj_id, dcp->localization);
		*errorp = 1;
		goto failed2;
	}

	error = hammer_get_vnode(ip, &vp);
	if (error) {
		hkprintf("dedup: unable to acquire vnode for %016jx:%08x\n",
		    (intmax_t)dcp->obj_id, dcp->localization);
		*errorp = 2;
		goto failed;
	}

	if ((bp = findblk(ip->vp, dcp->file_offset, FINDBLK_NBLOCK)) != NULL) {
		bremfree(bp);

		/* XXX if (mapped to userspace) goto done, *errorp = 4 */

		if ((bp->b_flags & B_CACHE) == 0 || bp->b_flags & B_DIRTY) {
			*errorp = 5;
			goto done;
		}

		if (bp->b_bio2.bio_offset != dcp->data_offset) {
			error = VOP_BMAP(ip->vp, dcp->file_offset, &dooffset,
			    NULL, NULL, BUF_CMD_READ);
			if (error) {
				*errorp = 6;
				goto done;
			}

			if (dooffset != dcp->data_offset) {
				*errorp = 7;
				goto done;
			}
			hammer_live_dedup_bmap_saves++;
		}

		if (bcmp(data, bp->b_data, dcp->bytes) == 0)
			result = 1;

done:
		bqrelse(bp);
	} else {
		*errorp = 3;
	}
	vput(vp);

failed:
	hammer_rel_inode(ip, 0);
failed2:
	hammer_done_transaction(&trans);
	return (result);
}
Example #12
int
hammer_ioc_dedup(hammer_transaction_t trans, hammer_inode_t ip,
		 struct hammer_ioc_dedup *dedup)
{
	struct hammer_cursor cursor1, cursor2;
	int error;
	int seq;

	/*
	 * Enforce hammer filesystem version requirements
	 */
	if (trans->hmp->version < HAMMER_VOL_VERSION_FIVE) {
		hkprintf("Filesystem must be upgraded to v5 "
			"before you can run dedup\n");
		return (EOPNOTSUPP);
	}

	/*
	 * Cursor1, return an error -> candidate goes to pass2 list
	 */
	error = hammer_init_cursor(trans, &cursor1, NULL, NULL);
	if (error)
		goto done_cursor;
	cursor1.key_beg = dedup->elm1;
	cursor1.flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(&cursor1);
	if (error)
		goto done_cursor;
	error = hammer_btree_extract(&cursor1, HAMMER_CURSOR_GET_LEAF |
						HAMMER_CURSOR_GET_DATA);
	if (error)
		goto done_cursor;

	/*
	 * Cursor2, return an error -> candidate goes to pass2 list
	 */
	error = hammer_init_cursor(trans, &cursor2, NULL, NULL);
	if (error)
		goto done_cursors;
	cursor2.key_beg = dedup->elm2;
	cursor2.flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(&cursor2);
	if (error)
		goto done_cursors;
	error = hammer_btree_extract(&cursor2, HAMMER_CURSOR_GET_LEAF |
						HAMMER_CURSOR_GET_DATA);
	if (error)
		goto done_cursors;

	/*
	 * Zone validation. We can't de-dup any of the other zones
	 * (BTREE or META) or bad things will happen.
	 *
	 * Return with error = 0, but set an INVALID_ZONE flag.
	 */
	error = validate_zone(cursor1.leaf->data_offset) +
			    validate_zone(cursor2.leaf->data_offset);
	if (error) {
		dedup->head.flags |= HAMMER_IOC_DEDUP_INVALID_ZONE;
		error = 0;
		goto done_cursors;
	}

	/*
	 * Comparison checks
	 *
	 * If zones don't match or data_len fields aren't the same
	 * we consider it to be a comparison failure.
	 *
	 * Return with error = 0, but set a CMP_FAILURE flag.
	 */
	if ((cursor1.leaf->data_offset & HAMMER_OFF_ZONE_MASK) !=
	    (cursor2.leaf->data_offset & HAMMER_OFF_ZONE_MASK)) {
		dedup->head.flags |= HAMMER_IOC_DEDUP_CMP_FAILURE;
		goto done_cursors;
	}
	if (cursor1.leaf->data_len != cursor2.leaf->data_len) {
		dedup->head.flags |= HAMMER_IOC_DEDUP_CMP_FAILURE;
		goto done_cursors;
	}

	/* byte-by-byte comparison to be sure */
	if (bcmp(cursor1.data, cursor2.data, cursor1.leaf->data_len)) {
		dedup->head.flags |= HAMMER_IOC_DEDUP_CMP_FAILURE;
		goto done_cursors;
	}

	/*
	 * Upgrade both cursors together to an exclusive lock
	 *
	 * Return an error -> candidate goes to pass2 list
	 */
	hammer_sync_lock_sh(trans);
	error = hammer_cursor_upgrade2(&cursor1, &cursor2);
	if (error) {
		hammer_sync_unlock(trans);
		goto done_cursors;
	}

	error = hammer_blockmap_dedup(cursor1.trans,
			cursor1.leaf->data_offset, cursor1.leaf->data_len);
	if (error) {
		if (error == ERANGE) {
			/* Return with error = 0, but set an UNDERFLOW flag */
			dedup->head.flags |= HAMMER_IOC_DEDUP_UNDERFLOW;
			error = 0;
		}

		/* Return all other errors -> block goes to pass2 list */
		goto downgrade_cursors;
	}

	/*
	 * cursor2's cache must be invalidated before calling
	 * hammer_blockmap_free(), otherwise it will not be able to
	 * invalidate the underlying data buffer.
	 */
	hammer_cursor_invalidate_cache(&cursor2);
	hammer_blockmap_free(cursor2.trans,
			cursor2.leaf->data_offset, cursor2.leaf->data_len);

	hammer_modify_node(cursor2.trans, cursor2.node,
			&cursor2.leaf->data_offset, sizeof(hammer_off_t));
	cursor2.leaf->data_offset = cursor1.leaf->data_offset;
	hammer_modify_node_done(cursor2.node);

downgrade_cursors:
	hammer_cursor_downgrade2(&cursor1, &cursor2);
	hammer_sync_unlock(trans);
done_cursors:
	hammer_done_cursor(&cursor2);
done_cursor:
	hammer_done_cursor(&cursor1);

	/*
	 * Avoid deadlocking the buffer cache
	 */
	seq = trans->hmp->flusher.done;
	while (hammer_flusher_meta_halflimit(trans->hmp) ||
	       hammer_flusher_undo_exhausted(trans, 2)) {
		hammer_flusher_wait(trans->hmp, seq);
		seq = hammer_flusher_async_one(trans->hmp);
	}
	return (error);
}
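Before the actual dedup, the ioctl above accepts a candidate pair only if both leaves live in the same blockmap zone, have identical data_len, and compare equal byte-for-byte. A stand-alone sketch of that comparison; OFF_ZONE_MASK stands in for HAMMER_OFF_ZONE_MASK and its value here is illustrative only.

// Sketch of the comparison policy above (stand-in types and mask value).
#include <cstddef>
#include <cstdint>
#include <cstring>

static const std::uint64_t OFF_ZONE_MASK = 0xF000000000000000ULL;   // assumed layout

static bool dedup_candidates_match(std::uint64_t off1, std::uint64_t off2,
                                   std::int32_t len1, std::int32_t len2,
                                   const void* data1, const void* data2)
{
	if ((off1 & OFF_ZONE_MASK) != (off2 & OFF_ZONE_MASK))
		return false;                              // must be in the same zone
	if (len1 != len2)
		return false;                              // lengths must match
	return std::memcmp(data1, data2,
	                   static_cast<std::size_t>(len1)) == 0;   // byte-by-byte comparison to be sure
}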