Example no. 1
int
hammer_dedup_validate(hammer_dedup_cache_t dcp, int zone, int bytes,
		      void *data)
{
	int error;

	/*
	 * Zone validation
	 */
	if (HAMMER_ZONE_DECODE(dcp->data_offset) != zone)
		return (0);

	/*
	 * Block length validation
	 */
	if (dcp->bytes != bytes)
		return (0);

	/*
	 * Byte-by-byte data comparison
	 *
	 * The data we need for validation may already be present in the
	 * buffer cache in two flavours: a vnode-based buffer or a
	 * block-device-based buffer.  If the vnode-based buffer isn't
	 * there, or a non-blocking attempt to acquire it fails, fall
	 * back to the device-based buffer (for large-zone data blocks
	 * this generates a separate read).
	 *
	 * XXX vnode-based checking is not MP safe, so when live-dedup
	 *     is turned on we must always use the device buffer.
	 */
#if 0
	if (hammer_double_buffer) {
		error = 1;
	} else if (_vnode_validate(dcp, data, &error)) {
		hammer_live_dedup_vnode_bcmps++;
		return (1);
	} else {
		if (error == 3)
			hammer_live_dedup_findblk_failures++;
	}

	/*
	 * Fall back to the device buffer if the vnode-based check
	 * failed or if double buffering is enabled.
	 */
	if (error) {
		if (_dev_validate(dcp, data, &error)) {
			hammer_live_dedup_device_bcmps++;
			return (1);
		}
	}
#endif
	if (_dev_validate(dcp, data, &error)) {
		hammer_live_dedup_device_bcmps++;
		return (1);
	}

	return (0);
}
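
For context, a minimal caller sketch of how hammer_dedup_validate() is
meant to be used: a cached entry is only treated as a duplicate once the
candidate data compares byte-identical.  The lookup helper and the crc
argument below are assumed names for illustration, not actual HAMMER
call sites.

static int
try_dedup(hammer_mount_t hmp, int zone, int bytes, void *data,
	  uint32_t data_crc)
{
	hammer_dedup_cache_t dcp;

	/* hammer_dedup_cache_lookup() is an assumed helper name */
	dcp = hammer_dedup_cache_lookup(hmp, data_crc);
	if (dcp && hammer_dedup_validate(dcp, zone, bytes, data)) {
		/* proven byte-identical: reference the existing block */
		return (1);
	}
	return (0);	/* caller falls back to a normal write */
}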
Example no. 2
/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap __debugvar;
	hammer_off_t result_offset;
	int i;

	KKASSERT((zone3_off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_UNDO);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
	KKASSERT(zone3_off < undomap->alloc_offset);

	/*
	 * vol0_undo_array[i] holds the zone-2 base of undo big-block i;
	 * adding the in-big-block offset of the zone-3 address yields
	 * the zone-2 address.
	 */
	i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_BIGBLOCK_SIZE;
	result_offset = root_volume->ondisk->vol0_undo_array[i] +
			(zone3_off & HAMMER_BIGBLOCK_MASK64);

	hammer_rel_volume(root_volume, 0);
	return(result_offset);
}
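
The index arithmetic above is easiest to see with concrete numbers.  A
quick self-contained check (the offset value is illustrative; the
constants match HAMMER's 8 MB big-blocks):

#include <assert.h>
#include <stdint.h>

#define HAMMER_BIGBLOCK_SIZE	(8192 * 1024)	/* 8 MB big-blocks */
#define HAMMER_BIGBLOCK_MASK64	((uint64_t)HAMMER_BIGBLOCK_SIZE - 1)

int
main(void)
{
	/* short part 0x01234567 selects undo array slot 2 ... */
	assert(0x01234567 / HAMMER_BIGBLOCK_SIZE == 2);
	/* ... at byte offset 0x234567 within that big-block */
	assert((0x01234567 & HAMMER_BIGBLOCK_MASK64) == 0x234567);
	return (0);
}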
Example no. 3
/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap __debugvar;
	hammer_off_t result_offset;

	KKASSERT(hammer_is_zone_undo(zone3_off));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
	KKASSERT(zone3_off < undomap->alloc_offset);

	result_offset = hammer_xlate_to_undo(root_volume->ondisk, zone3_off);

	hammer_rel_volume(root_volume, 0);
	return(result_offset);
}
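
Example no. 3 folds the open-coded arithmetic of Example no. 2 into
hammer_xlate_to_undo().  A plausible shape for that helper, inferred
from Example no. 2's arithmetic rather than copied from the DragonFly
source:

/*
 * Sketch inferred from Example no. 2; not the verbatim DragonFly
 * helper.  Translate a zone-3 undo offset into a zone-2 buffer offset.
 */
static __inline hammer_off_t
hammer_xlate_to_undo(hammer_volume_ondisk_t ondisk, hammer_off_t zone3_off)
{
	int i;

	/* which undo big-block the zone-3 offset falls in */
	i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_BIGBLOCK_SIZE;

	/* zone-2 base of that big-block plus the in-block byte offset */
	return (ondisk->vol0_undo_array[i] +
		(zone3_off & HAMMER_BIGBLOCK_MASK64));
}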
Example no. 4
hammer_off_t
blockmap_lookup_save(hammer_off_t zone_offset,
                     hammer_blockmap_layer1_t save_layer1,
                     hammer_blockmap_layer2_t save_layer2,
                     int *errorp)
{
    struct volume_info *root_volume = NULL;
    hammer_volume_ondisk_t ondisk;
    hammer_blockmap_t blockmap;
    hammer_blockmap_t freemap;
    hammer_blockmap_layer1_t layer1;
    hammer_blockmap_layer2_t layer2;
    struct buffer_info *buffer1 = NULL;
    struct buffer_info *buffer2 = NULL;
    hammer_off_t layer1_offset;
    hammer_off_t layer2_offset;
    hammer_off_t result_offset = HAMMER_OFF_BAD;
    int zone;
    int error = 0;

    if (save_layer1)
        bzero(save_layer1, sizeof(*save_layer1));
    if (save_layer2)
        bzero(save_layer2, sizeof(*save_layer2));

    zone = HAMMER_ZONE_DECODE(zone_offset);

    if (zone <= HAMMER_ZONE_RAW_VOLUME_INDEX) {
        error = -1;
        goto done;
    }
    if (zone >= HAMMER_MAX_ZONES) {
        error = -2;
        goto done;
    }

    root_volume = get_root_volume();
    ondisk = root_volume->ondisk;
    blockmap = &ondisk->vol0_blockmap[zone];

    /*
     * Handle blockmap offset translations.
     */
    if (hammer_is_index_record(zone)) {
        result_offset = hammer_xlate_to_zone2(zone_offset);
    } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
        if (zone_offset >= blockmap->alloc_offset) {
            error = -3;
            goto done;
        }
        result_offset = hammer_xlate_to_undo(ondisk, zone_offset);
    } else {
        /* assert(zone == HAMMER_ZONE_RAW_BUFFER_INDEX); */
        result_offset = zone_offset;
    }

    /*
     * The blockmap should match the requested zone (else the volume
     * header is mashed).
     */
    if (hammer_is_index_record(zone) &&
            HAMMER_ZONE_DECODE(blockmap->alloc_offset) != zone) {
        error = -4;
        goto done;
    }

    /*
     * Validate that the big-block is assigned to the zone.  Also
     * assign save_layer{1,2} if not NULL.
     */
    freemap = &ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

    /*
     * Dive layer 1.
     */
    layer1_offset = freemap->phys_offset +
                    HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
    layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

    if (layer1 == NULL) {
        error = -5;
        goto done;
    }
    if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
        error = -6;
        goto done;
    }
    if (save_layer1)
        *save_layer1 = *layer1;

    /*
     * Dive layer 2, each entry represents a big-block.
     */
    layer2_offset = layer1->phys_offset +
                    HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
    layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

    if (layer2 == NULL) {
        error = -7;
        goto done;
    }
    if (layer2->zone != zone) {
        error = -8;
        goto done;
    }
    if (save_layer2)
        *save_layer2 = *layer2;

done:
    rel_buffer(buffer1);
    rel_buffer(buffer2);

    if (errorp)
        *errorp = error;

    return(result_offset);
}
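
A hypothetical caller sketch (the helper name and its use of the saved
layer2 entry are illustrative; it assumes the HAMMER on-disk headers):

/*
 * Illustrative caller: resolve a zone offset to find which zone owns
 * the underlying big-block.  Returns -1 on lookup failure.
 */
static int
owning_zone(hammer_off_t zone_offset)
{
    struct hammer_blockmap_layer1 layer1;
    struct hammer_blockmap_layer2 layer2;
    int error;

    blockmap_lookup_save(zone_offset, &layer1, &layer2, &error);
    if (error)
        return (-1);    /* one of the negative codes above */
    return (layer2.zone);
}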
Example no. 5
/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
    struct volume_info *volume;
    hammer_blockmap_t blockmap;
    hammer_blockmap_t freemap;
    struct buffer_info *buffer1 = NULL;
    struct buffer_info *buffer2 = NULL;
    hammer_blockmap_layer1_t layer1;
    hammer_blockmap_layer2_t layer2;
    hammer_off_t tmp_offset;
    hammer_off_t layer1_offset;
    hammer_off_t layer2_offset;
    hammer_off_t block_offset;
    void *ptr;

    volume = get_root_volume();

    blockmap = &volume->ondisk->vol0_blockmap[zone];
    freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
    assert(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

    /*
     * Alignment and buffer-boundary issues.  If the allocation would
     * cross a buffer boundary we have to skip to the next buffer.
     */
    bytes = HAMMER_DATA_DOALIGN(bytes);
    assert(bytes > 0 && bytes <= HAMMER_BUFSIZE);  /* not HAMMER_XBUFSIZE */
    assert(hammer_is_index_record(zone));

again:
    assert(blockmap->next_offset != HAMMER_ZONE_ENCODE(zone + 1, 0));

    tmp_offset = blockmap->next_offset + bytes - 1;
    if ((blockmap->next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
        blockmap->next_offset = tmp_offset & ~HAMMER_BUFMASK64;
    }
    block_offset = blockmap->next_offset & HAMMER_BIGBLOCK_MASK;

    /*
     * Dive layer 1.
     */
    layer1_offset = freemap->phys_offset +
                    HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);
    layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
    assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
    assert(!(block_offset == 0 && layer1->blocks_free == 0));

    /*
     * Dive layer 2, each entry represents a big-block.
     */
    layer2_offset = layer1->phys_offset +
                    HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);
    layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

    if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX)
        errx(1, "alloc_blockmap: layer2 ran out of space!");

    /*
     * If we are entering a new big-block assign ownership to our
     * zone.  If the big-block is owned by another zone skip it.
     */
    if (layer2->zone == 0) {
        --layer1->blocks_free;
        hammer_crc_set_layer1(layer1);
        layer2->zone = zone;
        --volume->ondisk->vol0_stat_freebigblocks;
        assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
        assert(layer2->append_off == 0);
    }
    if (layer2->zone != zone) {
        blockmap->next_offset =
            HAMMER_ZONE_LAYER2_NEXT_OFFSET(blockmap->next_offset);
        goto again;
    }

    assert(layer2->append_off == block_offset);
    layer2->bytes_free -= bytes;
    *result_offp = blockmap->next_offset;
    blockmap->next_offset += bytes;
    layer2->append_off = (int)(blockmap->next_offset & HAMMER_BIGBLOCK_MASK);
    hammer_crc_set_layer2(layer2);

    ptr = get_buffer_data(*result_offp, bufferp, 0);
    (*bufferp)->cache.modified = 1;

    buffer1->cache.modified = 1;
    buffer2->cache.modified = 1;

    rel_buffer(buffer1);
    rel_buffer(buffer2);
    return(ptr);
}
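
A hypothetical usage sketch (the chunk size and zone choice are
illustrative):

/*
 * Illustrative only: carve a 128-byte chunk out of the small-data
 * zone, zero it, and release the data buffer alloc_blockmap() handed
 * back.  The returned blockmap offset remains valid after the buffer
 * release.
 */
static hammer_off_t
alloc_small_data_chunk(void)
{
    struct buffer_info *buffer = NULL;
    hammer_off_t offset;
    void *ptr;

    ptr = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, 128,
                         &offset, &buffer);
    bzero(ptr, 128);
    rel_buffer(buffer);
    return (offset);
}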