Example no. 1
/*
 * Allocate a zone-3 big-block for the UNDO/REDO FIFO.
 */
hammer_off_t
alloc_undo_bigblock(struct volume_info *volume)
{
    hammer_blockmap_t freemap;
    struct buffer_info *buffer1 = NULL;
    struct buffer_info *buffer2 = NULL;
    hammer_blockmap_layer1_t layer1;
    hammer_blockmap_layer2_t layer2;
    hammer_off_t layer1_offset;
    hammer_off_t layer2_offset;
    hammer_off_t result_offset;

    /* Only root volume needs formatting */
    assert(volume->vol_no == HAMMER_ROOT_VOLNO);

    result_offset = bootstrap_bigblock(volume);
    freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

    /*
     * Dive layer 1.
     */
    layer1_offset = freemap->phys_offset +
                    HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
    layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
    assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
    --layer1->blocks_free;
    hammer_crc_set_layer1(layer1);
    buffer1->cache.modified = 1;

    /*
     * Dive layer 2, each entry represents a big-block.
     */
    layer2_offset = layer1->phys_offset +
                    HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
    layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
    assert(layer2->zone == 0);
    layer2->zone = HAMMER_ZONE_UNDO_INDEX;
    layer2->append_off = HAMMER_BIGBLOCK_SIZE;
    layer2->bytes_free = 0;
    hammer_crc_set_layer2(layer2);
    buffer2->cache.modified = 1;

    --volume->ondisk->vol0_stat_freebigblocks;

    rel_buffer(buffer1);
    rel_buffer(buffer2);

    return(result_offset);
}
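
A minimal caller sketch: assuming get_root_volume() behaves as in Example no. 5 below, a hypothetical format_undo_fifo() would reserve one big-block per iteration. This is an illustration of the allocator's contract, not the tool's actual formatting path.

/*
 * Hypothetical caller: reserve `count` big-blocks for the UNDO FIFO.
 * format_undo_fifo() is a made-up name; get_root_volume() is assumed
 * to return the root volume as in Example no. 5.
 */
static void
format_undo_fifo(int count)
{
    struct volume_info *root = get_root_volume();
    hammer_off_t off;
    int i;

    for (i = 0; i < count; ++i) {
        off = alloc_undo_bigblock(root);
        /* record `off` in the UNDO FIFO layout (not shown) */
    }
}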
Example no. 2
struct buffer *wait_response(int cfd) {
    int rbytes = 0, rc, r, remain, resp_size;
    struct buffer *response;

    rc = wait_socket_data(cfd, 3000, CR_READ);
    if (rc <= 0) { // timeout or error
        logger(DEBUG, "wait response error: %s!",
               rc == 0 ? "timeout" : strerror(errno));
        return NULL;
    }

    response = alloc_buffer(128);
    remain = get_buffer_cap(response);
    TIME_START();
    while (rbytes < 4) {
        r = read(cfd, get_buffer_data(response) + rbytes, remain);
        if (r <= 0) {
            if (r == -1 &&
                (errno == EAGAIN || errno == EINTR || errno == EWOULDBLOCK))
                continue;
            if (r == 0) {   /* peer closed the connection; errno is stale */
                close(cfd);
                logger(DEBUG, "wait response error: connection closed!");
            } else {
                logger(DEBUG, "wait response error: %s!", strerror(errno));
            }
            goto err_cleanup;
        }
        rbytes += r;
        remain -= r;
        incr_buffer_used(response, r);
    }
    resp_size = read_int32_buffer(response) + 4; /* body length plus the 4-byte size header */
    need_expand(response, resp_size);
    while (rbytes < resp_size) {
        r = read(cfd, get_buffer_data(response) + rbytes, resp_size - rbytes);
        if (r <= 0) {
            if (r == -1 &&
                (errno == EAGAIN || errno == EINTR || errno == EWOULDBLOCK))
                continue;
            if (r == 0) {   /* peer closed the connection; errno is stale */
                close(cfd);
                logger(DEBUG, "wait response error: connection closed!");
            } else {
                logger(DEBUG, "wait response error: %s!", strerror(errno));
            }
            goto err_cleanup;
        }
        rbytes += r;
        incr_buffer_used(response, r);
    }
    TIME_END();
    logger(DEBUG, "Total time cost %lldus in wait response", TIME_COST());
    return response;

err_cleanup:
    dealloc_buffer(response);
    return NULL;
}
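
A short usage sketch, assuming a connected socket cfd; send_request() is a hypothetical helper and not part of the code above.

/*
 * Hypothetical round trip: write a request, then wait (bounded by the
 * 3000 ms timeout inside wait_response()) for the framed reply.
 */
static int
do_request(int cfd, const void *payload, int len)
{
    struct buffer *resp;

    if (send_request(cfd, payload, len) < 0)    /* assumed helper */
        return -1;
    resp = wait_response(cfd);
    if (resp == NULL)
        return -1;
    /* parse the reply here, then release the buffer */
    dealloc_buffer(resp);
    return 0;
}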
Example no. 3
XCam3AStats *
X3aStats::get_stats ()
{
    SmartPtr<BufferData> data = get_buffer_data ();
    SmartPtr<X3aStatsData> stats = data.dynamic_cast_ptr<X3aStatsData> ();

    XCAM_FAIL_RETURN(
        WARNING,
        stats.ptr(),
        NULL,
        "X3aStats get_stats failed with NULL");
    return stats->get_stats ();
}
Example no. 4
hammer_off_t
blockmap_lookup_save(hammer_off_t zone_offset,
                     hammer_blockmap_layer1_t save_layer1,
                     hammer_blockmap_layer2_t save_layer2,
                     int *errorp)
{
    struct volume_info *root_volume = NULL;
    hammer_volume_ondisk_t ondisk;
    hammer_blockmap_t blockmap;
    hammer_blockmap_t freemap;
    hammer_blockmap_layer1_t layer1;
    hammer_blockmap_layer2_t layer2;
    struct buffer_info *buffer1 = NULL;
    struct buffer_info *buffer2 = NULL;
    hammer_off_t layer1_offset;
    hammer_off_t layer2_offset;
    hammer_off_t result_offset = HAMMER_OFF_BAD;
    int zone;
    int error = 0;

    if (save_layer1)
        bzero(save_layer1, sizeof(*save_layer1));
    if (save_layer2)
        bzero(save_layer2, sizeof(*save_layer2));

    zone = HAMMER_ZONE_DECODE(zone_offset);

    if (zone <= HAMMER_ZONE_RAW_VOLUME_INDEX) {
        error = -1;
        goto done;
    }
    if (zone >= HAMMER_MAX_ZONES) {
        error = -2;
        goto done;
    }

    root_volume = get_root_volume();
    ondisk = root_volume->ondisk;
    blockmap = &ondisk->vol0_blockmap[zone];

    /*
     * Handle blockmap offset translations.
     */
    if (hammer_is_index_record(zone)) {
        result_offset = hammer_xlate_to_zone2(zone_offset);
    } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
        if (zone_offset >= blockmap->alloc_offset) {
            error = -3;
            goto done;
        }
        result_offset = hammer_xlate_to_undo(ondisk, zone_offset);
    } else {
        /* assert(zone == HAMMER_ZONE_RAW_BUFFER_INDEX); */
        result_offset = zone_offset;
    }

    /*
     * The blockmap should match the requested zone (else the volume
     * header is mashed).
     */
    if (hammer_is_index_record(zone) &&
            HAMMER_ZONE_DECODE(blockmap->alloc_offset) != zone) {
        error = -4;
        goto done;
    }

    /*
     * Validate that the big-block is assigned to the zone.  Also
     * assign save_layer{1,2} if not NULL.
     */
    freemap = &ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

    /*
     * Dive layer 1.
     */
    layer1_offset = freemap->phys_offset +
                    HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
    layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

    if (layer1 == NULL) {
        error = -5;
        goto done;
    }
    if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
        error = -6;
        goto done;
    }
    if (save_layer1)
        *save_layer1 = *layer1;

    /*
     * Dive layer 2, each entry represents a big-block.
     */
    layer2_offset = layer1->phys_offset +
                    HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
    layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

    if (layer2 == NULL) {
        error = -7;
        goto done;
    }
    if (layer2->zone != zone) {
        error = -8;
        goto done;
    }
    if (save_layer2)
        *save_layer2 = *layer2;

done:
    rel_buffer(buffer1);
    rel_buffer(buffer2);

    if (errorp)
        *errorp = error;

    return(result_offset);
}
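
A usage sketch for the lookup; it assumes hammer_blockmap_layer2_t is a pointer typedef for struct hammer_blockmap_layer2 (consistent with the dereferences above), and check_offset() is a made-up wrapper.

/*
 * Hypothetical check: translate a zone offset to its zone-2 address
 * and capture the owning layer-2 entry for inspection.
 */
static void
check_offset(hammer_off_t zone_offset)
{
    struct hammer_blockmap_layer2 layer2;
    hammer_off_t zone2_offset;
    int error;

    zone2_offset = blockmap_lookup_save(zone_offset, NULL, &layer2, &error);
    if (error)
        errx(1, "blockmap_lookup_save: error %d", error);
    printf("zone2 %016jx append_off=%u bytes_free=%d\n",
           (uintmax_t)zone2_offset, layer2.append_off, layer2.bytes_free);
}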
Example no. 5
/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
    struct volume_info *volume;
    hammer_blockmap_t blockmap;
    hammer_blockmap_t freemap;
    struct buffer_info *buffer1 = NULL;
    struct buffer_info *buffer2 = NULL;
    hammer_blockmap_layer1_t layer1;
    hammer_blockmap_layer2_t layer2;
    hammer_off_t tmp_offset;
    hammer_off_t layer1_offset;
    hammer_off_t layer2_offset;
    hammer_off_t block_offset;
    void *ptr;

    volume = get_root_volume();

    blockmap = &volume->ondisk->vol0_blockmap[zone];
    freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
    assert(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

    /*
     * Alignment and buffer-boundary issues.  If the allocation would
     * cross a buffer boundary we have to skip to the next buffer.
     */
    bytes = HAMMER_DATA_DOALIGN(bytes);
    assert(bytes > 0 && bytes <= HAMMER_BUFSIZE);  /* not HAMMER_XBUFSIZE */
    assert(hammer_is_index_record(zone));

again:
    assert(blockmap->next_offset != HAMMER_ZONE_ENCODE(zone + 1, 0));

    tmp_offset = blockmap->next_offset + bytes - 1;
    if ((blockmap->next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
        blockmap->next_offset = tmp_offset & ~HAMMER_BUFMASK64;
    }
    block_offset = blockmap->next_offset & HAMMER_BIGBLOCK_MASK;

    /*
     * Dive layer 1.
     */
    layer1_offset = freemap->phys_offset +
                    HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);
    layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
    assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
    assert(!(block_offset == 0 && layer1->blocks_free == 0));

    /*
     * Dive layer 2, each entry represents a big-block.
     */
    layer2_offset = layer1->phys_offset +
                    HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);
    layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

    if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX)
        errx(1, "alloc_blockmap: layer2 ran out of space!");

    /*
     * If we are entering a new big-block assign ownership to our
     * zone.  If the big-block is owned by another zone skip it.
     */
    if (layer2->zone == 0) {
        --layer1->blocks_free;
        hammer_crc_set_layer1(layer1);
        layer2->zone = zone;
        --volume->ondisk->vol0_stat_freebigblocks;
        assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
        assert(layer2->append_off == 0);
    }
    if (layer2->zone != zone) {
        blockmap->next_offset =
            HAMMER_ZONE_LAYER2_NEXT_OFFSET(blockmap->next_offset);
        goto again;
    }

    assert(layer2->append_off == block_offset);
    layer2->bytes_free -= bytes;
    *result_offp = blockmap->next_offset;
    blockmap->next_offset += bytes;
    layer2->append_off = (int)blockmap->next_offset & HAMMER_BIGBLOCK_MASK;
    hammer_crc_set_layer2(layer2);

    ptr = get_buffer_data(*result_offp, bufferp, 0);
    (*bufferp)->cache.modified = 1;

    buffer1->cache.modified = 1;
    buffer2->cache.modified = 1;

    rel_buffer(buffer1);
    rel_buffer(buffer2);
    return(ptr);
}
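
A caller sketch for the allocator: HAMMER_ZONE_BTREE_INDEX and the on-disk node type are assumptions taken from HAMMER's on-disk layout, and alloc_btree_node() is a made-up name.

/*
 * Hypothetical use: carve one on-disk B-Tree node out of the B-Tree
 * zone.  hammer_node_ondisk_t is assumed to be a pointer typedef for
 * struct hammer_node_ondisk.
 */
static hammer_node_ondisk_t
alloc_btree_node(hammer_off_t *offp, struct buffer_info **bufferp)
{
    hammer_node_ondisk_t node;

    node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
                          offp, bufferp);
    bzero(node, sizeof(*node));
    return(node);
}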