Example 1
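LeafNode::write_to serializes a leaf node: it reserves room for a fixed-size skeleton, writes every record bucket (through a scratch buffer when compression is enabled), then seeks back to fill the skeleton with the sibling links and the bucket index.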
bool LeafNode::write_to(BlockWriter& writer, size_t& skeleton_size)
{
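    // only a freshly created or fully loaded node can be serialized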
    assert(status_ == kNew || status_ == kFullLoaded);

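    // reserve room for the skeleton: two 8-byte sibling nids plus the bucket
    // index; it is filled in once the buckets have been written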
    size_t skeleton_pos = writer.pos();
    skeleton_size = 8 + 8 + buckets_info_size_;
    if (!writer.skip(skeleton_size)) return false;

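    // with compression enabled, allocate a scratch buffer large enough
    // for the largest record bucket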
    Slice buffer;
    if (tree_->compressor_) {
        size_t buffer_length = 0;
        for (size_t i = 0; i < records_.buckets_number(); i++) {
            if (buffer_length < records_.bucket_length(i)) {
                buffer_length = records_.bucket_length(i);
            }
        }
        buffer = Slice::alloc(buffer_length);
    }

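    // write each record bucket and record its offset, on-disk length and
    // uncompressed length in the bucket index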
    assert(records_.buckets_number() == buckets_info_.size());
    for (size_t i = 0; i < records_.buckets_number(); i++) {
        RecordBucket* bucket = records_.bucket(i);

        buckets_info_[i].offset = writer.pos();
        if (!write_bucket(writer, bucket, buffer)) {
            if (buffer.size()) {
                buffer.destroy();
            }
            return false;
        }
        buckets_info_[i].length = writer.pos() - buckets_info_[i].offset;
        buckets_info_[i].uncompressed_length = records_.bucket_length(i);
    }
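    // remember where the payload ends so the writer can be restored afterwards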
    size_t last_pos = writer.pos();

    if (buffer.size()) {
        buffer.destroy();
    }

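    // seek back to the reserved skeleton and write the sibling links
    // followed by the bucket index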
    writer.seek(skeleton_pos);
    if (!writer.writeUInt64(left_sibling_)) return false;
    if (!writer.writeUInt64(right_sibling_)) return false;

    if (!write_buckets_info(writer)) {
        LOG_ERROR("write buckets info error, nid " << nid_);
        return false;
    }
    writer.seek(last_pos);
    return true;
}
Example 2
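InnerNode::write_to follows the same pattern for an inner node: reserve the skeleton, write the first msgbuf and one msgbuf per pivot, then seek back to write the bottom flag, pivot count, first child and the per-msgbuf index.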
bool InnerNode::write_to(BlockWriter& writer, size_t& skeleton_size)
{
    // get length of skeleton and reserve space for skeleton
    size_t skeleton_offset = writer.pos();
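    // fixed part: 1 (bottom flag) + 4 (pivot count) + 8 (first child nid)
    //           + 4 + 4 + 4 (first msgbuf offset/length/uncompressed length)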
    size_t skeleton_length = 1 + 4 + 8 + 4 + 4 + 4;
    for (size_t i = 0; i < pivots_.size(); i++) {
        skeleton_length += pivot_size(pivots_[i].key);
    }
    if (!writer.skip(skeleton_length)) return false;

    // prepare buffer if compression is enabled
    Slice buffer;
    if (tree_->compressor_) {
        // size the buffer to hold the largest serialized msgbuf
        size_t buffer_length = first_msgbuf_->size();
        for (size_t i = 0; i < pivots_.size(); i++) {
            if (pivots_[i].msgbuf->size() > buffer_length)
                buffer_length = pivots_[i].msgbuf->size();
        }

        buffer = Slice::alloc(buffer_length);
    }

    // write the first msgbuf
    first_msgbuf_offset_ = writer.pos();
    if (!write_msgbuf(writer, first_msgbuf_, buffer)) {
        if (buffer.size()) {
            buffer.destroy();
        }
        return false;
    }
    first_msgbuf_length_ = writer.pos() - first_msgbuf_offset_;
    first_msgbuf_uncompressed_length_ = first_msgbuf_->size();

    // write the remaining msgbufs, one per pivot
    for (size_t i = 0; i < pivots_.size(); i++) {
        pivots_[i].offset = writer.pos();
        if (!write_msgbuf(writer, pivots_[i].msgbuf, buffer)) {
            if (buffer.size()) {
                buffer.destroy();
            }
            return false;
        }
        pivots_[i].length = writer.pos() - pivots_[i].offset;
        pivots_[i].uncompressed_length = pivots_[i].msgbuf->size();
    }

    if (buffer.size()) {
        buffer.destroy();
    }

    size_t last_offset = writer.pos();

    // seek to the head and write index
    writer.seek(skeleton_offset);
    if (!writer.writeBool(bottom_)) return false;
    if (!writer.writeUInt32(pivots_.size())) return false;

    if (!writer.writeUInt64(first_child_)) return false;
    if (!writer.writeUInt32(first_msgbuf_offset_)) return false;
    if (!writer.writeUInt32(first_msgbuf_length_)) return false;
    if (!writer.writeUInt32(first_msgbuf_uncompressed_length_)) return false;

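    // each pivot entry: key, child nid, msgbuf offset/length/uncompressed length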
    for (size_t i = 0; i < pivots_.size(); i++) {
        if (!writer.writeSlice(pivots_[i].key)) return false;
        if (!writer.writeUInt64(pivots_[i].child)) return false;
        if (!writer.writeUInt32(pivots_[i].offset)) return false;
        if (!writer.writeUInt32(pivots_[i].length)) return false;
        if (!writer.writeUInt32(pivots_[i].uncompressed_length)) return false;
    }

    writer.seek(last_offset);
    skeleton_size = skeleton_length;
    return true;
}