size_t snappy_output_stream::compress(void* sink, size_t sink_size) {
  VAST_ENTER_WITH(VAST_ARG(sink, sink_size));
  // Snappy's RawCompress performs no bounds checking on the sink, so the
  // caller must provide space for the worst case.
  VAST_ASSERT(sink_size >= ::snappy::MaxCompressedLength(valid_bytes_));
  size_t n;
  ::snappy::RawCompress(reinterpret_cast<char const*>(uncompressed_.data()),
                        valid_bytes_, reinterpret_cast<char*>(sink), &n);
  VAST_ASSERT(n <= sink_size);
  VAST_ASSERT(n > 0);
  VAST_RETURN(n);
}
size_t lz4_output_stream::compress(void* sink, size_t sink_size) {
  VAST_ENTER_WITH(VAST_ARG(sink, sink_size));
  VAST_ASSERT(sink_size >= valid_bytes_);
  auto n = LZ4_compress_default(
    reinterpret_cast<char const*>(uncompressed_.data()),
    reinterpret_cast<char*>(sink),
    static_cast<int>(valid_bytes_),
    static_cast<int>(sink_size));
  VAST_ASSERT(n > 0);
  VAST_RETURN(n);
}
size_t lz4_input_stream::uncompress(void const* source, size_t size) {
  VAST_ENTER_WITH(VAST_ARG(source, size));
  // LZ4 cannot report the decompressed size of a block up front. Our blocks
  // are at most 64 KiB, so the output buffer must hold a maximal block.
  VAST_ASSERT(uncompressed_.size() >= 64 << 10);
  auto n = LZ4_decompress_safe(
    reinterpret_cast<char const*>(source),
    reinterpret_cast<char*>(uncompressed_.data()),
    static_cast<int>(size),
    static_cast<int>(uncompressed_.size()));
  VAST_ASSERT(n > 0);
  VAST_RETURN(n);
}
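By way of reference, here is a minimal, self-contained round trip through the same two LZ4 entry points. The sample input and buffer names are illustrative; only LZ4_compressBound, LZ4_compress_default, and LZ4_decompress_safe are the real LZ4 API. Note that LZ4_compress_default returns 0 instead of overflowing when the destination is too small, which is what the n > 0 assertions above lean on:

#include <lz4.h>

#include <cassert>
#include <cstring>
#include <string>
#include <vector>

int main() {
  std::string input(64 << 10, 'x'); // One maximal 64 KiB block.
  // LZ4_compressBound gives the worst-case compressed size, so the
  // compression call below cannot fail for lack of space.
  std::vector<char> compressed(LZ4_compressBound(static_cast<int>(input.size())));
  auto n = LZ4_compress_default(input.data(), compressed.data(),
                                static_cast<int>(input.size()),
                                static_cast<int>(compressed.size()));
  assert(n > 0);
  std::vector<char> output(input.size());
  auto m = LZ4_decompress_safe(compressed.data(), output.data(), n,
                               static_cast<int>(output.size()));
  assert(m == static_cast<int>(input.size()));
  assert(std::memcmp(input.data(), output.data(), input.size()) == 0);
}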
size_t snappy_input_stream::uncompress(void const* source, size_t size) {
  VAST_ENTER_WITH(VAST_ARG(source, size));
  size_t n;
  auto success = ::snappy::GetUncompressedLength(
    reinterpret_cast<char const*>(source), size, &n);
  VAST_ASSERT(success);
  // Grow the output buffer to the decompressed size, not the compressed one.
  if (uncompressed_.size() < n)
    uncompressed_.resize(n);
  success
    = ::snappy::RawUncompress(reinterpret_cast<char const*>(source), size,
                              reinterpret_cast<char*>(uncompressed_.data()));
  VAST_ASSERT(success);
  VAST_RETURN(n);
}
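Snappy, in contrast, sizes both directions itself: MaxCompressedLength bounds the sink for RawCompress, and GetUncompressedLength recovers the exact decompressed size from the compressed data, which is why the resize above must use n rather than the compressed size. A minimal round trip with those calls, with the sample input again illustrative:

#include <snappy.h>

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::string input(64 << 10, 'x');
  // Size the sink for the worst case; RawCompress does no bounds checking.
  std::vector<char> compressed(snappy::MaxCompressedLength(input.size()));
  size_t n;
  snappy::RawCompress(input.data(), input.size(), compressed.data(), &n);
  // The exact decompressed size is encoded in the compressed data itself.
  size_t uncompressed_size;
  bool ok = snappy::GetUncompressedLength(compressed.data(), n, &uncompressed_size);
  assert(ok && uncompressed_size == input.size());
  std::vector<char> output(uncompressed_size);
  ok = snappy::RawUncompress(compressed.data(), n, output.data());
  assert(ok);
}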
bool compressed_input_stream::next(void const** data, size_t* size) {
  VAST_ENTER_WITH(VAST_ARG(data, size));
  VAST_ASSERT(!uncompressed_.empty());
  if (rewind_bytes_ > 0) {
    VAST_ASSERT(rewind_bytes_ <= valid_bytes_);
    // Hand back the rewound tail of the previously decompressed block.
    *data = uncompressed_.data() + valid_bytes_ - rewind_bytes_;
    *size = rewind_bytes_;
    rewind_bytes_ = 0;
    VAST_RETURN(true);
  }

  uint32_t compressed_block_size;
  if (!source_.read<uint32_t>(&compressed_block_size))
    VAST_RETURN(false);
  if (compressed_block_size == 0)
    VAST_RETURN(false); // Compressed blocks shall never have size 0.

  void const* src_data;
  size_t src_size;
  if (!source_.raw(&src_data, &src_size))
    VAST_RETURN(false);

  if (compressed_block_size > src_size) {
    // The compressed block exceeds the source's contiguous buffer, so we must
    // first copy from the source until we own the entire block.
    compressed_.resize(compressed_block_size);
    if (!source_.read_raw(compressed_.data(), compressed_block_size))
      VAST_RETURN(false);
    valid_bytes_ = uncompress(compressed_.data(), compressed_block_size);
    if (valid_bytes_ == 0)
      VAST_RETURN(false);
  } else {
    // The full block is available as a contiguous buffer in the source, so we
    // can decompress it directly.
    valid_bytes_ = uncompress(src_data, compressed_block_size);
    if (!source_.skip(compressed_block_size) || valid_bytes_ == 0)
      VAST_RETURN(false);
  }

  *data = uncompressed_.data();
  *size = valid_bytes_;
  total_bytes_ += valid_bytes_;
  VAST_RETURN(true);
}
bool array_output_stream::next(void** data, size_t* size) {
  if (position_ == size_) {
    last_size_ = 0;
    return false;
  }
  VAST_ASSERT(position_ < size_);
  last_size_ = std::min(block_size_, size_ - position_);
  *data = data_ + position_;
  *size = last_size_;
  position_ += last_size_;
  return true;
}
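Usage-wise, next() above hands out the backing array one block at a time, in the style of protobuf's ZeroCopyOutputStream: the caller receives a pointer and a capacity and writes directly into the granted block. A sketch of a consuming loop follows; the fill helper is a hypothetical name introduced here for illustration:

#include <cstring>

// Hypothetical consumer: writes `byte` into every block the stream grants.
void fill(array_output_stream& out, char byte) {
  void* data;
  size_t size;
  // Each successful next() grants a fresh block; the caller may write up to
  // `size` bytes starting at `data`.
  while (out.next(&data, &size))
    std::memset(data, byte, size);
}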
table_slice_ptr column_major_matrix_table_slice_builder::finish() {
  // Sanity check.
  if (col_ != 0 || rows_ == 0)
    return nullptr;
  // Obtain uninitialized memory that holds the slice object plus the full
  // matrix.
  using impl = column_major_matrix_table_slice;
  table_slice_header header{layout(), rows_, 0};
  auto result = impl::make_uninitialized(std::move(header));
  // Construct the data block.
  auto data_ptr = result->storage();
  for (auto& col_vec : columns_) {
    VAST_ASSERT(col_vec.size() == rows_);
    std::uninitialized_move(col_vec.begin(), col_vec.end(), data_ptr);
    data_ptr += rows_;
    col_vec.clear();
  }
  rows_ = 0;
  return table_slice_ptr{result, false};
}
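After finish(), element (row, column) of the matrix sits at offset column * rows + row in the storage block, because each column vector is moved in contiguously. A hypothetical accessor, not part of the original code, that spells out the layout:

// Hypothetical helper: index into a column-major block holding `rows` rows.
template <class T>
T const& at(T const* storage, size_t rows, size_t row, size_t column) {
  return storage[column * rows + row];
}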
bool compressed_output_stream::flush() {
  VAST_ENTER();
  if (valid_bytes_ == 0)
    VAST_RETURN(true);

  void* dst_data;
  size_t dst_size;
  if (!sink_.raw(&dst_data, &dst_size))
    VAST_RETURN(false);

  auto compressed_bound = compressed_size(valid_bytes_);
  compressed_.resize(compressed_bound);
  size_t n;
  // Each block is prefixed with its compressed size as a 32-bit integer.
  if (sizeof(uint32_t) + compressed_bound > dst_size) {
    // The block may be too large for the output stream buffer, so we compress
    // into a temporary scratch buffer first and then write it out in raw form.
    n = compress(compressed_.data(), compressed_.size());
    VAST_ASSERT(n > 0);
    VAST_ASSERT(n <= std::numeric_limits<uint32_t>::max());
    VAST_ASSERT(n <= compressed_bound);
    total_bytes_ += sink_.write<uint32_t>(&n);
    total_bytes_ += sink_.write_raw(compressed_.data(), n);
  } else {
    // We have enough space to write the full block directly into the
    // underlying output buffer; no need for the scratch space.
    n = compress(reinterpret_cast<uint8_t*>(dst_data) + sizeof(uint32_t),
                 compressed_.size());
    VAST_ASSERT(n > 0);
    VAST_ASSERT(n <= std::numeric_limits<uint32_t>::max());
    VAST_ASSERT(n <= compressed_bound);
    auto four = sink_.write<uint32_t>(&n);
    if (four != sizeof(uint32_t))
      VAST_RETURN(false);
    total_bytes_ += four + n;
    sink_.skip(n);
  }
  valid_bytes_ = 0;
  VAST_RETURN(true);
}
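flush() here and compressed_input_stream::next() earlier agree on a simple frame format: a 32-bit compressed size followed by the payload. Below is a standalone sketch of that framing over a plain byte vector; the codec is stubbed out with a verbatim copy, all names are illustrative, and the byte-order handling of the real coded streams is omitted:

#include <cstdint>
#include <cstring>
#include <vector>

// Append one frame: a 32-bit size header followed by the payload. The real
// streams write a compressed block here; this sketch copies verbatim.
void write_frame(std::vector<uint8_t>& out, void const* block, uint32_t n) {
  auto old_size = out.size();
  out.resize(old_size + sizeof(uint32_t) + n);
  std::memcpy(out.data() + old_size, &n, sizeof(uint32_t));
  std::memcpy(out.data() + old_size + sizeof(uint32_t), block, n);
}

// Read the frame at `offset`: yields the payload pointer, returns its size,
// and advances `offset` past the frame.
uint32_t read_frame(std::vector<uint8_t> const& in, size_t& offset,
                    uint8_t const** payload) {
  uint32_t n;
  std::memcpy(&n, in.data() + offset, sizeof(uint32_t));
  *payload = in.data() + offset + sizeof(uint32_t);
  offset += sizeof(uint32_t) + n;
  return n;
}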
// The null codec performs no compression: compress() copies bytes verbatim.
size_t null_output_stream::compress(void* sink, size_t sink_size) {
  VAST_ENTER_WITH(VAST_ARG(sink, sink_size));
  VAST_ASSERT(sink_size >= valid_bytes_);
  std::memcpy(sink, uncompressed_.data(), valid_bytes_);
  VAST_RETURN(valid_bytes_);
}
size_t null_input_stream::uncompress(void const* source, size_t size) {
  VAST_ENTER_WITH(VAST_ARG(source, size));
  VAST_ASSERT(uncompressed_.size() >= size);
  std::memcpy(uncompressed_.data(), source, size);
  VAST_RETURN(size);
}
 vast::error const& error() const {
   VAST_ASSERT(error_);
   return *error_;
 }
bool event_evaluator::operator()(schema_extractor const&, data const&) {
  VAST_ASSERT(!"schema extract should have been resolved");
  return false;
}
behavior ascii(stateful_actor<ascii_state>* self,
               std::unique_ptr<std::ostream> out) {
  VAST_ASSERT(out != nullptr);
  self->state.out = std::move(out);
  return make(self);
}
 /// Retrieves the value of the trial.
 /// @returns A mutable reference to the contained value.
 /// @pre `*this == true`.
 T& value() {
   VAST_ASSERT(engaged_);
   return value_;
 }
Stream& operator<<(Stream& out, abstract_actor const* a) {
  VAST_ASSERT(a != nullptr);
  out << *a;
  return out;
}
Stream& operator<<(Stream& out, stateful_actor<T, Base> const* a) {
  VAST_ASSERT(a != nullptr);
  out << *a;
  return out;
}
 c_string_parser(const char* str) : str_{str} {
   VAST_ASSERT(str != nullptr);
 }
 /// Retrieves the value of the trial.
 /// @returns The contained value.
 /// @pre `*this == true`.
 T const& value() const {
   VAST_ASSERT(engaged_);
   return value_;
 }
 inline T operator()(T) {
   VAST_ASSERT(!"sizeof(T) is not 1, 2, 4, or 8");
 }
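The assertion above is the fall-through arm of a dispatch on sizeof(T); the useful work presumably lives in the overloads for 1-, 2-, 4-, and 8-byte types. A sketch of the same dispatch as a single template, using a byte swap of integral types as a stand-in operation and the GCC/Clang __builtin_bswap intrinsics; all of it is illustrative, not the library's code:

#include <cstdint>

// Hypothetical by-size dispatch mirroring the assert above: one arm per
// supported width of an integral T, and a compile-time failure otherwise.
template <class T>
T byte_swap(T x) {
  if constexpr (sizeof(T) == 1)
    return x;
  else if constexpr (sizeof(T) == 2)
    return static_cast<T>(__builtin_bswap16(static_cast<uint16_t>(x)));
  else if constexpr (sizeof(T) == 4)
    return static_cast<T>(__builtin_bswap32(static_cast<uint32_t>(x)));
  else if constexpr (sizeof(T) == 8)
    return static_cast<T>(__builtin_bswap64(static_cast<uint64_t>(x)));
  else
    static_assert(sizeof(T) == 0, "sizeof(T) is not 1, 2, 4, or 8");
}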
 /// Retrieves the error of the trial.
 /// @returns The contained error.
 /// @pre `*this == false`.
 vast::error const& error() const {
   VAST_ASSERT(!engaged_);
   return error_;
 }
 /// Retrieves the error of the trial.
 /// @returns The contained error.
 /// @pre `*this == false`.
 vast::error& error() {
   VAST_ASSERT(!engaged_);
   return error_;
 }
char& output_iterator::dereference() const {
  VAST_ASSERT(i_ < buf_.size());
  return *buf_.as<char>(i_);
}
bool event_evaluator::operator()(type_extractor const&, data const&) {
  VAST_ASSERT(!"type extractor should have been optimized away");
  return false;
}