/**
 * Tests the basic allocate / fill / sync / release cycle of the
 * StorageManager for each backend type.
 */
TEST_F(StorageManagerTests, BasicTest) {
  peloton::storage::StorageManager storage_manager;

  std::vector<peloton::BackendType> backend_types = {peloton::BACKEND_TYPE_MM};

  size_t length = 256;
  size_t rounds = 100;

  for (auto backend_type : backend_types) {
    LOG_INFO("Backend :: %d", backend_type);

    for (size_t round_itr = 0; round_itr < rounds; round_itr++) {
      // Allocate
      auto location = storage_manager.Allocate(backend_type, length);

      // Fill it up
      PL_MEMSET(location, '-', length);

      // Sync
      storage_manager.Sync(backend_type, location, length);

      // Release
      storage_manager.Release(backend_type, location);
    }
  }
}
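For reference, a minimal standalone sketch of the same allocate / fill / release round trip, assuming PL_MEMSET is a thin wrapper around std::memset; plain new[]/delete[] stand in for the StorageManager calls so the snippet compiles outside Peloton.

#include <cstddef>
#include <cstring>

int main() {
  const std::size_t length = 256;
  const std::size_t rounds = 100;

  for (std::size_t round_itr = 0; round_itr < rounds; round_itr++) {
    // Allocate a raw buffer (stand-in for StorageManager::Allocate)
    char *location = new char[length];

    // Fill it up, as the test does via PL_MEMSET
    std::memset(location, '-', length);

    // Release it (stand-in for StorageManager::Release)
    delete[] location;
  }
  return 0;
}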
Example #2
// Returns false if the tuple's table (tile group) has been dropped.
// In that case, the recycled tuple cannot be added to the recycled_list;
// since no one will ever use it again, there is no need to keep track of it.
// Note that this assumption breaks if a single tile group is dropped without
// dropping the whole table.
bool GCManager::ResetTuple(const TupleMetadata &tuple_metadata) {
  auto &manager = catalog::Manager::GetInstance();
  auto tile_group = manager.GetTileGroup(tuple_metadata.tile_group_id);

  // While resetting, the table may be destructed by a concurrent DROP TABLE request
  if (tile_group == nullptr) {
    LOG_TRACE("Garbage tuple(%u, %u) in table %u no longer exists",
             tuple_metadata.tile_group_id, tuple_metadata.tuple_slot_id,
             tuple_metadata.table_id);
    return false;
  }

  // From this point on, we hold the tile group's shared pointer,
  // so it is safe to modify its header.

  auto tile_group_header = tile_group->GetHeader();

  // Reset the header
  tile_group_header->SetTransactionId(tuple_metadata.tuple_slot_id,
                                      INVALID_TXN_ID);
  tile_group_header->SetBeginCommitId(tuple_metadata.tuple_slot_id, MAX_CID);
  tile_group_header->SetEndCommitId(tuple_metadata.tuple_slot_id, MAX_CID);
  tile_group_header->SetPrevItemPointer(tuple_metadata.tuple_slot_id,
                                        INVALID_ITEMPOINTER);
  tile_group_header->SetNextItemPointer(tuple_metadata.tuple_slot_id,
                                        INVALID_ITEMPOINTER);
  PL_MEMSET(
      tile_group_header->GetReservedFieldRef(tuple_metadata.tuple_slot_id), 0,
      storage::TileGroupHeader::GetReservedSize());

  LOG_TRACE("Garbage tuple(%u, %u) in table %u is reset",
           tuple_metadata.tile_group_id, tuple_metadata.tuple_slot_id,
           tuple_metadata.table_id);
  return true;
}
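The same reset pattern, sketched against a hypothetical plain-struct header slot: sentinel values for the MVCC fields plus a memset over a reserved region. The constants and layout below are illustrative stand-ins, not Peloton's actual TileGroupHeader format.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <limits>

// Illustrative sentinels and sizes; names and values are hypothetical.
constexpr uint64_t kInvalidTxnId = 0;
constexpr uint64_t kMaxCid = std::numeric_limits<uint64_t>::max();
constexpr std::size_t kReservedSize = 24;

struct HeaderSlot {
  uint64_t txn_id;
  uint64_t begin_cid;
  uint64_t end_cid;
  char reserved[kReservedSize];
};

// Reset one slot to its "empty" state, mirroring the header reset above.
void ResetSlot(HeaderSlot &slot) {
  slot.txn_id = kInvalidTxnId;
  slot.begin_cid = kMaxCid;
  slot.end_cid = kMaxCid;
  std::memset(slot.reserved, 0, kReservedSize);
}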
Example #3
TileGroupHeader::TileGroupHeader(const BackendType &backend_type,
                                 const int &tuple_count)
    : backend_type(backend_type),
      data(nullptr),
      num_tuple_slots(tuple_count),
      next_tuple_slot(0),
      tile_header_lock() {
  header_size = num_tuple_slots * header_entry_size;

  // allocate storage space for header
  auto &storage_manager = storage::StorageManager::GetInstance();
  data = reinterpret_cast<char *>(
      storage_manager.Allocate(backend_type, header_size));
  PL_ASSERT(data != nullptr);

  // zero out the data
  PL_MEMSET(data, 0, header_size);

  // Set MVCC Initial Value
  for (oid_t tuple_slot_id = START_OID; tuple_slot_id < num_tuple_slots;
       tuple_slot_id++) {
    SetTransactionId(tuple_slot_id, INVALID_TXN_ID);
    SetBeginCommitId(tuple_slot_id, MAX_CID);
    SetEndCommitId(tuple_slot_id, MAX_CID);
    SetNextItemPointer(tuple_slot_id, INVALID_ITEMPOINTER);
    SetPrevItemPointer(tuple_slot_id, INVALID_ITEMPOINTER);

    SetInsertCommit(tuple_slot_id, false);  // unused
    SetDeleteCommit(tuple_slot_id, false);  // unused
  }
}
std::unique_ptr<LogBuffer> CircularBufferPool::Get() {
  unsigned int current_idx = GET_BUFFER_POOL_INDEX(tail_.fetch_add(1));
  while (true) {
    if (buffers_[current_idx]) {
      break;
    } else {
      // pause for a minimum amount of time
      _mm_pause();
    }
  }
  LOG_TRACE("CircularBufferPool::Get - current_idx: %u", current_idx);
  std::unique_ptr<LogBuffer> buff = std::move(buffers_[current_idx]);
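  // The moved-from unique_ptr is already nullptr; the memset below merely
  // re-zeroes the slot's raw storage before a producer reuses it.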
  PL_MEMSET(buffers_ + current_idx, 0, sizeof(std::unique_ptr<LogBuffer>));
  return buff;
}
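A self-contained sketch of the ownership-transfer part of Get(): move a unique_ptr out of a fixed slot array and leave the slot empty for reuse. The payload type and pool size are placeholders, and the spin-wait with _mm_pause() from the real implementation is omitted.

#include <array>
#include <cassert>
#include <memory>
#include <string>

int main() {
  // A tiny stand-in for the buffer array; size and payload are placeholders.
  std::array<std::unique_ptr<std::string>, 4> slots;

  // A "producer" fills slot 0.
  slots[0] = std::unique_ptr<std::string>(new std::string("log data"));

  // A "consumer" takes ownership, mirroring CircularBufferPool::Get.
  std::unique_ptr<std::string> buff = std::move(slots[0]);

  // The moved-from slot is now empty and ready for reuse.
  assert(slots[0] == nullptr);
  assert(*buff == "log data");
  return 0;
}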
Example #5
/*
 * is_cpu_genuine_intel -- checks for genuine Intel CPU
 */
int is_cpu_genuine_intel(void) {
  unsigned cpuinfo[4] = {0};

  union {
    char name[0x20];
    unsigned cpuinfo[3];
  } vendor;

  PL_MEMSET(&vendor, 0, sizeof(vendor));

  cpuid(0x0, 0x0, cpuinfo);

  vendor.cpuinfo[0] = cpuinfo[EBX_IDX];
  vendor.cpuinfo[1] = cpuinfo[EDX_IDX];
  vendor.cpuinfo[2] = cpuinfo[ECX_IDX];

  return (strncmp(vendor.name, "GenuineIntel", sizeof(vendor.name))) == 0;
}
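An alternative standalone version of the vendor check using GCC/Clang's <cpuid.h>, for cases where the cpuid() helper and the *_IDX constants above are not available; the register order EBX, EDX, ECX spells out "GenuineIntel".

#include <cpuid.h>
#include <cstring>

int IsGenuineIntel(void) {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx)) {
    return 0;  // CPUID leaf 0 not supported
  }

  // The vendor string comes back in EBX, EDX, ECX, in that order.
  char vendor[13] = {0};
  std::memcpy(vendor + 0, &ebx, 4);
  std::memcpy(vendor + 4, &edx, 4);
  std::memcpy(vendor + 8, &ecx, 4);

  return std::strncmp(vendor, "GenuineIntel", 12) == 0;
}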
bool TransactionLevelGCManager::ResetTuple(const ItemPointer &location) {
  auto &manager = catalog::Manager::GetInstance();
  auto tile_group = manager.GetTileGroup(location.block).get();
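  // Unlike GCManager::ResetTuple above, no nullptr check is performed here;
  // the caller is assumed to guarantee that the tile group still exists.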

  auto tile_group_header = tile_group->GetHeader();

  // Reset the header
  tile_group_header->SetTransactionId(location.offset, INVALID_TXN_ID);
  tile_group_header->SetBeginCommitId(location.offset, MAX_CID);
  tile_group_header->SetEndCommitId(location.offset, MAX_CID);
  tile_group_header->SetPrevItemPointer(location.offset, INVALID_ITEMPOINTER);
  tile_group_header->SetNextItemPointer(location.offset, INVALID_ITEMPOINTER);

  PL_MEMSET(
    tile_group_header->GetReservedFieldRef(location.offset), 0,
    storage::TileGroupHeader::GetReservedSize());

  // Reclaim the varlen pool
  CheckAndReclaimVarlenColumns(tile_group, location.offset);

  LOG_TRACE("Garbage tuple(%u, %u) is reset", location.block, location.offset);
  return true;
}
Example #7
Tile::Tile(BackendType backend_type, TileGroupHeader *tile_header,
           const catalog::Schema &tuple_schema, TileGroup *tile_group,
           int tuple_count)
    : database_id(INVALID_OID),
      table_id(INVALID_OID),
      tile_group_id(INVALID_OID),
      tile_id(INVALID_OID),
      backend_type(backend_type),
      schema(tuple_schema),
      data(NULL),
      tile_group(tile_group),
      pool(NULL),
      num_tuple_slots(tuple_count),
      column_count(tuple_schema.GetColumnCount()),
      tuple_length(tuple_schema.GetLength()),
      uninlined_data_size(0),
      column_header(NULL),
      column_header_size(INVALID_OID),
      tile_group_header(tile_header) {
  PL_ASSERT(tuple_count > 0);

  tile_size = tuple_count * tuple_length;

  // allocate tuple storage space for inlined data
  auto &storage_manager = storage::StorageManager::GetInstance();
  data = reinterpret_cast<char *>(
      storage_manager.Allocate(backend_type, tile_size));
  PL_ASSERT(data != NULL);

  // zero out the data
  PL_MEMSET(data, 0, tile_size);

  // allocate pool for blob storage; the inlined-schema check below is
  // currently disabled, so the pool is always created
  // if (schema.IsInlined() == false) {
  pool = new type::EphemeralPool();
  //}
}
Example #8
void NetworkAddress::FillAddr(struct sockaddr_in* addr) const {
  addr->sin_family = AF_INET;
  addr->sin_port = port_;
  addr->sin_addr.s_addr = ip_address_;
  PL_MEMSET(addr->sin_zero, 0, sizeof(addr->sin_zero));
}
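A standalone variant in the same spirit, assuming the inputs are in host byte order (the member version above stores port_ and ip_address_ already converted); it zeroes the whole struct, which also clears sin_zero.

#include <arpa/inet.h>
#include <cstdint>
#include <cstring>
#include <netinet/in.h>

// Hypothetical helper: fill a sockaddr_in from host-order values.
void FillAddrFromHostOrder(struct sockaddr_in *addr, uint32_t ip_host,
                           uint16_t port_host) {
  std::memset(addr, 0, sizeof(*addr));  // clears sin_zero as well
  addr->sin_family = AF_INET;
  addr->sin_port = htons(port_host);
  addr->sin_addr.s_addr = htonl(ip_host);
}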
Example #9
// Allocate a contiguous block of memory of the specified size, conveniently
// initialized to zeroes.
void *VarlenPool::AllocateZeroes(std::size_t size) {
  return PL_MEMSET(Allocate(size), 0, size);
}
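The same memset-on-allocate idiom against plain malloc, as a sketch; a check for allocation failure is added because memset on a null pointer is undefined behavior.

#include <cstdlib>
#include <cstring>

// Allocate `size` bytes and zero them; returns nullptr if allocation fails.
void *AllocateZeroes(std::size_t size) {
  void *ptr = std::malloc(size);
  if (ptr == nullptr) return nullptr;
  return std::memset(ptr, 0, size);
}

Note that std::calloc(1, size) would achieve the same result in a single call.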
Example #10
/**
 * Private constructor that initializes storage and specifies the type of
 * value that will be stored in this instance.
 */
Value::Value(const ValueType type) {
  PL_MEMSET(m_data, 0, 16);
  SetValueType(type);
  m_sourceInlined = true;
  m_cleanUp = true;
}
Example #11
/**
 * Public constructor that initializes to a Value that is unusable with other
 * Values. Useful for declaring storage for a Value.
 */
Value::Value() {
  PL_MEMSET(m_data, 0, 16);
  SetValueType(VALUE_TYPE_INVALID);
  m_sourceInlined = true;
  m_cleanUp = true;
}
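Both constructors zero the 16-byte inline storage before tagging the value type. A minimal sketch of that pattern with a hypothetical tagged-value class:

#include <cstring>

enum class ValueTag { Invalid, Integer, Double };

class InlineValue {
 public:
  // Mirror the Value constructors: zero the inline buffer, then set the tag.
  InlineValue() : tag_(ValueTag::Invalid) {
    std::memset(data_, 0, sizeof(data_));
  }
  explicit InlineValue(ValueTag tag) : tag_(tag) {
    std::memset(data_, 0, sizeof(data_));
  }

 private:
  char data_[16];  // inline storage, analogous to m_data
  ValueTag tag_;   // analogous to the stored ValueType
};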
//===--------------------------------------------------------------------===//
// Circular Buffer Pool
//===--------------------------------------------------------------------===//
CircularBufferPool::CircularBufferPool()
    : head_(ATOMIC_VAR_INIT(0)), tail_(ATOMIC_VAR_INIT(0)) {
  PL_MEMSET(buffers_, 0, BUFFER_POOL_SIZE * sizeof(std::unique_ptr<LogBuffer>));
}
Example #13
void LibeventServer::StartServer() {
  if (FLAGS_socket_family == "AF_INET") {
    struct sockaddr_in sin;
    PL_MEMSET(&sin, 0, sizeof(sin));
    sin.sin_family = AF_INET;
    sin.sin_addr.s_addr = INADDR_ANY;
    sin.sin_port = htons(port_);

    int listen_fd;

    listen_fd = socket(AF_INET, SOCK_STREAM, 0);

    if (listen_fd < 0) {
      throw ConnectionException("Failed to create listen socket");
    }

    int conn_backlog = 12;
    int reuse = 1;
    setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));

    /* Initialize SSL listener connection */
    SSL_load_error_strings();
    SSL_library_init();

    if ((ssl_context = SSL_CTX_new(TLSv1_server_method())) == nullptr)
    {
      throw ConnectionException("Error creating SSL context.");
    }

    LOG_INFO("private key file path %s", private_key_file_.c_str());
    /*
     * Temporarily commented to pass tests START
    // register private key
    if (SSL_CTX_use_PrivateKey_file(ssl_context, private_key_file_.c_str(),
                                    SSL_FILETYPE_PEM) == 0)
    {
      SSL_CTX_free(ssl_context);
      throw ConnectionException("Error associating private key.\n");
    }
    LOG_INFO("certificate file path %s", certificate_file_.c_str());
    // register public key (certificate)
    if (SSL_CTX_use_certificate_file(ssl_context, certificate_file_.c_str(),
                                     SSL_FILETYPE_PEM) == 0)
    {
      SSL_CTX_free(ssl_context);
      throw ConnectionException("Error associating certificate.\n");
    }
    * Temporarily commented to pass tests END
    */
    if (bind(listen_fd, (struct sockaddr *) &sin, sizeof(sin)) < 0)
    {
      SSL_CTX_free(ssl_context);
      throw ConnectionException("Failed binding socket.");
    }

    if (listen(listen_fd, conn_backlog) < 0)
    {
      SSL_CTX_free(ssl_context);
      throw ConnectionException("Error listening onsocket.");
    }

    master_thread_->Start();

    LibeventServer::CreateNewConn(listen_fd, EV_READ | EV_PERSIST,
                                  master_thread_.get(), CONN_LISTENING);

    LOG_INFO("Listening on port %llu", (unsigned long long) port_);
    event_base_dispatch(base_);
    LibeventServer::GetConn(listen_fd)->CloseSocket();

    // Free events and event base
    event_free(LibeventServer::GetConn(listen_fd)->event);
    event_free(ev_stop_);
    event_free(ev_timeout_);
    event_base_free(base_);

    master_thread_->Stop();
    LOG_INFO("Server Closed");
  }

  // Other socket families are not supported yet
  else {
    throw ConnectionException("Unsupported socket family");
  }
}
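Stripped of the libevent and SSL plumbing, the core listener setup reduces to: zero the sockaddr_in with memset, then socket / setsockopt / bind / listen. A standalone sketch, where the port and backlog are parameters:

#include <arpa/inet.h>
#include <cstdint>
#include <cstring>
#include <netinet/in.h>
#include <stdexcept>
#include <sys/socket.h>
#include <unistd.h>

int CreateListenSocket(uint16_t port, int backlog) {
  struct sockaddr_in sin;
  std::memset(&sin, 0, sizeof(sin));
  sin.sin_family = AF_INET;
  sin.sin_addr.s_addr = INADDR_ANY;
  sin.sin_port = htons(port);

  int listen_fd = socket(AF_INET, SOCK_STREAM, 0);
  if (listen_fd < 0) throw std::runtime_error("Failed to create listen socket");

  int reuse = 1;
  setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));

  if (bind(listen_fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
    close(listen_fd);
    throw std::runtime_error("Failed binding socket");
  }
  if (listen(listen_fd, backlog) < 0) {
    close(listen_fd);
    throw std::runtime_error("Error listening on socket");
  }
  return listen_fd;
}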