Example #1
0
void LogManager::LogUpdate(cid_t commit_id, const ItemPointer &old_version,
		const ItemPointer &new_version) {
  // Nothing to record unless logging is switched on.
  if (!this->IsInLoggingMode()) {
    return;
  }

  auto &catalog_mgr = catalog::Manager::GetInstance();
  auto tile_group = catalog_mgr.GetTileGroup(new_version.block);
  auto backend_logger = this->GetBackendLogger();

  // Resolve the schema of the table that owns the updated tuple.
  auto schema = catalog_mgr.GetTableWithOid(tile_group->GetDatabaseId(),
                                            tile_group->GetTableId())
                    ->GetSchema();

  // Materialize the new tuple version so it can be serialized into the
  // log record.
  // TODO: can we avoid allocating the tuple on the heap each time?
  std::unique_ptr<storage::Tuple> materialized(
      new storage::Tuple(schema, true));
  const oid_t column_count = schema->GetColumnCount();
  for (oid_t column_itr = 0; column_itr < column_count; column_itr++) {
    materialized->SetValue(
        column_itr, tile_group->GetValue(new_version.offset, column_itr),
        backend_logger->GetVarlenPool());
  }

  // Build the UPDATE record and hand it off to the backend logger.
  std::unique_ptr<LogRecord> record(backend_logger->GetTupleRecord(
      LOGRECORD_TYPE_TUPLE_UPDATE, commit_id, tile_group->GetTableId(),
      tile_group->GetDatabaseId(), new_version, old_version,
      materialized.get()));

  backend_logger->Log(record.get());
}
Example #2
0
// Logs an INSERT of the tuple at `new_location` under `commit_id`.
// No-op when logging is disabled.
void LogManager::LogInsert(cid_t commit_id, const ItemPointer &new_location) {
  if (this->IsInLoggingMode()) {
    auto logger = this->GetBackendLogger();
    auto &manager = catalog::Manager::GetInstance();

    // BUG FIX: the tile group was previously looked up twice
    // (`new_tuple_tile_group` and `tile_group` held the same object) and
    // the two handles were used interchangeably. One lookup suffices.
    auto tile_group = manager.GetTileGroup(new_location.block);

    auto schema =
        manager.GetTableWithOid(tile_group->GetDatabaseId(),
                                tile_group->GetTableId())->GetSchema();

    // Materialize the inserted tuple so it can be serialized into the record.
    std::unique_ptr<storage::Tuple> tuple(new storage::Tuple(schema, true));
    for (oid_t col = 0; col < schema->GetColumnCount(); col++) {
      tuple->SetValue(col,
                      tile_group->GetValue(new_location.offset, col),
                      logger->GetVarlenPool());
    }

    // An INSERT has no prior version, hence INVALID_ITEMPOINTER for the
    // delete location.
    std::unique_ptr<LogRecord> record(logger->GetTupleRecord(
        LOGRECORD_TYPE_TUPLE_INSERT, commit_id, tile_group->GetTableId(),
        tile_group->GetDatabaseId(), new_location,
        INVALID_ITEMPOINTER, tuple.get()));
    logger->Log(record.get());
  }
}
Example #3
0
const std::string DataTable::GetInfo() const {
  std::ostringstream os;

  // os << "=====================================================\n";
  // os << "TABLE :\n";

  oid_t tile_group_count = GetTileGroupCount();
  // os << "Tile Group Count : " << tile_group_count << "\n";

  oid_t tuple_count = 0;
  oid_t table_id = 0;
  for (oid_t tile_group_itr = 0; tile_group_itr < tile_group_count;
       tile_group_itr++) {
    auto tile_group = GetTileGroup(tile_group_itr);
    table_id = tile_group->GetTableId();
    auto tile_tuple_count = tile_group->GetNextTupleSlot();

    // os << "Tile Group Id  : " << tile_group_itr
    //    << " Tuple Count : " << tile_tuple_count << "\n";
    // os << (*tile_group);

    tuple_count += tile_tuple_count;
  }

  os << "Table " << table_id << " Tuple Count :: " << tuple_count << "\n";

  // os << "=====================================================\n";

  return os.str();
}
Example #4
0
// just for debugging
void TupleRecord::Print() {
  std::cout << "#LOG TYPE:" << LogRecordTypeToString(GetType()) << "\n";
  std::cout << " #Db  ID:" << GetDatabaseOid() << "\n";
  std::cout << " #Tb  ID:" << GetTableId() << "\n";
  std::cout << " #Txn ID:" << GetTransactionId() << "\n";
  std::cout << " #Insert Location :" << GetInsertLocation().block;
  std::cout << " " << GetInsertLocation().offset << "\n";
  std::cout << " #Delete Location :" << GetDeleteLocation().block;
  std::cout << " " << GetDeleteLocation().offset << "\n";
  std::cout << "\n";
}
Example #5
0
void LogManager::LogDelete(cid_t commit_id, const ItemPointer &delete_location) {
  // Nothing to record unless logging is switched on.
  if (!this->IsInLoggingMode()) {
    return;
  }

  auto backend_logger = this->GetBackendLogger();
  auto &catalog_mgr = catalog::Manager::GetInstance();
  auto tile_group = catalog_mgr.GetTileGroup(delete_location.block);

  // A DELETE carries no tuple payload and no insert location, so the
  // insert slot is INVALID_ITEMPOINTER.
  std::unique_ptr<LogRecord> record(backend_logger->GetTupleRecord(
      LOGRECORD_TYPE_TUPLE_DELETE, commit_id, tile_group->GetTableId(),
      tile_group->GetDatabaseId(), INVALID_ITEMPOINTER, delete_location));

  backend_logger->Log(record.get());
}
Example #6
0
// Renders this record as a multi-line human-readable string: log type,
// database/table/transaction ids, and the insert/delete item pointers.
const std::string TupleRecord::GetInfo() const {
  std::ostringstream buffer;

  buffer << "#LOG TYPE:" << LogRecordTypeToString(GetType()) << "\n"
         << " #Db  ID:" << GetDatabaseOid() << "\n"
         << " #Tb  ID:" << GetTableId() << "\n"
         << " #Txn ID:" << GetTransactionId() << "\n"
         << " #Insert Location :" << GetInsertLocation().block << " "
         << GetInsertLocation().offset << "\n"
         << " #Delete Location :" << GetDeleteLocation().block << " "
         << GetDeleteLocation().offset << "\n"
         << "\n";

  return buffer.str();
}
Example #7
0
// Rewrites the tile group at `tile_group_offset` into this table's default
// partition layout, but only when the layout difference exceeds `theta`.
// Returns a non-owning pointer to the new tile group (kept alive by the
// catalog after AddTileGroup), or nullptr if the offset is out of range
// or the difference is below the threshold.
storage::TileGroup *DataTable::TransformTileGroup(
    const oid_t &tile_group_offset, const double &theta) {
  // First, check if the tile group is in this table
  if (tile_group_offset >= tile_groups_.GetSize()) {
    LOG_ERROR("Tile group offset not found in table : %u ", tile_group_offset);
    return nullptr;
  }

  auto tile_group_id =
      tile_groups_.FindValid(tile_group_offset, invalid_tile_group_id);

  // Get orig tile group from catalog
  auto &catalog_manager = catalog::Manager::GetInstance();
  auto tile_group = catalog_manager.GetTileGroup(tile_group_id);
  // How far the current layout diverges from the default partition.
  auto diff = tile_group->GetSchemaDifference(default_partition_);

  // Check threshold for transformation; below theta the rewrite is not
  // worth the copy cost.
  if (diff < theta) {
    return nullptr;
  }

  LOG_TRACE("Transforming tile group : %u", tile_group_offset);

  // Get the schema for the new transformed tile group
  auto new_schema =
      TransformTileGroupSchema(tile_group.get(), default_partition_);

  // Allocate space for the transformed tile group. It reuses the original
  // tile group's identity (database/table/tile-group ids) and capacity.
  std::shared_ptr<storage::TileGroup> new_tile_group(
      TileGroupFactory::GetTileGroup(
          tile_group->GetDatabaseId(), tile_group->GetTableId(),
          tile_group->GetTileGroupId(), tile_group->GetAbstractTable(),
          new_schema, default_partition_,
          tile_group->GetAllocatedTupleCount()));

  // Set the transformed tile group column-at-a-time
  SetTransformedTileGroup(tile_group.get(), new_tile_group.get());

  // Set the location of the new tile group
  // and clean up the orig tile group. The catalog now holds a shared_ptr,
  // so the raw pointer returned below stays valid after this function.
  catalog_manager.AddTileGroup(tile_group_id, new_tile_group);

  return new_tile_group.get();
}
Example #8
0
/* Read EIT segments from DVB-demuxer or file. {{{
 * Accumulates stdin data in a buffer, carves out complete SI sections,
 * CRC-checks each one and feeds valid sections to parseEIT(). */
static void readEventTables(void) {
    int r, n = 0;
    char buf[1<<12], *bhead = buf;

    /* The dvb demultiplexer simply outputs individual whole packets (good),
     * but reading captured data from a file needs re-chunking. (bad). */
    do {
        /* Need a complete section header before the packet can be sized.
         * n is always >= 0, so the cast to size_t is safe and silences
         * the signed/unsigned comparison. */
        if ((size_t)n < sizeof(struct si_tab))
            goto read_more;
        struct si_tab *tab = (struct si_tab *)bhead;
        if (GetTableId(tab) == 0)
            goto read_more;
        size_t l = sizeof(struct si_tab) + GetSectionLength(tab);
        /* Wait until the whole section has arrived. */
        if ((size_t)n < l)
            goto read_more;
        packet_count++;
        if (_dvb_crc32((uint8_t *)bhead, l) != 0) {
            /* data or length is wrong. skip bytewise. */
            //l = 1; // FIXME
            crcerr_count++;
        } else
            parseEIT(bhead, l);
        status();
        /* remove packet */
        n -= l;
        bhead += l;
        continue;
read_more:
        /* move remaining data to front of buffer */
        if (n > 0)
            memmove(buf, bhead, n);
        bhead = buf;
        /* fill with fresh data */
        r = read(STDIN_FILENO, buf+n, sizeof(buf)-n);
        /* BUG FIX: on a read error (r == -1) the old code still did
         * `n += r`, corrupting the byte count before the loop exited. */
        if (r > 0)
            n += r;
    } while (r > 0);
} /*}}}*/
Example #9
0
/*
 * Read EIT segments from DVB-demuxer (via `filters`) or from stdin.
 * Each packet is CRC-checked and dispatched to the parser matching its
 * PID. Loops until timeout (filter mode) or EOF/error (stdin mode),
 * then logs the Huffman expansion ratio. `format` is currently unused.
 */
void readEventTables(int format, cFilter* filters)
{
	int r = 0;
	u_char buf[1 << 12];
	size_t l;
	int compressed;
	int uncompressed;
	float ratio;
	int64_t lastReadTime = 0;

	if (filters)
	{
		// Cancel any pending alarm and start the read deadline clock.
		alarm(0);
		setTimeoutDeadline();
		lastReadTime = getTime();
	}

	while (1) {
		int pid = DVB_EIT_PID;
		if (filters)
		{
			if ((getTime() - finishTime) >= 0) {
				log_message(DEBUG, "timeout occurred");
				break;
			}
			r = 0;
			int fd = filters->Poll(5000, &pid);
			if (fd > 0)
			{
				r = read(fd, buf, sizeof(buf));
				if (r > 0)
				{
					lastReadTime = getTime();
				}
				else if (r == 0)
				{
					// Give the demuxer a 2s grace period before assuming
					// the stream has dried up.
					if ((getTime() - lastReadTime) > 2000)
					{
						log_message(DEBUG, "did not read any more");
						break;
					}
				}
				else
				{
					if (errno != ETIMEDOUT)
						log_message(ERROR, "pid read error %d", errno);
				}
			}
		}
		else
		{
			r = read(STDIN_FILENO, buf, sizeof(buf));
			// BUG FIX: r == 0 means EOF. The old `r < 0` test never
			// broke out at end-of-file, so the stale buffer contents
			// were re-processed in an infinite loop.
			if (r <= 0) {
				log_message(DEBUG, "did not read any more");
				break;
			}
		}
		packet_count++;
		struct si_tab *tab = (struct si_tab *) buf;
		if (GetTableId(tab) == 0) {
			continue;
		}
		l = sizeof(struct si_tab) + GetSectionLength(tab);
		// BUG FIX: `l` is size_t; passing it to a %d varargs slot is
		// undefined behavior on LP64 — cast explicitly.
		log_message(TRACE, "tableid %d len %d totallen %d.", GetTableId(tab), (int)l, r);
		if (!SI::CRC32::isValid((const char *) buf, r)) {
			log_message(ERROR, "data or length is wrong for pid %d (0x%x) tableid %d (0x%x), len %d. skipping packet.",pid, pid, GetTableId(tab), GetTableId(tab), r);
			crcerr_count++;
			if (pid == DVB_TDT_PID && GetTableId(tab) == 112)
			{
				// NOTE(review): intentionally empty? TDT packets are
				// otherwise ignored below as well — confirm.
			}
		} else {
			// Dispatch on the PID the poll reported.
			switch (pid)
			{
				case DVB_EIT_PID:
					parseEIT(buf, l);
					break;
				case DVB_SDT_PID:
					parseSDT(buf, l, &fullChannels);
					break;
				case DVB_TDT_PID:
					break;
			}
		}
		log_message(
				TRACE,
				"Status: %d pkts, %d prgms, %d updates, %d invalid, %d CRC err",
				packet_count, programme_count, update_count,
				invalid_date_count, crcerr_count);
	}
	uncompressed = get_stat("freesathuffman.uncompressed");
	compressed = get_stat("freesathuffman.compressed");
	// BUG FIX: the ratio was computed with integer division and then
	// stored in a float, truncating the fractional part before logging.
	ratio = (compressed > 0) ? (float)uncompressed / (float)compressed : 1.0f;
	log_message(DEBUG, "freesat huffman average expansion ratio: %f", ratio);
}