Example #1
//------------------------------------------------------------------------------------
int main(int argc, char **argv)
{
try {
   totaltime = clock();
   int iret=0,reading,nread;   // initialize iret in case there are no input files
   size_t nfile;

      // Title and description
   Title = PrgmName + ", part of the GPS ToolKit, Ver " + PrgmVers + ", Run ";
   PrgmEpoch.setLocalTime();
   Title += printTime(PrgmEpoch,"%04Y/%02m/%02d %02H:%02M:%02S");
   Title += "\n";
   cout << Title;

      // get command line
   iret=GetCommandLine(argc, argv);
   if(iret) return iret;

   PrevEpoch = CommonTime::BEGINNING_OF_TIME;

   // loop over input files - reading them twice
   Ninterps = 0;
   for(reading=1; reading <= 2; reading++) {
      nread = 0;
      for(nfile=0; nfile<PIC.InputObsName.size(); nfile++) {
         iret = ReadFile(nfile,reading);
         if(iret < 0) break;
         nread++;
      }
      // quit if error
      if(iret < 0) break;

      if(nread>0) {
         iret = AfterReadingFiles(reading);
         if(iret < 0) break;
      }

      CurrEpoch = CommonTime::BEGINNING_OF_TIME;
   }

   PIC.oflog << PrgmName << " did " << Ninterps << " interpolations" << endl;
   totaltime = clock()-totaltime;
   PIC.oflog << PrgmName << " timing: " << fixed << setprecision(3)
      << double(totaltime)/double(CLOCKS_PER_SEC) << " seconds.\n";
   cout << PrgmName << " timing: " << fixed << setprecision(3)
      << double(totaltime)/double(CLOCKS_PER_SEC) << " seconds.\n";

   PIC.oflog.close();

   return iret;
}
catch(FFStreamError& e) { cout << "FFStream exception:\n" << e << endl; }
catch(Exception& e) { cout << "GPSTK exception:\n" << e << endl; }
catch (...) { cout << "Unknown exception in main." << endl; }
}   // end main()
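A minimal, self-contained sketch of the clock()-based timing pattern this example uses (illustrative names, not GPSTk code):

#include <cstdio>
#include <ctime>

int main()
{
   std::clock_t totaltime = std::clock();

   volatile double sink = 0.0;                  // stand-in for the real work
   for (int i = 0; i < 1000000; ++i) sink = sink + i * 0.5;

   totaltime = std::clock() - totaltime;
      // CLOCKS_PER_SEC converts ticks to seconds; note clock() measures
      // CPU time, not wall time.
   std::printf("timing: %.3f seconds.\n",
               static_cast<double>(totaltime) / CLOCKS_PER_SEC);
   return 0;
}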
Example #2
   Epoch UTCTime::asTDB()
   {
       
      Epoch TT = this->asTT();

      struct MJDTime mjdTT;
      mjdTT.MJDint = (long)floor(TT.MJD());
      mjdTT.MJDfr = TT.MJD() - mjdTT.MJDint;

      double tdbtdt = 0.0;
      double tdbtdtdot = 0.0;
      long oldmjd = 0;
      long l = 0;

      while ( mjdTT.MJDfr >= 1.0 )
      {
         mjdTT.MJDint++;
         mjdTT.MJDfr--;
      }
      while ( mjdTT.MJDfr < 0.0 )
      {
         mjdTT.MJDint--;
         mjdTT.MJDfr++;
      }

      if ( mjdTT.MJDint != oldmjd )   // always true here: oldmjd starts at 0
      {
         oldmjd = mjdTT.MJDint;
         l = oldmjd + 2400001;

         tdbtdt = ctatv(l, 0.0);
         tdbtdtdot = ctatv(l, 0.5) - ctatv(l, -0.5);
      }

      double TDB_minus_TT = ( tdbtdt + (mjdTT.MJDfr - 0.5) * tdbtdtdot );

      Epoch T = TT;
      T += TDB_minus_TT;

      return T;
   }
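The two while loops above shift whole days between the integer and fractional parts of the MJD. A standalone sketch of that normalization, assuming a struct that mirrors the example's MJDTime:

#include <cassert>

struct MJDTime {        // mirrors the struct used in the example
   long   MJDint;       // integer Modified Julian Date
   double MJDfr;        // fractional day
};

// Move whole days between the parts until the fraction lies in [0.0, 1.0).
void normalize(MJDTime& t)
{
   while (t.MJDfr >= 1.0) { t.MJDint++; t.MJDfr -= 1.0; }
   while (t.MJDfr <  0.0) { t.MJDint--; t.MJDfr += 1.0; }
   assert(t.MJDfr >= 0.0 && t.MJDfr < 1.0);
}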
Example #3
uint32_t PagePoolOffsetAndEpochChunk::get_safe_offset_count(const Epoch& threshold) const {
  ASSERT_ND(is_sorted());
  OffsetAndEpoch dummy;
  dummy.safe_epoch_ = threshold.value();
  struct CompareEpoch {
    bool operator() (const OffsetAndEpoch& left, const OffsetAndEpoch& right) {
      return Epoch(left.safe_epoch_) < Epoch(right.safe_epoch_);
    }
  };
  const OffsetAndEpoch* result = std::lower_bound(chunk_, chunk_ + size_, dummy, CompareEpoch());
  ASSERT_ND(result);
  ASSERT_ND(result - chunk_ <= size_);
  return result - chunk_;
}
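The same lower_bound-on-a-sorted-array counting technique in isolation, with simplified stand-in types (not the FOEDUS ones):

#include <algorithm>
#include <cstdint>
#include <iostream>

struct OffsetAndEpoch { uint32_t offset; uint64_t safe_epoch; };

// Counts the leading entries whose safe_epoch is below threshold.
// Precondition: arr is sorted by safe_epoch ascending, as is_sorted() asserts above.
uint32_t safe_count(const OffsetAndEpoch* arr, uint32_t size, uint64_t threshold) {
  OffsetAndEpoch key{0, threshold};
  const OffsetAndEpoch* it = std::lower_bound(
    arr, arr + size, key,
    [](const OffsetAndEpoch& l, const OffsetAndEpoch& r) {
      return l.safe_epoch < r.safe_epoch;
    });
  return static_cast<uint32_t>(it - arr);  // index of first entry NOT below threshold
}

int main() {
  OffsetAndEpoch a[] = {{1, 10}, {2, 20}, {3, 30}};
  std::cout << safe_count(a, 3, 25) << "\n";  // prints 2
  return 0;
}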
Example #4
ErrorStack SavepointManagerPimpl::take_savepoint(Epoch new_global_durable_epoch) {
  while (get_saved_durable_epoch() < new_global_durable_epoch) {
    if (get_requested_durable_epoch() < new_global_durable_epoch) {
      // The repeated test below is a double-checked update; a lock scope between
      // the two checks appears to have been elided from this excerpt.
      if (get_requested_durable_epoch() < new_global_durable_epoch) {
        control_block_->requested_durable_epoch_ = new_global_durable_epoch.value();
        control_block_->save_wakeup_.signal();
      }
    }
    {
      uint64_t demand = control_block_->save_done_event_.acquire_ticket();
      if (get_saved_durable_epoch() >= new_global_durable_epoch) {
        break;
      }
      control_block_->save_done_event_.wait(demand);
    }
  }
  return kRetOk;
}
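The shape of this loop — request, then re-check the condition before actually blocking — can be sketched with standard primitives in place of FOEDUS's ticket-based event (DurableWaiter and its fields are hypothetical stand-ins):

#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>

// A writer thread advances saved_epoch; callers block until their target epoch
// is reached, mirroring take_savepoint()'s wait on save_done_event_.
struct DurableWaiter {
  std::mutex mu;
  std::condition_variable done;
  uint64_t saved_epoch = 0;
  uint64_t requested_epoch = 0;

  void wait_durable(uint64_t target) {
    std::unique_lock<std::mutex> lk(mu);
    if (requested_epoch < target) requested_epoch = target;  // ask the writer
    // Re-check before sleeping, just as the example re-checks
    // get_saved_durable_epoch() after acquiring its ticket.
    done.wait(lk, [&] { return saved_epoch >= target; });
  }

  void mark_saved(uint64_t epoch) {
    { std::lock_guard<std::mutex> lk(mu); saved_epoch = epoch; }
    done.notify_all();
  }
};

int main() {
  DurableWaiter w;
  std::thread writer([&] { w.mark_saved(42); });
  w.wait_durable(42);  // returns once epoch 42 is reported durable
  writer.join();
  return 0;
}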
Example #5
void ArrayPage::initialize_volatile_page(
  Epoch initial_epoch,
  StorageId storage_id,
  VolatilePagePointer page_id,
  uint16_t payload_size,
  uint8_t level,
  const ArrayRange& array_range) {
  ASSERT_ND(initial_epoch.is_valid());
  std::memset(this, 0, kPageSize);
  header_.init_volatile(page_id, storage_id, kArrayPageType);
  payload_size_ = payload_size;
  level_ = level;
  array_range_ = array_range;
  if (is_leaf()) {
    uint16_t records = get_leaf_record_count();
    for (uint16_t i = 0; i < records; ++i) {
      get_leaf_record(i, payload_size)->owner_id_.xct_id_.set_epoch(initial_epoch);
    }
  }
}
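The zero-the-frame-then-stamp-fields initialization above relies on the page being a trivial type. A reduced sketch of the same idea (Page here is hypothetical; the real ArrayPage layout is more involved):

#include <cstdint>
#include <cstring>
#include <type_traits>

struct Page {
  uint32_t storage_id;
  uint8_t  level;
  uint64_t record_epochs[64];  // one epoch slot per record, as in the leaf loop

  void initialize(uint64_t initial_epoch, uint32_t id, uint8_t lvl) {
    // memset(this, ...) is only well-defined because the type is trivial:
    static_assert(std::is_trivially_copyable<Page>::value, "trivial page required");
    std::memset(this, 0, sizeof(Page));
    storage_id = id;
    level = lvl;
    for (auto& e : record_epochs) e = initial_epoch;  // stamp every record slot
  }
};

int main() {
  Page p;
  p.initialize(1, 7, 0);
  return 0;
}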
Example #6
ErrorStack SavepointManagerPimpl::take_savepoint_after_snapshot(
  snapshot::SnapshotId new_snapshot_id,
  Epoch new_snapshot_epoch) {
  while (get_latest_snapshot_id() != new_snapshot_id) {
    {
      control_block_->new_snapshot_id_ = new_snapshot_id;
      control_block_->new_snapshot_epoch_ = new_snapshot_epoch.value();
      control_block_->save_wakeup_.signal();
    }
    {
      uint64_t demand = control_block_->save_done_event_.acquire_ticket();
      if (get_latest_snapshot_id() != new_snapshot_id) {
        control_block_->save_done_event_.wait(demand);
      }
    }
  }
  ASSERT_ND(get_latest_snapshot_id() == new_snapshot_id);
  ASSERT_ND(get_latest_snapshot_epoch() == new_snapshot_epoch);
  return kRetOk;
}
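This is the same shape as Example #4: signal the request, then re-check the condition after acquiring the ticket and block only if it still does not hold, so a completion racing with the check is never missed.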
Example #7
ErrorStack LogMapper::handle_process_buffer(const fs::DirectIoFile &file, IoBufStatus* status) {
  const Epoch base_epoch = parent_.get_base_epoch();  // only for assertions
  const Epoch until_epoch = parent_.get_valid_until_epoch();  // only for assertions

  // Much temporary memory is used within this method, and it is completely
  // cleared out on every call.
  clear_storage_buckets();

  char* buffer = reinterpret_cast<char*>(io_buffer_.get_block());
  status->more_in_the_file_ = false;
  for (; status->cur_inbuf_ < status->end_inbuf_aligned_; ++processed_log_count_) {
    // Note: The loop here must be a VERY tight loop, iterated over every single log entry!
    // In most cases, we should be just calling bucket_log().
    const log::LogHeader* header
      = reinterpret_cast<const log::LogHeader*>(buffer + status->cur_inbuf_);
    ASSERT_ND(header->log_length_ > 0);
    ASSERT_ND(status->buf_infile_aligned_ != 0 || status->cur_inbuf_ != 0
      || header->get_type() == log::kLogCodeEpochMarker);  // file starts with marker
    // we must be starting from an epoch marker.
    ASSERT_ND(!status->first_read_ || header->get_type() == log::kLogCodeEpochMarker);
    ASSERT_ND(header->get_kind() == log::kRecordLogs
      || header->get_type() == log::kLogCodeEpochMarker
      || header->get_type() == log::kLogCodeFiller);

    if (UNLIKELY(header->log_length_ + status->cur_inbuf_ > status->end_inbuf_aligned_)) {
      // if a log goes beyond this read, stop processing here and read from that offset again.
      // this is simpler than glue-ing the fragment. This happens just once per 64MB read,
      // so not a big waste.
      if (status->to_infile(status->cur_inbuf_ + header->log_length_)
          > status->size_infile_aligned_) {
        // but it never spans two files. something is wrong.
        LOG(ERROR) << "inconsistent end of log entry. offset="
          << status->to_infile(status->cur_inbuf_)
          << ", file=" << file << ", log header=" << *header;
        return ERROR_STACK_MSG(kErrorCodeSnapshotInvalidLogEnd, file.get_path().c_str());
      }
      status->next_infile_ = status->to_infile(status->cur_inbuf_);
      status->more_in_the_file_ = true;
      break;
    } else if (UNLIKELY(header->get_type() == log::kLogCodeEpochMarker)) {
      // skip epoch marker
      const log::EpochMarkerLogType *marker =
        reinterpret_cast<const log::EpochMarkerLogType*>(header);
      ASSERT_ND(header->log_length_ == sizeof(log::EpochMarkerLogType));
      ASSERT_ND(marker->log_file_ordinal_ == status->cur_file_ordinal_);
      ASSERT_ND(marker->log_file_offset_ == status->to_infile(status->cur_inbuf_));
      ASSERT_ND(marker->new_epoch_ >= marker->old_epoch_);
      ASSERT_ND(!base_epoch.is_valid() || marker->new_epoch_ >= base_epoch);
      ASSERT_ND(marker->new_epoch_ <= until_epoch);
      if (status->first_read_) {
        ASSERT_ND(!base_epoch.is_valid()
          || marker->old_epoch_ <= base_epoch  // otherwise we skipped some logs
          || marker->old_epoch_ == marker->new_epoch_);  // the first marker (old==new) is ok
        status->first_read_ = false;
      } else {
        ASSERT_ND(!base_epoch.is_valid() || marker->old_epoch_ >= base_epoch);
      }
    } else if (UNLIKELY(header->get_type() == log::kLogCodeFiller)) {
      // skip filler log
    } else {
      bool bucketed = bucket_log(header->storage_id_, status->cur_inbuf_);
      if (UNLIKELY(!bucketed)) {
        // need to add a new bucket
        bool added = add_new_bucket(header->storage_id_);
        if (added) {
          bucketed = bucket_log(header->storage_id_, status->cur_inbuf_);
          ASSERT_ND(bucketed);
        } else {
          // runs out of bucket_memory. have to flush now.
          flush_all_buckets();
          added = add_new_bucket(header->storage_id_);
          ASSERT_ND(added);
          bucketed = bucket_log(header->storage_id_, status->cur_inbuf_);
          ASSERT_ND(bucketed);
        }
      }
    }

    status->cur_inbuf_ += header->log_length_;
  }

  // This fixes Bug #100. When a full mapper buffer exactly ends with a complete log,
  // we must keep reading; previously we didn't, and stopped before the end of the file.
  if (status->cur_inbuf_ == status->end_inbuf_aligned_
      && status->end_infile_ > status->to_infile(status->cur_inbuf_)) {
    LOG(INFO) << "Hooray, a full mapper buffer exactly ends with a complete log record. rare!";
    status->next_infile_ = status->to_infile(status->cur_inbuf_);
    status->more_in_the_file_ = true;
  }

  // bucketed all logs. now let's send them out to the reducers
  flush_all_buckets();
  return kRetOk;
}
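The core of the loop above is walking variable-length records through a byte buffer by their self-declared lengths. A reduced, runnable sketch (RecordHeader is a hypothetical stand-in for log::LogHeader):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Each record begins with its own byte length, so the scan jumps length bytes at a time.
struct RecordHeader { uint32_t length; uint16_t type; };

void scan(const char* buf, size_t end) {
  size_t cur = 0;
  while (cur < end) {
    RecordHeader h;
    std::memcpy(&h, buf + cur, sizeof(h));  // copy out to avoid unaligned reads
    if (h.length == 0 || cur + h.length > end) {
      // Record spans past this read: stop and resume from cur later,
      // as the mapper does with next_infile_ / more_in_the_file_.
      break;
    }
    std::cout << "record type " << h.type << " at offset " << cur << "\n";
    cur += h.length;
  }
}

int main() {
  std::vector<char> buf(32, 0);
  RecordHeader a{12, 1}, b{20, 2};
  std::memcpy(buf.data(), &a, sizeof a);
  std::memcpy(buf.data() + 12, &b, sizeof b);
  scan(buf.data(), buf.size());
  return 0;
}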
Example #8
void FailoverUnitProxy::ChangeReplicatorRoleAsyncOperation::OnStart(AsyncOperationSPtr const & thisSPtr)
{
    Epoch primaryEpoch;
    FABRIC_REPLICA_ROLE newRole;
    ProxyOutgoingMessageUPtr rwStatusNotificationMsg;

    {
        AcquireExclusiveLock grab(owner_.lock_);

        TraceBeforeStart(grab);

        ASSERT_IF(owner_.failoverUnitDescription_.CurrentConfigurationEpoch == Epoch::InvalidEpoch(), "Epoch passed to replicator must be valid.");

        primaryEpoch = owner_.failoverUnitDescription_.CurrentConfigurationEpoch.ToPrimaryEpoch();

        if(owner_.replicatorState_ == ReplicatorStates::Closed)
        {
            bool didComplete = thisSPtr->TryComplete(thisSPtr, ErrorCodeValue::Success);
            ASSERT_IFNOT(didComplete, "Failed to complete ChangeReplicatorRoleAsyncOperation");

            return;
        }

        ASSERT_IFNOT(owner_.serviceDescription_.IsStateful, "Attempt to change role on an FUP that is not hosting a stateful service");
        
        if (owner_.currentReplicatorRole_ == owner_.replicaDescription_.CurrentConfigurationRole)
        {
            bool didComplete = thisSPtr->TryComplete(thisSPtr, ErrorCodeValue::Success);
            ASSERT_IFNOT(didComplete, "Failed to complete ChangeReplicatorRoleAsyncOperation");

            return;
        }

        ASSERT_IFNOT(owner_.replicatorState_ == ReplicatorStates::Opened, "Attempt to change role on a replicator not in the Opened state.");
       
        ASSERT_IF(owner_.replicatorOperationManager_ == nullptr, "Replicator operation manager expected but not available.");

        if (!TryStartUserApi(grab, wformatString(owner_.replicaDescription_.CurrentConfigurationRole), thisSPtr))
        {
            return;
        }

        previousRole_ = owner_.currentReplicatorRole_;
        newRole = ReplicaRole::ConvertToPublicReplicaRole(owner_.replicaDescription_.CurrentConfigurationRole);

        if (owner_.replicaDescription_.CurrentConfigurationRole == ReplicaRole::None)
        {
            rwStatusNotificationMsg = owner_.ComposeReadWriteStatusRevokedNotification(grab);
        }
    }

    owner_.SendReadWriteStatusRevokedNotification(move(rwStatusNotificationMsg));

    // Start changing the replicator role

    AsyncOperationSPtr operation = owner_.replicator_->BeginChangeRole(
        primaryEpoch.ToPublic(),
        newRole,
        [this] (AsyncOperationSPtr const & operation) { this->ChangeRoleCompletedCallback(operation); },
        thisSPtr);

    TryContinueUserApi(operation);

    if (operation->CompletedSynchronously)
    {
        FinishChangeRole(operation);
    }
}
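Note the pattern: rwStatusNotificationMsg is composed while holding owner_.lock_, but SendReadWriteStatusRevokedNotification runs only after the lock scope closes. A minimal sketch of that compose-under-lock, send-outside-lock idea (names are illustrative):

#include <iostream>
#include <mutex>
#include <string>

std::mutex g_lock;
std::string g_state = "revoked";

void send(const std::string& msg) {  // may block; must not run under the lock
  std::cout << "sending: " << msg << "\n";
}

void notify() {
  std::string msg;
  {
    std::lock_guard<std::mutex> grab(g_lock);
    msg = "status:" + g_state;       // compose under the lock...
  }
  send(msg);                          // ...send after releasing it
}

int main() { notify(); return 0; }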
Example #9
already_AddRefed<nsIRunnable>
LabeledEventQueue::GetEvent(EventPriority* aPriority,
                            const MutexAutoLock& aProofOfLock)
{
  if (mEpochs.IsEmpty()) {
    return nullptr;
  }

  Epoch epoch = mEpochs.FirstElement();
  if (!epoch.IsLabeled()) {
    EpochQueueEntry& first = mUnlabeled.FirstElement();
    if (!IsReadyToRun(first.mRunnable, nullptr)) {
      return nullptr;
    }

    PopEpoch();
    EpochQueueEntry entry = mUnlabeled.Pop();
    MOZ_ASSERT(entry.mEpochNumber == epoch.mEpochNumber);
    MOZ_ASSERT(entry.mRunnable.get());
    return entry.mRunnable.forget();
  }

  if (!sCurrentSchedulerGroup) {
    return nullptr;
  }

  // Move visible tabs to the front of the queue. The mAvoidVisibleTabCount field
  // prevents us from preferentially processing events from visible tabs twice in
  // a row. This scheme is designed to prevent starvation.
  if (TabChild::HasVisibleTabs() && mAvoidVisibleTabCount <= 0) {
    for (auto iter = TabChild::GetVisibleTabs().ConstIter();
         !iter.Done(); iter.Next()) {
      SchedulerGroup* group = iter.Get()->GetKey()->TabGroup();
      if (!group->isInList() || group == sCurrentSchedulerGroup) {
        continue;
      }

      // For each visible tab we move to the front of the queue, we have to
      // process two SchedulerGroups (the visible tab and another one, presumably
      // a background group) before we prioritize visible tabs again.
      mAvoidVisibleTabCount += 2;

      // We move |group| right before sCurrentSchedulerGroup and then set
      // sCurrentSchedulerGroup to group.
      MOZ_ASSERT(group != sCurrentSchedulerGroup);
      group->removeFrom(*sSchedulerGroups);
      sCurrentSchedulerGroup->setPrevious(group);
      sCurrentSchedulerGroup = group;
    }
  }

  // Iterate over each SchedulerGroup once, starting at sCurrentSchedulerGroup.
  SchedulerGroup* firstGroup = sCurrentSchedulerGroup;
  SchedulerGroup* group = firstGroup;
  do {
    mAvoidVisibleTabCount--;

    RunnableEpochQueue& queue = group->GetQueue(mPriority);

    if (queue.IsEmpty()) {
      // This can happen if |group| is in a different LabeledEventQueue than |this|.
      group = NextSchedulerGroup(group);
      continue;
    }

    EpochQueueEntry& first = queue.FirstElement();
    if (first.mEpochNumber == epoch.mEpochNumber &&
        IsReadyToRun(first.mRunnable, group)) {
      sCurrentSchedulerGroup = NextSchedulerGroup(group);

      PopEpoch();

      if (group->DequeueEvent() == SchedulerGroup::NoLongerQueued) {
        // Now we can take group out of sSchedulerGroups.
        if (sCurrentSchedulerGroup == group) {
          // Since we changed sCurrentSchedulerGroup above, we'll only get here
          // if |group| was the only element in sSchedulerGroups. In that case
          // set sCurrentSchedulerGroup to null.
          MOZ_ASSERT(group->getNext() == nullptr);
          MOZ_ASSERT(group->getPrevious() == nullptr);
          sCurrentSchedulerGroup = nullptr;
        }
        group->removeFrom(*sSchedulerGroups);
      }
      EpochQueueEntry entry = queue.Pop();
      return entry.mRunnable.forget();
    }

    group = NextSchedulerGroup(group);
  } while (group != firstGroup);

  return nullptr;
}
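The do/while over SchedulerGroups is a round-robin scan that starts at the saved cursor, visits each group at most once, and moves the cursor past the winner. A compact sketch of that shape (integer queues stand in for the groups):

#include <cstddef>
#include <iostream>
#include <vector>

// Returns the first positive entry at or after `current`, wrapping around;
// on success, current advances past the winner so the next scan starts there.
int pick(const std::vector<int>& queues, size_t& current) {
  const size_t first = current;
  size_t g = first;
  do {
    if (queues[g] > 0) {
      current = (g + 1) % queues.size();
      return queues[g];
    }
    g = (g + 1) % queues.size();
  } while (g != first);
  return -1;  // nothing runnable this pass
}

int main() {
  std::vector<int> q = {0, 0, 7, 3};
  size_t cur = 0;
  std::cout << pick(q, cur) << "\n";  // 7
  std::cout << pick(q, cur) << "\n";  // 3 (scan resumed after the previous winner)
  return 0;
}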
Example #10
//------------------------------------------------------------------------------------
int main(int argc, char **argv)
{
try {
   int i,n;
   string line,line2,id,rawfile,ddrfile,outfile,site1,site2,sat1,sat2;
   ifstream instr;
   ofstream outstr;
   Epoch CurrEpoch;

   clock_t totaltime = clock();
      // print title and current time to screen
   CurrEpoch.setLocalTime();
   cout << "ddmerge version " << Version << " run " << CurrEpoch << endl;

   if(argc < 4) {
      cout << "Usage: ddmerge <RAWfile> <DDRfile> <output_file>" << endl;
      cout << "    where the two input file are output of DDBase" << endl;
      cout << " ddmerge will take elevation and azimuth data from the RAW" << endl;
      cout << " file and append it to the appropriate line in the DDR file" << endl;
      cout << " and output to the output file" << endl;
      return -1;
   }
   rawfile = string(argv[1]);
   ddrfile = string(argv[2]);
   outfile = string(argv[3]);

      // this must be binary or you get the wrong answers.
   instr.open(rawfile.c_str(),ios::in|ios::binary);
   if(!instr.is_open()) {
      cout << "Failed to open input file " << rawfile << endl;
      return -1;
   }
   cout << "Opened input file " << rawfile << endl;
   instr.exceptions(fstream::failbit);

   n = 0;
   while(1) {
      try {
         //instr.read((char *)p2, 1); // get one char
         instr.getline(buffer,BUFF_SIZE);
      }
      catch(exception& e) {}
      if(instr.bad()) cout << "Read error" << endl;
      if(instr.eof()) { cout << "Reached EOF" << endl; break; }

      n++;
      line = string(buffer);
      stripTrailing(line,'\r');
      if(word(line,0) == "RAW") {
         id = word(line,1);
         if(id != "site") {
            id += " " + word(line,2);
            if(Chunklist.find(id) == Chunklist.end()) {
               Chunk newchunk;
               newchunk.filepos = instr.tellg();
               newchunk.line = line;
               Chunklist[id] = newchunk;
            }
         }
      }
   }

   instr.clear();
   instr.close();

   // you must use pointers to the streams because storing a stream inside an object
   // that goes into an STL container leads to weird errors...try it.
   filepointer = new ifstream[Chunklist.size()];
   if(!filepointer) { cout << "failed to allocate filepointers" << endl; return -1; }

   map<string,Chunk>::iterator it;
   for(i=0,it=Chunklist.begin(); it != Chunklist.end(); i++,it++) {
      filepointer[i].open(rawfile.c_str(), ios::in|ios::binary);
      if(!filepointer[i].is_open()) {
         cout << "Failed to open chunk " << i << endl;
         break;
      }
      filepointer[i].exceptions(fstream::failbit);
      it->second.fpindex = i;
      filepointer[i].seekg(it->second.filepos);
   }

   outstr.open(outfile.c_str(), ios::out);
   if(!outstr.is_open()) {
      cout << "Failed to open output file " << outfile << endl;
      return -1;
   }
   cout << "Opened output file " << outfile << endl;
   outstr.exceptions(fstream::failbit);

   //for(it=Chunklist.begin(); it != Chunklist.end(); it++) {
   //   if(! it->second.status) continue;
   //   outstr << "Chunk " << it->first << endl;
   //   outstr << it->second.Update() << endl;
   //   outstr << it->second.Update() << endl;
   //   outstr << endl;
   //}

   instr.open(ddrfile.c_str());
   if(!instr.is_open()) {
      cout << "Failed to open input file " << ddrfile << endl;
      return -1;
   }
   cout << "Opened input file " << ddrfile << endl;
   instr.exceptions(fstream::failbit);

   n = 0;
   while(1) {
      try { instr.getline(buffer,BUFF_SIZE); }
      catch(exception& e) {} //cout << "exception: " << e.what() << endl;
      if(instr.bad()) { cout << "Read error" << endl; break; }
      if(instr.eof()) { cout << "Reached EOF" << endl; break; }

      n++;
      line = string(buffer);
      stripTrailing(line,'\r');
      if(word(line,0) == "RES") {
         site1 = word(line,1);
         if(site1 != "site") {
            site2 = word(line,2);
            sat1 = word(line,3);
            sat2 = word(line,4);
            id = word(line,7);        // TD different for MJD
            n = asInt(id);
            outstr << line;                          // endl below
            // find the corresponding lines in the chunks
            id = site1 + " " + sat1;
            line2 = Chunklist[id].find(n);
            if(Chunklist[id].status) outstr
               << " " << rightJustify(word(line2,11),5)
               << " " << rightJustify(word(line2,12),6);
            id = site1 + " " + sat2;
            line2 = Chunklist[id].find(n);
            if(Chunklist[id].status) outstr
               << " " << rightJustify(word(line2,11),5)
               << " " << rightJustify(word(line2,12),6);
            id = site2 + " " + sat1;
            line2 = Chunklist[id].find(n);
            if(Chunklist[id].status) outstr
               << " " << rightJustify(word(line2,11),5)
               << " " << rightJustify(word(line2,12),6);
            id = site2 + " " + sat2;
            line2 = Chunklist[id].find(n);
            if(Chunklist[id].status) outstr
               << " " << rightJustify(word(line2,11),5)
               << " " << rightJustify(word(line2,12),6);
         }
         else outstr
            << line << "  EL11   AZ11  EL12   AZ12  EL21   AZ21  EL22   AZ22";
      }
      else outstr << line << endl
         << "# ddmerge (v." << Version << ") " << rawfile
         << " " << ddrfile << " " << outfile << " Run " << CurrEpoch;
      outstr << endl;
   }

   instr.close();
   delete[] filepointer;

      // compute run time
   totaltime = clock()-totaltime;
   cout << "ddmerge timing: " << fixed << setprecision(3)
      << double(totaltime)/double(CLOCKS_PER_SEC) << " seconds." << endl;

   return 0;
}
catch(Exception& e) {
   cout << "GPSTk Exception : " << e;
}
catch (...) {
   cout << "Unknown error in ddmerge.  Abort." << endl;
}

      // close files
   return -1;
}   // end main()
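The Chunk map above is a two-pass index: the first pass records tellg() positions keyed by id, the second seeks straight back to them. A minimal sketch of that technique ("data.txt" and the RAW prefix are illustrative):

#include <fstream>
#include <iostream>
#include <map>
#include <string>

int main()
{
   std::map<std::string, std::streampos> index;
   std::ifstream in("data.txt", std::ios::in | std::ios::binary);
   if (!in) { std::cout << "Failed to open data.txt" << std::endl; return -1; }

      // pass 1: remember where each keyed section starts
   std::string line;
   while (std::getline(in, line)) {
      if (line.rfind("RAW ", 0) == 0)          // this line starts a keyed chunk
         index[line.substr(4)] = in.tellg();   // position just after this line
   }

   in.clear();                                  // clear EOF before seeking
      // pass 2: jump straight back to each chunk
   for (auto& kv : index) {
      in.seekg(kv.second);
      std::getline(in, line);
      std::cout << kv.first << " -> " << line << std::endl;
   }
   return 0;
}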
Example #11
ErrorStack MetaLogger::truncate_non_durable(Epoch saved_durable_epoch) {
  ASSERT_ND(saved_durable_epoch.is_valid());
  const uint64_t from_offset = control_block_->oldest_offset_;
  const uint64_t to_offset = control_block_->durable_offset_;
  ASSERT_ND(from_offset <= to_offset);
  LOG(INFO) << "Truncating non-durable meta logs, if any. Right now meta logger's"
    << " oldest_offset_=" << from_offset
    << ", (meta logger's local) durable_offset_=" << to_offset
    << ", global saved_durable_epoch=" << saved_durable_epoch;
  ASSERT_ND(current_file_->is_opened());

  // Currently, we need to read everything from oldest_offset_ to see from where
  // we might have non-durable logs.
  // TASK(Hideaki) We should change SavepointManager to emit globally_durable_offset_. later.
  const uint64_t read_size = to_offset - from_offset;
  if (read_size > 0) {
    memory::AlignedMemory buffer;
    buffer.alloc(read_size, 1U << 12, memory::AlignedMemory::kNumaAllocOnnode, 0);
    WRAP_ERROR_CODE(current_file_->seek(from_offset, fs::DirectIoFile::kDirectIoSeekSet));
    WRAP_ERROR_CODE(current_file_->read_raw(read_size, buffer.get_block()));

    char* buf = reinterpret_cast<char*>(buffer.get_block());
    uint64_t cur = 0;
    uint64_t first_non_durable_at = read_size;
    while (cur < read_size) {
      log::BaseLogType* entry = reinterpret_cast<log::BaseLogType*>(buf + cur);
      ASSERT_ND(entry->header_.get_kind() != log::kRecordLogs);
      const uint32_t log_length = entry->header_.log_length_;
      log::LogCode type = entry->header_.get_type();
      ASSERT_ND(type != log::kLogCodeInvalid);
      if (type == log::kLogCodeFiller || type == log::kLogCodeEpochMarker) {
        // Skip filler/marker. These don't have XID
      } else {
        Epoch epoch = entry->header_.xct_id_.get_epoch();
        if (epoch <= saved_durable_epoch) {
          // Mostly this case.
        } else {
          // Ok, found a non-durable entry!
          const uint64_t raw_offset = from_offset + cur;
          on_non_durable_meta_log_found(&entry->header_, saved_durable_epoch, raw_offset);
          ASSERT_ND(first_non_durable_at == read_size || first_non_durable_at < cur);
          first_non_durable_at = std::min(first_non_durable_at, cur);
          // We can break here, but let's read all and warn all of them. meta log should be tiny
        }
      }
      cur += log_length;
    }

    if (first_non_durable_at < read_size) {
      // NOTE: This happens. Although the meta logger itself immediately flushes all logs
      // to durable storages, the global durable_epoch is min(all_logger_durable_epoch).
      // Thus, when the user didn't invoke wait_on_commit, we might have to discard
      // some meta logs that are "durable by itself" but "non-durable regarding the whole database"
      LOG(WARNING) << "Found some meta logs that are not in durable epoch (" << saved_durable_epoch
        << "). We will truncate non-durable regions. new durable_offset=" << first_non_durable_at;
      control_block_->durable_offset_ = first_non_durable_at;
      engine_->get_savepoint_manager()->change_meta_logger_durable_offset(first_non_durable_at);
    }
  } else {
    // Even if all locally-durable regions are globally durable,
    // there still could be locally-non-durable regions (=not yet fsynced).
    // Will truncate such regions.
    LOG(ERROR) << "Meta log file has a non-durable region. Probably there"
      << " was a crash. Will truncate";
  }

  const uint64_t new_offset = control_block_->durable_offset_;
  if (new_offset < current_file_->get_current_offset()) {
    LOG(WARNING) << "Truncating meta log file to " << new_offset
      << " from " << current_file_->get_current_offset();
    WRAP_ERROR_CODE(current_file_->truncate(new_offset, true));
  }
  WRAP_ERROR_CODE(current_file_->seek(new_offset, fs::DirectIoFile::kDirectIoSeekSet));
  return kRetOk;
}