Example #1
File: repo.cpp Project: DirektSPEED/hhvm
void Repo::saveGlobalData(GlobalData newData) {
  s_globalData = newData;

  auto const repoId = repoIdForNewUnit(UnitOrigin::File);
  RepoStmt stmt(*this);
  stmt.prepare(
    folly::format(
      "INSERT INTO {} VALUES(@data);", table(repoId, "GlobalData")
    ).str()
  );
  RepoTxn txn(*this);
  RepoTxnQuery query(txn, stmt);
  BlobEncoder encoder;
  encoder(s_globalData);
  query.bindBlob("@data", encoder, /* static */ true);
  query.exec();

  // TODO(#3521039): we could just put the litstr table in the same
  // blob as the above and delete LitstrRepoProxy.
  LitstrTable::get().insert(txn, UnitOrigin::File);

  txn.commit();
}
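The statement text above is built with folly::format, which substitutes each {} placeholder and whose .str() materializes a std::string. A minimal standalone sketch of that call, assuming only folly is available (the table name is hypothetical):

#include <folly/Format.h>
#include <string>

// Builds the same kind of INSERT statement as saveGlobalData() above.
std::string makeInsertSql(const std::string& tableName) {
  return folly::format("INSERT INTO {} VALUES(@data);", tableName).str();
}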
Example #2
void UnitRepoProxy::GetUnitLitstrsStmt
                  ::get(UnitEmitter& ue) {
  RepoTxn txn(m_repo);
  if (!prepared()) {
    std::stringstream ssSelect;
    ssSelect << "SELECT litstrId,litstr FROM "
             << m_repo.table(m_repoId, "UnitLitstr")
             << " WHERE unitSn == @unitSn ORDER BY litstrId ASC;";
    txn.prepare(*this, ssSelect.str());
  }
  RepoTxnQuery query(txn, *this);
  query.bindInt64("@unitSn", ue.m_sn);
  do {
    query.step();
    if (query.row()) {
      Id litstrId;        /**/ query.getId(0, litstrId);
      StringData* litstr; /**/ query.getStaticString(1, litstr);
      Id id UNUSED = ue.mergeUnitLitstr(litstr);
      assert(id == litstrId);
    }
  } while (!query.done());
  txn.commit();
}
Example #3
 // Transitions us to the rollback state and records in replInfo
 // the fact that we have started rollback.
 void startRollback(
     GTID idToRollbackTo,
     uint64_t rollbackPointTS,
     uint64_t rollbackPointHash
     ) 
 {
     incRBID();
     // now that we are settled, we have to take care of the GTIDManager
     // and the repl info thread.
     // We need to reset the state of the GTIDManager to the point
     // we intend to rollback to, and we need to make sure that the repl info thread
     // has captured this information.
     theReplSet->gtidManager->resetAfterInitialSync(
         idToRollbackTo,
         rollbackPointTS,
         rollbackPointHash
         );
     // now force an update of the repl info thread
     theReplSet->forceUpdateReplInfo();
     Client::Transaction txn(DB_SERIALIZABLE);
     updateRollbackStatus(BSON("_id" << ROLLBACK_ID << "state" << RB_STARTING
         << "info" << "starting rollback"));
     txn.commit(DB_TXN_NOSYNC);
 }
Example #4
File: repo.cpp Project: amit2014/hhvm
bool Repo::createSchema(int repoId) {
  try {
    RepoTxn txn(*this);
    {
      std::stringstream ssCreate;
      ssCreate << "CREATE TABLE " << table(repoId, "magic")
               << "(product TEXT);";
      txn.exec(ssCreate.str());

      std::stringstream ssInsert;
      ssInsert << "INSERT INTO " << table(repoId, "magic")
               << " VALUES('" << kMagicProduct << "');";
      txn.exec(ssInsert.str());
    }
    {
      std::stringstream ssCreate;
      ssCreate << "CREATE TABLE " << table(repoId, "writable")
               << "(canary INTEGER);";
      txn.exec(ssCreate.str());
    }
    {
      std::stringstream ssCreate;
      ssCreate << "CREATE TABLE " << table(repoId, "FileMd5")
               << "(path TEXT, md5 BLOB, UNIQUE(path, md5));";
      txn.exec(ssCreate.str());
    }
    m_urp.createSchema(repoId, txn);
    m_pcrp.createSchema(repoId, txn);
    m_frp.createSchema(repoId, txn);
    m_lsrp.createSchema(repoId, txn);

    txn.commit();
  } catch (RepoExc& re) {
    return true;
  }
  return false;
}
Example #5
RepoStatus
UnitRepoProxy::GetSourceLocTabStmt::get(int64_t unitSn,
                                        SourceLocTable& sourceLocTab) {
  try {
    RepoTxn txn(m_repo);
    if (!prepared()) {
      std::stringstream ssSelect;
      ssSelect << "SELECT pastOffset,line0,char0,line1,char1 FROM "
               << m_repo.table(m_repoId, "UnitSourceLoc")
               << " WHERE unitSn == @unitSn"
                  " ORDER BY pastOffset ASC;";
      txn.prepare(*this, ssSelect.str());
    }
    RepoTxnQuery query(txn, *this);
    query.bindInt64("@unitSn", unitSn);
    do {
      query.step();
      if (!query.row()) {
        return RepoStatus::error;
      }
      Offset pastOffset;
      query.getOffset(0, pastOffset);
      SourceLoc sLoc;
      query.getInt(1, sLoc.line0);
      query.getInt(2, sLoc.char0);
      query.getInt(3, sLoc.line1);
      query.getInt(4, sLoc.char1);
      SourceLocEntry entry(pastOffset, sLoc);
      sourceLocTab.push_back(entry);
    } while (!query.done());
    txn.commit();
  } catch (RepoExc& re) {
    return RepoStatus::error;
  }
  return RepoStatus::success;
}
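Note the contrast with Example #2: there a missing row is simply skipped and stepping continues, while here an absent row yields RepoStatus::error, and any RepoExc is likewise converted to an error status rather than propagated.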
Example #6
void UnitRepoProxy::GetUnitArrayTypeTableStmt
                  ::get(UnitEmitter& ue) {
  RepoTxn txn(m_repo);
  if (!prepared()) {
    std::stringstream ssSelect;
    ssSelect << "SELECT unitSn, arrayTypeTable FROM "
             << m_repo.table(m_repoId, "UnitArrayTypeTable")
             << " WHERE unitSn == @unitSn;";
    txn.prepare(*this, ssSelect.str());
  }

  RepoTxnQuery query(txn, *this);
  query.bindInt64("@unitSn", ue.m_sn);

  query.step();
  assertx(query.row());
  BlobDecoder dataBlob = query.getBlob(1);
  dataBlob(ue.m_arrayTypeTable);
  dataBlob.assertDone();
  query.step();
  assertx(query.done());

  txn.commit();
}
Example #7
File: unit-emitter.cpp Project: CryQ/hhvm
void UnitRepoProxy::GetUnitArraysStmt
                  ::get(UnitEmitter& ue) {
  RepoTxn txn(m_repo);
  if (!prepared()) {
    std::stringstream ssSelect;
    ssSelect << "SELECT arrayId,array FROM "
             << m_repo.table(m_repoId, "UnitArray")
             << " WHERE unitSn == @unitSn ORDER BY arrayId ASC;";
    txn.prepare(*this, ssSelect.str());
  }
  RepoTxnQuery query(txn, *this);
  query.bindInt64("@unitSn", ue.m_sn);
  do {
    query.step();
    if (query.row()) {
      Id arrayId;        /**/ query.getId(0, arrayId);
      std::string key;   /**/ query.getStdString(1, key);
      Variant v = unserialize_from_buffer(key.data(), key.size());
      Id id UNUSED = ue.mergeArray(v.asArrRef().get(), key);
      assert(id == arrayId);
    }
  } while (!query.done());
  txn.commit();
}
Example #8
 void removeDataFromDocsMap() {
     Client::Transaction txn(DB_SERIALIZABLE);
     RollbackDocsMapIterator docsMap;
     size_t numDocs = 0;
     log() << "Removing documents from collections for rollback." << rsLog;
     for (RollbackDocsMapIterator it; it.ok(); it.advance()){
         numDocs++;
         DocID curr = it.current();
         LOCK_REASON(lockReason, "repl: deleting a doc during rollback");
         Client::ReadContext ctx(curr.ns, lockReason);
         Collection* cl = getCollection(curr.ns);
         verify(cl);
         BSONObj currDoc;
         LOG(2) << "Finding by pk of " << curr.pk << rsLog;
         bool found = cl->findByPK(curr.pk, currDoc);
         if (found) {
             deleteOneObject(cl, curr.pk, currDoc, Collection::NO_LOCKTREE);
         }
     }
     log() << "Done removing " << numDocs << " documents from collections for rollback." << rsLog;
     updateRollbackStatus(BSON("_id" << ROLLBACK_ID << "state" << RB_DOCS_REMOVED
         << "info" << "removed docs from docs map"));
     txn.commit(DB_TXN_NOSYNC);
 }
Example #9
File: unit.cpp Project: KWMalik/hiphop-php
void UnitRepoProxy::GetUnitPreConstsStmt
                  ::get(UnitEmitter& ue) {
  RepoTxn txn(m_repo);
  if (!prepared()) {
    std::stringstream ssSelect;
    ssSelect << "SELECT name,value,preconstId FROM "
             << m_repo.table(m_repoId, "UnitPreConst")
             << " WHERE unitSn == @unitSn ORDER BY preConstId ASC;";
    txn.prepare(*this, ssSelect.str());
  }
  RepoTxnQuery query(txn, *this);
  query.bindInt64("@unitSn", ue.sn());
  do {
    query.step();
    if (query.row()) {
      StringData* name; /**/ query.getStaticString(0, name);
      TypedValue value; /**/ query.getTypedValue(1, value);
      Id id;            /**/ query.getId(2, id);
      UNUSED Id addedId = ue.addPreConst(name, value);
      ASSERT(id == addedId);
    }
  } while (!query.done());
  txn.commit();
}
Example #10
/* Add an entry to the History list at mIndex and 
 * increment the index to point to the new entry
 */
NS_IMETHODIMP
nsSHistory::AddEntry(nsISHEntry * aSHEntry, PRBool aPersist)
{
  NS_ENSURE_ARG(aSHEntry);

  nsCOMPtr<nsISHTransaction> currentTxn;

  if(mListRoot)
    GetTransactionAtIndex(mIndex, getter_AddRefs(currentTxn));

  PRBool currentPersist = PR_TRUE;
  if(currentTxn)
    currentTxn->GetPersist(&currentPersist);

  if(!currentPersist)
  {
    NS_ENSURE_SUCCESS(currentTxn->SetSHEntry(aSHEntry),NS_ERROR_FAILURE);
    currentTxn->SetPersist(aPersist);
    return NS_OK;
  }

  nsCOMPtr<nsISHTransaction> txn(do_CreateInstance(NS_SHTRANSACTION_CONTRACTID));
  NS_ENSURE_TRUE(txn, NS_ERROR_FAILURE);

  // Notify any listener about the new addition
  if (mListener) {
    nsCOMPtr<nsISHistoryListener> listener(do_QueryReferent(mListener));
    if (listener) {
      nsCOMPtr<nsIURI> uri;
      nsCOMPtr<nsIHistoryEntry> hEntry(do_QueryInterface(aSHEntry));
      if (hEntry) {
        PRInt32 currentIndex = mIndex;
        hEntry->GetURI(getter_AddRefs(uri));
        listener->OnHistoryNewEntry(uri);

        // If a listener has changed mIndex, we need to get currentTxn again,
        // otherwise we'll be left at an inconsistent state (see bug 320742)
        if (currentIndex != mIndex)
          GetTransactionAtIndex(mIndex, getter_AddRefs(currentTxn));
      }
    }
  }

  // Set the ShEntry and parent for the transaction. setting the 
  // parent will properly set the parent child relationship
  txn->SetPersist(aPersist);
  NS_ENSURE_SUCCESS(txn->Create(aSHEntry, currentTxn), NS_ERROR_FAILURE);
   
  // A little tricky math here...  Basically when adding an object regardless of
  // what the length was before, it should always be set back to the current and
  // lop off the forward.
  mLength = (++mIndex + 1);

  // If this is the very first transaction, initialize the list
  if(!mListRoot)
    mListRoot = txn;

  // Purge History list if it is too long
  if ((gHistoryMaxSize >= 0) && (mLength > gHistoryMaxSize))
    PurgeHistory(mLength-gHistoryMaxSize);
  
  return NS_OK;
}
Example #11
    void RangeDeleter::doWork() {
        _env->initThread();

        while (!inShutdown() && !stopRequested()) {
            string errMsg;

            boost::scoped_ptr<OperationContext> txn(getGlobalServiceContext()->newOpCtx());

            RangeDeleteEntry* nextTask = NULL;

            {
                boost::unique_lock<boost::mutex> sl(_queueMutex);
                while (_taskQueue.empty()) {
                    _taskQueueNotEmptyCV.timed_wait(
                        sl, duration::milliseconds(kNotEmptyTimeoutMillis));

                    if (stopRequested()) {
                        log() << "stopping range deleter worker" << endl;
                        return;
                    }

                    if (_taskQueue.empty()) {
                        // Try to check if some deletes are ready and move them to the
                        // ready queue.

                        TaskList::iterator iter = _notReadyQueue.begin();
                        while (iter != _notReadyQueue.end()) {
                            RangeDeleteEntry* entry = *iter;

                            set<CursorId> cursorsNow;
                            {
                                if (entry->options.waitForOpenCursors) {
                                    _env->getCursorIds(txn.get(),
                                                       entry->options.range.ns,
                                                       &cursorsNow);
                                }
                            }

                            set<CursorId> cursorsLeft;
                            std::set_intersection(entry->cursorsToWait.begin(),
                                                  entry->cursorsToWait.end(),
                                                  cursorsNow.begin(),
                                                  cursorsNow.end(),
                                                  std::inserter(cursorsLeft,
                                                                cursorsLeft.end()));

                            entry->cursorsToWait.swap(cursorsLeft);

                            if (entry->cursorsToWait.empty()) {
                               (*iter)->stats.queueEndTS = jsTime();
                                _taskQueue.push_back(*iter);
                                _taskQueueNotEmptyCV.notify_one();
                                iter = _notReadyQueue.erase(iter);
                            }
                            else {
                                logCursorsWaiting(entry);
                                ++iter;
                            }
                        }
                    }
                }

                if (stopRequested()) {
                    log() << "stopping range deleter worker" << endl;
                    return;
                }

                nextTask = _taskQueue.front();
                _taskQueue.pop_front();

                _deletesInProgress++;
            }

            {
                nextTask->stats.deleteStartTS = jsTime();
                bool delResult = _env->deleteRange(txn.get(),
                                                   *nextTask,
                                                   &nextTask->stats.deletedDocCount,
                                                   &errMsg);
                nextTask->stats.deleteEndTS = jsTime();

                if (delResult) {
                    nextTask->stats.waitForReplStartTS = jsTime();

                    if (!_waitForMajority(txn.get(), &errMsg)) {
                        warning() << "Error encountered while waiting for replication: " << errMsg;
                    }

                    nextTask->stats.waitForReplEndTS = jsTime();
                }
                else {
                    warning() << "Error encountered while trying to delete range: "
                              << errMsg << endl;
                }
            }

            {
                boost::lock_guard<boost::mutex> sl(_queueMutex);

                NSMinMax setEntry(nextTask->options.range.ns,
                                  nextTask->options.range.minKey,
                                  nextTask->options.range.maxKey);
                deletePtrElement(&_deleteSet, &setEntry);
                _deletesInProgress--;

                if (nextTask->notifyDone) {
                    nextTask->notifyDone->notifyOne();
                }
            }

            recordDelStats(new DeleteJobStats(nextTask->stats));
            delete nextTask;
            nextTask = NULL;
        }
    }
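The queue wait above is the standard condition-variable idiom: take the lock, loop while the queue is empty, and use a timed wait so stop requests are noticed even if no task ever arrives. A minimal sketch of the same shape in standard C++11, with hypothetical names rather than the MongoDB types:

#include <chrono>
#include <condition_variable>
#include <deque>
#include <mutex>

std::mutex gQueueMutex;
std::condition_variable gNotEmpty;
std::deque<int> gTaskQueue;
bool gStopRequested = false;

// Blocks until a task is available or a stop is requested.
bool popTask(int* out) {
    std::unique_lock<std::mutex> lk(gQueueMutex);
    while (gTaskQueue.empty()) {
        // Timed wait, so the stop flag is re-checked even without a notify.
        gNotEmpty.wait_for(lk, std::chrono::milliseconds(200));
        if (gStopRequested) return false;
    }
    *out = gTaskQueue.front();
    gTaskQueue.pop_front();
    return true;
}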
Example #12
    void RangeDeleter::doWork() {
        _env->initThread();

        while (!inShutdown() && !stopRequested()) {
            string errMsg;

            RangeDeleteEntry* nextTask = NULL;

            {
                scoped_lock sl(_queueMutex);
                while (_taskQueue.empty()) {
                    _taskQueueNotEmptyCV.timed_wait(
                        sl.boost(), duration::milliseconds(NotEmptyTimeoutMillis));

                    if (stopRequested()) {
                        log() << "stopping range deleter worker" << endl;
                        return;
                    }

                    if (_taskQueue.empty()) {
                        // Try to check if some deletes are ready and move them to the
                        // ready queue.

                        TaskList::iterator iter = _notReadyQueue.begin();
                        while (iter != _notReadyQueue.end()) {
                            RangeDeleteEntry* entry = *iter;

                            set<CursorId> cursorsNow;
                            {
                                boost::scoped_ptr<OperationContext> txn(getGlobalEnvironment()->newOpCtx());
                                _env->getCursorIds(txn.get(), entry->ns, &cursorsNow);
                            }

                            set<CursorId> cursorsLeft;
                            std::set_intersection(entry->cursorsToWait.begin(),
                                                  entry->cursorsToWait.end(),
                                                  cursorsNow.begin(),
                                                  cursorsNow.end(),
                                                  std::inserter(cursorsLeft,
                                                                cursorsLeft.end()));

                            entry->cursorsToWait.swap(cursorsLeft);

                            if (entry->cursorsToWait.empty()) {
                               (*iter)->stats.queueEndTS = jsTime();
                                _taskQueue.push_back(*iter);
                                _taskQueueNotEmptyCV.notify_one();
                                iter = _notReadyQueue.erase(iter);
                            }
                            else {
                                const unsigned long long int elapsedMillis =
                                    entry->stats.queueStartTS.millis - curTimeMillis64();
                                if ( elapsedMillis > LogCursorsThresholdMillis &&
                                    entry->timeSinceLastLog.millis > LogCursorsIntervalMillis) {

                                    entry->timeSinceLastLog = jsTime();
                                    logCursorsWaiting(entry->ns,
                                                      entry->min,
                                                      entry->max,
                                                      elapsedMillis,
                                                      entry->cursorsToWait);
                                }

                                ++iter;
                            }
                        }
                    }
                }

                if (stopRequested()) {
                    log() << "stopping range deleter worker" << endl;
                    return;
                }

                nextTask = _taskQueue.front();
                _taskQueue.pop_front();

                _deletesInProgress++;
            }

            {
                boost::scoped_ptr<OperationContext> txn(getGlobalEnvironment()->newOpCtx());
                ReplTime lastOp;

                nextTask->stats.deleteStartTS = jsTime();
                bool delResult = _env->deleteRange(txn.get(),
                                                   *nextTask,
                                                   &nextTask->stats.deletedDocCount,
                                                   &lastOp,
                                                   &errMsg);
                nextTask->stats.deleteEndTS = jsTime();

                if (delResult) {
                    nextTask->stats.waitForReplStartTS = jsTime();
                    if (!_env->waitForReplication(lastOp,
                                                  DelWriteConcern,
                                                  WaitForReplTimeoutSecs,
                                                  &errMsg)) {
                        warning() << "Error encountered while waiting for replication: "
                                  << errMsg << endl;
                    }
                    nextTask->stats.waitForReplEndTS = jsTime();
                }
                else {
                    warning() << "Error encountered while trying to delete range: "
                              << errMsg << endl;
                }
            }

            {
                scoped_lock sl(_queueMutex);

                NSMinMax setEntry(nextTask->ns, nextTask->min, nextTask->max);
                deletePtrElement(&_deleteSet, &setEntry);
                _deletesInProgress--;

                if (nextTask->notifyDone) {
                    nextTask->notifyDone->notifyOne();
                }
            }

            recordDelStats(new DeleteJobStats(nextTask->stats));
            delete nextTask;
            nextTask = NULL;
        }
    }
Example #13
File: data_layer.cpp Project: zjnqh/caffe
void DataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  // LOG(INFO)<<"load_batch";
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());

  // Reshape according to the first datum of each batch:
  // this allows single-input batches to have varying dimensions.
  const int batch_size = this->layer_param_.data_param().batch_size();
  // Datum& datum = *(reader_.full().peek());
  // // Use data_transformer to infer the expected blob shape from datum.
  Datum datum;
  datum.ParseFromString(cursor_->value());
  // Use data_transformer to infer the expected blob shape from datum.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
  this->transformed_data_.Reshape(top_shape);
  // Reshape batch according to the batch_size.
  top_shape[0] = batch_size;
  batch->data_.Reshape(top_shape);
  Dtype* top_data = batch->data_.mutable_cpu_data();
  // Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables
  // LOG(INFO)<<" output_labels_:"<<this->output_labels_;
  if (this->output_labels_) {
    top_label = batch->label_.mutable_cpu_data();
    // top_label = this->prefetch_label_.mutable_cpu_data();
  }
  Dtype* use_data=this->use_data_.mutable_cpu_data();
  // LOG(INFO)<<" use_data[0]:"<<use_data[0];
  if (use_data[0]==0.0){
    // LOG(INFO)<<"visit in order";
    for (int item_id = 0; item_id < batch_size; item_id++) {
      Datum datum;
      datum.ParseFromString(cursor_->value());
   
      // Apply data transformations (mirror, scale, crop...)
      // LOG(INFO)<<"jq enter data_layers"<< item_id;
      int offset = batch->data_.offset(item_id);
      // LOG(INFO)<<"jq enter data_layers";
      this->transformed_data_.set_cpu_data(top_data + offset);
      this->data_transformer_->Transform(datum, &(this->transformed_data_));
      // Copy label.
      if (this->output_labels_) {
        top_label[item_id] = datum.label();
        // std::cout<<" cursor_:"<<datum.label();
      }
      // use_data[item_id +5] = start;
      // trans_time += timer.MicroSeconds();
      cursor_->Next();
      // start +=1.0;
      // std::cout<<" output_labels_:"<<this->output_labels_;
      if (!cursor_->valid()) {
        DLOG(INFO) << "Restarting data prefetching from start.";
        cursor_->SeekToFirst();
      }
      // reader_.free().push(const_cast<Datum*>(&datum));
    }
  }else if (use_data[0]!=0.0){
    // forward-backward using semi supervised with false label
    // 0, sami-super-unsuper, 1, label_kinds, 2, step over, 
    // 3, datanum, 4, start index
    // LOG(INFO)<<"visit in Key/value";
    // LOG(INFO)<<"this->PREFETCH_COUNT:"<<this->PREFETCH_COUNT;
    int step_over = batch_size+1;
    // std::cout<<std::endl;
    scoped_ptr<db::Transaction> txn(db_->NewTransaction());
    // std::cout<<"key:";
    int kCIFARImageNBytes=3072;
    for (int item_id = 0; item_id < batch_size; item_id++) {
      char str_buffer[kCIFARImageNBytes];
      int id= static_cast<int>(use_data[item_id+ 1]);
      // std::cout<<" "<<id<<":";
      int length = snprintf(str_buffer, kCIFARImageNBytes, "%05d", id);
      string value;
      string str=string(str_buffer, length);
      txn->Get(str, value);
      Datum datum;
      datum.ParseFromString(value);
      int offset = batch->data_.offset(item_id);
      // LOG(INFO)<<"jq enter data_layers";
      this->transformed_data_.set_cpu_data(top_data + offset);
      this->data_transformer_->Transform(datum, &(this->transformed_data_));
      // std::cout<<" output_labels_:"<<this->output_labels_;
      if (this->output_labels_) {
        // top_label[item_id] = datum.label();
        top_label[item_id] = use_data[item_id+ step_over];
        // std::cout<<" KV:"<<datum.label();
        // top_label[item_id]= static_cast<int>(use_data[item_id + batch_size +3]);
      }
      if (use_data[item_id + step_over] != (datum.label() % 1000)) {
        LOG(INFO) << "image id:" << id << " not correctly fetched: "
                  << datum.label() << " vs " << use_data[item_id + step_over];
      }
      // std::cout<<top_label[item_id];
      // std::cout<<" key:"<<id;
    }
    // std::cout<<std::endl;
    // for (int item_id = 0; item_id < 50000; item_id++) {
    //   char str_buffer[kCIFARImageNBytes];
    //   // int id= static_cast<int>(use_data[item_id+ 1]);
    //   int length = snprintf(str_buffer, kCIFARImageNBytes, "%05d", item_id);
    //   string value;
    //   string str=string(str_buffer, length);
    //   txn->Get(str, value);
    //   // Datum datum;
    //   // datum.ParseFromString(value);
    //   // int offset = batch->data_.offset(item_id);
    //   // // LOG(INFO)<<"jq enter data_layers";
    //   // this->transformed_data_.set_cpu_data(top_data + offset);
    //   // this->data_transformer_->Transform(datum, &(this->transformed_data_));
    //   // if (this->output_labels_) {
    //   //   top_label[item_id] = datum.label();
    //   //   // top_label[item_id]= static_cast<int>(use_data[item_id + batch_size +3]);
    //   // }
    //   // std::cout<<" "<<item_id;
    // }
    // std::cout<<std::endl;
    txn->Commit();
  }
  timer.Stop();
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
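In the sequential branch above, records are read through a db::Cursor, while the keyed txn->Get(str, value) lookup is an extension specific to this fork (stock Caffe's db::Transaction only exposes Put and Commit). A minimal sketch of the stock cursor-based read loop, assuming the usual Caffe db wrapper and an existing LMDB at source:

#include <string>
#include <boost/scoped_ptr.hpp>
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/db.hpp"

// Iterates every serialized Datum in the database, in key order.
void ReadAll(const std::string& source) {
  boost::scoped_ptr<caffe::db::DB> db(caffe::db::GetDB("lmdb"));
  db->Open(source, caffe::db::READ);
  boost::scoped_ptr<caffe::db::Cursor> cursor(db->NewCursor());
  for (cursor->SeekToFirst(); cursor->valid(); cursor->Next()) {
    caffe::Datum datum;
    datum.ParseFromString(cursor->value());
    // ... consume datum.label(), datum.data(), ...
  }
}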
Example #14
  // Fill the bottom blobs.
  void Fill(bool share_location) {
    int loc_classes = share_location ? 1 : num_classes_;
    // Create fake network which simulates a simple multi box network.
    vector<Blob<Dtype>*> fake_bottom_vec;
    vector<Blob<Dtype>*> fake_top_vec;
    LayerParameter layer_param;
    // Fake input (image) of size 20 x 20
    Blob<Dtype>* fake_input = new Blob<Dtype>(num_, 3, 20, 20);

    // 1) Fill ground truth.
#ifdef USE_LMDB
    string filename;
    GetTempDirname(&filename);
    DataParameter_DB backend = DataParameter_DB_LMDB;
    scoped_ptr<db::DB> db(db::GetDB(backend));
    db->Open(filename, db::NEW);
    scoped_ptr<db::Transaction> txn(db->NewTransaction());
    for (int i = 0; i < num_; ++i) {
      AnnotatedDatum anno_datum;
      // Fill data.
      Datum* datum = anno_datum.mutable_datum();
      datum->set_channels(3);
      datum->set_height(20);
      datum->set_width(20);
      std::string* data = datum->mutable_data();
      for (int j = 0; j < 3*20*20; ++j) {
        data->push_back(static_cast<uint8_t>(j/100.));
      }
      anno_datum.set_type(AnnotatedDatum_AnnotationType_BBOX);
      if (i == 0 || i == 2) {
        AnnotationGroup* anno_group = anno_datum.add_annotation_group();
        anno_group->set_group_label(1);
        Annotation* anno = anno_group->add_annotation();
        anno->set_instance_id(0);
        NormalizedBBox* bbox = anno->mutable_bbox();
        bbox->set_xmin(0.1);
        bbox->set_ymin(0.1);
        bbox->set_xmax(0.3);
        bbox->set_ymax(0.3);
        bbox->set_difficult(i % 2);
      }
      if (i == 2) {
        AnnotationGroup* anno_group = anno_datum.add_annotation_group();
        anno_group->set_group_label(2);
        Annotation* anno = anno_group->add_annotation();
        anno->set_instance_id(0);
        NormalizedBBox* bbox = anno->mutable_bbox();
        bbox->set_xmin(0.2);
        bbox->set_ymin(0.2);
        bbox->set_xmax(0.4);
        bbox->set_ymax(0.4);
        bbox->set_difficult(i % 2);
        anno = anno_group->add_annotation();
        anno->set_instance_id(1);
        bbox = anno->mutable_bbox();
        bbox->set_xmin(0.6);
        bbox->set_ymin(0.6);
        bbox->set_xmax(0.8);
        bbox->set_ymax(0.9);
        bbox->set_difficult((i + 1) % 2);
      }
      string key_str = caffe::format_int(i, 3);
      string out;
      CHECK(anno_datum.SerializeToString(&out));
      txn->Put(key_str, out);
    }
    txn->Commit();
    db->Close();
    DataParameter* data_param = layer_param.mutable_data_param();
    data_param->set_batch_size(num_);
    data_param->set_source(filename.c_str());
    data_param->set_backend(backend);
    AnnotatedDataLayer<Dtype> anno_data_layer(layer_param);
    fake_top_vec.clear();
    fake_top_vec.push_back(fake_input);
    fake_top_vec.push_back(blob_bottom_gt_);
    anno_data_layer.SetUp(fake_bottom_vec, fake_top_vec);
    anno_data_layer.Forward(fake_bottom_vec, fake_top_vec);
#else
    FillerParameter filler_param;
    GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(fake_input);
    vector<int> gt_shape(4, 1);
    gt_shape[2] = 4;
    gt_shape[3] = 8;
    blob_bottom_gt_->Reshape(gt_shape);
    Dtype* gt_data = blob_bottom_gt_->mutable_cpu_data();
    FillItem(gt_data, "0 1 0 0.1 0.1 0.3 0.3 0");
    FillItem(gt_data + 8, "2 1 0 0.1 0.1 0.3 0.3 0");
    FillItem(gt_data + 8 * 2, "2 2 0 0.2 0.2 0.4 0.4 0");
    FillItem(gt_data + 8 * 3, "2 2 1 0.6 0.6 0.8 0.9 1");
#endif  // USE_LMDB

    // Fake layer
    PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
    pooling_param->set_pool(PoolingParameter_PoolMethod_AVE);
    pooling_param->set_kernel_size(10);
    pooling_param->set_stride(10);

    PoolingLayer<Dtype> pooling_layer(layer_param);
    Blob<Dtype>* fake_blob = new Blob<Dtype>(num_, 5, height_, width_);
    fake_bottom_vec.clear();
    fake_bottom_vec.push_back(fake_input);
    fake_top_vec.clear();
    fake_top_vec.push_back(fake_blob);
    pooling_layer.SetUp(fake_bottom_vec, fake_top_vec);
    pooling_layer.Forward(fake_bottom_vec, fake_top_vec);

    // 2) Fill bbox location predictions.
    ConvolutionParameter* convolution_param =
        layer_param.mutable_convolution_param();
    convolution_param->add_pad(0);
    convolution_param->add_kernel_size(1);
    convolution_param->add_stride(1);
    int num_output = num_priors_per_location_ * loc_classes * 4;
    convolution_param->set_num_output(num_output);
    convolution_param->mutable_weight_filler()->set_type("xavier");
    convolution_param->mutable_bias_filler()->set_type("constant");
    convolution_param->mutable_bias_filler()->set_value(0.1);
    ConvolutionLayer<Dtype> conv_layer_loc(layer_param);
    fake_bottom_vec.clear();
    fake_bottom_vec.push_back(fake_blob);
    Blob<Dtype> fake_output_loc;
    fake_top_vec.clear();
    fake_top_vec.push_back(&fake_output_loc);
    conv_layer_loc.SetUp(fake_bottom_vec, fake_top_vec);
    conv_layer_loc.Forward(fake_bottom_vec, fake_top_vec);

    // Use Permute and Flatten layer to prepare for MultiBoxLoss layer.
    PermuteParameter* permute_param = layer_param.mutable_permute_param();
    permute_param->add_order(0);
    permute_param->add_order(2);
    permute_param->add_order(3);
    permute_param->add_order(1);
    PermuteLayer<Dtype> permute_layer(layer_param);
    fake_bottom_vec.clear();
    fake_bottom_vec.push_back(&fake_output_loc);
    fake_top_vec.clear();
    Blob<Dtype> fake_permute_loc;
    fake_top_vec.push_back(&fake_permute_loc);
    permute_layer.SetUp(fake_bottom_vec, fake_top_vec);
    permute_layer.Forward(fake_bottom_vec, fake_top_vec);

    FlattenParameter* flatten_param = layer_param.mutable_flatten_param();
    flatten_param->set_axis(1);
    FlattenLayer<Dtype> flatten_layer(layer_param);
    vector<int> loc_shape(4, 1);
    loc_shape[0] = num_;
    loc_shape[1] = num_output * height_ * width_;
    blob_bottom_loc_->Reshape(loc_shape);
    fake_bottom_vec.clear();
    fake_bottom_vec.push_back(&fake_permute_loc);
    fake_top_vec.clear();
    fake_top_vec.push_back(blob_bottom_loc_);
    flatten_layer.SetUp(fake_bottom_vec, fake_top_vec);
    flatten_layer.Forward(fake_bottom_vec, fake_top_vec);

    // 3) Fill bbox confidence predictions.
    convolution_param->set_num_output(num_priors_per_location_ * num_classes_);
    ConvolutionLayer<Dtype> conv_layer_conf(layer_param);
    fake_bottom_vec.clear();
    fake_bottom_vec.push_back(fake_blob);
    num_output = num_priors_per_location_ * num_classes_;
    Blob<Dtype> fake_output_conf;
    fake_top_vec.clear();
    fake_top_vec.push_back(&fake_output_conf);
    conv_layer_conf.SetUp(fake_bottom_vec, fake_top_vec);
    conv_layer_conf.Forward(fake_bottom_vec, fake_top_vec);

    fake_bottom_vec.clear();
    fake_bottom_vec.push_back(&fake_output_conf);
    fake_top_vec.clear();
    Blob<Dtype> fake_permute_conf;
    fake_top_vec.push_back(&fake_permute_conf);
    permute_layer.SetUp(fake_bottom_vec, fake_top_vec);
    permute_layer.Forward(fake_bottom_vec, fake_top_vec);

    vector<int> conf_shape(4, 1);
    conf_shape[0] = num_;
    conf_shape[1] = num_output * height_ * width_;
    blob_bottom_conf_->Reshape(conf_shape);
    fake_bottom_vec.clear();
    fake_bottom_vec.push_back(&fake_permute_conf);
    fake_top_vec.clear();
    fake_top_vec.push_back(blob_bottom_conf_);
    flatten_layer.SetUp(fake_bottom_vec, fake_top_vec);
    flatten_layer.Forward(fake_bottom_vec, fake_top_vec);

    // 4) Fill prior bboxes.
    PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param();
    prior_box_param->add_min_size(5);
    prior_box_param->add_max_size(10);
    prior_box_param->add_aspect_ratio(3.);
    prior_box_param->set_flip(true);

    PriorBoxLayer<Dtype> prior_layer(layer_param);
    fake_bottom_vec.clear();
    fake_bottom_vec.push_back(fake_blob);
    fake_bottom_vec.push_back(fake_input);
    fake_top_vec.clear();
    fake_top_vec.push_back(blob_bottom_prior_);
    prior_layer.SetUp(fake_bottom_vec, fake_top_vec);
    prior_layer.Forward(fake_bottom_vec, fake_top_vec);

    delete fake_blob;
    delete fake_input;
  }
Example #15
int
TpcbExample::run(int n)
{
	DB *adb, *bdb, *hdb, *tdb;
	int failed, ret, txns;
	DWORD start_time, end_time;
	double elapsed_secs;

	//
	// Open the database files.
	//

	int err;
	if ((err = db_create(&adb, dbenv, 0)) != 0) {
		_snprintf(msgString, ERR_STRING_MAX,
		    "db_create of account db failed. Error: %s",
		    db_strerror(err));
		return (1);
	}
	if ((err = adb->open(adb, NULL, "account", NULL, DB_UNKNOWN,
			     DB_AUTO_COMMIT, 0)) != 0) {
		_snprintf(msgString, ERR_STRING_MAX,
		    "Open of account file failed. Error: %s", db_strerror(err));
		return (1);
	}

	if ((err = db_create(&bdb, dbenv, 0)) != 0) {
		_snprintf(msgString, ERR_STRING_MAX,
		    "db_create of branch db failed. Error: %s",
		    db_strerror(err));
		return (1);
	}
	if ((err = bdb->open(bdb, NULL, "branch", NULL, DB_UNKNOWN,
			     DB_AUTO_COMMIT, 0)) != 0) {
		_snprintf(msgString, ERR_STRING_MAX,
		    "Open of branch file failed. Error: %s", db_strerror(err));
		return (1);
	}

	if ((err = db_create(&tdb, dbenv, 0)) != 0) {
		_snprintf(msgString, ERR_STRING_MAX,
		    "db_create of teller db failed. Error: %s",
		    db_strerror(err));
		return (1);
	}
	if ((err = tdb->open(tdb, NULL, "teller", NULL, DB_UNKNOWN,
			     DB_AUTO_COMMIT, 0)) != 0) {
		_snprintf(msgString, ERR_STRING_MAX,
		    "Open of teller file failed. Error: %s", db_strerror(err));
		return (1);
	}

	if ((err = db_create(&hdb, dbenv, 0)) != 0) {
		_snprintf(msgString, ERR_STRING_MAX,
		    "db_create of teller db failed. Error: %s",
		    db_strerror(err));
		return (1);
	}
	if ((err = hdb->open(hdb, NULL, "history", NULL, DB_UNKNOWN,
			     DB_AUTO_COMMIT, 0)) != 0) {
		_snprintf(msgString, ERR_STRING_MAX,
		    "Open of history file failed. Error: %s", db_strerror(err));
		return (1);
	}

	start_time = GetTickCount();
	for (txns = n, failed = 0; n-- > 0;)
		if ((ret = txn(adb, bdb, tdb, hdb,
		    accounts, branches, tellers)) != 0)
			++failed;
	end_time = GetTickCount();
	if (end_time == start_time)
		++end_time;
#define MILLISECS_PER_SEC 1000
	elapsed_secs = (double)((end_time - start_time))/MILLISECS_PER_SEC;
	_snprintf(msgString, ERR_STRING_MAX,
		"%s: %d txns: %d failed, %.2f TPS\n", progname, txns, failed,
	    (txns - failed) / elapsed_secs);

	(void)adb->close(adb, 0);
	(void)bdb->close(bdb, 0);
	(void)tdb->close(tdb, 0);
	(void)hdb->close(hdb, 0);

	return (0);
}
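This is the Berkeley DB C API: db_create allocates a handle, DB->open attaches it to an on-disk database (DB_UNKNOWN lets the library read the access method from the file), and DB->close releases it. A minimal sketch of that lifecycle, assuming an already-opened environment and a hypothetical file name:

#include <db.h>

/* Open one database transactionally, use it, and close it. */
int open_use_close(DB_ENV *dbenv)
{
	DB *dbp;
	int err;
	if ((err = db_create(&dbp, dbenv, 0)) != 0)
		return (1);
	if ((err = dbp->open(dbp, NULL, "account", NULL, DB_UNKNOWN,
	    DB_AUTO_COMMIT, 0)) != 0) {
		(void)dbp->close(dbp, 0);	/* open failed; free the handle */
		return (1);
	}
	/* ... run transactions against dbp ... */
	return (dbp->close(dbp, 0) == 0 ? 0 : 1);
}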
Example #16
    void ReplicationCoordinatorImpl::_heartbeatReconfigStore(const ReplicaSetConfig& newConfig) {
        class StoreThreadGuard {
        public:
            StoreThreadGuard(boost::unique_lock<boost::mutex>* lk,
                             boost::scoped_ptr<boost::thread>* thread,
                             bool* inShutdown) :
                _lk(lk),
                _thread(thread),
                _inShutdown(inShutdown) {}
            ~StoreThreadGuard() {
                if (!_lk->owns_lock()) {
                    _lk->lock();
                }
                if (*_inShutdown) {
                    return;
                }
                _thread->get()->detach();
                _thread->reset(NULL);
            }

        private:
            boost::unique_lock<boost::mutex>* const _lk;
            boost::scoped_ptr<boost::thread>* const _thread;
            bool* const _inShutdown;
        };

        boost::unique_lock<boost::mutex> lk(_mutex, boost::defer_lock_t());
        StoreThreadGuard guard(&lk, &_heartbeatReconfigThread, &_inShutdown);

        const StatusWith<int> myIndex = validateConfigForHeartbeatReconfig(
                _externalState.get(),
                newConfig);

        if (myIndex.getStatus() == ErrorCodes::NodeNotFound) {
            lk.lock();
            // If this node is absent from newConfig, and this node was not previously initialized,
            // return to kConfigUninitialized immediately, rather than storing the config and
            // transitioning into the RS_REMOVED state.  See SERVER-15740.
            if (!_rsConfig.isInitialized()) {
                invariant(_rsConfigState == kConfigHBReconfiguring);
                LOG(1) << "Ignoring new configuration in heartbeat response because we are "
                    "uninitialized and not a member of the new configuration";
                _setConfigState_inlock(kConfigUninitialized);
                return;
            }
            lk.unlock();
        }

        if (!myIndex.getStatus().isOK() && myIndex.getStatus() != ErrorCodes::NodeNotFound) {
            warning() << "Not persisting new configuration in heartbeat response to disk because "
                    "it is invalid: "<< myIndex.getStatus();
        }
        else {
            boost::scoped_ptr<OperationContext> txn(
                                      _externalState->createOperationContext("WriteReplSetConfig"));
            Status status = _externalState->storeLocalConfigDocument(txn.get(), newConfig.toBSON());

            lk.lock();
            if (!status.isOK()) {
                error() << "Ignoring new configuration in heartbeat response because we failed to"
                    " write it to stable storage; " << status;
                invariant(_rsConfigState == kConfigHBReconfiguring);
                if (_rsConfig.isInitialized()) {
                    _setConfigState_inlock(kConfigSteady);
                }
                else {
                    _setConfigState_inlock(kConfigUninitialized);
                }
                return;
            }

            lk.unlock();

            _externalState->startThreads();
        }

        const stdx::function<void (const ReplicationExecutor::CallbackData&)> reconfigFinishFn(
                stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
                           this,
                           stdx::placeholders::_1,
                           newConfig,
                           myIndex));

        if (_currentState.primary()) {
            // If the primary is receiving a heartbeat reconfig, that strongly suggests
            // that there has been a force reconfiguration.  In any event, it might lead
            // to this node stepping down as primary, so we'd better do it with the global
            // lock.
            _replExecutor.scheduleWorkWithGlobalExclusiveLock(reconfigFinishFn);
        }
        else {
            _replExecutor.scheduleWork(reconfigFinishFn);
        }
    }
Example #17
void
TpcbExample::run(int n, int accounts, int branches, int tellers)
{
	Db *adb, *bdb, *hdb, *tdb;
	double gtps, itps;
	int failed, ifailed, ret, txns;
	time_t starttime, curtime, lasttime;
#ifndef _WIN32
	pid_t pid;

	pid = getpid();
#else
	int pid;

	pid = 0;
#endif

	//
	// Open the database files.
	//

	int err;
	adb = new Db(this, 0);
	if ((err = adb->open("account", NULL, DB_UNKNOWN, 0, 0)) != 0)
		errExit(err, "Open of account file failed");

	bdb = new Db(this, 0);
	if ((err = bdb->open("branch", NULL, DB_UNKNOWN, 0, 0)) != 0)
		errExit(err, "Open of branch file failed");

	tdb = new Db(this, 0);
	if ((err = tdb->open("teller", NULL, DB_UNKNOWN, 0, 0)) != 0)
		errExit(err, "Open of teller file failed");

	hdb = new Db(this, 0);
	if ((err = hdb->open("history", NULL, DB_UNKNOWN, 0, 0)) != 0)
		errExit(err, "Open of history file failed");

	txns = failed = ifailed = 0;
	starttime = time(NULL);
	lasttime = starttime;
	while (n-- > 0) {
		txns++;
		ret = txn(adb, bdb, tdb, hdb, accounts, branches, tellers);
		if (ret != 0) {
			failed++;
			ifailed++;
		}
		if (n % 5000 == 0) {
			curtime = time(NULL);
			gtps = (double)(txns - failed) / (curtime - starttime);
			itps = (double)(5000 - ifailed) / (curtime - lasttime);

			// We use printf because it provides much simpler
			// formatting than iostreams.
			//
			printf("[%d] %d txns %d failed ", (int)pid,
			    txns, failed);
			printf("%6.2f TPS (gross) %6.2f TPS (interval)\n",
			   gtps, itps);
			lasttime = curtime;
			ifailed = 0;
		}
	}

	(void)adb->close(0);
	(void)bdb->close(0);
	(void)tdb->close(0);
	(void)hdb->close(0);

	cout << (long)txns << " transactions begun "
	     << (long)failed << " failed\n";
}
Example #18
CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(CMutableTransaction &tx, CTxMemPool *pool) {
    CTransaction txn(tx);
    return FromTx(txn, pool);
}
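This overload simply wraps the mutable transaction in an immutable CTransaction and forwards to the pool-taking FromTx. A hypothetical usage sketch in the style of Bitcoin Core's tests (helper setters and exact signatures vary by version):

// Hypothetical sketch; assumes the test utility types shown above.
CMutableTransaction tx;
// ... populate tx.vin / tx.vout ...
TestMemPoolEntryHelper entry;
CTxMemPoolEntry e = entry.FromTx(tx, /*pool=*/nullptr);  // wraps tx as a CTransaction internally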
Example #19
int writeRestrictions( pqxx::connection &conn, std::string &filename, std::string &rtable )
{
    std::ofstream out( filename.c_str(), std::ios::out | std::ios::binary );
    writeHeader( out );

    pqxx::work txn( conn );

    uint32_t cnt = 0;
    // write the count of edges we will output
    try {
        pqxx::result q = txn.exec(
            "select count(*) as cnt from " + rtable
        );
        cnt = q[0]["cnt"].as<uint32_t>();
    }
    catch (const std::exception &e) {
        // nop
    }
    out.write( (const char *) &cnt, sizeof(uint32_t) );

    if (cnt) {
        pqxx::stateless_cursor<pqxx::cursor_base::read_only,
                               pqxx::cursor_base::owned> cursor(
            txn,
            "select n_via, "
            "       n_from, "
            "       n_to, "
            "       case when is_forbidden then 0 else 1 end::integer as forbid "
            "  from " + rtable +
            " order by to_way_id asc ",
            "restrictions",
            false
        );

        for ( size_t idx = 0; true; idx += CURSOR_CHUNK) {
            pqxx::result result = cursor.retrieve( idx, idx + CURSOR_CHUNK );
            if ( result.empty() ) break;

            for ( pqxx::result::const_iterator row = result.begin();
                  row != result.end();
                  ++row
                ) {

                uint32_t n_via = row["n_via"].as<uint32_t>();
                out.write( (const char *) &n_via, sizeof(uint32_t) );

                uint32_t n_from = row["n_from"].as<uint32_t>();
                out.write( (const char *) &n_from, sizeof(uint32_t) );

                uint32_t n_to = row["n_to"].as<uint32_t>();
                out.write( (const char *) &n_to, sizeof(uint32_t) );

                uchar_t flag = (row["forbid"].as<int>() == 1) ? '\01' : '\0';
                out.write( (const char *) &flag, sizeof(uchar_t) );

                flag = (uchar_t) 0x7f;
                out.write( (const char *) &flag, sizeof(uchar_t) );

                flag = '\0';
                out.write( (const char *) &flag, sizeof(uchar_t) );
                out.write( (const char *) &flag, sizeof(uchar_t) );

            }
            if ( result.size() < CURSOR_CHUNK ) break;
        }
    }
    out.close();

    return cnt;
}
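pqxx::work is libpqxx's RAII transaction: statements execute through it and nothing is applied unless commit() runs, which is why the count query above is wrapped in try/catch inside the transaction. A minimal standalone sketch of the pattern, with a hypothetical DSN and table:

#include <iostream>
#include <pqxx/pqxx>

int main() {
    pqxx::connection conn("dbname=routing");   // hypothetical DSN
    pqxx::work txn(conn);                      // opens a transaction
    pqxx::result r = txn.exec("SELECT count(*) AS cnt FROM restrictions");
    std::cout << r[0]["cnt"].as<long>() << std::endl;
    txn.commit();   // without this, the transaction rolls back on destruction
    return 0;
}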
Example #20
void *child(void *dummy)
{
	txn();
	return NULL;
}
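child matches the pthreads start-routine signature (void *(*)(void *)), so txn() here is just a worker run on its own thread. A minimal sketch of launching it, assuming the child() defined above:

#include <pthread.h>

int run_child(void)
{
	pthread_t tid;
	if (pthread_create(&tid, NULL, child, NULL) != 0)
		return -1;                  /* could not start the worker */
	return pthread_join(tid, NULL);     /* wait for txn() to finish */
}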
Example #21
void Repo::loadGlobalData(bool allowFailure /* = false */,
                          bool readArrayTable /* = true */) {
  if (readArrayTable) m_lsrp.load();

  if (!RuntimeOption::RepoAuthoritative) return;

  std::vector<std::string> failures;

  /*
   * This should probably just go to the Local repo always, except
   * that our unit test suite is currently running RepoAuthoritative
   * tests with the compiled repo as the Central repo.
   */
  for (int repoId = RepoIdCount - 1; repoId >= 0; --repoId) {
    if (repoName(repoId).empty()) {
      // The repo wasn't loadable
      continue;
    }
    try {
      RepoStmt stmt(*this);
      const auto& tbl = table(repoId, "GlobalData");
      stmt.prepare(
        folly::format(
          "SELECT count(*), data from {};", tbl
        ).str()
      );
      RepoTxn txn(*this);
      RepoTxnQuery query(txn, stmt);
      query.step();
      if (!query.row()) {
        throw RepoExc("Can't find table %s", tbl.c_str());
      };
      int val;
      query.getInt(0, val);
      if (val == 0) {
        throw RepoExc("No rows in %s. Did you forget to compile that file with "
                      "this HHVM version?", tbl.c_str());
      }
      BlobDecoder decoder = query.getBlob(1);
      decoder(s_globalData);
      if (readArrayTable) {
        auto& arrayTypeTable = globalArrayTypeTable();
        decoder(arrayTypeTable);
        decoder(s_globalData.APCProfile);
        decoder(s_globalData.ConstantFunctions);
        decoder.assertDone();
      }
      txn.commit();
    } catch (RepoExc& e) {
      failures.push_back(repoName(repoId) + ": "  + e.msg());
      continue;
    }

    // TODO: this should probably read out the other elements of the global data
    // which control Option or RuntimeOption values -- the others are read out
    // in an inconsistent and ad-hoc manner. But I don't understand their uses
    // and interactions well enough to feel comfortable fixing now.
    RuntimeOption::EvalPromoteEmptyObject    = s_globalData.PromoteEmptyObject;
    RuntimeOption::EnableIntrinsicsExtension =
      s_globalData.EnableIntrinsicsExtension;
    HHBBC::options.ElideAutoloadInvokes     = s_globalData.ElideAutoloadInvokes;
    RuntimeOption::AutoprimeGenerators      = s_globalData.AutoprimeGenerators;
    RuntimeOption::EnableHipHopSyntax       = s_globalData.EnableHipHopSyntax;
    RuntimeOption::EvalHardTypeHints        = s_globalData.HardTypeHints;
    RuntimeOption::EvalUseHHBBC             = s_globalData.UsedHHBBC;
    RuntimeOption::PHP7_Builtins            = s_globalData.PHP7_Builtins;
    RuntimeOption::PHP7_IntSemantics        = s_globalData.PHP7_IntSemantics;
    RuntimeOption::PHP7_NoHexNumerics       = s_globalData.PHP7_NoHexNumerics;
    RuntimeOption::PHP7_ScalarTypes         = s_globalData.PHP7_ScalarTypes;
    RuntimeOption::PHP7_Substr              = s_globalData.PHP7_Substr;
    RuntimeOption::EvalReffinessInvariance  = s_globalData.ReffinessInvariance;
    RuntimeOption::EvalHackArrDVArrs        = s_globalData.HackArrDVArrs;
    RuntimeOption::DisallowDynamicVarEnvFuncs =
      s_globalData.DisallowDynamicVarEnvFuncs;

    if (s_globalData.HardReturnTypeHints) {
      RuntimeOption::EvalCheckReturnTypeHints = 3;
    }
    if (s_globalData.ThisTypeHintLevel == 3) {
      RuntimeOption::EvalThisTypeHintLevel = s_globalData.ThisTypeHintLevel;
    }
    RuntimeOption::ConstantFunctions.clear();
    for (auto const& elm : s_globalData.ConstantFunctions) {
      RuntimeOption::ConstantFunctions.insert(elm);
    }

    return;
  }

  if (allowFailure) return;

  if (failures.empty()) {
    std::fprintf(stderr, "No repo was loadable. Check all the possible repo "
                 "locations (Repo.Central.Path, HHVM_REPO_CENTRAL_PATH, and "
                 "$HOME/.hhvm.hhbc) to make sure one of them is a valid "
                 "sqlite3 HHVM repo built with this exact HHVM version.\n");
  } else {
    // We should always have a global data section in RepoAuthoritative
    // mode, or the repo is messed up.
    std::fprintf(stderr, "Failed to load Repo::GlobalData:\n");
    for (auto& f : failures) {
      std::fprintf(stderr, "  %s\n", f.c_str());
    }
  }

  assertx(Process::IsInMainThread());
  exit(1);
}
Example #22
static void run_test(const ftcxx::DBEnv &env, const ftcxx::DB &db) {
    fill(env, db);

    ftcxx::DBTxn txn(env);

    {
        uint32_t lk;
        uint32_t rk;

        for (uint32_t i = 0; i < N; i += 1000) {
            lk = i;
            rk = i + 499;

            ftcxx::Slice key;
            ftcxx::Slice val;
            uint32_t expect = i;
            uint32_t last = 0;
            for (auto cur(db.buffered_cursor(txn, ftcxx::Slice::slice_of(lk), ftcxx::Slice::slice_of(rk),
                                             UIntComparator(), ftcxx::DB::NullFilter()));
                 cur.next(key, val);
                 ) {
                last = key.as<uint32_t>();
                assert(expect == last);
                expect++;
            }
            assert(last == (i + 499));
        }
    }

    txn.commit();

    ftcxx::DBTxn extxn(env);

    {
        ftcxx::Slice key;
        ftcxx::Slice val;
        uint32_t expect = 0;
        uint32_t last = 0;
        for (auto cur(db.buffered_cursor(extxn, UIntComparator(), ftcxx::DB::NullFilter())); cur.next(key, val); ) {
            last = key.as<uint32_t>();
            assert(expect == last);
            expect++;
        }
        assert(last == N - 1);
    }

    {
        ftcxx::Slice key;
        ftcxx::Slice val;
        uint32_t expect = 0;
        uint32_t last = 0;
        for (auto cur(db.simple_cursor(extxn, UIntComparator(), key, val)); ; ) {
            std::cout << key.as<uint32_t>() << std::endl;
            last = key.as<uint32_t>();
            assert(expect == last);
            expect++;
            if (!cur.next()) {
                break;
            }
        }
        assert(last == N - 1);
    }

    extxn.commit();
}
Example #23
bool bdb_blockchain::initialize(const std::string& prefix)
{
    // Try to lock the directory first
    boost::filesystem::path lock_path = prefix;
    lock_path /= "db-lock";
    std::ofstream touch_file(lock_path.native(), std::ios::app);
    touch_file.close();
    flock_ = lock_path.c_str();
    if (!flock_.try_lock())
    {
        // Database already opened elsewhere
        return false;
    }
    // Continue on
    GOOGLE_PROTOBUF_VERIFY_VERSION;
    env_ = new DbEnv(0);
    env_->set_lk_max_locks(10000);
    env_->set_lk_max_objects(10000);
    env_->set_cachesize(1, 0, 1);
    if (env_->open(prefix.c_str(), env_flags, 0) != 0)
        return false;
    // Create database objects
    db_blocks_ = new Db(env_, 0);
    db_blocks_hash_ = new Db(env_, 0);
    db_txs_ = new Db(env_, 0);
    db_spends_ = new Db(env_, 0);
    db_address_ = new Db(env_, 0);
    if (db_blocks_->set_bt_compare(bt_compare_blocks) != 0)
    {
        log_fatal() << "Internal error setting BTREE comparison function";
        return false;
    }
    txn_guard txn(env_);
    if (db_blocks_->open(txn.get(), "blocks", "block-data",
            DB_BTREE, db_flags, 0) != 0)
        return false;
    if (db_blocks_hash_->open(txn.get(), "blocks", "block-hash", 
            DB_BTREE, db_flags, 0) != 0)
        return false;
    db_blocks_->associate(txn.get(), db_blocks_hash_, get_block_hash, 0);
    if (db_txs_->open(txn.get(), "transactions", "tx",
            DB_BTREE, db_flags, 0) != 0)
        return false;
    if (db_spends_->open(txn.get(), "transactions", "spends",
            DB_BTREE, db_flags, 0) != 0)
        return false;
    db_address_->set_flags(DB_DUP);
    if (db_address_->open(txn.get(), "address", "address",
            DB_BTREE, db_flags, 0) != 0)
        return false;
    txn.commit();

    common_ = std::make_shared<bdb_common>(env_,
        db_blocks_, db_blocks_hash_, db_txs_, db_spends_, db_address_);

    orphans_ = std::make_shared<orphans_pool>(20);
    bdb_chain_keeper_ptr chainkeeper = 
        std::make_shared<bdb_chain_keeper>(common_, env_,
            db_blocks_, db_blocks_hash_, db_txs_, db_spends_, db_address_);
    chain_ = chainkeeper;
    organize_ = std::make_shared<bdb_organizer>(
        common_, orphans_, chainkeeper, reorganize_subscriber_);

    return true;
}
Example #24
CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(const CMutableTransaction &tx) {
    CTransaction txn(tx);
    return FromTx(txn);
}
Example #25
File: bgsync.cpp Project: igagnidz/tokumx
    void BackgroundSync::runRollback(OplogReader& r, uint64_t oplogTS) {
        // starting from ourLast, we need to read the remote oplog
        // backwards until we find an entry in the remote oplog
        // that has the same GTID, timestamp, and hash as
        // what we have in our oplog. If we don't find one that is within
        // some reasonable timeframe, then we go fatal
        GTID ourLast = theReplSet->gtidManager->getLiveState();
        GTID idToRollbackTo;
        uint64_t rollbackPointTS = 0;
        uint64_t rollbackPointHash = 0;
        incRBID();
        try {
            shared_ptr<DBClientCursor> rollbackCursor = r.getRollbackCursor(ourLast);
            while (rollbackCursor->more()) {
                BSONObj remoteObj = rollbackCursor->next();
                GTID remoteGTID = getGTIDFromBSON("_id", remoteObj);
                uint64_t remoteTS = remoteObj["ts"]._numberLong();
                uint64_t remoteLastHash = remoteObj["h"].numberLong();
                if (remoteTS + 1800*1000 < oplogTS) {
                    log() << "Rollback takes us too far back, throwing exception. remoteTS: " << remoteTS << " oplogTS: " << oplogTS << rsLog;
                    throw RollbackOplogException("replSet rollback too long a time period for a rollback (at least 30 minutes).");
                    break;
                }
                //now try to find an entry in our oplog with that GTID
                BSONObjBuilder localQuery;
                BSONObj localObj;
                addGTIDToBSON("_id", remoteGTID, localQuery);
                bool foundLocally = false;
                {
                    LOCK_REASON(lockReason, "repl: looking up oplog entry for rollback");
                    Client::ReadContext ctx(rsoplog, lockReason);
                    Client::Transaction transaction(DB_SERIALIZABLE);
                    foundLocally = Collection::findOne(rsoplog, localQuery.done(), localObj);
                    transaction.commit();
                }
                if (foundLocally) {
                    GTID localGTID = getGTIDFromBSON("_id", localObj);
                    uint64_t localTS = localObj["ts"]._numberLong();
                    uint64_t localLastHash = localObj["h"].numberLong();
                    if (localLastHash == remoteLastHash &&
                        localTS == remoteTS &&
                        GTID::cmp(localGTID, remoteGTID) == 0
                        )
                    {
                        idToRollbackTo = localGTID;
                        rollbackPointTS = localTS;
                        rollbackPointHash = localLastHash;
                        log() << "found id to rollback to " << idToRollbackTo << rsLog;
                        break;
                    }
                }
            }
            // At this point, either we have found the point to try to rollback to,
            // or we have determined that we cannot rollback
            if (idToRollbackTo.isInitial()) {
                // we cannot rollback
                throw RollbackOplogException("could not find ID to rollback to");
            }
        }
        catch (DBException& e) {
            log() << "Caught DBException during rollback " << e.toString() << rsLog;
            throw RollbackOplogException("DBException while trying to find ID to rollback to: " + e.toString());
        }
        catch (std::exception& e2) {
            log() << "Caught std::exception during rollback " << e2.what() << rsLog;
            throw RollbackOplogException(str::stream() << "Exception while trying to find ID to rollback to: " << e2.what());
        }

        // proceed with the rollback to point idToRollbackTo
        // probably ought to grab a global write lock while doing this
        // I don't think we want oplog cursors reading from this machine
        // while we are rolling back. Or at least do something to protect against this

        // first, let's get all the operations that are being applied out of
        // the way; we don't want to roll back an item in the oplog while the
        // applier thread is simultaneously applying it to the oplog
        {
            boost::unique_lock<boost::mutex> lock(_mutex);
            while (_deque.size() > 0) {
                log() << "waiting for applier to finish work before doing rollback " << rsLog;
                _queueDone.wait(lock);
            }
            verifySettled();
        }

        // now let's tell the system we are going to rollback: abort live
        // multi-statement transactions, invalidate cursors, and change the
        // state to RS_ROLLBACK
        {
            // so we know nothing is simultaneously occurring
            RWLockRecursive::Exclusive e(operationLock);
            LOCK_REASON(lockReason, "repl: killing all operations for rollback");
            Lock::GlobalWrite lk(lockReason);
            ClientCursor::invalidateAllCursors();
            Client::abortLiveTransactions();
            theReplSet->goToRollbackState();
        }

        try {
            // now that we are settled, we have to take care of the GTIDManager
            // and the repl info thread.
            // We need to reset the state of the GTIDManager to the point
            // we intend to rollback to, and we need to make sure that the repl info thread
            // has captured this information.
            theReplSet->gtidManager->resetAfterInitialSync(
                idToRollbackTo,
                rollbackPointTS,
                rollbackPointHash
                );
            // now force an update of the repl info thread
            theReplSet->forceUpdateReplInfo();

            // at this point, everything should be settled, the applier should
            // have nothing left (and remain that way, because this is the only
            // thread that can put work on the applier). Now we can rollback
            // the data.
            while (true) {
                BSONObj o;
                {
                    LOCK_REASON(lockReason, "repl: checking for oplog data");
                    Lock::DBRead lk(rsoplog, lockReason);
                    Client::Transaction txn(DB_SERIALIZABLE);
                    // if there is nothing in the oplog, break
                    o = getLastEntryInOplog();
                    if( o.isEmpty() ) {
                        break;
                    }
                }
                GTID lastGTID = getGTIDFromBSON("_id", o);
                // if we have rolled back enough, break from while loop
                if (GTID::cmp(lastGTID, idToRollbackTo) <= 0) {
                    dassert(GTID::cmp(lastGTID, idToRollbackTo) == 0);
                    break;
                }
                rollbackTransactionFromOplog(o, true);
            }
            theReplSet->leaveRollbackState();
        }
        catch (DBException& e) {
            log() << "Caught DBException during rollback " << e.toString() << rsLog;
            throw RollbackOplogException("DBException while trying to run rollback: " + e.toString());
        }
        catch (std::exception& e2) {
            log() << "Caught std::exception during rollback " << e2.what() << rsLog;
            throw RollbackOplogException(str::stream() << "Exception while trying to run rollback: " << e2.what());
        }
        
    }
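The search loop above accepts a candidate rollback point only when the local and remote oplog entries agree on all three identifying fields: the GTID, the timestamp, and the hash. Below is a minimal standalone sketch of that matching rule, using a hypothetical OplogPoint struct in place of the raw BSON objects (GTID and GTID::cmp come from the surrounding replication code; the struct and function names are illustrative, not part of the original source):

#include <cstdint>

// Hypothetical plain-struct stand-in for a decoded oplog entry; the real
// code reads these fields out of BSON ("_id", "ts", "h").
struct OplogPoint {
    GTID     gtid;   // "_id"
    uint64_t ts;     // "ts", milliseconds
    uint64_t hash;   // "h"
};

// True when the local and remote entries identify the same oplog position,
// i.e. a safe point to roll back to.
static bool isCommonRollbackPoint(const OplogPoint& local,
                                  const OplogPoint& remote) {
    return local.hash == remote.hash &&
           local.ts   == remote.ts   &&
           GTID::cmp(local.gtid, remote.gtid) == 0;
}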
Example #26
0
void Repo::loadGlobalData(bool allowFailure /* = false */) {
  m_lsrp.load();

  if (!RuntimeOption::RepoAuthoritative) return;

  std::vector<std::string> failures;

  /*
   * This should probably just go to the Local repo always, except
   * that our unit test suite is currently running RepoAuthoritative
   * tests with the compiled repo as the Central repo.
   */
  for (int repoId = RepoIdCount - 1; repoId >= 0; --repoId) {
    if (repoName(repoId).empty()) {
      // The repo wasn't loadable
      continue;
    }
    try {
      RepoStmt stmt(*this);
      const auto& tbl = table(repoId, "GlobalData");
      stmt.prepare(
        folly::format(
          "SELECT count(*), data from {};", tbl
        ).str()
      );
      RepoTxn txn(*this);
      RepoTxnQuery query(txn, stmt);
      query.step();
      if (!query.row()) {
        throw RepoExc("Can't find table %s", tbl.c_str());
      }
      int val;
      query.getInt(0, val);
      if (val == 0) {
        throw RepoExc("No rows in %s. Did you forget to compile that file with "
                      "this HHVM version?", tbl.c_str());
      }
      BlobDecoder decoder = query.getBlob(1);
      decoder(s_globalData);

      txn.commit();
    } catch (RepoExc& e) {
      failures.push_back(repoName(repoId) + ": "  + e.msg());
      continue;
    }

    // TODO: this should probably read out the other elements of the global data
    // which control Option or RuntimeOption values -- the others are read out
    // in an inconsistent and ad-hoc manner. But I don't understand their uses
    // and interactions well enough to feel comfortable fixing now.
    RuntimeOption::PHP7_IntSemantics = s_globalData.PHP7_IntSemantics;
    RuntimeOption::PHP7_ScalarTypes  = s_globalData.PHP7_ScalarTypes;
    RuntimeOption::AutoprimeGenerators = s_globalData.AutoprimeGenerators;

    return;
  }

  if (allowFailure) return;

  if (failures.empty()) {
    std::fprintf(stderr, "No repo was loadable. Check all the possible repo "
                 "locations (Repo.Central.Path, HHVM_REPO_CENTRAL_PATH, and "
                 "$HOME/.hhvm.hhbc) to make sure one of them is a valid "
                 "sqlite3 HHVM repo built with this exact HHVM version.\n");
  } else {
    // We should always have a global data section in RepoAuthoritative
    // mode, or the repo is messed up.
    std::fprintf(stderr, "Failed to load Repo::GlobalData:\n");
    for (auto& f : failures) {
      std::fprintf(stderr, "  %s\n", f.c_str());
    }
  }

  assert(Process::IsInMainThread());
  exit(1);
}
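The GlobalData table is expected to hold exactly one serialized blob row, so the single "SELECT count(*), data" statement both detects an empty table and fetches the blob in one pass. Below is a minimal sketch of the same read against a raw sqlite3 handle, outside the Repo/RepoTxn wrappers; the bare table name GlobalData stands in for table(repoId, "GlobalData"), readGlobalDataBlob is a hypothetical helper, and error handling is simplified:

#include <sqlite3.h>
#include <stdexcept>
#include <vector>

// Reads the single serialized blob out of the GlobalData table, throwing if
// the table is missing or empty, roughly mirroring the checks above.
std::vector<char> readGlobalDataBlob(sqlite3* db) {
  sqlite3_stmt* stmt = nullptr;
  if (sqlite3_prepare_v2(db, "SELECT count(*), data FROM GlobalData;",
                         -1, &stmt, nullptr) != SQLITE_OK) {
    throw std::runtime_error(sqlite3_errmsg(db));
  }
  std::vector<char> blob;
  if (sqlite3_step(stmt) == SQLITE_ROW && sqlite3_column_int(stmt, 0) > 0) {
    auto* data = static_cast<const char*>(sqlite3_column_blob(stmt, 1));
    blob.assign(data, data + sqlite3_column_bytes(stmt, 1));
  }
  sqlite3_finalize(stmt);
  if (blob.empty()) {
    throw std::runtime_error("no GlobalData row; was the repo built with "
                             "this HHVM version?");
  }
  return blob;
}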
Example #27
0
int writeEdges( pqxx::connection &conn, std::ofstream &out, std::string &etable )
{
    pqxx::work txn( conn );

    // write the count of edges we will output
    pqxx::result q = txn.exec(
        "select count(*) as cnt from " + etable + " where source != target "
    );
    uint32_t cnt = q[0]["cnt"].as<uint32_t>();
    out.write( (const char *) &cnt, sizeof(uint32_t) );

    pqxx::stateless_cursor<pqxx::cursor_base::read_only,
                           pqxx::cursor_base::owned> cursor(
        txn,
        "select case when dir_travel='T' then target "
        "            else source end as source, "
        "       case when dir_travel='T' then source "
        "            else target end as target, "
        "       st_length_spheroid(st_geometryn(the_geom, 1), "
        "           'SPHEROID[\"GRS_1980\",6378137,298.257222101]'::spheroid"
        "           )::integer as length, "
        "       case when dir_travel = 'B' then 0 else 1 end::integer as dir, "
        "       round(case when speed_cat='1' then 130.0 "
        "                  when speed_cat='2' then 101.0 "
        "                  when speed_cat='3' then  91.0 "
        "                  when speed_cat='4' then  71.0 "
        "                  when speed_cat='5' then  51.0 "
        "                  when speed_cat='6' then  31.0 "
        "                  when speed_cat='7' then  11.0 "
        "                  when speed_cat='8' then   5.0 "
        "                  else 1.0 "
        "                  end * "
        "           st_length_spheroid(st_geometryn(the_geom, 1), "
        "             'SPHEROID[\"GRS_1980\",6378137,298.257222101]'::spheroid"
        "           ) * 0.027777778)::integer as weight, "
        "       speed_cat::integer as rank, "
        "       case when b.id is null then 0 else b.id end::integer as nameid, "
        "       case when roundabout='Y' then 1 "
        "            else 0 end::integer as roundabout, "
        "       case when tunnel='Y' or bridge='Y' then 1 "
        "            else 0 end::integer as ignoreingrid, "
        "       0::integer as accessrestricted "
        "  from " + etable + " a left outer join " + etable + "_osrm_names b "
        "    on a.name=b.name where source != target "
        " order by a.target asc " + LIMIT,
        "edges",
        false
    );

    for ( size_t idx = 0; true; idx += CURSOR_CHUNK) {
        pqxx::result result = cursor.retrieve( idx, idx + CURSOR_CHUNK );
        if ( result.empty() ) break;

        for ( pqxx::result::const_iterator row = result.begin();
              row != result.end();
              ++row
            ) {

            uint32_t source = row["source"].as<uint32_t>();
            out.write( (const char *) &source, sizeof(uint32_t) );

            uint32_t target = row["target"].as<uint32_t>();
            out.write( (const char *) &target, sizeof(uint32_t) );

            int32_t length = row["length"].as<int32_t>();
            out.write( (const char *) &length, sizeof(int32_t) );

            int16_t dir = row["dir"].as<int16_t>();
            out.write( (const char *) &dir, sizeof(int16_t) );

            int32_t weight = row["weight"].as<int32_t>();
            out.write( (const char *) &weight, sizeof(int32_t) );

            int16_t rank = row["rank"].as<int16_t>();
            out.write( (const char *) &rank, sizeof(int16_t) );

            uint32_t nameid = row["nameid"].as<uint32_t>();
            out.write( (const char *) &nameid, sizeof(uint32_t) );

            uchar_t flag = (row["roundabout"].as<int>() == 1) ? '\01' : '\0';
            out.write( (const char *) &flag, sizeof(uchar_t) );

            flag = (row["ignoreingrid"].as<int>() == 1) ? '\01' : '\0';
            out.write( (const char *) &flag, sizeof(uchar_t) );

            flag = (row["accessrestricted"].as<int>() == 1) ? '\01' : '\0';
            out.write( (const char *) &flag, sizeof(uchar_t) );

            // write a dummy byte for word alignment
            flag = '\0';
            out.write( (const char *) &flag, sizeof(uchar_t) );
        }
        if ( result.size() < CURSOR_CHUNK ) break;
    }
    return cnt;
}
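Each edge record written above has a fixed 28-byte layout: uint32 source, uint32 target, int32 length, int16 dir, int32 weight, int16 rank, uint32 nameid, three one-byte flags (roundabout, ignoreingrid, accessrestricted), and one padding byte, all preceded by a single uint32 edge count at the start of the stream. Below is a minimal reader sketch for that layout; the Edge struct and readEdges are illustrative, not part of the original tool:

#include <cstdint>
#include <fstream>
#include <vector>

struct Edge {
    uint32_t source, target;
    int32_t  length;
    int16_t  dir;
    int32_t  weight;
    int16_t  rank;
    uint32_t nameid;
    uint8_t  roundabout, ignoreingrid, accessrestricted;
};

std::vector<Edge> readEdges( std::ifstream &in )
{
    // leading count, then one fixed-size record per edge
    uint32_t cnt = 0;
    in.read( (char *) &cnt, sizeof(uint32_t) );
    std::vector<Edge> edges( cnt );
    for ( Edge &e : edges ) {
        in.read( (char *) &e.source, sizeof(uint32_t) );
        in.read( (char *) &e.target, sizeof(uint32_t) );
        in.read( (char *) &e.length, sizeof(int32_t) );
        in.read( (char *) &e.dir,    sizeof(int16_t) );
        in.read( (char *) &e.weight, sizeof(int32_t) );
        in.read( (char *) &e.rank,   sizeof(int16_t) );
        in.read( (char *) &e.nameid, sizeof(uint32_t) );
        in.read( (char *) &e.roundabout,       1 );
        in.read( (char *) &e.ignoreingrid,     1 );
        in.read( (char *) &e.accessrestricted, 1 );
        in.ignore( 1 );  // skip the dummy alignment byte
    }
    return edges;
}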