Example #1
0
// Wraps a raw pointer in the shared-data node. CHECK_NOTNULL aborts the
// process if _t is null, so t is guaranteed non-null after construction.
// The upgraded flag starts false (no exclusive upgrade has happened yet).
Shared<T>::Data::Data(T* _t)
  : t(CHECK_NOTNULL(_t)), upgraded(false) {}
Example #2
0
// Builds the internal "unrolled" net that implements the recurrence.
// Inputs:
//   bottom[0] ("x"):    data with shape (#timesteps T_, #streams N_, ...).
//   bottom[1] ("cont"): sequence-continuation indicators, shape (T_, N_).
//   bottom[2] ("x_static", optional): per-stream static input, first axis N_.
// Side effects: sets T_, N_, static_input_, unrolled_net_, the various
// blob-pointer caches, this->blobs_ and param_propagate_down_.
void RecurrentLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  CHECK_GE(bottom[0]->num_axes(), 2)
      << "bottom[0] must have at least 2 axes -- (#timesteps, #streams, ...)";
  T_ = bottom[0]->shape(0);
  N_ = bottom[0]->shape(1);
  LOG(INFO) << "Initializing recurrent layer: assuming input batch contains "
            << T_ << " timesteps of " << N_ << " independent streams.";

  CHECK_EQ(bottom[1]->num_axes(), 2)
      << "bottom[1] must have exactly 2 axes -- (#timesteps, #streams)";
  CHECK_EQ(T_, bottom[1]->shape(0));
  CHECK_EQ(N_, bottom[1]->shape(1));

  // If provided, bottom[2] is a static input to the recurrent net.
  static_input_ = (bottom.size() > 2);
  if (static_input_) {
    CHECK_GE(bottom[2]->num_axes(), 1);
    CHECK_EQ(N_, bottom[2]->shape(0));
  }

  // Create a NetParameter; setup the inputs that aren't unique to particular
  // recurrent architectures.
  NetParameter net_param;
  // force_backward so gradients flow back to "x" even without an inner loss.
  net_param.set_force_backward(true);

  // Input "x" mirrors bottom[0]'s full shape.
  net_param.add_input("x");
  BlobShape input_shape;
  for (int i = 0; i < bottom[0]->num_axes(); ++i) {
    input_shape.add_dim(bottom[0]->shape(i));
  }
  net_param.add_input_shape()->CopyFrom(input_shape);

  // Input "cont" mirrors bottom[1]'s shape.
  input_shape.Clear();
  for (int i = 0; i < bottom[1]->num_axes(); ++i) {
    input_shape.add_dim(bottom[1]->shape(i));
  }
  net_param.add_input("cont");
  net_param.add_input_shape()->CopyFrom(input_shape);

  if (static_input_) {
    input_shape.Clear();
    for (int i = 0; i < bottom[2]->num_axes(); ++i) {
      input_shape.add_dim(bottom[2]->shape(i));
    }
    net_param.add_input("x_static");
    net_param.add_input_shape()->CopyFrom(input_shape);
  }

  // Call the child's FillUnrolledNet implementation to specify the unrolled
  // recurrent architecture.
  this->FillUnrolledNet(&net_param);

  // Prepend this layer's name to the names of each layer in the unrolled net.
  const string& layer_name = this->layer_param_.name();
  if (layer_name.size() > 0) {
    for (int i = 0; i < net_param.layer_size(); ++i) {
      LayerParameter* layer = net_param.mutable_layer(i);
      layer->set_name(layer_name + "_" + layer->name());
    }
  }

  // Create the unrolled net.
  unrolled_net_.reset(new Net<Dtype>(net_param));
  unrolled_net_->set_debug_info(
      this->layer_param_.recurrent_param().debug_info());

  // Setup pointers to the inputs. CHECK_NOTNULL aborts if FillUnrolledNet
  // failed to define any of the expected blobs.
  x_input_blob_ = CHECK_NOTNULL(unrolled_net_->blob_by_name("x").get());
  cont_input_blob_ = CHECK_NOTNULL(unrolled_net_->blob_by_name("cont").get());
  if (static_input_) {
    x_static_input_blob_ =
        CHECK_NOTNULL(unrolled_net_->blob_by_name("x_static").get());
  }

  // Setup pointers to paired recurrent inputs/outputs (e.g. hidden/cell
  // state blobs carried across batches).
  vector<string> recur_input_names;
  RecurrentInputBlobNames(&recur_input_names);
  vector<string> recur_output_names;
  RecurrentOutputBlobNames(&recur_output_names);
  const int num_recur_blobs = recur_input_names.size();
  CHECK_EQ(num_recur_blobs, recur_output_names.size());
  recur_input_blobs_.resize(num_recur_blobs);
  recur_output_blobs_.resize(num_recur_blobs);
  for (int i = 0; i < recur_input_names.size(); ++i) {
    recur_input_blobs_[i] =
        CHECK_NOTNULL(unrolled_net_->blob_by_name(recur_input_names[i]).get());
    recur_output_blobs_[i] =
        CHECK_NOTNULL(unrolled_net_->blob_by_name(recur_output_names[i]).get());
  }

  // Setup pointers to outputs.
  vector<string> output_names;
  OutputBlobNames(&output_names);
  CHECK_EQ(top.size(), output_names.size())
      << "OutputBlobNames must provide an output blob name for each top.";
  output_blobs_.resize(output_names.size());
  for (int i = 0; i < output_names.size(); ++i) {
    output_blobs_[i] =
        CHECK_NOTNULL(unrolled_net_->blob_by_name(output_names[i]).get());
  }

  // We should have 2 inputs (x and cont), plus a number of recurrent inputs,
  // plus maybe a static input.
  CHECK_EQ(2 + num_recur_blobs + static_input_,
           unrolled_net_->input_blobs().size());

  // This layer's parameters are any parameters in the layers of the unrolled
  // net. We only want one copy of each parameter, so check that the parameter
  // is "owned" by the layer, rather than shared with another.
  this->blobs_.clear();
  for (int i = 0; i < unrolled_net_->params().size(); ++i) {
    if (unrolled_net_->param_owners()[i] == -1) {
      LOG(INFO) << "Adding parameter " << i << ": "
                << unrolled_net_->param_display_names()[i];
      this->blobs_.push_back(unrolled_net_->params()[i]);
    }
  }
  // Check that param_propagate_down is set for all of the parameters in the
  // unrolled net; set param_propagate_down to true in this layer.
  for (int i = 0; i < unrolled_net_->layers().size(); ++i) {
    for (int j = 0; j < unrolled_net_->layers()[i]->blobs().size(); ++j) {
      CHECK(unrolled_net_->layers()[i]->param_propagate_down(j))
          << "param_propagate_down not set for layer " << i << ", param " << j;
    }
  }
  this->param_propagate_down_.clear();
  this->param_propagate_down_.resize(this->blobs_.size(), true);

  // Set the diffs of recurrent outputs to 0 -- we can't backpropagate across
  // batches.
  for (int i = 0; i < recur_output_blobs_.size(); ++i) {
    caffe_set(recur_output_blobs_[i]->count(), Dtype(0),
              recur_output_blobs_[i]->mutable_cpu_diff());
  }
}
Example #3
0
// Advances the iterator by one step, recording the call in the journal.
// The iterator must be non-null; CHECK_NOTNULL aborts otherwise.
void principia__IteratorIncrement(Iterator* const iterator) {
  journal::Method<journal::IteratorIncrement> m({iterator});
  Iterator* const it = CHECK_NOTNULL(iterator);
  it->Increment();
  return m.Return();
}
// Builds the internal "unrolled" net implementing the recurrence (newer
// variant: uses an explicit "Input" layer and per-output pseudo-losses
// instead of force_backward, and optionally exposes the hidden state).
// Inputs:
//   bottom[0] ("x"):    data with shape (#timesteps T_, #streams N_, ...).
//   bottom[1] ("cont"): sequence-continuation indicators, shape (T_, N_).
//   bottom[2] ("x_static", optional): per-stream static input (only when
//     bottom.size() exceeds 2 plus the number of exposed-hidden bottoms).
// Side effects: sets T_, N_, expose_hidden_, static_input_, unrolled_net_,
// the blob-pointer caches, this->blobs_, param_propagate_down_ and
// last_layer_index_.
void RecurrentLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  CHECK_GE(bottom[0]->num_axes(), 2)
      << "bottom[0] must have at least 2 axes -- (#timesteps, #streams, ...)";
  T_ = bottom[0]->shape(0);
  N_ = bottom[0]->shape(1);
  LOG(INFO) << "Initializing recurrent layer: assuming input batch contains "
            << T_ << " timesteps of " << N_ << " independent streams.";

  CHECK_EQ(bottom[1]->num_axes(), 2)
      << "bottom[1] must have exactly 2 axes -- (#timesteps, #streams)";
  CHECK_EQ(T_, bottom[1]->shape(0));
  CHECK_EQ(N_, bottom[1]->shape(1));

  // If expose_hidden is set, we take as input and produce as output
  // the hidden state blobs at the first and last timesteps.
  expose_hidden_ = this->layer_param_.recurrent_param().expose_hidden();

  // Get (recurrent) input/output names.
  vector<string> output_names;
  OutputBlobNames(&output_names);
  vector<string> recur_input_names;
  RecurrentInputBlobNames(&recur_input_names);
  vector<string> recur_output_names;
  RecurrentOutputBlobNames(&recur_output_names);
  const int num_recur_blobs = recur_input_names.size();
  CHECK_EQ(num_recur_blobs, recur_output_names.size());

  // If provided, bottom[2] is a static input to the recurrent net.
  // (expose_hidden_ is a bool used as 0/1 here.)
  const int num_hidden_exposed = expose_hidden_ * num_recur_blobs;
  static_input_ = (bottom.size() > 2 + num_hidden_exposed);
  if (static_input_) {
    CHECK_GE(bottom[2]->num_axes(), 1);
    CHECK_EQ(N_, bottom[2]->shape(0));
  }

  // Create a NetParameter; setup the inputs that aren't unique to particular
  // recurrent architectures.
  NetParameter net_param;

  // A single "Input" layer provides "x", "cont" and (optionally) "x_static".
  LayerParameter* input_layer_param = net_param.add_layer();
  input_layer_param->set_type("Input");
  InputParameter* input_param = input_layer_param->mutable_input_param();
  input_layer_param->add_top("x");
  BlobShape input_shape;
  for (int i = 0; i < bottom[0]->num_axes(); ++i) {
    input_shape.add_dim(bottom[0]->shape(i));
  }
  input_param->add_shape()->CopyFrom(input_shape);

  input_shape.Clear();
  for (int i = 0; i < bottom[1]->num_axes(); ++i) {
    input_shape.add_dim(bottom[1]->shape(i));
  }
  input_layer_param->add_top("cont");
  input_param->add_shape()->CopyFrom(input_shape);

  if (static_input_) {
    input_shape.Clear();
    for (int i = 0; i < bottom[2]->num_axes(); ++i) {
      input_shape.add_dim(bottom[2]->shape(i));
    }
    input_layer_param->add_top("x_static");
    input_param->add_shape()->CopyFrom(input_shape);
  }

  // Call the child's FillUnrolledNet implementation to specify the unrolled
  // recurrent architecture.
  this->FillUnrolledNet(&net_param);

  // Prepend this layer's name to the names of each layer in the unrolled net.
  const string& layer_name = this->layer_param_.name();
  if (layer_name.size()) {
    for (int i = 0; i < net_param.layer_size(); ++i) {
      LayerParameter* layer = net_param.mutable_layer(i);
      layer->set_name(layer_name + "_" + layer->name());
    }
  }

  // Add "pseudo-losses" to all outputs to force backpropagation.
  // (Setting force_backward is too aggressive as we may not need to backprop to
  // all inputs, e.g., the sequence continuation indicators.)
  vector<string> pseudo_losses(output_names.size());
  for (int i = 0; i < output_names.size(); ++i) {
    LayerParameter* layer = net_param.add_layer();
    pseudo_losses[i] = output_names[i] + "_pseudoloss";
    layer->set_name(pseudo_losses[i]);
    layer->set_type("Reduction");
    layer->add_bottom(output_names[i]);
    layer->add_top(pseudo_losses[i]);
    layer->add_loss_weight(1);
  }

  // Create the unrolled net.
  unrolled_net_.reset(new Net<Dtype>(net_param));
  unrolled_net_->set_debug_info(
      this->layer_param_.recurrent_param().debug_info());

  // Setup pointers to the inputs. CHECK_NOTNULL aborts if FillUnrolledNet
  // failed to define any of the expected blobs.
  x_input_blob_ = CHECK_NOTNULL(unrolled_net_->blob_by_name("x").get());
  cont_input_blob_ = CHECK_NOTNULL(unrolled_net_->blob_by_name("cont").get());
  if (static_input_) {
    x_static_input_blob_ =
        CHECK_NOTNULL(unrolled_net_->blob_by_name("x_static").get());
  }

  // Setup pointers to paired recurrent inputs/outputs.
  recur_input_blobs_.resize(num_recur_blobs);
  recur_output_blobs_.resize(num_recur_blobs);
  for (int i = 0; i < recur_input_names.size(); ++i) {
    recur_input_blobs_[i] =
        CHECK_NOTNULL(unrolled_net_->blob_by_name(recur_input_names[i]).get());
    recur_output_blobs_[i] =
        CHECK_NOTNULL(unrolled_net_->blob_by_name(recur_output_names[i]).get());
  }

  // Setup pointers to outputs. Exposed-hidden tops come after the regular
  // outputs, hence the subtraction.
  CHECK_EQ(top.size() - num_hidden_exposed, output_names.size())
      << "OutputBlobNames must provide an output blob name for each top.";
  output_blobs_.resize(output_names.size());
  for (int i = 0; i < output_names.size(); ++i) {
    output_blobs_[i] =
        CHECK_NOTNULL(unrolled_net_->blob_by_name(output_names[i]).get());
  }

  // We should have 2 inputs (x and cont), plus a number of recurrent inputs,
  // plus maybe a static input.
  CHECK_EQ(2 + num_recur_blobs + static_input_,
           unrolled_net_->input_blobs().size());

  // This layer's parameters are any parameters in the layers of the unrolled
  // net. We only want one copy of each parameter, so check that the parameter
  // is "owned" by the layer, rather than shared with another.
  this->blobs_.clear();
  for (int i = 0; i < unrolled_net_->params().size(); ++i) {
    if (unrolled_net_->param_owners()[i] == -1) {
      LOG(INFO) << "Adding parameter " << i << ": "
                << unrolled_net_->param_display_names()[i];
      this->blobs_.push_back(unrolled_net_->params()[i]);
    }
  }
  // Check that param_propagate_down is set for all of the parameters in the
  // unrolled net; set param_propagate_down to true in this layer.
  for (int i = 0; i < unrolled_net_->layers().size(); ++i) {
    for (int j = 0; j < unrolled_net_->layers()[i]->blobs().size(); ++j) {
      CHECK(unrolled_net_->layers()[i]->param_propagate_down(j))
          << "param_propagate_down not set for layer " << i << ", param " << j;
    }
  }
  this->param_propagate_down_.clear();
  this->param_propagate_down_.resize(this->blobs_.size(), true);

  // Set the diffs of recurrent outputs to 0 -- we can't backpropagate across
  // batches.
  for (int i = 0; i < recur_output_blobs_.size(); ++i) {
    caffe_set(recur_output_blobs_[i]->count(), Dtype(0),
              recur_output_blobs_[i]->mutable_cpu_diff());
  }

  // Check that the last output_names.size() layers are the pseudo-losses;
  // set last_layer_index so that we don't actually run these layers.
  const vector<string>& layer_names = unrolled_net_->layer_names();
  last_layer_index_ = layer_names.size() - 1 - pseudo_losses.size();
  for (int i = last_layer_index_ + 1, j = 0; i < layer_names.size(); ++i, ++j) {
    CHECK_EQ(layer_names[i], pseudo_losses[j]);
  }
}
Example #5
0
// Uploads point positions, per-point colors and per-point sizes to GPU vertex
// buffers and binds them to the shader attributes "vpoint", "vcolor", "vsize".
//   points: required (CHECK_NOTNULL), 3xN matrix, one column per point.
//   colors: optional 3xN matrix (same column count as points); when null a
//           default all-red color matrix is substituted.
//   sizes:  optional N-vector of per-point sizes; when null a default vector
//           is substituted. NOTE(review): the default _sizes is never filled
//           with values, so its contents are unspecified — confirm intent.
void CloudRenderer::setup(Matrix_3xN* points, Matrix_3xN* colors, VectorN* sizes) {
    if (!program.isLinked()) {
        init(); // setup is called before init, when shader is not yet compiled...
    }

    CHECK_NOTNULL(points);
    if(colors) CHECK(points->cols() == colors->cols());
    if(sizes) CHECK(sizes->size() == points->cols());
    this->num_points = points->cols();

    vao.bind();
    program.bind();

// #define CLOUD_PRINT_DATA
#ifdef CLOUD_PRINT_DATA
    std::cout << "data" << std::endl;
    for(int i=0; i<points->cols(); i++)
        std::cout << points->col(i).transpose() << std::endl;
#endif

    ///--- Create vertex buffer/attributes "position"
    {
        bool success = vertexbuffer.create();
        assert(success);
        vertexbuffer.setUsagePattern( QGLBuffer::StaticDraw );
        success = vertexbuffer.bind();
        assert(success);
        vertexbuffer.allocate( points->data(), sizeof(Scalar) * points->size() );
        program.setAttributeBuffer("vpoint", GL_FLOAT, 0, 3 );
        program.enableAttributeArray("vpoint");
    }

    ///--- Create vertex buffer/attributes "colors"
    if(!colors){
        // NOTE: function-local static is shared across all instances/calls
        // and not thread-safe; kept for behavior compatibility.
        static Matrix_3xN _colors(points->rows(), points->cols());
        _colors.row(0).array().setConstant(1); ///< force red
        colors = &_colors;
    }
    {
        bool success = vcolor_buf.create();
        assert(success);
        vcolor_buf.setUsagePattern( QGLBuffer::StaticDraw );
        success = vcolor_buf.bind();
        assert(success);
        vcolor_buf.allocate( colors->data(), sizeof(Scalar) * points->size() );
        program.setAttributeBuffer("vcolor", GL_FLOAT, 0, 3 );
        program.enableAttributeArray("vcolor");
    }

    ///--- Create vertex buffer/attributes "sizes"
    if(!sizes){
        static VectorN _sizes(points->cols());
        sizes = &_sizes;
    }
    {
        bool success = vsize_buf.create(); assert(success);
        vsize_buf.setUsagePattern( QGLBuffer::StaticDraw );
        success = vsize_buf.bind(); assert(success);
        // BUG FIX: previously uploaded colors->data() here, so "vsize" read
        // color components instead of the point sizes.
        vsize_buf.allocate( sizes->data(), sizeof(Scalar) * sizes->size() );
        program.setAttributeBuffer("vsize", GL_FLOAT, 0, 1 );
        program.enableAttributeArray("vsize");
    }

    program.release();
    vao.release();
}
Example #6
0
// Classifies the file at `path` using libmagic.
// magic_file() returns nullptr on failure, in which case CHECK_NOTNULL
// aborts; otherwise the C-string description is copied into a std::string.
std::string Magic::file(char const* path) const
{
  char const* const description = CHECK_NOTNULL(magic_file(magic_, path));
  return std::string{description};
}
Example #7
0
// Serializes, per client, the subset of rows selected by each table's
// GetPartialTableToSend() into a single ServerPushRowMsg and sends it via
// PushMsgSend. Tables within a message are laid out as:
//   [table_id][rows...][separator] ... [table_id][rows...][end-marker]
// Clients with nothing to send get no message. Returns the total number of
// bytes handed to PushMsgSend.
size_t Server::CreateSendServerPushRowMsgsPartial(
    PushMsgSendFunc PushMsgSend) {
  boost::unordered_map<int32_t, RecordBuff> buffs;
  boost::unordered_map<int32_t, ServerPushRowMsg*> msg_map;
  boost::unordered_map<int32_t, size_t> client_buff_size;
  boost::unordered_map<int32_t,
                       boost::unordered_map<int32_t, ServerRow*> >
      table_rows_to_send;

  accum_oplog_count_ = 0;

  size_t accum_send_bytes = 0;

  int32_t comm_channel_idx
      = GlobalContext::GetCommChannelIndexServer(server_id_);

  // Create a message for each bg thread
  // (initialize every client's buffer size to 0 so clients with no rows are
  // still present in the map).
  int32_t client_id = 0;
  for (client_id = 0;
       client_id < GlobalContext::get_num_clients(); ++client_id) {
    client_buff_size[client_id] = 0;
  }

  // Ask each table which rows it wants to push and how many bytes they need
  // per client; row counts are capped by the server push row threshold.
  for (auto table_iter = tables_.begin(); table_iter != tables_.end();
       table_iter++) {
    int32_t table_id = table_iter->first;

    table_rows_to_send.insert(std::make_pair(
        table_id,
        boost::unordered_map<int32_t, ServerRow*>()));

    table_iter->second.GetPartialTableToSend(
        &(table_rows_to_send[table_id]),
        &client_buff_size,
        GlobalContext::get_server_push_row_threshold());
  }

  size_t num_tables = tables_.size();

  // Reserve space for the per-table framing: one table_id int32 plus one
  // separator/end-marker int32 for every table, but only for clients that
  // actually have row data.
  for (auto buff_size_iter = client_buff_size.begin();
       buff_size_iter != client_buff_size.end(); ++buff_size_iter) {

    if (buff_size_iter->second > 0)
      buff_size_iter->second += (sizeof(int32_t) + sizeof(int32_t))*num_tables;
  }

  // Allocate one message per client with data; record 0 (null) for clients
  // with nothing to send.
  // NOTE(review): the inner client_id below shadows the loop variable
  // declared above — harmless but worth cleaning up.
  for (auto buff_size_iter = client_buff_size.begin();
       buff_size_iter != client_buff_size.end(); buff_size_iter++) {
    size_t buff_size = buff_size_iter->second;
    int32_t client_id = buff_size_iter->first;

    if (buff_size == 0) {
      msg_map[client_id] = 0;
      continue;
    }

    ServerPushRowMsg *msg = new ServerPushRowMsg(buff_size);
    msg_map[client_id] = msg;
    buffs.insert(
        std::make_pair(client_id,
                       RecordBuff(msg->get_data(), buff_size)));
  }

  size_t num_tables_left = tables_.size();

  // Serialize each table into every client buffer: table id, then the rows,
  // then a separator (or the end marker after the last table).
  for (auto table_iter = tables_.begin(); table_iter != tables_.end();
       table_iter++) {
    int32_t table_id = table_iter->first;
    ServerTable &server_table = table_iter->second;

    for (auto buff_iter = buffs.begin(); buff_iter != buffs.end();
         ++buff_iter) {
      RecordBuff &record_buff = buff_iter->second;
      int32_t *table_id_ptr = record_buff.GetMemPtrInt32();
      CHECK_NOTNULL(table_id_ptr);
      *table_id_ptr = table_id;
    }

    server_table.AppendRowsToBuffsPartial(
        &buffs, table_rows_to_send[table_id]);

    --num_tables_left;

    for (auto buff_iter = buffs.begin(); buff_iter != buffs.end();
         ++buff_iter) {
      RecordBuff &record_buff = buff_iter->second;
      int32_t *table_end_ptr = record_buff.GetMemPtrInt32();
      CHECK_NOTNULL(table_end_ptr);

      if (num_tables_left == 0)
        *table_end_ptr = GlobalContext::get_serialized_table_end();
      else
        *table_end_ptr = GlobalContext::get_serialized_table_separator();
    }
  }

  // Send each non-empty message to the client's bg thread on this server's
  // comm channel, then free it.
  // NOTE(review): msg is deleted right after PushMsgSend — presumably
  // PushMsgSend copies or fully consumes the message; confirm.
  for (auto msg_iter = msg_map.begin(); msg_iter != msg_map.end();
       msg_iter++) {
    int32_t client_id = msg_iter->first;
    ServerPushRowMsg *msg = msg_iter->second;
    if (msg == 0)
      continue;

    accum_send_bytes += msg->get_size();

    int32_t bg_id = GlobalContext::get_bg_thread_id(client_id,
                                                    comm_channel_idx);
    PushMsgSend(bg_id, msg, false, GetBgVersion(bg_id), GetMinClock());

    VLOG(0) << "Send server push row size = " << msg->get_avai_size()
            << " to bg id = " << bg_id
            << " server id = " << ThreadContext::get_id();

    delete msg;
  }

  return accum_send_bytes;
}
Example #8
0
// Wakes the waiter registered under `id`.
// The waiter must exist (CHECK_NOTNULL aborts otherwise); access to
// waitings_ is serialized by m_.
void WorkerTable::Notify(int id) {
  m_.lock();
  auto& waiter = waitings_[id];
  CHECK_NOTNULL(waiter);
  waiter->Notify();
  m_.unlock();
}
Example #9
0
// Re-arms the waiter registered under `msg_id` to expect `num_wait`
// notifications. The waiter must exist (CHECK_NOTNULL aborts otherwise);
// access to waitings_ is serialized by m_.
void WorkerTable::Reset(int msg_id, int num_wait) {
  m_.lock();
  auto& waiter = waitings_[msg_id];
  CHECK_NOTNULL(waiter);
  waiter->Reset(num_wait);
  m_.unlock();
}
INITIALIZE_EASYLOGGINGPP

// Demo/exercise program for the easylogging++ CHECK family of macros.
// The first batch of checks is intentionally false (abort-on-fatal is
// disabled above so the program keeps running); the second batch must pass.
int main(void) {
    // Without this flag a failing CHECK would abort the process.
    el::Loggers::addFlag(el::LoggingFlag::DisableApplicationAbortOnFatalLog);
    
    // These checks should fail
    LOG(INFO) << "----- DONT WORRY ABOUT FOLLOWING CHECKS FAILING - THEY ARE EXPECTED";
    CHECK(1 > 2) << "1 is not greater than 2";
    CHECK_EQ(1, 2) << "1 is not equal to 2";
    CHECK_NE(1, 1) << "Wow, I did not know 1 == 1";
    CHECK_STREQ("abc", "def") << " :)";
    CHECK_STRNE("abc", "abc") << " :(";
    CHECK_STRCASEEQ("abc", "ABCD") << " :p";
    CHECK_STRCASENE("abc", "ABC") << " B)";
    // CHECK_NOTNULL returns its (non-null) argument unchanged.
    int* f = new int;
    int* toF = CHECK_NOTNULL(f);

    (void)toF; // Unused warning suppression

    delete f;
    f = nullptr;
    // These checks should pass 
    LOG(WARNING) << "----- START WORRYING ABOUT CHECKS NOW";
    CHECK(1 < 2) << " snap -- lib has bug!";
    CHECK_EQ(1, 1) << " snap -- lib has bug!";
    CHECK_NE(1, 2) << " snap -- lib has bug!";
    CHECK_STREQ("abc", "abc") << " snap -- lib has bug!";
    CHECK_STRNE("abc", "abe") << " snap -- lib has bug!";
    CHECK_STRCASEEQ("abc", "ABC") << " snap -- lib has bug!";
    CHECK_STRCASENE("abc", "ABE") << " snap -- lib has bug!";
    LOG(INFO) << "----- HOPEFULLY NO CHECK FAILED SINCE YOU STARTED WORRYING!";

    // DCHECKs
    // (debug-only variants; compiled out in release builds)
    DCHECK(1 > 2) << "1 is not greater than 2";
    DCHECK_EQ(1, 2) << "1 is not equal to 2";
    DCHECK_NE(1, 1) << "Wow, I did not know 1 == 1";
    DCHECK_STREQ("abc", "def") << " :)";
    DCHECK_STRNE("abc", "abc") << " :(";
    DCHECK_STRCASEEQ("abc", "ABCD") << " :p";
    DCHECK_STRCASENE("abc", "ABC") << " B)";
    
    // PCHECKs
    // (check plus errno reporting; the open is expected to fail)
    std::fstream fstr("a/file/that/does/not/exist", std::fstream::in);
    PCHECK(fstr.is_open());
    DPCHECK(fstr.is_open());

    // CHECK_BOUNDS(i, min, max): the bounds are inclusive, so only the
    // i == 6 checks below are expected to fail.
    int min = 1;
    int max = 5;
    CHECK_BOUNDS(1, min, max) << "Index out of bounds";
    CHECK_BOUNDS(2, min, max) << "Index out of bounds";
    CHECK_BOUNDS(3, min, max) << "Index out of bounds";
    CHECK_BOUNDS(4, min, max) << "Index out of bounds";
    CHECK_BOUNDS(5, min, max) << "Index out of bounds";
    CHECK_BOUNDS(6, min, max) << "Index out of bounds";
    DCHECK_BOUNDS(1, min, max) << "Index out of bounds";
    DCHECK_BOUNDS(2, min, max) << "Index out of bounds";
    DCHECK_BOUNDS(3, min, max) << "Index out of bounds";
    DCHECK_BOUNDS(4, min, max) << "Index out of bounds";
    DCHECK_BOUNDS(5, min, max) << "Index out of bounds";
    DCHECK_BOUNDS(6, min, max) << "Index out of bounds";

    return 0;
}
Example #11
0
// Takes ownership of the isolator process and spawns it.
// The process pointer must be non-null; CHECK_NOTNULL aborts otherwise.
Isolator::Isolator(Owned<IsolatorProcess> _process)
  : process(_process)
{
  IsolatorProcess* const p = CHECK_NOTNULL(process.get());
  process::spawn(p);
}
Example #12
0
// Returns the bounding rectangle of this well.
// The well rectangle must have been assigned beforehand (CHECK_NOTNULL
// aborts if it is still null).
const cv::Rect WellDecoder::getWellRectangle() const {
    CHECK_NOTNULL(wellRectangle.get());
    const cv::Rect rect = wellRectangle->getRectangle();
    VLOG(9) << "getWellRectangle: bbox: " << rect;
    return rect;
}
Example #13
0
// Const member access: returns the underlying pointer, aborting via
// CHECK_NOTNULL if the shared pointer is empty.
const T* Shared<T>::operator -> () const
{
  return CHECK_NOTNULL(get());
}
Example #14
0
// Const dereference: returns a reference to the pointee, aborting via
// CHECK_NOTNULL if the shared pointer is empty (never dereferences null).
const T& Shared<T>::operator * () const
{
  return *CHECK_NOTNULL(get());
}
Example #15
0
// Stores the (required, non-null) completion callback and the wheel-timer
// instance used for connection timeouts. CHECK_NOTNULL aborts on a null
// callback.
HTTPConnector::HTTPConnector(Callback* callback,
                             const WheelTimerInstance& timeout)
    : cb_(CHECK_NOTNULL(callback)), timeout_(timeout) {}
// Performs the shared-memory client connect handshake with the server.
//   aTime: timeout (in the units of NSHARE::get_time()) for waiting on the
//          server's E_CONNECTED reply.
// Returns true when connected (or already connected), false on failure.
// Protocol: publish an E_CONNECT event carrying our allocator offset, then
// wait for E_CONNECTED; on timeout, either withdraw the pending event or
// retry once if the server is still processing the connection.
bool IMPL_CLASS::MConnect(double aTime)
{
	if (!MIsOpen())
	{
		LOG(ERROR)<<"The shared client is not opened.";
		return false;
	}
	// All of these must have been set up by MOpen before connecting.
	CHECK_NOTNULL(FMyInfo);
	CHECK_NOTNULL(FEv.FEvents);
	CHECK_NOTNULL(FServerInfo);
	// Serialize concurrent connect attempts.
	NSHARE::CRAII<CMutex> _block(FConnectMutex);
	if (MIsConnected())
	{
		LOG(ERROR)<<"connected already";
		return true;
	}
	VLOG(2) << "Connect "<<FMyInfo->FInfo.FId<<" Time=" << aTime;
	// Announce ourselves: send E_CONNECT with our shared-memory offset so the
	// server can locate our info block.
	event_info_t _info;
	_info.FEventType = event_info_t::E_CONNECT;
	_info.FConnect.FClientOffset = static_cast<CSharedAllocator::offset_t>(FSharedMemory.MGetAllocator()->MOffset(FMyInfo));
	MInvokeEvent(&_info);

	_info.FEventType = event_info_t::E_NO;
	double  _time=NSHARE::get_time();
	bool _is_not_timeout=false;
	bool _is_try_again=false;
	do
	{
		// Poll for the server's reply until aTime elapses or the client is
		// closed; unknown events are logged and ignored.
		for(;(NSHARE::get_time()-_time)<aTime && MIsOpen();)
		{
			_is_not_timeout=MWaitForEvent(FEv,&_info, aTime);
			VLOG_IF(2,_is_not_timeout)<<"Event recv="<<_info;
			if(!_is_not_timeout)
			{
				VLOG(2)<<"Event connecting is not received";
				continue;
			}
			if(_info.FEventType == event_info_t::E_CONNECTED)
				break;
			else
			{
				LOG(WARNING)<<"Receive unknown event "<<_info;
			}
		}

		if (_is_not_timeout)
		{
			// Got an event before the timeout; the loop above only exits on
			// E_CONNECTED, so anything else here is a logic error.
			if(_info.FEventType == event_info_t::E_CONNECTED)
			{
				LOG(INFO)<<"Connected";
				MEventConnected(_info.FIdFrom.MGetId(),_info.FConnect.FClientOffset);
				break;
			}
			else
				CHECK(false);
		}
		else if(MRemoveEvent(&_info,FEv))
		{
			// Timed out and our E_CONNECT was still queued (server never saw
			// it) -- withdraw it and give up.
			LOG(ERROR)<<"Cannot connect";
			CHECK_EQ(FMyInfo->FInfo.FId.FUniqueID,0);
			break;
		}else
		{
			// Timed out but the event was already consumed: the server is in
			// the middle of handling our connection. Retry the wait once.
			CHECK_NE(_info.FEventType, event_info_t::E_CONNECTED);
			if(_is_try_again)
			{
				DCHECK_EQ(FMyInfo->FInfo.FId.FUniqueID,0);
				if(FMyInfo->FInfo.FId.FUniqueID==0)
					return false;
				break;
			}
			_is_try_again=true;
			LOG(ERROR)<<"The server is handling the connection";
			 _time=NSHARE::get_time();
			 continue;
		}
	}while(!FIsConnected && MIsOpen());

	return FIsConnected;
}
Example #17
0
// Classifies the contents of an in-memory buffer using libmagic.
// magic_buffer() returns nullptr on failure, in which case CHECK_NOTNULL
// aborts; otherwise the C-string description is copied into a std::string.
std::string Magic::buffer(std::string_view bfr) const
{
  // static_cast is sufficient (and preferred) for char const* -> void const*;
  // reinterpret_cast was overkill here.
  auto const data = static_cast<void const*>(bfr.data());
  return CHECK_NOTNULL(magic_buffer(magic_, data, bfr.size()));
}
// Returns the identifier of the server this client talks to.
// The server info block must exist (CHECK_NOTNULL aborts otherwise).
shared_identify_t IMPL_CLASS::MServerIdentifier() const
{
	CHECK_NOTNULL(FServerInfo);
	const auto& _server_id = FServerInfo->FInfo.FId;
	return _server_id.MGetId();
}
Example #19
0
// Opens a libmagic handle configured to report MIME types and loads the
// default magic database. CHECK_NOTNULL aborts if magic_open fails;
// CHECK_EQ aborts (with magic's error text) if the database fails to load.
Magic::Magic()
  : magic_(CHECK_NOTNULL(magic_open(MAGIC_MIME)))
{
  CHECK_EQ(magic_load(magic_, nullptr), 0) << magic_error(magic_);
}
Example #20
0
// Returns this celestial's parent body. The parent pointer must have been
// set (CHECK_NOTNULL aborts on a null parent_, so we never dereference null).
inline Celestial const& Celestial::parent() const {
  Celestial const* const p = CHECK_NOTNULL(parent_);
  return *p;
}
Example #21
0
// Accessor for the global systemd flags.
// systemd_flags must already be initialized; CHECK_NOTNULL aborts before a
// null pointer could be dereferenced.
const Flags& flags()
{
  const Flags* const f = CHECK_NOTNULL(systemd_flags);
  return *f;
}
Example #22
0
// Sets this celestial's parent body. A null parent is a programming error:
// CHECK_NOTNULL aborts rather than storing nullptr.
inline void Celestial::set_parent(Celestial const* parent) {
  parent_ = CHECK_NOTNULL(parent);
}
Example #23
0
// Runs the test net identified by test_net_id for param_.test_iter(...)
// iterations, accumulating every output blob element across iterations, and
// logs the per-output mean scores (and mean loss when test_compute_loss is
// set). Honors snapshot/stop requests between iterations; returns early on a
// requested stop.
void Solver<Dtype>::Test(const int_tp test_net_id) {
  CHECK(Caffe::root_solver());
  LOG(INFO) << "Iteration " << iter_
            << ", Testing net (#" << test_net_id << ")";
  // The test net reuses the training net's learned weights.
  CHECK_NOTNULL(test_nets_[test_net_id].get())->
  ShareTrainedLayersWith(net_.get());
  vector<Dtype> test_score;
  vector<int_tp> test_score_output_id;
  vector<Blob<Dtype>*> bottom_vec;
  const shared_ptr<Net<Dtype> >& test_net = test_nets_[test_net_id];
  Dtype loss = 0;
  for (int_tp i = 0; i < param_.test_iter(test_net_id); ++i) {
    SolverAction::Enum request = GetRequestedAction();
    // Check to see if stoppage of testing/training has been requested.
    while (request != SolverAction::NONE) {
        if (SolverAction::SNAPSHOT == request) {
          Snapshot();
        } else if (SolverAction::STOP == request) {
          requested_early_exit_ = true;
        }
        request = GetRequestedAction();
    }
    if (requested_early_exit_) {
      // break out of test loop.
      break;
    }

    Dtype iter_loss;
    const vector<Blob<Dtype>*>& result =
    test_net->Forward(bottom_vec, &iter_loss);
    if (param_.test_compute_loss()) {
      loss += iter_loss;
    }
    // First iteration establishes the flattened score layout (one entry per
    // element of every output blob); later iterations accumulate into it.
    if (i == 0) {
      for (int_tp j = 0; j < result.size(); ++j) {
        const Dtype* result_vec = result[j]->cpu_data();
        for (int_tp k = 0; k < result[j]->count(); ++k) {
          test_score.push_back(result_vec[k]);
          test_score_output_id.push_back(j);
        }
      }
    } else {
      int_tp idx = 0;
      for (int_tp j = 0; j < result.size(); ++j) {
        const Dtype* result_vec = result[j]->cpu_data();
        for (int_tp k = 0; k < result[j]->count(); ++k) {
          test_score[idx++] += result_vec[k];
        }
      }
    }
  }
  if (requested_early_exit_) {
    LOG(INFO)     << "Test interrupted.";
    return;
  }
  if (param_.test_compute_loss()) {
    loss /= param_.test_iter(test_net_id);
    LOG(INFO) << "Test loss: " << loss;
  }
  // Report the mean of each accumulated score, scaled by its blob's loss
  // weight when one is set.
  for (int_tp i = 0; i < test_score.size(); ++i) {
    const int_tp output_blob_index =
    test_net->output_blob_indices()[test_score_output_id[i]];
    const string& output_name = test_net->blob_names()[output_blob_index];
    const Dtype loss_weight = test_net->blob_loss_weights()[output_blob_index];
    ostringstream loss_msg_stream;
    const Dtype mean_score = test_score[i] / param_.test_iter(test_net_id);
    if (loss_weight) {
      loss_msg_stream << " (* " << loss_weight
      << " = " << loss_weight * mean_score << " loss)";
    }
    LOG(INFO) << "    Test net output #" << i << ": " << output_name << " = "
              << mean_score << loss_msg_stream.str();
  }
}
Example #24
0
// Initializes the event over the given shared buffer by delegating to the
// pimpl, which must already be allocated (CHECK_NOTNULL aborts otherwise).
bool CIPCSignalEvent::MInit(uint8_t* aBuf, size_t aSize, eOpenType aHasToBeNew)
{
	CHECK_NOTNULL(FPImpl);
	const bool _is_done = FPImpl->MInit(aBuf, aSize, aHasToBeNew);
	return _is_done;
}
Example #25
0
// Extracts the held alternative as type T. boost::get returns a null pointer
// when the variant does not currently hold a T, in which case CHECK_NOTNULL
// aborts instead of dereferencing null.
const T& Value::as() const
{
  return *CHECK_NOTNULL(boost::get<T>(this));
}
Example #26
0
bool CIPCSignalEvent::MSignal()
{
	CHECK_NOTNULL(FPImpl);
	VLOG(2) << "Event " << FPImpl->FName << " is signaled.";
	return SetEvent(FPImpl->FSignalEvent) != FALSE;
}
Example #27
0
// Returns whether the iterator is exhausted, recording the call and its
// result in the journal. The iterator must be non-null (CHECK_NOTNULL).
bool principia__IteratorAtEnd(Iterator const* const iterator) {
  journal::Method<journal::IteratorAtEnd> m({iterator});
  bool const at_end = CHECK_NOTNULL(iterator)->AtEnd();
  return m.Return(at_end);
}
Example #28
0
// Returns the predicted trajectory of this celestial. Requires the object
// to be initialized and the prediction to exist; both CHECKs abort before
// a null prediction_ could be dereferenced.
inline Trajectory<Barycentric> const& Celestial::prediction() const {
  CHECK(is_initialized());
  return *CHECK_NOTNULL(prediction_);
}
Example #29
0
// Returns the number of elements in the iterator, recording the call and
// its result in the journal. The iterator must be non-null (CHECK_NOTNULL).
int principia__IteratorSize(Iterator const* const iterator) {
  journal::Method<journal::IteratorSize> m({iterator});
  int const size = CHECK_NOTNULL(iterator)->Size();
  return m.Return(size);
}
Example #30
0
// Transfers ownership of a C-style array out of *pointer into a
// std::unique_ptr<T[]>, nulling *pointer so the caller cannot double-free.
// pointer itself must be non-null (CHECK_NOTNULL); *pointer may be null,
// yielding an empty unique_ptr.
std::unique_ptr<T[]> TakeOwnershipArray(T** const pointer) {
  CHECK_NOTNULL(pointer);
  std::unique_ptr<T[]> owned_pointer(*pointer);
  *pointer = nullptr;
  return owned_pointer;
}