void Solver::Init() {
  LOG(INFO) << "Solver data type: " << Type_Name(data_type_);
  CHECK(Caffe::root_solver() || root_solver_)
      << "root_solver_ needs to be set for all non-root solvers";
  LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: "
      << std::endl << param_.DebugString();
  CHECK_GE(param_.average_loss(), 1) << "average_loss should be at least 1.";
  CheckSnapshotWritePermissions();
  if (Caffe::root_solver()) {
    // P2PSync does other solvers if they exist
    Caffe::set_root_seed(static_cast<uint64_t>(param_.random_seed()));
  }
  // Scaffolding code
  InitTrainNet();
  InitTestNets();
  LOG(INFO) << "Solver scaffolding done.";
  iter_ = 0;
  total_lapse_ = 0.F;
  current_step_ = 0;
}
template <typename Dtype>
void Solver<Dtype>::Init(const SolverParameter& param) {
  CHECK(Caffe::root_solver() || root_solver_)
      << "root_solver_ needs to be set for all non-root solvers";
  LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: "
      << std::endl << param.DebugString();
  param_ = param;
  CHECK_GE(param_.average_loss(), 1) << "average_loss should be at least 1.";
  CheckSnapshotWritePermissions();
  if (Caffe::root_solver() && param_.random_seed() >= 0) {
    Caffe::set_random_seed(param_.random_seed());
  }
  // Scaffolding code
  InitTrainNet();
  if (Caffe::root_solver()) {
    InitTestNets();
    LOG(INFO) << "Solver scaffolding done.";
  }
  iter_ = 0;
  current_step_ = 0;
}
const WdtTransferRequest &Sender::init() {
  VLOG(1) << "Sender Init() with encryption set = "
          << transferRequest_.encryptionData.isSet();
  if (validateTransferRequest() != OK) {
    LOG(ERROR) << "Couldn't validate the transfer request "
               << transferRequest_.getLogSafeString();
    return transferRequest_;
  }
  // TODO cleanup / most not necessary / duplicate state
  transferRequest_.protocolVersion = protocolVersion_;
  transferRequest_.directory = srcDir_;
  transferRequest_.hostName = destHost_;
  // TODO Figure out what to do with file info
  // transferRequest.fileInfo = dirQueue_->getFileInfo();
  transferRequest_.errorCode = OK;
  bool encrypt = transferRequest_.encryptionData.isSet();
  LOG_IF(INFO, encrypt) << "Encryption is enabled for this transfer";
  return transferRequest_;
}
template <typename Dtype>
int Net<Dtype>::appendBottom(const NetParameter& param, const int layer_id,
                             const int bottom_id, set<string>* available_blobs,
                             map<string, int>* blob_name_to_idx) {
  const LayerParameter& layer_param = param.layer(layer_id);
  const string& blob_name = layer_param.bottom(bottom_id);
  if (!available_blobs->count(blob_name))
    LOG(FATAL) << "Unknown bottom blob: " << blob_name
               << " at layer: " << layer_param.name() << ".";
  // a bottom blob must share a top blob
  const int blob_id = (*blob_name_to_idx)[blob_name];
  LOG_IF(INFO, Dragon::get_root_solver()) << layer_param.name()
      << " [Layer-Accept] <- " << blob_name << " [Blob-Name]";
  bottom_vecs[layer_id].push_back(blobs[blob_id].get());
  bottom_id_vecs[layer_id].push_back(blob_id);
  // ensure that a top blob must specify only one bottom blob
  // SplitLayer can be used to shadow a top blob into several top blobs
  available_blobs->erase(blob_name);
  bool need_bp = true;
  // default (TEST) is false
  bottoms_need_backward[layer_id].push_back(need_bp & blobs_need_backward[blob_id]);
  return blob_id;
}
INITIALIZE_EASYLOGGINGPP

int main(int argc, char** argv) {
  START_EASYLOGGINGPP(argc, argv);
  el::Loggers::addFlag(el::LoggingFlag::DisableApplicationAbortOnFatalLog);
  el::Loggers::addFlag(el::LoggingFlag::ColoredTerminalOutput);
  // You can uncomment the following lines to take advantage of hierarchical logging
  // el::Loggers::addFlag(el::LoggingFlag::HierarchicalLogging);
  // el::Loggers::setLoggingLevel(el::Level::Global);
  LOG(INFO); LOG(DEBUG); LOG(WARNING); LOG(ERROR); LOG(TRACE); VLOG(1); LOG(FATAL);
  DLOG(INFO); DLOG(DEBUG); DLOG(WARNING); DLOG(ERROR); DLOG(TRACE); DVLOG(1); DLOG(FATAL);
  el::Loggers::removeFlag(el::LoggingFlag::ColoredTerminalOutput);
  LOG_IF(true, INFO); LOG_IF(true, DEBUG); LOG_IF(true, WARNING); LOG_IF(true, ERROR);
  LOG_IF(true, TRACE); VLOG_IF(true, 1); LOG_IF(true, FATAL);
  LOG_EVERY_N(1, INFO); LOG_EVERY_N(1, DEBUG); LOG_EVERY_N(1, WARNING);
  LOG_EVERY_N(1, ERROR); LOG_EVERY_N(1, TRACE); VLOG_EVERY_N(1, 1); LOG_EVERY_N(1, FATAL);
  CHECK(1 == 1);
  CCHECK(1 == 1, "default");
  return 0;
}
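// Note: the argument order of LOG_IF differs between the logging libraries used
// in this collection. Easylogging++ (above) takes the condition first, while
// glog-style macros (as in the Caffe, WDT, and brpc snippets elsewhere in this
// section) take the severity first. A minimal side-by-side sketch, with a
// hypothetical `connected` flag:
//
//   LOG_IF(connected, INFO) << "connected";  // Easylogging++: LOG_IF(condition, LEVEL)
//   LOG_IF(INFO, connected) << "connected";  // glog:          LOG_IF(severity, condition)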
int KQueue::update(int nevents, const timespec *timeout, bool may_fail) {
  int err = kevent(kq, &events[0], changes_n, &events[0], nevents, timeout);
  auto kevent_errno = errno;
  bool is_fatal_error = [&] {
    if (err != -1) {
      return false;
    }
    if (may_fail) {
      return kevent_errno != ENOENT;
    }
    return kevent_errno != EINTR;
  }();
  LOG_IF(FATAL, is_fatal_error) << Status::PosixError(kevent_errno, "kevent failed");
  changes_n = 0;
  if (err < 0) {
    return 0;
  }
  return err;
}
void JsonLoader::load_messages(JsonLoader::Reader* ctx,
                               std::deque<google::protobuf::Message*>* out_msgs) {
  out_msgs->clear();
  butil::IOBuf request_json;
  while (ctx->get_next_json(&request_json)) {
    VLOG(1) << "Load " << out_msgs->size() + 1 << "-th json=`" << request_json << '\'';
    std::string error;
    google::protobuf::Message* request = _request_prototype->New();
    butil::IOBufAsZeroCopyInputStream wrapper(request_json);
    if (!json2pb::JsonToProtoMessage(&wrapper, request, &error)) {
      LOG(WARNING) << "Fail to convert to pb: " << error
                   << ", json=`" << request_json << '\'';
      delete request;
      continue;
    }
    out_msgs->push_back(request);
    LOG_IF(INFO, (out_msgs->size() % 10000) == 0)
        << "Loaded " << out_msgs->size() << " jsons";
  }
}
int WdtSocket::writeInternal(const char *buf, int nbyte, int timeoutMs, bool retry) {
  int count = 0;
  int written = 0;
  while (written < nbyte) {
    int w = writeWithAbortCheck(buf + written, nbyte - written, timeoutMs,
                                /* always try to write everything */ true);
    if (w <= 0) {
      writeErrorCode_ = SOCKET_WRITE_ERROR;
      return (written > 0 ? written : -1);
    }
    if (!retry) {
      return w;
    }
    written += w;
    count++;
  }
  WDT_CHECK_EQ(nbyte, written);
  LOG_IF(INFO, count > 1) << "Took " << count << " attempts to write " << nbyte
                          << " bytes to socket";
  return written;
}
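// A minimal, hypothetical caller sketch (the buffer, socket object, and timeout
// value are illustrative, not from WDT itself): with retry=true the loop above
// keeps issuing writes until all nbyte bytes are sent, and the LOG_IF fires
// only when more than one attempt was needed.
//
//   char header[16] = {0};
//   int n = socket.writeInternal(header, sizeof(header), /*timeoutMs=*/1000,
//                                /*retry=*/true);
//   if (n != sizeof(header)) {
//     LOG(ERROR) << "short write: " << n;
//   }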
int main() { LOG(INFO) << "Dump log test"; // CHECK Operation CHECK_NE(1, 2) << ": The world must be ending!"; // Check if it is euqual CHECK_EQ(std::string("abc")[1], 'b'); int x = 2; int y = 1; LOG_IF(ERROR, x > y) << "2 > 1. This should be also OK"; // Test dump log in different thread int err = pthread_create(&newTid, NULL, ThreadRunnable, NULL); if (err != 0) { LOG(FATAL) << "Unable to create a thread"; return 1; } sleep(1); TestStopWatch(); return 0; }
size_t IOFactory::loadFile(std::list<Chunk> &ret, const boost::filesystem::path &filename,
                           util::istring suffix_override, util::istring dialect) {
  FileFormatList formatReader;
  formatReader = getFileFormatList(filename.file_string(), suffix_override, dialect);
  const size_t nimgs_old = ret.size(); // save number of chunks
  const util::istring with_dialect = dialect.empty() ?
      util::istring("") : util::istring(" with dialect \"") + dialect + "\"";
  if (formatReader.empty()) {
    if (!boost::filesystem::exists(filename)) {
      LOG(Runtime, error) << util::MSubject(filename.file_string())
          << " does not exist as file, and no suitable plugin was found to generate data from "
          << (suffix_override.empty() ? util::istring("that name") :
              util::istring("the suffix \"") + suffix_override + "\"");
    } else if (suffix_override.empty()) {
      LOG(Runtime, error) << "No plugin found to read " << filename.file_string() << with_dialect;
    } else {
      LOG(Runtime, error) << "No plugin supporting the requested suffix "
          << suffix_override << with_dialect << " was found";
    }
  } else {
    BOOST_FOREACH(FileFormatList::const_reference it, formatReader) {
      LOG(ImageIoDebug, info) << "plugin to load file" << with_dialect << " "
          << util::MSubject(filename.file_string()) << ": " << it->getName();
      try {
        return it->load(ret, filename.file_string(), dialect);
      } catch (std::runtime_error &e) {
        if (suffix_override.empty()) {
          LOG(Runtime, formatReader.size() > 1 ? warning : error)
              << "Failed to load " << filename.file_string() << " using " << it->getName()
              << with_dialect << " ( " << e.what() << " )";
        } else {
          LOG(Runtime, warning) << "The enforced format " << it->getName()
              << " failed to read " << filename.file_string() << with_dialect
              << " ( " << e.what() << " ), maybe it just wasn't the right format";
        }
      }
    }
    LOG_IF(boost::filesystem::exists(filename) && formatReader.size() > 1, Runtime, error)
        << "No plugin was able to load: " << util::MSubject(filename.file_string()) << with_dialect;
  }
  // The original snippet was truncated here; every failure path leaves `ret`
  // untouched, so returning the number of newly loaded chunks (zero on failure)
  // is the natural reconstruction.
  return ret.size() - nimgs_old;
}
template <class Object>
void berkeleydb_store<Object>::init(const std::string& type, const int rank) {
  DLOG_ASSERT(!FLAGS_store_dir.empty())
      << "FLAG failure. You have to set directory path from commandline. "
      << " Run with --help,"
      << " and see how to run Differential Execution with file systems.";
  std::string rank_str;
  std::stringstream ss_rank;
  ss_rank << rank;
  std::string id_path_str_ = FLAGS_store_dir + "/" + FLAGS_sim_id;
  std::string db_path_str_ = id_path_str_ + "/" + type + ss_rank.str();
  /* make directory */
  boost::filesystem::path id_path_(id_path_str_);
  boost::filesystem::path db_path_(db_path_str_);
  boost::filesystem::create_directory(FLAGS_store_dir);
  boost::filesystem::create_directory(id_path_);
  boost::filesystem::create_directory(db_path_);
  LOG_IF(INFO, rank == 0) << "Create directory: " << id_path_str_;
  /* open database */
  env = new DbEnv(0);
  u_int32_t env_flags = DB_CREATE |      // If the environment does not exist, create it.
                        DB_INIT_LOCK |   // Initialize locking
                        DB_INIT_LOG |    // Initialize logging
                        DB_INIT_MPOOL |  // Initialize the cache
                        DB_INIT_TXN;     // Initialize transactions
  env->open(db_path_str_.c_str(), env_flags, 0);
  db = new Db(env, 0);
  u_int32_t oFlags = DB_CREATE | DB_AUTO_COMMIT;
  db->open(NULL,            // Transaction pointer
           "database.db",   // Database file name
           NULL,            // Optional logical database name
           DB_BTREE,        // Database access method
           oFlags,          // Open flags
           0);              // File mode (using defaults)
}
void asdf_multiplat_t::render_debug() {
  LOG_IF(CheckGLError(), "Error before drawing spritebatch debug information");
  auto passthrough = Content.shaders["passthrough"];
  passthrough->use_program();
  passthrough->world_matrix = glm::mat4();
  passthrough->view_matrix = glm::mat4();
  passthrough->projection_matrix = spritebatch->spritebatch_shader->projection_matrix;
  passthrough->update_wvp_uniform();
  glUniform4f(passthrough->uniforms["Color"], 0.0f, 0.2f, 1.0f, 1.0f);
  glDisable(GL_CULL_FACE);
  glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
  glLineWidth(1.2f);
  //glBegin(GL_POLYGON);
  //glColor4f(0.5f, 0.3f, 1.0f, 1.0f);
  //glVertex2f(-1, 1);
  //glVertex2f(1, 1);
  //glVertex2f(1, -1);
  //glVertex2f(-1, -1);
  //glEnd();
  //glBegin(GL_POLYGON);
  //glColor4f(0.3f, 0.1f, 0.3f, 1.0f);
  //glVertex2f(-0.5f, 0.5f);
  //glVertex2f(0.5f, 0.5f);
  //glVertex2f(0.5f, -0.5f);
  //glVertex2f(-0.5f, -0.5f);
  //glEnd();
  main_view->render_debug();
  glUseProgram(0);
}
_INITIALIZE_EASYLOGGINGPP

int main(int argc, char** argv) {
  _START_EASYLOGGINGPP(argc, argv);
  el::Helpers::addFlag(el::LoggingFlag::DisableApplicationAbortOnFatalLog);
  LOG(INFO); LOG(DEBUG); LOG(WARNING); LOG(ERROR); LOG(TRACE); VLOG(1); LOG(FATAL);
  DLOG(INFO); DLOG(DEBUG); DLOG(WARNING); DLOG(ERROR); DLOG(TRACE); DVLOG(1); DLOG(FATAL);
  LOG_IF(true, INFO); LOG_IF(true, DEBUG); LOG_IF(true, WARNING); LOG_IF(true, ERROR);
  LOG_IF(true, TRACE); VLOG_IF(true, 1); LOG_IF(true, FATAL);
  LOG_EVERY_N(1, INFO); LOG_EVERY_N(1, DEBUG); LOG_EVERY_N(1, WARNING);
  LOG_EVERY_N(1, ERROR); LOG_EVERY_N(1, TRACE); VLOG_EVERY_N(1, 1); LOG_EVERY_N(1, FATAL);
  CHECK(1 == 1);
  CCHECK(1 == 1, "default");
  return 0;
}
template <typename Dtype>
void Net<Dtype>::Init(const NetParameter& in_param) {
  CHECK(Dragon::get_root_solver() || root_net)
      << "Root net needs to be set for all non-root solvers.";
  phase = in_param.state().phase();
  NetParameter filtered_param, param;
  // filter out unqualified LayerParameters (e.g. Test DataLayer)
  filterNet(in_param, &filtered_param);
  insertSplits(filtered_param, &param);
  name = param.name();
  LOG_IF(INFO, Dragon::get_root_solver())
      << "Initialize net from parameters: "; /*<< endl << param.DebugString();*/
  map<string, int> blob_name_to_idx;
  set<string> available_blobs;
  CHECK_EQ(param.input_size(), param.input_shape_size())
      << "input blob_shape must specify a blob.";
  memory_used = 0;
  // check and stuff virtual input blobs first [Viewing Mode only]
  for (int input_id = 0; input_id < param.input_size(); input_id++) {
    const int layer_id = -1;
    // net_input.push_back(.....virtual blob.....)
    appendTop(param, layer_id, input_id, &available_blobs, &blob_name_to_idx);
  }
  // then stuff real blobs for each layer [Training/Testing/Viewing Mode]
  bottom_vecs.resize(param.layer_size());
  bottom_id_vecs.resize(param.layer_size());
  bottoms_need_backward.resize(param.layer_size());
  top_vecs.resize(param.layer_size());
  top_id_vecs.resize(param.layer_size());
  param_id_vecs.resize(param.layer_size());
  for (int layer_id = 0; layer_id < param.layer_size(); layer_id++) {
    bool share_from_root = !Dragon::get_root_solver()
        && root_net->layers[layer_id]->shareInParallel();
    // copy net phase to layer if not set
    if (!param.layer(layer_id).has_phase())
      param.mutable_layer(layer_id)->set_phase(phase);
    const LayerParameter& layer_param = param.layer(layer_id);
    if (share_from_root) {
      LOG(INFO) << "Share Layer: " << layer_param.name() << " from the root net.";
      // share layer by pointer
      layers.push_back(root_net->layers[layer_id]);
      layers[layer_id]->setShared(true);
    } else {
      // use the layer factory to create a pointer;
      // the layer type is referred by layer_param->type()
      // see more in layer_factory.hpp
      layers.push_back(LayerFactory<Dtype>::createLayer(layer_param));
    }
    layer_names.push_back(layer_param.name());
    LOG_IF(INFO, Dragon::get_root_solver()) << "Create Layer: " << layer_param.name();
    bool need_bp = false;
    // stuff bottom blobs
    for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); bottom_id++) {
      const int blob_id = appendBottom(param, layer_id, bottom_id,
                                       &available_blobs, &blob_name_to_idx);
      // check whether a bottom needs back-propagation
      need_bp |= blobs_need_backward[blob_id];
    }
    // stuff top blobs
    for (int top_id = 0; top_id < layer_param.top_size(); top_id++)
      appendTop(param, layer_id, top_id, &available_blobs, &blob_name_to_idx);
    // auto top blobs
    // NOT_IMPLEMENTED;
    Layer<Dtype>* layer = layers[layer_id].get();
    // setup for layer
    if (share_from_root) {
      const vector<Blob<Dtype>*> base_top = root_net->top_vecs[layer_id];
      const vector<Blob<Dtype>*> this_top = this->top_vecs[layer_id];
      // reshape solely after root_net finishes
      for (int top_id = 0; top_id < base_top.size(); top_id++) {
        this_top[top_id]->reshapeLike(*base_top[top_id]);
      }
    } else layer->setup(bottom_vecs[layer_id], top_vecs[layer_id]);
    LOG_IF(INFO, Dragon::get_root_solver()) << "Setup Layer: " << layer_param.name();
    for (int top_id = 0; top_id < top_vecs[layer_id].size(); top_id++) {
      // extend size to the max number of blobs if necessary
      if (blobs_loss_weight.size() <= top_id_vecs[layer_id][top_id])
        blobs_loss_weight.resize(top_id_vecs[layer_id][top_id] + 1, Dtype(0));
      // store global loss weights from each blob of each layer
      blobs_loss_weight[top_id_vecs[layer_id][top_id]] = layer->getLoss(top_id);
      LOG_IF(INFO, Dragon::get_root_solver())
          << "Top shape: " << top_vecs[layer_id][top_id]->shape_string();
      if (layer->getLoss(top_id))
        LOG_IF(INFO, Dragon::get_root_solver())
            << " with loss weight " << layer->getLoss(top_id);
      // sum up for training parameter statistics
      memory_used += top_vecs[layer_id][top_id]->count();
    }
    LOG_IF(INFO, Dragon::get_root_solver())
        << "Memory required for Data: " << memory_used * sizeof(Dtype);
    const int param_size = layer_param.param_size();
    // blobs_size will be set after layer->setup()
    const int param_blobs_size = layer->getBlobs().size();
    CHECK_LE(param_size, param_blobs_size) << "Too many params specified for layer.";
    // used if hyperparameters are not specified:
    // lr_mult = decay_mult = 1.0
    ParamSpec default_hyperparameter;
    for (int param_id = 0; param_id < param_blobs_size; param_id++) {
      const ParamSpec* hyperparameter = param_id < param_size ?
          &layer_param.param(param_id) : &default_hyperparameter;
      const bool param_need_bp = hyperparameter->lr_mult() != 0;
      // check whether a param blob needs back-propagation [default=true]
      need_bp |= param_need_bp;
      layer->setParamNeedBp(param_id, param_need_bp);
    }
    // stuff param blobs
    for (int param_id = 0; param_id < param_blobs_size; param_id++)
      appendParam(param, layer_id, param_id);
    // update param blobs if shared with others
    shareWeights();
    layer_need_backward.push_back(need_bp);
    // after checking all bottom blobs and param blobs
    if (need_bp)
      for (int top_id = 0; top_id < top_id_vecs[layer_id].size(); top_id++)
        blobs_need_backward[top_id_vecs[layer_id][top_id]] = true;
  } // end layer_id
  set<string> blobs_under_loss, blobs_skip_bp;
  for (int layer_id = layers.size() - 1; layer_id >= 0; layer_id--) {
    bool layer_contributes_loss = false;
    bool layer_skip_bp = true;
    Layer<Dtype>* layer = layers[layer_id].get();
    for (int top_id = 0; top_id < top_vecs[layer_id].size(); top_id++) {
      const string& blob_name = blobs_name[top_id_vecs[layer_id][top_id]];
      if (layer->getLoss(top_id) || blobs_under_loss.count(blob_name))
        layer_contributes_loss = true;
      if (!blobs_skip_bp.count(blob_name)) layer_skip_bp = false;
      // stop early if some top blob is affected by loss and not forced to skip bp
      if (layer_contributes_loss && !layer_skip_bp) break;
    }
    // optimization trick: lr_mult is set but the layer is not affected by loss
    if (layer_need_backward[layer_id] && layer_skip_bp) {
      // cancel the layer
      layer_need_backward[layer_id] = false;
      // cancel the bottoms
      for (int bottom_id = 0; bottom_id < bottom_vecs[layer_id].size(); bottom_id++) {
        bottoms_need_backward[layer_id][bottom_id] = false;
      }
    }
    // cancel directly if the layer is not affected by loss
    if (!layer_contributes_loss) layer_need_backward[layer_id] = false;
    // debug info
    if (Dragon::get_root_solver()) {
      if (layer_need_backward[layer_id])
        LOG(INFO) << "Layer: " << layer_names[layer_id] << " needs back-propagation.";
      else
        LOG(INFO) << "Layer: " << layer_names[layer_id] << " does not need back-propagation.";
    }
    // if one top blob is affected by loss,
    // all bottom blobs will be affected;
    // regard it as "loss back-affected"
    for (int bottom_id = 0; bottom_id < bottom_vecs[layer_id].size(); bottom_id++) {
      const string& blob_name = blobs_name[bottom_id_vecs[layer_id][bottom_id]];
      if (layer_contributes_loss) blobs_under_loss.insert(blob_name);
      else bottoms_need_backward[layer_id][bottom_id] = false;
      // used for the optimization trick: skip all bottom blobs
      if (!bottoms_need_backward[layer_id][bottom_id]) blobs_skip_bp.insert(blob_name);
    }
  } // end layer_id
  if (param.force_backward()) {
    for (int layer_id = 0; layer_id < layers.size(); layer_id++) {
      layer_need_backward[layer_id] = true;
      for (int bottom_id = 0; bottom_id < bottom_vecs[layer_id].size(); bottom_id++) {
        // set for bottoms
        bottoms_need_backward[layer_id][bottom_id] =
            bottoms_need_backward[layer_id][bottom_id] ||
            layers[layer_id]->allowForceBackward(bottom_id);
        // set for blobs
        blobs_need_backward[bottom_id_vecs[layer_id][bottom_id]] =
            blobs_need_backward[bottom_id_vecs[layer_id][bottom_id]] ||
            bottoms_need_backward[layer_id][bottom_id];
      }
      // set for params
      for (int param_id = 0; param_id < layers[layer_id]->getBlobs().size(); param_id++) {
        layers[layer_id]->setParamNeedBp(param_id, true);
      }
    }
  }
  // move unused (declared as top but not used as bottom) blobs into the output blobs;
  // these usually contain loss blobs
  for (set<string>::iterator i = available_blobs.begin(); i != available_blobs.end(); i++) {
    LOG_IF(INFO, Dragon::get_root_solver()) << "Network produces output: " << *i;
    net_output_blobs.push_back(blobs[blob_name_to_idx[*i]].get());
    net_output_blob_indices.push_back(blob_name_to_idx[*i]);
  }
  // store blob_name -> blob_ids
  blobs_name_idx = blob_name_to_idx;
  // store layer_name -> layer_id
  for (size_t layer_id = 0; layer_id < layer_names.size(); layer_id++)
    layers_name_idx[layer_names[layer_id]] = layer_id;
  debug_info = param.debug_info();
  LOG_IF(INFO, Dragon::get_root_solver()) << "Network initialization done.";
}
template <typename Dtype>
void Net<Dtype>::appendParam(const NetParameter& param, const int layer_id,
                             const int param_id) {
  const LayerParameter& layer_param = param.layer(layer_id);
  Layer<Dtype>* layer = layers[layer_id].get();
  const int param_size = layer_param.param_size();
  // default name="" (not set)
  string param_name = param_id < param_size ? layer_param.param(param_id).name() : "";
  // has a name
  if (param_name.size()) param_display_names.push_back(param_name);
  // else use param_id as the name
  else {
    ostringstream display_name;
    display_name << param_id;
    param_display_names.push_back(display_name.str());
  }
  // each param blob has a net id (both weight and bias)
  const int net_param_id = param_blobs.size();
  // add a param blob which can be accessed by its net id
  param_blobs.push_back(layer->getBlobs()[param_id]);
  // store the net id:
  // param_id_vecs[layer_id][param_id] yields the net_param_id
  param_id_vecs[layer_id].push_back(net_param_id);
  // store the original id (x_th layer / y_th param):
  // param_layer_indices[net_param_id] yields layer_id/param_id
  param_layer_indices.push_back(make_pair(layer_id, param_id));
  ParamSpec default_hyperparameter;
  const ParamSpec* hyperparameter = param_id < param_size ?
      &layer_param.param(param_id) : &default_hyperparameter;
  // has no name, or has a name that has not been logged before
  if (!param_size || !param_name.size() ||
      (param_name.size() && !param_names_index.count(param_name))) {
    param_owners.push_back(-1);
    // has a name (non-empty) but has not been logged before
    if (param_name.size()) param_names_index[param_name] = net_param_id;
    const int learnable_param_id = learnable_params.size();
    learnable_params.push_back(param_blobs[net_param_id].get());
    learnable_param_ids.push_back(learnable_param_id);
    has_params_lr.push_back(hyperparameter->has_lr_mult());
    has_params_decay.push_back(hyperparameter->has_decay_mult());
    params_lr.push_back(hyperparameter->lr_mult());
    params_decay.push_back(hyperparameter->decay_mult());
  } else {
    // has a name (non-empty) and has been logged before:
    // this param is shared and we need to get the owner id
    const int owner_net_param_id = param_names_index[param_name];
    param_owners.push_back(owner_net_param_id);
    const pair<int, int>& owner_index = param_layer_indices[owner_net_param_id];
    const int owner_layer_id = owner_index.first;
    const int owner_param_id = owner_index.second;
    LOG_IF(INFO, Dragon::get_root_solver()) << "Share parameter: " << param_name
        << " owned by layer: " << layer_names[owner_layer_id]
        << " param index: " << owner_param_id;
    Blob<Dtype>* this_blob = param_blobs[net_param_id].get();
    Blob<Dtype>* owner_blob = param_blobs[owner_net_param_id].get();
    CHECK(this_blob); CHECK(owner_blob);
    // check before sharing
    if (layer_param.param(param_id).share_mode() == ParamSpec_DimCheckMode_PERMISSIVE_MODE)
      CHECK_EQ(this_blob->count(), owner_blob->count());
    else CHECK(this_blob->shape() == owner_blob->shape());
    // note that learnable_param_id = owner_net_param_id
    const int learnable_param_id = learnable_param_ids[owner_net_param_id];
    // store the parent id
    learnable_param_ids.push_back(learnable_param_id);
    // check lr_mult
    if (hyperparameter->has_lr_mult()) {
      if (has_params_lr[learnable_param_id])
        CHECK_EQ(hyperparameter->lr_mult(), params_lr[learnable_param_id])
            << "Shared param: " << param_name << " has mismatched lr_mult.";
      else {
        has_params_lr[learnable_param_id] = true;
        params_lr[learnable_param_id] = hyperparameter->lr_mult();
      }
    }
    // check decay_mult
    if (hyperparameter->has_decay_mult()) {
      if (has_params_decay[learnable_param_id])
        CHECK_EQ(hyperparameter->decay_mult(), params_decay[learnable_param_id])
            << "Shared param: " << param_name << " has mismatched decay_mult.";
      else {
        has_params_decay[learnable_param_id] = true;
        params_decay[learnable_param_id] = hyperparameter->decay_mult();
      }
    }
  }
}
void CurlClient::onEgressResumed() noexcept { LOG_IF(INFO, loggingEnabled_) << "Egress resumed"; }
void CurlClient::onEgressPaused() noexcept { LOG_IF(INFO, loggingEnabled_) << "Egress paused"; }
void CurlClient::onError(const HTTPException& error) noexcept { LOG_IF(ERROR, loggingEnabled_) << "An error occurred: " << error.what(); }
void hexmap_t::render() { rendered_map->render(); LOG_IF(CheckGLError(), "Error during hex_map_t::render()"); }
void CurlClient::onEOM() noexcept { LOG_IF(INFO, loggingEnabled_) << "Got EOM"; }
void CurlClient::onTrailers(std::unique_ptr<HTTPHeaders>) noexcept { LOG_IF(INFO, loggingEnabled_) << "Discarding trailers"; }
void CurlClient::connectError(const folly::AsyncSocketException& ex) { LOG_IF(ERROR, loggingEnabled_) << "Couldn't connect to " << url_.getHostAndPort() << ": " << ex.what(); }
void spritebatch_t::render_batch(shared_ptr<texture_t> const& texture) {
  size_t numBatchedSprites = 0;
  sprite_vertex_t spriteVertices[9001];
  unsigned short spriteIndices[9001]; //todo: refactor this
  for (sprite_t const& sprite : sprite_map[texture]) {
    vec2 up(0.0f, 1.0f);
    vec2 right(1.0f, 0.0f);
    up = rotate(up, -sprite.rotation);
    right = rotate(right, -sprite.rotation);
    size_t vertNum = numBatchedSprites * 4;
    float hwidth = sprite.src_rect.width / 2.0f;
    float hheight = sprite.src_rect.height / 2.0f;
    float spritehwidth = hwidth * sprite.scale[0];
    float spritehheight = hheight * sprite.scale[1];
    ///FIXME precision
    float minX = sprite.src_rect.x / double(texture->get_width());
    float minY = sprite.src_rect.y / double(texture->get_height());
    float maxX = (sprite.src_rect.x + sprite.src_rect.width) / double(texture->get_width());
    float maxY = (sprite.src_rect.y + sprite.src_rect.height) / double(texture->get_height());
    /* 0 _________ 1
     *  |        /|
     *  |      /  |
     *  |    /    |
     *  |  /      |
     *  |/________|
     * 2           3
     */
    //v0 - top left
    spriteVertices[vertNum].position = sprite.position + right*(-spritehwidth) + up*(spritehheight);
    spriteVertices[vertNum].tex_coord = vec2(minX, maxY);
    spriteVertices[vertNum].color = sprite.color;
    //v1 - top right
    spriteVertices[vertNum + 1].position = sprite.position + right*(spritehwidth) + up*(spritehheight);
    spriteVertices[vertNum + 1].tex_coord = vec2(maxX, maxY);
    spriteVertices[vertNum + 1].color = sprite.color;
    //v2 - bottom left
    spriteVertices[vertNum + 2].position = sprite.position + right*(-spritehwidth) + up*(-spritehheight);
    spriteVertices[vertNum + 2].tex_coord = vec2(minX, minY);
    spriteVertices[vertNum + 2].color = sprite.color;
    //v3 - bottom right
    spriteVertices[vertNum + 3].position = sprite.position + right*(spritehwidth) + up*(-spritehheight);
    spriteVertices[vertNum + 3].tex_coord = vec2(maxX, minY);
    spriteVertices[vertNum + 3].color = sprite.color;
    size_t indexNum = numBatchedSprites * 6;
    spriteIndices[indexNum + 0] = vertNum + 0;
    spriteIndices[indexNum + 1] = vertNum + 1;
    spriteIndices[indexNum + 2] = vertNum + 2;
    spriteIndices[indexNum + 3] = vertNum + 2;
    spriteIndices[indexNum + 4] = vertNum + 1;
    spriteIndices[indexNum + 5] = vertNum + 3;
    //--- DEBUG --- //todo: refactor
    if (debugging_sprites) {
      LOG_IF(CheckGLError(), "Error before drawing spritebatch debug information");
      auto passthrough = Content.shaders["passthrough"];
      passthrough->use_program();
      passthrough->world_matrix = spritebatch_shader->world_matrix;
      passthrough->view_matrix = spritebatch_shader->view_matrix;
      passthrough->projection_matrix = spritebatch_shader->projection_matrix;
      passthrough->update_wvp_uniform();
      glUniform4f(passthrough->uniforms["Color"], 1.0f, 0.0f, 0.0f, 1.0f);
      glDisable(GL_CULL_FACE);
      glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
      glLineWidth(6.0f);
      glBegin(GL_POLYGON);
      glTexCoord2f(0.01f, 0.99f);
      glVertex2f(spriteVertices[vertNum + 0].position.x, spriteVertices[vertNum + 2].position.y); //BOTTOM LEFT
      glTexCoord2f(0.01f, 0.01f);
      glVertex2f(spriteVertices[vertNum + 1].position.x, spriteVertices[vertNum + 1].position.y); //TOP LEFT
      glTexCoord2f(0.99f, 0.01f);
      glVertex2f(spriteVertices[vertNum + 2].position.x, spriteVertices[vertNum + 0].position.y); //TOP RIGHT
      glTexCoord2f(0.99f, 0.99f);
      glVertex2f(spriteVertices[vertNum + 3].position.x, spriteVertices[vertNum + 3].position.y); //BOTTOM RIGHT
      glEnd();
      spritebatch_shader->use_program();
      LOG_IF(CheckGLError(), "Error drawing spritebatch debug information");
    }
    //---
    numBatchedSprites++;
  }
  //bind/push the index data
  glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, index_buffer);
  glBufferData(GL_ELEMENT_ARRAY_BUFFER, numBatchedSprites * 6 * sizeof(short),
               reinterpret_cast<void*>(spriteIndices), GL_STREAM_DRAW);
  //bind the vbo and push the vertex data
  glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
  glBufferData(GL_ARRAY_BUFFER, numBatchedSprites * 4 * sizeof(sprite_vertex_t),
               reinterpret_cast<void*>(spriteVertices), GL_STREAM_DRAW);
  //enable position, texCoord, color
  glEnableVertexAttribArray(0);
  glEnableVertexAttribArray(1);
  glEnableVertexAttribArray(2);
  //set up vertex attrib size / types. stride is just the total size of the vertex
  GLsizei stride = sizeof(sprite_vertex_t);
  glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, stride, 0);
  glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, stride,
                        reinterpret_cast<void*>(sizeof(glm::vec2))); //MUST STILL PROVIDE OFFSET
  glVertexAttribPointer(2, 4, GL_FLOAT, GL_TRUE, stride,
                        reinterpret_cast<void*>(sizeof(glm::vec2) * 2));
  LOG_IF(CheckGLError(), "Error after setting up spritebatch vertex attributes");
  //set openGL state
  glEnable(GL_TEXTURE_2D);
  glBindTexture(GL_TEXTURE_2D, texture->get_textureID()); //bind the texture. Sampler is set in End()
  glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
  glDisable(GL_CULL_FACE);
  glDrawElements(GL_TRIANGLES, numBatchedSprites * 6, GL_UNSIGNED_SHORT, 0);
  if (debugging_sprites) {
    Content.shaders["passthrough"]->use_program();
    glUniform4f(Content.shaders["passthrough"]->uniforms["Color"], 0.0f, 0.0f, 1.0f, 1.0f);
    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
    glLineWidth(2.0f);
    glDrawElements(GL_TRIANGLES, numBatchedSprites * 6, GL_UNSIGNED_SHORT, 0);
  }
  //reset opengl state
  //glEnable(GL_CULL_FACE);
  glBindTexture(GL_TEXTURE_2D, 0);
  glDisableVertexAttribArray(0);
  glDisableVertexAttribArray(1);
  glDisableVertexAttribArray(2);
  glBindBuffer(GL_ARRAY_BUFFER, 0);
  glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
  LOG_IF(CheckGLError(), "Error after drawing spritebatch polygons");
  ASSERT(!CheckGLError(), "");
}
bool CCondvar::MTimedwait(CMutex *aMutex, double const aTime) {
  LOG_IF(FATAL, aTime < 0) << "Time is negative.";
  unsigned int const _time = (aTime <= 0 ? INFINITE : (unsigned int)(aTime * 1000));
  return FPImpl->MWait(aMutex, _time);
}
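// A hypothetical usage sketch (the condition variable, mutex, and timeout value
// are illustrative): aTime is given in seconds and converted to milliseconds; a
// negative value is fatal, and 0 maps to INFINITE, i.e. wait forever.
//
//   CMutex mutex;
//   CCondvar cv;
//   bool signaled = cv.MTimedwait(&mutex, 0.5);  // waits up to 500 ms
//   bool forever = cv.MTimedwait(&mutex, 0);     // waits with no timeout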
template <typename Dtype>
void Net<Dtype>::appendTop(const NetParameter& param, const int layer_id,
                           const int top_id, set<string>* available_blobs,
                           map<string, int>* blob_name_to_idx) {
  boost::shared_ptr<LayerParameter> layer_param(
      layer_id >= 0 ? new LayerParameter(param.layer(layer_id)) : NULL);
  // use (layer_id/top_id) or (-1/top_id) to get a blob name
  const string& blob_name = layer_param ?
      (top_id < layer_param->top_size() ? layer_param->top(top_id) : "(automatic)") :
      param.input(top_id);
  // in-place case (e.g.:
  // I0721 10:38:16.722070  4692 net.cpp:84] relu1 <- conv1
  // I0721 10:38:16.722082  4692 net.cpp:98] relu1 -> conv1 (in-place)
  // check whether a blob is at the same position in both bottom and top
  if (blob_name_to_idx && layer_param && top_id < layer_param->bottom_size() &&
      blob_name == layer_param->bottom(top_id)) {
    LOG_IF(INFO, Dragon::get_root_solver()) << layer_param->name()
        << " [Layer-Produce] -> " << blob_name << " [Blob-Name] (in-place)";
    // add into this layer's top blobs using blob_name
    top_vecs[layer_id].push_back(blobs[(*blob_name_to_idx)[blob_name]].get());
    // log the id
    top_id_vecs[layer_id].push_back((*blob_name_to_idx)[blob_name]);
  } else if (blob_name_to_idx && (*blob_name_to_idx).count(blob_name)) {
    LOG(FATAL) << "Top blob: " << blob_name << " propagates from multiple sources.";
  }
  // normal top blob stuffing
  else {
    // debug info
    if (Dragon::get_root_solver()) {
      if (layer_param)
        LOG(INFO) << layer_param->name() << " [Layer-Produce] -> "
                  << blob_name << " [Blob-Name]";
      // special case, only used when viewing a Net's structure,
      // because these need not specify a data source and cannot train or test;
      // virtual data input blobs do not belong to any layer
      // see more in insert_splits.cpp / void InsertSplits()
      else
        LOG(INFO) << "Input " << top_id << " [Blob-Code] -> "
                  << blob_name << " [Blob-Name]";
    }
    // allocate a null blob at first
    boost::shared_ptr<Blob<Dtype>> ptr_blob(new Blob<Dtype>());
    // store global blob info
    const int blob_id = blobs.size();
    blobs.push_back(ptr_blob);
    blobs_name.push_back(blob_name);
    blobs_need_backward.push_back(false);
    // encode an index number for the name,
    // which also indicates this top blob is bound to a bottom;
    // checking it first tells whether a top blob has multiple sources (forbidden)
    if (blob_name_to_idx) (*blob_name_to_idx)[blob_name] = blob_id;
    // reshape solely for virtual input blobs,
    // because they do not live in a DataLayer (which provides reshape/transform services)
    if (layer_id == -1) {
      ptr_blob->reshape(param.input_shape(top_id));
      // store solely for virtual input blobs
      net_input_blobs.push_back(ptr_blob.get());
      net_input_blob_indices.push_back(blob_id);
    } else {
      top_vecs[layer_id].push_back(ptr_blob.get());
      top_id_vecs[layer_id].push_back(blob_id);
    }
  }
  // a set used for listing all existing top blobs
  if (available_blobs) available_blobs->insert(blob_name);
}
void CurlClient::onUpgrade(UpgradeProtocol) noexcept { LOG_IF(INFO, loggingEnabled_) << "Discarding upgrade protocol"; }
template <typename Dtype>
void Solver<Dtype>::Step(int iters) {
  const int start_iter = iter_;
  const int stop_iter = iter_ + iters;
  int average_loss = this->param_.average_loss();
  losses_.clear();
  smoothed_loss_ = 0;
  while (iter_ < stop_iter) {
    // zero-init the params
    net_->ClearParamDiffs();
    if (param_.test_interval() && iter_ % param_.test_interval() == 0 &&
        (iter_ > 0 || param_.test_initialization()) && Caffe::root_solver()) {
      TestAll();
      if (requested_early_exit_) {
        // Break out of the while loop because stop was requested while testing.
        break;
      }
    }
    for (int i = 0; i < callbacks_.size(); ++i) {
      callbacks_[i]->on_start();
    }
    const bool display = param_.display() && iter_ % param_.display() == 0;
    net_->set_debug_info(display && param_.debug_info());
    // accumulate the loss and gradient
    Dtype loss = 0;
    for (int i = 0; i < param_.iter_size(); ++i) {
      loss += net_->ForwardBackward();
    }
    loss /= param_.iter_size();
    // average the loss across iterations for smoothed reporting
    UpdateSmoothedLoss(loss, start_iter, average_loss);
    if (display) {
      LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_
          << ", loss = " << smoothed_loss_;
      const vector<Blob<Dtype>*>& result = net_->output_blobs();
      int score_index = 0;
      for (int j = 0; j < result.size(); ++j) {
        const Dtype* result_vec = result[j]->cpu_data();
        const string& output_name =
            net_->blob_names()[net_->output_blob_indices()[j]];
        const Dtype loss_weight =
            net_->blob_loss_weights()[net_->output_blob_indices()[j]];
        for (int k = 0; k < result[j]->count(); ++k) {
          ostringstream loss_msg_stream;
          if (loss_weight) {
            loss_msg_stream << " (* " << loss_weight
                            << " = " << loss_weight * result_vec[k] << " loss)";
          }
          LOG_IF(INFO, Caffe::root_solver()) << " Train net output #"
              << score_index++ << ": " << output_name << " = "
              << result_vec[k] << loss_msg_stream.str();
        }
      }
    }
    for (int i = 0; i < callbacks_.size(); ++i) {
      callbacks_[i]->on_gradients_ready();
    }
    ApplyUpdate();
    // Increment the internal iter_ counter -- its value should always indicate
    // the number of times the weights have been updated.
    ++iter_;
    SolverAction::Enum request = GetRequestedAction();
    // Save a snapshot if needed.
    if ((param_.snapshot() && iter_ % param_.snapshot() == 0 &&
         Caffe::root_solver()) || (request == SolverAction::SNAPSHOT)) {
      Snapshot();
    }
    if (SolverAction::STOP == request) {
      requested_early_exit_ = true;
      // Break out of training loop.
      break;
    }
  }
}
void Plugin::SetMainBody(Index const index) { main_body_ = dynamic_cast<RotatingBody<Barycentric> const*>( &*FindOrDie(celestials_, index)->body()); LOG_IF(FATAL, main_body_ == nullptr) << index; }
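// Note: with glog, LOG_IF(FATAL, cond) aborts the process after logging when
// cond holds, so the dynamic_cast check above behaves like a CHECK. An
// equivalent spelling (a sketch, not necessarily the project's preferred style):
//
//   CHECK(main_body_ != nullptr) << index;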
Stream* Node::enqueuePacket(Packet* packet, Topology topology, size_t num_nodes) {
  LOG_IF(FATAL, packet == nullptr) << "Packet is null!";
  VLOG(3) << "PCKT: " << *packet;
  LOG_IF(FATAL, packet->getX() > 1000);
  LOG_IF(FATAL, packet->getY() > 1000);
  LOG_IF(FATAL, packet->getZ() > 1000);
  std::vector<Stream*> queues;
  switch (topology) {
    case Topology::RING:
      // Choose the direction with the shorter way around the ring.
      // (The original code also distinguished the wrap-around cases this->x == 0
      // and this->x == num_nodes - 1, but both branches enqueued the same stream,
      // so they are collapsed here without changing behavior.)
      if (packet->getX() > this->x) {
        if (packet->getX() - this->x > num_nodes - packet->getX() + this->x) {
          // Go to the node with x - 1
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::BACK)]);
        } else {
          // Go to the node with x + 1
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::FOREWARD)]);
        }
      } else {
        if (this->x - packet->getX() > num_nodes - this->x + packet->getX()) {
          // Go to the node with x + 1
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::FOREWARD)]);
        } else {
          // Go to the node with x - 1
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::BACK)]);
        }
      }
      break;
    case Topology::GRID:
      if (this->x != packet->getX()) {
        // Move packet in x direction
        if (packet->getX() > this->x) {
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::FOREWARD)]); // Forward
        } else {
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::BACK)]); // Back
        }
      }
      if (this->y != packet->getY()) {
        // Move packet in y direction
        if (packet->getY() > this->y) {
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::RIGHT)]); // Right
        } else {
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::LEFT)]); // Left
        }
      }
      break;
    case Topology::CUBE:
      if (this->x != packet->getX()) {
        // Move packet in x direction
        if (packet->getX() > this->x) {
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::FOREWARD)]); // Forward
        } else {
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::BACK)]); // Back
        }
      }
      if (this->y != packet->getY()) {
        // Move packet in y direction
        if (packet->getY() > this->y) {
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::RIGHT)]); // Right
        } else {
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::LEFT)]); // Left
        }
      }
      if (this->z != packet->getZ()) {
        // Move packet in z direction
        if (packet->getZ() > this->z) {
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::DOWN)]); // Down
        } else {
          queues.push_back(this->streams[static_cast<size_t>(StreamDirection::UP)]); // Up
        }
      }
      break;
    default:
      LOG(FATAL) << "No topology with id '" << static_cast<size_t>(topology) << "' exists.";
  }
  // Insert packet into the viable queue with the shortest length
  Stream* stream = nullptr;
  if (!queues.empty()) {
    for (size_t i = 0; i < queues.size(); i++) {
      if (stream == nullptr || queues.at(i)->size() < stream->size()) {
        LOG_IF(FATAL, queues.at(i) == nullptr) << "Queue is null";
        stream = queues.at(i);
      }
    }
    LOG_IF(FATAL, packet->getX() > 1000);
    LOG_IF(FATAL, packet->getY() > 1000);
    LOG_IF(FATAL, packet->getZ() > 1000);
    stream->push(packet);
  } else {
    LOG(FATAL) << "Packet already at destination!";
  }
  return stream;
}
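// The ring case above picks whichever of the two directions needs fewer hops.
// A minimal standalone sketch of that rule (the function name and the from/to
// positions are illustrative stand-ins for this->x and packet->getX()):
//
//   #include <cstddef>
//   // Returns true if moving "forward" (x + 1, wrapping around) is the shorter way.
//   bool forward_is_shorter(size_t from, size_t to, size_t num_nodes) {
//     size_t forward = (to + num_nodes - from) % num_nodes;   // hops going +1
//     size_t backward = (from + num_nodes - to) % num_nodes;  // hops going -1
//     return forward <= backward;
//   }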
int append_record_forward(char *fpath, fileheader_t *record, int size, const char *origid) {
  FILE *fp;
  char buf[PATHLEN];
  char address[64] = "";
  char fwd_title[STRLEN] = "";
  int r;
  // No matter what, append it, and return if that failed.
  r = append_record(fpath, record, size);
  if (r < 0) return r;
  // #ifdef USE_MAIL_AUTO_FORWARD
  if (strlen(fpath) + strlen(FN_FORWARD) >= PATHLEN) {
    log_filef("log/invalid_append_record_forward", LOG_CREAT, "%s %s %s\n",
              Cdatelite(&now), cuser.userid, fpath);
    return -1;
  }
  setdirpath(buf, fpath, FN_FORWARD);
  fp = fopen(buf, "r");
  if (!fp) return 0;
  // Load and set up the address
  address[0] = 0;
  fscanf(fp, "%63s", address);
  fclose(fp);
  chomp(address);
  strip_blank(address, address);
#ifdef UNTRUSTED_FORWARD_TIMEBOMB
  if (dasht(buf) < UNTRUSTED_FORWARD_TIMEBOMB) {
    // We may unlink here, but for systems with a timebomb,
    // just leave it alone and let the user see it in the login screen.
    // unlink(buf);
    return 0;
  }
#endif
  if (get_num_records(fpath, sizeof(fileheader_t)) > MAX_KEEPMAIL_HARDLIMIT) {
    unlink(buf);
    // TODO add a mail so that origid knows what happened.
    LOG_IF(LOG_CONF_INTERNETMAIL,
           log_filef("log/internet_mail.log", LOG_CREAT,
                     "%s [%s] (%s -> %s) mailbox overflow (%d > %d)\n",
                     Cdatelite(&now), __FUNCTION__, origid, address,
                     get_num_records(fpath, sizeof(fileheader_t)),
                     MAX_KEEPMAIL_HARDLIMIT));
    return 0;
  }
  if (!*address || strchr(address, '@') == NULL || strcasestr(address, str_mail_address)) {
#ifndef UNTRUSTED_FORWARD_TIMEBOMB
    // delete the setting if we don't have timebombs.
    unlink(buf);
    LOG_IF(LOG_CONF_INTERNETMAIL,
           log_filef("log/internet_mail.log", LOG_CREAT,
                     "%s [%s] Removed bad address: %s (%s)\n",
                     Cdatelite(&now), __FUNCTION__, address, origid));
#endif
    return 0;
  }
  setdirpath(buf, fpath, record->filename);
  // Because too many users set a wrong forward address,
  // let's put their own address instead.
  // And again, because some users do not realize
  // they've set auto-forward, let's mark this in the title.
  // ("自動轉寄" means "auto-forward".)
  snprintf(fwd_title, sizeof(fwd_title) - 1, "[自動轉寄] %s", record->title);
  bsmtp(buf, fwd_title, address, origid);
  LOG_IF(LOG_CONF_INTERNETMAIL,
         log_filef("log/internet_mail.log", LOG_CREAT,
                   "%s [%s] %s -> (%s) %s: %s\n",
                   Cdatelite(&now), __FUNCTION__, cuser.userid, origid,
                   address, fwd_title));
  // #endif  // USE_MAIL_AUTO_FORWARD
  return 0;
}