// Moves a crafting component into a manufacture slot.
// Three cases are handled: factory crates (clone one item per unit taken),
// plain non-stacked items (moved directly, result sent synchronously), and
// stacked components (stack adjusted, clone requested asynchronously).
// Returns true when the caller should send the slot-add result immediately;
// false when the result arrives later through the object-factory callback.
bool CraftingSession::prepareComponent(Item* component, uint32 needed, ManufactureSlot* manSlot)
{
    FactoryCrate* fC = dynamic_cast<FactoryCrate*>(component);

    if(fC)
    {
        // Take up to 'needed' units out of the crate; 'amount' is what we
        // actually got (the crate may hold fewer).
        uint32 amount = AdjustFactoryCrate(fC, needed);
        DLOG(INFO) << "CraftingSession::prepareComponent FactoryCrate take " << amount;

        //TODO - added stacks shouldnt have more items than maximally possible - needed is the amount needed for the slot
        // that might be bigger than the max stack size

        //create the new item - link it to the slot
        // Stash the async state read back by the clone callback.
        mAsyncComponentAmount = needed;
        mAsyncManSlot = manSlot;

        //make sure we request the right amount of stacks
        // One clone request per unit taken from the crate.
        for(uint8 i = 0; i<amount; i++)
            gObjectFactory->requestNewClonedItem(this,fC->getLinkedObject()->getId(),mManufacturingSchematic->getId());

        // if its now empty remove it out of the inventory so we cant use it several times
        // and destroy it while were at it
        uint32 crateSize = fC->getAttribute<uint32>("factory_count");
        if(!crateSize)
        {
            TangibleObject* container = dynamic_cast<TangibleObject*>(gWorldManager->getObjectById(fC->getParentId()));
            //just delete it
            gContainerManager->deleteObject(fC, container);
        }

        //dont send result - its a callback
        return false;
    }

    //no stacksize or crate - do not bother with temporaries
    if(!component->hasAttribute("stacksize"))
    {
        // remove it out of the inventory so we cant use it several times
        TangibleObject* tO = dynamic_cast<TangibleObject*>(gWorldManager->getObjectById(component->getParentId()));
        assert(tO && "CraftingSession::prepareComponent :: cant get parent");

        tO->removeObject(component);

        //leave parent_id untouched - we might need to readd it to the container!
        //please note that we can only use components out of our inventory or the crafting stations thingy
        //so update containment for all watchers
        //TODO
        gMessageLib->sendContainmentMessage(component->getId(),mManufacturingSchematic->getId(),0xffffffff,mOwner);

        //send result directly we dont have a callback
        return true;
    }

    //only pure stacks remain
    // Deduct 'needed' units from the stack's stacksize attribute.
    AdjustComponentStack(component, needed);

    //create the new item - link it to the slot
    mAsyncComponentAmount = needed;
    mAsyncManSlot = manSlot;
    gObjectFactory->requestNewClonedItem(this,component->getId(),mManufacturingSchematic->getId());

    //delete the stack if empty
    uint32 stackSize = component->getAttribute<uint32>("stacksize");

    if(!stackSize)
    {
        //remove the item out of its container
        TangibleObject* tO = dynamic_cast<TangibleObject*>(gWorldManager->getObjectById(component->getParentId()));
        if(!tO)
        {
            assert(false);
            return false;
        }

        //just delete it
        tO->removeObject(component);
        gWorldManager->destroyObject(component);
    }

    //dont send result - its a callback
    return false;
}
// Sets up the data layer.
// Validates top/bottom blob counts, opens the leveldb named in the layer's
// data_param, optionally skips a random number of records (rand_skip),
// shapes the data (and optional label) top blobs from the first datum,
// loads the mean file (or zero-fills the mean), and starts the prefetch
// thread.
//
// bottom must be empty; top holds one blob (data) or two (data, label).
void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 0) << "Data Layer takes no input blobs.";
  CHECK_GE(top->size(), 1) << "Data Layer takes at least one blob as output.";
  CHECK_LE(top->size(), 2) << "Data Layer takes at most two blobs as output.";
  // A second top blob, when present, carries the labels.
  output_labels_ = (top->size() == 2);
  // Initialize the leveldb
  leveldb::DB* db_temp;
  leveldb::Options options;
  options.create_if_missing = false;
  options.max_open_files = 10;  // keep the file-descriptor footprint small
  LOG(INFO) << "Opening leveldb " << this->layer_param_.data_param().source();
  leveldb::Status status = leveldb::DB::Open(
      options, this->layer_param_.data_param().source(), &db_temp);
  CHECK(status.ok()) << "Failed to open leveldb "
      << this->layer_param_.data_param().source() << std::endl
      << status.ToString();
  db_.reset(db_temp);
  iter_.reset(db_->NewIterator(leveldb::ReadOptions()));
  iter_->SeekToFirst();
  // Check if we would need to randomly skip a few data points
  if (this->layer_param_.data_param().rand_skip()) {
    unsigned int skip = caffe_rng_rand() %
        this->layer_param_.data_param().rand_skip();
    LOG(INFO) << "Skipping first " << skip << " data points.";
    while (skip-- > 0) {
      iter_->Next();
      if (!iter_->Valid()) {
        // Wrap around at the end of the database.
        iter_->SeekToFirst();
      }
    }
  }
  // Read a data point, and use it to initialize the top blob.
  Datum datum;
  datum.ParseFromString(iter_->value().ToString());
  // image
  int crop_size = this->layer_param_.data_param().crop_size();
  if (crop_size > 0) {
    (*top)[0]->Reshape(this->layer_param_.data_param().batch_size(),
        datum.channels(), crop_size, crop_size);
    prefetch_data_.reset(new Blob<Dtype>(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        crop_size, crop_size));
  } else {
    (*top)[0]->Reshape(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        datum.height(), datum.width());
    prefetch_data_.reset(new Blob<Dtype>(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        datum.height(), datum.width()));
  }
  LOG(INFO) << "output data size: " << (*top)[0]->num() << ","
      << (*top)[0]->channels() << "," << (*top)[0]->height() << ","
      << (*top)[0]->width();
  // label
  if (output_labels_) {
    (*top)[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1);
    prefetch_label_.reset(
        new Blob<Dtype>(this->layer_param_.data_param().batch_size(),
            1, 1, 1));
  }
  // datum size
  datum_channels_ = datum.channels();
  datum_height_ = datum.height();
  datum_width_ = datum.width();
  datum_size_ = datum.channels() * datum.height() * datum.width();
  // A crop must be strictly smaller than the stored images.
  CHECK_GT(datum_height_, crop_size);
  CHECK_GT(datum_width_, crop_size);
  // check if we want to have mean
  if (this->layer_param_.data_param().has_mean_file()) {
    const string& mean_file = this->layer_param_.data_param().mean_file();
    // BUGFIX: log message was missing the trailing space before the path.
    LOG(INFO) << "Loading mean file from " << mean_file;
    BlobProto blob_proto;
    ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);
    data_mean_.FromProto(blob_proto);
    CHECK_EQ(data_mean_.num(), 1);
    CHECK_EQ(data_mean_.channels(), datum_channels_);
    CHECK_EQ(data_mean_.height(), datum_height_);
    CHECK_EQ(data_mean_.width(), datum_width_);
  } else {
    // Simply initialize an all-empty mean.
    data_mean_.Reshape(1, datum_channels_, datum_height_, datum_width_);
  }
  // Now, start the prefetch thread. Before calling prefetch, we make two
  // cpu_data calls so that the prefetch thread does not accidentally make
  // simultaneous cudaMalloc calls when the main thread is running. In some
  // GPUs this seems to cause failures if we do not so.
  prefetch_data_->mutable_cpu_data();
  if (output_labels_) {
    prefetch_label_->mutable_cpu_data();
  }
  data_mean_.cpu_data();
  DLOG(INFO) << "Initializing prefetch";
  CreatePrefetchThread();
  DLOG(INFO) << "Prefetch initialized.";
}
// Fills one prefetch batch with optical-flow fields loaded from .flo files.
// The batch is reshaped to the dimensions of the first file in the list and
// each subsequent file is read straight into the prefetch buffer. When the
// file list is exhausted it wraps to the start, optionally reshuffling.
// NOTE(review): all files in a batch are assumed to share the first file's
// dimensions — only the first file's header is probed. Confirm with callers.
void FloDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  string root_folder = image_data_param.root_folder();

  // Reshape according to the first image of each batch
  // on single input batches allows for inputs of varying dimension.
  int xSize, ySize;
  // Probe only the header (NULL data pointer) to obtain the dimensions.
  CHECK(readFloFile(root_folder + lines_[lines_id_].first, NULL, xSize, ySize))
      << "Could not load " << lines_[lines_id_].first;
  // Use data_transformer to infer the expected blob shape from a cv_img.
  vector<int> top_shape = vector<int>(4);
  top_shape[0] = 1;
  // Two channels: presumably the horizontal and vertical flow components
  // of the .flo format — TODO confirm against readFloFile.
  top_shape[1] = 2;
  top_shape[2] = ySize;
  top_shape[3] = xSize;
  //this->transformed_data_.Reshape(top_shape);
  // Reshape batch according to the batch_size.
  top_shape[0] = batch_size;
  batch->data_.Reshape(top_shape);

  Dtype* prefetch_data = batch->data_.mutable_cpu_data();

  // datum scales
  const int lines_size = lines_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    read_time += timer.MicroSeconds();
    timer.Start();
    // Apply transformations (mirror, crop...) to the image
    int offset = batch->data_.offset(item_id);
    //this->transformed_data_.set_cpu_data(prefetch_data + offset);
    // Read the flow field directly into this item's slice of the buffer.
    CHECK(readFloFile(root_folder + lines_[lines_id_].first,
        prefetch_data + offset, xSize, ySize))
        << "Could not load " << lines_[lines_id_].first;
    //this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    trans_time += timer.MicroSeconds();

    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
// Resolves one element of the dotted path in mElements (starting at
// current_index) against the reflected fields of |root|. A numeric path
// element immediately after a field name is treated as an array index.
// Recurses for intermediate objects; renders the leaf via printObject()
// or, for array leaves, printArray(). Returns NULL when the path cannot
// be resolved to a printable object.
jobjectArray MetaInterface::parseObject(MetaBase const* root, const int current_index) {
  int next_index = current_index + 1;
  const int path_elements = mElements.getCount();
  jobjectArray result = NULL;
  const MetaObject* meta = root->getMetaObject();
  const char* field_string = mElements.get(current_index);
  const int field_count = meta->getFieldCount();
  for (int y = 0; y < field_count; y++) {
    const MetaField* metaField = meta->getField(y);
    if (strcmp(field_string, metaField->getName()) == 0) {
      DLOG(INFO) << "Found field: " << metaField->getName();
      // found the field!
      // test for array access: a purely numeric next path element selects
      // one element of the field and is consumed from the path.
      int array_index = -1;
      if (current_index + 1 < path_elements) {
        const char* next_element = mElements.get(current_index + 1);
        if (isNumber(next_element)) {
          array_index = atoi(next_element);
          next_index++;  // skip the index element when recursing
        }
      }
      // pull the object out of the root object
      void* object = NULL;
      bool is_array_terminator = false;
      if (array_index >= 0) {
        // BUGFIX: the bounds check used '<=' against the element count,
        // permitting a one-past-the-end access. (The redundant re-check of
        // array_index >= 0 was dropped as well.)
        if (array_index < metaField->getElementCount(root)) {
          if (metaField->getStorageType() == MetaField::TYPE_pointer) {
            // array of pointers
            object = *((void**)metaField->getElement(root, array_index));
          } else {
            // inline array
            object = metaField->getElement(root, array_index);
          }
        }
      } else if (metaField->getElementCount(root) > 1 && next_index >= path_elements) {
        // This is an array request.
        is_array_terminator = true;
      } else {
        if (metaField->getStorageType() == MetaField::TYPE_pointer) {
          object = *((void**)metaField->get(root));
        } else {
          object = metaField->get(root);
        }
      }
      if (object && MetaBase::authenticatePointer(object)) {
        // safe to cast!
        MetaBase* new_root = static_cast<MetaBase*>(object);
        if (next_index < path_elements) {
          // recurse
          result = parseObject(new_root, next_index);
        } else {
          // leaf
          result = printObject(new_root);
        }
      } else if (is_array_terminator) {
        // This is the leaf, but it's an array.
        result = printArray(root, metaField);
      } else {
        // we found the field but can't go any further.
        // BUGFIX: reinterpret_cast<int>(object) truncates pointers on LP64
        // targets (and is rejected with -Werror); stream the pointer itself.
        DLOG(INFO) << "Field null or unknown type: "
                   << metaField->getTypeName() << " (" << object << ")";
      }
    }
  }
  return result;
}
// Builds the network from a NetParameter: inserts split layers where a blob
// feeds multiple consumers, creates the declared input blobs, instantiates
// each layer, wires its bottom/top blobs (handling in-place computation),
// calls SetUp on each layer, and determines which layers and blobs require
// backward computation. Remaining unconsumed blobs become network outputs.
void Net<Dtype>::Init(const NetParameter& in_param) {
  LOG(INFO) << "Initializing net from parameters: " << std::endl
      << in_param.DebugString();
  // Create a copy of in_param with splits added where necessary.
  NetParameter param;
  // BUGFIX: the address-of operator had been mangled into "¶m" (an HTML
  // entity artifact, U+00B6); restored to "&param" so the code compiles.
  InsertSplits(in_param, &param);
  // Basically, build all the layers and set up its connections.
  name_ = param.name();
  map<string, int> blob_name_to_idx;
  set<string> available_blobs;
  // (Removed unused local 'num_layers'.)
  CHECK_EQ(param.input_size() * 4, param.input_dim_size())
      << "Incorrect bottom blob dimension specifications.";
  size_t memory_used = 0;
  // set the input blobs
  for (int i = 0; i < param.input_size(); ++i) {
    const string& blob_name = param.input(i);
    // Each input blob's shape is four consecutive entries of input_dim.
    shared_ptr<Blob<Dtype> > blob_pointer(
        new Blob<Dtype>(param.input_dim(i * 4),
                        param.input_dim(i * 4 + 1),
                        param.input_dim(i * 4 + 2),
                        param.input_dim(i * 4 + 3)));
    blobs_.push_back(blob_pointer);
    blob_names_.push_back(blob_name);
    blob_need_backward_.push_back(param.force_backward());
    net_input_blob_indices_.push_back(i);
    net_input_blobs_.push_back(blob_pointer.get());
    blob_name_to_idx[blob_name] = i;
    available_blobs.insert(blob_name);
    memory_used += blob_pointer->count();
  }
  // BUGFIX: added the missing space before the byte count in the message.
  DLOG(INFO) << "Memory required for Data " << memory_used*sizeof(Dtype);
  // For each layer, set up their input and output
  bottom_vecs_.resize(param.layers_size());
  top_vecs_.resize(param.layers_size());
  bottom_id_vecs_.resize(param.layers_size());
  top_id_vecs_.resize(param.layers_size());
  for (int i = 0; i < param.layers_size(); ++i) {
    bool in_place = false;
    const LayerParameter& layer_param = param.layers(i);
    layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
    layer_names_.push_back(layer_param.name());
    LOG(INFO) << "Creating Layer " << layer_param.name();
    bool need_backward = param.force_backward();
    // Figure out this layer's input and output
    for (int j = 0; j < layer_param.bottom_size(); ++j) {
      const string& blob_name = layer_param.bottom(j);
      const int blob_id = blob_name_to_idx[blob_name];
      if (available_blobs.find(blob_name) == available_blobs.end()) {
        // BUGFIX: added the missing space before the bottom index.
        LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j;
      }
      LOG(INFO) << layer_param.name() << " <- " << blob_name;
      bottom_vecs_[i].push_back(blobs_[blob_id].get());
      bottom_id_vecs_[i].push_back(blob_id);
      // If a blob needs backward, this layer should provide it.
      need_backward |= blob_need_backward_[blob_id];
      // A blob may feed only one consumer; splits were inserted above.
      available_blobs.erase(blob_name);
    }
    for (int j = 0; j < layer_param.top_size(); ++j) {
      const string& blob_name = layer_param.top(j);
      // Check if we are doing in-place computation
      if (layer_param.bottom_size() > j && blob_name == layer_param.bottom(j)) {
        // In-place computation
        LOG(INFO) << layer_param.name() << " -> " << blob_name << " (in-place)";
        in_place = true;
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(blobs_[blob_name_to_idx[blob_name]].get());
        top_id_vecs_[i].push_back(blob_name_to_idx[blob_name]);
      } else if (blob_name_to_idx.find(blob_name) != blob_name_to_idx.end()) {
        // If we are not doing in-place computation but has duplicated blobs,
        // raise an error.
        LOG(FATAL) << "Duplicate blobs produced by multiple sources.";
      } else {
        // Normal output.
        LOG(INFO) << layer_param.name() << " -> " << blob_name;
        shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
        blobs_.push_back(blob_pointer);
        blob_names_.push_back(blob_name);
        blob_need_backward_.push_back(param.force_backward());
        blob_name_to_idx[blob_name] = blob_names_.size() - 1;
        available_blobs.insert(blob_name);
        top_vecs_[i].push_back(blobs_[blob_names_.size() - 1].get());
        top_id_vecs_[i].push_back(blob_names_.size() - 1);
      }
    }
    // After this layer is connected, set it up.
    // LOG(INFO) << "Setting up " << layer_names_[i];
    layers_[i]->SetUp(bottom_vecs_[i], &top_vecs_[i]);
    for (int topid = 0; topid < top_vecs_[i].size(); ++topid) {
      LOG(INFO) << "Top shape: " << top_vecs_[i][topid]->num() << " "
          << top_vecs_[i][topid]->channels() << " "
          << top_vecs_[i][topid]->height() << " "
          << top_vecs_[i][topid]->width() << " ("
          << top_vecs_[i][topid]->count() << ")";
      // In-place tops reuse their bottom's storage, so don't count them.
      if (!in_place)
        memory_used += top_vecs_[i][topid]->count();
    }
    DLOG(INFO) << "Memory required for Data " << memory_used*sizeof(Dtype);
    int blobs_lr_size = layers_[i]->layer_param().blobs_lr_size();
    CHECK(blobs_lr_size == layers_[i]->blobs().size() || blobs_lr_size == 0)
        << "Incorrect blobs lr size: should be either 0 or the same as "
           "the number of the layer's parameter blobs.";
    if (blobs_lr_size) {
      // Check if this layer needs backward operation itself
      for (int j = 0; j < blobs_lr_size; ++j) {
        need_backward |= (layers_[i]->layer_param().blobs_lr(j) > 0);
      }
    } else if (layers_[i]->blobs().size()) {
      // catch: if a layer param does not specify blobs_lr, we should assume
      // the learning rate to be 1. Thus we will need to perform backward.
      need_backward = true;
    }
    // Finally, set the backward flag
    layer_need_backward_.push_back(need_backward);
    if (need_backward) {
      LOG(INFO) << layer_names_[i] << " needs backward computation.";
      for (int j = 0; j < top_id_vecs_[i].size(); ++j) {
        blob_need_backward_[top_id_vecs_[i][j]] = true;
      }
    } else {
      LOG(INFO) << layer_names_[i] << " does not need backward computation.";
    }
  }
  // In the end, all remaining blobs are considered output blobs.
  for (set<string>::iterator it = available_blobs.begin();
       it != available_blobs.end(); ++it) {
    LOG(INFO) << "This network produces output " << *it;
    net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get());
    net_output_blob_indices_.push_back(blob_name_to_idx[*it]);
  }
  // Build the name -> index lookup tables.
  for (size_t i = 0; i < blob_names_.size(); ++i) {
    blob_names_index_[blob_names_[i]] = i;
  }
  for (size_t i = 0; i < layer_names_.size(); ++i) {
    layer_names_index_[layer_names_[i]] = i;
  }
  GetLearningRateAndWeightDecay();
  LOG(INFO) << "Network initialization done.";
  LOG(INFO) << "Memory required for Data " << memory_used*sizeof(Dtype);
}
// Shapes m_normalizedBuffer into a ShapeResult.
// The run is segmented by script / orientation / small-caps / fallback
// priority; each segment is shaped with HarfBuzz while walking the font
// fallback chain. Unshaped character ranges ("holes") are queued and
// retried with the next fallback font until the queue drains or no fonts
// remain.
PassRefPtr<ShapeResult> HarfBuzzShaper::shapeResult()
{
    RefPtr<ShapeResult> result = ShapeResult::create(m_font,
        m_normalizedBufferLength, m_textRun.direction());
    // Scoped ptr guarantees hb_buffer_destroy on every exit path.
    HarfBuzzScopedPtr<hb_buffer_t> harfBuzzBuffer(hb_buffer_create(), hb_buffer_destroy);

    const FontDescription& fontDescription = m_font->getFontDescription();
    const String& localeString = fontDescription.locale();
    CString locale = localeString.latin1();
    const hb_language_t language = hb_language_from_string(locale.data(), locale.length());

    RunSegmenter::RunSegmenterRange segmentRange = {
        0,
        0,
        USCRIPT_INVALID_CODE,
        OrientationIterator::OrientationInvalid,
        SmallCapsIterator::SmallCapsSameCase,
        FontFallbackPriority::Invalid };
    RunSegmenter runSegmenter(m_normalizedBuffer.get(), m_normalizedBufferLength,
        m_font->getFontDescription().orientation(), fontDescription.variant());

    Vector<UChar32> fallbackCharsHint;

    // TODO: Check whether this treatAsZerowidthspace from the previous script
    // segmentation plays a role here, does the new scriptRuniterator handle
    // that correctly?
    while (runSegmenter.consume(&segmentRange)) {
        RefPtr<FontFallbackIterator> fallbackIterator =
            m_font->createFontFallbackIterator(segmentRange.fontFallbackPriority);

        // Seed the queue: first advance to the initial font, then shape the
        // whole segment range.
        appendToHolesQueue(HolesQueueNextFont, 0, 0);
        appendToHolesQueue(HolesQueueRange, segmentRange.start,
            segmentRange.end - segmentRange.start);

        const SimpleFontData* currentFont = nullptr;
        RefPtr<UnicodeRangeSet> currentFontRangeSet;

        bool fontCycleQueued = false;
        while (m_holesQueue.size()) {
            HolesQueueItem currentQueueItem = m_holesQueue.takeFirst();

            if (currentQueueItem.m_action == HolesQueueNextFont) {
                // For now, we're building a character list with which we probe
                // for needed fonts depending on the declared unicode-range of a
                // segmented CSS font. Alternatively, we can build a fake font
                // for the shaper and check whether any glyphs were found, or
                // define a new API on the shaper which will give us coverage
                // information?
                if (!collectFallbackHintChars(fallbackCharsHint, fallbackIterator->needsHintList())) {
                    // Give up shaping since we cannot retrieve a font fallback
                    // font without a hintlist.
                    m_holesQueue.clear();
                    break;
                }

                FontDataForRangeSet nextFontDataForRangeSet = fallbackIterator->next(fallbackCharsHint);
                currentFont = nextFontDataForRangeSet.fontData().get();
                currentFontRangeSet = nextFontDataForRangeSet.ranges();

                if (!currentFont) {
                    // Fallback chain exhausted; nothing should remain queued.
                    ASSERT(!m_holesQueue.size());
                    break;
                }
                fontCycleQueued = false;
                continue;
            }

            // TODO crbug.com/522964: Only use smallCapsFontData when the font
            // does not support true smcp. The spec says: "To match the
            // surrounding text, a font may provide alternate glyphs for
            // caseless characters when these features are enabled but when a
            // user agent simulates small capitals, it must not attempt to
            // simulate alternates for codepoints which are considered
            // caseless."
            const SimpleFontData* smallcapsAdjustedFont =
                segmentRange.smallCapsBehavior == SmallCapsIterator::SmallCapsUppercaseNeeded
                ? currentFont->smallCapsFontData(fontDescription).get()
                : currentFont;

            // Compatibility with SimpleFontData approach of keeping a flag for
            // overriding drawing direction.
            // TODO: crbug.com/506224 This should go away in favor of storing
            // that information elsewhere, for example in ShapeResult.
            const SimpleFontData* directionAndSmallCapsAdjustedFont =
                fontDataAdjustedForOrientation(smallcapsAdjustedFont,
                    m_font->getFontDescription().orientation(),
                    segmentRange.renderOrientation);

            if (!shapeRange(harfBuzzBuffer.get(),
                currentQueueItem.m_startIndex,
                currentQueueItem.m_numCharacters,
                directionAndSmallCapsAdjustedFont,
                currentFontRangeSet,
                segmentRange.script,
                language))
                DLOG(ERROR) << "Shaping range failed.";

            if (!extractShapeResults(harfBuzzBuffer.get(),
                result.get(),
                fontCycleQueued,
                currentQueueItem,
                directionAndSmallCapsAdjustedFont,
                segmentRange.script,
                !fallbackIterator->hasNext()))
                DLOG(ERROR) << "Shape result extraction failed.";

            // Reuse the buffer for the next hole.
            hb_buffer_reset(harfBuzzBuffer.get());
        }
    }
    return result.release();
}
/*
 * Initializes the specified 'Match' data structure and the initial state of
 * commands.c for matching target windows of a command.
 *
 * The given _state is recorded as the state the command parser should
 * transition to once the criteria block has been consumed.
 *
 */
CFGFUN(criteria_init, int _state) {
    criteria_next_state = _state;

    DLOG("Initializing criteria, current_match = %p, state = %d\n", current_match, _state);
    /* Reset the global current_match to a clean, empty match. */
    match_init(current_match);
}
// Builds the reduction-tree pairing used for multi-GPU gradient exchange.
// Devices are paired greedily in three passes of decreasing locality:
// (1) GPUs on the same multi-GPU board, (2) GPUs with P2P access to each
// other, (3) whatever remains. Each pass halves 'remaining' (the second
// device of every pair is erased); the final survivor becomes the root and
// is paired with -1 (the CPU). Compiled out for CPU-only builds.
void DevicePair::compute(const vector<int> devices, vector<DevicePair>* pairs) {
#ifndef CPU_ONLY
  vector<int> remaining(devices);

  // Depth for reduction tree
  int remaining_depth = static_cast<int>(ceil(log2(remaining.size())));

  // Group GPUs by board
  for (int d = 0; d < remaining_depth; ++d) {
    for (int i = 0; i < remaining.size(); ++i) {
      for (int j = i + 1; j < remaining.size(); ++j) {
        cudaDeviceProp a, b;
        CUDA_CHECK(cudaGetDeviceProperties(&a, remaining[i]));
        CUDA_CHECK(cudaGetDeviceProperties(&b, remaining[j]));
        if (a.isMultiGpuBoard && b.isMultiGpuBoard) {
          if (a.multiGpuBoardGroupID == b.multiGpuBoardGroupID) {
            pairs->push_back(DevicePair(remaining[i], remaining[j]));
            DLOG(INFO) << "GPU board: " << remaining[i] << ":" << remaining[j];
            // Erase j and break so the outer loop re-scans safely after
            // the container was modified.
            remaining.erase(remaining.begin() + j);
            break;
          }
        }
      }
    }
  }
  ostringstream s;
  for (int i = 0; i < remaining.size(); ++i) {
    s << (i ? ", " : "") << remaining[i];
  }
  DLOG(INFO) << "GPUs paired by boards, remaining: " << s.str();

  // Group by P2P accessibility
  remaining_depth = ceil(log2(remaining.size()));
  for (int d = 0; d < remaining_depth; ++d) {
    for (int i = 0; i < remaining.size(); ++i) {
      for (int j = i + 1; j < remaining.size(); ++j) {
        int access;
        CUDA_CHECK(
            cudaDeviceCanAccessPeer(&access, remaining[i], remaining[j]));
        if (access) {
          pairs->push_back(DevicePair(remaining[i], remaining[j]));
          DLOG(INFO) << "P2P pair: " << remaining[i] << ":" << remaining[j];
          remaining.erase(remaining.begin() + j);
          break;
        }
      }
    }
  }
  s.str("");
  for (int i = 0; i < remaining.size(); ++i) {
    s << (i ? ", " : "") << remaining[i];
  }
  DLOG(INFO) << "GPUs paired by P2P access, remaining: " << s.str();

  // Group remaining
  remaining_depth = ceil(log2(remaining.size()));
  for (int d = 0; d < remaining_depth; ++d) {
    for (int i = 0; i < remaining.size(); ++i) {
      // Pair each element with its immediate neighbor; the erase keeps the
      // indices consistent for the next iteration.
      pairs->push_back(DevicePair(remaining[i], remaining[i + 1]));
      DLOG(INFO) << "Remaining pair: " << remaining[i] << ":"
          << remaining[i + 1];
      remaining.erase(remaining.begin() + i + 1);
    }
  }

  // Should only be the parent node remaining
  CHECK_EQ(remaining.size(), 1);

  // The root of the tree is parented to the CPU (-1).
  pairs->insert(pairs->begin(), DevicePair(-1, remaining[0]));

  // Sanity: one pair per device, no self-pairs, no duplicate children.
  CHECK(pairs->size() == devices.size());
  for (int i = 0; i < pairs->size(); ++i) {
    CHECK((*pairs)[i].parent() != (*pairs)[i].device());
    for (int j = i + 1; j < pairs->size(); ++j) {
      CHECK((*pairs)[i].device() != (*pairs)[j].device());
    }
  }
#else
  NO_GPU;
#endif
}
bool UgvParam::loadParam(std::string configFile) { std::ifstream in(configFile); if(!in) { DLOG(FATAL) << "Couldn't find configuration file: " << configFile; return false; } std::string line; std::string key; double value; while (getline(in, line)) { std::stringstream ss; ss << line; ss >> key; if(key[0] == '#') continue; ss >> value; if(key == "EulrChangeThreshold") DivideCarTrack.EulrChangeThreshold = value; else if(key == "DetectPoints") DivideCarTrack.DetectPoints = value; else if(key == "DetectDistance") DivideCarTrack.DetectDistance = value; else if(key == "ValidSegmentPointsNum") DivideCarTrack.ValidSegmentPointsNum = value; else if(key == "SimilarEulrThreshold") LineParallel.SimilarEulrThreshold = value; else if(key == "LateralDistanceThreshold") SameSeg.LateralDistanceThreshold = value; else if(key == "SameDirectionThreshold") SameSeg.SameDirectionThreshold = value; else if(key == "xMax") Scale.xMax = value; else if(key == "xMin") Scale.xMin = value; else if(key == "yMax") Scale.yMax = value; else if(key == "yMin") Scale.yMin = value; else if(key == "GridSize") Scale.GridSize = value; else if(key == "PixelPerGrid") Scale.PixelPerGrid = value; else if(key == "LeftDetectAngleBoundary") ProbMap.LeftDetectAngleBoundary = value; else if(key == "RightDetectAngleBoundary") ProbMap.RightDetectAngleBoundary = value; else if(key == "unitHeight") ProbMap.unitHeight = value; else if(key == "OccupiedThreshold") ProbMap.OccupiedThreshold = value; else if(key == "ClearThreshold") ProbMap.ClearThreshold = value; else if(key == "incrementUnit") ProbMap.incrementUnit = value; else if(key == "MaxGroundHeight") ProbMap.MaxGroundHeight = value; else if(key == "MaxAvgMidDiff") ProbMap.MaxAvgMidDiff = value; else if(key == "SaveNeeded"){ LocalMap.SaveNeeded.insert(value); while(ss){ ss >> value; LocalMap.SaveNeeded.insert(value); } } else if(key == "SaveInterval")
// Removes the row identified by |id| from the anomaly model.
// A configuration must already have been set on this server; the call
// always reports success to the RPC layer.
bool anomaly_serv::clear_row(const string& id) {
  // Fail fast if the server has not been configured yet.
  check_set_config();

  // Drop the row from the underlying anomaly detector.
  anomaly_->clear_row(id);

  DLOG(INFO) << "row cleared: " << id;
  return true;
}
int main(int argc, char *argv[]) { int backlog = 10; muxer_t *muxers[2] = {NULL, NULL}; status_writer_t *sw = NULL; child_t *child = NULL; int child_status = -1; int ring_buffer_size = 65535; int fds[3] = {-1, -1, -1}; int ii = 0, exit_status = 0, nwritten = 0; pthread_t sw_thread, muxer_threads[2]; char socket_paths[3][PATH_MAX + 1]; char *socket_names[3] = { "stdout.sock", "stderr.sock", "status.sock" }; barrier_t *barrier = NULL; if (argc < 3) { fprintf(stderr, "Usage: %s <socket directory> <cmd>\n", argv[0]); exit(EXIT_FAILURE); } printf("Usage: %s <socket directory> = %s <cmd> = %s \n" , argv[0],argv[1],argv[2]); fflush(stdout); /* Setup listeners on domain sockets */ for (ii = 0; ii < 3; ++ii) { memset(socket_paths[ii], 0, sizeof(socket_paths[ii])); nwritten = snprintf(socket_paths[ii], sizeof(socket_paths[ii]), "%s/%s", argv[1], socket_names[ii]); if (nwritten >= sizeof(socket_paths[ii])) { fprintf(stderr, "Socket path too long\n"); exit_status = 1; goto cleanup; } fds[ii] = create_unix_domain_listener(socket_paths[ii], backlog); DLOG("created listener, path=%s fd=%d", socket_paths[ii], fds[ii]); if (-1 == fds[ii]) { perrorf("Failed creating socket at %s:", socket_paths[ii]); exit_status = 1; goto cleanup; } set_cloexec(fds[ii]); } /* * Make sure iomux-spawn runs in an isolated process group such that * it is not affected by signals sent to its parent's process group. 
*/ setsid(); child = child_create(argv + 2, argc - 2); printf("child_pid=%d\n", child->pid); fflush(stdout); /* Muxers for stdout/stderr */ muxers[0] = muxer_alloc(fds[0], child->stdout[0], ring_buffer_size); muxers[1] = muxer_alloc(fds[1], child->stderr[0], ring_buffer_size); for (ii = 0; ii < 2; ++ii) { if (pthread_create(&muxer_threads[ii], NULL, run_muxer, muxers[ii])) { perrorf("Failed creating muxer thread:"); exit_status = 1; goto cleanup; } DLOG("created muxer thread for socket=%s", socket_paths[ii]); } /* Status writer */ barrier = barrier_alloc(); sw = status_writer_alloc(fds[2], barrier); if (pthread_create(&sw_thread, NULL, run_status_writer, sw)) { perrorf("Failed creating muxer thread:"); exit_status = 1; goto cleanup; } /* Wait for clients on stdout, stderr, and status */ for (ii = 0; ii < 2; ++ii) { muxer_wait_for_client(muxers[ii]); } barrier_wait(barrier); child_continue(child); printf("child active\n"); fflush(stdout); if (-1 == waitpid(child->pid, &child_status, 0)) { perrorf("Waitpid for child failed: "); exit_status = 1; goto cleanup; } DLOG("child exited, status = %d", WEXITSTATUS(child_status)); /* Wait for status writer */ status_writer_finish(sw, child_status); pthread_join(sw_thread, NULL); /* Wait for muxers */ for (ii = 0; ii < 2; ++ii) { muxer_stop(muxers[ii]); pthread_join(muxer_threads[ii], NULL); } DLOG("all done, cleaning up and exiting"); cleanup: if (NULL != child) { child_free(child); } if (NULL != barrier) { barrier_free(barrier); } if (NULL != sw) { status_writer_free(sw); } for (ii = 0; ii < 2; ++ii) { if (NULL != muxers[ii]) { muxer_free(muxers[ii]); } } /* Close accept sockets and clean up paths */ for (ii = 0; ii < 3; ++ii) { if (-1 != fds[ii]) { close(fds[ii]); unlink(socket_paths[ii]); } } return exit_status; }
// Frame-grabber thread body: puts the camera into RUN mode, connects to its
// PCIC port, and drives an asio read loop of alternating ticket and image
// reads. Completed, verified images are swapped into the front buffer and
// waiting clients are notified. Exceptions thrown from the handlers (boost
// error codes, including the Stop() cancellation) unwind io_service_.run()
// and terminate the loop.
void o3d3xx::FrameGrabber::Run()
{
  // Keep io_service_.run() alive even while no async op is pending.
  boost::asio::io_service::work work(this->io_service_);

  //
  // setup the camera for image acquistion
  //
  std::string cam_ip;
  int cam_port;
  try
    {
      cam_ip = this->cam_->GetIP();
      cam_port = std::stoi(this->cam_->GetParameter("PcicTcpPort"));
    }
  catch (const o3d3xx::error_t& ex)
    {
      LOG(ERROR) << "Could not get IP/Port of the camera: " << ex.what();
      return;
    }

  LOG(INFO) << "Camera connection info: ip=" << cam_ip
            << ", port=" << cam_port;

  try
    {
      this->cam_->RequestSession();
      this->cam_->SetOperatingMode(o3d3xx::Camera::operating_mode::RUN);
      // The edit session is no longer needed once RUN mode is active.
      this->cam_->CancelSession();
    }
  catch (const o3d3xx::error_t& ex)
    {
      LOG(ERROR) << "Failed to setup camera for image acquisition: "
                 << ex.what();
      return;
    }

  //
  // init the asio structures
  //
  boost::asio::ip::tcp::socket sock(this->io_service_);
  boost::asio::ip::tcp::endpoint endpoint(
    boost::asio::ip::address::from_string(cam_ip), cam_port);

  //
  // Forward declare our two read handlers (because they need to call
  // eachother).
  //
  o3d3xx::FrameGrabber::ReadHandler ticket_handler;
  o3d3xx::FrameGrabber::ReadHandler image_handler;

  //
  // image data callback
  //
  std::size_t bytes_read = 0;
  std::size_t buff_sz = 0; // bytes

  image_handler =
    [&, this] (const boost::system::error_code& ec,
               std::size_t bytes_transferred)
    {
      if (ec) { throw o3d3xx::error_t(ec.value()); }

      bytes_read += bytes_transferred;
      //DLOG(INFO) << "Read " << bytes_read << " image bytes of "
      //           << buff_sz;

      if (bytes_read == buff_sz)
        {
          DLOG(INFO) << "Got full image!";
          bytes_read = 0;

          // 1. verify the data
          if (o3d3xx::verify_image_buffer(this->back_buffer_))
            {
              DLOG(INFO) << "Image OK";

              // 2. move the data to the front buffer in O(1) time complexity
              this->front_buffer_mutex_.lock();
              this->back_buffer_.swap(this->front_buffer_);
              this->front_buffer_mutex_.unlock();

              // 3. notify waiting clients
              this->front_buffer_cv_.notify_all();
            }
          else
            {
              LOG(WARNING) << "Bad image!";
            }

          // read another ticket
          sock.async_read_some(
            boost::asio::buffer(this->ticket_buffer_.data(),
                                o3d3xx::IMG_TICKET_SZ),
            ticket_handler);

          return;
        }

      // Partial read: continue filling the back buffer from where we left
      // off.
      sock.async_read_some(
        boost::asio::buffer(&this->back_buffer_[bytes_read],
                            buff_sz - bytes_read),
        image_handler);
    };

  //
  // ticket callback
  //
  std::size_t ticket_bytes_read = 0;
  std::size_t ticket_buff_sz = o3d3xx::IMG_TICKET_SZ;
  this->ticket_buffer_.resize(ticket_buff_sz);

  ticket_handler =
    [&, this] (const boost::system::error_code& ec,
               std::size_t bytes_transferred)
    {
      if (ec) { throw o3d3xx::error_t(ec.value()); }

      ticket_bytes_read += bytes_transferred;
      DLOG(INFO) << "Read " << ticket_bytes_read
                 << " ticket bytes of " << ticket_buff_sz;

      if (ticket_bytes_read == ticket_buff_sz)
        {
          DLOG(INFO) << "Got full ticket!";
          ticket_bytes_read = 0;

          if (o3d3xx::verify_ticket_buffer(this->ticket_buffer_))
            {
              DLOG(INFO) << "Ticket OK";
              // The ticket announces the size of the image payload that
              // follows; size the back buffer accordingly.
              buff_sz = o3d3xx::get_image_buffer_size(this->ticket_buffer_);
              DLOG(INFO) << "Image buffer size: " << buff_sz;
              this->back_buffer_.resize(buff_sz);

              sock.async_read_some(
                boost::asio::buffer(this->back_buffer_.data(), buff_sz),
                image_handler);

              return;
            }

          LOG(WARNING) << "Bad ticket!";
        }

      // Partial (or failed-verify) ticket: keep reading ticket bytes.
      sock.async_read_some(
        boost::asio::buffer(&this->ticket_buffer_[ticket_bytes_read],
                            ticket_buff_sz - ticket_bytes_read),
        ticket_handler);
    };

  //
  // connect to the sensor and start streaming in image data
  //
  try
    {
      sock.async_connect(endpoint,
                         [&, this] (const boost::system::error_code& ec)
                         {
                           if (ec) { throw o3d3xx::error_t(ec.value()); }

                           sock.async_read_some(
                             boost::asio::buffer(
                               this->ticket_buffer_.data(), ticket_buff_sz),
                             ticket_handler);
                         });

      this->io_service_.run();
    }
  catch (const std::exception& ex)
    {
      //
      // In here we should discern why the exception with thrown.
      //
      // Special case the "Stop()" request from the control thread
      //
      LOG(WARNING) << "Exception: " << ex.what();
    }

  LOG(INFO) << "Framegrabber thread done.";
}
/*
 * lzjody command-line driver.
 *
 * Usage: "lzjody -c" compresses stdin to stdout block by block;
 * "lzjody -d" decompresses stdin to stdout. When built with THREADED,
 * compression is farmed out to a pool of worker threads (two per online
 * processor) and finished blocks are flushed in block-number order.
 * All failure paths funnel through the goto labels at the bottom and
 * exit with EXIT_FAILURE.
 */
int main(int argc, char **argv)
{
	static unsigned char blk[LZJODY_BSIZE];     /* Input block buffer */
	static unsigned char out[LZJODY_BSIZE + 4]; /* Output buffer (+4 for header/overflow slack) */
	int i;
	int length = 0;	/* Incoming data block length counter */
	int c_length;	/* Compressed block length temp variable */
	int blocknum = 0;	/* Current block number */
	unsigned char options = 0;	/* Compressor options */
#ifdef THREADED
	struct thread_info *thr;
	int nprocs = 1;		/* Number of processors */
	int eof = 0;		/* End of file? */
	char running = 0;	/* Number of threads running */
#endif /* THREADED */

	if (argc < 2) goto usage;

	/* Windows requires that data streams be put into binary mode */
#ifdef ON_WINDOWS
	setmode(STDIN_FILENO, _O_BINARY);
	setmode(STDOUT_FILENO, _O_BINARY);
#endif /* ON_WINDOWS */

	files.in = stdin;
	files.out = stdout;

	if (!strncmp(argv[1], "-c", 2)) {
#ifndef THREADED
		/* Non-threaded compression: read a block, compress, write, repeat */
		/* fprintf(stderr, "blk %p, blkend %p, files %p\n", blk, blk + LZJODY_BSIZE - 1, files); */
		while((length = fread(blk, 1, LZJODY_BSIZE, files.in))) {
			if (ferror(files.in)) goto error_read;
			DLOG("\n--- Compressing block %d\n", blocknum);
			i = lzjody_compress(blk, out, options, length);
			if (i < 0) goto error_compression;
			DLOG("c_size %d bytes\n", i);
			i = fwrite(out, i, 1, files.out);
			if (!i) goto error_write;
			blocknum++;
		}
#else /* Using POSIX threads */

#ifdef _SC_NPROCESSORS_ONLN
		/* Get number of online processors for pthreads */
		nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
		if (nprocs < 1) {
			fprintf(stderr, "warning: system returned bad number of processors: %d\n", nprocs);
			nprocs = 1;
		}
#endif /* _SC_NPROCESSORS_ONLN */

		/* Run two threads per processor */
		nprocs <<= 1;
		fprintf(stderr, "lzjody: compressing with %d worker threads\n", nprocs);

		/* Allocate per-thread input/output memory and control blocks */
		thr = (struct thread_info *)calloc(nprocs, sizeof(struct thread_info));
		if (!thr) goto oom;

		/* Set compressor options */
		for (i = 0; i < nprocs; i++) (thr + i)->options = options;

		thread_error = 0;
		while (1) {
			struct thread_info *cur = NULL;
			uint32_t min_blk;	/* Minimum block number */
			unsigned int min_thread;	/* Thread for min_blk */
			int thread;	/* Temporary thread scan counter */
			int open_thr;	/* Next open thread */

			/* See if lowest block number is finished; blocks must be
			 * flushed to the output stream in ascending block order. */
			while (1) {
				min_blk = 0xffffffff;
				min_thread = 0;
				/* Scan threads for smallest block number */
				pthread_mutex_lock(&mtx);
				for (thread = 0; thread < nprocs; thread++) {
					unsigned int j;

					/* NOTE(review): unconditional debug output on every
					 * scan -- presumably leftover tracing; confirm whether
					 * this should be behind DLOG/DEBUG. */
					fprintf(stderr, ":thr %p, thread %d\n", (void *)thr, thread);
					if (thread_error != 0) goto error_compression;
					j = (thr + thread)->block;
					if (j > 0 && j < min_blk) {
						min_blk = j;
						min_thread = thread;
						fprintf(stderr, ":j%d:%d thr %p, cur %p, min_thread %d\n", j, min_blk, (void *)thr, (void *)cur, min_thread);
					}
				}
				pthread_mutex_unlock(&mtx);

				cur = thr + min_thread;
				fprintf(stderr, "thr %p, cur %p, min_thread %d\n", (void *)thr, (void *)cur, min_thread);
				/* working == 0 && length > 0 means the thread finished a
				 * block that has not been flushed yet. */
				if (cur->working == 0 && cur->length > 0) {
					pthread_detach(cur->id);
					/* flush finished block */
					i = fwrite(cur->out, cur->o_length, 1, files.out);
					if (!i) goto error_write;
					cur->block = 0;
					cur->length = 0;
					DLOG("Thread %d done\n", min_thread);
					running--;
				} else break;
			}

			/* Terminate when all blocks are written */
			if (eof && (running == 0)) break;

			/* Start threads */
			if (running < nprocs) {
				/* Don't read any more if EOF reached */
				if (!eof) {
					/* Find next open thread (neither working nor holding
					 * an unflushed result) */
					cur = thr;
					for (open_thr = 0; open_thr < nprocs; open_thr++) {
						if (cur->working == 0 && cur->block == 0) break;
						cur++;
					}

					/* If no threads are available, wait for one */
					if (open_thr == nprocs) {
						pthread_mutex_lock(&mtx);
						pthread_cond_wait(&cond, &mtx);
						pthread_mutex_unlock(&mtx);
						continue;
					}

					/* Read next block; a short read marks EOF */
					length = fread(cur->blk, 1, (LZJODY_BSIZE * CHUNK), files.in);
					if (ferror(files.in)) goto error_read;
					if (length < (LZJODY_BSIZE * CHUNK)) eof = 1;

					if (length > 0) {
						blocknum++;

						/* Set up thread */
						cur->working = 1;
						cur->block = blocknum;
						cur->length = length;
						cur->o_length = 0;
						running++;
						DLOG("Thread %d start\n", open_thr);

						/* Start thread */
						pthread_create(&(cur->id), NULL, compress_thread, (void *)cur);
					} else eof = 1;
				} else if (running > 0) {
					/* EOF but threads still running: wait for one to finish */
					pthread_mutex_lock(&mtx);
					pthread_cond_wait(&cond, &mtx);
					pthread_mutex_unlock(&mtx);
				}
			}
		}
		free(thr);
#endif /* THREADED */
	}

	/* Decompress */
	if (!strncmp(argv[1], "-d", 2)) {
		/* Each block is prefixed by a 2-byte header: options bits in the
		 * top of byte 0, 13-bit compressed length across both bytes. */
		while(fread(blk, 1, 2, files.in)) {
			/* Get block-level decompression options */
			options = *blk & 0xc0;

			/* Read the length of the compressed data */
			length = *(blk + 1);
			length |= ((*blk & 0x1f) << 8);
			if (length > (LZJODY_BSIZE + 4)) goto error_blocksize_d_prefix;
			i = fread(blk, 1, length, files.in);
			if (ferror(files.in)) goto error_read;
			if (i != length) goto error_shortread;

			if (options & O_NOCOMPRESS) {
				/* Block was stored uncompressed; copy the payload through */
				c_length = *(blk + 1);
				c_length |= ((*blk & 0x1f) << 8);
				DLOG("--- Writing uncompressed block %d (%d bytes)\n", blocknum, c_length);
				if (c_length > LZJODY_BSIZE) goto error_unc_length;
				i = fwrite((blk + 2), 1, c_length, files.out);
				if (i != c_length) {
					length = c_length;
					goto error_write;
				}
			} else {
				DLOG("--- Decompressing block %d\n", blocknum);
				length = lzjody_decompress(blk, out, i, options);
				if (length < 0) goto error_decompress;
				if (length > LZJODY_BSIZE) goto error_blocksize_decomp;
				i = fwrite(out, 1, length, files.out);
				if (i != length) goto error_write;
				/* DLOG("Wrote %d bytes\n", i); */
			}
			blocknum++;
		}
	}

	exit(EXIT_SUCCESS);

error_compression:
	fprintf(stderr, "Fatal error during compression, aborting.\n");
	exit(EXIT_FAILURE);
error_read:
	fprintf(stderr, "Error reading file %s\n", "stdin");
	exit(EXIT_FAILURE);
error_write:
	fprintf(stderr, "Error writing file %s (%d of %d written)\n", "stdout", i, length);
	exit(EXIT_FAILURE);
error_shortread:
	fprintf(stderr, "Error: short read: %d < %d (eof %d, error %d)\n", i, length, feof(files.in), ferror(files.in));
	exit(EXIT_FAILURE);
error_unc_length:
	fprintf(stderr, "Error: uncompressed length too large (%d > %d)\n", c_length, LZJODY_BSIZE);
	exit(EXIT_FAILURE);
error_blocksize_d_prefix:
	fprintf(stderr, "Error: decompressor prefix too large (%d > %d)\n", length, (LZJODY_BSIZE + 4));
	exit(EXIT_FAILURE);
error_blocksize_decomp:
	fprintf(stderr, "Error: decompressor overflow (%d > %d)\n", length, LZJODY_BSIZE);
	exit(EXIT_FAILURE);
error_decompress:
	fprintf(stderr, "Error: cannot decompress block %d\n", blocknum);
	exit(EXIT_FAILURE);
#ifdef THREADED
oom:
	fprintf(stderr, "Error: out of memory\n");
	exit(EXIT_FAILURE);
#endif
usage:
	fprintf(stderr, "lzjody %s, a compression utility by Jody Bruchon (%s)\n", LZJODY_UTIL_VER, LZJODY_UTIL_VERDATE);
	fprintf(stderr, "\nlzjody -c compress stdin to stdout\n");
	fprintf(stderr, "\nlzjody -d decompress stdin to stdout\n");
	exit(EXIT_FAILURE);
}
/*
 * Do some sanity checks and then reparent the window.
 *
 * Queries the window's attributes/geometry/properties, decides which
 * container should hold it (swallow match, assignment, startup workspace,
 * or the focused container), reparents the client into that container's
 * frame and re-renders the tree. Bails out early (without managing) for
 * unmapped, override_redirect, already-managed, or vanished windows.
 */
void manage_window(xcb_window_t window, xcb_get_window_attributes_cookie_t cookie,
                   bool needs_to_be_mapped) {
    xcb_drawable_t d = { window };
    xcb_get_geometry_cookie_t geomc;
    xcb_get_geometry_reply_t *geom;
    xcb_get_window_attributes_reply_t *attr = NULL;

    xcb_get_property_cookie_t wm_type_cookie, strut_cookie, state_cookie,
                              utf8_title_cookie, title_cookie,
                              class_cookie, leader_cookie, transient_cookie,
                              role_cookie, startup_id_cookie, wm_hints_cookie;
#ifdef USE_ICONS
    xcb_get_property_cookie_t wm_icon_cookie;
#endif

    geomc = xcb_get_geometry(conn, d);

/* Await (and free) the geometry reply so the early-return paths do not
 * leak it. */
#define FREE_GEOMETRY() do { \
    if ((geom = xcb_get_geometry_reply(conn, geomc, 0)) != NULL) \
        free(geom); \
} while (0)

    /* Check if the window is mapped (it could be not mapped when intializing and
       calling manage_window() for every window) */
    if ((attr = xcb_get_window_attributes_reply(conn, cookie, 0)) == NULL) {
        DLOG("Could not get attributes\n");
        FREE_GEOMETRY();
        return;
    }

    if (needs_to_be_mapped && attr->map_state != XCB_MAP_STATE_VIEWABLE) {
        FREE_GEOMETRY();
        goto out;
    }

    /* Don’t manage clients with the override_redirect flag */
    if (attr->override_redirect) {
        FREE_GEOMETRY();
        goto out;
    }

    /* Check if the window is already managed */
    if (con_by_window_id(window) != NULL) {
        DLOG("already managed (by con %p)\n", con_by_window_id(window));
        FREE_GEOMETRY();
        goto out;
    }

    /* Get the initial geometry (position, size, …) */
    if ((geom = xcb_get_geometry_reply(conn, geomc, 0)) == NULL) {
        DLOG("could not get geometry\n");
        goto out;
    }

    uint32_t values[1];

    /* Set a temporary event mask for the new window, consisting only of
     * PropertyChange and StructureNotify. We need to be notified of
     * PropertyChanges because the client can change its properties *after* we
     * requested them but *before* we actually reparented it and have set our
     * final event mask.
     * We need StructureNotify because the client may unmap the window before
     * we get to re-parent it.
     * If this request fails, we assume the client has already unmapped the
     * window between the MapRequest and our event mask change. */
    values[0] = XCB_EVENT_MASK_PROPERTY_CHANGE |
                XCB_EVENT_MASK_STRUCTURE_NOTIFY;
    xcb_void_cookie_t event_mask_cookie =
        xcb_change_window_attributes_checked(conn, window, XCB_CW_EVENT_MASK, values);
    if (xcb_request_check(conn, event_mask_cookie) != NULL) {
        LOG("Could not change event mask, the window probably already disappeared.\n");
        goto out;
    }

#define GET_PROPERTY(atom, len) xcb_get_property(conn, false, window, atom, XCB_GET_PROPERTY_TYPE_ANY, 0, len)

    /* Fire off all property requests at once; the replies are collected
     * below after the button grabs. */
    wm_type_cookie = GET_PROPERTY(A__NET_WM_WINDOW_TYPE, UINT32_MAX);
    strut_cookie = GET_PROPERTY(A__NET_WM_STRUT_PARTIAL, UINT32_MAX);
    state_cookie = GET_PROPERTY(A__NET_WM_STATE, UINT32_MAX);
    utf8_title_cookie = GET_PROPERTY(A__NET_WM_NAME, 128);
    leader_cookie = GET_PROPERTY(A_WM_CLIENT_LEADER, UINT32_MAX);
    transient_cookie = GET_PROPERTY(XCB_ATOM_WM_TRANSIENT_FOR, UINT32_MAX);
    title_cookie = GET_PROPERTY(XCB_ATOM_WM_NAME, 128);
    class_cookie = GET_PROPERTY(XCB_ATOM_WM_CLASS, 128);
    role_cookie = GET_PROPERTY(A_WM_WINDOW_ROLE, 128);
    startup_id_cookie = GET_PROPERTY(A__NET_STARTUP_ID, 512);
    wm_hints_cookie = xcb_icccm_get_wm_hints(conn, window);
#ifdef USE_ICONS
    wm_icon_cookie = xcb_get_property_unchecked(conn, false, window, A__NET_WM_ICON, XCB_ATOM_CARDINAL, 0, UINT32_MAX);
#endif
    /* TODO: also get wm_normal_hints here. implement after we got rid of xcb-event */

    DLOG("Managing window 0x%08x\n", window);
    i3Window *cwindow = scalloc(sizeof(i3Window));
    cwindow->id = window;
    cwindow->depth = get_visual_depth(attr->visual);

    /* We need to grab the mouse buttons for click to focus */
    xcb_grab_button(conn, false, window, XCB_EVENT_MASK_BUTTON_PRESS,
                    XCB_GRAB_MODE_SYNC, XCB_GRAB_MODE_ASYNC, root, XCB_NONE,
                    1 /* left mouse button */,
                    XCB_BUTTON_MASK_ANY /* don’t filter for any modifiers */);

    xcb_grab_button(conn, false, window, XCB_EVENT_MASK_BUTTON_PRESS,
                    XCB_GRAB_MODE_SYNC, XCB_GRAB_MODE_ASYNC, root, XCB_NONE,
                    2 /* middle mouse button */,
                    XCB_BUTTON_MASK_ANY /* don’t filter for any modifiers */);

    xcb_grab_button(conn, false, window, XCB_EVENT_MASK_BUTTON_PRESS,
                    XCB_GRAB_MODE_SYNC, XCB_GRAB_MODE_ASYNC, root, XCB_NONE,
                    3 /* right mouse button */,
                    XCB_BUTTON_MASK_ANY /* don’t filter for any modifiers */);

    /* update as much information as possible so far (some replies may be NULL) */
    window_update_class(cwindow, xcb_get_property_reply(conn, class_cookie, NULL), true);
    window_update_name_legacy(cwindow, xcb_get_property_reply(conn, title_cookie, NULL), true);
    window_update_name(cwindow, xcb_get_property_reply(conn, utf8_title_cookie, NULL), true);
    window_update_leader(cwindow, xcb_get_property_reply(conn, leader_cookie, NULL));
    window_update_transient_for(cwindow, xcb_get_property_reply(conn, transient_cookie, NULL));
    window_update_strut_partial(cwindow, xcb_get_property_reply(conn, strut_cookie, NULL));
    window_update_role(cwindow, xcb_get_property_reply(conn, role_cookie, NULL), true);
    window_update_hints(cwindow, xcb_get_property_reply(conn, wm_hints_cookie, NULL));
#ifdef USE_ICONS
    window_update_icon(cwindow, xcb_get_property_reply(conn, wm_icon_cookie, NULL));
#endif

    xcb_get_property_reply_t *startup_id_reply;
    startup_id_reply = xcb_get_property_reply(conn, startup_id_cookie, NULL);
    char *startup_ws = startup_workspace_for_window(cwindow, startup_id_reply);
    DLOG("startup workspace = %s\n", startup_ws);

    /* check if the window needs WM_TAKE_FOCUS */
    cwindow->needs_take_focus = window_supports_protocol(cwindow->id, A_WM_TAKE_FOCUS);

    /* Where to start searching for a container that swallows the new one? */
    Con *search_at = croot;

    xcb_get_property_reply_t *reply = xcb_get_property_reply(conn, wm_type_cookie, NULL);
    if (xcb_reply_contains_atom(reply, A__NET_WM_WINDOW_TYPE_DOCK)) {
        LOG("This window is of type dock\n");
        Output *output = get_output_containing(geom->x, geom->y);
        if (output != NULL) {
            DLOG("Starting search at output %s\n", output->name);
            search_at = output->con;
        }

        /* find out the desired position of this dock window */
        if (cwindow->reserved.top > 0 && cwindow->reserved.bottom == 0) {
            DLOG("Top dock client\n");
            cwindow->dock = W_DOCK_TOP;
        } else if (cwindow->reserved.top == 0 && cwindow->reserved.bottom > 0) {
            DLOG("Bottom dock client\n");
            cwindow->dock = W_DOCK_BOTTOM;
        } else {
            DLOG("Ignoring invalid reserved edges (_NET_WM_STRUT_PARTIAL), using position as fallback:\n");
            if (geom->y < (search_at->rect.height / 2)) {
                DLOG("geom->y = %d < rect.height / 2 = %d, it is a top dock client\n",
                     geom->y, (search_at->rect.height / 2));
                cwindow->dock = W_DOCK_TOP;
            } else {
                DLOG("geom->y = %d >= rect.height / 2 = %d, it is a bottom dock client\n",
                     geom->y, (search_at->rect.height / 2));
                cwindow->dock = W_DOCK_BOTTOM;
            }
        }
    }

    DLOG("Initial geometry: (%d, %d, %d, %d)\n", geom->x, geom->y, geom->width, geom->height);

    Con *nc = NULL;
    Match *match = NULL;
    Assignment *assignment;

    /* TODO: two matches for one container */

    /* See if any container swallows this new window */
    nc = con_for_window(search_at, cwindow, &match);
    if (nc == NULL) {
        /* If not, check if it is assigned to a specific workspace / output */
        if ((assignment = assignment_for(cwindow, A_TO_WORKSPACE | A_TO_OUTPUT))) {
            DLOG("Assignment matches (%p)\n", match);
            if (assignment->type == A_TO_WORKSPACE) {
                nc = con_descend_tiling_focused(workspace_get(assignment->dest.workspace, NULL));
                DLOG("focused on ws %s: %p / %s\n", assignment->dest.workspace, nc, nc->name);
                if (nc->type == CT_WORKSPACE)
                    nc = tree_open_con(nc, cwindow);
                else nc = tree_open_con(nc->parent, cwindow);
            }
            /* TODO: handle assignments with type == A_TO_OUTPUT */
        } else if (startup_ws) {
            /* If it’s not assigned, but was started on a specific workspace,
             * we want to open it there */
            DLOG("Using workspace on which this application was started (%s)\n", startup_ws);
            nc = con_descend_tiling_focused(workspace_get(startup_ws, NULL));
            DLOG("focused on ws %s: %p / %s\n", startup_ws, nc, nc->name);
            if (nc->type == CT_WORKSPACE)
                nc = tree_open_con(nc, cwindow);
            else nc = tree_open_con(nc->parent, cwindow);
        } else {
            /* If not, insert it at the currently focused position */
            if (focused->type == CT_CON && con_accepts_window(focused)) {
                LOG("using current container, focused = %p, focused->name = %s\n",
                    focused, focused->name);
                nc = focused;
            } else nc = tree_open_con(NULL, cwindow);
        }
    } else {
        /* M_BELOW inserts the new window as a child of the one which was
         * matched (e.g. dock areas) */
        if (match != NULL && match->insert_where == M_BELOW) {
            nc = tree_open_con(nc, cwindow);
        }
    }

    DLOG("new container = %p\n", nc);
    nc->window = cwindow;
    x_reinit(nc);

    nc->border_width = geom->border_width;

    char *name;
    sasprintf(&name, "[i3 con] container around %p", cwindow);
    x_set_name(nc, name);
    free(name);

    /* Figure out whether some (output-local or global) fullscreen container
     * exists; if so, the new window must not steal focus. */
    Con *ws = con_get_workspace(nc);
    Con *fs = (ws ? con_get_fullscreen_con(ws, CF_OUTPUT) : NULL);
    if (fs == NULL)
        fs = con_get_fullscreen_con(croot, CF_GLOBAL);

    if (fs == NULL) {
        DLOG("Not in fullscreen mode, focusing\n");
        if (!cwindow->dock) {
            /* Check that the workspace is visible and on the same output as
             * the current focused container. If the window was assigned to an
             * invisible workspace, we should not steal focus. */
            Con *current_output = con_get_output(focused);
            Con *target_output = con_get_output(ws);

            if (workspace_is_visible(ws) && current_output == target_output) {
                if (!match || !match->restart_mode) {
                    con_focus(nc);
                } else DLOG("not focusing, matched with restart_mode == true\n");
            } else DLOG("workspace not visible, not focusing\n");
        } else DLOG("dock, not focusing\n");
    } else {
        DLOG("fs = %p, ws = %p, not focusing\n", fs, ws);
        /* Insert the new container in focus stack *after* the currently
         * focused (fullscreen) con. This way, the new container will be
         * focused after we return from fullscreen mode */
        Con *first = TAILQ_FIRST(&(nc->parent->focus_head));
        if (first != nc) {
            /* We only modify the focus stack if the container is not already
             * the first one. This can happen when existing containers swallow
             * new windows, for example when restarting. */
            TAILQ_REMOVE(&(nc->parent->focus_head), nc, focused);
            TAILQ_INSERT_AFTER(&(nc->parent->focus_head), first, nc, focused);
        }
    }

    /* set floating if necessary */
    bool want_floating = false;
    if (xcb_reply_contains_atom(reply, A__NET_WM_WINDOW_TYPE_DIALOG) ||
        xcb_reply_contains_atom(reply, A__NET_WM_WINDOW_TYPE_UTILITY) ||
        xcb_reply_contains_atom(reply, A__NET_WM_WINDOW_TYPE_TOOLBAR) ||
        xcb_reply_contains_atom(reply, A__NET_WM_WINDOW_TYPE_SPLASH)) {
        LOG("This window is a dialog window, setting floating\n");
        want_floating = true;
    }

    FREE(reply);

    if (cwindow->transient_for != XCB_NONE ||
        (cwindow->leader != XCB_NONE &&
         cwindow->leader != cwindow->id &&
         con_by_window_id(cwindow->leader) != NULL)) {
        LOG("This window is transient for another window, setting floating\n");
        want_floating = true;

        if (config.popup_during_fullscreen == PDF_LEAVE_FULLSCREEN &&
            fs != NULL) {
            LOG("There is a fullscreen window, leaving fullscreen mode\n");
            con_toggle_fullscreen(fs, CF_OUTPUT);
        } else if (config.popup_during_fullscreen == PDF_SMART &&
                   fs != NULL &&
                   fs->window != NULL) {
            /* Walk the transient_for chain; if the popup (transitively)
             * belongs to the fullscreen window, focus it anyway. */
            i3Window *transient_win = cwindow;
            while (transient_win != NULL &&
                   transient_win->transient_for != XCB_NONE) {
                if (transient_win->transient_for == fs->window->id) {
                    LOG("This floating window belongs to the fullscreen window (popup_during_fullscreen == smart)\n");
                    con_focus(nc);
                    break;
                }
                Con *next_transient = con_by_window_id(transient_win->transient_for);
                if (next_transient == NULL)
                    break;
                transient_win = next_transient->window;
            }
        }
    }

    /* dock clients cannot be floating, that makes no sense */
    if (cwindow->dock)
        want_floating = false;

    /* Store the requested geometry. The width/height gets raised to at least
     * 75x50 when entering floating mode, which is the minimum size for a
     * window to be useful (smaller windows are usually overlays/toolbars/…
     * which are not managed by the wm anyways). We store the original geometry
     * here because it’s used for dock clients. */
    nc->geometry = (Rect){ geom->x, geom->y, geom->width, geom->height };

    if (want_floating) {
        DLOG("geometry = %d x %d\n", nc->geometry.width, nc->geometry.height);
        floating_enable(nc, true);
    }

    /* to avoid getting an UnmapNotify event due to reparenting, we temporarily
     * declare no interest in any state change event of this window */
    values[0] = XCB_NONE;
    xcb_change_window_attributes(conn, window, XCB_CW_EVENT_MASK, values);

    xcb_void_cookie_t rcookie = xcb_reparent_window_checked(conn, window, nc->frame, 0, 0);
    if (xcb_request_check(conn, rcookie) != NULL) {
        LOG("Could not reparent the window, aborting\n");
        goto geom_out;
    }

    /* Reparented: install the final event mask (minus EnterWindow, which is
     * enabled later). */
    values[0] = CHILD_EVENT_MASK & ~XCB_EVENT_MASK_ENTER_WINDOW;
    xcb_change_window_attributes(conn, window, XCB_CW_EVENT_MASK, values);
    xcb_flush(conn);

    reply = xcb_get_property_reply(conn, state_cookie, NULL);
    if (xcb_reply_contains_atom(reply, A__NET_WM_STATE_FULLSCREEN))
        con_toggle_fullscreen(nc, CF_OUTPUT);
    FREE(reply);

    /* Put the client inside the save set. Upon termination (whether killed or
     * normal exit does not matter) of the window manager, these clients will
     * be correctly reparented to their most closest living ancestor (=
     * cleanup) */
    xcb_change_save_set(conn, XCB_SET_MODE_INSERT, window);

    /* Check if any assignments match */
    run_assignments(cwindow);

    /* 'ws' may be invalid because of the assignments, e.g. when the user uses
     * "move window to workspace 1", but had it assigned to workspace 2. */
    ws = con_get_workspace(nc);

    /* If this window was put onto an invisible workspace (via assignments), we
     * render this workspace. It wouldn’t be rendered in our normal code path
     * because only the visible workspaces get rendered.
     *
     * By rendering the workspace, we assign proper coordinates (read: not
     * width=0, height=0) to the window, which is important for windows who
     * actually use them to position their GUI elements, e.g. rhythmbox. */
    if (ws && !workspace_is_visible(ws)) {
        /* This is a bit hackish: we need to copy the content container’s rect
         * to the workspace, because calling render_con() on the content
         * container would also take the shortcut and not render the invisible
         * workspace at all. However, just calling render_con() on the
         * workspace isn’t enough either — it needs the rect. */
        ws->rect = ws->parent->rect;
        render_con(ws, true);
    }

    tree_render();

    /* Send an event about window creation */
    ipc_send_window_new_event(nc);

geom_out:
    free(geom);
out:
    free(attr);
    return;
}
/*
 * Zoidcom connect-result callback.
 *
 * On rejection, decodes the server's 8-bit reply code and either schedules
 * a reconnect, reports a ban, or logs a generic failure. On acceptance,
 * configures the local client state, reads the server's mod and level names
 * from the reply stream, and loads them (requesting a download or reporting
 * an error when they are missing locally).
 */
void Client::ZCom_cbConnectResult( ZCom_ConnID _id, eZCom_ConnectResult _result, ZCom_BitStream &_reply )
{
	if ( _result != eZCom_ConnAccepted )
	{
		// Rejected: the first byte of the reply carries the reason.
		switch ( static_cast<Network::ConnectionReply::type>(_reply.getInt(8)) )
		{
			case Network::ConnectionReply::Retry:
				DLOG("Got retry from server");
				network.reconnect(50);
			break;

			case Network::ConnectionReply::Banned:
				console.addLogMsg("* YOU ARE BANNED FROM THIS SERVER");
			break;

			default:
				console.addLogMsg("* COULDNT ESTABLISH CONNECTION");
			break;
		}
		return;
	}

	// Accepted: switch into client mode and register the server connection.
	network.setClient(true);
	ZCom_requestDownstreamLimit(_id, network.downPPS, network.downBPP);
	console.addLogMsg("* CONNECTION ACCEPTED");
	network.setServerID(_id);
	network.incConnCount();

	// The reply stream carries the mod name first, then the level name.
	std::string modName = _reply.getStringStatic();
	std::string levelName = _reply.getStringStatic();

	game.refreshLevels();
	game.refreshMods();
	bool levelFound = game.hasLevel(levelName);
	bool modFound = game.hasMod(modName);

	if(!modFound)
	{
		game.error(Game::ErrorModNotFound);
		//This doesn't work somewhy: network.disconnect();
		//And maybe we don't want to do it since it would overwrite our error message
		return;
	}

	if(!levelFound)
	{
		if(!network.autoDownloads)
		{
			game.error(Game::ErrorMapNotFound);
			return;
		}

		ZCom_requestZoidMode(_id, 2); // We need to update
		if(!levelFound) // kept from the original (always true here)
			updater.requestLevel(levelName);
		return;
	}

	// Both mod and level are available locally: load and enter the game.
	game.setMod( modName );
	if(game.changeLevel( levelName, false ) && game.isLoaded())
	{
		game.runInitScripts();
		sendConsistencyInfo();
		ZCom_requestZoidMode(_id, 1);
	}
	else
	{
		console.addLogMsg("* COULDN'T LOAD MOD OR LEVEL");
		network.disconnect();
	}
}
/*
 * Handle a task that has hit its time limit (tk_timed == TRUE) or its
 * administrative limit (tk_timed == 2 -- any non-TRUE value is treated
 * that way).
 *
 * All operations the task started are abandoned first. Non-search tasks
 * are answered with a serviceError. Search tasks that have produced no
 * local results also get a serviceError; otherwise the partial search
 * results collected so far are returned with the appropriate
 * limitproblem set, with a partial-outcome-qualifier entry added for
 * every outstanding sub-task referral.
 *
 * NOTE(review): declared int but never returns a value -- callers
 * apparently ignore the result.
 */
int timeout_task (struct task_act *tk)
{
	struct oper_act * on;
	struct DSError * err = &(tk->tk_resp.di_error.de_err);
	struct ds_search_task *tmp;

	DLOG(log_dsap, LLOG_TRACE, ("timeout_task"));

	for(on=tk->tk_operlist; on!=NULLOPER; on=on->on_next_task)
	{
		/* Time out operations started by task */
		on->on_state = ON_ABANDONED;
		on->on_task = NULLTASK;
		if (on->on_dsas)
		{
			/* tell the DSAs involved to stop working on it */
			di_desist (on->on_dsas);
			on -> on_dsas = NULL_DI_BLOCK;
		}
	}

	if(tk->tk_dx.dx_arg.dca_dsarg.arg_type != OP_SEARCH)
	{
		/* Non-search operation: reply with a plain serviceError. */
		ds_error_free (err);
		err->dse_type = DSE_SERVICEERROR;
		if (tk->tk_timed == TRUE)
			err->ERR_SERVICE.DSE_sv_problem = DSE_SV_TIMELIMITEXCEEDED;
		else /* tk->tk_timed == 2 */
			err->ERR_SERVICE.DSE_sv_problem = DSE_SV_ADMINLIMITEXCEEDED;
		task_error(tk);
		task_extract(tk);
	}
	else
	{
		/* Do search collation */
		if ((tk->tk_state == TK_ACTIVE) && (tk->local_st == NULL_ST))
		{
			ds_error_free (err);	/* nothing happened yet... */
			err->dse_type = DSE_SERVICEERROR;
			if (tk->tk_timed == TRUE)
				err->ERR_SERVICE.DSE_sv_problem = DSE_SV_TIMELIMITEXCEEDED;
			else /* tk->tk_timed == 2 */
				err->ERR_SERVICE.DSE_sv_problem = DSE_SV_ADMINLIMITEXCEEDED;
			task_error(tk);
		}
		else
		{
			/* send the results we have got... */
			tk->tk_result = &(tk->tk_resp.di_result.dr_res);
			tk->tk_result->dcr_dsres.result_type = tk->tk_dx.dx_arg.dca_dsarg.arg_type;
			tk->tk_resp.di_type = DI_RESULT;
			/* flag why the result set is (possibly) incomplete */
			if (tk->tk_timed == TRUE)
				tk->tk_resp.di_result.dr_res.dcr_dsres.res_sr.CSR_limitproblem = LSR_TIMELIMITEXCEEDED;
			else /* tk->tk_timed == 2 */
				tk->tk_resp.di_result.dr_res.dcr_dsres.res_sr.CSR_limitproblem = LSR_ADMINSIZEEXCEEDED;

			/* Go through sub-tasks and add a POQ for each */
			for(tmp=tk->referred_st; tmp!= NULL_ST; tmp=tmp->st_next)
				add_cref2poq (&tk->tk_result->dcr_dsres.res_sr,tmp->st_cr);
			task_result(tk);
			st_free_dis(&tk->referred_st,1);
		}
		task_extract(tk);
	}
}
bool HarfBuzzShaper::extractShapeResults(hb_buffer_t* harfBuzzBuffer, ShapeResult* shapeResult, bool& fontCycleQueued, const HolesQueueItem& currentQueueItem, const SimpleFontData* currentFont, UScriptCode currentRunScript, bool isLastResort) { enum ClusterResult { Shaped, NotDef, Unknown }; ClusterResult currentClusterResult = Unknown; ClusterResult previousClusterResult = Unknown; unsigned previousCluster = 0; unsigned currentCluster = 0; // Find first notdef glyph in harfBuzzBuffer. unsigned numGlyphs = hb_buffer_get_length(harfBuzzBuffer); hb_glyph_info_t* glyphInfo = hb_buffer_get_glyph_infos(harfBuzzBuffer, 0); unsigned lastChangePosition = 0; if (!numGlyphs) { DLOG(ERROR) << "HarfBuzz returned empty glyph buffer after shaping."; return false; } for (unsigned glyphIndex = 0; glyphIndex <= numGlyphs; ++glyphIndex) { // Iterating by clusters, check for when the state switches from shaped // to non-shaped and vice versa. Taking into account the edge cases of // beginning of the run and end of the run. previousCluster = currentCluster; currentCluster = glyphInfo[glyphIndex].cluster; if (glyphIndex < numGlyphs) { // Still the same cluster, merge shaping status. if (previousCluster == currentCluster && glyphIndex != 0) { if (glyphInfo[glyphIndex].codepoint == 0) { currentClusterResult = NotDef; } else { // We can only call the current cluster fully shapped, if // all characters that are part of it are shaped, so update // currentClusterResult to Shaped only if the previous // characters have been shaped, too. currentClusterResult = currentClusterResult == Shaped ? Shaped : NotDef; } continue; } // We've moved to a new cluster. previousClusterResult = currentClusterResult; currentClusterResult = glyphInfo[glyphIndex].codepoint == 0 ? NotDef : Shaped; } else { // The code below operates on the "flanks"/changes between NotDef // and Shaped. 
In order to keep the code below from explictly // dealing with character indices and run end, we explicitly // terminate the cluster/run here by setting the result value to the // opposite of what it was, leading to atChange turning true. previousClusterResult = currentClusterResult; currentClusterResult = currentClusterResult == NotDef ? Shaped : NotDef; } bool atChange = (previousClusterResult != currentClusterResult) && previousClusterResult != Unknown; if (!atChange) continue; // Compute the range indices of consecutive shaped or .notdef glyphs. // Cluster information for RTL runs becomes reversed, e.g. character 0 // has cluster index 5 in a run of 6 characters. unsigned numCharacters = 0; unsigned numGlyphsToInsert = 0; unsigned startIndex = 0; if (HB_DIRECTION_IS_FORWARD(hb_buffer_get_direction(harfBuzzBuffer))) { startIndex = glyphInfo[lastChangePosition].cluster; if (glyphIndex == numGlyphs) { numCharacters = currentQueueItem.m_startIndex + currentQueueItem.m_numCharacters - glyphInfo[lastChangePosition].cluster; numGlyphsToInsert = numGlyphs - lastChangePosition; } else { numCharacters = glyphInfo[glyphIndex].cluster - glyphInfo[lastChangePosition].cluster; numGlyphsToInsert = glyphIndex - lastChangePosition; } } else { // Direction Backwards startIndex = glyphInfo[glyphIndex - 1].cluster; if (lastChangePosition == 0) { numCharacters = currentQueueItem.m_startIndex + currentQueueItem.m_numCharacters - glyphInfo[glyphIndex - 1].cluster; } else { numCharacters = glyphInfo[lastChangePosition - 1].cluster - glyphInfo[glyphIndex - 1].cluster; } numGlyphsToInsert = glyphIndex - lastChangePosition; } if (currentClusterResult == Shaped && !isLastResort) { // Now it's clear that we need to continue processing. if (!fontCycleQueued) { appendToHolesQueue(HolesQueueNextFont, 0, 0); fontCycleQueued = true; } // Here we need to put character positions. 
ASSERT(numCharacters); appendToHolesQueue(HolesQueueRange, startIndex, numCharacters); } // If numCharacters is 0, that means we hit a NotDef before shaping the // whole grapheme. We do not append it here. For the next glyph we // encounter, atChange will be true, and the characters corresponding to // the grapheme will be added to the TODO queue again, attempting to // shape the whole grapheme with the next font. // When we're getting here with the last resort font, we have no other // choice than adding boxes to the ShapeResult. if ((currentClusterResult == NotDef && numCharacters) || isLastResort) { // Here we need to specify glyph positions. OwnPtr<ShapeResult::RunInfo> run = adoptPtr(new ShapeResult::RunInfo(currentFont, TextDirectionToHBDirection(m_textRun.direction(), m_font->getFontDescription().orientation(), currentFont), ICUScriptToHBScript(currentRunScript), startIndex, numGlyphsToInsert, numCharacters)); insertRunIntoShapeResult(shapeResult, run.release(), lastChangePosition, numGlyphsToInsert, harfBuzzBuffer); } lastChangePosition = glyphIndex; } return true; }
/*
 * task_select - scan all DSA connections, time out expired tasks and idle
 * connections, pick the highest-priority runnable task, open waiting
 * connections (DSA-info ops first, then non-GetEDB, then the rest), and
 * process pending GetEDB/shadow operations when the server is quiet.
 *
 * Returns the selected task (or NULLTASK) and writes the number of seconds
 * until the next timeout into *secs_p (NOTOK = no timeout pending).
 */
struct task_act * task_select (int *secs_p)
{
    struct connection * cn;
    struct connection * cn_tmp;
    struct connection **next_cn;
    struct task_act * tk;
    struct task_act **next_tk;
    struct oper_act * on;
    int timeout_tmp;
    char process_edbs = TRUE;   /* stays TRUE only if no open conn has tasks */
    char do_timeout;
    int suspended = FALSE;
    int xi = 0;                 /* number of connections waiting to open */
    struct task_act * ret_tk = NULLTASK;
    extern char startup_update;
    struct oper_act * newop = NULLOPER;

    time (&timenow);
    (*secs_p) = NOTOK;          /* NOTOK == "no wakeup needed" */
    conns_used = 0;

    /* Pass 1: walk every live connection, expiring timed tasks and idle
     * or half-open connections, and remember the earliest deadline. */
    for(cn=connlist; cn!=NULLCONN; cn=cn_tmp) {
        /* conn_extract() may be reached from calls below, so save the
         * successor now (nasty but necessary). */
        cn_tmp = cn->cn_next;
        do_timeout = FALSE;
#ifdef DEBUG
        conn_log(cn,LLOG_DEBUG);
#endif
        /* Expire timed tasks; unlink before timeout_task() so the list
         * stays consistent while the task is destroyed. */
        next_tk = &(cn->cn_tasklist);
        for(tk=cn->cn_tasklist; tk!=NULLTASK; tk=(*next_tk)) {
            if(tk->tk_timed) {
                if(tk->tk_timeout <= timenow) {
#ifdef DEBUG
                    struct UTCtime ut;
                    struct UTCtime ut2;
                    DLOG(log_dsap, LLOG_TRACE, ("task has timelimit of %ld", tk->tk_timeout));
                    tm2ut(gmtime(&(tk->tk_timeout)), &ut);
                    DLOG(log_dsap, LLOG_DEBUG, ("converted timelimit = %s", utct2str(&(ut))));
                    tm2ut(gmtime(&(timenow)), &ut2);
                    DLOG(log_dsap, LLOG_DEBUG, ("time now = %s", utct2str(&(ut2))));
#endif
                    (*next_tk) = tk->tk_next;
                    timeout_task(tk);
                    continue;
                } else {
                    /* Not yet expired: shrink the poll interval if this
                     * deadline is the closest seen so far. */
                    timeout_tmp = (int) tk->tk_timeout - timenow;
                    if(((*secs_p) == NOTOK) || ((*secs_p) > timeout_tmp)) {
                        (*secs_p) = timeout_tmp;
                    }
                }
            }
            next_tk = &(tk->tk_next);
        }
        if(cn->cn_state == CN_OPEN) {
            /* Pick the best ACTIVE task: highest priority wins; ties go to
             * the one with the earlier (or only) timeout. */
            next_tk = &(cn->cn_tasklist);
            for(tk=cn->cn_tasklist; tk!=NULLTASK; tk=(*next_tk)) {
                next_tk = &(tk->tk_next);
                if(tk->tk_state == TK_ACTIVE) {
                    if( (ret_tk == NULLTASK) || (tk->tk_prio > ret_tk->tk_prio) || ( (tk->tk_prio == ret_tk->tk_prio) && ( (!ret_tk->tk_timed) || ( (tk->tk_timed) && (tk->tk_timeout < ret_tk->tk_timeout) ) ) ) ) {
                        ret_tk = tk;
                    }
                }
                if(tk->tk_state == TK_SUSPEND) {
                    /* A task suspended to allow the network to be polled.
                     * Reactivate it and force a zero-second poll below. */
                    tk->tk_state = TK_ACTIVE;
                    suspended = TRUE;
                }
            }
            if(cn->cn_tasklist == NULLTASK) {
                if(cn->cn_initiator) {
                    if(cn->cn_operlist == NULLOPER) {
                        /* Idle outbound connection: close it when its
                         * keep-alive (conn_timeout) lapses. */
                        if((cn->cn_last_used + conn_timeout) <= timenow) {
                            do_timeout = TRUE;
                        } else {
                            timeout_tmp = (int) (cn->cn_last_used + conn_timeout) - timenow;
                            if(((*secs_p) == NOTOK) || ((*secs_p) > timeout_tmp)) {
                                (*secs_p) = timeout_tmp;
                            }
                        }
                    } else {
                        timeout_tmp = conn_timeout; /* safety catch */
                        if ((tk = cn->cn_operlist->on_task) != NULLTASK) {
                            if (tk->tk_timed) {
                                timeout_tmp = (int) tk->tk_timeout - timenow;
                                if (timeout_tmp < 0)
                                    timeout_tmp = 0;
                            }
                        }
                        if(((*secs_p) == NOTOK) || ((*secs_p) > timeout_tmp)) {
                            (*secs_p) = timeout_tmp;
                        }
                        cn->cn_last_used = timenow;
                    }
                }
            } else {
                /* Connection busy: refresh its idle stamp and suppress EDB
                 * processing for this cycle. */
                cn->cn_last_used = timenow;
                process_edbs = FALSE;
            }
        } else {
            /* Not OPEN: apply the (shorter) nsap_timeout to connections
             * stuck connecting, closing, or half-opened. */
            if((cn->cn_last_used + nsap_timeout) <= timenow) {
                if ((cn->cn_state == CN_CONNECTING1) || (cn->cn_state == CN_CONNECTING2))
                    conn_retry(cn,1);
                else if (cn->cn_state == CN_CLOSING) {
                    if (conn_release_retry(cn) == NOTOK) {
                        /* had its chance - abort */
                        conn_rel_abort (cn);
                        do_ds_unbind(cn);
                        conn_extract(cn);
                    }
                } else if ( (cn->cn_state == CN_OPENING) || (cn->cn_state == CN_PRE_OPENING) ) {
                    /* something started to associate - then gave up !!! */
                    conn_rel_abort (cn);
                    conn_extract (cn);
                }
                (*secs_p) = nsap_timeout;
            } else {
                timeout_tmp = (int) (cn->cn_last_used + nsap_timeout) - timenow;
                if(((*secs_p) == NOTOK) || ((*secs_p) > timeout_tmp)) {
                    (*secs_p) = timeout_tmp;
                }
            }
        }
        if(do_timeout) {
            LLOG(log_dsap, LLOG_TRACE, ("Timing out connection %d",cn->cn_ad));
            /* Release failed: the connection is still around, count it. */
            if (conn_release(cn) == NOTOK) {
                (*secs_p) = nsap_timeout;
                conns_used++;
            }
        } else {
            conns_used++;
        }
    }

    /*
     * Open the connection with the highest priority operation waiting on it.
     * Get-DSA-Info operations are highest priority, followed by
     * BIND_COMPARE and X500, and finally GetEDB operations.
     */
    next_cn = &(connwaitlist);
    for(cn=connwaitlist; cn!=NULLCONN; cn=(*next_cn)) {
        if(conns_used >= MAX_CONNS)
            break;
        for(on=cn->cn_operlist; on!=NULLOPER; on=on->on_next_conn) {
            if(on->on_type == ON_TYPE_GET_DSA_INFO) {
                /* Unlink from the wait list and try to open it. */
                (*next_cn) = cn->cn_next;
                if(conn_request(cn) == OK) {
                    conns_used++;
                    cn->cn_next = connlist;
                    connlist = cn;
                    cn->cn_last_used = timenow;
                    /* Do something with the operations */
                } else {
                    /* Do something with the operations */
                }
                break;
            }
        }
        if(on == NULLOPER)
            next_cn = &(cn->cn_next);
    }
    /* Second tier: anything except GetEDB, keeping CONNS_RESERVED_DI slots
     * free for DSA-info traffic. */
    next_cn = &(connwaitlist);
    for(cn=connwaitlist; cn!=NULLCONN; cn=(*next_cn)) {
        if(conns_used >= (MAX_CONNS - CONNS_RESERVED_DI))
            break;
        for(on=cn->cn_operlist; on!=NULLOPER; on=on->on_next_conn) {
            if(on->on_type != ON_TYPE_GET_EDB) {
                (*next_cn) = cn->cn_next;
                if(conn_request(cn) == OK) {
                    conns_used++;
                    cn->cn_next = connlist;
                    connlist = cn;
                    cn->cn_last_used = timenow;
                    /* Do something with the operations */
                } else {
                    /* Do something with the operations */
                }
                break;
            }
        }
        if(on == NULLOPER)
            next_cn = &(cn->cn_next);
    }
    /* Last tier: whatever is left (GetEDB), with both reserves held back. */
    next_cn = &(connwaitlist);
    for(cn=connwaitlist; cn!=NULLCONN; cn=(*next_cn)) {
        if(conns_used >= (MAX_CONNS - CONNS_RESERVED_DI - CONNS_RESERVED_X500))
            break;
        (*next_cn) = cn->cn_next;
        if(conn_request(cn) == OK) {
            conns_used++;
            cn->cn_next = connlist;
            connlist = cn;
            cn->cn_last_used = timenow;
            /* Do something with the operations */
        } else {
            /* Do something with the operations */
        }
    }
    if(process_edbs && !quipu_shutdown) {
        /*
         * Nothing is happening that would be disturbed by writing back a
         * retrieved EDB, so it is a good time to process them.
         */
        if (!get_edb_ops && pending_ops) {
            /* Promote the pending queue head to the in-flight slot. */
            get_edb_ops = pending_ops;
            pending_ops = NULLOPER;
            if(oper_chain(get_edb_ops) != OK) {
                LLOG(log_dsap, LLOG_TRACE, ("Could not chain a pending operation"));
                (*secs_p) = 0; /* service network and then try next one */
                pending_ops = get_edb_ops -> on_next_task;
                get_edb_ops -> on_next_task = NULLOPER;
                oper_free(get_edb_ops);
                get_edb_ops = NULLOPER;
            }
        } else if (get_edb_ops) {
            if (get_edb_ops->on_state == ON_COMPLETE) {
                if (get_edb_ops->on_type == ON_TYPE_GET_EDB)
                    process_edb(get_edb_ops,&newop);
                else { /* ON_TYPE_SHADOW */
                    process_shadow(get_edb_ops);
                    ds_res_free (&get_edb_ops-> on_resp.di_result.dr_res.dcr_dsres);
                }
                if (newop) {
                    /* process_edb produced a follow-up operation: splice it
                     * in where the completed one was and send it. */
                    newop->on_next_task = get_edb_ops->on_next_task;
                    get_edb_ops->on_next_task = NULLOPER;
                    oper_conn_extract(get_edb_ops);
                    oper_free(get_edb_ops);
                    if (oper_send_invoke (newop) != OK) {
                        LLOG(log_dsap, LLOG_EXCEPTIONS, ("oper_send getedb next failed"));
                        oper_free (newop);
                        get_edb_ops = NULLOPER;
                    }
                    /* NOTE(review): on the failure path above this assigns
                     * the just-freed newop, overwriting the NULLOPER —
                     * looks like a use-after-free; confirm upstream. */
                    get_edb_ops = newop;
                } else if (get_edb_ops) {
                    pending_ops = get_edb_ops->on_next_task;
                    get_edb_ops->on_next_task = NULLOPER;
                    oper_conn_extract(get_edb_ops);
                    oper_free(get_edb_ops);
                    get_edb_ops = NULLOPER;
                }
                (*secs_p) = 0; /* Schedule next one ! */
            } else if (get_edb_ops->on_state == ON_ABANDONED) {
                LLOG (log_dsap,LLOG_TRACE,("Get edb has been abandoned"));
                pending_ops = get_edb_ops->on_next_task;
                get_edb_ops->on_next_task = NULLOPER;
                oper_free(get_edb_ops);
                get_edb_ops = NULLOPER;
                (*secs_p) = 0; /* Schedule next one ! */
            }
        } else if (startup_update) {
            /* see if cache timer has expired - if so resend edb ops... */
            if ( (timenow - lastedb_update) >= slave_timeout )
                slave_update();
        }
    }
    if ((get_edb_ops == NULLOPER) && startup_update ) {
        /* make sure we are awake for the next EDB update */
        if ((timeout_tmp = lastedb_update + slave_timeout - timenow) >= 0)
            if (((*secs_p) == NOTOK) || ((*secs_p) > timeout_tmp))
                (*secs_p) = timeout_tmp;
    }
    if(suspended) {
        /*
         * A task suspended in order for the network to be checked.
         * Force this to happen by setting the selected task to NULL and
         * the polling time of the network to 0 secs.
         */
        ret_tk = NULLTASK;
        (*secs_p) = 0;
    }
    /* Count waiters; if any (or we are shutting down), release idle open
     * connections early so the slots can be re-used. */
    for(cn=connwaitlist; cn!=NULLCONN; cn=cn->cn_next)
        xi++;
    /* If something is waiting, see if we can shut a connection down.
     * Make arbitrary choice for now. */
    for(cn=connlist; (xi!=0 || quipu_shutdown) && (cn!=NULLCONN); cn=cn_tmp) {
        cn_tmp = cn->cn_next;
        if ((cn->cn_state == CN_OPEN) && (cn->cn_tasklist == NULLTASK) && (cn->cn_initiator) && (cn->cn_operlist == NULLOPER)) {
            LLOG(log_dsap, LLOG_TRACE, ("Releasing connection %d early (%d waiting)",cn->cn_ad,xi));
            if (conn_release(cn) == NOTOK)
                conns_used++;
            else
                xi--;
            (*secs_p) = 0; /* let connection be re-used */
        }
    }
#ifndef NO_STATS
    if ( (timenow - last_log_close) >= LOGOPENTIME ) {
        ll_close (log_stat);
        last_log_close = timenow;
    } else {
        if ( (ret_tk == NULLTASK) && (*secs_p >= LOGOPENTIME))
            *secs_p = LOGOPENTIME; /* Wake to close log! */
    }
#endif
    /* Quiescent and shutting down: sleep indefinitely. */
    if(process_edbs && quipu_shutdown)
        *secs_p = NOTOK;
    return(ret_tk);
}
// Fill one prefetch batch from the backing database.
//
// Two fetch modes, selected by use_data_[0]:
//   == 0.0 : sequential — read batch_size consecutive records via cursor_,
//            wrapping to the start of the DB when the cursor is exhausted.
//   != 0.0 : keyed — use_data_[1 .. batch_size] holds integer record ids
//            (formatted as "%05d" keys) fetched through a DB transaction,
//            and use_data_[batch_size+1 .. 2*batch_size] holds the labels
//            to assign (overriding the stored ones).
// Labels are written only when output_labels_ is set.
// NOTE(review): read_time/trans_time are never accumulated (timer is never
// started), so the Read/Transform DLOGs below always report 0.
void DataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  // Reshape according to the first datum of each batch;
  // on single input batches this allows inputs of varying dimension.
  const int batch_size = this->layer_param_.data_param().batch_size();
  // Peek at the current record to infer the blob shape for this batch.
  Datum datum;
  datum.ParseFromString(cursor_->value());
  // Use data_transformer to infer the expected blob shape from datum.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
  this->transformed_data_.Reshape(top_shape);
  // Reshape batch according to the batch_size.
  top_shape[0] = batch_size;
  batch->data_.Reshape(top_shape);
  Dtype* top_data = batch->data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables
  if (this->output_labels_) {
    top_label = batch->label_.mutable_cpu_data();
  }
  Dtype* use_data=this->use_data_.mutable_cpu_data();
  if (use_data[0]==0.0){
    // Sequential mode: visit records in cursor order.
    for (int item_id = 0; item_id < batch_size; item_id++) {
      Datum datum;
      datum.ParseFromString(cursor_->value());
      // Apply data transformations (mirror, scale, crop...) in place.
      int offset = batch->data_.offset(item_id);
      this->transformed_data_.set_cpu_data(top_data + offset);
      this->data_transformer_->Transform(datum, &(this->transformed_data_));
      // Copy label.
      if (this->output_labels_) {
        top_label[item_id] = datum.label();
      }
      cursor_->Next();
      if (!cursor_->valid()) {
        DLOG(INFO) << "Restarting data prefetching from start.";
        cursor_->SeekToFirst();
      }
    }
  }else if (use_data[0]!=0.0){
    // Keyed mode: forward-backward using semi-supervised with false label.
    // use_data_ layout: 0, semi-super/unsuper; 1, label_kinds; 2, step over;
    // 3, datanum; 4, start index.
    int step_over = batch_size+1;
    scoped_ptr<db::Transaction> txn(db_->NewTransaction());
    // NOTE(review): non-const bound makes str_buffer below a variable-length
    // array, which is not standard C++ — should be `const int`.
    int kCIFARImageNBytes=3072;
    for (int item_id = 0; item_id < batch_size; item_id++) {
      char str_buffer[kCIFARImageNBytes];
      // Record id for this slot, taken from the caller-filled index table.
      int id= static_cast<int>(use_data[item_id+ 1]);
      int length = snprintf(str_buffer, kCIFARImageNBytes, "%05d", id);
      string value;
      string str=string(str_buffer, length);
      txn->Get(str, value);
      Datum datum;
      datum.ParseFromString(value);
      int offset = batch->data_.offset(item_id);
      this->transformed_data_.set_cpu_data(top_data + offset);
      this->data_transformer_->Transform(datum, &(this->transformed_data_));
      if (this->output_labels_) {
        // Label comes from the caller-supplied table, not the stored datum.
        top_label[item_id] = use_data[item_id+ step_over];
      }
      // Sanity check: supplied label must match the stored label mod 1000
      // (presumably the stored label encodes extra information — confirm).
      if( use_data[item_id+ step_over]!=(datum.label()%1000))
        LOG(INFO)<<"image id:"<<id<<" not correctly fetch: "<<datum.label()
                 <<" vs "<<use_data[item_id+ step_over];
    }
    txn->Commit();
  }
  timer.Stop();
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
/*
 * Attach a Digiboard PCI adapter: identify the board from its PCI device
 * id, allocate its memory window (and, if compiled in, its interrupt),
 * initialize the softc, and hand off to the board-independent
 * digi_attach().
 *
 * Returns 0 on success, ENXIO if the device id is unknown or a required
 * resource cannot be allocated.
 */
static int
digi_pci_attach(device_t dev)
{
	struct digi_softc *sc;
	u_int32_t device_id;
#ifdef DIGI_INTERRUPT
	int retVal = 0;
#endif

	sc = device_get_softc(dev);
	KASSERT(sc, ("digi%d: softc not allocated in digi_pci_attach\n",
	    device_get_unit(dev)));
	bzero(sc, sizeof(*sc));
	sc->dev = dev;
	sc->res.unit = device_get_unit(dev);

	/* The model determines the download module and the BAR (mrid)
	 * holding the memory window: ASIC boards use 0x10, PLX use 0x18. */
	device_id = pci_get_devid(dev);
	switch (device_id >> 16) {
	case PCI_DEVICE_EPC:
		sc->name = "Digiboard PCI EPC/X ASIC";
		sc->res.mrid = 0x10;
		sc->model = PCIEPCX;
		sc->module = "EPCX_PCI";
		break;
	case PCI_DEVICE_XEM:
		sc->name = "Digiboard PCI PC/Xem ASIC";
		sc->res.mrid = 0x10;
		sc->model = PCXEM;
		sc->module = "Xem";
		break;
	case PCI_DEVICE_XR:
		sc->name = "Digiboard PCI PC/Xr ASIC";
		sc->res.mrid = 0x10;
		sc->model = PCIXR;
		sc->module = "Xr";
		break;
	case PCI_DEVICE_CX:
		sc->name = "Digiboard PCI C/X ASIC";
		sc->res.mrid = 0x10;
		sc->model = PCCX;
		sc->module = "CX_PCI";
		break;
	case PCI_DEVICE_XRJ:
		sc->name = "Digiboard PCI PC/Xr PLX";
		sc->res.mrid = 0x18;
		sc->model = PCIXR;
		sc->module = "Xr";
		break;
	case PCI_DEVICE_EPCJ:
		sc->name = "Digiboard PCI EPC/X PLX";
		sc->res.mrid = 0x18;
		sc->model = PCIEPCX;
		sc->module = "EPCX_PCI";
		break;
	case PCI_DEVICE_920_4:			/* Digi PCI4r 920 */
		sc->name = "Digiboard PCI4r 920";
		sc->res.mrid = 0x10;
		sc->model = PCIXR;
		sc->module = "Xr";
		break;
	case PCI_DEVICE_920_8:			/* Digi PCI8r 920 */
		sc->name = "Digiboard PCI8r 920";
		sc->res.mrid = 0x10;
		sc->model = PCIXR;
		sc->module = "Xr";
		break;
	case PCI_DEVICE_920_2:			/* Digi PCI2r 920 */
		sc->name = "Digiboard PCI2r 920";
		sc->res.mrid = 0x10;
		sc->model = PCIXR;
		sc->module = "Xr";
		break;
	default:
		device_printf(dev, "Unknown device id = %08x\n", device_id);
		return (ENXIO);
	}

	/* Clear board-specific config registers before mapping the window
	 * (presumably power/doze control — TODO confirm against the
	 * hardware reference). */
	pci_write_config(dev, 0x40, 0, 4);
	pci_write_config(dev, 0x46, 0, 4);

	sc->res.mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->res.mrid, RF_ACTIVE);
	/*
	 * BUGFIX: the result was previously used unchecked; a failed
	 * allocation would pass NULL to rman_get_virtual() below.
	 */
	if (sc->res.mem == NULL) {
		device_printf(dev, "couldn't map memory\n");
		return (ENXIO);
	}

#ifdef DIGI_INTERRUPT
	sc->res.irqrid = 0;
	sc->res.irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->res.irqrid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->res.irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		return (ENXIO);
	}
	retVal = bus_setup_intr(dev, sc->res.irq, INTR_TYPE_TTY,
	    digiintr, sc, &sc->res.irqHandler);
#else
	DLOG(DIGIDB_IRQ, (sc->dev, "Interrupt support compiled out\n"));
#endif

	sc->vmem = rman_get_virtual(sc->res.mem);
	sc->pmem = vtophys(sc->vmem);
	sc->pcibus = 1;
	sc->win_size = 0x200000;
	sc->win_bits = 21;
	sc->csigs = &digi_normal_signals;
	sc->status = DIGI_STATUS_NOTINIT;
	callout_handle_init(&sc->callout);
	callout_handle_init(&sc->inttest);
	sc->setwin = digi_pci_setwin;
	sc->hidewin = digi_pci_hidewin;
	sc->towin = digi_pci_towin;

	/* Reset the on-board processor before the generic attach. */
	PCIPORT = FEPRST;

	return (digi_attach(sc));
}
/*
 * Update the on-disk timestamps/flags of an LFS inode.
 *
 * vp       - vnode whose inode is to be updated
 * acc, mod - access / modification timestamps handed to LFS_ITIMES
 * updflags - UPDATE_WAIT / UPDATE_DIROP / UPDATE_CLOSE combination
 *
 * Returns 0 if nothing needed writing (read-only mount or no dirty
 * flags); otherwise, for a synchronous update, the result of
 * lfs_vflush().
 */
int
lfs_update(struct vnode *vp, const struct timespec *acc,
    const struct timespec *mod, int updflags)
{
	struct inode *ip;
	struct lfs *fs = VFSTOULFS(vp->v_mount)->um_lfs;
	int flags;

	ASSERT_NO_SEGLOCK(fs);
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return (0);
	ip = VTOI(vp);

	/*
	 * If we are called from vinvalbuf, and the file's blocks have
	 * already been scheduled for writing, but the writes have not
	 * yet completed, lfs_vflush will not be called, and vinvalbuf
	 * will cause a panic. So, we must wait until any pending write
	 * for our inode completes, if we are called with UPDATE_WAIT set.
	 */
	mutex_enter(vp->v_interlock);
	while ((updflags & (UPDATE_WAIT|UPDATE_DIROP)) == UPDATE_WAIT &&
	    WRITEINPROG(vp)) {
		DLOG((DLOG_SEG, "lfs_update: sleeping on ino %d"
		    " (in progress)\n", ip->i_number));
		cv_wait(&vp->v_cv, vp->v_interlock);
	}
	mutex_exit(vp->v_interlock);
	LFS_ITIMES(ip, acc, mod, NULL);
	/* On close, IN_ACCESSED alone also forces an update. */
	if (updflags & UPDATE_CLOSE)
		flags = ip->i_flag & (IN_MODIFIED | IN_ACCESSED | IN_CLEANING);
	else
		flags = ip->i_flag & (IN_MODIFIED | IN_CLEANING);
	if (flags == 0)
		return (0);

	/* If sync, push back the vnode and any dirty blocks it may have. */
	if ((updflags & (UPDATE_WAIT|UPDATE_DIROP)) == UPDATE_WAIT) {
		/* Avoid flushing VU_DIROP: wait for dirops to drain first. */
		mutex_enter(&lfs_lock);
		++fs->lfs_diropwait;
		while (vp->v_uflag & VU_DIROP) {
			DLOG((DLOG_DIROP, "lfs_update: sleeping on inode %d"
			    " (dirops)\n", ip->i_number));
			DLOG((DLOG_DIROP, "lfs_update: vflags 0x%x, iflags"
			    " 0x%x\n", vp->v_iflag | vp->v_vflag | vp->v_uflag,
			    ip->i_flag));
			if (fs->lfs_dirops == 0)
				lfs_flush_fs(fs, SEGM_SYNC);
			else
				mtsleep(&fs->lfs_writer, PRIBIO+1, "lfs_fsync",
				    0, &lfs_lock);
			/* XXX KS - by falling out here, are we writing the
			 * vn twice? */
		}
		--fs->lfs_diropwait;
		mutex_exit(&lfs_lock);
		return lfs_vflush(vp);
	}
	return 0;
}
/*
 * ds_bind_init - validate an incoming DSA/DUA bind request.
 *
 * Checks the protocol version, applies the auth_bind policy, and then
 * authenticates according to the credential type: none, simple password,
 * protected (hashed+timestamped) password, or strong (signed).  Password
 * checks against entries held locally complete here; entries held
 * elsewhere are verified by chaining a high-priority COMPARE operation
 * (returns DS_CONTINUE and finishes asynchronously).
 *
 * Returns DS_OK (bind accepted), DS_CONTINUE (remote compare in
 * flight), or DS_ERROR_CONNECT with *error filled in.
 */
int ds_bind_init (struct connection *cn)
{
    struct ds_bind_arg * arg = &(cn->cn_start.cs_ds.ds_bind_arg);
    struct ds_bind_arg * result = &(cn->cn_start.cs_res);
    struct ds_bind_error * error = &(cn->cn_start.cs_err);
    Attr_Sequence as;
    Entry entryptr;
    extern AttributeType at_password;
    extern AttributeType at_p_password;
    struct di_block * dsas = NULL_DI_BLOCK;
    struct di_block * di_tmp;
    struct oper_act * on;
    struct ds_compare_arg * cma;
    struct DSError err;
    static struct common_args ca_def = default_common_args;
    int res;
    int retval;
    struct protected_password * pp;  /* set only on the PROTECTED path */
#ifndef NO_STATS
    char buff[LINESIZE];
#endif
    extern struct SecurityServices *dsap_security;

    DLOG (log_dsap,LLOG_TRACE,("ds_bind_init"));

    /* Only the 1988 protocol version is spoken; refuse during shutdown. */
    if ( (arg->dba_version != DBA_VERSION_V1988) || quipu_shutdown) {
        error->dbe_version = DBA_VERSION_V1988;
        error->dbe_type = DBE_TYPE_SERVICE;
        error->dbe_value = DSE_SV_UNAVAILABLE;
        return(DS_ERROR_CONNECT);
    }

    /* We don't support any bilaterally-defined authentication procedures.
     * Hence, if we get EXTERNAL credentials in the bind, reject them. */
    if (arg->dba_auth_type == DBA_AUTH_EXTERNAL) {
        DLOG(log_dsap, LLOG_EXCEPTIONS, ("EXTERNAL found in credentials"));
        error->dbe_version = DBA_VERSION_V1988;
        error->dbe_type = DBE_TYPE_SERVICE;
        error->dbe_value = DSE_SV_UNAVAILABLE;
        return (DS_ERROR_CONNECT);
    }

    /* If password is present, but zero length, treat as though absent. */
    if ((arg->dba_auth_type == DBA_AUTH_SIMPLE) && (arg->dba_passwd_len == 0))
        arg->dba_auth_type = DBA_AUTH_NONE;

    /* Enforce the minimum credential level configured via auth_bind
     * (1: named-anonymous rejected; 2: all anonymous; 3: simple; 4:
     * protected).  The `out' label is reused by the later cases. */
    switch (arg->dba_auth_type) {
    case DBA_AUTH_NONE:
        if (((arg->dba_dn == NULLDN) && auth_bind == 1) || (auth_bind > 1)) {
out: ;
#ifndef NO_STATS
            if (arg->dba_dn == NULLDN)
                LLOG(log_stat, LLOG_TRACE, ("Bind (%d) (rejected)", cn->cn_ad));
            else {
                sprintf (buff,"Bind (%d) (rejected)",cn->cn_ad);
                pslog (log_stat,LLOG_TRACE,buff,(IFP)dn_print, (caddr_t)arg->dba_dn);
            }
#endif
            error->dbe_version = DBA_VERSION_V1988;
            error->dbe_type = DBE_TYPE_SECURITY;
            error->dbe_value = DSE_SC_AUTHENTICATION;
            return (DS_ERROR_CONNECT);
        }
        break;
    case DBA_AUTH_SIMPLE:
        if (auth_bind > 2)
            goto out;
        break;
    case DBA_AUTH_PROTECTED:
        if (auth_bind > 3)
            goto out;
        break;
    case DBA_AUTH_STRONG:
        break;
    case DBA_AUTH_EXTERNAL:
        goto out;
    }

    /* Anonymous bind (no DN at all): accept with no authentication. */
    if (arg->dba_dn == NULLDN) {
#ifndef NO_STATS
        LLOG(log_stat, LLOG_NOTICE, ("Bind (%d) (anonymous)", cn->cn_ad));
#endif
        cn->cn_authen = DBA_AUTH_NONE;
        make_dsa_bind_arg(result);
        return(DS_OK);
    }

    /* Now we're sure dba_dn contains a valid pointer, can decode it. */
    if ( ! check_prefix_list (arg->dba_dn)) {
#ifndef NO_STATS
        sprintf (buff,"Bind (%d) (reject - prefix)",cn->cn_ad);
        pslog (log_stat,LLOG_TRACE,buff,(IFP)dn_print, (caddr_t)arg->dba_dn);
#endif
        error->dbe_version = DBA_VERSION_V1988;
        error->dbe_type = DBE_TYPE_SECURITY;
        error->dbe_value = DSE_SC_ACCESSRIGHTS;
        return (DS_ERROR_CONNECT);
    }
    if ((cn->cn_ctx == DS_CTX_X500_DAP) && !(check_dn_length(arg->dba_dn))) {
#ifndef NO_STATS
        sprintf (buff,"Bind (%d) (reject - DAP length)",cn->cn_ad);
        pslog (log_stat,LLOG_TRACE,buff,(IFP)dn_print, (caddr_t)arg->dba_dn);
#endif
        error->dbe_version = DBA_VERSION_V1988;
        error->dbe_type = DBE_TYPE_SECURITY;
        error->dbe_value = DSE_SC_ACCESSRIGHTS;
        return (DS_ERROR_CONNECT);
    }

    switch (arg->dba_auth_type) {
    case DBA_AUTH_NONE:
        /* Partially check DN - i.e. see if we can say it DEFINITELY does
         * not exist.  If it possibly exists - allow the bind; checking it
         * fully runs the risk of livelock. */
        switch (res = really_find_entry(arg->dba_dn, TRUE, NULLDNSEQ, FALSE, &(entryptr), &(err), &(dsas))) {
        case DS_X500_ERROR:
            if ((err.dse_type == DSE_NAMEERROR) && (err.ERR_NAME.DSE_na_problem == DSE_NA_NOSUCHOBJECT)) {
                ds_error_free(&(err));
#ifndef NO_STATS
                sprintf (buff,"Bind (%d) (no auth - rejected)",cn->cn_ad);
                pslog (log_stat,LLOG_TRACE,buff,(IFP)dn_print,(caddr_t)arg->dba_dn);
#endif
                error->dbe_version = DBA_VERSION_V1988;
                error->dbe_type = DBE_TYPE_SECURITY;
                error->dbe_value = DSE_SC_INVALIDCREDENTIALS;
                return (DS_ERROR_CONNECT);
            }
            /* fall */
        default:
#ifndef NO_STATS
            sprintf (buff,"Bind (%d) (no auth)",cn->cn_ad);
            pslog (log_stat,LLOG_NOTICE,buff,(IFP)dn_print,(caddr_t)arg->dba_dn);
#endif
            if (dsas != NULL_DI_BLOCK)
                di_desist (dsas);
            cn->cn_authen = DBA_AUTH_NONE;
            make_dsa_bind_arg(result);
            return (DS_OK);
        }
    case DBA_AUTH_SIMPLE:
#ifndef NO_STATS
        sprintf (buff,"Bind (%d) (simple)",cn->cn_ad);
        pslog (log_stat,LLOG_NOTICE,buff,(IFP)dn_print,(caddr_t)arg->dba_dn);
#endif
        /* Can't check simple credentials from DSP (livelock risk).
         * Hence treat DSP accesses as unauthenticated. */
        if (cn->cn_ctx != DS_CTX_X500_DAP) {
            cn->cn_authen = DBA_AUTH_NONE;
            make_dsa_bind_arg(result);
            return(DS_OK);
        }
        break;
    case DBA_AUTH_PROTECTED:
#ifndef NO_STATS
        sprintf (buff,"Bind (%d) (protected)",cn->cn_ad);
        pslog (log_stat,LLOG_NOTICE,buff,(IFP)dn_print,(caddr_t)arg->dba_dn);
#endif
        if (cn->cn_ctx != DS_CTX_X500_DAP) {
            /* Same DSP livelock consideration as for simple credentials. */
            cn->cn_authen = DBA_AUTH_NONE;
            make_dsa_bind_arg(result);
            return(DS_OK);
        } else {
            UTC ut;
            long c_time, s_time, delta;
            /* Reject stale timestamps: the client time must lie within
             * bind_window seconds before our current time. */
            time(&s_time);
            ut = str2utct(arg->dba_time1, strlen(arg->dba_time1));
            if (ut == NULLUTC)
                c_time = 0L; /* 1970 is a convenient out-of-date timestamp */
            else
                c_time = gtime(ut2tm(ut));
            delta = s_time - c_time;
            if ((delta < 0) || (delta > bind_window)) {
                DLOG(log_dsap, LLOG_EXCEPTIONS, ("Time = %s, Delay = %D s : Association rejected", arg->dba_time1, delta));
                error->dbe_version = DBA_VERSION_V1988;
                error->dbe_type = DBE_TYPE_SECURITY;
                error->dbe_value = DSE_SC_INVALIDCREDENTIALS;
                return (DS_ERROR_CONNECT);
            }
            /* Save the hashed password + timestamp for the check below
             * (or for the chained compare on the DS_CONTINUE path). */
            pp = (struct protected_password *) calloc(1, sizeof(*pp));
            /* Ought to check for null pointer ... */
            pp->passwd = malloc((unsigned)arg->dba_passwd_len);
            bcopy(arg->dba_passwd, pp->passwd, arg->dba_passwd_len);
            pp->n_octets = arg->dba_passwd_len;
            pp->time1 = strdup(arg->dba_time1);
            pp->is_protected[0] = (char) 1;
        }
        break;
    case DBA_AUTH_STRONG:
#ifndef NO_STATS
        sprintf (buff,"Bind (%d) (strong)",cn->cn_ad);
        pslog (log_stat,LLOG_NOTICE,buff,(IFP)dn_print,(caddr_t)arg->dba_dn);
#endif
        /* Strong authentication: verify the nonce and certification path
         * if a security service is configured; otherwise accept as NONE
         * over DSP and reject over DAP. */
        if (dsap_security && dsap_security->serv_ckpath && dsap_security->serv_cknonce) {
            int rc;
            DN real_name;
            struct Nonce nonce;
            nonce.non_time1 = arg->dba_time1;
            nonce.non_time2 = arg->dba_time2;
            nonce.non_r1.n_bits = arg->dba_r1.n_bits;
            nonce.non_r1.value = arg->dba_r1.value;
            nonce.non_r2.n_bits = arg->dba_r2.n_bits;
            nonce.non_r2.value = arg->dba_r2.value;
            rc = (dsap_security->serv_cknonce)(&nonce);
            if (rc != OK) {
                error->dbe_version = DBA_VERSION_V1988;
                error->dbe_type = DBE_TYPE_SECURITY;
                error->dbe_value = rc;
                return (DS_ERROR_CONNECT);
            }
            rc = (dsap_security->serv_ckpath)((caddr_t) arg, _ZTokenToSignDAS, &_ZDAS_mod, arg->dba_cpath, arg->dba_sig, &real_name);
            if (rc != OK) {
                error->dbe_version = DBA_VERSION_V1988;
                error->dbe_type = DBE_TYPE_SECURITY;
                error->dbe_value = rc;
                return (DS_ERROR_CONNECT);
            } else {
                /* The signature verifies; the signer must be the DN the
                 * caller claims to be. */
                if(dn_cmp(real_name, arg->dba_dn) == OK) {
                    make_dsa_bind_arg(result);
                    return (DS_OK);
                } else {
                    sprintf (buff,"User != Authenticated User, ie %s != ", dn2str(arg->dba_dn));
                    pslog (log_dsap,LLOG_NOTICE,buff,(IFP)dn_print,(caddr_t)real_name);
                    error->dbe_version = DBA_VERSION_V1988;
                    error->dbe_type = DBE_TYPE_SECURITY;
                    error->dbe_value = DSE_SC_AUTHENTICATION;
                    return (DS_ERROR_CONNECT);
                }
            }
        } else {
            if (cn->cn_ctx != DS_CTX_X500_DAP) {
                cn->cn_authen = DBA_AUTH_NONE;
                make_dsa_bind_arg(result);
                return (DS_OK);
            } else {
                error->dbe_version = DBA_VERSION_V1988;
                error->dbe_type = DBE_TYPE_SERVICE;
                error->dbe_value = DSE_SV_UNAVAILABLE;
                return (DS_ERROR_CONNECT);
            }
        }
    }

    /* If we fall through to here, credentials are simple or protected
     * simple: locate the caller's entry to check the password against. */
    if ((res = really_find_entry(arg->dba_dn, TRUE, NULLDNSEQ, FALSE, &(entryptr), &(err), &(dsas))) == DS_OK) {
        /* is it really OK ??? Constructor / master-cache entries need a
         * further lookup to find the authoritative DSA. */
        if ((entryptr->e_data == E_TYPE_CONSTRUCTOR) || (entryptr->e_data == E_TYPE_CACHE_FROM_MASTER)) {
            DN dn_found;
            DLOG(log_dsap, LLOG_TRACE, ("rfe (bind) returned a constructor"));
            dn_found = get_copy_dn(entryptr);
            res = constructor_dsa_info(dn_found,NULLDNSEQ,FALSE,entryptr,&err,&dsas);
            dn_free (dn_found);
        }
    }
    switch(res) {
    case DS_OK:
        /* entryptr filled out - break through to deal with it. */
        break;
    case DS_CONTINUE:
        /*
         * A remote operation is required to compare the password given
         * with the password of the entry: fire up the remote operation
         * and return without completing.  Mark the operation as a
         * BIND_COMPARE_OP, remember the connection to restart, build a
         * compare argument and chain it via the di_blocks.
         */
        cn->cn_start.cs_bind_compare = on = oper_alloc(); /* cn knows about on */
        on->on_type = ON_TYPE_BIND_COMPARE;
        on->on_bind_compare = cn;                         /* on knows about cn */
        set_my_chain_args(&(on->on_req.dca_charg), arg->dba_dn);
        on->on_req.dca_dsarg.arg_type = OP_COMPARE;
        cma = &(on->on_req.dca_dsarg.arg_cm);
        cma->cma_common = ca_def; /* struct copy */
        /* Set originator/requestor to the binding DN. */
        if (on->on_req.dca_charg.cha_originator) /* set by set_my_chain_args */
            dn_free(on->on_req.dca_charg.cha_originator);
        on->on_req.dca_charg.cha_originator = dn_cpy(arg->dba_dn);
        cma->cma_common.ca_requestor = dn_cpy(arg->dba_dn);
        cma->cma_common.ca_servicecontrol.svc_prio = SVC_PRIO_HIGH;
        cma->cma_object = dn_cpy(arg->dba_dn);
        if (arg->dba_auth_type == DBA_AUTH_SIMPLE) {
            cma->cma_purported.ava_type = AttrT_cpy (at_password);
            cma->cma_purported.ava_value = str2AttrV (arg->dba_passwd,str2syntax("octetstring"));
        } else {
            /* Protected: hand over the pp built on the PROTECTED path. */
            cma->cma_purported.ava_type = AttrT_cpy (at_p_password);
            cma->cma_purported.ava_value = (AttributeValue) calloc(1, sizeof(attrVal));
            cma->cma_purported.ava_value->av_syntax = str2syntax("protectedPassword");
            cma->cma_purported.ava_value->av_struct = (caddr_t) pp;
        }
        on->on_dsas = dsas;
        for(di_tmp=on->on_dsas; di_tmp!=NULL_DI_BLOCK; di_tmp=di_tmp->di_next) {
            di_tmp->di_type = DI_OPERATION;
            di_tmp->di_oper = on;
        }
        if(oper_chain(on) == OK)
            return(DS_CONTINUE);
        /* Chaining failed: tear the operation down and report failure. */
        oper_extract(on);
        cn->cn_start.cs_bind_compare = NULLOPER;
        error->dbe_version = DBA_VERSION_V1988;
        error->dbe_type = DBE_TYPE_SERVICE;
        error->dbe_value = DSE_SV_UNAVAILABLE;
        return(DS_ERROR_CONNECT);
    case DS_X500_ERROR:
        /* User's entry doesn't exist, for example. */
        LLOG(log_dsap, LLOG_TRACE, ("ds_bind - really_find_entry erred:"));
        log_ds_error(&(err));
        ds_error_free(&(err));
        error->dbe_version = DBA_VERSION_V1988;
        error->dbe_type = DBE_TYPE_SECURITY;
        error->dbe_value = DSE_SC_INVALIDCREDENTIALS;
        return(DS_ERROR_CONNECT);
    default:
        error->dbe_version = DBA_VERSION_V1988;
        error->dbe_type = DBE_TYPE_SERVICE;
        error->dbe_value = DSE_SV_DITERROR;
        return(DS_ERROR_CONNECT);
    }

    /* Local entry in hand: it must carry a (protected) password
     * attribute, otherwise simple authentication is impossible and we
     * answer `inappropriate authentication'. */
    if ((as = as_find_type (entryptr->e_attributes, (arg->dba_auth_type == DBA_AUTH_SIMPLE) ? at_password : at_p_password)) == NULLATTR) {
        error->dbe_version = DBA_VERSION_V1988;
        error->dbe_type = DBE_TYPE_SECURITY;
        error->dbe_value = DSE_SC_AUTHENTICATION;
        return (DS_ERROR_CONNECT);
    }
    if (arg->dba_auth_type == DBA_AUTH_SIMPLE) {
        /* Compare lengths first so a short guess can't prefix-match. */
        if (strlen ((char *)as->attr_value->avseq_av.av_struct) != arg->dba_passwd_len)
            retval = -1;
        else
            retval = strncmp ((char *)as->attr_value->avseq_av.av_struct, arg->dba_passwd, arg->dba_passwd_len);
    } else
        retval = check_guard( ((struct protected_password *) as->attr_value->avseq_av.av_struct)->passwd, ((struct protected_password *) as->attr_value->avseq_av.av_struct)->n_octets, arg->dba_time1, arg->dba_passwd, arg->dba_passwd_len);
    if (retval == 0) {
        /* Password OK! */
        cn->cn_authen = arg->dba_auth_type;
        make_dsa_bind_arg(result);
        return (DS_OK);
    } else {
        /* password wrong ! */
        error->dbe_version = DBA_VERSION_V1988;
        error->dbe_type = DBE_TYPE_SECURITY;
        error->dbe_value = DSE_SC_INVALIDCREDENTIALS;
        return (DS_ERROR_CONNECT);
    }
}
// Route a session message to the registered opcode handler.
//
// Intercepts opClusterClientConnect / opClusterClientDisconnect to create
// or retire the per-account DispatchClient used by async calls; all other
// opcodes are dispatched to mMessageCallbackMap with the DispatchClient
// looked up by account id.  Messages for unknown accounts are destroyed
// and dropped.  On normal exit the message is only flagged for deletion:
// the application decides whether it is needed further (routing messages
// in the ConnectionServer outlive this call).
void MessageDispatch::handleSessionMessage(NetworkClient* client, Message* message) {
    DispatchClient* dispatchClient = 0;
    bool deleteClient = false;

    message->ResetIndex();

    // Opcode is the leading uint32 of every message.
    uint32 opcode;
    message->getUint32(opcode);

    if (opcode == opClusterClientConnect) {
        // New cluster client: create and register an account-specific client.
        // NOTE(review): if the account id is already present, insert() fails
        // and this DispatchClient is never stored — presumably leaked; confirm.
        dispatchClient = new DispatchClient();
        dispatchClient->setAccountId(message->getAccountId());
        dispatchClient->setSession(client->getSession());
        mAccountClientMap.insert(std::make_pair(message->getAccountId(),dispatchClient));
    } else if (opcode == opClusterClientDisconnect) {
        // Retire the account's DispatchClient (handler below still gets it).
        AccountClientMap::iterator iter = mAccountClientMap.find(message->getAccountId());
        if(iter != mAccountClientMap.end()) {
            dispatchClient = (*iter).second;
            mAccountClientMap.erase(iter);
            DLOG(INFO) << "Destroying Dispatch Client for account " << message->getAccountId();
            // Mark it for deletion (see deleteClient handling below).
            deleteClient = true;
        } else {
            LOG(INFO) << "Could not find DispatchClient for account " << message->getAccountId() << " to be deleted.";
            client->getSession()->DestroyIncomingMessage(message);
            return;
        }
    } else {
        // Ordinary message: it must belong to a known account.
        AccountClientMap::iterator iter = mAccountClientMap.find(message->getAccountId());
        if(iter != mAccountClientMap.end()) {
            dispatchClient = (*iter).second;
        } else {
            client->getSession()->DestroyIncomingMessage(message);
            return;
        }
    }

    MessageCallbackMap::iterator iter = mMessageCallbackMap.find(opcode);
    if(iter != mMessageCallbackMap.end()) {
        // Reset our message index to just after the opcode, then call the
        // registered handler.
        message->setIndex(4);
        (*iter).second(message, dispatchClient);
    } else {
        LOG(INFO) << "Unhandled opcode in MessageDispatch - " << opcode ;
    }

    if(deleteClient) {
        // Deliberately NOT deleted here: the client is freed when the
        // player is deleted or reconnects.  Just drop our local pointer.
        dispatchClient = NULL;
    }

    // Flag the message for destruction rather than destroying it: the
    // application decides whether it is needed further (ConnectionServer
    // routing messages need a longer life than normal).
    message->setPendingDelete(true);
}
bool OpenCLParser::convert(std::string fileNameIN, std::string fileNameOUT) { if ( access( fileNameIN.c_str(), F_OK ) == -1 ) { LOG(ERROR) << "kernel source file = '" << fileNameIN.c_str() << "' doesn't exist"; return false; } if ( access( fileNameIN.c_str(), R_OK ) == -1 ) { LOG(ERROR) << "kernel source file = '" << fileNameIN.c_str() << "' isn't readable"; return false; } if ( access( fileNameOUT.c_str(), F_OK ) == 0 ) { struct stat statIN; if (stat(fileNameIN.c_str(), &statIN) == -1) { perror(fileNameIN.c_str()); return false; } struct stat statOUT; if (stat(fileNameOUT.c_str(), &statOUT) == -1) { perror(fileNameOUT.c_str()); return false; } if ( statOUT.st_mtime > statIN.st_mtime ) { DLOG(INFO) << "kernel source file = '" << fileNameOUT.c_str() << "' up-to-date"; return true; } } std::ifstream file; file.open(fileNameIN.c_str(), std::ifstream::in ); if ( ! file.is_open() ) { LOG(ERROR) << "failed to open file = '" << fileNameIN.c_str() << "' for reading"; return false; } std::string line; std::string kernel_buffer; std::string kernel_name; std::string kernel_type; std::string kernel_name_typed; std::string kernel_line_typed; std::string kernel_modified; std::string type_replace; std::string stdOpenCL; stdOpenCL += "// This file was auto-generated from file '" + fileNameIN + "' to conform to standard OpenCL\n"; bool recording = false; while (std::getline(file, line)) { if ( isAttributeLine(line) ) { if ( recording ) { recording = false; } kernel_name_typed = getTypedKernelName(line); kernel_line_typed = "__kernel void " + kernel_name_typed + getTypedKernelLine(line) + " {"; if ( isFloatType(kernel_name_typed) ) { type_replace = "float"; } if ( isDoubleType(kernel_name_typed) ) { type_replace = "double"; } kernel_modified = kernel_line_typed + "\n" + kernel_buffer; boost::regex re; re = boost::regex("\\sT\\s", boost::regex::perl); kernel_modified = boost::regex_replace(kernel_modified, re, " "+type_replace+" "); re = boost::regex("\\sT\\*\\s", 
boost::regex::perl); kernel_modified = boost::regex_replace(kernel_modified, re, " "+type_replace+"* "); stdOpenCL += kernel_modified; continue; } if ( isTemplateKernelLine(line) ) { kernel_name = getKernelName(line); kernel_type = getKernelType(line); DLOG(INFO)<<"found template kernel '"<<kernel_name<<"' with type '"<<kernel_type<<"'"; if ( recording == false ) { recording = true; } else { LOG(ERROR) << "error parsing kernel source file = '" << fileNameIN.c_str() << "'"; return false; } continue; } if ( recording ) { kernel_buffer += line + "\n"; } else { kernel_buffer = ""; stdOpenCL += line + "\n"; } } std::ofstream out(fileNameOUT.c_str()); out << stdOpenCL; out.close(); DLOG(INFO) << "convert AMD OpenCL '"<<fileNameIN.c_str()<<"' to standard OpenCL '"<<fileNameOUT.c_str()<<"'"; return true; }
void mosq_logger(struct mosquitto *mosq, void *obj, int level, const char *msg) { (void) mosq; (void) obj; DLOG("mosquitto -> level: %d, msg: %s\n", level, msg); }
/**
 * Shapes the normalized text buffer into a ShapeResult.
 *
 * Segments the run (by script/orientation/fallback priority), then for each
 * segment walks the font-fallback chain: unshaped "holes" are kept in
 * m_holesQueue and re-shaped with the next fallback font until the queue is
 * empty or no fallback font remains. Caps-variant handling may split queue
 * items and substitute a synthetic small-caps font before shaping.
 *
 * @return the accumulated ShapeResult (never null; may contain failed ranges
 *         if shaping or extraction logged errors).
 */
PassRefPtr<ShapeResult> HarfBuzzShaper::shapeResult()
{
    RefPtr<ShapeResult> result = ShapeResult::create(m_font, m_normalizedBufferLength, m_textRun.direction());
    // Scoped HarfBuzz buffer, reused (reset) for every shaped sub-range.
    HarfBuzzScopedPtr<hb_buffer_t> harfBuzzBuffer(hb_buffer_create(), hb_buffer_destroy);

    const FontDescription& fontDescription = m_font->getFontDescription();
    const String& localeString = fontDescription.locale();
    CString locale = localeString.latin1();
    const hb_language_t language = hb_language_from_string(locale.data(), locale.length());

    // Caps handling is only engaged for non-normal font-variant-caps.
    bool needsCapsHandling = fontDescription.variantCaps() != FontDescription::CapsNormal;
    OpenTypeCapsSupport capsSupport;

    RunSegmenter::RunSegmenterRange segmentRange = {
        0, 0,
        USCRIPT_INVALID_CODE,
        OrientationIterator::OrientationInvalid,
        FontFallbackPriority::Invalid };
    RunSegmenter runSegmenter(m_normalizedBuffer.get(), m_normalizedBufferLength, m_font->getFontDescription().orientation());

    Vector<UChar32> fallbackCharsHint;

    // TODO: Check whether this treatAsZerowidthspace from the previous script
    // segmentation plays a role here, does the new scriptRuniterator handle that correctly?
    while (runSegmenter.consume(&segmentRange)) {
        RefPtr<FontFallbackIterator> fallbackIterator =
            m_font->createFontFallbackIterator(segmentRange.fontFallbackPriority);

        // Seed the queue: first advance to a font, then shape the whole segment.
        appendToHolesQueue(HolesQueueNextFont, 0, 0);
        appendToHolesQueue(HolesQueueRange, segmentRange.start, segmentRange.end - segmentRange.start);

        RefPtr<FontDataForRangeSet> currentFontDataForRangeSet;

        bool fontCycleQueued = false;
        while (m_holesQueue.size()) {
            HolesQueueItem currentQueueItem = m_holesQueue.takeFirst();

            if (currentQueueItem.m_action == HolesQueueNextFont) {
                // For now, we're building a character list with which we probe
                // for needed fonts depending on the declared unicode-range of a
                // segmented CSS font. Alternatively, we can build a fake font
                // for the shaper and check whether any glyphs were found, or
                // define a new API on the shaper which will give us coverage
                // information?
                if (!collectFallbackHintChars(fallbackCharsHint, fallbackIterator->needsHintList())) {
                    // Give up shaping since we cannot retrieve a font fallback
                    // font without a hintlist.
                    m_holesQueue.clear();
                    break;
                }

                currentFontDataForRangeSet = fallbackIterator->next(fallbackCharsHint);

                if (!currentFontDataForRangeSet->fontData()) {
                    // Fallback chain exhausted; there must be no pending holes.
                    ASSERT(!m_holesQueue.size());
                    break;
                }
                fontCycleQueued = false;
                continue;
            }

            // From here on the item is a range to shape with the current font.
            SmallCapsIterator::SmallCapsBehavior smallCapsBehavior = SmallCapsIterator::SmallCapsSameCase;
            if (needsCapsHandling) {
                capsSupport = OpenTypeCapsSupport(
                    currentFontDataForRangeSet->fontData()->platformData().harfBuzzFace(),
                    fontDescription.variantCaps(),
                    ICUScriptToHBScript(segmentRange.script));
                // May shrink currentQueueItem to a uniform-case sub-range.
                if (capsSupport.needsRunCaseSplitting())
                    splitUntilNextCaseChange(currentQueueItem, smallCapsBehavior);
            }

            ASSERT(currentQueueItem.m_numCharacters);

            // Substitute a synthetic small-caps font when required.
            const SimpleFontData* smallcapsAdjustedFont =
                needsCapsHandling && capsSupport.needsSyntheticFont(smallCapsBehavior)
                ? currentFontDataForRangeSet->fontData()->smallCapsFontData(fontDescription).get()
                : currentFontDataForRangeSet->fontData();

            // Compatibility with SimpleFontData approach of keeping a flag for overriding drawing direction.
            // TODO: crbug.com/506224 This should go away in favor of storing that information elsewhere, for example in
            // ShapeResult.
            const SimpleFontData* directionAndSmallCapsAdjustedFont =
                fontDataAdjustedForOrientation(smallcapsAdjustedFont,
                    m_font->getFontDescription().orientation(),
                    segmentRange.renderOrientation);

            CaseMapIntend caseMapIntend = CaseMapIntend::KeepSameCase;
            if (needsCapsHandling) {
                caseMapIntend = capsSupport.needsCaseChange(smallCapsBehavior);
            }

            // Fill the HarfBuzz buffer with the (possibly case-mapped) range.
            CaseMappingHarfBuzzBufferFiller(
                caseMapIntend,
                fontDescription.locale(),
                harfBuzzBuffer.get(),
                m_normalizedBuffer.get(),
                m_normalizedBufferLength,
                currentQueueItem.m_startIndex,
                currentQueueItem.m_numCharacters);

            // Temporarily overlay the caps-related OpenType feature for this range.
            CapsFeatureSettingsScopedOverlay capsOverlay(m_features, capsSupport.fontFeatureToUse(smallCapsBehavior));

            if (!shapeRange(harfBuzzBuffer.get(),
                    currentQueueItem.m_startIndex,
                    currentQueueItem.m_numCharacters,
                    directionAndSmallCapsAdjustedFont,
                    currentFontDataForRangeSet->ranges(),
                    segmentRange.script,
                    language))
                DLOG(ERROR) << "Shaping range failed.";

            // Extraction may re-queue unresolved holes for the next fallback font.
            if (!extractShapeResults(harfBuzzBuffer.get(),
                    result.get(),
                    fontCycleQueued,
                    currentQueueItem,
                    directionAndSmallCapsAdjustedFont,
                    segmentRange.script,
                    !fallbackIterator->hasNext()))
                DLOG(ERROR) << "Shape result extraction failed.";

            hb_buffer_reset(harfBuzzBuffer.get());
        }
    }
    return result.release();
}
// Background-thread entry point that fills the DataLayer's prefetch buffers.
//
// Reads `batch_size` datums from the layer's LevelDB iterator, applies mean
// subtraction and scaling (and optional random crop/mirror during training),
// and writes the results into prefetch_data_ (and prefetch_label_ when the
// layer outputs labels). Wraps the iterator back to the first record when the
// DB is exhausted. `layer_pointer` must be a DataLayer<Dtype>*; always returns
// NULL (pthread-style signature).
void* DataLayerPrefetch(void* layer_pointer) {
  CHECK(layer_pointer);
  DataLayer<Dtype>* layer = static_cast<DataLayer<Dtype>*>(layer_pointer);
  CHECK(layer);
  Datum datum;
  CHECK(layer->prefetch_data_);
  Dtype* top_data = layer->prefetch_data_->mutable_cpu_data();
  Dtype* top_label;  // only set/used when output_labels_ is true
  if (layer->output_labels_) {
    top_label = layer->prefetch_label_->mutable_cpu_data();
  }
  const Dtype scale = layer->layer_param_.data_param().scale();
  const int batch_size = layer->layer_param_.data_param().batch_size();
  const int crop_size = layer->layer_param_.data_param().crop_size();
  const bool mirror = layer->layer_param_.data_param().mirror();

  if (mirror && crop_size == 0) {
    LOG(FATAL) << "Current implementation requires mirror and crop_size to be "
        << "set at the same time.";
  }
  // datum scales
  const int channels = layer->datum_channels_;
  const int height = layer->datum_height_;
  const int width = layer->datum_width_;
  const int size = layer->datum_size_;
  const Dtype* mean = layer->data_mean_.cpu_data();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    CHECK(layer->iter_);
    CHECK(layer->iter_->Valid());
    datum.ParseFromString(layer->iter_->value().ToString());
    const string& data = datum.data();
    if (crop_size) {
      CHECK(data.size()) << "Image cropping only support uint8 data";
      int h_off, w_off;
      // We only do random crop when we do training.
      // NOTE(review): when height == crop_size (or width == crop_size) the
      // modulo below divides by zero — confirm crop_size < height/width is
      // guaranteed upstream.
      if (layer->phase_ == Caffe::TRAIN) {
        h_off = layer->PrefetchRand() % (height - crop_size);
        w_off = layer->PrefetchRand() % (width - crop_size);
      } else {
        // Deterministic center crop for test/eval.
        h_off = (height - crop_size) / 2;
        w_off = (width - crop_size) / 2;
      }
      if (mirror && layer->PrefetchRand() % 2) {
        // Copy mirrored version: flip horizontally by writing column
        // (crop_size - 1 - w) instead of w.
        for (int c = 0; c < channels; ++c) {
          for (int h = 0; h < crop_size; ++h) {
            for (int w = 0; w < crop_size; ++w) {
              int top_index = ((item_id * channels + c) * crop_size + h)
                              * crop_size + (crop_size - 1 - w);
              int data_index = (c * height + h + h_off) * width + w + w_off;
              Dtype datum_element =
                  static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
              top_data[top_index] = (datum_element - mean[data_index]) * scale;
            }
          }
        }
      } else {
        // Normal copy
        for (int c = 0; c < channels; ++c) {
          for (int h = 0; h < crop_size; ++h) {
            for (int w = 0; w < crop_size; ++w) {
              int top_index = ((item_id * channels + c) * crop_size + h)
                              * crop_size + w;
              int data_index = (c * height + h + h_off) * width + w + w_off;
              Dtype datum_element =
                  static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
              top_data[top_index] = (datum_element - mean[data_index]) * scale;
            }
          }
        }
      }
    } else {
      // No cropping: copy the whole datum.
      // we will prefer to use data() first, and then try float_data()
      if (data.size()) {
        for (int j = 0; j < size; ++j) {
          Dtype datum_element =
              static_cast<Dtype>(static_cast<uint8_t>(data[j]));
          top_data[item_id * size + j] = (datum_element - mean[j]) * scale;
        }
      } else {
        for (int j = 0; j < size; ++j) {
          top_data[item_id * size + j] =
              (datum.float_data(j) - mean[j]) * scale;
        }
      }
    }
    if (layer->output_labels_) {
      top_label[item_id] = datum.label();
    }
    // go to the next iter
    layer->iter_->Next();
    if (!layer->iter_->Valid()) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      layer->iter_->SeekToFirst();
    }
  }
  return static_cast<void*>(NULL);
}
void Client::ZCom_cbConnectionClosed(ZCom_ConnID _id, eZCom_CloseReason _reason, ZCom_BitStream &_reasondata) { network.decConnCount(); switch( _reason ) { case eZCom_ClosedDisconnect: { Network::DConnEvents dcEvent = static_cast<Network::DConnEvents>( _reasondata.getInt(8) ); switch( dcEvent ) { case Network::ServerMapChange: { console.addLogMsg("* SERVER CHANGED MAP"); network.reconnect(150); game.reset(Game::ServerChangeMap); } break; case Network::Quit: { console.addLogMsg("* CONNECTION CLOSED BY SERVER"); game.reset(Game::ServerQuit); } break; case Network::Kick: { console.addLogMsg("* YOU WERE KICKED"); game.reset(Game::Kicked); } break; case Network::IncompatibleData: { console.addLogMsg("* YOU HAVE INCOMPATIBLE DATA"); game.reset(Game::IncompatibleData); } break; case Network::IncompatibleProtocol: { console.addLogMsg("* THE HOST RUNS AN INCOMPATIBLE VERSION OF GUSANOS"); game.reset(Game::IncompatibleProtocol); } break; default: { console.addLogMsg("* CONNECTION CLOSED BY DUNNO WHAT :O"); game.reset(Game::ServerQuit); } break; } } break; case eZCom_ClosedTimeout: console.addLogMsg("* CONNECTION TIMEDOUT"); game.reset(Game::ServerQuit); break; case eZCom_ClosedReconnect: console.addLogMsg("* CONNECTION RECONNECTED"); break; default: break; } DLOG("A connection was closed"); }
// Forward pass of the MKL (DNN) pooling layer.
//
// Selects the pooling algorithm from the layer parameters, lazily creates the
// MKL forward/backward pooling primitives on the first pass (choosing layouts
// based on whether the bottom blob provides private MKL data or plain cpu
// data), then executes the forward primitive. The max-pool index mask is
// written to top[1] when present, otherwise to max_idx_.
void MKLPoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
                                         const vector<Blob<Dtype>*>& top) {
  // We'll output the mask to top[1] if it's of size >1.
  size_t* mask = NULL;  // suppress warnings about uninitalized variables
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;

  // Map the Caffe pooling method onto the corresponding MKL DNN algorithm.
  dnnAlgorithm_t algorithm;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    algorithm = dnnAlgorithmPoolingMax;
    break;
  case PoolingParameter_PoolMethod_AVE:
    algorithm = dnnAlgorithmPoolingAvg;
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    NOT_IMPLEMENTED;
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }

  dnnError_t status;
  void* pooling_res[dnnResourceNumber];

  mask = (use_top_mask) ?
      reinterpret_cast<size_t*>(top[1]->mutable_cpu_data()) :
      (max_idx_.mutable_cpu_data());
  pooling_res[dnnResourceWorkspace] = reinterpret_cast<void*>(mask);

  // Prefer the blob's private (MKL-layout) data; fall back to plain cpu data.
  void* bottom_data =
    reinterpret_cast<void *>(const_cast<Dtype*>(bottom[0]->prv_data()));
  if (NULL == bottom_data) {
    bottom_data =
      reinterpret_cast<void *>(const_cast<Dtype*>(bottom[0]->cpu_data()));
    if (NULL == poolingFwd) {
      // First pass with user-layout input: build primitives from layout_usr.
      // Now create poolingFwd
      status = dnnPoolingCreateForward<Dtype>(&poolingFwd, NULL,
              algorithm, fwd_bottom_data->layout_usr,
              kernel_size, kernel_stride, src_offset, dnnBorderZeros);
      CHECK_EQ(status, E_SUCCESS);

      // Now create poolingBwd
      status = dnnPoolingCreateBackward<Dtype>(&poolingBwd, NULL,
              algorithm, fwd_bottom_data->layout_usr,
              kernel_size, kernel_stride, src_offset, dnnBorderZeros);
      CHECK_EQ(status, E_SUCCESS);
    }
  } else if (NULL == poolingFwd) {
    // Is it the first pass? Create a primitive.
    // Input carries an MKL2017 private descriptor: reuse its internal layout.
    CHECK_EQ((bottom[0]->get_prv_descriptor_data())->get_descr_type(),
            PrvMemDescr::PRV_DESCR_MKL2017);
    shared_ptr<MKLData<Dtype> > mem_descr
      =  boost::static_pointer_cast<MKLData<Dtype> >
            (bottom[0]->get_prv_descriptor_data());
    CHECK(mem_descr != NULL);

    DLOG(INFO) << "Using layout of " << mem_descr->name
            << " as input layout for " << this->layer_param_.name();

    // copy shared_ptr
    fwd_bottom_data = mem_descr;

    // Now create poolingFwd
    status = dnnPoolingCreateForward<Dtype>(&poolingFwd, NULL,
            algorithm, fwd_bottom_data->layout_int,
            kernel_size, kernel_stride, src_offset, dnnBorderZeros);
    CHECK_EQ(status, E_SUCCESS);

    // Derive the top (dst) layout from the forward primitive.
    status = dnnLayoutCreateFromPrimitive<Dtype>(&fwd_top_data->layout_int,
            poolingFwd, dnnResourceDst);
    CHECK_EQ(status, 0) << "Failed dnnLayoutCreateFromPrimitive with status "
            << status << "\n";
    fwd_top_data->create_conversions();

    // Now create poolingBwd
    status = dnnPoolingCreateBackward<Dtype>(&poolingBwd, NULL,
            algorithm, fwd_bottom_data->layout_int,
            kernel_size, kernel_stride, src_offset, dnnBorderZeros);
    CHECK_EQ(status, E_SUCCESS);

    // Backward diff layouts are also derived from the forward primitive.
    status = dnnLayoutCreateFromPrimitive<Dtype>(&bwd_top_diff->layout_int,
            poolingFwd, dnnResourceDst);
    CHECK_EQ(status, E_SUCCESS);
    status = dnnLayoutCreateFromPrimitive<Dtype>(&bwd_bottom_diff->layout_int,
            poolingFwd, dnnResourceSrc);
    CHECK_EQ(status, E_SUCCESS);

    bwd_top_diff->create_conversions();
    bwd_bottom_diff->create_conversions();
  }

  pooling_res[dnnResourceSrc] = bottom_data;
  // Write the result into the private layout when a conversion exists,
  // otherwise straight into the top blob's cpu buffer.
  if (fwd_top_data->convert_from_int) {
    top[0]->set_prv_data(fwd_top_data->prv_ptr(), fwd_top_data, false);
    pooling_res[dnnResourceDst] = reinterpret_cast<void *>(
            const_cast<Dtype*>(fwd_top_data->prv_ptr()));
  } else {
    pooling_res[dnnResourceDst] =
            reinterpret_cast<void *>(top[0]->mutable_cpu_data());
    DLOG(INFO) << "Using cpu_data for top in DnnPooling.";
  }
  status = dnnExecute<Dtype>(poolingFwd, pooling_res);
  CHECK_EQ(status, E_SUCCESS);
}
/*
 * Interactive (mouse-drag) resize of two adjacent containers.
 *
 * Creates an invisible input-only grab window covering the output plus a thin
 * "resizebar" helper window along the edge of `second`, warps the pointer onto
 * that edge, then blocks in drag_pointer() while the user drags. Afterwards
 * the dragged pixel delta is converted into new percent values for both
 * containers and the parent's percentages are re-normalized.
 *
 * Fix: the drag_pointer() call previously passed the mojibake token `¶ms`
 * (an HTML-entity-mangled `&params`), which does not compile; restored to
 * `&params`.
 *
 * Returns 0 in all cases (including a cancelled drag, which changes nothing).
 */
int resize_graphical_handler(Con *first, Con *second, orientation_t orientation,
                             const xcb_button_press_event_t *event) {
    DLOG("resize handler\n");

    /* TODO: previously, we were getting a rect containing all screens. why? */
    Con *output = con_get_output(first);
    DLOG("x = %d, width = %d\n", output->rect.x, output->rect.width);

    x_mask_event_mask(~XCB_EVENT_MASK_ENTER_WINDOW);
    xcb_flush(conn);

    uint32_t mask = 0;
    uint32_t values[2];

    mask = XCB_CW_OVERRIDE_REDIRECT;
    values[0] = 1;

    /* Open a new window, the resizebar. Grab the pointer and move the window
     * around as the user moves the pointer. */
    xcb_window_t grabwin = create_window(conn, output->rect,
                                         XCB_COPY_FROM_PARENT, XCB_COPY_FROM_PARENT,
                                         XCB_WINDOW_CLASS_INPUT_ONLY,
                                         XCURSOR_CURSOR_POINTER, true, mask, values);

    /* Keep track of the coordinate orthogonal to motion so we can determine
     * the length of the resize afterward. */
    uint32_t initial_position, new_position;

    /* Configure the resizebar and snap the pointer. The resizebar runs along
     * the rect of the second con and follows the motion of the pointer. */
    Rect helprect;
    if (orientation == HORIZ) {
        helprect.x = second->rect.x;
        helprect.y = second->rect.y;
        helprect.width = logical_px(2);
        helprect.height = second->rect.height;
        initial_position = second->rect.x;
        xcb_warp_pointer(conn, XCB_NONE, event->root, 0, 0, 0, 0,
                         second->rect.x, event->root_y);
    } else {
        helprect.x = second->rect.x;
        helprect.y = second->rect.y;
        helprect.width = second->rect.width;
        helprect.height = logical_px(2);
        initial_position = second->rect.y;
        xcb_warp_pointer(conn, XCB_NONE, event->root, 0, 0, 0, 0,
                         event->root_x, second->rect.y);
    }

    mask = XCB_CW_BACK_PIXEL;
    values[0] = config.client.focused.border;

    mask |= XCB_CW_OVERRIDE_REDIRECT;
    values[1] = 1;

    xcb_window_t helpwin = create_window(conn, helprect,
                                         XCB_COPY_FROM_PARENT, XCB_COPY_FROM_PARENT,
                                         XCB_WINDOW_CLASS_INPUT_OUTPUT,
                                         (orientation == HORIZ
                                              ? XCURSOR_CURSOR_RESIZE_HORIZONTAL
                                              : XCURSOR_CURSOR_RESIZE_VERTICAL),
                                         true, mask, values);

    xcb_circulate_window(conn, XCB_CIRCULATE_RAISE_LOWEST, helpwin);

    xcb_flush(conn);

    /* `new_position' will be updated by the `resize_callback'. */
    new_position = initial_position;

    const struct callback_params params = {orientation, output, helpwin, &new_position};
    /* `drag_pointer' blocks until the drag is completed. */
    drag_result_t drag_result = drag_pointer(NULL, event, grabwin, BORDER_TOP, 0, resize_callback, &params);

    xcb_destroy_window(conn, helpwin);
    xcb_destroy_window(conn, grabwin);
    xcb_flush(conn);

    /* User cancelled the drag so no action should be taken. */
    if (drag_result == DRAG_REVERT)
        return 0;

    int pixels = (new_position - initial_position);

    DLOG("Done, pixels = %d\n", pixels);

    // if we got thus far, the containers must have
    // percentages associated with them
    assert(first->percent > 0.0);
    assert(second->percent > 0.0);

    // calculate the new percentage for the first container
    double new_percent, difference;
    double percent = first->percent;
    DLOG("percent = %f\n", percent);
    int original = (orientation == HORIZ ? first->rect.width : first->rect.height);
    DLOG("original = %d\n", original);
    /* Scale percent by the relative size change of `first`. */
    new_percent = (original + pixels) * (percent / original);
    difference = percent - new_percent;
    DLOG("difference = %f\n", difference);
    DLOG("new percent = %f\n", new_percent);
    first->percent = new_percent;

    // calculate the new percentage for the second container
    double s_percent = second->percent;
    second->percent = s_percent + difference;
    DLOG("second->percent = %f\n", second->percent);

    // now we must make sure that the sum of the percentages remain 1.0
    con_fix_percent(first->parent);

    return 0;
}