bool EntityInstanceCompare::precede(const EntityInstance &e1, const EntityInstance &e2, const PropertyPrecedeOrderList &s, const EntityInfo &inf) { QVariant a1,a2; int t1,t2; foreach(PropertyPrecedeOrder so, s){ a1= e1.get(so.first); a2= e2.get(so.first); t1= a1.userType(); if(t1==QMetaType::UnknownType && inf.propertyInfo(so.first).dataType()!=QMetaType::UnknownType){ t1= inf.propertyInfo(so.first).dataType(); a1= inf.propertyInfo(so.first).defaultValue(); } t2= a2.userType(); if(t2==QMetaType::UnknownType && inf.propertyInfo(so.first).dataType()!=QMetaType::UnknownType){ t2= inf.propertyInfo(so.first).dataType(); a2= inf.propertyInfo(so.first).defaultValue(); } if(t1== QMetaType::UnknownType && t2== QMetaType::UnknownType) continue; if(so.second == def::Ascending){ if(t1== QMetaType::UnknownType) return true; if(t2== QMetaType::UnknownType) return false; if(lessThan(a1,a2)) return true; else if(greaterThan(a1,a2)) return false; }else if(so.second == def::Descending){ if(t1== QMetaType::UnknownType) return false; if(t2== QMetaType::UnknownType) return true; if(greaterThan(a1,a2)) return true; else if(lessThan(a1,a2)) return false; } }
void Chunk::open( const tstring& path ) { tinyxml2::XMLDocument doc; if (tinyxml2::XML_SUCCESS != doc.LoadFile(path.c_str())) { return; } tinyxml2::XMLElement* ele = doc.RootElement(); if (NULL == ele) { return; } tinyxml2::XMLElement* tex= ele->FirstChildElement("layer"); eTerrainLayer l = eTerrainLayer_0; while (tex) { // { const char* n = tex->Attribute("texture"); if (NULL != n) { setLayer(l, n); } } tex = tex->NextSiblingElement("layer"); l = (eTerrainLayer)(l + 1); } // tex= ele->FirstChildElement("model"); while(tex) { const char* n = tex->Attribute("file"); if (NULL != n) { EntityInstance* i = getSceneManager()->addEntityInstance(n); if (i) { Vector3 pos = Vector3::Zero; tex->QueryVector3Attribute("position", &pos); i->setPosition(pos); // { Vector3 pos = Vector3::One; tex->QueryVector3Attribute("setScale", &pos); i->setScale(pos); } // { const char* n = tex->Attribute("animation"); if (NULL != n) { i->setAnimation(n); } } } } tex = tex->NextSiblingElement("model"); } }
// Worker entry point: signals the Busy state, then dispatches on the task's
// TaskProperty::Type (Rename / Copy branches are empty stubs here).
// NOTE(review): this fragment appears truncated — the Copy branch and the
// function body are not closed in the visible source; confirm against the
// full file before editing. 'aCancel' is accepted but not used in the
// visible portion (presumably a cancellation guard — verify).
void TaskWorker::work(QMutex * aCancel, EntityInstance task, EntityInstance result) {
    emit stateChanged(TaskWorkerState::Busy);
    if (task.get(TaskProperty::Type).value < int >() == TaskType::Rename) {
        ; // Rename handling: empty stub in visible source.
    } else if (task.get(TaskProperty::Type).value < int >() == TaskType::Copy) {
        ; // Copy handling: empty stub; body cut off in visible source.
// Builds a fresh EntityInstance populated with every entity whose id appears
// in 'entityIds'. Ids that do not resolve to an entity are reported through
// Settings::error and skipped; the instance is returned regardless.
// Caller takes ownership of the returned pointer.
EntityInstance * SpriterModel::getNewEntityInstance(EntityIdVector * entityIds)
{
    EntityInstance *instance = new EntityInstance();
    for (auto& entityId : *entityIds)
    {
        Entity *resolved = getEntity(entityId);
        if (!resolved)
        {
            Settings::error("SpriterModel::getNewEntityInstance - could not find entity with id " + std::to_string(entityId));
            continue;
        }
        instance->appendEntity(this, resolved, objectFactory);
    }
    return instance;
}
// Streams an EntityInstance onto a QDebug sink as a multi-line
// "{ id:value; ... }" listing of all of its properties.
QDebug operator<<(QDebug dbg, const EntityInstance &instance)
{
    dbg.nospace() << "\r\n{";
    foreach (int propertyId, instance.properties())
    {
        dbg.nospace() << propertyId << ":" << instance.get(propertyId) << ";\r\n";
    }
    dbg.nospace() << "}";
    return dbg.maybeSpace();
}
void Chunk::save( const tstring& path ) { //============================================================================ tinyxml2::XMLDocument doc; // tinyxml2::XMLDeclaration* dec = doc.NewDeclaration("xml version=\"1.0\""); doc.LinkEndChild(dec); // tinyxml2::XMLElement* ele = doc.NewElement("chunk"); // doc.LinkEndChild(ele); // for (int i = 0; i != eTerrainLayer_Size; ++i) { if (layers_[i] && !layers_[i]->getFileName().empty()) { tinyxml2::XMLElement* e = doc.NewElement("layer"); std::string tn(layers_[i]->getFileName()); //tn = FileSystem::cutDataPath(tn); e->SetAttribute("texture", tn.c_str()); ele->LinkEndChild(e); } } // for (int i = 0; i != EntityInstances_.size(); ++i) { EntityInstance* ei = EntityInstances_[i]; if (ei) { tinyxml2::XMLElement* e = doc.NewElement("model"); std::string tn(ei->getResId()); //tn = FileSystem::cutDataPath(tn); e->SetAttribute("file", tn.c_str()); e->SetAttribute("position", ei->getPosition()); e->SetAttribute("setScale", ei->getScale()); ele->LinkEndChild(e); } } doc.SaveFile(path.c_str()); }
// Builds the POS-tag alphabet for the token dictionary from an entity-tagged
// corpus. First delegates the shared token alphabets to
// InitializeFromSequenceReader, then counts POS tag frequencies over the
// training file and keeps only tags above a cutoff, raising the cutoff until
// the alphabet fits under kMaxPosAlphabetSize. Left untouched except for
// comments: alphabet ids depend on exact insertion order.
void TokenDictionary::InitializeFromEntityReader(EntityReader *reader) {
  InitializeFromSequenceReader(reader);

  int pos_cutoff = FLAGS_pos_cutoff;
  std::vector<int> pos_freqs;
  Alphabet pos_alphabet;  // scratch alphabet; survivors are copied to pos_alphabet_.
  std::string special_symbols[NUM_SPECIAL_TOKENS];
  special_symbols[TOKEN_UNKNOWN] = kTokenUnknown;
  special_symbols[TOKEN_START] = kTokenStart;
  special_symbols[TOKEN_STOP] = kTokenStop;
  for (int i = 0; i < NUM_SPECIAL_TOKENS; ++i) {
    pos_alphabet.Insert(special_symbols[i]);
    // Counts of special symbols are set to -1:
    // (keeps them below any cutoff, yet they are re-inserted explicitly below).
    pos_freqs.push_back(-1);
  }
  // Go through the corpus and build the dictionaries,
  // counting the frequencies.
  reader->Open(pipe_->GetOptions()->GetTrainingFilePath());
  EntityInstance *instance = static_cast<EntityInstance*>(reader->GetNext());
  while (instance != NULL) {
    int instance_length = instance->size();
    for (int i = 0; i < instance_length; ++i) {
      int id;
      // Add POS to alphabet.
      // Insert returns the tag's id; a fresh id equals the current alphabet
      // size, which is what the CHECK_EQ below verifies.
      id = pos_alphabet.Insert(instance->GetPosTag(i));
      if (id >= pos_freqs.size()) {
        CHECK_EQ(id, pos_freqs.size());
        pos_freqs.push_back(0);
      }
      ++pos_freqs[id];
    }
    delete instance;
    instance = static_cast<EntityInstance*>(reader->GetNext());
  }
  reader->Close();

  // Now adjust the cutoffs if necessary.
  // Rebuild pos_alphabet_ from scratch each pass; keep raising the cutoff
  // until the surviving tag set fits under kMaxPosAlphabetSize.
  while (true) {
    pos_alphabet_.clear();
    for (int i = 0; i < NUM_SPECIAL_TOKENS; ++i) {
      pos_alphabet_.Insert(special_symbols[i]);
    }
    for (Alphabet::iterator iter = pos_alphabet.begin();
         iter != pos_alphabet.end();
         ++iter) {
      if (pos_freqs[iter->second] > pos_cutoff) {
        pos_alphabet_.Insert(iter->first);
      }
    }
    if (pos_alphabet_.size() < kMaxPosAlphabetSize) break;
    ++pos_cutoff;
    LOG(INFO) << "Incrementing POS cutoff to " << pos_cutoff << "...";
  }

  // Freeze every alphabet so later lookups cannot grow them.
  form_alphabet_.StopGrowth();
  form_lower_alphabet_.StopGrowth();
  lemma_alphabet_.StopGrowth();
  prefix_alphabet_.StopGrowth();
  suffix_alphabet_.StopGrowth();
  feats_alphabet_.StopGrowth();
  pos_alphabet_.StopGrowth();
  cpos_alphabet_.StopGrowth();

  LOG(INFO) << "Number of pos: " << pos_alphabet_.size();
  // POS ids must fit in one byte downstream.
  CHECK_LT(pos_alphabet_.size(), 0xff);
}
// Builds the entity tag dictionary and the table of allowed tag bigrams.
// After the base class collects the raw tag alphabet, tags are split into
// prefix + entity name, and BIO/BILOU transition constraints are encoded in
// allowed_bigrams_ (indexed with a +1 offset so that index 0 represents the
// START/STOP boundary, i.e. tag id -1). Left untouched except for comments:
// the constraint logic is order- and index-sensitive.
void EntityDictionary::CreateTagDictionary(SequenceReader *reader) {
  SequenceDictionary::CreateTagDictionary(reader);

  // TODO: the SplitEntityTag function should probably be elsewhere and not on
  // EntityInstance.
  EntityInstance instance;
  Alphabet entities;
  // Display information about the entity tags.
  LOG(INFO) << "Found " << tag_alphabet_.size() << " entity tags:";
  for (Alphabet::iterator it = tag_alphabet_.begin();
       it != tag_alphabet_.end();
       ++it) {
    std::string entity_tag = it->first;
    LOG(INFO) << entity_tag;
    // Split e.g. "B-PER" into prefix "B" and entity "PER"; the outside tag
    // yields an empty entity and is not inserted.
    std::string prefix, entity;
    instance.SplitEntityTag(it->first, &prefix, &entity);
    if (entity != "") entities.Insert(entity);
  }
  LOG(INFO) << "Entities:";
  for (Alphabet::iterator it = entities.begin(); it != entities.end(); ++it) {
    LOG(INFO) << it->first;
  }

  LOG(INFO) << "Computing allowed bigrams...";
  // Every bigram is allowed by default.
  // Size is tag count + 1: row/column 0 stands for the sequence boundary
  // (tag id -1), hence the "1 +" offsets below.
  allowed_bigrams_.assign(1 + tag_alphabet_.size(),
                          std::vector<bool>(1 + tag_alphabet_.size(), true));
  // Now add the BIO-like constraints.
  for (Alphabet::iterator it = entities.begin(); it != entities.end(); ++it) {
    std::string entity = it->first;
    LOG(INFO) << "Processing entity " << entity << "...";
    if (static_cast<EntityPipe*>(pipe_)->GetEntityOptions()->tagging_scheme() ==
        EntityTaggingSchemes::BIO) {
      int tag_begin = tag_alphabet_.Lookup("B-" + entity);
      int tag_inside = tag_alphabet_.Lookup("I-" + entity);
      // No I-tag for this entity in the corpus: nothing to constrain.
      if (tag_inside < 0) continue;
      // An I-tag can only occur after a B-tag or another I-tag of the same
      // entity.
      for (int left_tag = -1; left_tag < tag_alphabet_.size(); ++left_tag) {
        if (left_tag != tag_begin && left_tag != tag_inside) {
          allowed_bigrams_[1 + tag_inside][1 + left_tag] = false;
        }
      }
    } else if (static_cast<EntityPipe*>(pipe_)->GetEntityOptions()->
               tagging_scheme() == EntityTaggingSchemes::BILOU) {
      // Lookup may return -1 for tags absent from the corpus; each use below
      // is guarded accordingly.
      int tag_begin = tag_alphabet_.Lookup("B-" + entity);
      int tag_inside = tag_alphabet_.Lookup("I-" + entity);
      int tag_last = tag_alphabet_.Lookup("L-" + entity);
      // I-tags and L-tags can only occur after a B-tag or an I-tag of the same
      // entity.
      for (int left_tag = -1; left_tag < tag_alphabet_.size(); ++left_tag) {
        if (left_tag != tag_begin && left_tag != tag_inside) {
          if (tag_inside >= 0) {
            allowed_bigrams_[1 + tag_inside][1 + left_tag] = false;
          }
          if (tag_last >= 0) {
            allowed_bigrams_[1 + tag_last][1 + left_tag] = false;
          }
        }
      }
      // I-tags and B-tags can only occur before an I-tag or an L-tag of the
      // same entity.
      for (int right_tag = -1; right_tag < tag_alphabet_.size(); ++right_tag) {
        if (right_tag != tag_last && right_tag != tag_inside) {
          if (tag_inside >= 0) {
            allowed_bigrams_[1 + right_tag][1 + tag_inside] = false;
          }
          if (tag_begin >= 0) {
            allowed_bigrams_[1 + right_tag][1 + tag_begin] = false;
          }
        }
      }
    }
  }

  tag_alphabet_.BuildNames();
  // Just to be able to plot readable information...
  int num_allowed_bigrams = 0;
  for (int tag = -1; tag < tag_alphabet_.size(); ++tag) {
    for (int left_tag = -1; left_tag < tag_alphabet_.size(); ++left_tag) {
      if (IsAllowedBigram(left_tag, tag)) {
        std::string left_tag_name = (left_tag >= 0) ?
          tag_alphabet_.GetName(left_tag) : "START";
        std::string tag_name = (tag >= 0) ?
          tag_alphabet_.GetName(tag) : "STOP";
        LOG(INFO) << "Allowed bigram: " << left_tag_name << " -> " << tag_name;
        ++num_allowed_bigrams;
      }
    }
  }
  LOG(INFO) << "Total allowed bigrams: " << num_allowed_bigrams;

  ReadGazetteerFiles();
}
// Builds all token-level alphabets (forms, lower-cased forms, prefixes,
// suffixes, word shapes, POS tags) from the entity training corpus.
// Frequencies are counted in scratch alphabets; each final alphabet is then
// rebuilt keeping only entries above a cutoff, raising the cutoff until the
// alphabet fits its size limit. Left untouched except for comments:
// alphabet ids depend on exact insertion order.
void EntityTokenDictionary::Initialize(EntityReader *reader) {
  SetTokenDictionaryFlagValues();
  LOG(INFO) << "Creating token dictionary...";

  // Scratch frequency tables and alphabets, indexed by insertion id.
  std::vector<int> form_freqs;
  std::vector<int> form_lower_freqs;
  std::vector<int> shape_freqs;
  std::vector<int> pos_freqs;
  Alphabet form_alphabet;
  Alphabet form_lower_alphabet;
  Alphabet shape_alphabet;
  Alphabet pos_alphabet;

  std::string special_symbols[NUM_SPECIAL_TOKENS];
  special_symbols[TOKEN_UNKNOWN] = kTokenUnknown;
  special_symbols[TOKEN_START] = kTokenStart;
  special_symbols[TOKEN_STOP] = kTokenStop;
  for (int i = 0; i < NUM_SPECIAL_TOKENS; ++i) {
    // Prefix/suffix alphabets receive the specials directly (no cutoff pass).
    prefix_alphabet_.Insert(special_symbols[i]);
    suffix_alphabet_.Insert(special_symbols[i]);
    form_alphabet.Insert(special_symbols[i]);
    form_lower_alphabet.Insert(special_symbols[i]);
    shape_alphabet.Insert(special_symbols[i]);
    pos_alphabet.Insert(special_symbols[i]);

    // Counts of special symbols are set to -1:
    // (keeps them below any cutoff; they are re-inserted explicitly later).
    form_freqs.push_back(-1);
    form_lower_freqs.push_back(-1);
    shape_freqs.push_back(-1);
    pos_freqs.push_back(-1);
  }

  // Go through the corpus and build the dictionaries,
  // counting the frequencies.
  reader->Open(pipe_->GetOptions()->GetTrainingFilePath());
  EntityInstance *instance = static_cast<EntityInstance*>(reader->GetNext());
  while (instance != NULL) {
    int instance_length = instance->size();
    for (int i = 0; i < instance_length; ++i) {
      int id;

      // Add form to alphabet.
      // When case-insensitive, the lower-cased form is counted as the form.
      std::string form = instance->GetForm(i);
      std::string form_lower(form);
      std::transform(form_lower.begin(), form_lower.end(),
                     form_lower.begin(), ::tolower);
      if (!form_case_sensitive) form = form_lower;
      id = form_alphabet.Insert(form);
      if (id >= form_freqs.size()) {
        CHECK_EQ(id, form_freqs.size());
        form_freqs.push_back(0);
      }
      ++form_freqs[id];

      // Add lower-case form to the alphabet.
      id = form_lower_alphabet.Insert(form_lower);
      if (id >= form_lower_freqs.size()) {
        CHECK_EQ(id, form_lower_freqs.size());
        form_lower_freqs.push_back(0);
      }
      ++form_lower_freqs[id];

      // Add prefix/suffix to alphabet.
      // Prefixes/suffixes are inserted without frequency cutoffs.
      std::string prefix = form.substr(0, prefix_length);
      id = prefix_alphabet_.Insert(prefix);
      int start = form.length() - suffix_length;
      if (start < 0) start = 0;  // form shorter than the suffix window
      std::string suffix = form.substr(start, suffix_length);
      id = suffix_alphabet_.Insert(suffix);

      // Add shape to alphabet.
      // Shape uses the original (case-preserved) form, not the cased copy.
      std::string shape;
      GetWordShape(instance->GetForm(i), &shape);
      id = shape_alphabet.Insert(shape);
      if (id >= shape_freqs.size()) {
        CHECK_EQ(id, shape_freqs.size());
        shape_freqs.push_back(0);
      }
      ++shape_freqs[id];

      // Add POS to alphabet.
      id = pos_alphabet.Insert(instance->GetPosTag(i));
      if (id >= pos_freqs.size()) {
        CHECK_EQ(id, pos_freqs.size());
        pos_freqs.push_back(0);
      }
      ++pos_freqs[id];
    }
    delete instance;
    instance = static_cast<EntityInstance*>(reader->GetNext());
  }
  reader->Close();

  // Now adjust the cutoffs if necessary.
  // Each loop rebuilds the member alphabet from scratch, raising the cutoff
  // one step per pass until the alphabet fits under its size limit.
  while (true) {
    form_alphabet_.clear();
    for (int i = 0; i < NUM_SPECIAL_TOKENS; ++i) {
      form_alphabet_.Insert(special_symbols[i]);
    }
    for (Alphabet::iterator iter = form_alphabet.begin();
         iter != form_alphabet.end();
         ++iter) {
      if (form_freqs[iter->second] > form_cutoff) {
        form_alphabet_.Insert(iter->first);
      }
    }
    if (form_alphabet_.size() < kMaxFormAlphabetSize) break;
    ++form_cutoff;
    LOG(INFO) << "Incrementing form cutoff to " << form_cutoff << "...";
  }

  while (true) {
    form_lower_alphabet_.clear();
    for (int i = 0; i < NUM_SPECIAL_TOKENS; ++i) {
      form_lower_alphabet_.Insert(special_symbols[i]);
    }
    for (Alphabet::iterator iter = form_lower_alphabet.begin();
         iter != form_lower_alphabet.end();
         ++iter) {
      if (form_lower_freqs[iter->second] > form_lower_cutoff) {
        form_lower_alphabet_.Insert(iter->first);
      }
    }
    if (form_lower_alphabet_.size() < kMaxFormAlphabetSize) break;
    ++form_lower_cutoff;
    LOG(INFO) << "Incrementing lower-case form cutoff to "
              << form_lower_cutoff << "...";
  }

  while (true) {
    shape_alphabet_.clear();
    for (int i = 0; i < NUM_SPECIAL_TOKENS; ++i) {
      shape_alphabet_.Insert(special_symbols[i]);
    }
    for (Alphabet::iterator iter = shape_alphabet.begin();
         iter != shape_alphabet.end();
         ++iter) {
      if (shape_freqs[iter->second] > shape_cutoff) {
        shape_alphabet_.Insert(iter->first);
      }
    }
    if (shape_alphabet_.size() < kMaxShapeAlphabetSize) break;
    ++shape_cutoff;
    LOG(INFO) << "Incrementing shape cutoff to " << shape_cutoff << "...";
  }

  while (true) {
    pos_alphabet_.clear();
    for (int i = 0; i < NUM_SPECIAL_TOKENS; ++i) {
      pos_alphabet_.Insert(special_symbols[i]);
    }
    for (const auto& pos_token : pos_alphabet) {
      if (pos_freqs[pos_token.second] > pos_cutoff) {
        pos_alphabet_.Insert(pos_token.first);
      }
    }
    if (pos_alphabet_.size() < kMaxPosAlphabetSize) break;
    ++pos_cutoff;
    LOG(INFO) << "Incrementing POS cutoff to " << pos_cutoff << "...";
  }

  // Freeze every alphabet so later lookups cannot grow them.
  form_alphabet_.StopGrowth();
  form_lower_alphabet_.StopGrowth();
  shape_alphabet_.StopGrowth();
  lemma_alphabet_.StopGrowth();
  prefix_alphabet_.StopGrowth();
  suffix_alphabet_.StopGrowth();
  feats_alphabet_.StopGrowth();
  pos_alphabet_.StopGrowth();
  cpos_alphabet_.StopGrowth();

  LOG(INFO) << "Number of forms: " << form_alphabet_.size() << endl
            << "Number of lower-case forms: " << form_lower_alphabet_.size()
            << endl
            << "Number of prefixes: " << prefix_alphabet_.size() << endl
            << "Number of suffixes: " << suffix_alphabet_.size() << endl
            << "Number of word shapes: " << shape_alphabet_.size() << endl
            << "Number of pos: " << pos_alphabet_.size();

  // Id width limits: 16-bit for lexical alphabets, 8-bit for POS/CPOS.
  CHECK_LT(form_alphabet_.size(), 0xffff);
  CHECK_LT(form_lower_alphabet_.size(), 0xffff);
  CHECK_LT(shape_alphabet_.size(), 0xffff);
  CHECK_LT(lemma_alphabet_.size(), 0xffff);
  CHECK_LT(prefix_alphabet_.size(), 0xffff);
  CHECK_LT(suffix_alphabet_.size(), 0xffff);
  CHECK_LT(feats_alphabet_.size(), 0xffff);
  CHECK_LT(pos_alphabet_.size(), 0xff);
  CHECK_LT(cpos_alphabet_.size(), 0xff);

#ifndef NDEBUG
  BuildNames();
#endif
}