/**
 * Add the collected semantic relations to the annotation graph.
 *
 * For each stored (vertex1, vertex2, type) triple, finds the annotation
 * vertices matching the two linguistic vertices and creates a
 * "SemanticRelation" annotation between them. If such a relation already
 * exists, the new type is merged into the existing comma-separated type
 * list (sorted, deduplicated). The relation buffer is cleared at the end.
 *
 * @param analysis current analysis content; must hold "AnnotationData"
 *                 and "RecognizerData"
 * @return true always (unmappable relations are skipped with a warning)
 */
bool SemanticRelationData::addRelations(AnalysisContent& analysis)
{
#ifdef DEBUG_LP
  SEMANTICANALYSISLOGINIT;
#endif
  auto annotationData = static_cast< AnnotationData* >(
      analysis.getData("AnnotationData"));
  if (annotationData->dumpFunction("SemanticRelation") == 0)
  {
    annotationData->dumpFunction("SemanticRelation",
                                 new DumpSemanticRelation());
  }
  auto recoData = static_cast<RecognizerData*>(
      analysis.getData("RecognizerData"));
  for (auto i = m_relations.begin(); i != m_relations.end(); i++)
  {
    LinguisticGraphVertex vertex1 = i->get<0>();
    LinguisticGraphVertex vertex2 = i->get<1>();
    auto matchesVtx1 = annotationData->matches(recoData->getGraphId(),
                                               vertex1, "annot");
    auto matchesVtx2 = annotationData->matches(recoData->getGraphId(),
                                               vertex2, "annot");
    // Fix: dereferencing begin() of an empty match set is undefined
    // behavior; skip relations whose vertices have no annotation match.
    if (matchesVtx1.empty() || matchesVtx2.empty())
    {
      SEMANTICANALYSISLOGINIT;
      LWARN << "SemanticRelationData::addRelations no annotation match for"
            << vertex1 << "or" << vertex2 << "; relation ignored";
      continue;
    }
    if (!annotationData->hasAnnotation(*(matchesVtx1.begin()),
                                       *(matchesVtx2.begin()),
                                       "SemanticRelation"))
    {
      SemanticRelationAnnotation annot(i->get<2>());
      GenericAnnotation ga(annot);
      annotationData->annotate(*(matchesVtx1.begin()),
                               *(matchesVtx2.begin()),
                               "SemanticRelation", ga);
    }
    else
    {
      // A relation already exists between these two vertices: merge the
      // new type into its comma-separated type list.
      auto annot = annotationData->annotation(*(matchesVtx1.begin()),
                                              *(matchesVtx2.begin()),
                                              "SemanticRelation")
                       .pointerValue<SemanticRelationAnnotation>();
      SEMANTICANALYSISLOGINIT;
      LWARN << "SemanticRelationData::addRelations There is already a SemanticRelation between"
            << *(matchesVtx1.begin()) << "and" << *(matchesVtx2.begin())
            << annot->type();
      LWARN << "Adding new type" << i->get<2>();
      QString type = QString::fromUtf8(annot->type().c_str());
      QStringList typeList = type.split(',');
      typeList << i->get<2>().c_str();
      typeList.sort();
      typeList.removeDuplicates();
      annot->type(typeList.join(',').toUtf8().constData());
      LWARN << "Adding type is now" << annot->type();
    }
  }
  m_relations.clear();
  return true;
}
// Read an annotation node_set from a file and attach it to the distance
// viewer named by the "dist" parameter. Parameter problems are accumulated
// into a single message reported via warn(); later validation failures
// (empty set, unknown distance set, size mismatch) abort individually.
void SodController::read_annotation(f_parameter& par)
{
  QString errors;
  QTextStream errStream(&errors);
  QString fileName;
  QString mapperName;
  if(!par.param("file", fileName))
    errStream << "read_annotation: no file specified, (file=..)\n";
  if(!par.param("dist", mapperName))
    errStream << "read_annotation: no dist mapper specified (dist=..)\n";
  if(!distanceViewers.count(mapperName))
    errStream << "read_annotation: unknown dist mapper: " << mapperName << "\n";
  if(errors.length()){
    warn(errors);
    return;
  }
  node_set nodes = read_node_set(fileName, true);
  if(!nodes.n_size()){
    warn("read_annotation obtained empty node_set");
    return;
  }
  if(!distances.count(mapperName)){
    warn("read_annotation: unknown distance set");
    return;
  }
  if(distances[mapperName].n_size() != nodes.n_size() ){
    warn("read_annotation: specified node set has a different size to the annotation node_set");
    return;
  }
  Annotation annotation(nodes);
  distanceViewers[mapperName]->setAnnotation(annotation);
}
void Sequence::AddMMDBAnnotTag(int mmdbID) const { CBioseq::TAnnot::const_iterator a, ae = bioseqASN->GetAnnot().end(); CSeq_annot::C_Data::TIds::const_iterator i, ie; bool found = false; for (a=bioseqASN->GetAnnot().begin(); a!=ae; ++a) { if ((*a)->GetData().IsIds()) { for (i=(*a)->GetData().GetIds().begin(), ie=(*a)->GetData().GetIds().end(); i!=ie; ++i) { if ((*i)->IsGeneral() && (*i)->GetGeneral().GetDb() == "mmdb" && (*i)->GetGeneral().GetTag().IsId()) { found = true; TRACEMSG("mmdb link already present in sequence " << identifier->ToString()); if ((*i)->GetGeneral().GetTag().GetId() != mmdbID || (identifier->mmdbID != MoleculeIdentifier::VALUE_NOT_SET && identifier->mmdbID != mmdbID)) ERRORMSG("Sequence::AddMMDBAnnotTag() - mmdbID mismatch"); break; } } } if (found) break; } if (!found) { CRef < CSeq_id > seqid(new CSeq_id()); seqid->SetGeneral().SetDb("mmdb"); seqid->SetGeneral().SetTag().SetId(mmdbID); CRef < CSeq_annot > annot(new CSeq_annot()); annot->SetData().SetIds().push_back(seqid); (const_cast<Sequence*>(this))->bioseqASN->SetAnnot().push_back(annot); } }
// ---------------------------------------------------------------------------- CRef< CSeq_annot > CVcfReader::ReadSeqAnnot( ILineReader& lr, IErrorContainer* pErrorContainer ) // ---------------------------------------------------------------------------- { CRef< CSeq_annot > annot( new CSeq_annot ); CRef< CAnnot_descr > desc( new CAnnot_descr ); annot->SetDesc( *desc ); annot->SetData().SetFtable(); m_Meta.Reset( new CAnnotdesc ); m_Meta->SetUser().SetType().SetStr( "vcf-meta-info" ); while ( ! lr.AtEOF() ) { string line = *(++lr); NStr::TruncateSpacesInPlace( line ); if ( x_ProcessMetaLine( line, annot ) ) { continue; } if ( x_ProcessHeaderLine( line, annot ) ) { continue; } if ( xProcessDataLine( line, annot ) ) { continue; } // still here? not good! cerr << "Unexpected line: " << line << endl; } return annot; }
static CRef<CSeq_annot> s_ReadTestSeqAnnot() { auto_ptr<CObjectIStream> istr(CObjectIStream::Open(kSeqAnnotAsnFile, eSerial_AsnText)); CRef<CSeq_annot> annot(new CSeq_annot); if (istr.get() && istr->InGoodState()) { *istr >> *annot; } else {
void ChangeDisplay::addText(std::string label, Ogre::Vector3 pos, int r, int g, int b) { MovableText* text = new MovableText(label, "Arial", TEXT_SIZE); MovableTextPtr annot(text); annot->setTextAlignment(MovableText::H_CENTER, MovableText::V_ABOVE); Ogre::SceneNode* frame_node = scene_node_->createChildSceneNode(); frame_node->attachObject(text); annot->setColor(Ogre::ColourValue(r/255.0, g/255.0, b/255.0, 1.0)); annot->getParentNode()->setPosition(pos); annot->setVisible(true); annotations.push_back(annot); }
bool NucleiDetectionWholeSlideFilter::process() { std::shared_ptr<MultiResolutionImage> img = _input.lock(); std::vector<unsigned long long> dims = img->getLevelDimensions(this->_processedLevel); double downsample = img->getLevelDownsample(this->_processedLevel); _centerPoints.clear(); NucleiDetectionFilter<double> filter; filter.setAlpha(_alpha); filter.setBeta(_beta); filter.setHMaximaThreshold(_threshold); filter.setMaximumRadius(_maxRadius); filter.setMinimumRadius(_minRadius); filter.setRadiusStep(_stepRadius); std::vector<Point> tmp; for (unsigned long long t_y = 0; t_y < dims[1]; t_y += 512) { std::cout << t_y << std::endl; for (unsigned long long t_x = 0; t_x < dims[0]; t_x += 512) { Patch<double> tile = img->getPatch<double>(static_cast<unsigned long long>(t_x*downsample), static_cast<unsigned long long>(t_y*downsample), 512, 512, this->_processedLevel); double* buf = tile.getPointer(); filter.filter(tile, tmp); for (std::vector<Point>::const_iterator it = tmp.begin(); it != tmp.end(); ++it) { std::vector<float> curPoint; curPoint.push_back(it->getX() * downsample + t_x*downsample); curPoint.push_back(it->getY() * downsample + t_y*downsample); _centerPoints.push_back(curPoint); } tmp.clear(); } } if (!_outPath.empty()) { std::shared_ptr<Annotation> annot(new Annotation()); annot->setName("Detected nuclei"); annot->setType(Annotation::POINTSET); for (std::vector<std::vector<float> >::const_iterator it = _centerPoints.begin(); it != _centerPoints.end(); ++it) { float x = (*it)[0]; float y = (*it)[1]; annot->addCoordinate(Point(x, y)); } std::shared_ptr<AnnotationList> annotList(new AnnotationList()); annotList->addAnnotation(annot); XmlRepository repo(annotList); repo.setSource(_outPath); repo.save(); } return true; }
// Exercises RTFAnnotation::Write with and without the chrfmt-found flag.
// NOTE(review): all of the original assertions are commented out, so this
// test currently only verifies that Write() runs without crashing in both
// states — confirm whether the asserts should be reinstated.
void TestRTFAnnotation::TestWriteNoChrfmt()
{
    ComparisonContext cContext;     // constructed but not otherwise used here
    RTFFileContext context;
    RTFAnnotation annot(&context);
    FakeRTFWriter2 fake(&context);
    // First pass: pretend a chrfmt group was found.
    annot.m_bChrfmtFound = true;
    annot.Write(&fake);
    //assertMessage(fake.m_iEndGroupCount==0, _T("Write does nothing now - annotations are not written"));
    // Second pass: no chrfmt; reset the fake writer's group counter first.
    annot.m_bChrfmtFound = false;
    fake.m_iEndGroupCount=0;
    // fake.StartAnnot();
    annot.Write(&fake);
    //assertTest(fake.m_iEndGroupCount==0);
}
// ---------------------------------------------------------------------------- CRef< CSeq_annot > CVcfReader::ReadSeqAnnot( ILineReader& lr, IMessageListener* pEC ) // ---------------------------------------------------------------------------- { CRef< CSeq_annot > annot( new CSeq_annot ); CRef< CAnnot_descr > desc( new CAnnot_descr ); annot->SetDesc( *desc ); annot->SetData().SetFtable(); m_Meta.Reset( new CAnnotdesc ); m_Meta->SetUser().SetType().SetStr( "vcf-meta-info" ); while ( ! lr.AtEOF() ) { m_uLineNumber++; string line = *(++lr); NStr::TruncateSpacesInPlace( line ); if (xProcessMetaLine(line, annot, pEC)) { continue; } if (xProcessHeaderLine(line, annot)) { continue; } if (xProcessDataLine(line, annot, pEC)) { continue; } // still here? not good! AutoPtr<CObjReaderLineException> pErr( CObjReaderLineException::Create( eDiag_Warning, 0, "CVcfReader::ReadSeqAnnot: Unrecognized line or record type.", ILineError::eProblem_GeneralParsingError) ); ProcessWarning(*pErr, pEC); } return annot; }
// Build an "idiomatic expression" alternative in the analysis graph for a
// recognizer match: a single new token standing for the whole expression is
// inserted as an alternative path and annotated as "IdiomExpr".
// - contiguous match: the new vertex simply bypasses the matched span;
// - non-contiguous match: the idiomatic vertex takes the head's place and
//   the non-kept vertices of the match are duplicated around it.
// When the expression is absolute (non-contextual) the original path is
// scheduled for removal. Returns false when the match is empty, overlaps a
// previously removed area, or yields empty morphosyntactic data.
// NOTE(review): some log-string literals below contain stray line breaks —
// this looks like a text-extraction artifact; they are preserved as found.
bool CreateIdiomaticAlternative::operator()(Automaton::RecognizerMatch& result,
                                            AnalysisContent& analysis) const
{
#ifdef DEBUG_LP
  MORPHOLOGINIT;
  LDEBUG << "CreateIdiomaticAlternative, match is " << result;
  LDEBUG << " expression is " << (result.isContiguous()?"":"non")
         << " contiguous and" << (result.isContextual()?" non":"")
         << " absolute";
#endif
  if (result.empty()) return false;
  const LinguisticAnalysisStructure::AnalysisGraph& graph = *(result.getGraph());
  AnnotationData* annotationData =
      static_cast< AnnotationData* >(analysis.getData("AnnotationData"));
  // register the dump function for "IdiomExpr" annotations once
  if (annotationData->dumpFunction("IdiomExpr") == 0)
  {
    annotationData->dumpFunction("IdiomExpr",
                                 new DumpIdiomaticExpressionAnnotation());
  }
  RecognizerData* recoData =
      static_cast<RecognizerData*>(analysis.getData("RecognizerData"));
  std::set<LinguisticGraphVertex> addedVertices;
  // initialize the vertices to clear
  if (result.isContiguous())
  {
    // MORPHOLOGINIT;
    // LDEBUG << "contiguous idiomatic expression found: "
    //        << result.concatString();
    // only one part : terms in expression are adjacent -> easy part
    // check if there is an overlap first
    if (recoData->matchOnRemovedVertices(result))
    {
      // ignore current idiomatic expression, continue
      MORPHOLOGINIT;
      LWARN << "idiomatic expression ignored: "
            << Common::Misc::limastring2utf8stdstring(result.concatString())
            << ": overlapping with a previous one";
      return false;
    }
    // create the new token
    std::pair<Token*,MorphoSyntacticData*> newToken = createAlternativeToken(result);
    if (newToken.second->empty())
    {
      // ignore current idiomatic expression, continue
      MORPHOLOGINIT;
      LERROR << "CreateIdiomaticAlternative::operator() Got empty morphosyntactic data. 
Abort";
      delete newToken.first;
      delete newToken.second;
      return false;
    }
    // add the vertex
    LinguisticGraphVertex idiomaticVertex =
        addAlternativeVertex(newToken.first, newToken.second,
                             const_cast<LinguisticGraph*>(graph.getGraph()));
    // mirror the new vertex in the annotation graph and tag it
    AnnotationGraphVertex agv = annotationData->createAnnotationVertex();
    annotationData->addMatching("AnalysisGraph", idiomaticVertex, "annot", agv);
    annotationData->annotate(agv,
        Common::Misc::utf8stdstring2limastring("AnalysisGraph"),
        idiomaticVertex);
    IdiomaticExpressionAnnotation annot(result);
    GenericAnnotation ga(annot);
    annotationData->annotate(agv,
        Common::Misc::utf8stdstring2limastring("IdiomExpr"), ga);
    addedVertices.insert(idiomaticVertex);
    //create the alternative with this only vertex
    createBeginAlternative(result.front().getVertex(),
        idiomaticVertex,const_cast<LinguisticGraph&>(*graph.getGraph()));
    attachEndOfAlternative(idiomaticVertex,
        result.back().getVertex(),const_cast<LinguisticGraph&>(*graph.getGraph()));
    // if expression is not contextual, only keep alternative
    if (!result.isContextual())
    {
      recoData->storeVerticesToRemove(result,
          const_cast<LinguisticGraph*>(graph.getGraph()));
      removeEdges(const_cast<LinguisticGraph&>(*graph.getGraph()),
                  result, analysis);
      //recoData->setNextVertex(idiomaticVertex);
      // if match was on single token, use next vertices (to avoid loops)
      if (result.size() > 1)
      {
        recoData->setNextVertex(idiomaticVertex);
      }
      else
      {
        LinguisticGraphOutEdgeIt outItr,outItrEnd;
        boost::tie(outItr,outItrEnd) = out_edges(idiomaticVertex,*(graph.getGraph()));
        for (;outItr!=outItrEnd;outItr++)
        {
          recoData->setNextVertex(target(*outItr, *(graph.getGraph())));
        }
      }
    }
  }
  else
  {
    // several parts : tough case
    // MORPHOLOGINIT;
    // LDEBUG << "non contiguous idiomatic expression found: "
    //        << result.concatString();
    // check if there is an overlap first
    if (recoData->matchOnRemovedVertices(result))
    {
      // ignore current idiomatic expression, continue
      MORPHOLOGINIT;
      LWARN << "idiomatic expression ignored: "
            << Common::Misc::limastring2utf8stdstring(result.concatString())
            << ": overlapping with a previous one";
      return false;
    }
    // create the new token
    pair<Token*,MorphoSyntacticData*> newToken = createAlternativeToken(result);
    if (newToken.second->empty())
    {
      // ignore current idiomatic expression, continue
      MORPHOLOGINIT;
      LERROR << "CreateIdiomaticAlternative::operator() Got empty morphosyntactic data. 
Abort";
      delete newToken.first;
      delete newToken.second;
      return false;
    }
    // add the vertex
    LinguisticGraphVertex idiomaticVertex =
        addAlternativeVertex(newToken.first,newToken.second,
                             const_cast<LinguisticGraph*>(graph.getGraph()));
    addedVertices.insert(idiomaticVertex);
    AnnotationGraphVertex agv = annotationData->createAnnotationVertex();
    annotationData->addMatching("AnalysisGraph", idiomaticVertex, "annot", agv);
    annotationData->annotate(agv,
        Common::Misc::utf8stdstring2limastring("AnalysisGraph"),
        idiomaticVertex);
    IdiomaticExpressionAnnotation annot(result);
    GenericAnnotation ga(annot);
    annotationData->annotate(agv,
        Common::Misc::utf8stdstring2limastring("IdiomExpr"), ga);
    //create the alternative with this vertex and duplicate of other vertices
    deque<LinguisticGraphVertex> idiomAlternative;
    LinguisticGraphVertex headVertex=result.getHead();
#ifdef DEBUG_LP
    LDEBUG << "headVertex = " << headVertex;
    if (headVertex!=0)
    {
      LDEBUG << "=> " << Common::Misc::limastring2utf8stdstring(
          get(vertex_token,*graph.getGraph(),headVertex)->stringForm());
    }
#endif
    bool foundHead=false;
    bool keeping = false;
    // bounds of the current run of kept vertices; completed runs are
    // collected in edgesToRemove so the original path can be cut later
    std::pair< LinguisticGraphVertex, LinguisticGraphVertex > idiomPartBounds;
    std::set< std::pair< LinguisticGraphVertex, LinguisticGraphVertex > > edgesToRemove;
    RecognizerMatch::const_iterator matchItr=result.begin();
    for (; matchItr!=result.end(); matchItr++)
    {
      if (!matchItr->isKept())
      {
        if (keeping)
        {
          // a run of kept vertices just ended: record its bounds
          RecognizerMatch::const_iterator prevItr = matchItr - 1;
          idiomPartBounds.second = prevItr->getVertex();
          keeping = false;
#ifdef DEBUG_LP
          LDEBUG << "adding " << idiomPartBounds.first << " -> "
                 << idiomPartBounds.second << " in edgesToRemove";
#endif
          edgesToRemove.insert(idiomPartBounds);
        }
        // duplicate this vertex
#ifdef DEBUG_LP
        LDEBUG << "duplication of vertex " << matchItr->getVertex();;
#endif
        Token* token=get(vertex_token,*graph.getGraph(),matchItr->getVertex());
        MorphoSyntacticData* data =
            new MorphoSyntacticData(*get(vertex_data,*graph.getGraph(),matchItr->getVertex()));
        LinguisticGraphVertex dupVx =
            add_vertex(const_cast<LinguisticGraph&>(*graph.getGraph()));
        put(vertex_token,const_cast<LinguisticGraph&>(*graph.getGraph()),dupVx,token);
        put(vertex_data,const_cast<LinguisticGraph&>(*graph.getGraph()),dupVx,data);
        idiomAlternative.push_back(dupVx);
        AnnotationGraphVertex agv = annotationData->createAnnotationVertex();
        annotationData->addMatching("AnalysisGraph", dupVx, "annot", agv);
        annotationData->annotate(agv,
            Common::Misc::utf8stdstring2limastring("AnalysisGraph"), dupVx);
        // carry over all annotations of the source vertex except the
        // "AnalysisGraph" back-pointer, which was just set for dupVx
        std::set< LinguisticGraphVertex > annotMatches =
            annotationData->matches("AnalysisGraph",matchItr->getVertex(),"annot");
        for (std::set< LinguisticGraphVertex >::const_iterator annotIt(annotMatches.begin());
             annotIt != annotMatches.end(); annotIt++)
        {
          std::set< std::string > excepted;
          excepted.insert("AnalysisGraph");
          annotationData->cloneAnnotations(*annotIt, agv, excepted);
        }
        addedVertices.insert(dupVx);
        // verticesToRemove.insert(matchItr->getVertex());
      }
      else
      {
        if (!keeping)
        {
          // a run of kept vertices starts here
          idiomPartBounds.first = matchItr->getVertex();
          keeping = true;
        }
#ifdef DEBUG_LP
        LDEBUG << "kept vertex " << matchItr->getVertex();
#endif
        if (matchItr->getVertex()==headVertex)
        {
          foundHead=true;
#ifdef DEBUG_LP
          LDEBUG << "add head vertex " << idiomaticVertex;
#endif
          // the idiomatic vertex takes the head's position in the alternative
          idiomAlternative.push_back(idiomaticVertex);
        }
      }
    }
    if (!foundHead)
    {
      MORPHOLOGINIT;
      LWARN << "head token has not been found in non contiguous expression. 
" << "Idiomatic token is placed first";
      idiomAlternative.push_front(idiomaticVertex);
    }
    if (keeping)
    {
      // close the last run of kept vertices
      RecognizerMatch::const_iterator prevItr = matchItr - 1;
      idiomPartBounds.second = prevItr->getVertex();
      keeping = false;
#ifdef DEBUG_LP
      LDEBUG << "adding " << idiomPartBounds.first << " -> "
             << idiomPartBounds.second << " in edgesToRemove";
#endif
      edgesToRemove.insert(idiomPartBounds);
    }
    // link alternatives
#ifdef DEBUG_LP
    LDEBUG << "idiomAlternative has " << idiomAlternative.size() << " vertex";
#endif
    createBeginAlternative(result.front().getVertex(),
        idiomAlternative.front(),const_cast<LinguisticGraph&>(*graph.getGraph()));
    {
      // chain the alternative's vertices together with new edges
      deque<LinguisticGraphVertex>::const_iterator idItr=idiomAlternative.begin();
      LinguisticGraphVertex lastIdiomVx=*idItr;
      idItr++;
      while (idItr!=idiomAlternative.end())
      {
        LinguisticGraphEdge newEdge;
        bool ok;
        boost::tie(newEdge, ok) =
            add_edge(lastIdiomVx,*idItr,const_cast<LinguisticGraph&>(*graph.getGraph()));
#ifdef DEBUG_LP
        LDEBUG << "added new edge in alternatives linking: "
               << newEdge.m_source << " -> " << newEdge.m_target;
#endif
        lastIdiomVx=*idItr;
        idItr++;
      }
    }
    attachEndOfAlternative(idiomAlternative.back(),
        result.back().getVertex(),const_cast<LinguisticGraph&>(*graph.getGraph()));
    // if expression is not contextual, only keep alternative
    if (!result.isContextual())
    {
#ifdef DEBUG_LP
      LDEBUG << "expression is not contextual, only keep alternative";
#endif
      std::set< std::pair< LinguisticGraphVertex, LinguisticGraphVertex > >::const_iterator
          edgesToRemoveIt, edgesToRemoveIt_end;
      edgesToRemoveIt = edgesToRemove.begin();
      edgesToRemoveIt_end = edgesToRemove.end();
      for (; edgesToRemoveIt != edgesToRemoveIt_end; edgesToRemoveIt++)
      {
#ifdef DEBUG_LP
        LDEBUG << "Removing edge " << (*edgesToRemoveIt).first << " -> "
               << (*edgesToRemoveIt).second;
#endif
        // NOTE(review): removeEdges is invoked once per recorded bound pair
        // but always receives the whole match; the pair itself is unused
        // here — verify this is intended.
        removeEdges(const_cast<LinguisticGraph&>(*graph.getGraph()),
                    result, analysis);
      }
      // recoData->storeVerticesToRemove(result,*graph);
      // no need to check size: if several parts, more than one vertex
      recoData->setNextVertex(idiomaticVertex);
    }
  }
  // clean up vertices made unreachable by the rewiring above
  RecognizerMatch::const_iterator matchItr=result.begin();
  for (; matchItr!=result.end(); matchItr++)
  {
    recoData->clearUnreachableVertices( analysis, (*matchItr).getVertex());
  }
  // recoData->clearUnreachableVertices( analysis, result.front().getVertex(), result.back().getVertex(), storedEdges);
  return true;
}
// Build a Seq-entry for one SRA spot: a level-0, class-"other" Bioseq-set
// holding one raw IUPACna Bioseq per biological read of the spot, each with
// a "Phred Quality" Seq-graph annotation. Technical reads, empty segments,
// and (when trimming is enabled) fully clipped reads are skipped.
// Returns: a null CRef when no read qualifies; the single read's own entry
// when exactly one qualifies; otherwise the set entry.
CRef<CSeq_entry> CSraRun::GetSpotEntry(spotid_t spot_id) const
{
    CRef<CSeq_entry> entry;
    // spot name, used below as the title descriptor of every read bioseq
    CSraStringValue name(m_Name, spot_id);
    entry = new CSeq_entry();
    CBioseq_set& seqset = entry->SetSet();
    seqset.SetLevel(0);
    seqset.SetClass(seqset.eClass_other);
    CSraValueFor<SRASpotDesc> sdesc(m_SDesc, spot_id);
    // quality-clip window [trim_start, trim_end), applied only when m_Trim
    TSeqPos trim_start = m_Trim && m_TrimStart?
        CSraValueFor<INSDC_coord_zero>(m_TrimStart, spot_id).Value(): 0;
    TSeqPos trim_end = sdesc->clip_qual_right;
    CSraValueFor<SRAReadDesc> rdesc(m_RDesc, spot_id);
    CSraStringValue read(m_Read, spot_id);  // concatenated bases of the spot
    CSraBytesValue qual(m_Qual, spot_id);   // per-base quality values
    int seq_count = 0;
    // id prefix "<accession>.<spot>."; the 1-based read ordinal is appended
    string id_start = GetAccession()+'.'+NStr::UIntToString(spot_id)+'.';
    for ( int r = 0; r < sdesc->num_reads; ++r ) {
        // keep only biological (non-technical) reads
        if ( rdesc[r].type != SRA_READ_TYPE_BIOLOGICAL ) {
            continue;
        }
        TSeqPos len = rdesc[r].seg.len;
        if ( len == 0 ) {
            continue;
        }
        TSeqPos start = rdesc[r].seg.start;
        TSeqPos end = start + len;
        if ( m_Trim ) {
            // intersect the read segment with the clip window
            start = max(start, trim_start);
            end = min(end, trim_end);
            if ( start >= end ) {
                // read entirely clipped away
                continue;
            }
            len = end - start;
        }
        CRef<CSeq_entry> seq_entry(new CSeq_entry);
        CBioseq& seq = seq_entry->SetSeq();
        // general id: db "SRA", tag "<accession>.<spot>.<read>"
        CRef<CSeq_id> id(new CSeq_id);
        id->SetGeneral().SetDb("SRA");
        id->SetGeneral().SetTag().SetStr(id_start+NStr::UIntToString(r+1));
        seq.SetId().push_back(id);
        {{
            // title descriptor = spot name
            CRef<CSeqdesc> desc(new CSeqdesc);
            desc->SetTitle(name.Value());
            seq.SetDescr().Set().push_back(desc);
        }}
        {{
            // raw nucleotide data for this (possibly trimmed) read segment
            CSeq_inst& inst = seq.SetInst();
            inst.SetRepr(inst.eRepr_raw);
            inst.SetMol(inst.eMol_na);
            inst.SetLength(len);
            inst.SetSeq_data().SetIupacna().Set()
                .assign(read.data()+start, len);
        }}
        {{
            // quality graph over the whole read, with observed min/max
            CRef<CSeq_annot> annot(new CSeq_annot);
            CRef<CSeq_graph> graph(new CSeq_graph);
            annot->SetData().SetGraph().push_back(graph);
            graph->SetTitle("Phred Quality");
            graph->SetLoc().SetWhole(*id);
            graph->SetNumval(len);
            CByte_graph& bytes = graph->SetGraph().SetByte();
            bytes.SetAxis(0);
            CByte_graph::TValues& values = bytes.SetValues();
            values.reserve(len);
            int min = kMax_Int;
            int max = kMin_Int;
            for ( size_t i = 0; i < len; ++i ) {
                int v = qual[start+i];
                values.push_back(v);
                if ( v < min ) {
                    min = v;
                }
                if ( v > max ) {
                    max = v;
                }
            }
            bytes.SetMin(min);
            bytes.SetMax(max);
            seq.SetAnnot().push_back(annot);
        }}
        seqset.SetSeq_set().push_back(seq_entry);
        ++seq_count;
    }
    // collapse trivial results: no reads -> null entry, one read -> the
    // bare bioseq entry instead of a one-element set
    switch ( seq_count ) {
    case 0:
        entry.Reset();
        break;
    case 1:
        entry = seqset.GetSeq_set().front();
        break;
    }
    return entry;
}