Example #1
arma::mat Vespucci::Math::Transform::cwt(arma::vec X, std::string wavelet, arma::uvec scales)
{
    arma::mat wcoeffs(X.n_rows, scales.n_elem);
    arma::vec psi_xval(1024);
    arma::vec psi(1024);

    arma::uword old_length = X.n_rows;
    X = Vespucci::Math::ExtendToNextPow(X, 2);


    //calculate the wavelet:
    try{
        if (wavelet == "mexh"){
            psi_xval = arma::linspace(-8, 8, 1024);
            psi = (2/std::sqrt(3.0) * std::pow(arma::datum::pi, -0.25)) * (arma::ones(1024) - arma::pow(psi_xval, 2)) % arma::exp(-arma::pow(psi_xval, 2)/2);
        }
        else if (wavelet == "haar"){
            psi_xval = arma::linspace(0, 1, 1024);
            psi(0) = 0;
            psi(1023) = 0;
            psi.rows(1, 511) = arma::ones(511);
            psi.rows(512, 1022) = -1*arma::ones(511);
        }
    }catch(std::exception &e){
        std::cerr << "Error calculating wavelet! " << e.what() << std::endl;
        throw;
    }


    psi_xval -= arma::ones(psi_xval.n_elem)*psi_xval(0);
    double dxval = psi_xval(1);
    double xmax = psi_xval(psi_xval.n_elem - 1);

    arma::vec f, j, w;
    arma::uvec j_u;
    arma::uword i, scale, shift_by;
    try{
        for (i = 0; i < scales.n_elem; ++i){
            scale = scales(i);

            f = arma::zeros(X.n_elem);
            j = arma::floor(arma::linspace(0, scale*xmax, scale*xmax + 1)/(scale*dxval));
            j_u.set_size(j.n_elem);


            for (arma::uword k = 0; k < j_u.n_elem; ++k){
                j_u(k) = j(k);
            }

            f.rows(0, j.n_elem-1) = arma::flipud(psi.elem(j_u)) - arma::mean(psi.elem(j_u));

            if (f.n_rows != X.n_rows){
                std::cerr << "scale too large!" << std::endl;
            }

            //convolve and scale
            w = (1/std::sqrt(scale)) * Vespucci::Math::conv_fft(X, f, "filter");


            //shift by half wavelet width + scale * xmax
            shift_by = X.n_rows - std::floor((double) j.n_rows/2)  + scale*xmax;

            w = Vespucci::Math::rotate(w, shift_by, true);

            //if signal had to be padded, remove padding
            if (w.n_rows > old_length)
                w.shed_rows(old_length, w.n_rows - 1);

            wcoeffs.col(i) = w;//rotate(w, scale*xmax, true);
        }
    }catch(std::exception &e){
        std::cerr << "error in CWT algorithm! " << e.what() << std::endl;
        std::cerr << "scale = " << scale << std::endl;
        throw;
    }

    return wcoeffs;
}
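A minimal usage sketch for the transform above, assuming Armadillo and the Vespucci headers are available; the include path, synthetic signal, and scale range below are illustrative assumptions, not part of the library:

// Hedged usage sketch: run the CWT above on a synthetic signal at scales 1..32.
// #include "Math/Transform/cwt.h"   // hypothetical include path; the real Vespucci header may differ
#include <armadillo>

int example_cwt_usage()
{
    arma::vec signal = arma::randn<arma::vec>(1000);        // stand-in for a spectrum
    arma::uvec scales = arma::regspace<arma::uvec>(1, 32);   // scales 1 through 32
    arma::mat coeffs = Vespucci::Math::Transform::cwt(signal, "mexh", scales);
    coeffs.save("cwt_coefficients.csv", arma::csv_ascii);    // one column per scale
    return 0;
}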
Example #2
File: IO.cpp Project: cran/rtk
void lineCntOut(options* opts){
	
	string inF = opts->input;
	string outF = opts->output;
	string arg4 = opts->referenceFile;
	bool check4idxMatch = opts->check4idxMatch;

	istream* in;
	if (isGZfile(inF)) {
#ifdef _gzipread
		in = new igzstream(inF.c_str(), ios::in);
        #ifdef notRpackage
		cout << "Reading gzip input\n";
        #endif
#else
        #ifdef notRpackage
		cout << "gzip not supported in your rtk build\n"; exit(50);
        #endif
#endif

	}	else {
		in = new ifstream(inF.c_str());
	}
    if (!(*in)){
#ifdef notRpackage
        cerr << "Can't open infile " << inF << endl; std::exit(99);
#endif
    }
	ofstream out(outF.c_str(), ios::out);
	if (!out){
#ifdef notRpackage
        cerr << "Can't open outfile " << outF << endl; std::exit(99);
#endif
    }
    //read the file that lists all indexes of the lines to be extracted (sorted ascending below)
    string line;
    vector<uint> srtTar;
    ifstream idxS(arg4.c_str());
    if (!idxS){
#ifdef notRpackage
        cerr << "Can't open reference file " << arg4 << endl; std::exit(99);
#endif
    }
    while (getline(idxS, line, '\n')) {
        if (line[0] == '>'){
            line.erase(0,1);
        }
        srtTar.push_back(stoi(line));
    }
    idxS.close();
    //sort ascending
    sort(srtTar.begin(), srtTar.end());

    //sort through new file
    if (!out){
#ifdef notRpackage
        cerr << "Can't open outfile " << outF << endl; std::exit(99);
#endif
    }
    uint cnt(1); uint j(0);
    while (getline((*in), line, '\n')) {
        if (cnt == srtTar[j]){
			if (check4idxMatch) {
				size_t pos = line.find('\t');
                #ifdef notRpackage
				if (pos == std::string::npos) {
					cout << "requires tab separated row name: line " << cnt << "\n"<<line<<"\n";
					exit(956);
				}
                #endif
				string rowN = line.substr(0,pos);
                #ifdef notRpackage
				if (stoi(rowN) != (int)cnt) {
					cerr << "mismatch "<<rowN<<" != "<<cnt<<"\n";
					exit(955);
				}
                #endif
			}
            out << line + "\n";
            uint cur = srtTar[j];
            while (srtTar[j] == cur){ j++; }
            if (j == srtTar.size()){ break; }
        }
        cnt++;
    }

    //(*in).close(); 
	delete in;
	out.close();
    if (j != srtTar.size()){

#ifdef notRpackage
        cerr << "Missed " << (srtTar.size() - j) << " entries." << endl;
#endif
    }
}
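The core of the extraction loop above can be shown in isolation; the following is a simplified, standalone sketch (standard library only, no gzip support or R-package guards, and the names are illustrative) that writes out only the lines whose 1-based indices appear in a list:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Simplified sketch of the same idea: copy only the lines whose 1-based
// index occurs in `wanted` (sorted ascending here, duplicates allowed).
void extractLines(std::istream& in, std::ostream& out, std::vector<unsigned> wanted)
{
    std::sort(wanted.begin(), wanted.end());
    std::string line;
    unsigned cnt = 1;
    std::size_t j = 0;
    while (j < wanted.size() && std::getline(in, line)) {
        if (cnt == wanted[j]) {
            out << line << '\n';
            const unsigned cur = wanted[j];
            while (j < wanted.size() && wanted[j] == cur) ++j;  // skip duplicate indices
        }
        ++cnt;
    }
}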
Example #3
  void TheoreticalSpectrumGenerator::addLosses_(RichPeakSpectrum & spectrum, const AASequence & ion, double intensity, Residue::ResidueType res_type, int charge) const
  {
    RichPeak1D p;

    set<String> losses;
    for (AASequence::ConstIterator it = ion.begin(); it != ion.end(); ++it)
    {
      if (it->hasNeutralLoss())
      {
        vector<EmpiricalFormula> loss_formulas = it->getLossFormulas();
        for (Size i = 0; i != loss_formulas.size(); ++i)
        {
          losses.insert(loss_formulas[i].toString());
        }
      }
    }

    if (!add_isotopes_)
    {
      p.setIntensity(intensity * rel_loss_intensity_);
    }

    for (set<String>::const_iterator it = losses.begin(); it != losses.end(); ++it)
    {
      EmpiricalFormula loss_ion = ion.getFormula(res_type, charge) - EmpiricalFormula(*it);
      // thanks to Chris and Sandro
      // check for negative element frequencies (might happen if losses are not allowed for specific ions)
      bool negative_elements(false);
      for (EmpiricalFormula::ConstIterator eit = loss_ion.begin(); eit != loss_ion.end(); ++eit)
      {
        if (eit->second < 0)
        {
          negative_elements = true;
          break;
        }
      }
      if (negative_elements)
      {
        continue;
      }
      double loss_pos = loss_ion.getMonoWeight() / (double)charge;
      const String& loss_name = *it;

      if (add_isotopes_)
      {
        IsotopeDistribution dist = loss_ion.getIsotopeDistribution(max_isotope_);
        UInt j(0);
        for (IsotopeDistribution::ConstIterator iso = dist.begin(); iso != dist.end(); ++iso)
        {
          p.setMZ((double)(loss_pos + j) / (double)charge);
          p.setIntensity(intensity * rel_loss_intensity_ * iso->second);
          if (add_metainfo_ && j == 0)
          {
            // note: important to construct a string from char. If omitted it will perform pointer arithmetics on the "-" string literal
            String ion_name = String(residueTypeToIonLetter_(res_type)) + String(ion.size()) + "-" + loss_name + String(charge, '+');
            p.setMetaValue("IonName", ion_name);
          }
          spectrum.push_back(p);
        }
      }
      else
      {
        p.setMZ(loss_pos);
        if (add_metainfo_)
        {
          // note: important to construct a string from char. If omitted it will perform pointer arithmetics on the "-" string literal
          String ion_name = String(residueTypeToIonLetter_(res_type)) + String(ion.size()) + "-" + loss_name + String(charge, '+');
          p.setMetaValue("IonName", ion_name);
        }
        spectrum.push_back(p);
      }
    }

  }
Example #4
int k(int x)
{
  return j(x + 1);
}
Example #5
void tst_QSet::javaMutableIterator()
{
    QSet<QString> set1;
    for (int k = 0; k < 25000; ++k)
        set1.insert(QString::number(k));

    {
        int sum = 0;
        QMutableSetIterator<QString> i(set1);
        while (i.hasNext())
            sum += toNumber(i.next());
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    {
        int sum = 0;
        QMutableSetIterator<QString> i(set1);
        while (i.hasNext()) {
            i.next();
            sum += toNumber(i.value());
        }
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    {
        int sum = 0;
        QMutableSetIterator<QString> i(set1);
        while (i.hasNext()) {
            sum += toNumber(i.peekNext());
            i.next();
        }
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    {
        int sum = 0;
        QMutableSetIterator<QString> i(set1);
        while (i.hasNext()) {
            i.next();
            sum += toNumber(i.peekPrevious());
        }
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    {
        int sum = 0;
        QMutableSetIterator<QString> i(set1);
        i.toBack();
        while (i.hasPrevious())
            sum += toNumber(i.previous());
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    {
        int sum = 0;
        QMutableSetIterator<QString> i(set1);
        i.toBack();
        while (i.hasPrevious()) {
            sum += toNumber(i.peekPrevious());
            i.previous();
        }
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    {
        int sum = 0;
        QMutableSetIterator<QString> i(set1);
        i.toBack();
        while (i.hasPrevious()) {
            i.previous();
            sum += toNumber(i.peekNext());
        }
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    {
        QSet<QString> set2 = set1;
        QSet<QString> set3 = set2;

        QMutableSetIterator<QString> i(set2);
        QMutableSetIterator<QString> j(set3);

        while (i.hasNext()) {
            i.next();
            i.remove();
        }
        QVERIFY(set2.isEmpty());
        QVERIFY(!set3.isEmpty());

        j.toBack();
        while (j.hasPrevious()) {
            j.previous();
            j.remove();
        }
        QVERIFY(set2.isEmpty());
        QVERIFY(set3.isEmpty());
    }
}
Example #6
        ProgramRunner::ProgramRunner( const BSONObj &args ) {
            verify( !args.isEmpty() );

            string program( args.firstElement().valuestrsafe() );
            verify( !program.empty() );
            boost::filesystem::path programPath = findProgram(program);

            string prefix( "mongod-" );
            bool isMongodProgram =
                    string("mongod") == program ||
                    program.compare( 0, prefix.size(), prefix ) == 0;

            prefix = "mongos-";
            bool isMongosProgram =
                    string("mongos") == program ||
                    program.compare( 0, prefix.size(), prefix ) == 0;

#if 0
            if (isMongosProgram) {
                _argv.push_back("valgrind");
                _argv.push_back("--log-file=/tmp/mongos-%p.valgrind");
                _argv.push_back("--leak-check=yes");
                _argv.push_back("--suppressions=valgrind.suppressions");
                //_argv.push_back("--error-exitcode=1");
                _argv.push_back("--");
            }
#endif

            _argv.push_back( programPath.string() );

            _port = -1;

            BSONObjIterator j( args );
            j.next(); // skip program name (handled above)
            while(j.more()) {
                BSONElement e = j.next();
                string str;
                if ( e.isNumber() ) {
                    stringstream ss;
                    ss << e.number();
                    str = ss.str();
                }
                else {
                    verify( e.type() == mongo::String );
                    str = e.valuestr();
                }
                if ( str == "--port" )
                    _port = -2;
                else if ( _port == -2 )
                    _port = strtol( str.c_str(), 0, 10 );
                _argv.push_back(str);
            }

            if ( ! isMongodProgram && ! isMongosProgram && program != "mongobridge" )
                _port = 0;
            else {
                if ( _port <= 0 )
                    log() << "error: a port number is expected when running " << program << " from the shell" << endl;
                verify( _port > 0 );
            }
            if ( _port > 0 ) {
                bool haveDbForPort = registry.isPortRegistered( _port );
                if ( haveDbForPort ) {
                    log() << "already have db for port: " << _port << endl;
                    verify( !haveDbForPort );
                }
            }
        }
Example #7
void trim_trailing_whitespace( string &line )
{
  string::iterator b( line.begin() ), j( line.end() );
  // stop at begin() so empty and all-whitespace lines are handled safely
  while ( j != b && isspace( static_cast<unsigned char>(*(j-1)) ) ) --j;
  line = string( b, j );
}
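A small self-contained check of the helper above (it assumes the function and <cctype>/<string> are in scope; the sample string and main function are illustrative only):

#include <cctype>
#include <iostream>
#include <string>
using std::string;

// trim_trailing_whitespace as defined above

int main()
{
    string s = "value \t ";
    trim_trailing_whitespace(s);
    std::cout << '[' << s << "]\n";   // prints: [value]
    return 0;
}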
Example #8
        bool handleSpecialNamespaces( Request& r , QueryMessage& q ) {
            const char * ns = strstr( r.getns() , ".$cmd.sys." );
            if ( ! ns )
                return false;
            ns += 10;

            BSONObjBuilder b;
            vector<Shard> shards;

            ClientBasic* client = ClientBasic::getCurrent();
            AuthorizationSession* authSession = client->getAuthorizationSession();
            if ( strcmp( ns , "inprog" ) == 0 ) {
                const bool isAuthorized = authSession->isAuthorizedForActionsOnResource(
                        ResourcePattern::forClusterResource(), ActionType::inprog);
                audit::logInProgAuthzCheck(
                        client, q.query, isAuthorized ? ErrorCodes::OK : ErrorCodes::Unauthorized);
                uassert(ErrorCodes::Unauthorized, "not authorized to run inprog", isAuthorized);

                Shard::getAllShards( shards );

                BSONArrayBuilder arr( b.subarrayStart( "inprog" ) );

                for ( unsigned i=0; i<shards.size(); i++ ) {
                    Shard shard = shards[i];
                    ScopedDbConnection conn(shard.getConnString());
                    BSONObj temp = conn->findOne( r.getns() , q.query );
                    if ( temp["inprog"].isABSONObj() ) {
                        BSONObjIterator i( temp["inprog"].Obj() );
                        while ( i.more() ) {
                            BSONObjBuilder x;

                            BSONObjIterator j( i.next().Obj() );
                            while( j.more() ) {
                                BSONElement e = j.next();
                                if ( str::equals( e.fieldName() , "opid" ) ) {
                                    stringstream ss;
                                    ss << shard.getName() << ':' << e.numberInt();
                                    x.append( "opid" , ss.str() );
                                }
                                else if ( str::equals( e.fieldName() , "client" ) ) {
                                    x.appendAs( e , "client_s" );
                                }
                                else {
                                    x.append( e );
                                }
                            }
                            arr.append( x.obj() );
                        }
                    }
                    conn.done();
                }

                arr.done();
            }
            else if ( strcmp( ns , "killop" ) == 0 ) {
                const bool isAuthorized = authSession->isAuthorizedForActionsOnResource(
                        ResourcePattern::forClusterResource(), ActionType::killop);
                audit::logKillOpAuthzCheck(
                        client,
                        q.query,
                        isAuthorized ? ErrorCodes::OK : ErrorCodes::Unauthorized);
                uassert(ErrorCodes::Unauthorized, "not authorized to run killop", isAuthorized);

                BSONElement e = q.query["op"];
                if ( e.type() != String ) {
                    b.append( "err" , "bad op" );
                    b.append( e );
                }
                else {
                    b.append( e );
                    string s = e.String();
                    string::size_type i = s.find( ':' );
                    if ( i == string::npos ) {
                        b.append( "err" , "bad opid" );
                    }
                    else {
                        string shard = s.substr( 0 , i );
                        int opid = atoi( s.substr( i + 1 ).c_str() );
                        b.append( "shard" , shard );
                        b.append( "shardid" , opid );

                        log() << "want to kill op: " << e << endl;
                        Shard s(shard);

                        ScopedDbConnection conn(s.getConnString());
                        conn->findOne( r.getns() , BSON( "op" << opid ) );
                        conn.done();
                    }
                }
            }
            else if ( strcmp( ns , "unlock" ) == 0 ) {
                b.append( "err" , "can't do unlock through mongos" );
            }
            else {
                warning() << "unknown sys command [" << ns << "]" << endl;
                return false;
            }

            BSONObj x = b.done();
            replyToQuery(0, r.p(), r.m(), x);
            return true;
        }
Example #9
  /*
   * Propagation proper
   *
   */
  ExecStatus 
  Pack::propagate(Space& home, const ModEventDelta& med) {
    // Number of items
    int n = bs.size();
    // Number of bins
    int m = l.size();

    {
      Region region(home);

      // Possible sizes for bins
      int* s = region.alloc<int>(m);

      for (int j=m; j--; )
        s[j] = 0;

      // Compute sizes for bins
      if (OffsetView::me(med) == ME_INT_VAL) {
        // Also eliminate assigned items
        int k=0;
        for (int i=0; i<n; i++)
          if (bs[i].assigned()) {
            int j = bs[i].bin().val();
            l[j].offset(l[j].offset() - bs[i].size());
            t -= bs[i].size();
          } else {
            for (ViewValues<IntView> j(bs[i].bin()); j(); ++j)
              s[j.val()] += bs[i].size();
            bs[k++] = bs[i];
          }
        n=k; bs.size(n);
      } else {
        for (int i=n; i--; ) {
          assert(!bs[i].assigned());
          for (ViewValues<IntView> j(bs[i].bin()); j(); ++j)
            s[j.val()] += bs[i].size();
        }
      }

      // Propagate bin loads and compute lower and upper bound
      int min = t, max = t;
      for (int j=m; j--; ) {
        GECODE_ME_CHECK(l[j].gq(home,0));
        GECODE_ME_CHECK(l[j].lq(home,s[j]));
        min -= l[j].max(); max -= l[j].min();
      }

      // Propagate that load must be equal to total size
      for (bool mod = true; mod; ) {
        mod = false; ModEvent me;
        for (int j=m; j--; ) {
          int lj_min = l[j].min();
          me = l[j].gq(home, min + l[j].max());
          if (me_failed(me))
            return ES_FAILED;
          if (me_modified(me)) {
            max += lj_min - l[j].min(); mod = true;
          }
          int lj_max = l[j].max();
          me = l[j].lq(home, max + l[j].min());
          if (me_failed(me))
            return ES_FAILED;
          if (me_modified(me)) {
            min += lj_max - l[j].max(); mod = true;
          }
        }
      }

      if (n == 0) {
        assert(l.assigned());
        return home.ES_SUBSUMED(*this);
      }

    
      {
        TellCache tc(region,m);

        int k=0;
        for (int i=0; i<n; i++) {
          for (ViewValues<IntView> j(bs[i].bin()); j(); ++j) {
            if (bs[i].size() > l[j.val()].max())
              tc.nq(j.val());
            if (s[j.val()] - bs[i].size() < l[j.val()].min()) 
              tc.eq(j.val());
          }
          GECODE_ES_CHECK(tc.tell(home,bs[i].bin()));
          // Eliminate assigned bin
          if (bs[i].assigned()) {
            int j = bs[i].bin().val();
            l[j].offset(l[j].offset() - bs[i].size());
            t -= bs[i].size();
          } else {
            bs[k++] = bs[i];
          }
        }
        n=k; bs.size(n);
      }

    }

    // Only if the propagator is at fixpoint here, continue with the more
    // expensive stage for propagation.
    if (IntView::me(modeventdelta()) != ME_INT_NONE)
      return ES_NOFIX;

    // Now the invariant holds that no more assigned bins exist!
    {
      Region region(home);

      // Size of items
      SizeSetMinusOne* s = region.alloc<SizeSetMinusOne>(m);

      for (int j=m; j--; )
        s[j] = SizeSetMinusOne(region,n);

      // Set up size information
      for (int i=0; i<n; i++) {
        assert(!bs[i].assigned());
        for (ViewValues<IntView> j(bs[i].bin()); j(); ++j) 
          s[j.val()].add(bs[i].size());
      }

      for (int j=m; j--; ) {
        // Can items still be packed into bin?
        if (nosum(static_cast<SizeSet&>(s[j]), l[j].min(), l[j].max()))
          return ES_FAILED;
        int ap, bp;
        // Must there be packed more items into bin?
        if (nosum(static_cast<SizeSet&>(s[j]), l[j].min(), l[j].min(), 
                  ap, bp))
          GECODE_ME_CHECK(l[j].gq(home,bp));
        // Must there be packed less items into bin?
        if (nosum(static_cast<SizeSet&>(s[j]), l[j].max(), l[j].max(), 
                  ap, bp))
          GECODE_ME_CHECK(l[j].lq(home,ap));
      }

      TellCache tc(region,m);

      int k=0;
      for (int i=0; i<n; i++) {
        assert(!bs[i].assigned());
        for (ViewValues<IntView> j(bs[i].bin()); j(); ++j) {
          // Items must be removed in decreasing size!
          s[j.val()].minus(bs[i].size());
          // Can item i still be packed into bin j?
          if (nosum(s[j.val()], 
                    l[j.val()].min() - bs[i].size(),
                    l[j.val()].max() - bs[i].size()))
            tc.nq(j.val());
          // Must item i be packed into bin j?
          if (nosum(s[j.val()], l[j.val()].min(), l[j.val()].max()))
            tc.eq(j.val());
        }
        GECODE_ES_CHECK(tc.tell(home,bs[i].bin()));
        if (bs[i].assigned()) {
          int j = bs[i].bin().val();
          l[j].offset(l[j].offset() - bs[i].size());
          t -= bs[i].size();
        } else {
          bs[k++] = bs[i];
        }
      }
      n=k; bs.size(n);
    }

    // Perform lower bound checking
    if (n > 0) {
      Region region(home);

      // Find capacity estimate (we start from bs[0] as it might be
      // not packable, actually (will be detected later anyway)!
      int c = bs[0].size();
      for (int j=m; j--; )
        c = std::max(c,l[j].max());

      // Count how many items have a certain size (bucket sort)
      int* n_s = region.alloc<int>(c+1);

      for (int i=c+1; i--; )
        n_s[i] = 0;

      // Count unpacked items
      for (int i=n; i--; )
        n_s[bs[i].size()]++;

      // Number of items and remaining bin load
      int nm = n;

      // Only count positive remaining bin loads
      for (int j=m; j--; ) 
        if (l[j].max() < 0) {
          return ES_FAILED;
        } else if (c > l[j].max()) {
          n_s[c - l[j].max()]++; nm++;
        }

      // Sizes of items and remaining bin loads
      int* s = region.alloc<int>(nm);

      // Setup sorted sizes
      {
        int k=0;
        for (int i=c+1; i--; )
          for (int n=n_s[i]; n--; )
            s[k++]=i;
        assert(k == nm);
      }

      // Items in N1 are from 0 ... n1 - 1
      int n1 = 0;
      // Items in N2 are from n1 ... n12 - 1, we count elements in N1 and N2
      int n12 = 0;
      // Items in N3 are from n12 ... n3 - 1 
      int n3 = 0;
      // Free space in N2
      int f2 = 0;
      // Total size of items in N3
      int s3 = 0;

      // Initialize n12 and f2
      for (; (n12 < nm) && (s[n12] > c/2); n12++)
        f2 += c - s[n12];

      // Initialize n3 and s3
      for (n3 = n12; n3 < nm; n3++)
        s3 += s[n3];
        
      // Compute lower bounds
      for (int k=0; k<=c/2; k++) {
        // Make N1 larger by adding elements and N2 smaller
        for (; (n1 < nm) && (s[n1] > c-k); n1++)
          f2 -= c - s[n1];
        assert(n1 <= n12);
        // Make N3 smaller by removing elements
        for (; (s[n3-1] < k) && (n3 > n12); n3--)
          s3 -= s[n3-1];
        // Overspill
        int o = (s3 > f2) ? ((s3 - f2 + c - 1) / c) : 0;
        if (n12 + o > m)
          return ES_FAILED;
      }
    }

    return ES_NOFIX;
  }
Example #10
            bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
                string dbname = cc().database()->name; // this has to come before dbtemprelease
                dbtemprelease temprelease; // we don't touch the db directly

                string shardedOutputCollection = cmdObj["shardedOutputCollection"].valuestrsafe();

                MRSetup mr( dbname , cmdObj.firstElement().embeddedObjectUserCheck() , false );
                
                set<ServerAndQuery> servers;
                
                BSONObjBuilder shardCounts;
                map<string,long long> counts;
                
                BSONObj shards = cmdObj["shards"].embeddedObjectUserCheck();
                vector< auto_ptr<DBClientCursor> > shardCursors;
                BSONObjIterator i( shards );
                while ( i.more() ){
                    BSONElement e = i.next();
                    string shard = e.fieldName();

                    BSONObj res = e.embeddedObjectUserCheck();
                    
                    uassert( 10078 ,  "something bad happened" , shardedOutputCollection == res["result"].valuestrsafe() );
                    servers.insert( shard );
                    shardCounts.appendAs( res["counts"] , shard.c_str() );

                    BSONObjIterator j( res["counts"].embeddedObjectUserCheck() );
                    while ( j.more() ){
                        BSONElement temp = j.next();
                        counts[temp.fieldName()] += temp.numberLong();
                    }

                }

                BSONObj sortKey = BSON( "_id" << 1 );

                ParallelSortClusteredCursor cursor( servers , dbname + "." + shardedOutputCollection ,
                                                    Query().sort( sortKey ) );
                
                
                auto_ptr<Scope> s = globalScriptEngine->getPooledScope( ns );
                ScriptingFunction reduceFunction = s->createFunction( mr.reduceCode.c_str() );
                ScriptingFunction finalizeFunction = 0;
                if ( mr.finalizeCode.size() )
                    finalizeFunction = s->createFunction( mr.finalizeCode.c_str() );

                BSONList values;

                result.append( "result" , mr.finalShort );

                DBDirectClient db;
                
                while ( cursor.more() ){
                    BSONObj t = cursor.next().getOwned();
                                        
                    if ( values.size() == 0 ){
                        values.push_back( t );
                        continue;
                    }
                    
                    if ( t.woSortOrder( *(values.begin()) , sortKey ) == 0 ){
                        values.push_back( t );
                        continue;
                    }
                    

                    db.insert( mr.tempLong , reduceValues( values , s.get() , reduceFunction , 1 , finalizeFunction ) );
                    values.clear();
                    values.push_back( t );
                }
                
                if ( values.size() )
                    db.insert( mr.tempLong , reduceValues( values , s.get() , reduceFunction , 1 , finalizeFunction ) );
                
                long long finalCount = mr.renameIfNeeded( db );
                log(0) << " mapreducefinishcommand " << mr.finalLong << " " << finalCount << endl;

                for ( set<ServerAndQuery>::iterator i=servers.begin(); i!=servers.end(); i++ ){
                    ScopedDbConnection conn( i->_server );
                    conn->dropCollection( dbname + "." + shardedOutputCollection );
                }
                
                result.append( "shardCounts" , shardCounts.obj() );
                
                {
                    BSONObjBuilder c;
                    for ( map<string,long long>::iterator i=counts.begin(); i!=counts.end(); i++ ){
                        c.append( i->first , i->second );
                    }
                    result.append( "counts" , c.obj() );
                }

                return true;
            }
Example #11
void ParameterWidget::applySaved(int pId, int filter_id)
{
  QWidget *found = 0;
  QDate tempdate;
  XSqlQuery qry;
  QString query;
  QString filterValue;
	QDate today = QDate::currentDate();
  int xid, init_filter_id;

	init_filter_id = filter_id;

  QMapIterator<int, QPair<QString, QVariant> > j(_filterValues);
	QPair<QString, ParameterWidgetTypes> tempPair;

  clearFilters();

  if (!parent())
    return;

  if (_filterList->id() == -1)
  {
    emit updated();
    return;
  }

  if (filter_id == 0 && _filterList->id() != -1)
    filter_id = _filterList->id(_filterList->currentIndex());

  QString classname(parent()->objectName());
  if (classname.isEmpty())
    classname = parent()->metaObject()->className();

  query = " SELECT filter_value, "
          "  CASE WHEN (filter_username IS NULL) THEN true "
          "  ELSE false END AS shared "
          " FROM filter "
          " WHERE filter_id=:id ";

  qry.prepare(query);
  qry.bindValue(":id", filter_id );

  qry.exec();

  if (qry.first())
  {
    filterValue = qry.value("filter_value").toString();
    _shared = qry.value("shared").toBool();
  }

	
  QStringList filterRows = filterValue.split("|");
  QString tempFilter = QString();

  int windowIdx = _filtersLayout->rowCount();

	if (filterRows.size() == 1  && pId == 0 && filter_id != 0)
	{
		emit updated();
		return;
	}

  for (int i = 0; i < filterRows.size(); ++i)
  {
    tempFilter = filterRows[i];
    if ( !(tempFilter.isEmpty()) )
    {
      //0 is filterType, 1 is filterValue, 2 is parameterwidgettype
      QStringList tempFilterList = tempFilter.split(":");
			QString key = this->getParameterTypeKey(tempFilterList[0]);
			if (key.isEmpty())
			{
				//parametertype is no longer found, prompt user to delete filter
				if (QMessageBox::question(this, tr("Invalid Filter Set"), tr("This filter set contains an obsolete filter and will be deleted. Do you want to do this?"),
					  QMessageBox::No | QMessageBox::Default,
            QMessageBox::Yes) == QMessageBox::No)
				return;
				else
				{
					QString query = "delete from filter where filter_id=:filter_id";
					XSqlQuery qry;

					qry.prepare(query);
					qry.bindValue(":filter_id", filter_id);
					qry.exec();

					setSavedFilters();
					return;
				}
			}
			else
			{
				this->addParam();

				QLayoutItem *test = _filtersLayout->itemAtPosition(windowIdx, 0)->layout()->itemAt(0);
				XComboBox *mybox = (XComboBox*)test->widget();

      
				int idx = mybox->findText(key);

				mybox->setCurrentIndex(idx);
	
		    found = getFilterWidget(windowIdx);

			  int widgetType = tempFilterList[2].toInt();

			  //grab pointer to newly created filter object
				switch (widgetType)
				{
					case Date:
						DLineEdit *dLineEdit;
						dLineEdit = qobject_cast<DLineEdit*>(found);
						if (dLineEdit != 0)
							dLineEdit->setDate(today.addDays(tempFilterList[1].toInt()), true);
						break;
					case User:
						UsernameCluster *usernameCluster;
							usernameCluster = qobject_cast<UsernameCluster*>(found);
						if (usernameCluster != 0)
							usernameCluster->setUsername(tempFilterList[1]);
						break;
					case Crmacct:
						CRMAcctCluster *crmacctCluster;
						crmacctCluster = qobject_cast<CRMAcctCluster*>(found);
						if (crmacctCluster != 0)
							crmacctCluster->setId(tempFilterList[1].toInt());
							break;
					case Contact:
						ContactCluster *contactCluster;
						contactCluster = qobject_cast<ContactCluster*>(found);
						if (contactCluster != 0)
							contactCluster->setId(tempFilterList[1].toInt());
						break;
					case XComBox:
						XComboBox *xBox;
						xBox = qobject_cast<XComboBox*>(found);
						if (xBox != 0)
						{
							//fix for setid not emitting id signal if id found for filter is first in list
							//set to any other valid id first to fix it
							xBox->setId(2);

							xid = tempFilterList[1].toInt();
							xBox->setId(xid);
						}
						break;
					case Multiselect:
					{
						QTableWidget *tab;
						tab = qobject_cast<QTableWidget*>(found);
						if (tab != 0)
						{
							QStringList   savedval = tempFilterList[1].split(",");
							bool oldblk = tab->blockSignals(true);
							/* the obvious, loop calling tab->selectRow(), gives one selected row,
							 so try this to get multiple selections:
							   make only the desired values selectable,
							   select everything, and
							   connect to a slot that can clean up after us.
							 yuck.
						*/
							for (int j = 0; j < tab->rowCount(); j++)
							{
								if (! savedval.contains(tab->item(j, 0)->data(Qt::UserRole).toString()))
									tab->item(j, 0)->setFlags(tab->item(j, 0)->flags() & (~ Qt::ItemIsSelectable));
							}
							QTableWidgetSelectionRange range(0, 0, tab->rowCount() - 1,
							                               tab->columnCount() - 1);
							tab->setRangeSelected(range, true);
							connect(tab, SIGNAL(itemClicked(QTableWidgetItem*)), this, SLOT(resetMultiselect(QTableWidgetItem*)));

							tab->blockSignals(oldblk);
							storeFilterValue(-1, tab);
						}
					}
					break;
					default:
					{
						QLineEdit *lineEdit;
						lineEdit = qobject_cast<QLineEdit*>(found);
						if (lineEdit != 0)
						{
							lineEdit->setText(tempFilterList[1]);
							storeFilterValue(-1, lineEdit);
						}
						}
								break;
				}//end of switch
				
			}//end of not empty key else
			windowIdx++;
		}//end of if tempfilter not empty
Example #12
bool operator==(Polygon const& lhs, Polygon const& rhs)
{
    if (lhs.getNumberOfPoints() != rhs.getNumberOfPoints())
        return false;

    const std::size_t n(lhs.getNumberOfPoints());
    const std::size_t start_pnt(lhs.getPointID(0));

    // search start point of first polygon in second polygon
    bool nfound(true);
    std::size_t k(0);
    for (; k < n-1 && nfound; k++) {
        if (start_pnt == rhs.getPointID(k)) {
            nfound = false;
            break;
        }
    }

    // case: start point not found in second polygon
    if (nfound) return false;

    // *** determine direction
    // opposite direction
    if (k == n-2) {
        for (k=1; k<n-1; k++) {
            if (lhs.getPointID(k) != rhs.getPointID(n-1-k)) {
                return false;
            }
        }
        return true;
    }

    // same direction - start point of first polygon at arbitrary position in second polygon
    if (lhs.getPointID(1) == rhs.getPointID(k+1)) {
        std::size_t j(k+2);
        for (; j<n-1; j++) {
            if (lhs.getPointID(j-k) != rhs.getPointID(j)) {
                return false;
            }
        }
        j=0; // new start point at second polygon
        for (; j<k+1; j++) {
            if (lhs.getPointID(n-(k+2)+j+1) != rhs.getPointID(j)) {
                return false;
            }
        }
        return true;
    } else {
        // opposite direction with start point of first polygon at arbitrary position
        // *** ATTENTION
        WARN("operator==(Polygon const& lhs, Polygon const& rhs) - not tested case (implementation is probably buggy) - please contact [email protected] mentioning the problem.");
        // in second polygon
        if (lhs.getPointID(1) == rhs.getPointID(k-1)) {
            std::size_t j(k-2);
            for (; j>0; j--) {
                if (lhs.getPointID(k-2-j) != rhs.getPointID(j)) {
                    return false;
                }
            }
            // new start point at second polygon - the point n-1 of a polygon is equal to the
            // first point of the polygon (for this reason: n-2)
            j=n-2;
            for (; j>k-1; j--) {
                if (lhs.getPointID(n-2+j+k-2) != rhs.getPointID(j)) {
                    return false;
                }
            }
            return true;
        } else {
            return false;
        }
    }
}
Example #13
/**
 *   Generates OAuth signature base
 *   @param url Url with encoded parameters
 *   @param method Http method
 *   @param timestamp timestamp
 *   @param nonce random string
 *   @return signature base
 */
QByteArray OAuth::generateSignatureBase(const QUrl& url, HttpMethod method, const QByteArray& timestamp, const QByteArray& nonce)
{
    //OAuth spec. 9.1 https://oauth.net/core/1.0/#anchor14

    //OAuth spec. 9.1.1
    QList<QPair<QByteArray, QByteArray> > urlParameters = url.encodedQueryItems();
    QList<QByteArray> normParameters;

    QListIterator<QPair<QByteArray, QByteArray> > i(urlParameters);
    while(i.hasNext()){
            QPair<QByteArray, QByteArray> queryItem = i.next();
            QByteArray normItem = queryItem.first + '=' + queryItem.second;
            normParameters.append(normItem);
    }

    //consumer key
    normParameters.append(QByteArray("oauth_consumer_key=") + m_oauthConsumerKey);

    //token
    if(!m_oauthToken.isEmpty()){
            normParameters.append(QByteArray("oauth_token=") + m_oauthToken);
    }

    //signature method, only HMAC_SHA1
    normParameters.append(QByteArray("oauth_signature_method=HMAC-SHA1"));
    //time stamp
    normParameters.append(QByteArray("oauth_timestamp=") + timestamp);
    //nonce
    normParameters.append(QByteArray("oauth_nonce=") + nonce);
    //version
    normParameters.append(QByteArray("oauth_version=1.0"));

    //OAuth spec. 9.1.1.1
    qSort(normParameters);

    //OAuth spec. 9.1.1.2
    //QByteArray normString;
    //QListIterator<QByteArray> j(normParameters);
    //while(j.hasNext()){
    //	normString += j.next();
    //	normString += '&';
    //}
    //normString.chop(1);

    QByteArray normString;
    QListIterator<QByteArray> j(normParameters);
    while (j.hasNext()) {
        normString += j.next().toPercentEncoding();
        normString += "%26";
    }
    normString.chop(3);

    //OAuth spec. 9.1.2
    QString urlScheme = url.scheme();
    QString urlPath = url.path();
    QString urlHost = url.host();
    QByteArray normUrl = urlScheme.toUtf8() + "://" + urlHost.toUtf8() + urlPath.toUtf8();

    QByteArray httpm;

    switch (method)
    {
        case OAuth::GET:
                httpm = "GET";
                break;
        case OAuth::POST:
                httpm = "POST";
                break;
        case OAuth::DELETE:
                httpm = "DELETE";
                break;
        case OAuth::PUT:
                httpm = "PUT";
                break;
    }

    //OAuth spec. 9.1.3
    return httpm + '&' + normUrl.toPercentEncoding() + '&' + normString;
}
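generateSignatureBase follows OAuth 1.0 spec sections 9.1.1 through 9.1.3: collect the parameters, sort them, percent-encode, and join with the HTTP method and base URL. A Qt-free sketch of the same base-string construction, under the assumption that the caller already has "name=value" pairs, might look like this:

#include <algorithm>
#include <cstddef>
#include <string>
#include <vector>

// RFC 3986 percent-encoding of everything except unreserved characters.
std::string percentEncode(const std::string& s)
{
    static const char hex[] = "0123456789ABCDEF";
    std::string out;
    for (unsigned char c : s) {
        if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') ||
            c == '-' || c == '.' || c == '_' || c == '~')
            out += static_cast<char>(c);
        else {
            out += '%';
            out += hex[c >> 4];
            out += hex[c & 0x0F];
        }
    }
    return out;
}

// Sketch of an OAuth 1.0 signature base string:
//   METHOD & percent-encoded URL & percent-encoded sorted parameter string
// (mirrors the method above; params are pre-formed "name=value" pairs).
std::string signatureBase(const std::string& method,
                          const std::string& url,
                          std::vector<std::string> params)
{
    std::sort(params.begin(), params.end());
    std::string norm;
    for (std::size_t i = 0; i < params.size(); ++i) {
        if (i) norm += '&';
        norm += params[i];
    }
    return method + '&' + percentEncode(url) + '&' + percentEncode(norm);
}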
Example #14
void FTSSpec::scoreDocument( const BSONObj& obj,
                             const FTSLanguage& parentLanguage,
                             const string& parentPath,
                             bool isArray,
                             TermFrequencyMap* term_freqs ) const {

    if ( _textIndexVersion == TEXT_INDEX_VERSION_1 ) {
        dassert( parentPath == "" );
        dassert( !isArray );
        return _scoreDocumentV1( obj, term_freqs );
    }

    const FTSLanguage& language = _getLanguageToUseV2( obj, parentLanguage );
    Stemmer stemmer( language );
    Tools tools( language, &stemmer, StopWords::getStopWords( language ) );

    // Perform a depth-first traversal of obj, skipping fields not touched by this spec.
    BSONObjIterator j( obj );
    while ( j.more() ) {

        BSONElement elem = j.next();
        string fieldName = elem.fieldName();

        // Skip "language" specifier fields if wildcard.
        if ( wildcard() && languageOverrideField() == fieldName ) {
            continue;
        }

        // Compose the dotted name of the current field:
        // 1. parent path empty (top level): use the current field name
        // 2. parent path non-empty and obj is an array: use the parent path
        // 3. parent path non-empty and obj is a sub-doc: append field name to parent path
        string dottedName = ( parentPath.empty() ? fieldName
                              : isArray ? parentPath
                              : parentPath + '.' + fieldName );

        // Find lower bound of dottedName in _weights.  lower_bound leaves us at the first
        // weight that could possibly match or be a prefix of dottedName.  And if this
        // element fails to match, then no subsequent weight can match, since the weights
        // are lexicographically ordered.
        Weights::const_iterator i = _weights.lower_bound( elem.type() == Object
                                    ? dottedName + '.'
                                    : dottedName );

        // possibleWeightMatch is set if the weight map contains either a match or some item
        // lexicographically larger than fieldName.  This boolean acts as a guard on
        // dereferences of iterator 'i'.
        bool possibleWeightMatch = ( i != _weights.end() );

        // Optimize away two cases, when not wildcard:
        // 1. lower_bound seeks to end(): no prefix match possible
        // 2. lower_bound seeks to a name which is not a prefix
        if ( !wildcard() ) {
            if ( !possibleWeightMatch ) {
                continue;
            }
            else if ( !_matchPrefix( dottedName, i->first ) ) {
                continue;
            }
        }

        // Is the current field an exact match on a weight?
        bool exactMatch = ( possibleWeightMatch && i->first == dottedName );

        double weight = ( possibleWeightMatch ? i->second : DEFAULT_WEIGHT );

        switch ( elem.type() ) {
        case String:
            // Only index strings on exact match or wildcard.
            if ( exactMatch || wildcard() ) {
                _scoreStringV2( tools, elem.valuestr(), term_freqs, weight );
            }
            break;
        case Object:
            // Only descend into a sub-document on proper prefix or wildcard.  Note that
            // !exactMatch is a sufficient test for proper prefix match, because of
            // matchPrefix() continue block above.
            if ( !exactMatch || wildcard() ) {
                scoreDocument( elem.Obj(), language, dottedName, false, term_freqs );
            }
            break;
        case Array:
            // Only descend into arrays from non-array parents or on wildcard.
            if ( !isArray || wildcard() ) {
                scoreDocument( elem.Obj(), language, dottedName, true, term_freqs );
            }
            break;
        default:
            // Skip over all other BSON types.
            break;
        }
    }
}
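The lower_bound trick commented above relies on the weight map being ordered lexicographically by dotted field path; a small standalone sketch of that lookup (the field names and weights are made-up for illustration):

#include <iostream>
#include <map>
#include <string>

int main()
{
    // Dotted field paths mapped to weights, as in the _weights member above.
    std::map<std::string, double> weights = { {"meta.author", 5.0}, {"title", 10.0} };

    const std::string dotted = "meta";                  // current field is a sub-document
    auto i = weights.lower_bound(dotted + '.');         // first key that could be a child
    const bool prefixMatch = i != weights.end() &&
                             i->first.compare(0, dotted.size() + 1, dotted + '.') == 0;
    std::cout << std::boolalpha << prefixMatch << '\n'; // true: "meta.author" lies under "meta"
    return 0;
}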
Example #15
int main()
{
	i();
	j();
	abort(); //printf("shouldn't see this\n");
}
Example #16
void NetworkModel::remove_reaction_rule(const ReactionRule& rr)
{
    reaction_rule_container_type::iterator
        i(std::find(reaction_rules_.begin(), reaction_rules_.end(), rr));
    if (i == reaction_rules_.end())
    {
        throw NotFound("reaction rule not found");
    }

    reaction_rule_container_type::size_type const
        idx(i - reaction_rules_.begin()), last_idx(reaction_rules_.size() - 1);
    if (rr.reactants().size() == 1)
    {
        first_order_reaction_rules_map_type::iterator
            j(first_order_reaction_rules_map_.find(rr.reactants()[0].serial()));
        if (j == first_order_reaction_rules_map_.end())
        {
            throw IllegalState("no corresponding map key found");
        }

        first_order_reaction_rules_map_type::mapped_type::iterator
            k(std::remove((*j).second.begin(), (*j).second.end(), idx));
        if (k == (*j).second.end())
        {
            throw IllegalState("no corresponding map value found");
        }
        else
        {
            (*j).second.erase(k, (*j).second.end());
        }
    }
    else if (rr.reactants().size() == 2)
    {
        second_order_reaction_rules_map_type::iterator
            j(second_order_reaction_rules_map_.find(std::make_pair(
                rr.reactants()[0].serial(), rr.reactants()[1].serial())));
        if (j == second_order_reaction_rules_map_.end())
        {
            throw IllegalState("no corresponding map key found");
        }

        second_order_reaction_rules_map_type::mapped_type::iterator
            k(std::remove((*j).second.begin(), (*j).second.end(), idx));
        if (k == (*j).second.end())
        {
            throw IllegalState("no corresponding map value found");
        }
        else
        {
            (*j).second.erase(k, (*j).second.end());
        }
    }

    if (idx < last_idx)
    {
        reaction_rule_container_type::value_type const
            last_value(reaction_rules_[last_idx]);
        (*i) = last_value;

        if (last_value.reactants().size() == 1)
        {
            first_order_reaction_rules_map_type::iterator
                j(first_order_reaction_rules_map_.find(
                    last_value.reactants()[0].serial()));
            if (j == first_order_reaction_rules_map_.end())
            {
                throw IllegalState("no corresponding map key for the last found");
            }

            first_order_reaction_rules_map_type::mapped_type::iterator
                k(std::remove((*j).second.begin(), (*j).second.end(), last_idx));
            if (k == (*j).second.end())
            {
                throw IllegalState("no corresponding map value found");
            }
            else
            {
                (*j).second.erase(k, (*j).second.end());
            }
            (*j).second.push_back(idx);
        }
        else if (last_value.reactants().size() == 2)
        {
            second_order_reaction_rules_map_type::iterator
                j(second_order_reaction_rules_map_.find(std::make_pair(
                    last_value.reactants()[0].serial(),
                    last_value.reactants()[1].serial())));
            if (j == second_order_reaction_rules_map_.end())
            {
                throw IllegalState("no corresponding map key for the last found");
            }
            second_order_reaction_rules_map_type::mapped_type::iterator
                k(std::remove((*j).second.begin(), (*j).second.end(), last_idx));
            if (k == (*j).second.end())
            {
                throw IllegalState("no corresponding map value found");
            }
            else
            {
                (*j).second.erase(k, (*j).second.end());
            }
            (*j).second.push_back(idx);
        }
    }

    reaction_rules_.pop_back();
}
Example #17
void GEOObjects::mergeGeometries (std::vector<std::string> const & geo_names,
                                  std::string &merged_geo_name)
{
	const size_t n_geo_names(geo_names.size());
	std::vector<size_t> pnt_offsets(n_geo_names, 0);

	// *** merge points
	std::vector<GeoLib::Point*>* merged_points (new std::vector<GeoLib::Point*>);
	for (size_t j(0); j < n_geo_names; j++) {
		const std::vector<GeoLib::Point*>* pnts (this->getPointVec(geo_names[j]));
		if (pnts) {
			size_t n_pnts(0);
			// do not consider stations
			if (dynamic_cast<GeoLib::Station*>((*pnts)[0]) == NULL) {
				n_pnts = pnts->size();
				for (size_t k(0); k < n_pnts; k++)
					merged_points->push_back (new GeoLib::Point (((*pnts)[k])->getCoords()));
			}
			if (n_geo_names - 1 > j) {
				pnt_offsets[j + 1] = n_pnts + pnt_offsets[j];
			}
		}
	}
	addPointVec (merged_points, merged_geo_name, NULL, 1e-6);
	std::vector<size_t> const& id_map (this->getPointVecObj(merged_geo_name)->getIDMap ());

	// *** merge polylines
	std::vector<GeoLib::Polyline*>* merged_polylines (new std::vector<GeoLib::Polyline*>);
	for (size_t j(0); j < n_geo_names; j++) {
		const std::vector<GeoLib::Polyline*>* plys (this->getPolylineVec(geo_names[j]));
		if (plys) {
			for (size_t k(0); k < plys->size(); k++) {
				GeoLib::Polyline* kth_ply_new(new GeoLib::Polyline (*merged_points));
				GeoLib::Polyline const* const kth_ply_old ((*plys)[k]);
				const size_t size_of_kth_ply (kth_ply_old->getNumberOfPoints());
				// copy point ids from old ply to new ply (considering the offset)
				for (size_t i(0); i < size_of_kth_ply; i++) {
					kth_ply_new->addPoint (id_map[pnt_offsets[j] +
					                              kth_ply_old->getPointID(i)]);
				}
				merged_polylines->push_back (kth_ply_new);
			}
		}
	}
	this->addPolylineVec (merged_polylines, merged_geo_name);

	// *** merge surfaces
	std::vector<GeoLib::Surface*>* merged_sfcs (new std::vector<GeoLib::Surface*>);
	for (size_t j(0); j < n_geo_names; j++) {
		const std::vector<GeoLib::Surface*>* sfcs (this->getSurfaceVec(geo_names[j]));
		if (sfcs) {
			for (size_t k(0); k < sfcs->size(); k++) {
				GeoLib::Surface* kth_sfc_new(new GeoLib::Surface (*merged_points));
				GeoLib::Surface const* const kth_sfc_old ((*sfcs)[k]);
				const size_t size_of_kth_sfc (kth_sfc_old->getNTriangles());
				// copy the triangle point ids from the old surface to the new surface (considering the offset)
				for (size_t i(0); i < size_of_kth_sfc; i++) {
					const GeoLib::Triangle* tri ((*kth_sfc_old)[i]);
					const size_t id0 (id_map[pnt_offsets[j] + (*tri)[0]]);
					const size_t id1 (id_map[pnt_offsets[j] + (*tri)[1]]);
					const size_t id2 (id_map[pnt_offsets[j] + (*tri)[2]]);
					kth_sfc_new->addTriangle (id0, id1, id2);
				}
				merged_sfcs->push_back (kth_sfc_new);
			}
		}
	}
	this->addSurfaceVec (merged_sfcs, merged_geo_name);
}
Example #18
void
indel_digt_caller::
get_high_low_het_ratio_lhood(const starling_options& /*opt*/,
                             const starling_deriv_options& dopt,
                             const starling_sample_options& sample_opt,
                             const double indel_error_lnp,
                             const double indel_real_lnp,
                             const double ref_error_lnp,
                             const double ref_real_lnp,
                             const indel_key& ik,
                             const indel_data& id,
                             const double het_ratio,
                             const bool is_tier2_pass,
                             const bool is_use_alt_indel,
                             double& het_lhood_high,
                             double& het_lhood_low) {

    // handle het ratio and its complement in one step:
    const double chet_ratio(1.-het_ratio);

    const double log_het_ratio(std::log(het_ratio));
    const double log_chet_ratio(std::log(chet_ratio));

    const bool is_breakpoint(ik.is_breakpoint());

    het_lhood_high=0;
    het_lhood_low=0;

    //    typedef read_path_scores::alt_indel_t::const_iterator aiter;

    typedef indel_data::score_t::const_iterator siter;
    siter i(id.read_path_lnp.begin()), i_end(id.read_path_lnp.end());
    for (; i!=i_end; ++i) {
        const read_path_scores& path_lnp(i->second);

        // optionally skip tier2 data:
        if ((! is_tier2_pass) && (! path_lnp.is_tier1_read)) continue;

        // get alt path lnp:
        double alt_path_lnp(path_lnp.ref);
#if 0
        if (is_use_alt_indel && path_lnp.is_alt &&
            (path_lnp.alt > alt_path_lnp)) {
            alt_path_lnp=path_lnp.alt;
        }
#else
        if (is_use_alt_indel && (! path_lnp.alt_indel.empty()) ) {
            typedef read_path_scores::alt_indel_t::const_iterator aiter;
            aiter j(path_lnp.alt_indel.begin()), j_end(path_lnp.alt_indel.end());
            for (; j!=j_end; ++j) {
                if (j->second>alt_path_lnp) alt_path_lnp=j->second;
            }
        }
#endif

        const double noindel_lnp(log_sum(alt_path_lnp+ref_real_lnp,path_lnp.indel+indel_error_lnp));
        const double hom_lnp(log_sum(alt_path_lnp+ref_error_lnp,path_lnp.indel+indel_real_lnp));

        // allele ratio convention is that the indel occurs at the
        // het_allele ratio and the alternate allele occurs at
        // (1-het_allele_ratio):
        {
            double log_ref_prob(log_chet_ratio);
            double log_indel_prob(log_het_ratio);
            if (! is_breakpoint) {
                get_het_observed_allele_ratio(path_lnp.read_length,sample_opt.min_read_bp_flank,
                                              ik,het_ratio,log_ref_prob,log_indel_prob);
            }
            const double het_lnp(log_sum(noindel_lnp+log_ref_prob,hom_lnp+log_indel_prob));

            het_lhood_low += integrate_out_sites(dopt,path_lnp.nsite,het_lnp,is_tier2_pass);
        }

        {
            double log_ref_prob(log_het_ratio);
            double log_indel_prob(log_chet_ratio);
            if (! is_breakpoint) {
                get_het_observed_allele_ratio(path_lnp.read_length,sample_opt.min_read_bp_flank,
                                              ik,chet_ratio,log_ref_prob,log_indel_prob);
            }
            const double het_lnp(log_sum(noindel_lnp+log_ref_prob,hom_lnp+log_indel_prob));

            het_lhood_high += integrate_out_sites(dopt,path_lnp.nsite,het_lnp,is_tier2_pass);
        }
    }
}
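Both likelihood routines in this file accumulate probabilities in log space through log_sum; a common formulation of such a helper (a sketch, the project's actual implementation may differ) is:

#include <algorithm>
#include <cmath>

// Numerically stable log(exp(a) + exp(b)), the usual shape of the
// log_sum(...) helper used above (sketch; the real helper may differ).
inline double log_sum(double a, double b)
{
    const double hi = std::max(a, b);
    const double lo = std::min(a, b);
    return hi + std::log1p(std::exp(lo - hi));
}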
Example #19
        bool handleSpecialNamespaces( Request& r , QueryMessage& q ) {
            const char * ns = r.getns();
            ns = strstr( r.getns() , ".$cmd.sys." );
            if ( ! ns )
                return false;
            ns += 10;

            BSONObjBuilder b;
            vector<Shard> shards;

            if ( strcmp( ns , "inprog" ) == 0 ) {
                Shard::getAllShards( shards );

                BSONArrayBuilder arr( b.subarrayStart( "inprog" ) );

                for ( unsigned i=0; i<shards.size(); i++ ) {
                    Shard shard = shards[i];
                    ScopedDbConnection conn( shard );
                    BSONObj temp = conn->findOne( r.getns() , BSONObj() );
                    if ( temp["inprog"].isABSONObj() ) {
                        BSONObjIterator i( temp["inprog"].Obj() );
                        while ( i.more() ) {
                            BSONObjBuilder x;

                            BSONObjIterator j( i.next().Obj() );
                            while( j.more() ) {
                                BSONElement e = j.next();
                                if ( str::equals( e.fieldName() , "opid" ) ) {
                                    stringstream ss;
                                    ss << shard.getName() << ':' << e.numberInt();
                                    x.append( "opid" , ss.str() );
                                }
                                else if ( str::equals( e.fieldName() , "client" ) ) {
                                    x.appendAs( e , "client_s" );
                                }
                                else {
                                    x.append( e );
                                }
                            }
                            arr.append( x.obj() );
                        }
                    }
                    conn.done();
                }

                arr.done();
            }
            else if ( strcmp( ns , "killop" ) == 0 ) {
                BSONElement e = q.query["op"];
                if ( strstr( r.getns() , "admin." ) != 0 ) {
                    b.append( "err" , "unauthorized" );
                }
                else if ( e.type() != String ) {
                    b.append( "err" , "bad op" );
                    b.append( e );
                }
                else {
                    b.append( e );
                    string s = e.String();
                    string::size_type i = s.find( ':' );
                    if ( i == string::npos ) {
                        b.append( "err" , "bad opid" );
                    }
                    else {
                        string shard = s.substr( 0 , i );
                        int opid = atoi( s.substr( i + 1 ).c_str() );
                        b.append( "shard" , shard );
                        b.append( "shardid" , opid );

                        log() << "want to kill op: " << e << endl;
                        Shard s(shard);

                        ScopedDbConnection conn( s );
                        conn->findOne( r.getns() , BSON( "op" << opid ) );
                        conn.done();
                    }
                }
            }
            else if ( strcmp( ns , "unlock" ) == 0 ) {
                b.append( "err" , "can't do unlock through mongos" );
            }
            else {
                log( LL_WARNING ) << "unknown sys command [" << ns << "]" << endl;
                return false;
            }

            BSONObj x = b.done();
            replyToQuery(0, r.p(), r.m(), x);
            return true;
        }
Example #20
void
indel_digt_caller::
get_indel_digt_lhood(const starling_options& opt,
                     const starling_deriv_options& dopt,
                     const starling_sample_options& sample_opt,
                     const double indel_error_prob,
                     const double ref_error_prob,
                     const indel_key& ik,
                     const indel_data& id,
                     const bool is_het_bias,
                     const double het_bias,
                     const bool is_tier2_pass,
                     const bool is_use_alt_indel,
                     double* const lhood) {

    static const double loghalf(-std::log(2.));

    for (unsigned gt(0); gt<STAR_DIINDEL::SIZE; ++gt) lhood[gt] = 0.;

    const bool is_breakpoint(ik.is_breakpoint());

    const double indel_error_lnp(std::log(indel_error_prob));
    const double indel_real_lnp(std::log(1.-indel_error_prob));
    const double ref_error_lnp(std::log(ref_error_prob));
    const double ref_real_lnp(std::log(1.-ref_error_prob));

    //    typedef read_path_scores::alt_indel_t::const_iterator aiter;

    typedef indel_data::score_t::const_iterator siter;
    siter it(id.read_path_lnp.begin()), it_end(id.read_path_lnp.end());
    for (; it!=it_end; ++it) {
        const read_path_scores& path_lnp(it->second);

        // optionally skip tier2 data:
        if ((! is_tier2_pass) && (! path_lnp.is_tier1_read)) continue;

        // get alt path lnp:
        double alt_path_lnp(path_lnp.ref);
#if 0
        if (is_use_alt_indel && path_lnp.is_alt &&
            (path_lnp.alt > alt_path_lnp)) {
            alt_path_lnp=path_lnp.alt;
        }
#else
        if (is_use_alt_indel and (not path_lnp.alt_indel.empty()) ) {
            typedef read_path_scores::alt_indel_t::const_iterator aiter;
            aiter j(path_lnp.alt_indel.begin()), j_end(path_lnp.alt_indel.end());
            for (; j!=j_end; ++j) {
                if (j->second>alt_path_lnp) alt_path_lnp=j->second;
            }
        }
#endif

        const double noindel_lnp(log_sum(alt_path_lnp+ref_real_lnp,path_lnp.indel+indel_error_lnp));
        const double hom_lnp(log_sum(alt_path_lnp+ref_error_lnp,path_lnp.indel+indel_real_lnp));

        // allele ratio convention is that the indel occurs at the
        // het_allele ratio and the alternate allele occurs at
        // (1-het_allele_ratio):

        double log_ref_prob(loghalf);
        double log_indel_prob(loghalf);
        if (not is_breakpoint) {
            static const double het_allele_ratio(0.5);
            get_het_observed_allele_ratio(path_lnp.read_length,sample_opt.min_read_bp_flank,
                                          ik,het_allele_ratio,log_ref_prob,log_indel_prob);
        }
        const double het_lnp(log_sum(noindel_lnp+log_ref_prob,hom_lnp+log_indel_prob));

        lhood[STAR_DIINDEL::NOINDEL] += integrate_out_sites(dopt,path_lnp.nsite,noindel_lnp,is_tier2_pass);
        lhood[STAR_DIINDEL::HOM]     += integrate_out_sites(dopt,path_lnp.nsite,hom_lnp,is_tier2_pass);
        lhood[STAR_DIINDEL::HET]     += integrate_out_sites(dopt,path_lnp.nsite,het_lnp,is_tier2_pass);

#ifdef DEBUG_INDEL_CALL
        //log_os << std::setprecision(8);
        //log_os << "INDEL_CALL i,ref_lnp,indel_lnp,lhood(noindel),lhood(hom),lhood(het): " << i << " " << path_lnp.ref << " " << path_lnp.indel << " " << lhood[STAR_DIINDEL::NOINDEL] << " " << lhood[STAR_DIINDEL::HOM] << " " << lhood[STAR_DIINDEL::HET] << "\n";
#endif
    }


    if (is_het_bias) {
        // loop is currently setup to assume a uniform het ratio subgenotype prior
        const unsigned n_bias_steps(1+static_cast<unsigned>(het_bias/opt.het_bias_max_ratio_inc));
        const double ratio_increment(het_bias/static_cast<double>(n_bias_steps));
        for (unsigned step(0); step<n_bias_steps; ++step) {
            const double het_ratio(0.5+(step+1)*ratio_increment);
            increment_het_ratio_lhood(opt,dopt,sample_opt,
                                      indel_error_lnp,indel_real_lnp,
                                      ref_error_lnp,ref_real_lnp,
                                      ik,id,het_ratio,is_tier2_pass,is_use_alt_indel,lhood);
        }

        const unsigned n_het_subgt(1+2*n_bias_steps);
        const double subgt_log_prior(std::log(static_cast<double>(n_het_subgt)));
        lhood[STAR_DIINDEL::HET] -= subgt_log_prior;
    }
}
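The per-read likelihoods above combine the reference and indel paths with log_sum, i.e. they add probabilities while staying in log space (noindel_lnp, hom_lnp, and the 50/50 het mixture). The Starling log_sum implementation is not part of this excerpt; a minimal stand-in using the usual numerically stable log-sum-exp form would be:

#include <algorithm>
#include <cmath>
#include <limits>

// Stand-in for log_sum as used above: log(exp(a) + exp(b)), computed by
// factoring out the larger term so that neither exponential underflows.
static double log_sum(const double a, const double b) {
    const double m = std::max(a, b);
    if (m == -std::numeric_limits<double>::infinity()) return m; // both terms are zero probability
    return m + std::log1p(std::exp(std::min(a, b) - m));
}

// With both mixture weights equal to loghalf (the non-breakpoint default),
// the het line above reduces to:
//   het_lnp = log_sum(noindel_lnp + loghalf, hom_lnp + loghalf);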
Example #21
0
int* j(int* ip);  // forward declaration assumed so the snippet is self-contained; the definition lives elsewhere in the original test

void test_j(int* ip) {
  int* ip1 = j(ip);
}
Example #22
0
int main(int argc, char* argv[]) {
  // Check arguments.
  if (argc < 2) {
    error(1, 0, "usage: run-as <package-name> [--user <uid>] <command> [<args>]\n");
  }

  // This program runs with CAP_SETUID and CAP_SETGID capabilities on Android
  // production devices. Check user id of caller --- must be 'shell' or 'root'.
  if (getuid() != AID_SHELL && getuid() != AID_ROOT) {
    error(1, 0, "only 'shell' or 'root' users can run this program");
  }

  // Some devices can disable running run-as, such as Chrome OS when running in
  // non-developer mode.
  if (android::base::GetBoolProperty("ro.boot.disable_runas", false)) {
    error(1, 0, "run-as is disabled from the kernel commandline");
  }

  char* pkgname = argv[1];
  int cmd_argv_offset = 2;

  // Get user_id from command line if provided.
  int userId = 0;
  if ((argc >= 4) && !strcmp(argv[2], "--user")) {
    userId = atoi(argv[3]);
    if (userId < 0) error(1, 0, "negative user id: %d", userId);
    cmd_argv_offset += 2;
  }

  // Retrieve package information from system, switching egid so we can read the file.
  gid_t old_egid = getegid();
  if (setegid(AID_PACKAGE_INFO) == -1) error(1, errno, "setegid(AID_PACKAGE_INFO) failed");
  pkg_info info;
  memset(&info, 0, sizeof(info));
  info.name = pkgname;
  if (!packagelist_parse(packagelist_parse_callback, &info)) {
    error(1, errno, "packagelist_parse failed");
  }

  // Handle a multi-user data path
  if (userId > 0) {
    free(info.data_dir);
    if (asprintf(&info.data_dir, "/data/user/%d/%s", userId, pkgname) == -1) {
      error(1, errno, "asprintf failed");
    }
  }

  if (info.uid == 0) {
    error(1, 0, "unknown package: %s", pkgname);
  }
  if (setegid(old_egid) == -1) error(1, errno, "couldn't restore egid");

  // Verify that user id is not too big.
  if ((UID_MAX - info.uid) / AID_USER_OFFSET < (uid_t)userId) {
    error(1, 0, "user id too big: %d", userId);
  }

  // Calculate user app ID.
  uid_t userAppId = (AID_USER_OFFSET * userId) + info.uid;

  // Reject system packages.
  if (userAppId < AID_APP) {
    error(1, 0, "package not an application: %s", pkgname);
  }

  // Reject any non-debuggable package.
  if (!info.debuggable) {
    error(1, 0, "package not debuggable: %s", pkgname);
  }

  // Check that the data directory path is valid.
  if (!check_data_path(info.data_dir, userAppId)) {
    error(1, 0, "package has corrupt installation: %s", pkgname);
  }

  // Ensure that we change all real/effective/saved IDs at the
  // same time to avoid nasty surprises.
  uid_t uid = userAppId;
  uid_t gid = userAppId;
  ScopedMinijail j(minijail_new());
  minijail_change_uid(j.get(), uid);
  minijail_change_gid(j.get(), gid);
  minijail_keep_supplementary_gids(j.get());
  minijail_enter(j.get());

  std::string seinfo = std::string(info.seinfo) + ":fromRunAs";
  if (selinux_android_setcontext(uid, 0, seinfo.c_str(), pkgname) < 0) {
    error(1, errno, "couldn't set SELinux security context");
  }

  // cd into the data directory, and set $HOME correspondingly.
  if (TEMP_FAILURE_RETRY(chdir(info.data_dir)) == -1) {
    error(1, errno, "couldn't chdir to package's data directory");
  }
  setenv("HOME", info.data_dir, 1);

  // Reset parts of the environment, like su would.
  setenv("PATH", _PATH_DEFPATH, 1);
  unsetenv("IFS");

  // Set the user-specific parts for this user.
  passwd* pw = getpwuid(uid);
  setenv("LOGNAME", pw->pw_name, 1);
  setenv("SHELL", pw->pw_shell, 1);
  setenv("USER", pw->pw_name, 1);

  // User specified command for exec.
  if ((argc >= cmd_argv_offset + 1) &&
      (execvp(argv[cmd_argv_offset], argv+cmd_argv_offset) == -1)) {
    error(1, errno, "exec failed for %s", argv[cmd_argv_offset]);
  }

  // Default exec shell.
  execlp(_PATH_BSHELL, "sh", NULL);
  error(1, errno, "exec failed");
}
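The uid arithmetic above maps a package uid into the requested user's uid range with userAppId = AID_USER_OFFSET * userId + info.uid. A small worked example (AID_USER_OFFSET is 100000 on stock Android; the package uid and user id below are illustrative values only):

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t kAidUserOffset = 100000;  // AID_USER_OFFSET on stock Android
    const uint32_t package_uid = 10123;      // hypothetical app uid in user 0
    const uint32_t user_id = 10;             // hypothetical --user argument
    // Same arithmetic as userAppId above: 100000 * 10 + 10123 = 1010123.
    const uint32_t user_app_id = kAidUserOffset * user_id + package_uid;
    printf("user app id: %u\n", user_app_id);
    return 0;
}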
Example #23
0
void tst_QSet::javaIterator()
{
    QSet<QString> set1;
    for (int k = 0; k < 25000; ++k)
        set1.insert(QString::number(k));

    {
        int sum = 0;
        QSetIterator<QString> i(set1);
        while (i.hasNext())
            sum += toNumber(i.next());
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    {
        int sum = 0;
        QSetIterator<QString> i(set1);
        while (i.hasNext()) {
            sum += toNumber(i.peekNext());
            i.next();
        }
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    {
        int sum = 0;
        QSetIterator<QString> i(set1);
        while (i.hasNext()) {
            i.next();
            sum += toNumber(i.peekPrevious());
        }
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    {
        int sum = 0;
        QSetIterator<QString> i(set1);
        i.toBack();
        while (i.hasPrevious())
            sum += toNumber(i.previous());
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    {
        int sum = 0;
        QSetIterator<QString> i(set1);
        i.toBack();
        while (i.hasPrevious()) {
            sum += toNumber(i.peekPrevious());
            i.previous();
        }
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    {
        int sum = 0;
        QSetIterator<QString> i(set1);
        i.toBack();
        while (i.hasPrevious()) {
            i.previous();
            sum += toNumber(i.peekNext());
        }
        QVERIFY(sum == 24999 * 25000 / 2);
    }

    int sum1 = 0;
    int sum2 = 0;
    QSetIterator<QString> i(set1);
    QSetIterator<QString> j(set1);

    int n = 0;
    while (i.hasNext()) {
        QVERIFY(j.hasNext());
        set1.remove(i.peekNext());
        sum1 += toNumber(i.next());
        sum2 += toNumber(j.next());
        ++n;
    }
    QVERIFY(!j.hasNext());
    QVERIFY(sum1 == 24999 * 25000 / 2);
    QVERIFY(sum2 == sum1);
    QVERIFY(set1.isEmpty());
}
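Every QVERIFY above checks the same invariant: each of the 25000 distinct values is visited exactly once, so the accumulated sum must match the closed form for the sum 0 + 1 + ... + 24999, spelled out below.

// Expected value used throughout the test, with n = 25000 inserted values
// numbered 0..n-1:
//   0 + 1 + ... + (n - 1) = n * (n - 1) / 2 = 25000 * 24999 / 2 = 312487500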
Example #24
0
// returns false if no baseline exists
bool atWrapper::setupFTP()
{
    qDebug( "Setting up FTP environment" );

    QString dir = "";
    ftpMkDir( ftpBaseDir );

    ftpBaseDir += "/" + QLibraryInfo::buildKey();

    ftpMkDir( ftpBaseDir );

    ftpBaseDir += "/" + QString( qVersion() );

    ftpMkDir( ftpBaseDir );

    QHashIterator<QString, QString> i(enginesToTest);
    QHashIterator<QString, QString> j(enginesToTest);

    bool haveBaseline = true;
    //Creating the baseline directories for each engine
    while ( i.hasNext() )
    {
        i.next();
        //qDebug() << "Creating dir with key:" << i.key();
        ftpMkDir( ftpBaseDir + "/" +  QString( i.key() ) + ".failed" );
        ftpMkDir( ftpBaseDir + "/" +  QString( i.key() ) + ".diff" );
        if (!ftpMkDir( ftpBaseDir + "/" + QString( i.key() ) + ".baseline" ))
            haveBaseline = false;
    }


    QFtp ftp;
    ftp.connectToHost( ftpHost );
    ftp.login( ftpUser, ftpPass );

    ftp.cd( ftpBaseDir );
    //Deleting previous failed directory and all the files in it, then recreating it.
    while ( j.hasNext() )
    {
        j.next();
        rmDirList.clear();
        rmDirList << ftpBaseDir + "/" + j.key() + ".failed" + "/";
        ftpRmDir( j.key() + ".failed" );
        ftp.rmdir( j.key() + ".failed" );
        ftp.mkdir( j.key() + ".failed" );
        ftp.list();

        while ( ftp.hasPendingCommands() )
            QCoreApplication::instance()->processEvents();

        rmDirList.clear();
        rmDirList << ftpBaseDir + "/" + j.key() + ".diff" + "/";
        ftpRmDir( j.key() + ".diff" );
        ftp.rmdir( j.key() + ".diff" );
        ftp.mkdir( j.key() + ".diff" );
        ftp.list();

        while ( ftp.hasPendingCommands() )
            QCoreApplication::instance()->processEvents();

    }

    ftp.close();

    while ( ftp.hasPendingCommands() )
        QCoreApplication::instance()->processEvents();

    return haveBaseline;
}
Example #25
0
int main (int argc, char* argv[])
{
    ApplicationsLib::LogogSetup logog_setup;

    TCLAP::CmdLine cmd("Computes ids of mesh nodes that are in polygonal "
        "regions and reside on the top surface. The polygonal regions have to "
        "be given in a gml- or gli-file. The found mesh nodes and the associated"
        " area are written as txt and csv data. "
        "The documentation is available at https://docs.opengeosys.org/docs/tools/model-preparation/computesurfacenodeidsinpolygonalregion",
        ' ',
        "0.1");
    TCLAP::ValueArg<std::string> mesh_in("m", "mesh-input-file",
        "the name of the file containing the input mesh", true,
        "", "file name of input mesh");
    cmd.add(mesh_in);
    TCLAP::ValueArg<std::string> geo_in("g", "geo-file",
        "the name of the gml file containing the polygons", true,
        "", "file name of input geometry");
    cmd.add(geo_in);

    cmd.parse(argc, argv);

    std::unique_ptr<MeshLib::Mesh const> mesh(MeshLib::IO::readMeshFromFile(mesh_in.getValue()));
    INFO("Mesh read: %u nodes, %u elements.", mesh->getNNodes(), mesh->getNElements());

    GeoLib::GEOObjects geo_objs;
    GeoLib::IO::readGeometryFromFile(geo_in.getValue(), geo_objs);
    std::vector<std::string> geo_names;
    geo_objs.getGeometryNames(geo_names);
    INFO("Geometry \"%s\" read: %u points, %u polylines.",
        geo_names[0].c_str(),
        geo_objs.getPointVec(geo_names[0])->size(),
        geo_objs.getPolylineVec(geo_names[0])->size());

    MathLib::Vector3 const dir(0.0, 0.0, -1.0);
    double angle(90);

    auto computeElementTopSurfaceAreas = [](MeshLib::Mesh const& mesh,
        MathLib::Vector3 const& d, double angle)
    {
        std::unique_ptr<MeshLib::Mesh> surface_mesh(
            MeshLib::MeshSurfaceExtraction::getMeshSurface(mesh, d, angle));
        return MeshLib::MeshSurfaceExtraction::getSurfaceAreaForNodes(
            *surface_mesh.get());
    };

    std::vector<double> areas(computeElementTopSurfaceAreas(*mesh, dir, angle));
    std::vector<GeoLib::Point*> all_sfc_pnts(
        MeshLib::MeshSurfaceExtraction::getSurfaceNodes(*mesh, dir, angle)
    );

    std::for_each(all_sfc_pnts.begin(), all_sfc_pnts.end(), [](GeoLib::Point* p) { (*p)[2] = 0.0; });

    std::vector<MeshLib::Node*> const& mesh_nodes(mesh->getNodes());
    GeoLib::PolylineVec const* ply_vec(
        geo_objs.getPolylineVecObj(geo_names[0])
    );
    std::vector<GeoLib::Polyline*> const& plys(*(ply_vec->getVector()));

    for (std::size_t j(0); j<plys.size(); j++) {
        if (! plys[j]->isClosed()) {
            continue;
        }
        std::string polygon_name;
        ply_vec->getNameOfElement(plys[j], polygon_name);
        if (polygon_name.empty())
            polygon_name = "Polygon-" + std::to_string(j);
        // create Polygon from Polyline
        GeoLib::Polygon const& polygon(*(plys[j]));
        // ids of mesh nodes on surface that are within the given polygon
        std::vector<std::pair<std::size_t, double>> ids_and_areas;
        for (std::size_t k(0); k<all_sfc_pnts.size(); k++) {
            GeoLib::Point const& pnt(*(all_sfc_pnts[k]));
            if (polygon.isPntInPolygon(pnt)) {
                ids_and_areas.push_back(std::make_pair(pnt.getID(), areas[k]));
            }
        }
        if (ids_and_areas.empty()) {
            ERR("Polygonal part of surface \"%s\" doesn't contain any nodes. No "
                "output will be generated.", polygon_name.c_str());
            continue;
        }

        std::string const out_path(BaseLib::extractPath(geo_in.getValue()));
        std::string id_and_area_fname(out_path + polygon_name);
        std::string csv_fname(out_path + polygon_name);
        id_and_area_fname += std::to_string(j) + ".txt";
        csv_fname += std::to_string(j) + ".csv";
        INFO("Polygonal part of surface \"%s\" contains %u nodes. Writing to"
            " files \"%s\" and \"%s\".",
            polygon_name.c_str(),
            ids_and_areas.size(),
            id_and_area_fname.c_str(),
            csv_fname.c_str()
        );
        writeToFile(id_and_area_fname, csv_fname, ids_and_areas, mesh_nodes);
    }

    std::for_each(all_sfc_pnts.begin(), all_sfc_pnts.end(), std::default_delete<GeoLib::Point>());

    return 0;
}
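For reference, the two required TCLAP options defined above give an invocation roughly like the following; the binary name is assumed from the documentation URL and the file names are placeholders:

// ComputeSurfaceNodeIdsInPolygonalRegion -m mesh.vtu -g polygons.gml
//
// For every closed polyline in polygons.gml, the node ids and associated
// areas are written to "<polygon-name><j>.txt" and "<polygon-name><j>.csv"
// next to the input geometry file.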
Example #26
0
void ModifyMeshProperties::setMaterial(const GEOLIB::Polygon& polygon, size_t mat_id)
{
	// get all nodes of mesh
	const std::vector<MeshLib::CNode*>& msh_nodes(_mesh->getNodeVector());

	// *** rotate polygon to xy_plane
	// 1 copy all points
	std::vector<GEOLIB::Point*> polygon_points;
	for (size_t k(0); k < polygon.getNumberOfPoints(); k++)
		polygon_points.push_back(new GEOLIB::Point(*(polygon[k])));
	// 2 rotate points
	MathLib::Vector plane_normal_polygon(0.0, 0.0, 0.0);
	double d_polygon(0.0);
	MathLib::getNewellPlane(polygon_points, plane_normal_polygon, d_polygon);

	//	std::cout << "plane normal: " << plane_normal_polygon << std::endl;
	MathLib::Vector tmp_plane_normal_polygon(plane_normal_polygon); // NW need to keep plane_normal_polygon for later use
	MathLib::rotatePointsToXY(tmp_plane_normal_polygon, polygon_points);

	// 3 create new polygon
	GEOLIB::Polyline rot_polyline(polygon_points);
	for (size_t k(0); k < polygon.getNumberOfPoints(); k++)
		rot_polyline.addPoint(k);
	rot_polyline.addPoint(0);
	GEOLIB::Polygon rot_polygon(rot_polyline);

	//	std::cout << "Polygon: " << std::endl;
	//	for (size_t k(0); k<polygon.getNumberOfPoints(); k++) {
	//		std::cout << k << ": " << *(polygon[k]) << std::endl;
	//	}
	//	std::cout << std::endl;
	//	std::cout << "rotated polygon: " << std::endl;
	//	for (size_t k(0); k<rot_polygon.getNumberOfPoints(); k++) {
	//		std::cout << k << ": " << *(rot_polygon[k]) << std::endl;
	//	}
	//	std::cout << std::endl;

	// *** rotate mesh nodes to xy-plane
	// 1 copy all mesh nodes to GEOLIB::Points
	std::vector<GEOLIB::Point*> mesh_nodes_as_points;
	for (size_t j(0); j < msh_nodes.size(); j++)
		mesh_nodes_as_points.push_back(new GEOLIB::Point(msh_nodes[j]->getData()));
	// 2 rotate the Points
	MathLib::rotatePointsToXY(plane_normal_polygon, mesh_nodes_as_points);

	// get all elements of mesh
	const std::vector<MeshLib::CElem*>& msh_elem(_mesh->getElementVector());

	// *** perform search and modify mesh
	const size_t msh_elem_size(msh_elem.size());
	for (size_t j(0); j < msh_elem_size; j++)
	{
		// indices of nodes of the j-th element
		const Math_Group::vec<long>& nodes_indices(msh_elem[j]->GetNodeIndeces());
		//		size_t k;
		//		for (k = 0; k<nodes_indices.Size(); k++) {
		//			if (! rot_polygon.isPntInPolygon(*(mesh_nodes_as_points[nodes_indices[k]]))) {
		//				break;
		//			}
		//		}
		//
		//		if (k == nodes_indices.Size()) {
		//			msh_elem[j]->setPatchIndex (mat_id);
		//		}

		//		size_t cnt (0);
		//		for (k = 0; k < nodes_indices.Size(); k++)
		//			if (rot_polygon.isPntInPolygon(*(mesh_nodes_as_points[nodes_indices[k]])))
		//				cnt++;
		//
		//		if (cnt >= 2)
		//			msh_elem[j]->setPatchIndex (mat_id);

		double center[3] = {0.0, 0.0, 0.0};
		for (size_t k(0); k < nodes_indices.Size(); k++)
		{
			center[0] += (*(mesh_nodes_as_points[nodes_indices[k]]))[0];
			center[1] += (*(mesh_nodes_as_points[nodes_indices[k]]))[1];
			//			center[2] += (*(mesh_nodes_as_points[nodes_indices[k]]))[2];
		}
		center[0] /= nodes_indices.Size();
		center[1] /= nodes_indices.Size();
		//		center[2] /= nodes_indices.Size();

		//		std::cout << "center of element " << j << ": " << center[0] << ", " << center[1] << ", " << center[2] <<
		// std::endl;

		if (rot_polygon.isPntInPolygon(center[0], center[1], center[2]))
		{
			msh_elem[j]->setPatchIndex(mat_id);
		}
	}

	for (size_t k(0); k < polygon_points.size(); k++)
		delete polygon_points[k];
	for (size_t j(0); j < mesh_nodes_as_points.size(); j++)
		delete mesh_nodes_as_points[j];
}
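MathLib::getNewellPlane above supplies the polygon's plane normal that the points are rotated by; its implementation is not part of this excerpt. As a rough sketch of what Newell's method computes (a stand-in, not the MathLib code):

#include <array>
#include <cmath>
#include <cstddef>
#include <vector>

// Newell's method: accumulate a plane normal from consecutive point pairs of
// a closed polygon, then normalize. Robust for non-convex and slightly
// non-planar polygons, which is why it is the usual choice before rotating
// a polygon into the xy-plane.
std::array<double, 3> newellNormal(const std::vector<std::array<double, 3>>& pts)
{
	std::array<double, 3> n = {0.0, 0.0, 0.0};
	const std::size_t cnt = pts.size();
	for (std::size_t k = 0; k < cnt; ++k)
	{
		const std::array<double, 3>& p = pts[k];
		const std::array<double, 3>& q = pts[(k + 1) % cnt];
		n[0] += (p[1] - q[1]) * (p[2] + q[2]);
		n[1] += (p[2] - q[2]) * (p[0] + q[0]);
		n[2] += (p[0] - q[0]) * (p[1] + q[1]);
	}
	const double len = std::sqrt(n[0] * n[0] + n[1] * n[1] + n[2] * n[2]);
	if (len > 0.0)
		for (double& c : n) c /= len;
	return n;
}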
Example #27
0
void
ITunesImporterWorker::run()
{
    DEBUG_BLOCK
    debug() << "file:" << m_databaseLocation;
    QFile* file = new QFile( m_databaseLocation );
    if( !file->exists() )
    {
        debug() << "COULDN'T FIND DB FILE!";
        emit importError( "" );
        m_failed = true;
        return;
    }
    if ( !file->open( QIODevice::ReadOnly ) )
    {
        debug() << "COULDN'T OPEN DB FILE!";
        emit importError( "" );
        m_failed = true;
        return;
    }
    setDevice( file );

    //debug() << "got element:" << name().toString();

    while( !atEnd() )
    {
        if( m_aborted )
            return;

        readNext();

        if ( name() == "key" && readElementText() == "Tracks" ) // ok, we're at the start
        {
            readNext();
            readNext();
            readNext(); // this skips the first all-encompassing <dict> tag 
            debug() << "got start of tracks";
            while( !atEnd() && !( isEndElement() && name() == "dict" ) )
            {
                if( m_aborted )
                    return;

                //debug() << "reading element name:" << name().toString();
                if( isStartElement() && name() == "dict") // this is a track item!
                {
                    readTrackElement();
                }
                readNext();
            }
        }
    }

    if( m_tracksForInsert.size() > 0 )
    {
        Collections::CollectionLocation *location = CollectionManager::instance()->primaryCollection()->location();

        QMapIterator<Meta::TrackPtr, QString> j(m_tracksForInsert);
        while (j.hasNext()) {
            j.next();
            location->insert( j.key(), j.value() );
        }
    }

    debug() << "done importing xml file";
}
Example #28
0
mu::io::token_result mu::io::lexer::complex_identifier ()
{
    assert (source [0] == U'{');
    mu::io::token_result result ({nullptr, nullptr});
    auto identifier (new mu::io::identifier (mu::core::region (position, position)));
    auto last (position);
    consume (1);
    auto have_terminator (false);
    mu::string terminator;
    while (result.token == nullptr && result.error == nullptr && !have_terminator && terminator.size () <= 16)
    {
        auto character (source [0]);
        if (character == U'}')
        {
            last = position;
            have_terminator = true;
        }
        else
        {
            terminator.push_back (character);
        }
        consume (1);
    }
    if (terminator.size () > 16)
    {
        result.error = new mu::core::error_string (U"Terminator token is greater than 16 characters", mu::core::error_type::terminator_token_too_long, mu::core::region (identifier->region.first, position));
    }
    
    while (result.token == nullptr && result.error == nullptr)
    {
        auto have_terminator (true);
        for (size_t i (0), j (terminator.size ()); i < j && have_terminator; ++i)
        {
            have_terminator = (terminator [i] == source [i]);
        }
        if (have_terminator)
        {
            result.token = identifier;
            if (!terminator.empty ())
            {
                consume (terminator.size () - 1);
                identifier->region.last = position;
                consume (1);
            }
            else
            {
                identifier->region.last = last;
            }
        }
        else
        {
            auto character (source [0]);
            if (character != U'\U0000FFFF')
            {
                identifier->string.push_back (character);
                consume (1);
            }
            else
            {
                result.error = new mu::core::error_string (U"End of stream inside complex identifier", mu::core::error_type::end_of_stream_inside_complex_identifier, mu::core::region (position, position));
            }
        }
    }
    return result;
}
Example #29
0
  void TheoreticalSpectrumGenerator::addPrecursorPeaks(RichPeakSpectrum & spec, const AASequence & peptide, Int charge) const
  {
    RichPeak1D p;

    // precursor peak
    double mono_pos = peptide.getMonoWeight(Residue::Full, charge) / double(charge);
    if (add_isotopes_)
    {
      IsotopeDistribution dist = peptide.getFormula(Residue::Full, charge).getIsotopeDistribution(max_isotope_);
      UInt j(0);
      for (IsotopeDistribution::ConstIterator it = dist.begin(); it != dist.end(); ++it, ++j)
      {
        p.setMZ((double)(mono_pos + j * Constants::NEUTRON_MASS_U) / (double)charge);
        p.setIntensity(pre_int_ *  it->second);
        if (add_metainfo_)
        {
          String name("[M+H]+");
          if (charge == 2)
          {
            name = "[M+2H]++";
          }
          p.setMetaValue("IonName", name);
        }
        spec.push_back(p);
      }
    }
    else
    {
      p.setMZ(mono_pos);
      p.setIntensity(pre_int_);
      if (add_metainfo_)
      {
        String name("[M+H]+");
        if (charge == 2)
        {
          name = "[M+2H]++";
        }
        p.setMetaValue("IonName", name);
      }
      spec.push_back(p);
    }
    // loss peaks of the precursor

    //loss of water
    EmpiricalFormula ion = peptide.getFormula(Residue::Full, charge) - EmpiricalFormula("H2O");
    mono_pos = ion.getMonoWeight() / double(charge);
    if (add_isotopes_)
    {
      IsotopeDistribution dist = ion.getIsotopeDistribution(max_isotope_);
      UInt j(0);
      for (IsotopeDistribution::ConstIterator it = dist.begin(); it != dist.end(); ++it, ++j)
      {
        p.setMZ((double)(mono_pos + j * Constants::NEUTRON_MASS_U) / (double)charge);
        p.setIntensity(pre_int_H2O_ *  it->second);
        if (add_metainfo_)
        {
          String name("[M+H]-H2O+");
          if (charge == 2)
          {
            name = "[M+2H]-H2O++";
          }
          p.setMetaValue("IonName", name);
        }
        spec.push_back(p);
      }
    }
    else
    {
      p.setMZ(mono_pos);
      p.setIntensity(pre_int_H2O_);
      if (add_metainfo_)
      {
        String name("[M+H]-H2O+");
        if (charge == 2)
        {
          name = "[M+2H]-H2O++";
        }
        p.setMetaValue("IonName", name);
      }
      spec.push_back(p);
    }

    //loss of ammonia
    ion = peptide.getFormula(Residue::Full, charge) - EmpiricalFormula("NH3");
    mono_pos = ion.getMonoWeight() / double(charge);
    if (add_isotopes_)
    {
      IsotopeDistribution dist = ion.getIsotopeDistribution(max_isotope_);
      UInt j(0);
      for (IsotopeDistribution::ConstIterator it = dist.begin(); it != dist.end(); ++it, ++j)
      {
        p.setMZ((double)(mono_pos + j * Constants::NEUTRON_MASS_U) / (double)charge);
        p.setIntensity(pre_int_NH3_ *  it->second);
        if (add_metainfo_)
        {
          String name("[M+H]-NH3+");
          if (charge == 2)
          {
            name = "[M+2H]-NH3++";
          }
          p.setMetaValue("IonName", name);
        }
        spec.push_back(p);
      }
    }
    else
    {
      p.setMZ(mono_pos);
      p.setIntensity(pre_int_NH3_);
      if (add_metainfo_)
      {
        String name("[M+H]-NH3+");
        if (charge == 2)
        {
          name = "[M+2H]-NH3++";
        }
        p.setMetaValue("IonName", name);
      }
      spec.push_back(p);
    }

    spec.sortByPosition();
  }
Example #30
0
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
MStatus CVstSelectCoincidentFacesCmd::DoSelect()
{
	MSelectionList meshList;
	GetSpecifiedMeshes( meshList );

	MSelectionList coincidentList;

	MDagPath mDagPath;
	MObject cObj;

	MPointArray points;
	MIntArray iIndexes;
	MIntArray jIndexes;

	uint iCount;
	bool addI;
	bool same;
	bool foundVertex;

	double tolerance( MPoint_kTol );
	if ( m_undo.ArgDatabase().isFlagSet( kOptTolerance ) )
	{
		MDistance optTolerance;
		m_undo.ArgDatabase().getFlagArgument( kOptTolerance, 0U, optTolerance );
		tolerance = optTolerance.as( MDistance::internalUnit() );
	}

	for ( MItSelectionList sIt( meshList ); !sIt.isDone(); sIt.next() )
	{
		if ( !sIt.getDagPath( mDagPath, cObj ) )
			continue;

		MFnSingleIndexedComponent sFn;
		MObject sObj( sFn.create( MFn::kMeshPolygonComponent ) );

		MFnMesh meshFn( mDagPath );
		meshFn.getPoints( points );

		if ( !sIt.hasComponents() )
		{
			const uint nFaces( meshFn.numPolygons() );
			for ( uint i( 0U ); i != nFaces; ++i )
			{
				meshFn.getPolygonVertices( i, iIndexes );
				iCount = iIndexes.length();

				addI = false;

				for ( uint j( i + 1 ); j < nFaces; ++j )
				{
					meshFn.getPolygonVertices( j, jIndexes );
					if ( jIndexes.length() == iCount )
					{
						same = true;

						for ( uint k( 0U ); k != iCount; ++k )
						{
							foundVertex = false;

							const MPoint &kPoint( points[ iIndexes[ k ] ] );

							for ( uint l( 0U ); l < iCount; ++l )
							{
								if ( kPoint.isEquivalent( points[ jIndexes[ l ] ], tolerance ) )
								{
									foundVertex = true;
									break;
								}
							}

							if ( !foundVertex )
							{
								same = false;
								break;
							}
						}

						if ( same )
						{
							addI = true;
							sFn.addElement( j );
						}
					}
				}

				if ( addI )
				{
					sFn.addElement( i );
				}
			}
		}
		else
		{
			MFnSingleIndexedComponent cFn( cObj );
			MIntArray cA;
			cFn.getElements( cA );

			const uint nFaces( cA.length() );

			for ( uint i( 0U ); i != nFaces; ++i )
			{
				meshFn.getPolygonVertices( cA[ i ], iIndexes );
				iCount = iIndexes.length();

				addI = false;

				for ( uint j( i + 1U ); j < nFaces; ++j )
				{
					meshFn.getPolygonVertices( cA[ j ], jIndexes );
					if ( jIndexes.length() == iCount )
					{
						same = true;

						for ( uint k( 0U ); k != iCount; ++k )
						{
							foundVertex = false;

							const MPoint &kPoint( points[ iIndexes[ k ] ] );

							for ( uint l( 0U ); l < iCount; ++l )
							{
								if ( kPoint.isEquivalent( points[ jIndexes[ l ] ], tolerance ) )
								{
									foundVertex = true;
									break;
								}
							}

							if ( !foundVertex )
							{
								same = false;
								break;
							}
						}

						if ( same )
						{
							addI = true;
							sFn.addElement( cA[ j ] );
						}
					}
				}

				if ( addI )
				{
					sFn.addElement( cA[ i ] );
				}
			}
		}

		if ( sFn.elementCount() > 0 )
		{
			coincidentList.add( mDagPath, sObj );
		}
		else
		{
			MSelectionList tmpList;
			tmpList.add( mDagPath, cObj );
			MStringArray tmpA;
			tmpList.getSelectionStrings( tmpA );

			minfo << "No coincident faces on:";
			for ( uint i( 0U ); i != tmpA.length(); ++i )
			{
				minfo << " " << tmpA[ i ];
			}
			minfo << std::endl;
		}
	}

	if ( coincidentList.length() )
	{
		MGlobal::setActiveSelectionList( coincidentList );
		MStringArray tmpA;
		coincidentList.getSelectionStrings( tmpA );
		setResult( tmpA );
	}
	else
	{
		if ( meshList.length() > 0U )
		{
			minfo << "No coincident faces found" << std::endl;
		}
	}

	return MS::kSuccess;
}