Code example #1
int CDfuPlusHelper::despray()
{
    const char* srcname = globals->queryProp("srcname");
    if(srcname == NULL)
        throw MakeStringException(-1, "srcname not specified");
    
    const char* dstxml = globals->queryProp("dstxml");
    const char* dstip = globals->queryProp("dstip");
    const char* dstfile = globals->queryProp("dstfile");

    bool nowait = globals->getPropBool("nowait", false);

    MemoryBuffer xmlbuf;
    if(dstxml == NULL)
    {
        if(dstfile == NULL)
            throw MakeStringException(-1, "dstfile not specified");
        if(dstip == NULL) {
#ifdef DAFILESRV_LOCAL
            progress("dstip not specified - assuming spray from local machine\n");
            dstip = ".";
#else
            throw MakeStringException(-1, "dstip not specified");
#endif
        }
    }
    else
    {
        if(dstip != NULL || dstfile != NULL)
            throw MakeStringException(-1, "dstip/dstfile and dstxml can't be used at the same time");
        StringBuffer buf;
        buf.loadFile(dstxml);
        int len = buf.length();
        xmlbuf.setBuffer(len, buf.detach(), true);
    }

    if(dstxml == NULL)
        info("\nDespraying %s to host %s file %s\n", srcname, dstip, dstfile);
    else
        info("\nDespraying %s\n", srcname);

    Owned<IClientDespray> req = sprayclient->createDesprayRequest();
    req->setSourceLogicalName(srcname);
    if(dstxml == NULL)
    {
        req->setDestIP(dstip);
        req->setDestPath(dstfile);
    }
    else
        req->setDstxml(xmlbuf);

    req->setOverwrite(globals->getPropBool("overwrite", false));
    if(globals->hasProp("connect"))
        req->setMaxConnections(globals->getPropInt("connect"));
    if(globals->hasProp("throttle"))
        req->setThrottle(globals->getPropInt("throttle"));
    if(globals->hasProp("transferbuffersize"))
        req->setTransferBufferSize(globals->getPropInt("transferbuffersize"));

    if(globals->hasProp("norecover"))
        req->setNorecover(globals->getPropBool("norecover", false));
    else if(globals->hasProp("noRecover"))
        req->setNorecover(globals->getPropBool("noRecover", false));

    if(globals->hasProp("splitprefix"))
        req->setSplitprefix(globals->queryProp("splitprefix"));
    else if(globals->hasProp("splitPrefix"))
        req->setSplitprefix(globals->queryProp("splitPrefix"));

    if(globals->hasProp("wrap"))
        req->setWrap(globals->getPropBool("wrap", false));
    if(globals->hasProp("multicopy"))
        req->setMultiCopy(globals->getPropBool("multicopy", false));
    else if(globals->hasProp("multiCopy"))
        req->setMultiCopy(globals->getPropBool("multiCopy", false));

    if(globals->hasProp("compress"))
        req->setCompress(globals->getPropBool("compress", false));
    if(globals->hasProp("encrypt"))
        req->setEncrypt(globals->queryProp("encrypt"));
    if(globals->hasProp("decrypt"))
        req->setDecrypt(globals->queryProp("decrypt"));


    SocketEndpoint localep;
    StringBuffer localeps;
    if (checkLocalDaFileSvr(dstip,localep))
        dstip = localep.getUrlStr(localeps).str();
    Owned<IClientDesprayResponse> result = sprayclient->Despray(req);
    const char* wuid = result->getWuid();
    if(wuid == NULL || *wuid == '\0')
        exc(result->getExceptions(),"despraying");
    else
    {
        const char* jobname = globals->queryProp("jobname");
        if(jobname && *jobname)
            updatejobname(wuid, jobname);

        info("Submitted WUID %s\n", wuid);
        if(!nowait)
            waitToFinish(wuid);
    }

    return 0;
}
Code example #2
void train_one_round(const Fast5Map& name_map, size_t round)
{
    const PoreModelMap& current_models = PoreModelSet::get_models(opt::trained_model_type);

    // Initialize the training summary stats for each kmer for each model
    ModelTrainingMap model_training_data;
    for(auto current_model_iter = current_models.begin(); current_model_iter != current_models.end(); current_model_iter++) {
        // one summary entry per kmer in the model
        std::vector<StateSummary> summaries(current_model_iter->second.get_num_states());
        model_training_data[current_model_iter->first] = summaries;
    }

    // Open the BAM and iterate over reads

    // load bam file
    htsFile* bam_fh = sam_open(opt::bam_file.c_str(), "r");
    assert(bam_fh != NULL);

    // load bam index file
    std::string index_filename = opt::bam_file + ".bai";
    hts_idx_t* bam_idx = bam_index_load(index_filename.c_str());
    assert(bam_idx != NULL);

    // read the bam header
    bam_hdr_t* hdr = sam_hdr_read(bam_fh);

    // load reference fai file
    faidx_t *fai = fai_load(opt::genome_file.c_str());

    hts_itr_t* itr;

    // If processing a region of the genome, only emit events aligned to this window
    int clip_start = -1;
    int clip_end = -1;

    if(opt::region.empty()) {
        // TODO: is this valid?
        itr = sam_itr_queryi(bam_idx, HTS_IDX_START, 0, 0);
    } else {
        fprintf(stderr, "Region: %s\n", opt::region.c_str());
        itr = sam_itr_querys(bam_idx, hdr, opt::region.c_str());
        hts_parse_reg(opt::region.c_str(), &clip_start, &clip_end);
    }

#ifndef H5_HAVE_THREADSAFE
    if(opt::num_threads > 1) {
        fprintf(stderr, "You enabled multi-threading but you do not have a threadsafe HDF5\n");
        fprintf(stderr, "Please recompile nanopolish's built-in libhdf5 or run with -t 1\n");
        exit(1);
    }
#endif

    // Initialize iteration
    std::vector<bam1_t*> records(opt::batch_size, NULL);
    for(size_t i = 0; i < records.size(); ++i) {
        records[i] = bam_init1();
    }

    int result;
    size_t num_reads_realigned = 0;
    size_t num_records_buffered = 0;
    Progress progress("[methyltrain]");

    do {
        assert(num_records_buffered < records.size());
        
        // read a record into the next slot in the buffer
        result = sam_itr_next(bam_fh, itr, records[num_records_buffered]);
        num_records_buffered += result >= 0;

        // realign if we've hit the max buffer size or reached the end of file
        if(num_records_buffered == records.size() || result < 0) {
            #pragma omp parallel for            
            for(size_t i = 0; i < num_records_buffered; ++i) {
                bam1_t* record = records[i];
                size_t read_idx = num_reads_realigned + i;
                if( (record->core.flag & BAM_FUNMAP) == 0) {
                    add_aligned_events(name_map, fai, hdr, record, read_idx, clip_start, clip_end, round, model_training_data);
                }
            }

            num_reads_realigned += num_records_buffered;
            num_records_buffered = 0;
        }

        if(opt::progress) {
            fprintf(stderr, "Realigned %zu reads in %.1lfs\r", num_reads_realigned, progress.get_elapsed_seconds());
        }
    } while(result >= 0);
    
    assert(num_records_buffered == 0);
    progress.end();
    
    // open the summary file
    std::stringstream summary_fn;
    summary_fn << "methyltrain" << opt::out_suffix << ".summary";
    FILE* summary_fp = fopen(summary_fn.str().c_str(), "w");
    fprintf(summary_fp, "model_short_name\tkmer\tnum_matches\tnum_skips\t"
                         "num_stays\tnum_events_for_training\twas_trained\t"
                         "trained_level_mean\ttrained_level_stdv\n");

    // open the tsv file with the raw training data
    std::stringstream training_fn;
    training_fn << "methyltrain" << opt::out_suffix << ".round" << round << ".events.tsv";
    std::ofstream training_ofs(training_fn.str());

    // write out a header for the training data
    StateTrainingData::write_header(training_ofs);

    // iterate over models: template, complement_pop1, complement_pop2
    for(auto model_training_iter = model_training_data.begin(); 
             model_training_iter != model_training_data.end(); model_training_iter++) {
        
        // Initialize the trained model from the input model
        auto current_model_iter = current_models.find(model_training_iter->first);
        assert(current_model_iter != current_models.end());

        std::string model_name = model_training_iter->first;
        std::string model_short_name = current_model_iter->second.metadata.get_short_name();
        
        // Initialize the new model from the current model
        PoreModel updated_model = current_model_iter->second;
        uint32_t k = updated_model.k;
        const std::vector<StateSummary>& summaries = model_training_iter->second;

        // Generate the complete set of kmers
        std::string gen_kmer(k, 'A');
        std::vector<std::string> all_kmers;
        for(size_t ki = 0; ki < summaries.size(); ++ki) {
            all_kmers.push_back(gen_kmer);
            mtrain_alphabet->lexicographic_next(gen_kmer);
        }
        assert(gen_kmer == std::string(k, 'A'));
        assert(all_kmers.front() == std::string(k, 'A'));
        assert(all_kmers.back() == std::string(k, 'T'));

        // Update means for each kmer
        #pragma omp parallel for
        for(size_t ki = 0; ki < summaries.size(); ++ki) {
            assert(ki < all_kmers.size());
            std::string kmer = all_kmers[ki];

            // write the observed values to a tsv file
            #pragma omp critical
            {
                for(size_t ei = 0; ei < summaries[ki].events.size(); ++ei) {
                    summaries[ki].events[ei].write_tsv(training_ofs, model_short_name, kmer);
                }

            }

            bool is_m_kmer = kmer.find('M') != std::string::npos;
            bool update_kmer = opt::training_target == TT_ALL_KMERS ||
                               (is_m_kmer && opt::training_target == TT_METHYLATED_KMERS) ||
                               (!is_m_kmer && opt::training_target == TT_UNMETHYLATED_KMERS);
            bool trained = false;
            // only train if there are a sufficient number of events for this kmer
            if(update_kmer && summaries[ki].events.size() >= opt::min_number_of_events_to_train) {
                
                // train a mixture model where a minority of k-mers aren't methylated
                ParamMixture mixture;
                
                float incomplete_methylation_rate = 0.05f;
                std::string um_kmer = mtrain_alphabet->unmethylate(kmer);
                size_t um_ki = mtrain_alphabet->kmer_rank(um_kmer.c_str(), k);
                
                // Initialize the training parameters. If this is a kmer containing
                // a methylation site we train a two component mixture, otherwise
                // just fit a gaussian
                float major_weight = is_m_kmer ? 1 - incomplete_methylation_rate : 1.0f;
                mixture.log_weights.push_back(log(major_weight));
                mixture.params.push_back(current_model_iter->second.get_parameters(ki));
                
                if(is_m_kmer) {
                    // add second unmethylated component
                    mixture.log_weights.push_back(std::log(incomplete_methylation_rate));
                    mixture.params.push_back(current_model_iter->second.get_parameters(um_ki));
                }

                if(opt::verbose > 1) {
                    fprintf(stderr, "INIT__MIX %s\t%s\t[%.2lf %.2lf %.2lf]\t[%.2lf %.2lf %.2lf]\n", model_training_iter->first.c_str(), kmer.c_str(), 
                        std::exp(mixture.log_weights[0]), mixture.params[0].level_mean, mixture.params[0].level_stdv,
                        std::exp(mixture.log_weights[1]), mixture.params[1].level_mean, mixture.params[1].level_stdv);
                }

                ParamMixture trained_mixture = train_gaussian_mixture(summaries[ki].events, mixture);

                if(opt::verbose > 1) {
                    fprintf(stderr, "TRAIN_MIX %s\t%s\t[%.2lf %.2lf %.2lf]\t[%.2lf %.2lf %.2lf]\n", model_training_iter->first.c_str(), kmer.c_str(), 
                        std::exp(trained_mixture.log_weights[0]), trained_mixture.params[0].level_mean, trained_mixture.params[0].level_stdv,
                        std::exp(trained_mixture.log_weights[1]), trained_mixture.params[1].level_mean, trained_mixture.params[1].level_stdv);
                }

                #pragma omp critical
                updated_model.states[ki] = trained_mixture.params[0];

                if (model_stdv()) {
                    ParamMixture ig_mixture;
                    // weights
                    ig_mixture.log_weights = trained_mixture.log_weights;
                    // states
                    ig_mixture.params.emplace_back(trained_mixture.params[0]);

                    if(is_m_kmer) {
                        ig_mixture.params.emplace_back(current_model_iter->second.get_parameters(um_ki));
                    }
                    // run training
                    auto trained_ig_mixture = train_invgaussian_mixture(summaries[ki].events, ig_mixture);

                    LOG("methyltrain", debug)
                        << "IG_INIT__MIX " << model_training_iter->first.c_str() << " " << kmer.c_str() << " ["
                        << std::fixed << std::setprecision(5) << ig_mixture.params[0].sd_mean << " "
                        << ig_mixture.params[1].sd_mean << "]" << std::endl
                        << "IG_TRAIN_MIX " << model_training_iter->first.c_str() << " " << kmer.c_str() << " ["
                        << trained_ig_mixture.params[0].sd_mean << " "
                        << trained_ig_mixture.params[1].sd_mean << "]" << std::endl;

                    // update state
                    #pragma omp critical
                    {
                        updated_model.states[ki] = trained_ig_mixture.params[0];
                    }
                }

                trained = true;
            }

            #pragma omp critical
            {
                fprintf(summary_fp, "%s\t%s\t%d\t%d\t%d\t%zu\t%d\t%.2lf\t%.2lf\n",
                                        model_short_name.c_str(), kmer.c_str(), 
                                        summaries[ki].num_matches, summaries[ki].num_skips, summaries[ki].num_stays, 
                                        summaries[ki].events.size(), trained, updated_model.states[ki].level_mean, updated_model.states[ki].level_stdv);
            }

            // add the updated model into the collection (or replace what is already there)
            PoreModelSet::insert_model(opt::trained_model_type, updated_model);
        }
    }

    // cleanup records
    for(size_t i = 0; i < records.size(); ++i) {
        bam_destroy1(records[i]);
    }

    // cleanup
    sam_itr_destroy(itr);
    bam_hdr_destroy(hdr);
    fai_destroy(fai);
    sam_close(bam_fh);
    hts_idx_destroy(bam_idx);
    fclose(summary_fp);
}
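
Note: the block above initializes the training mixture: a k-mer containing a methylation site ('M') gets a two-component mixture whose minor component (weight incomplete_methylation_rate) reuses the parameters of the corresponding unmethylated k-mer, while any other k-mer gets a single Gaussian. Below is a minimal standalone sketch of just that initialization step, using illustrative stand-in types (GaussParams, Mixture) rather than nanopolish's own:

#include <cmath>
#include <cstdio>
#include <vector>

struct GaussParams { double level_mean, level_stdv; };

struct Mixture {
    std::vector<double> log_weights;
    std::vector<GaussParams> params;
};

// Build the initial mixture: two components for a methylated k-mer
// (the minor one modelling incomplete methylation), one otherwise.
Mixture init_mixture(bool is_m_kmer, GaussParams kmer_params, GaussParams um_params)
{
    const double incomplete_methylation_rate = 0.05;
    Mixture m;
    double major_weight = is_m_kmer ? 1.0 - incomplete_methylation_rate : 1.0;
    m.log_weights.push_back(std::log(major_weight));
    m.params.push_back(kmer_params);
    if (is_m_kmer) {
        // second, unmethylated component
        m.log_weights.push_back(std::log(incomplete_methylation_rate));
        m.params.push_back(um_params);
    }
    return m;
}

int main()
{
    Mixture m = init_mixture(true, {85.0, 2.0}, {90.0, 2.5});
    for (size_t i = 0; i < m.params.size(); ++i)
        std::printf("w=%.3f mean=%.1f stdv=%.1f\n", std::exp(m.log_weights[i]),
                    m.params[i].level_mean, m.params[i].level_stdv);
    return 0;
}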
Code example #3
File: topolTest.cpp Project: cz172638/QGIS
ErrorList topolTest::checkPointInPolygon( double tolerance, QgsVectorLayer *layer1, QgsVectorLayer *layer2, bool isExtent )
{
  Q_UNUSED( tolerance );

  int i = 0;
  ErrorList errorList;

  if ( layer1->geometryType() != QgsWkbTypes::PointGeometry )
  {
    return errorList;
  }

  if ( layer2->geometryType() != QgsWkbTypes::PolygonGeometry )
  {
    return errorList;
  }

  QgsSpatialIndex *index = mLayerIndexes[layer2->id()];

  QgsGeometry canvasExtentPoly = QgsGeometry::fromWkt( qgsInterface->mapCanvas()->extent().asWktPolygon() );

  QList<FeatureLayer>::Iterator it;
  for ( it = mFeatureList1.begin(); it != mFeatureList1.end(); ++it )
  {
    if ( !( ++i % 100 ) )
      emit progress( i );
    if ( testCanceled() )
      break;
    QgsGeometry g1 = it->feature.geometry();
    QgsRectangle bb = g1.boundingBox();
    QList<QgsFeatureId> crossingIds;
    crossingIds = index->intersects( bb );
    QList<QgsFeatureId>::ConstIterator cit = crossingIds.begin();
    QList<QgsFeatureId>::ConstIterator crossingIdsEnd = crossingIds.end();
    bool touched = false;
    for ( ; cit != crossingIdsEnd; ++cit )
    {
      QgsFeature &f = mFeatureMap2[*cit].feature;
      QgsGeometry g2 = f.geometry();
      if ( g2.isNull() || !_canExportToGeos( g2 ) )
      {
        QgsMessageLog::logMessage( tr( "Second geometry missing or GEOS import failed." ), tr( "Topology plugin" ) );
        continue;
      }
      if ( g2.contains( g1 ) )
      {
        touched = true;
        break;
      }
    }
    if ( !touched )
    {
      QgsGeometry conflictGeom = g1;

      if ( isExtent )
      {
        if ( canvasExtentPoly.disjoint( conflictGeom ) )
        {
          continue;
        }
      }

      QList<FeatureLayer> fls;
      fls << *it << *it;
      //bb.scale(10);

      TopolErrorPointNotInPolygon *err = new TopolErrorPointNotInPolygon( bb, conflictGeom, fls );
      errorList << err;
    }
  }

  return errorList;
}
Code example #4
File: waves.cpp Project: sellibitze/LEDparticles
int main() {
    // The physical computations run at about 1500 Hz. 30 consecutive
    // time steps are mixed into a frame buffer for motion blur. But
    // this is not only about motion blur. The higher temporal resolution
    // is necessary to keep discretization errors from growing too large and
    // to keep the whole thing "stable".
    //
    // The framebuffer is "oversampled" (higher resolution) for
    // anti-aliasing.
    const int num_particles = 10;
    const int substeps = 30; // temporal oversampling
    const int sover = 5; // spatial oversampling

    std::srand(std::time(0));
    std::vector<particle> ps;
    ps.push_back( // bottom border particle that is not moving
        make_particle(
            make_random_color(),
            -1
        )
    );
    for (int i = 0; i < num_particles; ++i) {
        ps.push_back( // almost equidistantly spaced particles
            make_particle(
                make_random_color(),
                (i + 0.9f + 0.2f * uniform_random()) / (1 + num_particles) * (num_leds + 1) - 1
            )
        );
    }
    ps.push_back( // top border particle that is not moving
        make_particle(
            make_random_color(),
            num_leds
        )
    );

    std::vector<int> framebuff (num_leds * 3 * sover);
    std::vector<unsigned char> scratch;
    int time = -1;
    int period = 10000; // time steps until next impulse
    int part_change_color_index = 1;
    color part_change_color_new = {{0}}, part_change_color_old = {{0}};
#ifdef WRITE_PPM
    const int iters = 500; // for debugging purposes
    write_binary_ppm_header(num_leds, iters, std::cout);
    for (int i = 0; i < iters; ++i) {
#else
    for (;;) {
#endif
        // blank frame buffer (black)
        std::fill(framebuff.begin(), framebuff.end(), 0);
        for (int s = 0; s < substeps; ++s) {
            render_frame(ps, sover, substeps, framebuff);
            time = (time + 1) % period;
            if (time <= 1000) { // impulse phase
                if (time == 0) {
                    period = 5000 + std::rand() % 10000;
                    part_change_color_index = (std::rand() % num_particles) + 1;
                    part_change_color_old = ps[part_change_color_index].col;
                    part_change_color_new = make_random_color();
                }
                float s1 = squared(std::sin(time * (3.14159265 / 2000)));
                float s2 = squared(std::sin(time * (3.14159265 / 1000)));
                ps[0].pos = s2 * 15 - 1; // move bottom particle around
                // also change the color of a random particle once in a while
                ps[part_change_color_index].col = fade(
                    part_change_color_old,
                    part_change_color_new, s1);
            }
            update_accels(ps);
            progress(ps);
        }
        write_frame(framebuff, 1.0f/substeps, sover, scratch, std::cout);
#ifndef WRITE_PPM
        usleep(20000); // wait 20 ms (=> about 50 frames/sec)
#endif
    }
    return 0;
}
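
Note: the comment at the top of main() describes the motion-blur scheme: many physics substeps are rendered additively into one frame buffer, and the sum is scaled by 1/substeps on output. A minimal standalone sketch of that accumulate-then-average idea follows, with placeholder values standing in for the project's render_frame()/write_frame():

#include <cstdio>
#include <vector>

int main()
{
    const int num_leds = 8;                  // assumed strip length
    const int substeps = 30;                 // temporal oversampling
    std::vector<int> framebuff(num_leds, 0); // blank frame buffer (black)
    for (int s = 0; s < substeps; ++s)
        for (int i = 0; i < num_leds; ++i)
            framebuff[i] += 255 * i / (num_leds - 1); // stand-in for render_frame()
    for (int i = 0; i < num_leds; ++i)       // scale by 1/substeps, as write_frame() does
        std::printf("%d ", framebuff[i] / substeps);
    std::printf("\n");
    return 0;
}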
Code example #5
/**
 * Apply the detector test criterion
 * @param counts1 :: A workspace containing the integrated counts of the first
 * white beam run
 * @param counts2 :: A workspace containing the integrated counts of the first
 * white beam run
 * @param average :: The computed median
 * @param variation :: The allowed variation in terms of the number of medians,
 * i.e. spectra whose ratio of counts falls outside this range fail the test
 * and are masked on counts1
 * @return number of detectors for which tests failed
 */
int DetectorEfficiencyVariation::doDetectorTests(
    API::MatrixWorkspace_const_sptr counts1,
    API::MatrixWorkspace_const_sptr counts2, const double average,
    double variation) {
  // DIAG in libISIS did this.  A variation of less than 1 doesn't make sense in
  // this algorithm
  if (variation < 1) {
    variation = 1.0 / variation;
  }
  // criterion for whether the first spectrum is larger than expected
  double largest = average * variation;
  // criterion for whether the first spectrum is lower than expected
  double lowest = average / variation;

  const int numSpec = static_cast<int>(counts1->getNumberHistograms());
  const int progStep = static_cast<int>(std::ceil(numSpec / 30.0));

  // Create a workspace for the output
  MaskWorkspace_sptr maskWS = this->generateEmptyMask(counts1);

  bool checkForMask = false;
  Geometry::Instrument_const_sptr instrument = counts1->getInstrument();
  if (instrument != nullptr) {
    checkForMask = ((instrument->getSource() != nullptr) &&
                    (instrument->getSample() != nullptr));
  }

  const double deadValue(1.0);
  int numFailed(0);
  PARALLEL_FOR3(counts1, counts2, maskWS)
  for (int i = 0; i < numSpec; ++i) {
    PARALLEL_START_INTERUPT_REGION
    // move progress bar
    if (i % progStep == 0) {
      advanceProgress(progStep * static_cast<double>(RTMarkDetects) / numSpec);
      progress(m_fracDone);
      interruption_point();
    }

    if (checkForMask) {
      const std::set<detid_t> &detids =
          counts1->getSpectrum(i)->getDetectorIDs();
      if (instrument->isMonitor(detids))
        continue;
      if (instrument->isDetectorMasked(detids)) {
        // Ensure it is masked on the output
        maskWS->dataY(i)[0] = deadValue;
        continue;
      }
    }

    const double signal1 = counts1->readY(i)[0];
    const double signal2 = counts2->readY(i)[0];

    // Mask out NaN and infinite
    if (boost::math::isinf(signal1) || boost::math::isnan(signal1) ||
        boost::math::isinf(signal2) || boost::math::isnan(signal2)) {
      maskWS->dataY(i)[0] = deadValue;
      PARALLEL_ATOMIC
      ++numFailed;
      continue;
    }

    // Check the ratio is within the given range
    const double ratio = signal1 / signal2;
    if (ratio < lowest || ratio > largest) {
      maskWS->dataY(i)[0] = deadValue;
      PARALLEL_ATOMIC
      ++numFailed;
    }

    PARALLEL_END_INTERUPT_REGION
  }
  PARALLEL_CHECK_INTERUPT_REGION

  // Register the results with the ADS
  setProperty("OutputWorkspace", maskWS);

  return numFailed;
}
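
Note: the documented criterion reduces to a per-spectrum predicate: invert a variation below 1, then fail any pair of counts whose ratio lies outside [average/variation, average*variation] (NaN/inf values are masked as well). Below is a standalone sketch of just that predicate, independent of the Mantid workspace API:

#include <cmath>
#include <cstdio>

bool failsVariationTest(double signal1, double signal2,
                        double average, double variation)
{
    if (variation < 1)
        variation = 1.0 / variation;          // same normalisation as above
    const double largest = average * variation;
    const double lowest = average / variation;
    if (std::isnan(signal1) || std::isinf(signal1) ||
        std::isnan(signal2) || std::isinf(signal2))
        return true;                          // NaN/inf spectra are masked too
    const double ratio = signal1 / signal2;
    return ratio < lowest || ratio > largest;
}

int main()
{
    std::printf("%d\n", failsVariationTest(1200.0, 1000.0, 1.0, 1.1)); // 1: ratio 1.2 outside [0.91, 1.1]
    std::printf("%d\n", failsVariationTest(1050.0, 1000.0, 1.0, 1.1)); // 0: ratio 1.05 within range
    return 0;
}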
Code example #6
File: test_planner.cpp Project: alexlee-gk/surgical
void DimensionReductionBestPath(Thread* start, Thread* target, int n, vector<Thread*>& traj, vector<vector<VectorXd> >& mot) 
{
  Thread* st = new Thread(*start);
  Thread* en = new Thread(*target); 

  // need to call minimize energy on start from outside this method 
  if (n == 0) {
    glThreads[4]->setThread(new Thread(*start));
    glThreads[4]->updateThreadPoints();
    glThreads[5]->setThread(new Thread(*target));
    glThreads[5]->updateThreadPoints();
    glThreads[6]->setThread(new Thread(*target));
    glThreads[6]->updateThreadPoints();
    planner.initialize(new Thread(*st), new Thread(*en));
    initialized = true; 

    Thread goal_thread; Thread prev_thread; Thread next_thread; 
    while(!interruptEnabled) {
      #pragma omp parallel for num_threads(NUM_CPU_THREADS)
      for (int i = 0; i < 999999; i++) {
        if (!interruptEnabled) {  
          planner.planStep(goal_thread, prev_thread, next_thread);
        }
      }
    }
    
    planner.updateBestPath();
    
    RRTNode* node = planner.getTree()->front();
    node = node->next;
    while (node != NULL) {
      traj.push_back(new Thread(*node->thread));
      vector<VectorXd> wrapper_mot;
      wrapper_mot.push_back(node->motion);
      mot.push_back(wrapper_mot);
      node = node->next;
    }

  } else { 
    Thread* approxStart = planner.halfDimApproximation(st);
    Thread* approxTarget = planner.halfDimApproximation(en); 
    vector<Thread*> path;
    vector<vector<VectorXd> > motions; 
    DimensionReductionBestPath(approxStart, approxTarget, n-1, path, motions);
    /* use Trajectory Follower to follow the path in higher D space. */ 
    
    // transform the path to higher D space 
    vector<Thread*> transformed_path; 
    transformed_path.resize(path.size());
    boost::progress_display progress(path.size()); 
    for (int i = 0; i < path.size(); i++) {
      transformed_path[i] = planner.doubleDimApproximation(path[i]);
      ++progress; 
    }
     
    // follow the trajectory of the transformed approximation
    Trajectory_Follower *pathFollower = 
      new Trajectory_Follower(transformed_path, motions, st);

    pathFollower->control_to_finish();
    pathFollower->getReachedStates(traj);
    cout << traj.size() << endl;
    pathFollower->getMotions(mot);
  }

  apprxNodes[n] = new RRTNode(new Thread(*st));
  curApprxNodes[n] = apprxNodes[n]; 
  RRTNode* prevNode = apprxNodes[n]; 
  for (int i = 0; i < traj.size(); i++) {
    RRTNode* node = new RRTNode(new Thread(*traj[i]));
    node->prev = prevNode;
    prevNode->next = node;
    prevNode = node; 
  }

}
Code example #7
File: dsa-keygen.c Project: Zeranoe/gstreamer-nettle
/* Valid sizes, according to FIPS 186-3, are (1024, 160), (2048, 224),
   (2048, 256), (3072, 256). Currently, we use only q_bits of 160 or
   256. */
int
dsa_generate_keypair(struct dsa_public_key *pub,
		     struct dsa_private_key *key,
		     void *random_ctx, nettle_random_func *random,
		     void *progress_ctx, nettle_progress_func *progress,
		     unsigned p_bits, unsigned q_bits)
{
  mpz_t p0, p0q, r;
  unsigned p0_bits;
  unsigned a;

  switch (q_bits)
    {
    case 160:
      if (p_bits < DSA_SHA1_MIN_P_BITS)
	return 0;
      break;
    case 256:
      if (p_bits < DSA_SHA256_MIN_P_BITS)
	return 0;
      break;
    default:
      return 0;
    }

  mpz_init (p0);
  mpz_init (p0q);
  mpz_init (r);

  nettle_random_prime (pub->q, q_bits, 0, random_ctx, random,
		       progress_ctx, progress);

  p0_bits = (p_bits + 3)/2;
  
  nettle_random_prime (p0, p0_bits, 0,
		       random_ctx, random,
		       progress_ctx, progress);

  if (progress)
    progress (progress_ctx, 'q');
  
  /* Generate p = 2 r q p0 + 1, such that 2^{n-1} < p < 2^n.
   *
   * We select r in the range i + 1 < r <= 2i, with i = floor (2^{n-2} / (p0 q)). */

  mpz_mul (p0q, p0, pub->q);

  _nettle_generate_pocklington_prime (pub->p, r, p_bits, 0,
				      random_ctx, random,
				      p0, pub->q, p0q);

  if (progress)
    progress (progress_ctx, 'p');

  mpz_mul (r, r, p0);

  for (a = 2; ; a++)
    {
      mpz_set_ui (pub->g, a);
      mpz_powm (pub->g, pub->g, r, pub->p);
      if (mpz_cmp_ui (pub->g, 1) != 0)
	break;
    }

  if (progress)
    progress (progress_ctx, 'g');

  mpz_set(r, pub->q);
  mpz_sub_ui(r, r, 2);
  nettle_mpz_random(key->x, random_ctx, random, r);

  mpz_add_ui(key->x, key->x, 1);

  mpz_powm(pub->y, pub->g, key->x, pub->p);

  if (progress)
    progress (progress_ctx, '\n');
  
  mpz_clear (p0);
  mpz_clear (p0q);
  mpz_clear (r);

  return 1;
}
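
Note: for readability, the prime construction stated in the comment inside the function can be restated as display math (same content as the comment, with n being the requested p_bits):

\[
  p = 2\,r\,q\,p_0 + 1, \qquad 2^{\,n-1} < p < 2^{\,n},
\]
\[
  i + 1 < r \le 2i, \qquad i = \left\lfloor \frac{2^{\,n-2}}{p_0\, q} \right\rfloor .
\]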
Code example #8
File: kumy2seed.c Project: KUM-Kiel/seisconvert
int main(int argc, char **argv)
{
  kumy_file_t *kumy;
  miniseed_file_t *mseed[KUMY_FILE_CHANNELS];
  int32_t frame[KUMY_FILE_CHANNELS];
  uint64_t frames, l, frames_per_file, frames_total, frame_count = 0;
  int i;
  char oname[1024], folder[1024];
  uint32_t sample_rate, seconds_per_file;
  int percent = 0, old_percent = -1;
  int compression = 1, show_progress = 1;
  char *infile = 0;

  struct taia start_time; /* 1871 */
  struct taia stop_time;  /* 1951 */
  struct taia sync_time;  /* 2031 */
  struct taia skew_time;  /* 2111 */

  struct caltime ct;
  struct taia tt, dt;

  program = argv[0];
  parse_options(&argc, &argv, OPTIONS(
    FLAG('n', "no-compression", compression, 0),
    FLAG('q', "quiet", show_progress, 0),
    FLAG_CALLBACK('h', "help", usage)
  ));

  if (argc < 2) {
    usage(0, 0, 0);
  }

  infile = argv[1];
  if (!(kumy = kumy_file_open(infile))) {
    fprintf(stderr, "Invalid file: %s.\n", infile);
    return -1;
  }

  parse_text_date(&start_time, kumy->text_header[0].content + 1871);
  parse_text_date(&stop_time,  kumy->text_header[0].content + 1951);
  parse_text_date(&sync_time,  kumy->text_header[0].content + 2031);
  parse_text_date(&skew_time,  kumy->text_header[0].content + 2111);

  sample_rate = 1000000 / kumy->binary_header[0].sample_interval;

  if (sample_rate <= 250) {
    seconds_per_file = 86400;
  } else if (sample_rate <= 500) {
    seconds_per_file = 43200;
  } else if (sample_rate <= 1000) {
    seconds_per_file = 21600;
  } else if (sample_rate <= 2000) {
    seconds_per_file = 10800;
  } else {
    seconds_per_file = 5400;
  }

  frames_per_file = sample_rate * seconds_per_file;
  frames_total = kumy->binary_header[0].num_samples;

  caltime_utc(&ct, &start_time.sec, 0, 0);
  ct.hour = 0;
  ct.minute = 0;
  ct.second = 0;
  caltime_tai(&ct, &tt.sec);
  tt.nano = tt.atto = 0;

  while (!taia_less(&start_time, &tt)) {
    tt.sec.x += seconds_per_file;
  }

  taia_sub(&dt, &tt, &start_time);
  frames = taia_approx(&dt) / seconds_per_file * frames_per_file;

  l = last('.', infile);
  if (l == -1 || l >= sizeof(folder)) return -1;

  /* Create folder. */
  copy(folder, l, infile);
  folder[l] = 0;
  mkdir_p(folder);

  for (i = 0; i < KUMY_FILE_CHANNELS; ++i) {
    caltime_utc(&ct, &start_time.sec, 0, 0);
    snprintf(oname, sizeof(oname), "%s/%lld.%02lld.%02lld.%02lld.%02lld.%02lld.%s.seed",
      folder, (long long)ct.date.year, (long long)ct.date.month, (long long)ct.date.day, (long long)ct.hour, (long long)ct.minute, (long long)ct.second, channel_names[i]);

    if (!(mseed[i] = miniseed_file_create((char*)oname))) {
      fprintf(stderr, "Invalid file: %s.\n", oname);
      return -1;
    }
    miniseed_file_set_sample_rate(mseed[i], sample_rate);
    miniseed_file_set_start_time(mseed[i], &start_time);
    miniseed_file_set_info(mseed[i], "OBS", "DE", channel_names[i], "K");
    miniseed_file_set_compression(mseed[i], compression);
  }

  while (kumy_file_read_int_frame(kumy, frame) >= 0) {
    if (frames == 0) {
      /* Create new files.*/
      for (i = 0; i < KUMY_FILE_CHANNELS; ++i) {
        miniseed_file_close(mseed[i]);
        caltime_utc(&ct, &tt.sec, 0, 0);
        snprintf(oname, sizeof(oname), "%s/%lld.%02lld.%02lld.%02lld.%02lld.%02lld.%s.seed",
          folder, (long long)ct.date.year, (long long)ct.date.month, (long long)ct.date.day, (long long)ct.hour, (long long)ct.minute, (long long)ct.second, channel_names[i]);
        if (!(mseed[i] = miniseed_file_create((char*)oname))) {
          fprintf(stderr, "Invalid file: %s.\n", oname);
          return -1;
        }
        miniseed_file_set_sample_rate(mseed[i], sample_rate);
        miniseed_file_set_start_time(mseed[i], &tt);
        miniseed_file_set_info(mseed[i], "OBS", "DE", channel_names[i], "K");
        miniseed_file_set_compression(mseed[i], compression);
      }
      frames = frames_per_file;
      tt.sec.x += seconds_per_file;
    }

    for (i = 0; i < KUMY_FILE_CHANNELS; ++i) {
      miniseed_file_write_int_frame(mseed[i], frame + i);
    }
    if (show_progress && frame_count % 10000 == 0) {
      percent = 100 * frame_count / frames_total;
      if (percent != old_percent) {
        progress(percent, 0);
        old_percent = percent;
      }
    }
    --frames;
    ++frame_count;
  }
  if (show_progress) progress(100, 1);

  kumy_file_close(kumy);
  for (i = 0; i < KUMY_FILE_CHANNELS; ++i) {
    miniseed_file_close(mseed[i]);
  }

  return 0;
}
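
Note: the splitting logic above picks a per-file duration from the sample rate and derives the number of frames per output file from it. A small standalone sketch of just that arithmetic (the rate thresholds are copied from the code above; the printed table is purely illustrative):

#include <cstdint>
#include <cstdio>

static uint32_t seconds_per_file_for(uint32_t sample_rate)
{
    if (sample_rate <= 250)  return 86400;   // up to 250 Hz: one file per day
    if (sample_rate <= 500)  return 43200;
    if (sample_rate <= 1000) return 21600;
    if (sample_rate <= 2000) return 10800;
    return 5400;
}

int main(void)
{
    const uint32_t rates[] = {100, 250, 500, 1000, 2000, 4000};
    for (unsigned i = 0; i < sizeof rates / sizeof rates[0]; ++i) {
        uint32_t spf = seconds_per_file_for(rates[i]);
        uint64_t frames_per_file = (uint64_t)rates[i] * spf;
        std::printf("%u Hz -> %u s/file -> %llu frames/file\n",
                    (unsigned)rates[i], (unsigned)spf,
                    (unsigned long long)frames_per_file);
    }
    return 0;
}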
Code example #9
void ProgressReceiver::timeout()
{
	emit progress(mReader->progress() * mScale);
}
Code example #10
File: grdplugin.cpp Project: mdoube/drishti
void
GrdPlugin::saveTrimmed(QString trimFile,
			    int dmin, int dmax,
			    int wmin, int wmax,
			    int hmin, int hmax)
{
  QProgressDialog progress("Saving trimmed volume",
			   0,
			   0, 100,
			   0);
  progress.setMinimumDuration(0);

  int nX, nY, nZ;
  nX = m_depth;
  nY = m_width;
  nZ = m_height;

  int mX, mY, mZ;
  mX = dmax-dmin+1;
  mY = wmax-wmin+1;
  mZ = hmax-hmin+1;

  int nbytes = nY*nZ*m_bytesPerVoxel;
  uchar *tmp = new uchar[nbytes];

  uchar vt;
  if (m_voxelType == _UChar) vt = 0; // unsigned byte
  if (m_voxelType == _Char) vt = 1; // signed byte
  if (m_voxelType == _UShort) vt = 2; // unsigned short
  if (m_voxelType == _Short) vt = 3; // signed short
  if (m_voxelType == _Int) vt = 4; // int
  if (m_voxelType == _Float) vt = 8; // float
  
  QFile fout(trimFile);
  fout.open(QFile::WriteOnly);

  fout.write((char*)&vt, 1);
  fout.write((char*)&mX, 4);
  fout.write((char*)&mY, 4);
  fout.write((char*)&mZ, 4);

  for(uint i=dmin; i<=dmax; i++)
    {
      //----------------------------
      QFile fin(m_imageList[i]);
      fin.open(QFile::ReadOnly);
      fin.seek(m_headerBytes);
      fin.read((char*)tmp, nbytes);
      fin.close();
      //----------------------------      

      for(uint j=wmin; j<=wmax; j++)
	{
	  memcpy(tmp+(j-wmin)*mZ*m_bytesPerVoxel,
		 tmp+(j*nZ + hmin)*m_bytesPerVoxel,
		 mZ*m_bytesPerVoxel);
	}
	  
      fout.write((char*)tmp, mY*mZ*m_bytesPerVoxel);

      progress.setValue((int)(100*(float)(i-dmin)/(float)mX));
      qApp->processEvents();
    }

  fout.close();

  delete [] tmp;

  m_headerBytes = 13; // to be used for applyMapping function
}
Code example #11
File: setup.c Project: zcw159357/citadel
int main(int argc, char *argv[])
{
    int a, i;
    int curr;
    char buf[1024];
    char aaa[128];
    int relh = 0;
    int home = 0;
    int nRetries = 0;
    char relhome[PATH_MAX]="";
    char ctdldir[PATH_MAX]=CTDLDIR;
    struct passwd *pw;
    gid_t gid;
    char *activity = NULL;

    /* Keep a mild groove on */
    program_title = _("Citadel setup program");

    /* set an invalid setup type */
    setup_type = (-1);

    /* parse command line args */
    for (a = 0; a < argc; ++a) {
        if (!strncmp(argv[a], "-u", 2)) {
            strcpy(aaa, argv[a]);
            strcpy(aaa, &aaa[2]);
            setup_type = atoi(aaa);
        }
        else if (!strcmp(argv[a], "-q")) {
            setup_type = UI_SILENT;
        }
        else if (!strncmp(argv[a], "-h", 2)) {
            relh=argv[a][2]!='/';
            if (!relh) {
                safestrncpy(ctdl_home_directory, &argv[a][2], sizeof ctdl_home_directory);
            } else {
                safestrncpy(relhome, &argv[a][2], sizeof relhome);
            }
            home = 1;
        }

    }

    calc_dirs_n_files(relh, home, relhome, ctdldir, 0);
    SetTitles();

    /* If a setup type was not specified, try to determine automatically
     * the best one to use out of all available types.
     */
    if (setup_type < 0) {
        setup_type = discover_ui();
    }

    enable_home = ( relh | home );

    if (chdir(ctdl_run_dir) != 0) {
        display_error("%s: [%s]\n", _("The directory you specified does not exist"), ctdl_run_dir);
        exit(errno);
    }


    /*
     * Connect to the running Citadel server.
     */
    while ((serv_sock < 0) && (nRetries < 10)) {
        serv_sock = uds_connectsock(file_citadel_admin_socket);
        nRetries ++;
        if (serv_sock < 0)
            sleep(1);
    }
    if (serv_sock < 0) {
        display_error(
            "%s: %s %s\n",
            _("Setup could not connect to a running Citadel server."),
            strerror(errno), file_citadel_admin_socket
        );
        exit(1);
    }

    /*
     * read the server greeting
     */
    serv_gets(buf);
    if (buf[0] != '2') {
        display_error("%s\n", buf);
        exit(2);
    }

    /*
     * Are we connected to the correct Citadel server?
     */
    serv_puts("INFO");
    serv_gets(buf);
    if (buf[0] != '1') {
        display_error("%s\n", buf);
        exit(3);
    }
    a = 0;
    while (serv_gets(buf), strcmp(buf, "000")) {
        if (a == 5) {
            if (atoi(buf) != REV_LEVEL) {
                display_error("%s\n",
                              _("Your setup program and Citadel server are from different versions.")
                             );
                exit(4);
            }
        }
        ++a;
    }

    /*
     * Now begin.
     */


    if (setup_type == UI_TEXT) {
        printf("\n\n\n	       *** %s ***\n\n", program_title);
    }

    if (setup_type == UI_DIALOG) {
        system("clear 2>/dev/null");
    }

    /* Go through a series of dialogs prompting for config info */
    for (curr = 1; curr < eMaxQuestions; ++curr) {
        edit_value(curr);

        if (	(curr == eAuthType)
                && (getconf_int("c_auth_mode") != AUTHMODE_LDAP)
                && (getconf_int("c_auth_mode") != AUTHMODE_LDAP_AD)
           ) {
            curr += 5;	/* skip LDAP questions if we're not authenticating against LDAP */
        }

        if (curr == eSysAdminName) {
            if (getconf_int("c_auth_mode") == AUTHMODE_NATIVE) {
                /* for native auth mode, fetch the admin's existing pw */
                snprintf(buf, sizeof buf, "AGUP %s", admin_name);
                serv_puts(buf);
                serv_gets(buf);
                if (buf[0] == '2') {
                    extract_token(admin_pass, &buf[4], 1, '|', sizeof admin_pass);
                }
            }
            else {
                ++curr;		/* skip the password question for non-native auth modes */
            }
        }
    }

    if ((pw = getpwuid( getconf_int("c_ctdluid") )) == NULL) {
        gid = getgid();
    } else {
        gid = pw->pw_gid;
    }

    if (create_run_directories(getconf_int("c_ctdluid"), gid) != 0) {
        display_error("%s\n", _("failed to create directories"));
    }

    activity = _("Reconfiguring Citadel server");
    progress(activity, 0, 5);
    sleep(1);					/* Let the message appear briefly */

    /*
     * Create the administrator account.  It's ok if the command fails if this user already exists.
     */
    progress(activity, 1, 5);
    snprintf(buf, sizeof buf, "CREU %s|%s", admin_name, admin_pass);
    serv_puts(buf);
    progress(activity, 2, 5);
    serv_gets(buf);
    progress(activity, 3, 5);

    /*
     * Assign the desired password and access level to the administrator account.
     */
    snprintf(buf, sizeof buf, "AGUP %s", admin_name);
    serv_puts(buf);
    progress(activity, 4, 5);
    serv_gets(buf);
    if (buf[0] == '2') {
        int admin_flags = extract_int(&buf[4], 2);
        int admin_times_called = extract_int(&buf[4], 3);
        int admin_msgs_posted = extract_int(&buf[4], 4);
        snprintf(buf, sizeof buf, "ASUP %s|%s|%d|%d|%d|6",
                 admin_name, admin_pass, admin_flags, admin_times_called, admin_msgs_posted
                );
        serv_puts(buf);
        serv_gets(buf);
    }
    progress(activity, 5, 5);

#ifndef __CYGWIN__
    check_xinetd_entry();	/* Check /etc/xinetd.d/telnet */
    disable_other_mtas();   /* Offer to disable other MTAs */
    fixnss();		/* Check for the 'db' nss and offer to disable it */
#endif

    /*
     * Restart citserver
     */
    activity = _("Restarting Citadel server to apply changes");
    progress(activity, 0, 41);

    serv_puts("TIME");
    serv_gets(buf);
    long original_start_time = extract_long(&buf[4], 3);

    progress(activity, 1, 41);
    serv_puts("DOWN 1");
    progress(activity, 2, 41);
    serv_gets(buf);
    if (buf[0] != '2') {
        display_error("%s\n", buf);
        exit(6);
    }

    close(serv_sock);
    serv_sock = (-1);

    for (i=3; i<=6; ++i) {					/* wait for server to shut down */
        progress(activity, i, 41);
        sleep(1);
    }

    for (i=7; ((i<=38) && (serv_sock < 0)) ; ++i) {		/* wait for server to start up */
        progress(activity, i, 41);
        serv_sock = uds_connectsock(file_citadel_admin_socket);
        sleep(1);
    }

    progress(activity, 39, 41);
    serv_gets(buf);

    progress(activity, 40, 41);
    serv_puts("TIME");
    serv_gets(buf);
    long new_start_time = extract_long(&buf[4], 3);

    close(serv_sock);
    progress(activity, 41, 41);

    if (	(original_start_time == new_start_time)
            || (new_start_time <= 0)
       ) {
        display_error("%s\n", _("Setup failed to restart Citadel server.  Please restart it manually."));
        exit(7);
    }

    exit(0);
    return 0;
}
Code example #12
File: grdplugin.cpp Project: mdoube/drishti
void
GrdPlugin::generateHistogram()
{
  QProgressDialog progress("Generating Histogram",
			   "Cancel",
			   0, 100,
			   0);
  progress.setMinimumDuration(0);


  float rSize = m_rawMax-m_rawMin;
  int nbytes = m_width*m_height*m_bytesPerVoxel;
  uchar *tmp = new uchar[nbytes];

  QFile fin(m_fileName[0]);
  fin.open(QFile::ReadOnly);
  fin.seek(m_headerBytes);

  m_histogram.clear();
  if (m_voxelType == _UChar ||
      m_voxelType == _Char ||
      m_voxelType == _UShort ||
      m_voxelType == _Short)
    {
      for(uint i=0; i<rSize+1; i++)
	m_histogram.append(0);
    }
  else
    {      
      for(uint i=0; i<65536; i++)
	m_histogram.append(0);
    }

  int histogramSize = m_histogram.size()-1;
  for(uint i=0; i<m_depth; i++)
    {
      progress.setValue((int)(100.0*(float)i/(float)m_depth));
      qApp->processEvents();

      //----------------------------
      QFile fin(m_imageList[i]);
      fin.open(QFile::ReadOnly);
      fin.seek(m_headerBytes);
      fin.read((char*)tmp, nbytes);
      fin.close();
      //----------------------------

      if (m_voxelType == _UChar)
	{
	  uchar *ptr = tmp;
	  GENHISTOGRAM();
	}
      else if (m_voxelType == _Char)
	{
	  char *ptr = (char*) tmp;
	  GENHISTOGRAM();
	}
      if (m_voxelType == _UShort)
	{
	  ushort *ptr = (ushort*) tmp;
	  GENHISTOGRAM();
	}
      else if (m_voxelType == _Short)
	{
	  short *ptr = (short*) tmp;
	  GENHISTOGRAM();
	}
      else if (m_voxelType == _Int)
	{
	  int *ptr = (int*) tmp;
	  GENHISTOGRAM();
	}
      else if (m_voxelType == _Float)
	{
	  float *ptr = (float*) tmp;
	  GENHISTOGRAM();
	}
    }
  fin.close();

  delete [] tmp;

//  while(m_histogram.last() == 0)
//    m_histogram.removeLast();
//  while(m_histogram.first() == 0)
//    m_histogram.removeFirst();

//  QMessageBox::information(0, "",  QString("%1 %2 : %3").\
//			   arg(m_rawMin).arg(m_rawMax).arg(rSize));

  progress.setValue(100);
  qApp->processEvents();
}
Code example #13
File: grdplugin.cpp Project: mdoube/drishti
void
GrdPlugin::findMinMax()
{
  QProgressDialog progress("Finding Min and Max",
			   "Cancel",
			   0, 100,
			   0);
  progress.setMinimumDuration(0);


  int nbytes = m_width*m_height*m_bytesPerVoxel;
  uchar *tmp = new uchar[nbytes];

  QFile fin(m_fileName[0]);
  fin.open(QFile::ReadOnly);
  fin.seek(m_headerBytes);

  m_rawMin = 10000000;
  m_rawMax = -10000000;
  for(uint i=0; i<m_depth; i++)
    {
      progress.setValue((int)(100.0*(float)i/(float)m_depth));
      qApp->processEvents();

      //----------------------------
      QFile fin(m_imageList[i]);
      fin.open(QFile::ReadOnly);
      fin.seek(m_headerBytes);
      fin.read((char*)tmp, nbytes);
      fin.close();
      //----------------------------

      if (m_voxelType == _UChar)
	{
	  uchar *ptr = tmp;
	  FINDMINMAX();
	}
      else if (m_voxelType == _Char)
	{
	  char *ptr = (char*) tmp;
	  FINDMINMAX();
	}
      if (m_voxelType == _UShort)
	{
	  ushort *ptr = (ushort*) tmp;
	  FINDMINMAX();
	}
      else if (m_voxelType == _Short)
	{
	  short *ptr = (short*) tmp;
	  FINDMINMAX();
	}
      else if (m_voxelType == _Int)
	{
	  int *ptr = (int*) tmp;
	  FINDMINMAX();
	}
      else if (m_voxelType == _Float)
	{
	  float *ptr = (float*) tmp;
	  FINDMINMAX();
	}
    }
  fin.close();

  delete [] tmp;

  progress.setValue(100);
  qApp->processEvents();
}
Code example #14
File: grdplugin.cpp Project: mdoube/drishti
void
GrdPlugin::findMinMaxandGenerateHistogram()
{
  float rSize;
  float rMin;
  m_histogram.clear();
  if (m_voxelType == _UChar ||
      m_voxelType == _Char)
    {
      if (m_voxelType == _UChar) rMin = 0;
      if (m_voxelType == _Char) rMin = -127;
      rSize = 255;
      for(uint i=0; i<256; i++)
	m_histogram.append(0);
    }
  else if (m_voxelType == _UShort ||
      m_voxelType == _Short)
    {
      if (m_voxelType == _UShort) rMin = 0;
      if (m_voxelType == _Short) rMin = -32767;
      rSize = 65535;
      for(uint i=0; i<65536; i++)
	m_histogram.append(0);
    }
  else
    {
      QMessageBox::information(0, "Error", "Why am i here ???");
      return;
    }

//  //==================
//  // do not calculate histogram
//  if (m_voxelType == _UChar)
//    {
//      m_rawMin = 0;
//      m_rawMax = 255;
//      return;
//    }
//  //==================

  int nbytes = m_width*m_height*m_bytesPerVoxel;
  uchar *tmp = new uchar[nbytes];

  m_rawMin = 10000000;
  m_rawMax = -10000000;

  QProgressDialog progress("Generating Histogram",
			   0,
			   0, 100,
			   0);
  progress.setMinimumDuration(0);

  for(uint i=0; i<m_depth; i++)
    {
      progress.setValue((int)(100.0*(float)i/(float)m_depth));
      qApp->processEvents();

      //----------------------------
      QFile fin(m_imageList[i]);
      fin.open(QFile::ReadOnly);
      fin.seek(m_headerBytes);
      fin.read((char*)tmp, nbytes);
      fin.close();
      //----------------------------

      if (m_voxelType == _UChar)
	{
	  uchar *ptr = tmp;
	  MINMAXANDHISTOGRAM();
	}
      else if (m_voxelType == _Char)
	{
	  char *ptr = (char*) tmp;
	  MINMAXANDHISTOGRAM();
	}
      if (m_voxelType == _UShort)
	{
	  ushort *ptr = (ushort*) tmp;
	  MINMAXANDHISTOGRAM();
	}
      else if (m_voxelType == _Short)
	{
	  short *ptr = (short*) tmp;
	  MINMAXANDHISTOGRAM();
	}
      else if (m_voxelType == _Int)
	{
	  int *ptr = (int*) tmp;
	  MINMAXANDHISTOGRAM();
	}
      else if (m_voxelType == _Float)
	{
	  float *ptr = (float*) tmp;
	  MINMAXANDHISTOGRAM();
	}
    }

  delete [] tmp;

//  while(m_histogram.last() == 0)
//    m_histogram.removeLast();
//  while(m_histogram.first() == 0)
//    m_histogram.removeFirst();

  progress.setValue(100);
  qApp->processEvents();
}
Code example #15
void KeyframeAnimation::animate(CompositeAnimation* animation, RenderObject* renderer, const RenderStyle* currentStyle, 
                                    const RenderStyle* targetStyle, RefPtr<RenderStyle>& animatedStyle)
{
    // If we have not yet started, we will not have a valid start time, so just start the animation if needed.
    if (isNew() && m_animation->playState() == AnimPlayStatePlaying)
        updateStateMachine(AnimationStateInputStartAnimation, -1);

    // If we get this far and the animation is done, it means we are cleaning up a just finished animation.
    // If so, we need to send back the targetStyle.
    if (postActive()) {
        if (!animatedStyle)
            animatedStyle = const_cast<RenderStyle*>(targetStyle);
        return;
    }

    // If we are waiting for the start timer, we don't want to change the style yet.
    // Special case - if the delay time is 0, then we do want to set the first frame of the
    // animation right away. This avoids a flash when the animation starts.
    if (waitingToStart() && m_animation->delay() > 0)
        return;

    // FIXME: we need to be more efficient about determining which keyframes we are animating between.
    // We should cache the last pair or something.

    // Find the first key
    double elapsedTime = (m_startTime > 0) ? ((!paused() ? currentTime() : m_pauseTime) - m_startTime) : 0;
    if (elapsedTime < 0)
        elapsedTime = 0;

    double t = m_animation->duration() ? (elapsedTime / m_animation->duration()) : 1;
    int i = static_cast<int>(t);
    t -= i;
    if (m_animation->direction() && (i & 1))
        t = 1 - t;

    const RenderStyle* fromStyle = 0;
    const RenderStyle* toStyle = 0;
    double scale = 1;
    double offset = 0;
    Vector<KeyframeValue>::const_iterator endKeyframes = m_keyframes.endKeyframes();
    for (Vector<KeyframeValue>::const_iterator it = m_keyframes.beginKeyframes(); it != endKeyframes; ++it) {
        if (t < it->key()) {
            // The first key should always be 0, so we should never succeed on the first key
            if (!fromStyle)
                break;
            scale = 1.0 / (it->key() - offset);
            toStyle = it->style();
            break;
        }

        offset = it->key();
        fromStyle = it->style();
    }

    // If either style is 0 we have an invalid case, just stop the animation.
    if (!fromStyle || !toStyle) {
        updateStateMachine(AnimationStateInputEndAnimation, -1);
        return;
    }

    // Run a cycle of animation.
    // We know we will need a new render style, so make one if needed.
    if (!animatedStyle)
        animatedStyle = RenderStyle::clone(targetStyle);

    const TimingFunction* timingFunction = 0;
    if (fromStyle->animations() && fromStyle->animations()->size() > 0)
        timingFunction = &(fromStyle->animations()->animation(0)->timingFunction());

    double prog = progress(scale, offset, timingFunction);

    HashSet<int>::const_iterator endProperties = m_keyframes.endProperties();
    for (HashSet<int>::const_iterator it = m_keyframes.beginProperties(); it != endProperties; ++it) {
        if (blendProperties(this, *it, animatedStyle.get(), fromStyle, toStyle, prog))
            setAnimating();
    }
}
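
Note: the time handling above normalises elapsed time by the animation duration, keeps the fractional part as the position inside the current iteration, and mirrors it on odd iterations when the animation alternates direction. An illustrative standalone sketch of that mapping (not the WebCore API):

#include <cstdio>

double iterationProgress(double elapsedTime, double duration, bool alternates)
{
    if (elapsedTime < 0)
        elapsedTime = 0;
    double t = duration ? (elapsedTime / duration) : 1;
    int i = static_cast<int>(t);   // number of completed iterations
    t -= i;                        // position within the current iteration
    if (alternates && (i & 1))
        t = 1 - t;                 // odd iterations run backwards
    return t;
}

int main()
{
    std::printf("%.2f\n", iterationProgress(2.5, 2.0, true));  // 0.75: second iteration, mirrored
    std::printf("%.2f\n", iterationProgress(2.5, 2.0, false)); // 0.25
    return 0;
}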
Code example #16
void ConfigureDiveComputer::progressEvent(int percent)
{
	emit progress(percent);
}
Code example #17
File: syncengine.cpp Project: guruz/client
void SyncEngine::slotDiscoveryJobFinished(int discoveryResult)
{
    // To clean the progress info
    emit folderDiscovered(false, QString());

    if (discoveryResult < 0 ) {
        handleSyncError(_csync_ctx, "csync_update");
        return;
    }
    qDebug() << "<<#### Discovery end #################################################### " << _stopWatch.addLapTime(QLatin1String("Discovery Finished"));

    // Sanity check
    if (!_journal->isConnected()) {
        qDebug() << "Bailing out, DB failure";
        emit csyncError(tr("Cannot open the sync journal"));
        finalize();
        return;
    } else {
        // Commits a possibly existing (should not though) transaction and starts a new one for the propagate phase
        _journal->commitIfNeededAndStartNewTransaction("Post discovery");
    }

    if( csync_reconcile(_csync_ctx) < 0 ) {
        handleSyncError(_csync_ctx, "csync_reconcile");
        return;
    }

    _stopWatch.addLapTime(QLatin1String("Reconcile Finished"));

    _progressInfo = Progress::Info();

    _hasNoneFiles = false;
    _hasRemoveFile = false;
    bool walkOk = true;
    _seenFiles.clear();

    if( csync_walk_local_tree(_csync_ctx, &treewalkLocal, 0) < 0 ) {
        qDebug() << "Error in local treewalk.";
        walkOk = false;
    }
    if( walkOk && csync_walk_remote_tree(_csync_ctx, &treewalkRemote, 0) < 0 ) {
        qDebug() << "Error in remote treewalk.";
    }

    if (_csync_ctx->remote.root_perms) {
        _remotePerms[QLatin1String("")] = _csync_ctx->remote.root_perms;
        qDebug() << "Permissions of the root folder: " << _remotePerms[QLatin1String("")];
    }

    // The map was used for merging trees, convert it to a list:
    _syncedItems = _syncItemMap.values().toVector();

    // Adjust the paths for the renames.
    for (SyncFileItemVector::iterator it = _syncedItems.begin();
            it != _syncedItems.end(); ++it) {
        it->_file = adjustRenamedPath(it->_file);
    }

    // Sort items per destination
    std::sort(_syncedItems.begin(), _syncedItems.end());

    // make sure everything is allowed
    checkForPermission();

    // To announce the beginning of the sync
    emit aboutToPropagate(_syncedItems);
    _progressInfo._completedFileCount = ULLONG_MAX; // indicate the start with max
    emit transmissionProgress(_progressInfo);
    _progressInfo._completedFileCount = 0;

    if (!_hasNoneFiles && _hasRemoveFile) {
        qDebug() << Q_FUNC_INFO << "All the files are going to be changed, asking the user";
        bool cancel = false;
        emit aboutToRemoveAllFiles(_syncedItems.first()._direction, &cancel);
        if (cancel) {
            qDebug() << Q_FUNC_INFO << "Abort sync";
            finalize();
            return;
        }
    }

    ne_session_s *session = 0;
    // that call to set property actually is a get which will return the session
    csync_set_module_property(_csync_ctx, "get_dav_session", &session);
    Q_ASSERT(session);

    // post update phase script: allow to tweak stuff by a custom script in debug mode.
    if( !qgetenv("OWNCLOUD_POST_UPDATE_SCRIPT").isEmpty() ) {
#ifndef NDEBUG
        QString script = qgetenv("OWNCLOUD_POST_UPDATE_SCRIPT");

        qDebug() << "OOO => Post Update Script: " << script;
        QProcess::execute(script.toUtf8());
#else
    qDebug() << "**** Attention: POST_UPDATE_SCRIPT installed, but not executed because compiled with NDEBUG";
#endif
    }

    // do a database commit
    _journal->commit("post treewalk");

    _propagator = QSharedPointer<OwncloudPropagator>(
        new OwncloudPropagator (session, _localPath, _remoteUrl, _remotePath, _journal, &_thread));
    connect(_propagator.data(), SIGNAL(completed(SyncFileItem)),
            this, SLOT(slotJobCompleted(SyncFileItem)));
    connect(_propagator.data(), SIGNAL(progress(SyncFileItem,quint64)),
            this, SLOT(slotProgress(SyncFileItem,quint64)));
    connect(_propagator.data(), SIGNAL(adjustTotalTransmissionSize(qint64)), this, SLOT(slotAdjustTotalTransmissionSize(qint64)));
    connect(_propagator.data(), SIGNAL(finished()), this, SLOT(slotFinished()), Qt::QueuedConnection);

    // apply the network limits to the propagator
    setNetworkLimits(_uploadLimit, _downloadLimit);

    deleteStaleDownloadInfos();
    deleteStaleUploadInfos();
    deleteStaleBlacklistEntries();
    _journal->commit("post stale entry removal");

    // Emit the started signal only after the propagator has been set up.
    if (_needsUpdate)
        emit(started());

    _propagator->start(_syncedItems);
}
Code example #18
File: scene.cpp Project: acpa2691/cs348b
// Scene Methods
void Scene::Render() {
	/*if(!Spectrum::SpectrumTest())
	{
		printf("FAILED spectrum unit test. NO rendering allowed.\n");
		return;
	}else{
		printf("PASSED spectrum unit test. YES!\n");
	}*/
	// Allocate and initialize _sample_
	Sample *sample = new Sample(surfaceIntegrator,
	                            volumeIntegrator,
	                            this);
	// Allow integrators to do pre-processing for the scene
	surfaceIntegrator->Preprocess(this);
	volumeIntegrator->Preprocess(this);
	camera->AutoFocus(this);
	// Trace rays: The main loop
	ProgressReporter progress(sampler->TotalSamples(), "Rendering");
	while (sampler->GetNextSample(sample)) {
		// Find camera ray for _sample_
		RayDifferential ray;
		float rayWeight = camera->GenerateRay(*sample, &ray);
		// Generate ray differentials for camera ray
		++(sample->imageX);
		float wt1 = camera->GenerateRay(*sample, &ray.rx);
		--(sample->imageX);
		++(sample->imageY);
		float wt2 = camera->GenerateRay(*sample, &ray.ry);
		if (wt1 > 0 && wt2 > 0) ray.hasDifferentials = true;
		--(sample->imageY);
		// Evaluate radiance along camera ray
		float alpha = 1.f;//initialized to count rayWeight 0 as opaque black
		Spectrum Ls = 0.f;
		if (rayWeight > 0.f)
		{
		  Ls = rayWeight * Li(ray, sample, &alpha);
		 // Ls = Li(ray, sample, &alpha);
			//printf("Li Value: ");
			//Ls.printSelf();
		}
		// Issue warning if unexpected radiance value returned
		if (Ls.IsNaN()) {
			Error("Not-a-number radiance value returned "
		          "for image sample.  Setting to black.");
			Ls = Spectrum(0.f);
			//printf("NAN ALERT\n");
		}
		else if (Ls.y() < -1e-5) {
			Error("Negative luminance value, %g, returned "
		          "for image sample.  Setting to black.", Ls.y());
			Ls = Spectrum(0.f);
			//printf("NEGATIVE LUMINANCE ALERT\n");
		}
		else if (isinf(Ls.y())) {
			Error("Infinite luminance value returned "
		          "for image sample.  Setting to black.");
			Ls = Spectrum(0.f);
			//printf("INFINITE LUMINANCE ALERT\n");
		}
		// Add sample contribution to image
		camera->film->AddSample(*sample, ray, Ls, alpha);
		// Free BSDF memory from computing image sample value
		BSDF::FreeAll();
		// Report rendering progress
		static StatsCounter cameraRaysTraced("Camera", "Camera Rays Traced");
		++cameraRaysTraced;
		progress.Update();
	}
	// Clean up after rendering and store final image
	delete sample;
	progress.Done();
	camera->film->WriteImage();
}
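As a side note, the NaN / negative / infinite luminance guard in the loop above can be isolated into a small helper. The following is a minimal standalone sketch; the RGB struct is a simplified stand-in for the renderer's Spectrum class (an assumption, not pbrt's API), and only the validation pattern itself is taken from the code above.

#include <cmath>
#include <cstdio>

// Simplified RGB stand-in for the renderer's Spectrum type (assumption).
struct RGB {
    float r, g, b;
    float y() const { return 0.2126f * r + 0.7152f * g + 0.0722f * b; }
    bool isNaN() const { return std::isnan(r) || std::isnan(g) || std::isnan(b); }
};

// Replace invalid radiance values with black, warning as the loop above does.
static RGB sanitizeRadiance(RGB Ls) {
    if (Ls.isNaN()) {
        std::fprintf(stderr, "Not-a-number radiance value returned for image sample. Setting to black.\n");
        return RGB{0.f, 0.f, 0.f};
    }
    if (Ls.y() < -1e-5f) {
        std::fprintf(stderr, "Negative luminance value, %g, returned for image sample. Setting to black.\n", Ls.y());
        return RGB{0.f, 0.f, 0.f};
    }
    if (std::isinf(Ls.y())) {
        std::fprintf(stderr, "Infinite luminance value returned for image sample. Setting to black.\n");
        return RGB{0.f, 0.f, 0.f};
    }
    return Ls;
}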
Code Example #19
0
File: csyncthread.cpp Project: hlarcher/mirall
void CSyncThread::startSync()
{
    if (!_syncMutex.tryLock()) {
        qDebug() << Q_FUNC_INFO << "WARNING: Another sync seems to be running. Not starting a new one.";
        return;
    }

    if( ! _csync_ctx ) {
        qDebug() << "XXXXXXXXXXXXXXXX FAIL: do not have csync_ctx!";
    }
    qDebug() << Q_FUNC_INFO << "Sync started";


    qDebug() << "starting to sync " << qApp->thread() << QThread::currentThread();
    _syncedItems.clear();

    _mutex.lock();
    _needsUpdate = false;
    if (!_abortRequested.fetchAndAddRelease(0)) {
        csync_resume(_csync_ctx);
    }
    _mutex.unlock();


    // maybe move this somewhere else where it can influence a running sync?
    MirallConfigFile cfg;

    int fileRecordCount = 0;
    if (!_journal->exists()) {
        qDebug() << "=====sync looks new (no DB exists), activating recursive PROPFIND if csync supports it";
        bool no_recursive_propfind = false;
        csync_set_module_property(_csync_ctx, "no_recursive_propfind", &no_recursive_propfind);
    } else if ((fileRecordCount = _journal->getFileRecordCount()) < 50) {
        qDebug() << "=====sync DB has only" << fileRecordCount << "items, enable recursive PROPFIND if csync supports it";
        bool no_recursive_propfind = false;
        csync_set_module_property(_csync_ctx, "no_recursive_propfind", &no_recursive_propfind);
    } else {
        qDebug() << "=====sync with existing DB";
    }

    csync_set_module_property(_csync_ctx, "csync_context", _csync_ctx);
    csync_set_userdata(_csync_ctx, this);
    // TODO: This should be a part of this method, but we don't have
    // any way to get the "session_key" module property from csync.
    // If we had it, we could keep this code here and remove it from
    // the AbstractCredentials implementations.
    cfg.getCredentials()->syncContextPreStart(_csync_ctx);
    // if (_lastAuthCookies.length() > 0) {
    //     // Stuff cookies inside csync, then we can avoid the intermediate HTTP 401 reply
    //     // when https://github.com/owncloud/core/pull/4042 is merged.
    //     QString cookiesAsString;
    //     foreach(QNetworkCookie c, _lastAuthCookies) {
    //         cookiesAsString += c.name();
    //         cookiesAsString += '=';
    //         cookiesAsString += c.value();
    //         cookiesAsString += "; ";
    //     }
    //     csync_set_module_property(_csync_ctx, "session_key", cookiesAsString.to
    // }

    // csync_set_auth_callback( _csync_ctx, getauth );
    csync_set_log_callback( csyncLogCatcher );
    csync_set_log_level( 11 );

    _syncTime.start();

    QElapsedTimer updateTime;
    updateTime.start();
    qDebug() << "#### Update start #################################################### >>";

    if( csync_update(_csync_ctx) < 0 ) {
        handleSyncError(_csync_ctx, "csync_update");
        return;
    }
    qDebug() << "<<#### Update end #################################################### " << updateTime.elapsed();

    if( csync_reconcile(_csync_ctx) < 0 ) {
        handleSyncError(_csync_ctx, "csync_reconcile");
        return;
    }

    _progressInfo = Progress::Info();

    _hasFiles = false;
    bool walkOk = true;
    if( csync_walk_local_tree(_csync_ctx, &treewalkLocal, 0) < 0 ) {
        qDebug() << "Error in local treewalk.";
        walkOk = false;
    }
    if( walkOk && csync_walk_remote_tree(_csync_ctx, &treewalkRemote, 0) < 0 ) {
        qDebug() << "Error in remote treewalk.";
    }

    // Adjust the paths for the renames.
    for (SyncFileItemVector::iterator it = _syncedItems.begin();
            it != _syncedItems.end(); ++it) {
        it->_file = adjustRenamedPath(it->_file);
    }

    qSort(_syncedItems);

    if (!_hasFiles && !_syncedItems.isEmpty()) {
        qDebug() << Q_FUNC_INFO << "All the files are going to be removed, asking the user";
        bool cancel = false;
        emit aboutToRemoveAllFiles(_syncedItems.first()._dir, &cancel);
        if (cancel) {
            qDebug() << Q_FUNC_INFO << "Abort sync";
            return;
        }
    }

    if (_needsUpdate)
        emit(started());

    ne_session_s *session = 0;
    // this "set property" call is actually a get that returns the DAV session
    csync_set_module_property(_csync_ctx, "get_dav_session", &session);
    Q_ASSERT(session);

    _propagator.reset(new OwncloudPropagator (session, _localPath, _remotePath,
                      _journal, &_abortRequested));
    connect(_propagator.data(), SIGNAL(completed(SyncFileItem)),
            this, SLOT(transferCompleted(SyncFileItem)), Qt::QueuedConnection);
    connect(_propagator.data(), SIGNAL(progress(Progress::Kind,QString,quint64,quint64)),
            this, SLOT(slotProgress(Progress::Kind,QString,quint64,quint64)));
    _iterator = 0;

    int downloadLimit = 0;
    if (cfg.useDownloadLimit()) {
        downloadLimit = cfg.downloadLimit() * 1000;
    }
    _propagator->_downloadLimit = downloadLimit;

    int uploadLimit = -75; // 75%
    int useUpLimit = cfg.useUploadLimit();
    if ( useUpLimit >= 1) {
        uploadLimit = cfg.uploadLimit() * 1000;
    } else if (useUpLimit == 0) {
        uploadLimit = 0;
    }
    _propagator->_uploadLimit = uploadLimit;

    slotProgress(Progress::StartSync, QString(), 0, 0);

    startNextTransfer();
}
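The bandwidth-limit values assigned to the propagator above follow a small convention: 0 means unlimited, a positive value is an absolute limit derived from the configured KB/s, and a negative value means a percentage of available bandwidth (the default -75 stands for 75%). The sketch below factors that convention into a helper; the LimitConfig struct and its fields are hypothetical stand-ins for MirallConfigFile, not the project's real API.

// Encoding visible in the code above: 0 = unlimited, >0 = bytes/s (KB/s * 1000),
// <0 = percentage of available bandwidth (-75 == throttle to 75%).
struct LimitConfig {
    int useUploadLimit;    // <0: percentage mode, 0: off, >=1: absolute KB/s
    int uploadLimitKBs;
    bool useDownloadLimit;
    int downloadLimitKBs;
};

static int effectiveUploadLimit(const LimitConfig &cfg) {
    if (cfg.useUploadLimit >= 1)
        return cfg.uploadLimitKBs * 1000;  // absolute limit in bytes/s
    if (cfg.useUploadLimit == 0)
        return 0;                          // unlimited
    return -75;                            // default: 75% of available bandwidth
}

static int effectiveDownloadLimit(const LimitConfig &cfg) {
    return cfg.useDownloadLimit ? cfg.downloadLimitKBs * 1000 : 0;
}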
Code Example #20
0
File: DataIO_OpenDx.cpp Project: SAMAN-64/cpptraj
// DataIO_OpenDx::LoadGrid()
int DataIO_OpenDx::LoadGrid(const char* filename, DataSet& ds)
{
  // TODO: This may need to be changed if new 3D types introduced.
  DataSet_GridFlt& grid = static_cast<DataSet_GridFlt&>( ds );
  // Open file
  BufferedLine infile;
  if (infile.OpenFileRead(filename)) return 1;
  // Skip comments
  std::string line = infile.GetLine();
  while (!line.empty() && line[0] == '#') {
    mprintf("\t%s", line.c_str());
    line = infile.GetLine();
  }
  if (line.empty()) {
    mprinterr("Error: Unexpected EOF in DX file %s\n", filename);
    return 1;
  }
  // object 1 class gridpositions counts nx ny nz
  int nx, ny, nz;
  if (sscanf(line.c_str(), "object 1 class gridpositions counts %d %d %d",
             &nx, &ny, &nz) != 3)
  {
    mprinterr("Error: Reading grid counts from DX file %s\n", filename);
    return 1;
  }
  // origin xmin ymin zmin 
  double oxyz[3];
  line = infile.GetLine();
  if (sscanf(line.c_str(), "origin %lg %lg %lg", oxyz, oxyz+1, oxyz+2) != 3) {
    mprinterr("Error: Reading origin line from DX file %s\n", filename);
    return 1;
  }
  // 3x 'delta hx hy hz'
  double dxyz[3];
  Matrix_3x3 delta(0.0);
  bool isNonortho = false;
  int midx = 0;
  for (int i = 0; i < 3; i++, midx += 3) {
    line = infile.GetLine();
    if (sscanf(line.c_str(), "delta %lg %lg %lg", dxyz, dxyz+1, dxyz+2) != 3) {
      mprinterr("Error: Reading delta line from DX file %s\n", filename);
      return 1;
    }
    // Check that only 1 of the 3 values is non-zero. Otherwise non-ortho.
    if (dxyz[i] != (dxyz[0] + dxyz[1] + dxyz[2]))
      isNonortho = true;
    delta[midx  ] = dxyz[0];
    delta[midx+1] = dxyz[1];
    delta[midx+2] = dxyz[2];
  }
  // object 2 class gridconnections counts nx ny nz
  int nxyz[3];
  line = infile.GetLine();
  if (sscanf(line.c_str(), "object 2 class gridconnections counts %d %d %d",
             nxyz, nxyz+1, nxyz+2) != 3)
  {
    mprinterr("Error: Reading grid connections from DX file %s\n", filename);
    return 1;
  }
  // Sanity check for conflicting grid dimensions
  if (nxyz[0] != nx || nxyz[1] != ny || nxyz[2] != nz) {
    mprinterr("Error: Conflicting grid dimensions in input DX density file %s.\n",
              filename);
    mprinterr("Error: Grid positions: %d %d %d\n", nx, ny, nz);
    mprinterr("Error: Grid connections: %d %d %d\n", nxyz[0], nxyz[1], nxyz[2]);
    return 1;
  }
  // object 3 class array type <type> rank <r> times <i>
  // This line describes whether data will be in binary or ascii format.
  line = infile.GetLine();
  if (line.compare(0, 8, "object 3") != 0) {
    mprinterr("Error: DX file %s; expected 'object 3 ...', got [%s]\n",
              filename, line.c_str());
    return 1;
  }
  if (line.find("binary") != std::string::npos) {
    mprinterr("Error: DX file %s; binary DX files not yet supported.\n", filename);
    return 1;
  }
  // Allocate Grid from dims, origin, and spacing
  int err = 0;
  if (isNonortho) {
    // Create unit cell from delta and bins.
    delta[0] *= (double)nx; delta[1] *= (double)nx; delta[2] *= (double)nx;
    delta[3] *= (double)ny; delta[4] *= (double)ny; delta[5] *= (double)ny;
    delta[6] *= (double)nz; delta[7] *= (double)nz; delta[8] *= (double)nz;
    err = grid.Allocate_N_O_Box(nx,ny,nz, Vec3(oxyz), Box(delta));
  } else
    err = grid.Allocate_N_O_D(nx,ny,nz, Vec3(oxyz), Vec3(delta[0],delta[4],delta[8]));
  if (err != 0) { 
    mprinterr("Error: Could not allocate grid.\n");
    return 1;
  }
  grid.GridInfo();
  // Read in data
  size_t gridsize = grid.Size();
  mprintf("\tReading in %zu data elements from DX file.\n", gridsize); 
  size_t ndata = 0;
  ProgressBar progress( gridsize );
  while (ndata < gridsize) {
    if (infile.Line() == 0) {
      mprinterr("Error: Unexpected EOF hit in %s\n", filename);
      return 1;
    }
    int nTokens = infile.TokenizeLine(" \t");
    for (int j = 0; j < nTokens; j++) {
      if (ndata >= gridsize) {
        mprintf("Warning: Too many grid points found. Only reading %zu grid points.\n", gridsize);
        mprintf("Warning: Check that data region ends with a newline.\n");
        break;
      }
      grid[ndata++] = (float)atof(infile.NextToken());
    }
    progress.Update( ndata );
  }
  // Set dimensions
  // FIXME: This should be integrated with allocation
  //grid.SetDim(Dimension::X, Dimension(oxyz[0], dx, nx, "X"));
  //grid.SetDim(Dimension::Y, Dimension(oxyz[1], dy, ny, "Y"));
  //grid.SetDim(Dimension::Z, Dimension(oxyz[2], dz, nz, "Z"));
  return 0;
}
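The header parsing above is driven by a few fixed sscanf patterns. The following standalone sketch applies the same patterns to an in-memory example header instead of a BufferedLine; the tiny main() and the hard-coded header text are illustrative only.

#include <cstdio>
#include <sstream>
#include <string>

int main() {
    std::istringstream dx(
        "# example DX header\n"
        "object 1 class gridpositions counts 4 4 4\n"
        "origin 0.0 0.0 0.0\n"
        "delta 0.5 0.0 0.0\n"
        "delta 0.0 0.5 0.0\n"
        "delta 0.0 0.0 0.5\n");
    std::string line;
    // Skip leading comment lines, as LoadGrid() does.
    while (std::getline(dx, line) && !line.empty() && line[0] == '#') {}
    int nx, ny, nz;
    if (std::sscanf(line.c_str(), "object 1 class gridpositions counts %d %d %d",
                    &nx, &ny, &nz) != 3)
        return 1;
    double oxyz[3];
    std::getline(dx, line);
    if (std::sscanf(line.c_str(), "origin %lg %lg %lg", oxyz, oxyz + 1, oxyz + 2) != 3)
        return 1;
    double delta[9];
    for (int i = 0; i < 3; ++i) {  // three 'delta hx hy hz' rows
        std::getline(dx, line);
        if (std::sscanf(line.c_str(), "delta %lg %lg %lg",
                        delta + 3 * i, delta + 3 * i + 1, delta + 3 * i + 2) != 3)
            return 1;
    }
    std::printf("grid %d x %d x %d, origin (%g,%g,%g), spacing (%g,%g,%g)\n",
                nx, ny, nz, oxyz[0], oxyz[1], oxyz[2], delta[0], delta[4], delta[8]);
    return 0;
}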
Code Example #21
0
File: loaderthread.cpp Project: digideskio/emsload
void LoaderThread::run()
{
	qint64 currentmsec = QDateTime::currentMSecsSinceEpoch();
	if (operation == "rip")
	{
		serialMonitor = new SerialMonitor();
		serialMonitor->openPort(m_portName);
		if (!serialMonitor->verifySM())
		{
			//Timed out
			serialMonitor->closePort();
			emit error("Unable to verify SM mode");
			delete serialMonitor;
			return;
		}

		//We're in SM mode! Let's do stuff.


		QFile output(m_fwFileName);
		if (!output.open(QIODevice::ReadWrite | QIODevice::Truncate))
		{
			serialMonitor->closePort();
			emit error("Unable to open file for writing");
			delete serialMonitor;
			return; // nothing to rip to without an output file
		}

		for (int i=0xE0;i<0xFF;i++)
		{

			//Select page
			serialMonitor->selectPage(i);

			for (int j=0x8000;j<0xBFFF;j+=16)
			{
				QString record;

				if (serialMonitor->readBlockToS19(i,j,16,&record))
				{
					if (record == "")
					{
						//Blank record
					}
					else
					{
                        output.write(record.toLatin1());
						output.flush();
					}
				}
				//msleep(1);
			}
		}
		serialMonitor->closePort();
		emit done(QDateTime::currentMSecsSinceEpoch() - currentmsec);
		qDebug() << "Current operation completed in:" << (QDateTime::currentMSecsSinceEpoch() - currentmsec) / 1000.0 << "seconds";
		delete serialMonitor;
		return;
	}
	else if (operation == "load")
	{
		serialMonitor = new SerialMonitor();

		if (!serialMonitor->openPort(m_portName))
		{
			qDebug() << "Unable to open port";
			emit error("Unable to open port. Ensure the device is connected, and you have the proper permissions to open the device");
			delete serialMonitor;
			return;
		}

		if (!serialMonitor->isStreaming()) /* See if the ecu is NOT in SM mode */
		{
			serialMonitor->jumpToSM(); /* Tell it to get with the program */
		}
		else
		{
			if (!serialMonitor->verifySM()) /* we didn't detect streaming, but it may NOT be in SM so check it */
			{
				serialMonitor->jumpToSM();
			}
		}
		if (!serialMonitor->verifySM()) /* IF we didn't make it, give up... */
		{
			//Timed out
			serialMonitor->closePort();
			//qDebug() << "Unable to open port";
			emit error("Unable to verify SM mode. Ensure you have the correct device selected, and it is in SM mode.");
			delete serialMonitor;
			return;
		}

		//We're in SM mode! Let's do stuff.

		unsigned char currpage = 0;
		int totalerror = 0;
		for (int i=0;i<m_s19File->getCompactRecordCount();i++)
		{

			emit progress(i*100,m_s19File->getCompactRecordCount()*100);
			unsigned int address = m_s19File->getCompactRecord(i).first;
			unsigned char newpage = (address >> 16) & 0xFF;

			if (newpage != currpage)
			{
				currpage = newpage;
				qDebug() << "Selecting page:" << currpage;
				serialMonitor->selectPage(currpage);
				serialMonitor->eraseBlock();
			}

			int size = m_s19File->getCompactRecord(i).second.size();
			for (int j=0;j<m_s19File->getCompactRecord(i).second.size();j+=252)
			{
				emit progress((i*100) + (((double)j/(double)size) * 100.0),m_s19File->getCompactRecordCount()*100);
				if (totalerror >= 100)
				{
					serialMonitor->closePort();
					emit error("Too many errors while loading firmware. Make sure you have a good connection to the device and try again");
					delete serialMonitor;
					return;
				}
				int size = (j+252< m_s19File->getCompactRecord(i).second.size()) ? 252 : (j - m_s19File->getCompactRecord(i).second.size());
				if (!serialMonitor->writeBlock((address & 0xFFFF) + j,m_s19File->getCompactRecord(i).second.mid(j,size)))
				{
					//Bad block. Retry.
					i--;
					totalerror++;
					continue;

				}
				if (!serialMonitor->verifyBlock((address & 0xFFFF) + j,m_s19File->getCompactRecord(i).second.mid(j,size)))
				{
					qDebug() << "Bad verification of written data. Go back one and try again";
					i--;
					totalerror++;
					continue;

				}

			}
		}
		serialMonitor->sendReset();
		serialMonitor->closePort();
		emit done(QDateTime::currentMSecsSinceEpoch() - currentmsec);
		qDebug() << "Current operation completed in:" << (QDateTime::currentMSecsSinceEpoch() - currentmsec) / 1000.0 << "seconds";
		delete serialMonitor;
		return;
	}
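The "load" branch above splits each compact record into blocks of at most 252 bytes, writes and verifies each block over the serial link, and gives up once a shared error budget is exhausted. The sketch below is a simplified, per-block retry version of that pattern (the original retries the whole record); writeBlock and verifyBlock are hypothetical callbacks standing in for the SerialMonitor calls.

#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

// Write a record in <=252-byte blocks, verifying each one; abort after maxErrors failures.
bool loadRecord(const std::vector<unsigned char> &record, unsigned int baseAddress,
                const std::function<bool(unsigned int, const unsigned char *, std::size_t)> &writeBlock,
                const std::function<bool(unsigned int, const unsigned char *, std::size_t)> &verifyBlock,
                int maxErrors = 100) {
    int errors = 0;
    for (std::size_t offset = 0; offset < record.size();) {
        if (errors >= maxErrors)
            return false;                        // too many failures, give up
        std::size_t len = std::min<std::size_t>(252, record.size() - offset);
        if (!writeBlock(baseAddress + offset, record.data() + offset, len) ||
            !verifyBlock(baseAddress + offset, record.data() + offset, len)) {
            ++errors;                            // bad write or verification, retry this block
            continue;
        }
        offset += len;                           // block accepted, advance
    }
    return true;
}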
Code Example #22
0
void UI_ReduceSignalsWindow::StartConversion()
{
  int i, j, k, n,
      new_edfsignals,
      datarecords=0,
      annot_smp_per_record,
      annot_recordsize,
      timestamp_digits=0,
      timestamp_decimals=0,
      annot_len,
      tallen=0,
      len,
      annot_cnt,
      annots_per_datrec=0,
      smplrt,
      tmp,
      val,
      progress_steps,
      datrecs_processed;

  char *readbuf=NULL,
       scratchpad[256];

  long long new_starttime,
            time_diff,
            onset_diff,
            taltime,
            l_temp,
            endtime=0,
            l_tmp;

  struct date_time_struct dts;

  struct annotationblock *new_annot_list=NULL,
                         *root_new_annot_list=NULL,
                         *annot_list=NULL;

  union {
          unsigned int one;
          signed int one_signed;
          unsigned short two[2];
          signed short two_signed[2];
          unsigned char four[4];
        } var;

  QProgressDialog progress("Processing file...", "Abort", 0, 1);
  progress.setWindowModality(Qt::WindowModal);
  progress.setMinimumDuration(200);


  pushButton3->setEnabled(false);
  pushButton4->setEnabled(false);
  pushButton5->setEnabled(false);
  pushButton6->setEnabled(false);
  spinBox1->setEnabled(false);
  spinBox2->setEnabled(false);
  spinBox3->setEnabled(false);
  spinBox4->setEnabled(false);
  radioButton1->setEnabled(false);
  radioButton2->setEnabled(false);
  label2->setEnabled(false);
  label3->setEnabled(false);

  if(edfhdr==NULL)
  {
    return;
  }

  if(file_num < 0)
  {
    return;
  }

  new_edfsignals = 0;

  annot_smp_per_record = 0;

  annot_cnt = 0;

  aa_filter_order = spinBox4->value() - 1;

  time_diff = (long long)(spinBox1->value() - 1) * edfhdr->long_data_record_duration;

  taltime = (time_diff + edfhdr->starttime_offset) % TIME_DIMENSION;

  endtime = (long long)(spinBox2->value() - (spinBox1->value() - 1)) * edfhdr->long_data_record_duration + taltime;

  for(i=0; i<edfhdr->edfsignals; i++)
  {
    if(!edfhdr->edfparam[i].annotation)
    {
      if(((QCheckBox *)(SignalsTablewidget->cellWidget(i, 0)))->checkState()==Qt::Checked)
      {
        signalslist[new_edfsignals] = i;

        dividerlist[new_edfsignals] = ((QComboBox *)(SignalsTablewidget->cellWidget(i, 1)))->itemData(((QComboBox *)(SignalsTablewidget->cellWidget(i, 1)))->currentIndex()).toInt();

        new_edfsignals++;
      }
    }
  }

  datarecords = spinBox2->value() - spinBox1->value() + 1;

  if(edfhdr->edfplus || edfhdr->bdfplus)
  {
    timestamp_decimals = get_tal_timestamp_decimal_cnt(edfhdr);
    if(timestamp_decimals < 0)
    {
      showpopupmessage("Error", "Internal error, get_tal_timestamp_decimal_cnt(");
      goto END_1;
    }

    timestamp_digits = get_tal_timestamp_digit_cnt(edfhdr);
    if(timestamp_digits < 0)
    {
      showpopupmessage("Error", "Internal error, get_tal_timestamp_digit_cnt(");
      goto END_1;
    }

    annot_list = mainwindow->annotationlist[file_num];

    while(annot_list != NULL)
    {
      l_temp = annot_list->onset - time_diff;

      if((l_temp >= 0LL) && (l_temp <= endtime))
      {
        annot_cnt++;

        edfplus_annotation_add_copy(&new_annot_list, annot_list);
      }

      annot_list = annot_list->next_annotation;
    }

    annot_list = new_annot_list;

    root_new_annot_list = new_annot_list;

    new_starttime = edfhdr->utc_starttime + ((time_diff + edfhdr->starttime_offset) / TIME_DIMENSION);

    onset_diff = (new_starttime - edfhdr->utc_starttime) * TIME_DIMENSION;

    while(annot_list != NULL)
    {
      annot_list->onset -= onset_diff;

      annot_list = annot_list->next_annotation;
    }

    edfplus_annotation_sort(&new_annot_list);

    annots_per_datrec = annot_cnt / datarecords;

    if(annot_cnt % datarecords)
    {
      annots_per_datrec++;
    }

    annot_len = get_max_annotation_strlen(&new_annot_list);

    if(!annot_cnt)
    {
      annots_per_datrec = 0;
    }

    annot_recordsize = (annot_len * annots_per_datrec) + timestamp_digits + timestamp_decimals + 4;

    if(timestamp_decimals)
    {
      annot_recordsize++;
    }

    if(edfhdr->edf)
    {
      annot_smp_per_record = annot_recordsize / 2;

      if(annot_recordsize % annot_smp_per_record)
      {
        annot_smp_per_record++;

        annot_recordsize = annot_smp_per_record * 2;
      }
    }
    else
    {
      annot_smp_per_record = annot_recordsize / 3;

      if(annot_recordsize % annot_smp_per_record)
      {
        annot_smp_per_record++;

        annot_recordsize = annot_smp_per_record * 3;
      }
    }
  }
  else
  {
    annot_smp_per_record = 0;

    annot_recordsize = 0;
  }

  readbuf = (char *)malloc(edfhdr->recordsize);
  if(readbuf==NULL)
  {
    showpopupmessage("Error", "Malloc error, (readbuf).");
    goto END_2;
  }
///////////////////////////////////////////////////////////////////

  for(i=0; i<new_edfsignals; i++)
  {
    if(dividerlist[i] > 1)
    {
      for(j=0; j<aa_filter_order; j++)
      {
        filterlist[i][j] = create_ravg_filter(1, dividerlist[i]);

        if(filterlist[i][j] == NULL)
        {
          showpopupmessage("Error", "Malloc error, (create_ravg_filter()).");

          goto END_3;
        }
      }
    }
  }
///////////////////////////////////////////////////////////////////

  outputpath[0] = 0;
  if(recent_savedir[0]!=0)
  {
    strcpy(outputpath, recent_savedir);
    strcat(outputpath, "/");
  }
  len = strlen(outputpath);
  get_filename_from_path(outputpath + len, inputpath, MAX_PATH_LENGTH - len);
  remove_extension_from_filename(outputpath);
  if(edfhdr->edf)
  {
    strcat(outputpath, "_reduced.edf");

    strcpy(outputpath, QFileDialog::getSaveFileName(0, "Save file", QString::fromLocal8Bit(outputpath), "EDF files (*.edf *.EDF)").toLocal8Bit().data());
  }
  else
  {
    strcat(outputpath, "_reduced.bdf");

    strcpy(outputpath, QFileDialog::getSaveFileName(0, "Save file", QString::fromLocal8Bit(outputpath), "BDF files (*.bdf *.BDF)").toLocal8Bit().data());
  }

  if(!strcmp(outputpath, ""))
  {
    goto END_3;
  }

  get_directory_from_path(recent_savedir, outputpath, MAX_PATH_LENGTH);

  if(mainwindow->file_is_opened(outputpath))
  {
    showpopupmessage("Reduce signals", "Error, selected file is in use.");
    goto END_3;
  }

  outputfile = fopeno(outputpath, "wb");
  if(outputfile==NULL)
  {
    showpopupmessage("Error", "Can not open outputfile for writing.");
    goto END_3;
  }

  new_starttime = edfhdr->utc_starttime + ((time_diff + edfhdr->starttime_offset) / TIME_DIMENSION);

  utc_to_date_time(new_starttime, &dts);

  rewind(inputfile);
  if(fread(scratchpad, 168, 1, inputfile)!=1)
  {
    showpopupmessage("Error", "Read error (1).");
    goto END_4;
  }

  if(edfhdr->edfplus || edfhdr->bdfplus)
  {
    if(scratchpad[98] != 'X')
    {
      sprintf(scratchpad + 98, "%02i-%s-%04i", dts.day, dts.month_str, dts.year);

      scratchpad[109] = ' ';
    }
  }

  if(fwrite(scratchpad, 168, 1, outputfile)!=1)
  {
    showpopupmessage("Error", "Write error (1).");
    goto END_4;
  }

  fprintf(outputfile, "%02i.%02i.%02i%02i.%02i.%02i",
          dts.day,
          dts.month,
          dts.year % 100,
          dts.hour,
          dts.minute,
          dts.second);

  if(edfhdr->edfplus || edfhdr->bdfplus)
  {
    fprintf(outputfile, "%-8i", new_edfsignals * 256 + 512);
  }
  else
  {
    fprintf(outputfile, "%-8i", new_edfsignals * 256 + 256);
  }
  if(edfhdr->edfplus)
  {
    fprintf(outputfile, "EDF+C");
    for(i=0; i<39; i++)
    {
      fputc(' ', outputfile);
    }
  }
  if(edfhdr->bdfplus)
  {
    fprintf(outputfile, "BDF+C");
    for(i=0; i<39; i++)
    {
      fputc(' ', outputfile);
    }
  }
  if((!edfhdr->edfplus) && (!edfhdr->bdfplus))
  {
    for(i=0; i<44; i++)
    {
      fputc(' ', outputfile);
    }
  }
  fprintf(outputfile, "%-8i", datarecords);
  snprintf(scratchpad, 256, "%f", edfhdr->data_record_duration);
  convert_trailing_zeros_to_spaces(scratchpad);
  if(scratchpad[7]=='.')
  {
    scratchpad[7] = ' ';
  }
  scratchpad[8] = 0;

  fprintf(outputfile, "%s", scratchpad);
  if(edfhdr->edfplus || edfhdr->bdfplus)
  {
    fprintf(outputfile, "%-4i", new_edfsignals + 1);
  }
  else
  {
    fprintf(outputfile, "%-4i", new_edfsignals);
  }

  for(i=0; i<new_edfsignals; i++)
  {
    fprintf(outputfile, "%s", edfhdr->edfparam[signalslist[i]].label);
  }
  if(edfhdr->edfplus)
  {
    fprintf(outputfile, "EDF Annotations ");
  }
  if(edfhdr->bdfplus)
  {
    fprintf(outputfile, "BDF Annotations ");
  }
  for(i=0; i<new_edfsignals; i++)
  {
    fprintf(outputfile, "%s", edfhdr->edfparam[signalslist[i]].transducer);
  }
  if(edfhdr->edfplus || edfhdr->bdfplus)
  {
    for(i=0; i<80; i++)
    {
      fputc(' ', outputfile);
    }
  }
  for(i=0; i<new_edfsignals; i++)
  {
    fprintf(outputfile, "%s", edfhdr->edfparam[signalslist[i]].physdimension);
  }
  if(edfhdr->edfplus || edfhdr->bdfplus)
  {
    for(i=0; i<8; i++)
    {
      fputc(' ', outputfile);
    }
  }
  for(i=0; i<new_edfsignals; i++)
  {
    snprintf(scratchpad, 256, "%f", edfhdr->edfparam[signalslist[i]].phys_min);
    convert_trailing_zeros_to_spaces(scratchpad);
    if(scratchpad[7]=='.')
    {
      scratchpad[7] = ' ';
    }
    scratchpad[8] = 0;
    fprintf(outputfile, "%s", scratchpad);
  }
  if(edfhdr->edfplus || edfhdr->bdfplus)
  {
    fprintf(outputfile, "-1      ");
  }
  for(i=0; i<new_edfsignals; i++)
  {
    snprintf(scratchpad, 256, "%f", edfhdr->edfparam[signalslist[i]].phys_max);
    convert_trailing_zeros_to_spaces(scratchpad);
    if(scratchpad[7]=='.')
    {
      scratchpad[7] = ' ';
    }
    scratchpad[8] = 0;
    fprintf(outputfile, "%s", scratchpad);
  }
  if(edfhdr->edfplus || edfhdr->bdfplus)
  {
    fprintf(outputfile, "1       ");
  }
  for(i=0; i<new_edfsignals; i++)
  {
    fprintf(outputfile, "%-8i", edfhdr->edfparam[signalslist[i]].dig_min);
  }
  if(edfhdr->edfplus)
  {
    fprintf(outputfile, "-32768  ");
  }
  if(edfhdr->bdfplus)
  {
    fprintf(outputfile, "-8388608");
  }
  for(i=0; i<new_edfsignals; i++)
  {
    fprintf(outputfile, "%-8i", edfhdr->edfparam[signalslist[i]].dig_max);
  }
  if(edfhdr->edfplus)
  {
    fprintf(outputfile, "32767   ");
  }
  if(edfhdr->bdfplus)
  {
    fprintf(outputfile, "8388607 ");
  }
  for(i=0; i<new_edfsignals; i++)
  {
    fprintf(outputfile, "%s", edfhdr->edfparam[signalslist[i]].prefilter);
  }
  if(edfhdr->edfplus || edfhdr->bdfplus)
  {
    for(i=0; i<80; i++)
    {
      fputc(' ', outputfile);
    }
  }
  for(i=0; i<new_edfsignals; i++)
  {
    fprintf(outputfile, "%-8i", edfhdr->edfparam[signalslist[i]].smp_per_record / dividerlist[i]);
  }
  if(edfhdr->edfplus || edfhdr->bdfplus)
  {
    fprintf(outputfile, "%-8i", annot_smp_per_record);
  }
  for(i=0; i<(new_edfsignals * 32); i++)
  {
   fputc(' ', outputfile);
  }
  if(edfhdr->edfplus || edfhdr->bdfplus)
  {
    for(i=0; i<32; i++)
    {
      fputc(' ', outputfile);
    }
  }
///////////////////////////////////////////////////////////////////

  progress.setRange(0, datarecords);
  progress.setValue(0);

  progress_steps = datarecords / 100;
  if(progress_steps < 1)
  {
    progress_steps = 1;
  }

  fseeko(inputfile, (long long)edfhdr->hdrsize + ((long long)(spinBox1->value() - 1) * (long long)edfhdr->recordsize), SEEK_SET);

  for(datrecs_processed=0; datrecs_processed<datarecords; datrecs_processed++)
  {
    if(!(datrecs_processed % progress_steps))
    {
      progress.setValue(datrecs_processed);

      qApp->processEvents();

      if(progress.wasCanceled() == true)
      {
        goto END_4;
      }
    }

    if(fread(readbuf, edfhdr->recordsize, 1, inputfile) != 1)
    {
      progress.reset();
      showpopupmessage("Error", "Read error (2).");
      goto END_4;
    }

    if(edfhdr->edf)
    {
      for(i=0; i<new_edfsignals; i++)
      {
        if(dividerlist[i] == 1)
        {
          smplrt = edfhdr->edfparam[signalslist[i]].smp_per_record;

          for(j=0; j<smplrt; j++)
          {
            fputc(readbuf[edfhdr->edfparam[signalslist[i]].buf_offset + (j * 2)], outputfile);
            if(fputc(readbuf[edfhdr->edfparam[signalslist[i]].buf_offset + (j * 2) + 1], outputfile)==EOF)
            {
              progress.reset();
              showpopupmessage("Error", "Write error (4).");
              goto END_4;
            }
          }
        }
        else
        {
          smplrt = edfhdr->edfparam[signalslist[i]].smp_per_record / dividerlist[i];

          for(j=0; j<smplrt; j++)
          {
            tmp = 0;

            for(k=0; k<dividerlist[i]; k++)
            {
              val = *(((signed short *)(readbuf + edfhdr->edfparam[signalslist[i]].buf_offset)) + (dividerlist[i] * j) + k);

              for(n=0; n<aa_filter_order; n++)
              {
                val = run_ravg_filter(val, filterlist[i][n]);
              }

              tmp += val;
            }

            tmp /= dividerlist[i];

            fputc(tmp & 0xff, outputfile);
            if(fputc((tmp >> 8) & 0xff, outputfile)==EOF)
            {
              progress.reset();
              showpopupmessage("Error", "Write error (4).");
              goto END_4;
            }
          }
        }
      }
    }
    else
    {
      for(i=0; i<new_edfsignals; i++)
      {
        if(dividerlist[i] == 1)
        {
          smplrt = edfhdr->edfparam[signalslist[i]].smp_per_record;

          for(j=0; j<smplrt; j++)
          {
            fputc(readbuf[edfhdr->edfparam[signalslist[i]].buf_offset + (j * 3)], outputfile);
            fputc(readbuf[edfhdr->edfparam[signalslist[i]].buf_offset + (j * 3) + 1], outputfile);
            if(fputc(readbuf[edfhdr->edfparam[signalslist[i]].buf_offset + (j * 3) + 2], outputfile)==EOF)
            {
              progress.reset();
              showpopupmessage("Error", "Write error (4).");
              goto END_4;
            }
          }
        }
        else
        {
          smplrt = edfhdr->edfparam[signalslist[i]].smp_per_record / dividerlist[i];

          for(j=0; j<smplrt; j++)
          {
            l_tmp = 0LL;

            for(k=0; k<dividerlist[i]; k++)
            {
              var.two[0] = *((unsigned short *)(readbuf + edfhdr->edfparam[signalslist[i]].buf_offset + (dividerlist[i] * j * 3) + (k * 3)));
              var.four[2] = *((unsigned char *)(readbuf + edfhdr->edfparam[signalslist[i]].buf_offset + (dividerlist[i] * j * 3) + (k * 3) + 2));

              if(var.four[2]&0x80)
              {
                var.four[3] = 0xff;
              }
              else
              {
                var.four[3] = 0x00;
              }

              for(n=0; n<aa_filter_order; n++)
              {
                var.one_signed = run_ravg_filter(var.one_signed, filterlist[i][n]);
              }

              l_tmp += var.one_signed;
            }

            l_tmp /= dividerlist[i];

            fputc(l_tmp & 0xff, outputfile);
            fputc((l_tmp >> 8) & 0xff, outputfile);
            if(fputc((l_tmp >> 16) & 0xff, outputfile)==EOF)
            {
              progress.reset();
              showpopupmessage("Error", "Write error (4).");
              goto END_4;
            }
          }
        }
      }
    }

    if(edfhdr->edfplus || edfhdr->bdfplus)
    {
      switch(timestamp_decimals)
      {
        case 0 : tallen = fprintf(outputfile, "+%i", (int)(taltime / TIME_DIMENSION));
                  break;
        case 1 : tallen = fprintf(outputfile, "+%i.%01i", (int)(taltime / TIME_DIMENSION), (int)((taltime % TIME_DIMENSION) / 1000000LL));
                  break;
        case 2 : tallen = fprintf(outputfile, "+%i.%02i", (int)(taltime / TIME_DIMENSION), (int)((taltime % TIME_DIMENSION) / 100000LL));
                  break;
        case 3 : tallen = fprintf(outputfile, "+%i.%03i", (int)(taltime / TIME_DIMENSION), (int)((taltime % TIME_DIMENSION) / 10000LL));
                  break;
        case 4 : tallen = fprintf(outputfile, "+%i.%04i", (int)(taltime / TIME_DIMENSION), (int)((taltime % TIME_DIMENSION) / 1000LL));
                  break;
        case 5 : tallen = fprintf(outputfile, "+%i.%05i", (int)(taltime / TIME_DIMENSION), (int)((taltime % TIME_DIMENSION) / 100LL));
                  break;
        case 6 : tallen = fprintf(outputfile, "+%i.%06i", (int)(taltime / TIME_DIMENSION), (int)((taltime % TIME_DIMENSION) / 10LL));
                  break;
        case 7 : tallen = fprintf(outputfile, "+%i.%07i", (int)(taltime / TIME_DIMENSION), (int)(taltime % TIME_DIMENSION));
                  break;
      }

      fputc(20, outputfile);
      fputc(20, outputfile);
      fputc(0, outputfile);

      tallen += 3;

      if(new_annot_list != NULL)
      {
        for(i=0; i<annots_per_datrec; i++)
        {
          if(new_annot_list != NULL)
          {
            len = snprintf(scratchpad, 256, "%+i.%07i",
            (int)(new_annot_list->onset / TIME_DIMENSION),
            (int)(new_annot_list->onset % TIME_DIMENSION));

            for(j=0; j<7; j++)
            {
              if(scratchpad[len - j - 1] != '0')
              {
                break;
              }
            }

            if(j)
            {
              len -= j;

              if(j == 7)
              {
                len--;
              }
            }

            if(fwrite(scratchpad, len, 1, outputfile) != 1)
            {
              progress.reset();
              showpopupmessage("Error", "Write error (5).");
              goto END_4;
            }

            tallen += len;

            if(new_annot_list->duration[0]!=0)
            {
              fputc(21, outputfile);
              tallen++;

              tallen += fprintf(outputfile, "%s", new_annot_list->duration);
            }

            fputc(20, outputfile);
            tallen++;

            tallen += fprintf(outputfile, "%s", new_annot_list->annotation);

            fputc(20, outputfile);
            fputc(0, outputfile);
            tallen += 2;

            new_annot_list = new_annot_list->next_annotation;
          }
        }
      }

      for(k=tallen; k<annot_recordsize; k++)
      {
        fputc(0, outputfile);
      }

      taltime += edfhdr->long_data_record_duration;
    }
  }
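In the BDF branch above, each sample is three little-endian bytes that are assembled into a signed value via the two[0]/four[2]/four[3] union manipulation, with 0xff or 0x00 written into the top byte for sign extension. The sketch below is a standalone equivalent of that step using explicit shifts; it is an illustration of the technique, not code from the project.

#include <cstdint>

// Assemble three little-endian bytes into a sign-extended 32-bit sample value.
static int32_t signExtend24(const unsigned char *p) {
    uint32_t v = static_cast<uint32_t>(p[0]) |
                 (static_cast<uint32_t>(p[1]) << 8) |
                 (static_cast<uint32_t>(p[2]) << 16);
    if (v & 0x800000u)        // sign bit of the 24-bit value is set
        v |= 0xFF000000u;     // extend into the top byte (the 0xff fill above)
    return static_cast<int32_t>(v);
}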
Code Example #23
0
void MetropolisRenderer::Render(const Scene *scene) {
    PBRT_MLT_STARTED_RENDERING();
    if (scene->lights.size() > 0) {
        int x0, x1, y0, y1;
        camera->film->GetPixelExtent(&x0, &x1, &y0, &y1);
        float t0 = camera->shutterOpen, t1 = camera->shutterClose;
        Distribution1D *lightDistribution = ComputeLightSamplingCDF(scene);

        if (directLighting != NULL) {
            PBRT_MLT_STARTED_DIRECTLIGHTING();
            // Compute direct lighting before Metropolis light transport
            if (nDirectPixelSamples > 0) {
                LDSampler sampler(x0, x1, y0, y1, nDirectPixelSamples, t0, t1);
                Sample *sample = new Sample(&sampler, directLighting, NULL, scene);
                vector<Task *> directTasks;
                int nDirectTasks = max(32 * NumSystemCores(),
                                 (camera->film->xResolution * camera->film->yResolution) / (16*16));
                nDirectTasks = RoundUpPow2(nDirectTasks);
                ProgressReporter directProgress(nDirectTasks, "Direct Lighting");
                for (int i = 0; i < nDirectTasks; ++i)
                    directTasks.push_back(new SamplerRendererTask(scene, this, camera, camera->film, directProgress,
                                                                  &sampler, sample, false, i, nDirectTasks));
                std::reverse(directTasks.begin(), directTasks.end());
                EnqueueTasks(directTasks);
                WaitForAllTasks();
                for (uint32_t i = 0; i < directTasks.size(); ++i)
                    delete directTasks[i];
                delete sample;
                directProgress.Done();
            }
            camera->film->WriteImage();
            PBRT_MLT_FINISHED_DIRECTLIGHTING();
        }
        // Take initial set of samples to compute $b$
        PBRT_MLT_STARTED_BOOTSTRAPPING(nBootstrap);
        RNG rng(0);
        MemoryArena arena;
        vector<float> bootstrapI;
        vector<PathVertex> cameraPath(maxDepth, PathVertex());
        vector<PathVertex> lightPath(maxDepth, PathVertex());
        float sumI = 0.f;
        bootstrapI.reserve(nBootstrap);
        MLTSample sample(maxDepth);
        for (uint32_t i = 0; i < nBootstrap; ++i) {
            // Generate random sample and path radiance for MLT bootstrapping
            float x = Lerp(rng.RandomFloat(), x0, x1);
            float y = Lerp(rng.RandomFloat(), y0, y1);
            LargeStep(rng, &sample, maxDepth, x, y, t0, t1, bidirectional);
            Spectrum L = PathL(sample, scene, arena, camera, lightDistribution,
                               &cameraPath[0], &lightPath[0], rng);

            // Compute contribution for random sample for MLT bootstrapping
            float I = ::I(L);
            sumI += I;
            bootstrapI.push_back(I);
            arena.FreeAll();
        }
        float b = sumI / nBootstrap;
        PBRT_MLT_FINISHED_BOOTSTRAPPING(b);
        Info("MLT computed b = %f", b);

        // Select initial sample from bootstrap samples
        float contribOffset = rng.RandomFloat() * sumI;
        rng.Seed(0);
        sumI = 0.f;
        MLTSample initialSample(maxDepth);
        for (uint32_t i = 0; i < nBootstrap; ++i) {
            float x = Lerp(rng.RandomFloat(), x0, x1);
            float y = Lerp(rng.RandomFloat(), y0, y1);
            LargeStep(rng, &initialSample, maxDepth, x, y, t0, t1,
                      bidirectional);
            sumI += bootstrapI[i];
            if (sumI > contribOffset)
                break;
        }

        // Launch tasks to generate Metropolis samples
        uint32_t nTasks = largeStepsPerPixel;
        uint32_t largeStepRate = nPixelSamples / largeStepsPerPixel;
        Info("MLT running %d tasks, large step rate %d", nTasks, largeStepRate);
        ProgressReporter progress(nTasks * largeStepRate, "Metropolis");
        vector<Task *> tasks;
        Mutex *filmMutex = Mutex::Create();
        Assert(IsPowerOf2(nTasks));
        uint32_t scramble[2] = { rng.RandomUInt(), rng.RandomUInt() };
        uint32_t pfreq = (x1-x0) * (y1-y0);
        for (uint32_t i = 0; i < nTasks; ++i) {
            float d[2];
            Sample02(i, scramble, d);
            tasks.push_back(new MLTTask(progress, pfreq, i,
                d[0], d[1], x0, x1, y0, y1, t0, t1, b, initialSample,
                scene, camera, this, filmMutex, lightDistribution));
        }
        EnqueueTasks(tasks);
        WaitForAllTasks();
        for (uint32_t i = 0; i < tasks.size(); ++i)
            delete tasks[i];
        progress.Done();
        Mutex::Destroy(filmMutex);
        delete lightDistribution;
    }
    camera->film->WriteImage();
    PBRT_MLT_FINISHED_RENDERING();
}
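The bootstrap phase above first accumulates the contributions of nBootstrap random paths into sumI (whose mean becomes b), then re-runs the same random sequence and keeps the sample at which the running sum first exceeds a uniform offset in [0, sumI). The sketch below isolates that selection step; the RNG and the contribution vector are illustrative stand-ins, not the renderer's types.

#include <cstddef>
#include <random>
#include <vector>

// Pick the bootstrap sample index proportionally to its contribution.
std::size_t selectBootstrapIndex(const std::vector<float> &bootstrapI, std::mt19937 &rng) {
    float sumI = 0.f;
    for (float I : bootstrapI) sumI += I;
    std::uniform_real_distribution<float> u(0.f, 1.f);
    float contribOffset = u(rng) * sumI;
    float running = 0.f;
    for (std::size_t i = 0; i < bootstrapI.size(); ++i) {
        running += bootstrapI[i];
        if (running > contribOffset)
            return i;          // this bootstrap sample seeds the Markov chain
    }
    return bootstrapI.empty() ? 0 : bootstrapI.size() - 1;
}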
Code Example #24
0
void recursive_bisection_contoller::run(parallel::hypergraph &hgraph,
                                        MPI_Comm comm) {
  initialize_coarsest_hypergraph(hgraph, comm);
  convToBisectionConstraints();

  progress("[R-B]: %i |", number_of_runs_);

  int i;
  int j;
  int ij;

  int numVertices = hypergraph_->number_of_vertices();
  int *pVector = nullptr;
  int destProcessor;
  int myPartitionIdx = 0;
  int v;

  dynamic_array<int> recvLens(number_of_processors_);
  dynamic_array<int> recvDispls(number_of_processors_);

  bisection *b;

  all_partition_info_.resize(numVertices << 1);

  for (i = 0; i < number_of_runs_; ++i) {
    destProcessor = i % number_of_processors_;
    sum_of_cuts_ = 0;
    local_vertex_part_info_length_ = 0;

    if (rank_ == destProcessor) {
      pVector = &partition_vector_[partition_vector_offsets_[myPartitionIdx]];
    }

    b = new bisection(hypergraph_, log_k_, 0);
    b->initMap();

    recursively_bisect(*b, comm);

    // ###
    // now recover the partition and
    // partition cutsize
    // ###

    MPI_Reduce(&sum_of_cuts_, &ij, 1, MPI_INT, MPI_SUM, destProcessor, comm);
    MPI_Gather(&local_vertex_part_info_length_, 1, MPI_INT, recvLens.data(), 1, MPI_INT,
               destProcessor, comm);

    if (rank_ == destProcessor) {
      partition_vector_cuts_[myPartitionIdx] = ij;
      ij = 0;

      for (j = 0; j < number_of_processors_; ++j) {
        recvDispls[j] = ij;
        ij += recvLens[j];
      }
    }

    MPI_Gatherv(local_vertex_partition_info_.data(), local_vertex_part_info_length_, MPI_INT,
                all_partition_info_.data(), recvLens.data(),
                recvDispls.data(), MPI_INT, destProcessor, comm);

    if (rank_ == destProcessor) {
      ij = numVertices << 1;

      for (j = 0; j < ij;) {
        v = all_partition_info_[j++];
        pVector[v] = all_partition_info_[j++];
      }

      ++myPartitionIdx;
    }
  }

  // ###
  // k-way refine local partitions
  // ###

  hypergraph_->set_number_of_partitions(number_of_partitions_);

  if (number_of_partitions_ > 0) {
    for (i = 0; i < number_of_partitions_; ++i) {
      int *start = &(partition_vector_.data()[partition_vector_offsets_[i]]);
      dynamic_array<int> p_vector(numVertices);
      p_vector.set_data(start, numVertices);
      hypergraph_->copy_in_partition(p_vector, numVertices, i, partition_vector_cuts_[i]);
    }

    refiner_->rebalance(*hypergraph_);
  }

  // ###
  // project partitions
  // ###

  initialize_serial_partitions(hgraph, comm);

#ifdef DEBUG_CONTROLLER
  hgraph.checkPartitions(numParts, maxPartWt, comm);
#endif
}
Code Example #25
0
/** Execute the algorithm.
 */
void CreateChunkingFromInstrument::exec() {
  // get the instrument
  Instrument_const_sptr inst = this->getInstrument();

  // setup the output workspace
  ITableWorkspace_sptr strategy =
      WorkspaceFactory::Instance().createTable("TableWorkspace");
  strategy->addColumn("str", "BankName");
  this->setProperty("OutputWorkspace", strategy);

  // get the correct level of grouping
  string groupLevel = this->getPropertyValue(PARAM_CHUNK_BY);
  vector<string> groupNames =
      getGroupNames(this->getPropertyValue(PARAM_CHUNK_NAMES));
  if (groupLevel.compare("All") == 0) {
    return; // nothing to do
  } else if (inst->getName().compare("SNAP") == 0 &&
             groupLevel.compare("Group") == 0) {
    groupNames.clear();
    groupNames.push_back("East");
    groupNames.push_back("West");
  }

  // set up a progress bar with the "correct" number of steps
  int maxBankNum = this->getProperty(PARAM_MAX_BANK_NUM);
  Progress progress(this, .2, 1., maxBankNum);

  // search the instrument for the bank names
  int maxRecurseDepth = this->getProperty(PARAM_MAX_RECURSE);
  map<string, vector<string>> grouping;
  // cppcheck-suppress syntaxError
    PRAGMA_OMP(parallel for schedule(dynamic, 1) )
    for (int num = 0; num < maxBankNum; ++num) {
      PARALLEL_START_INTERUPT_REGION
      ostringstream mess;
      mess << "bank" << num;
      IComponent_const_sptr comp =
          inst->getComponentByName(mess.str(), maxRecurseDepth);
      PARALLEL_CRITICAL(grouping)
      if (comp) {
        // get the name of the correct parent
        string parent;
        if (groupNames.empty()) {
          parent = parentName(comp, groupLevel);
        } else {
          parent = parentName(comp, groupNames);
        }

        // add it to the correct chunk
        if (!parent.empty()) {
          if (grouping.count(parent) == 0)
            grouping[parent] = vector<string>();

          grouping[parent].push_back(comp->getName());
        }
      }
      progress.report();
      PARALLEL_END_INTERUPT_REGION
    }
    PARALLEL_CHECK_INTERUPT_REGION

    // check to see that something happened
    if (grouping.empty())
      throw std::runtime_error("Failed to find any banks in the instrument");

    // fill in the table workspace
    for (auto group = grouping.begin(); group != grouping.end(); ++group) {
      stringstream banks;
      for (auto bank = group->second.begin(); bank != group->second.end();
           ++bank)
        banks << (*bank) << ",";

      // remove the trailing comma
      string banksStr = banks.str();
      banksStr = banksStr.substr(0, banksStr.size() - 1);

      // add it to the table
      TableRow row = strategy->appendRow();
      row << banksStr;
    }
}
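The last loop above joins each group's bank names into a comma-separated string and strips the trailing comma before appending the table row. The following is a small standalone sketch of just that grouping/serialization step, with plain standard-library containers standing in for the TableWorkspace.

#include <map>
#include <sstream>
#include <string>
#include <vector>

// Join each group's bank names with commas, dropping the trailing comma.
std::map<std::string, std::string> joinGroups(
        const std::map<std::string, std::vector<std::string>> &grouping) {
    std::map<std::string, std::string> rows;
    for (const auto &group : grouping) {
        std::ostringstream banks;
        for (const auto &bank : group.second)
            banks << bank << ",";
        std::string banksStr = banks.str();
        if (!banksStr.empty())
            banksStr.erase(banksStr.size() - 1);   // remove the trailing comma
        rows[group.first] = banksStr;
    }
    return rows;
}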
Code Example #26
0
void recursive_bisection_contoller::initialize_serial_partitions(
    parallel::hypergraph &hgraph,
    MPI_Comm comm) {
  int i;
  int j;
  int ij;

  int numTotVertices = hypergraph_->number_of_vertices();
  int ijk;
  int startOffset;
  int endOffset;
  int totToSend;

  ds::dynamic_array<int> hGraphPartitionVector;
  ds::dynamic_array<int> hGraphPartVectorOffsets;
  ds::dynamic_array<int> hGraphPartCuts;

  auto hPartitionVector = hypergraph_->partition_vector();
  auto hPartOffsetsVector = hypergraph_->partition_offsets();
  auto hPartitionCutsArray = hypergraph_->partition_cuts();

  dynamic_array<int> numVperProc(number_of_processors_);
  dynamic_array<int> procDispls(number_of_processors_);

  dynamic_array<int> sendLens(number_of_processors_);
  dynamic_array<int> sendDispls(number_of_processors_);
  dynamic_array<int> recvLens(number_of_processors_);
  dynamic_array<int> recvDispls(number_of_processors_);
  dynamic_array<int> sendArray;

  hgraph.set_number_of_partitions(number_of_runs_);

  hGraphPartitionVector = hgraph.partition_vector();
  hGraphPartVectorOffsets = hgraph.partition_offsets();
  hGraphPartCuts = hgraph.partition_cuts();

  // ###
  // communicate partition vector values
  // ###

  j = number_of_processors_ - 1;
  ij = numTotVertices / number_of_processors_;

  for (i = 0; i < j; ++i)
    numVperProc[i] = ij;

  numVperProc[i] = ij + (numTotVertices % number_of_processors_);

  j = 0;
  ij = 0;

  for (i = 0; i < number_of_processors_; ++i) {
    sendDispls[i] = j;
    procDispls[i] = ij;
    sendLens[i] = numVperProc[i] * number_of_partitions_;
    j += sendLens[i];
    ij += numVperProc[i];
  }

  sendArray.resize(j);
  totToSend = j;

  ij = 0;

  for (ijk = 0; ijk < number_of_processors_; ++ijk) {
    for (j = 0; j < number_of_partitions_; ++j) {
      startOffset = hPartOffsetsVector[j] + procDispls[ijk];
      endOffset = startOffset + numVperProc[ijk];

      for (i = startOffset; i < endOffset; ++i) {
        sendArray[ij++] = hPartitionVector[i];
      }
    }
  }
#ifdef DEBUG_CONTROLLER
  assert(ij == totToSend);
#endif

  MPI_Alltoall(sendLens.data(), 1, MPI_INT, recvLens.data(), 1, MPI_INT,
               comm);

  ij = 0;

  for (i = 0; i < number_of_processors_; ++i) {
    recvDispls[i] = ij;
    ij += recvLens[i];
  }

#ifdef DEBUG_CONTROLLER
  assert(ij == hGraphPartVectorOffsets[numSeqRuns]);
#endif

  MPI_Alltoallv(sendArray.data(), sendLens.data(),
                sendDispls.data(), MPI_INT, hGraphPartitionVector.data(),
                recvLens.data(), recvDispls.data(), MPI_INT, comm);

  // ###
  // communicate partition cuts
  // ###

  MPI_Allgather(&number_of_partitions_, 1, MPI_INT, recvLens.data(), 1, MPI_INT,
                comm);

  ij = 0;

  for (i = 0; i < number_of_processors_; ++i) {
    recvDispls[i] = ij;
    ij += recvLens[i];
  }

  MPI_Allgatherv(hPartitionCutsArray.data(), number_of_partitions_, MPI_INT,
                 hGraphPartCuts.data(), recvLens.data(), recvDispls.data(),
                 MPI_INT, comm);

  for (i = 0; i < number_of_runs_; ++i) {
    progress("%i ", hGraphPartCuts[i]);
  }
  progress("\n");
}
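Both of the Parkway routines above prepare the collective calls (MPI_Gatherv, MPI_Alltoallv, MPI_Allgatherv) the same way: the displacement array is the exclusive prefix sum of the per-process lengths. The sketch below extracts that bookkeeping into a helper; it is a generic illustration, not part of the Parkway API.

#include <cstddef>
#include <vector>

// Compute receive/send displacements as the exclusive prefix sum of the lengths.
std::vector<int> displacementsFromLengths(const std::vector<int> &lens, int *total = nullptr) {
    std::vector<int> displs(lens.size());
    int offset = 0;
    for (std::size_t i = 0; i < lens.size(); ++i) {
        displs[i] = offset;      // process i's data starts here in the packed buffer
        offset += lens[i];
    }
    if (total)
        *total = offset;         // total number of elements in the packed buffer
    return displs;
}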
Code Example #27
0
File: topolTest.cpp Project: cz172638/QGIS
ErrorList topolTest::checkOverlapWithLayer( double tolerance, QgsVectorLayer *layer1, QgsVectorLayer *layer2, bool isExtent )
{
  Q_UNUSED( tolerance );

  int i = 0;
  ErrorList errorList;

  bool skipItself = layer1 == layer2;
  QgsSpatialIndex *index = mLayerIndexes[layer2->id()];

  QgsGeometry canvasExtentPoly = QgsGeometry::fromWkt( qgsInterface->mapCanvas()->extent().asWktPolygon() );

  QList<FeatureLayer>::iterator it;
  for ( it = mFeatureList1.begin(); it != mFeatureList1.end(); ++it )
  {
    if ( !( ++i % 100 ) )
      emit progress( i );

    if ( testCanceled() )
      break;

    QgsGeometry g1 = it->feature.geometry();
    QgsRectangle bb = g1.boundingBox();

    QList<QgsFeatureId> crossingIds;
    crossingIds = index->intersects( bb );

    QList<QgsFeatureId>::ConstIterator cit = crossingIds.begin();
    QList<QgsFeatureId>::ConstIterator crossingIdsEnd = crossingIds.end();
    for ( ; cit != crossingIdsEnd; ++cit )
    {
      QgsFeature &f = mFeatureMap2[*cit].feature;
      QgsGeometry g2 = f.geometry();

      // skip itself, when invoked with the same layer
      if ( skipItself && f.id() == it->feature.id() )
        continue;

      if ( g2.isNull() )
      {
        QgsMessageLog::logMessage( tr( "Second geometry missing." ), tr( "Topology plugin" ) );
        continue;
      }

      if ( g1.overlaps( g2 ) )
      {
        QgsRectangle r = bb;
        QgsRectangle r2 = g2.boundingBox();
        r.combineExtentWith( r2 );

        QgsGeometry conflictGeom = g1.intersection( g2 );
        // could this for some reason return NULL?
        if ( conflictGeom.isNull() )
        {
          continue;
        }

        if ( isExtent )
        {
          if ( canvasExtentPoly.disjoint( conflictGeom ) )
          {
            continue;
          }
          if ( canvasExtentPoly.crosses( conflictGeom ) )
          {
            conflictGeom = conflictGeom.intersection( canvasExtentPoly );
          }
        }

        //c = new QgsGeometry;

        QList<FeatureLayer> fls;
        FeatureLayer fl;
        fl.feature = f;
        fl.layer = layer2;
        fls << *it << fl;
        TopolErrorIntersection *err = new TopolErrorIntersection( r, conflictGeom, fls );

        errorList << err;
      }
    }
  }
  return errorList;
}
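checkOverlapWithLayer is a two-stage test: a cheap bounding-box query against the spatial index narrows the candidates, and the exact overlap/intersection predicate runs only on those. The sketch below shows the same pattern with simplified types; Rect and the exactOverlaps callback are stand-ins for QgsSpatialIndex and QgsGeometry, and the linear scan replaces the real index.

#include <cstddef>
#include <functional>
#include <vector>

struct Rect {
    double xmin, ymin, xmax, ymax;
    bool intersects(const Rect &o) const {
        return xmin <= o.xmax && o.xmin <= xmax && ymin <= o.ymax && o.ymin <= ymax;
    }
};

// Bounding-box prefilter followed by an exact predicate on the survivors.
std::vector<std::size_t> candidatesThenExact(
        const Rect &queryBox,
        const std::vector<Rect> &boxes,
        const std::function<bool(std::size_t)> &exactOverlaps) {
    std::vector<std::size_t> hits;
    for (std::size_t i = 0; i < boxes.size(); ++i) {
        if (!queryBox.intersects(boxes[i]))
            continue;                  // cheap reject (spatial index stand-in)
        if (exactOverlaps(i))
            hits.push_back(i);         // exact overlap confirmed
    }
    return hits;
}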
Code Example #28
0
File: mediaupload.cpp Project: ceteigrek/yappari
void MediaUpload::updateProgress(float p)
{
    emit progress(msg,p);
}
Code Example #29
0
File: topolTest.cpp Project: cz172638/QGIS
ErrorList topolTest::checkCloseFeature( double tolerance, QgsVectorLayer *layer1, QgsVectorLayer *layer2, bool isExtent )
{
  Q_UNUSED( isExtent );
  ErrorList errorList;
  QgsSpatialIndex *index = mLayerIndexes[layer2->id()]; // spatial index of layer2; it is dereferenced below

  bool badG1 = false, badG2 = false;
  bool skipItself = layer1 == layer2;

  int i = 0;
  QList<FeatureLayer>::Iterator it;
  QList<FeatureLayer>::ConstIterator FeatureListEnd = mFeatureList1.end();
  for ( it = mFeatureList1.begin(); it != FeatureListEnd; ++it )
  {
    if ( !( ++i % 100 ) )
      emit progress( i );

    if ( testCanceled() )
      break;

    QgsGeometry *g1 = it->feature.geometry();
    if ( !g1 || !g1->asGeos() )
    {
      badG1 = true;
      continue;
    }

    QgsRectangle bb = g1->boundingBox();

    // increase bounding box by tolerance
    QgsRectangle frame( bb.xMinimum() - tolerance, bb.yMinimum() - tolerance, bb.xMaximum() + tolerance, bb.yMaximum() + tolerance );

    QList<QgsFeatureId> crossingIds;
    crossingIds = index->intersects( frame );

    QList<QgsFeatureId>::Iterator cit = crossingIds.begin();
    QList<QgsFeatureId>::ConstIterator crossingIdsEnd = crossingIds.end();

    for ( ; cit != crossingIdsEnd; ++cit )
    {
      QgsFeature &f = mFeatureMap2[*cit].feature;
      QgsGeometry *g2 = f.geometry();

      // skip itself, when invoked with the same layer
      if ( skipItself && f.id() == it->feature.id() )
        continue;

      if ( !g2 || !g2->asGeos() )
      {
        badG2 = true;
        continue;
      }

      if ( g1->distance( *g2 ) < tolerance )
      {
        QgsRectangle r = g2->boundingBox();
        r.combineExtentWith( &bb );

        QList<FeatureLayer> fls;
        FeatureLayer fl;
        fl.feature = f;
        fl.layer = layer2;
        fls << *it << fl;
        QgsGeometry *conflict = new QgsGeometry( *g2 );
        TopolErrorClose *err = new TopolErrorClose( r, conflict, fls );
        //TopolErrorClose* err = new TopolErrorClose(r, g2, fls);

        errorList << err;
      }
    }
  }

  if ( badG2 )
    QgsMessageLog::logMessage( tr( "Invalid second geometry." ), tr( "Topology plugin" ) );

  if ( badG1 )
    QgsMessageLog::logMessage( tr( "Invalid first geometry." ), tr( "Topology plugin" ) );

  return errorList;
}
Code Example #30
0
int CDfuPlusHelper::spray()
{
    const char* srcxml = globals->queryProp("srcxml");
    const char* srcip = globals->queryProp("srcip");
    const char* srcfile = globals->queryProp("srcfile");
    
    bool nowait = globals->getPropBool("nowait", false);

    MemoryBuffer xmlbuf;

    if(srcxml == NULL)
    {
        if(srcfile == NULL)
            throw MakeStringException(-1, "srcfile not specified");
        if(srcip == NULL) {
#ifdef DAFILESRV_LOCAL
            progress("srcip not specified - assuming spray from local machine\n");
            srcip = ".";
#else
            throw MakeStringException(-1, "srcip not specified");
#endif
        }
    }
    else
    {
        if(srcip != NULL || srcfile != NULL)
            throw MakeStringException(-1, "srcip/srcfile and srcxml can't be used at the same time");
        StringBuffer buf;
        buf.loadFile(srcxml);
        int len = buf.length();
        xmlbuf.setBuffer(len, buf.detach(), true);
    }

    const char* dstname = globals->queryProp("dstname");
    if(dstname == NULL)
        throw MakeStringException(-1, "dstname not specified");
    const char* dstcluster = globals->queryProp("dstcluster");
    if(dstcluster == NULL)
        throw MakeStringException(-1, "dstcluster not specified");
    const char* format = globals->queryProp("format");
    if(format == NULL)
        format = "fixed";
    else if (stricmp(format, "delimited") == 0)
        format="csv";

    SocketEndpoint localep;
    StringBuffer localeps;
    if (checkLocalDaFileSvr(srcip,localep))
        srcip = localep.getUrlStr(localeps).str();
    StringBuffer wuid;
    StringBuffer errmsg;
    bool ok;
    if ((stricmp(format, "fixed") == 0)||(stricmp(format, "recfmvb") == 0)||(stricmp(format, "recfmv") == 0)||(stricmp(format, "variablebigendian") == 0))
        ok = fixedSpray(srcxml,srcip,srcfile,xmlbuf,dstcluster,dstname,format,wuid,errmsg);
    else if((stricmp(format, "csv") == 0)||(stricmp(format, "xml") == 0)||(stricmp(format, "variable") == 0))
        ok = variableSpray(srcxml,srcip,srcfile,xmlbuf,dstcluster,dstname,format,wuid, errmsg);
    else
        throw MakeStringException(-1, "format %s not supported", format);
    if (!ok) {
        if(errmsg.length())
            error("%s\n", errmsg.str());
        else
            throw MakeStringException(-1, "unknown error spraying");
    }
    else {
        const char* jobname = globals->queryProp("jobname");
        if(jobname && *jobname)
            updatejobname(wuid.str(), jobname);

        info("Submitted WUID %s\n", wuid.str());
        if(!nowait)
            waitToFinish(wuid.str());
    }

    return 0;
}
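The format handling in spray() follows a simple rule: a missing format defaults to "fixed", "delimited" is treated as an alias for "csv", and the value then selects the fixed-width or variable-record spray path. The sketch below captures that dispatch in a standalone helper; the enum and function names are illustrative, and strcasecmp is used as a portable stand-in for the stricmp calls in the code above.

#include <strings.h>   // strcasecmp (POSIX equivalent of stricmp)

enum class SprayKind { Fixed, Variable, Unsupported };

// Normalize the format string and pick the spray path, mirroring spray() above.
SprayKind classifyFormat(const char *format) {
    if (format == nullptr)
        format = "fixed";                        // default format
    else if (strcasecmp(format, "delimited") == 0)
        format = "csv";                          // "delimited" is an alias for "csv"
    if (!strcasecmp(format, "fixed") || !strcasecmp(format, "recfmvb") ||
        !strcasecmp(format, "recfmv") || !strcasecmp(format, "variablebigendian"))
        return SprayKind::Fixed;                 // fixedSpray() path
    if (!strcasecmp(format, "csv") || !strcasecmp(format, "xml") ||
        !strcasecmp(format, "variable"))
        return SprayKind::Variable;              // variableSpray() path
    return SprayKind::Unsupported;               // would raise "format not supported"
}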