Example #1
0
  void Run() {
    Image datImg;
    datImg.LoadRaw(input.c_str());
    const RawImage *datRaw = datImg.GetImage(); 
    //    ShiftTraces(*bfT0, *wellT0, datRaw->frames, datRaw->baseFrameRate, datRaw->timestamps, datRaw->image);
    SynchDat sdat; //GridMesh<TraceChunk> traceChunks;
    AddMetaData(sdat, datRaw, acqNum);
    GenerateDataChunks(*config, *bfT0, datRaw, config->row_step, config->col_step, *sigmaTMid, sdat.mChunks, datImg);
    serializer.Write(output.c_str(), sdat);

    if (config->doDebug) {
      char buffer[2048];
      string tmp = input.substr(input.size()-8,8);
      snprintf(buffer, sizeof(buffer), "comIn_%s", tmp.c_str());
      OutputTraceChunks(sdat.mChunks,buffer);

      TraceChunkSerializer readSer;
      //      GridMesh<TraceChunk> traceIn;
      SynchDat sdatIn;
      readSer.Read(output.c_str(),sdatIn);
      snprintf(buffer, sizeof(buffer), "decomOut_%s", tmp.c_str());
      OutputTraceChunks(sdatIn.mChunks,buffer);
    }
    datImg.Close();
  }
Example #2
0
void ImageSpecClass::ReadFirstImage(Image &img, SystemContext &sys_context, ImageControlOpts &img_control, SpatialContext &loc_context )
{
  // Load the first acquisition (flow 0) either as a raw .dat or as an sdat,
  // configure analysis region sizes from it, and record the chip-origin offset.
  img.SetImgLoadImmediate ( false );
  img.SetNumAcqFiles ( 1 );
  img.SetIgnoreChecksumErrors ( img_control.ignoreChecksumErrors );
  bool datIsSdat = false;
  // "<dir>/<prefix>0000.dat": '/' + 4 digits + ".dat" + NUL = 10 extra bytes.
  size_t datPathLen = strlen ( sys_context.dat_source_directory ) + strlen ( img_control.acqPrefix ) + 10;
  char *firstAcqFile = ( char * ) malloc ( datPathLen );
  snprintf ( firstAcqFile, datPathLen, "%s/%s%04d.dat", sys_context.dat_source_directory, img_control.acqPrefix, 0 );
  /* Check to see if our dat file is really an sdat file in disguise, if so update our suffix and doing sdats. */
  if (H5File::IsH5File(firstAcqFile)) {
    datIsSdat = true;
    img_control.sdatSuffix = "dat";
    img_control.doSdat = true;
  }
  // BUG FIX: the original allocation used a fixed "+11" that did not account
  // for the configurable suffix length, overflowing for suffixes longer than
  // 4 chars. Size from the actual suffix: '/' + 4 digits + '.' + suffix + NUL.
  size_t sdatPathLen = strlen ( sys_context.dat_source_directory ) + strlen ( img_control.acqPrefix ) + img_control.sdatSuffix.size() + 8;
  char *firstSdatAcqFile = ( char * ) malloc ( sdatPathLen );
  snprintf ( firstSdatAcqFile, sdatPathLen, "%s/%s%04d.%s", sys_context.dat_source_directory, img_control.acqPrefix, 0, img_control.sdatSuffix.c_str() );

  if (isFile(firstAcqFile) && !datIsSdat) {
    // Plain .dat path: a failed load is fatal.
    if ( !img.LoadRaw ( firstAcqFile, 0, true, false ) )
      {
        exit ( EXIT_FAILURE );
      }
  }
  else if(isFile(firstSdatAcqFile)) {
    // Sdat path: deserialize trace chunks and initialize the image from them.
    TraceChunkSerializer reader;
    SynchDat sdat;
    reader.Read(firstSdatAcqFile, sdat);
    img.InitFromSdat(&sdat);
    if (loc_context.regionXSize == 0){ // not explicitly set, set now
      loc_context.regionXSize = sdat.GetColStep();
      loc_context.regionYSize = sdat.GetRowStep();
    }
    if ((sdat.GetColStep() != (size_t)loc_context.regionXSize) ||
	(sdat.GetRowStep() != (size_t)loc_context.regionYSize)) {
      fprintf(stdout, "Warning: Analysis region sizes width=%d, height=%d do not match sdat region sizes width=%d, height=%d\n", loc_context.regionXSize, loc_context.regionYSize, (int)sdat.GetColStep(), (int)sdat.GetRowStep());
    }
  }
  else {
    // Neither file exists: nothing to process.
    exit ( EXIT_FAILURE );
  }
  img.SetOffsetFromChipOrigin ( firstAcqFile );
  free ( firstAcqFile );
  free ( firstSdatAcqFile );

  if ( !loc_context.IsSetRegionXYSize() ){ // not yet set, has to be set now
    loc_context.SetRegionXYSize(50, 50);
  }

  img.SetDir ( sys_context.results_folder );
  img.SetFlowOffset ( img_control.flowTimeOffset );
}
Example #3
0
int main(int argc, const char *argv[]) {
  // Command-line driver: converts a directory of acq_*.dat acquisition files
  // into compressed .sdat archives. t0 is estimated from two beadfind images,
  // sigma/t_mid_nuc from the first acquisition (or supplied/fit parameters),
  // flow 0 is processed single-threaded (with round-trip verification) and
  // the remaining flows in a thread pool.
  OptArgs opts;  
  TraceConfig config;
  string inputDir;
  string outputDir;
  bool help;

  opts.ParseCmdLine(argc, argv);
  opts.GetOption(inputDir, "", '-', "source-dir");
  opts.GetOption(outputDir, "", '-', "output-dir");
  opts.GetOption(config.precision, "5", '-', "precision");
  opts.GetOption(config.numEvec, "7", '-', "num-evec");
  opts.GetOption(config.doDebug, "false", '-', "debug-files");
  opts.GetOption(config.compressionType, "delta", '-', "compression");
  opts.GetOption(config.numFlows, "-1", '-', "num-flows");
  opts.GetOption(config.numCores, "6", '-', "num-cores");
  opts.GetOption(config.errCon,"0",'-',"err-con");
  opts.GetOption(config.rankGood,"0",'-',"rank-good");
  opts.GetOption(config.pivot,"0",'-',"pivot");
  opts.GetOption(help, "false", 'h', "help");
  opts.GetOption(config.isThumbnail, "false", '-', "thumbnail");
  opts.GetOption(config.use_hard_est, "false",'-', "use-hard-est");
  opts.GetOption(config.t0_hard, "0", '-', "t0-hard");
  opts.GetOption(config.tmid_hard, "0", '-', "tmid-hard");
  opts.GetOption(config.sigma_hard, "0", '-', "sigma-hard");
  opts.GetOption(config.row_step, "100", '-', "row-step");
  opts.GetOption(config.col_step, "100", '-', "col-step");
  opts.GetOption(config.bg_param, "", '-', "region-param");
  opts.GetOption(config.grind_acq_0, "0", '-', "grind-acq0");
  if(help || inputDir.empty() || outputDir.empty()) {
    usage();
  }
  char *explog_path = NULL;
  explog_path = MakeExpLogPathFromDatDir(inputDir.c_str());
  int numFlows = config.numFlows;
  if (numFlows < 0) { // not supplied on command line, take it from explog
    numFlows = GetTotalFlows(explog_path); 
  }

  // Check and setup our compression type; the serializer takes the
  // newly-allocated compressor via SetCompressor().
  TraceChunkSerializer serializer;
  serializer.SetRecklessAbandon(true);
  if (config.compressionType == "svd") {
    SvdDatCompress *dc = new SvdDatCompress(config.precision, config.numEvec);
    serializer.SetCompressor(dc);
    cout << "Doing lossy svd compression. (" << serializer.GetCompressionType() << ")" << endl;
  }
  // else if (config.compressionType == "svd+") {
  //   SvdDatCompressPlus *dc = new SvdDatCompressPlus();
  //   serializer.SetCompressor(dc);
  //   cout << "Doing lossy svd compression. (" << serializer.GetCompressionType() << ")" << endl;
  // }
  // else if (config.compressionType == "svd++") {
  //   SvdDatCompressPlusPlus *dc = new SvdDatCompressPlusPlus();
  //   if (config.errCon >0 )
  //     dc->SetErrCon(config.errCon);
  //   if (config.rankGood > 0 )
  //     dc->SetRankGood(config.rankGood);
  //   if (config.pivot > 0)
  //     dc->SetPivot(config.pivot);
  //   serializer.SetCompressor(dc);
  //   cout << "Doing lossy svd compression for good traces and delta for bad ones. (" << serializer.GetCompressionType() << ")" << endl;
  // }
  else if (config.compressionType == "delta") {
    VencoLossless *venco = new VencoLossless();
    serializer.SetCompressor(venco);
    cout << "Doing lossless delta compression. (" << serializer.GetCompressionType() << ")" << endl;
  }
  else if (config.compressionType == "delta-plain") {
    DeltaComp *delta = new DeltaComp();
    serializer.SetCompressor(delta);
    cout << "Doing lossless delta plain compression. (" << serializer.GetCompressionType() << ")" << endl;
  }
  else if (config.compressionType == "delta-plain-fst") {
    DeltaCompFst *delta = new DeltaCompFst();
    serializer.SetCompressor(delta);
    cout << "Doing lossless delta plain fast compression. (" << serializer.GetCompressionType() << ")" << endl;
  }
  else if (config.compressionType == "delta-plain-fst-smx") {
    DeltaCompFstSmX *delta = new DeltaCompFstSmX();
    serializer.SetCompressor(delta);
    cout << "Doing lossless delta plain fast compression. (" << serializer.GetCompressionType() << ")" << endl;
  }
  else if (config.compressionType == "none") {
    TraceCompressor *vanilla = new TraceNoCompress();
    serializer.SetCompressor(vanilla);
    cout << "Doing no compression. (" << serializer.GetCompressionType() << ")" << endl;
  }
  else {
    ION_ABORT("Don't recognize compression type: " + config.compressionType);
  }

  const char *id = GetChipId(explog_path);
  if (explog_path) free (explog_path);
  ChipIdDecoder::SetGlobalChipId(id);
  ImageTransformer::CalibrateChannelXTCorrection(inputDir.c_str(), "lsrowimage.dat");

  // Load the two beadfind images used for t0 estimation and pinned-well masking.
  Image bfImg1;
  string bfFile = inputDir + "/beadfind_pre_0003.dat";
  bfImg1.LoadRaw(bfFile.c_str());
  const RawImage *bf1raw = bfImg1.GetImage(); 
  Mask mask(bf1raw->cols, bf1raw->rows);
  ImageTransformer::XTChannelCorrect(bfImg1.raw,bfImg1.results_folder);

  bfImg1.FilterForPinned (&mask, MaskEmpty, false);

  Image bfImg2;
  string bfFile2 = inputDir + "/beadfind_pre_0001.dat";
  bfImg2.LoadRaw(bfFile2.c_str());
  // BUG FIX: original passed bfImg1.results_folder here (copy/paste);
  // correct the second image using its own results folder.
  ImageTransformer::XTChannelCorrect(bfImg2.raw,bfImg2.results_folder);

  bfImg2.FilterForPinned (&mask, MaskEmpty, false);
  const RawImage *bf2raw = bfImg2.GetImage(); 

  GridMesh<T0Prior> t0Prior;
  T0Calc bfT0;
  /* Calc t0 and get prior. */
  cout << "Doing beadfind t0" << endl;
  GenerateBfT0Prior(config, bf1raw->image, bf1raw->baseFrameRate, bf1raw->rows, bf1raw->cols,
                    bf1raw->frames, bf1raw->timestamps,
                    config.row_step, config.col_step, &mask, bfT0, t0Prior);

  GridMesh<T0Prior> t0Prior2;
  T0Calc bfT02;
  GenerateBfT0Prior(config, bf2raw->image, bf2raw->baseFrameRate, bf2raw->rows, bf2raw->cols,
                    bf2raw->frames, bf2raw->timestamps,
                    config.row_step, config.col_step, &mask, bfT02, t0Prior2);

  SigmaTMidNucEstimation sigmaEst;
  sigmaEst.Init(config.rate_sigma_intercept, config.rate_sigma_slope, 
                config.t0_tmid_intercept, config.t0_tmid_slope, bf1raw->baseFrameRate);
  GridMesh<SigmaEst> sigmaTMid;
  bfImg1.Close();
  bfImg2.Close();

  // Calculate individual well t0 by looking at neighboring regions, then
  // combine the two beadfind estimates (average when both valid, else max).
  vector<float> wellT0;
  bfT0.CalcIndividualT0(wellT0, 0);
  vector<float> wellT02;
  bfT02.CalcIndividualT0(wellT02, 0);
  for (size_t i =0; i< wellT0.size();i++) {
    if (wellT0[i] > 0 && wellT02[i] > 0) {
      wellT0[i] = (wellT0[i] + wellT02[i])/2.0f;
    }
    else {
      wellT0[i] = max(wellT0[i], wellT02[i]);
    }
  }

  // Average the region level t0, should we do this first and then just do sinle well level?
  for (size_t bIx = 0; bIx < bfT0.GetNumRegions(); bIx++) {
    double t1 = bfT0.GetT0(bIx);
    double t2 = bfT02.GetT0(bIx);
    if (t1 > 0 && t2 > 0) {
      t1 = (t1 + t2)/2.0;
    }
    else {
      t1 = max(t1,t2);
    }
    bfT0.SetT0(bIx, t1);
  }

  // Single thread first dat
  for (size_t datIx = 0; datIx < 1; ++datIx) {
    cout << "Doing: " << datIx << endl;
    char buffer[2048];
    snprintf(buffer, sizeof(buffer), "%s/acq_%.4d.dat", inputDir.c_str(), (int) datIx);
    string datFile = buffer;
    /* Use prior to calculate t0 and slope. */
    Image datImg;
    T0Calc t0;
    datImg.LoadRaw(datFile.c_str());
    //    ImageTransformer::XTChannelCorrect(datImg.raw,datImg.results_folder);
    const RawImage *datRaw = datImg.GetImage(); 

    /* Estimate sigma and t_mid_nuc */
    if (datIx == 0) {
      cout << "Doing acquisition t0" << endl;

      GenerateAcqT0Prior(config, datRaw->image, datRaw->baseFrameRate, datRaw->rows, datRaw->cols,
                         datRaw->frames, datRaw->timestamps,
                         config.row_step, config.col_step, &mask, t0, t0Prior);
      
      ClockTimer timer;
      cout << "Estimating sigma." << endl;
      sigmaTMid.Init(datRaw->rows, datRaw->cols, config.row_step, config.col_step);
      // Seed acquisition t0 from the combined beadfind region estimates.
      for (size_t bIx = 0; bIx < t0.GetNumRegions(); bIx++) {
        t0.SetT0(bIx, bfT0.GetT0(bIx));
      }
      int neighbors = 2;
      if (config.isThumbnail) {
        cout << "Doing thumbnail version of slope." << endl;
        neighbors = 1;
      }
      EstimateSigmaValue(t0, sigmaEst, sigmaTMid, neighbors);
      timer.PrintMilliSeconds(cout,"Sigma Est took:");
      string sigmaFile = outputDir + "/sigma_tmid_est.txt";
      OutputSigmaTmidEstimates(sigmaTMid, sigmaFile.c_str());
    }

    /* For each region do shifting */
    ClockTimer timer;
    cout << "Shifting traces" << endl;
    timer.StartTimer();
    //    ShiftTraces(bfT0, wellT0, datRaw->frames, datRaw->baseFrameRate, datRaw->timestamps, datRaw->image);
    timer.PrintMilliSeconds(cout,"Shift took:");
    if (!config.bg_param.empty()) {
      // Override sigma/t_mid_nuc with region parameters fit by a previous
      // background-model run, read from the supplied HDF5 file.
      DataCube<int> rowsCols;
      DataCube<float> tmidSigma;
      DataCube<float> fitTmidSigma;
      string path = config.bg_param + ":/region/region_location";
      if (!H5File::ReadDataCube(path, rowsCols)) {
        ION_ABORT("Couldn't read file: " + path);
      }
      path = config.bg_param + ":/region/region_init_param";
      if (!H5File::ReadDataCube(path, fitTmidSigma)) {
        ION_ABORT("Couldn't read file: " + path);
      }
      for (size_t i = 0; i < rowsCols.GetNumX(); i++) {
        int row = rowsCols.At(i,1,0);
        int col = rowsCols.At(i,0,0);
        SigmaEst &est = sigmaTMid.GetItemByRowCol(row, col);
        float tmid_est =  fitTmidSigma.At(i,0,0);
        float sigma_est = fitTmidSigma.At(i,1,0);
        est.mTMidNuc = tmid_est;
        est.mSigma = sigma_est;
      }
      string fitSigmaFile = outputDir + "/bg_fit_sigma_tmid_est.txt";
      OutputSigmaTmidEstimates(sigmaTMid, fitSigmaFile.c_str());

      // path = config.bg_param + ":/region/region_init_param";
      // if (!H5File::ReadMatrix(path, tmidSigma)) {
      //   ION_ABORT("Couldn't read file: " + path);
      // }
      // for (size_t i = 0; i < rowsCols.n_rows; i++) {
      //   int row = rowsCols.at(i,0);
      //   int col = rowsCols.at(i,1);
      //   SigmaEst &est = sigmaTMid.GetItemByRowCol(row, col);
      //   float tmid_est =  tmidSigma.at(i,0);
      //   float sigma_est = tmidSigma.at(i,1);
      //   est.mTMidNuc = tmid_est;
      //   est.mSigma = sigma_est;
      // }
      // string sigmaFile = outputDir + "/supplied_sigma_tmid_est.txt";
      // OutputSigmaTmidEstimates(sigmaTMid, sigmaFile.c_str());
    }
    else if (config.use_hard_est) {
      // Override with user-supplied hard estimates from the command line.
      for (size_t i = 0; i < bfT0.GetNumRegions(); i++) {
        bfT0.SetT0(i,config.t0_hard * datRaw->baseFrameRate + config.time_start_slop);
      }
      for (size_t i = 0; i < sigmaTMid.GetNumBin(); i++) {
        SigmaEst &est = sigmaTMid.GetItem(i);
        est.mTMidNuc = config.tmid_hard;
        est.mSigma = config.sigma_hard;
        est.mT0 = config.t0_hard;
      }
    }
    /* Use t0 and sigma to get the time compression bkgModel wants. */
    cout << "Generating chunks" << endl;
    SynchDat sdat;
    if (datIx == 0  && config.grind_acq_0 > 0) {
      // Benchmark mode: convert and write the same acquisition nTimes,
      // reporting per-iteration timing broken down by stage, then exit.
      int nTimes = config.grind_acq_0;
      timer.StartTimer();
      size_t processMicroSec = 0;
      size_t hdf5MicroSec = 0;
      size_t compressMicroSec = 0;
      size_t convertMicroSec = 0;
      for (int i = 0; i <nTimes; i++) {
        SynchDat sdatIn;
        // BUG FIX: metadata must be attached to the sdat actually being
        // written (sdatIn); the original attached it to the unused outer sdat.
        AddMetaData(sdatIn, datRaw, datIx);
        ClockTimer convTimer;
        GenerateDataChunks(config, bfT0, datRaw, config.row_step, config.col_step, sigmaTMid, sdatIn.mChunks,datImg);
        convertMicroSec += convTimer.GetMicroSec();
        snprintf(buffer, sizeof(buffer), "%s/acq_%.4d.sdat", outputDir.c_str(), (int)datIx);
        serializer.Write(buffer, sdatIn);
        processMicroSec += serializer.computeMicroSec;
        hdf5MicroSec += serializer.ioMicroSec;
        compressMicroSec += serializer.compressMicroSec;
      }
      size_t usec = timer.GetMicroSec();
      cout << "Took: " << usec / 1.0e6 << " seconds, " << usec / (nTimes * 1.0f) << " usec per write." << endl;
      timer.PrintMilliSeconds(cout,"Chunks took:");
      cout << "Read took: " << processMicroSec / (1e3 * nTimes) << " milli seconds per sdat compute." << endl;
      cout << "Read took: " << hdf5MicroSec / (1e3 * nTimes) << " milli seconds per sdat hdf5." << endl;
      cout << "Read took: " << compressMicroSec / (1e3 * nTimes) << " milli seconds per sdat compressing." << endl;
      cout << "Read took: " << convertMicroSec / (1e3 * nTimes) << " milli seconds per sdat converting." << endl;
      exit(0);
    }
    else {
      timer.StartTimer();
      AddMetaData(sdat, datRaw, datIx);
      GenerateDataChunks(config, bfT0, datRaw, config.row_step, config.col_step, sigmaTMid, sdat.mChunks,datImg);
      timer.PrintMilliSeconds(cout,"Chunks took:");
      if (datIx == 0 && config.doDebug) {
        OutputTraceChunks(sdat.mChunks,"flow_0_data_chunks.txt");
      }
    }
    datImg.Close();    

    /* Serialize onto disk. */
    snprintf(buffer, sizeof(buffer), "%s/acq_%.4d.sdat", outputDir.c_str(), (int)datIx);
    serializer.Write(buffer, sdat);
    /* Read back in first flow for checking the compression round trip. */
    if (datIx == 0) {
      TraceChunkSerializer readSerializer;
      readSerializer.SetRecklessAbandon(true);
      SynchDat sdatIn;
      readSerializer.Read(buffer, sdatIn);
      if (datIx == 0 && config.doDebug) {
        OutputTraceChunks(sdatIn.mChunks, "flow_0_data_chunks_read.txt");
      }
      // Compare what was written with what reads back; report the error
      // distribution (exact match expected for lossless compressors).
      SampleQuantiles<float> s(50000);
      SampleQuantiles<float> s2(50000);
      SampleQuantiles<float> sAbs(50000);
      SampleStats<double> ss;
      int diffCount = 0;
      for (size_t bIx = 0; bIx < sdatIn.mChunks.mBins.size(); bIx++) {
        if (sdatIn.mChunks.mBins[bIx].mT0 != sdat.mChunks.mBins[bIx].mT0) {
          cout << "Got: " << sdatIn.mChunks.mBins[bIx].mT0 << " vs: " << sdat.mChunks.mBins[bIx].mT0 << endl;
          exit(1);
        }
        for (size_t i = 0; i < sdatIn.mChunks.mBins[bIx].mData.size(); i++) {
          double diff = (double)sdatIn.mChunks.mBins[bIx].mData[i] - (double)sdat.mChunks.mBins[bIx].mData[i];
          if (!std::isfinite(diff)) {
            cout << "NaNs!!" << endl;
          }
          if (diffCount < 10 && fabs(diff) > .00001) { // != 0) {
            diffCount++;
            cout << "Bin: " << bIx << " well: " << i << " diff is: " << diff << endl;
          }
          s.AddValue(diff);
          sAbs.AddValue(fabs(diff));
          ss.AddValue(sqrt(diff * diff));
          s2.AddValue(sqrt(diff * diff));
        }
      }
      cout << "Median rms: " << s2.GetMedian()  << " Avg: " << ss.GetMean() << " diff: " << s.GetMedian() << endl;
      cout << "Abs(diff) Quantiles:" << endl;
      for (size_t i = 0; i <= 100; i+=10) {
        cout << i << "\t" << sAbs.GetQuantile(i/100.0) << endl;
      }
    }      
  }
  // do the next N flows multithreaded
  if (numFlows > 1) {
    PJobQueue jQueue (config.numCores, numFlows-1);  
    vector<CreateSDat> jobs(numFlows -1);
    for (int i = 1; i < numFlows; i++) {
      char buffer[2048];
      snprintf(buffer, sizeof(buffer), "%s/acq_%.4d.dat", inputDir.c_str(), (int) i);
      string input = buffer;
      snprintf(buffer, sizeof(buffer), "%s/acq_%.4d.sdat", outputDir.c_str(), (int)i);
      string output = buffer;
      jobs[i-1].Init(&config, input, output, &wellT0, &bfT0, &sigmaTMid, i);
      jQueue.AddJob(jobs[i-1]);
    }
    jQueue.WaitUntilDone();
  }
  cout << "Done." << endl;
}
Example #4
0
void EmptyTraceTracker::Allocate(const Mask *bfmask, const ImageSpecClass &imgSpec, int flow_block_size)
{
  // Allocate one EmptyTrace per region and register it in the lookup vector
  // BkgModelFitters use; when running from sdats, align each trace's time
  // domain with the first acquisition's sdat chunks.
  // assumes regions are indexed over a small non-negative range
  imgFrames.resize(maxNumRegions);

  SynchDat sdat;
  bool doSdat = inception_state.img_control.doSdat;
  for (unsigned int i=0; i<regions.size(); i++){
    Region region = regions[i];
    imgFrames[region.index] = imgSpec.uncompFrames;
  }

  // PERF FIX: the original re-read the same sdat file once per region inside
  // the loop below; the file name does not depend on the region, so read once.
  if (doSdat && !regions.empty()) {
    TraceChunkSerializer serializer;
    char sdatFile[1024];
    // BUG FIX: bounded snprintf instead of unchecked sprintf into a fixed buffer.
    snprintf (sdatFile, sizeof(sdatFile), "%s/%s%04d.%s", inception_state.sys_context.dat_source_directory,inception_state.img_control.acqPrefix, 
              0, inception_state.img_control.sdatSuffix.c_str() );
    bool ok = serializer.Read ( sdatFile, sdat);
    if (!ok) {
      ION_ABORT("Couldn't load file: " + ToStr(sdatFile));
    }
  }
  
  emptyTracesForBMFitter.resize(maxNumRegions);
  for (int i=0; i<maxNumRegions; i++)
    emptyTracesForBMFitter[i] = NULL;

  for (unsigned int i=0; i<regions.size(); i++){
    Region region = regions[i];

    // allocate only once, if changed need to deallocate
    assert ((region.index < maxNumRegions) && (region.index >= 0)); // was bitwise &
    assert (emptyTracesForBMFitter[region.index] == NULL);

    // allocate empty traces, current algorithm is 1 per region
    // each region is also used by a BkgModelFitters
    EmptyTrace *emptyTrace = AllocateEmptyTrace(region, imgFrames[region.index], flow_block_size);

    emptyTrace->SetTrimWildTraceOptions(inception_state.bkg_control.trace_control.do_ref_trace_trim,
                                        inception_state.bkg_control.trace_control.span_inflator_min,
                                        inception_state.bkg_control.trace_control.span_inflator_mult,
                                        inception_state.bkg_control.trace_control.cutoff_quantile,
                                        global_defaults.data_control.nuc_flow_frame_width);
  
    emptyTrace->T0EstimateToMap(sep_t0_est, &region, bfmask);
    if (doSdat){
      // make the empty trace's notion of time conform to an sdat region's
      // notion of time mapping the upper left corner of the empty trace's
      // region to the corresponding sdat region.  Note that multiple sdat
      // regions may cover the empty trace's region
      emptyTrace->SetTimeFromSdatChunk(region, sdat);
    }

    emptyTrace->CountReferenceTraces(region, bfmask);
    //fprintf(stdout, "Found %d reference traces starting at %d in region %d\n", cnt, ((EmptyTraceRecorder *)emptyTrace)->regionIndicesStartIndex, region.index);

    // assign the empty trace to the lookup vector BkgModelFitter can use
    // all regions must match an entry into emptyTracesForBMFitter so
    // every BkgModelFitter can find an EmptyTrace for its region.
    emptyTracesForBMFitter[region.index] = emptyTrace;
  }

  if (inception_state.bkg_control.trace_control.do_ref_trace_trim)
    InitializeDumpOutlierTracesFile();

}
Example #5
0
void *FileSDatLoadWorker ( void *arg )
{
  WorkerInfoQueue *q = static_cast<WorkerInfoQueue *> ( arg );
  assert ( q );

  bool done = false;
  TraceChunkSerializer serializer;
  while ( !done )
  {
    WorkerInfoQueueItem item = q->GetItem();

    if ( item.finished == true )
    {
      // we are no longer needed...go away!
      done = true;
      q->DecrementDone();
      continue;
    }
    ClockTimer timer;
    ImageLoadWorkInfo *one_img_loader = ( ImageLoadWorkInfo * ) item.private_data;
    bool ok = serializer.Read ( one_img_loader->name, one_img_loader->sdat[one_img_loader->cur_buffer] );
    if (!ok) {
      ION_ABORT("Couldn't load file: " + ToStr(one_img_loader->name));
    }
    one_img_loader->pinnedInFlow->Update ( one_img_loader->flow, &one_img_loader->sdat[one_img_loader->cur_buffer],(ImageTransformer::gain_correction?ImageTransformer::gain_correction:0));
    //    one_img_loader->pinnedInFlow->Update ( one_img_loader->flow, &one_img_loader->sdat[one_img_loader->cur_buffer] );

    SynchDat &sdat = one_img_loader->sdat[one_img_loader->cur_buffer];
    // if ( ImageTransformer::gain_correction != NULL )
    //   ImageTransformer::GainCorrectImage ( &sdat );
    //   ImageTransformer::GainCorrectImage ( &one_img_loader->sdat[one_img_loader->cur_buffer] );

    //    int buffer_ix = one_img_loader->cur_buffer;
    if ( one_img_loader->inception_state->img_control.col_flicker_correct ) {
      ComparatorNoiseCorrector cnc;
      Mask &mask = *(one_img_loader->mask);
      for (size_t rIx = 0; rIx < sdat.GetNumBin(); rIx++) {
        TraceChunk &chunk = sdat.GetChunk(rIx);
        // Copy over temp mask for normalization
        Mask m(chunk.mWidth, chunk.mHeight);
        for (size_t r = 0; r < chunk.mHeight; r++) {
          for (size_t c = 0; c < chunk.mWidth; c++) {
            m[r*chunk.mWidth+c] = mask[(r+chunk.mRowStart) * mask.W() + (c+chunk.mColStart)];
          }
        }
        cnc.CorrectComparatorNoise(&chunk.mData[0], chunk.mHeight, chunk.mWidth, chunk.mDepth,
            &m, one_img_loader->inception_state->img_control.col_flicker_correct_verbose,
            one_img_loader->inception_state->img_control.aggressive_cnc);
      }
    }
    // @todo output trace and dc offset info
    sdat.AdjustForDrift();
    sdat.SubDcOffset();
    SetReadCompleted(one_img_loader);
    size_t usec = timer.GetMicroSec();
    fprintf ( stdout, "FileLoadWorker: ImageProcessing time for flow %d: %0.5lf sec\n", one_img_loader->flow, usec / 1.0e6);

    q->DecrementDone();
  }

  return ( NULL );
}
Example #6
0
void *FileSDatLoader ( void *arg )
{
  // Producer thread: reads each flow's sdat file, performs pinned-pixel
  // update and drift/DC correction, then hands the buffer to a pool of
  // FileSDatLoadWorker threads for further processing.
  ImageLoadWorkInfo *master_img_loader = ( ImageLoadWorkInfo * ) arg;

  WorkerInfoQueue *loadWorkQ = new WorkerInfoQueue ( master_img_loader->flow_buffer_size );
  ImageLoadWorkInfo *n_image_loaders = new ImageLoadWorkInfo[master_img_loader->flow_buffer_size];
  SetUpIndividualImageLoaders ( n_image_loaders,master_img_loader );

  int numWorkers = numCores() /2; // @TODO - this should be subject to inception_state options
  numWorkers = ( numWorkers < 1 ? 1:numWorkers );
  fprintf ( stdout, "FileLoader: numWorkers threads = %d\n", numWorkers );
  {
    int cworker;
    pthread_t work_thread;
    // spawn threads for doing background correction/fitting work
    for ( cworker = 0; cworker < numWorkers; cworker++ )
    {
      int t = pthread_create ( &work_thread, NULL, FileSDatLoadWorker,
                               loadWorkQ );
      if ( t ) {
        // BUG FIX: the original detached before checking pthread_create's
        // return value, detaching an indeterminate handle on failure.
        fprintf ( stderr, "Error starting thread\n" );
      }
      else {
        pthread_detach(work_thread);
      }
    }
  }

  WorkerInfoQueueItem item;
  Timer timer_file_access;
  int flow_buffer_size = master_img_loader->flow_buffer_size;
  for ( int i_buffer = 0; i_buffer < flow_buffer_size;i_buffer++ )
  {
    ImageLoadWorkInfo *cur_image_loader = &n_image_loaders[i_buffer];

    int cur_flow = cur_image_loader->flow; // each job is an n_image_loaders item

    // Throttle so file reads don't run too far ahead of signal processing.
    DontReadAheadOfSignalProcessing (cur_image_loader, master_img_loader->lead);
    TraceChunkSerializer serializer;
    timer_file_access.restart();
    bool ok = serializer.Read ( cur_image_loader->name, cur_image_loader->sdat[cur_image_loader->cur_buffer] );
    if (!ok) {
      ION_ABORT("Couldn't load file: " + ToStr(cur_image_loader->name));
    }

    fprintf ( stdout, "File access = %0.2lf sec.\n", timer_file_access.elapsed() );
    cur_image_loader->pinnedInFlow->Update ( cur_image_loader->flow, &cur_image_loader->sdat[cur_image_loader->cur_buffer] );
    cur_image_loader->sdat[cur_image_loader->cur_buffer].AdjustForDrift();
    cur_image_loader->sdat[cur_image_loader->cur_buffer].SubDcOffset();
    item.finished = false;
    item.private_data = cur_image_loader;
    loadWorkQ->PutItem ( item );
    
    if (ChipIdDecoder::GetGlobalChipId() != ChipId900)
      PauseForLongCompute ( cur_flow,cur_image_loader );
  }

  // wait for all of the images to be processed
  loadWorkQ->WaitTillDone();

  KillQueue ( loadWorkQ,numWorkers );

  delete loadWorkQ;

  delete[] n_image_loaders;

  master_img_loader->finished = true;

  return NULL;
}