Example #1
// Debug helper: dump trace chunk data as a tab-delimited table of row, col, and one value per frame.
void OutputTraceChunks(GridMesh<TraceChunk> &traceChunks, const char *fileName) {
  //ofstream out("flow_0_data_chunks.txt");
  ofstream out(fileName);

  int rowStart, rowEnd, colStart, colEnd;
  // Only dump the first bin; use traceChunks.GetNumBin() as the bound to dump them all.
  for (size_t bIx = 0; bIx < 1; bIx++) {
    traceChunks.GetBinCoords(bIx, rowStart, rowEnd, colStart, colEnd);
    TraceChunk &tc = traceChunks.GetItem(bIx);
    float sum = 0;  // total seconds across the chunk's frames (computed but not written out)
    for (int i = 0; i < tc.mTime.npts(); i++) {
      sum += tc.mTime.deltaFrameSeconds[i];
    }
    for (int row = rowStart; row < rowEnd; row++) {
      for (int col = colStart; col < colEnd; col++) {
        out << row << "\t" << col;
        for (size_t frame = 0; frame < tc.mDepth; frame++) {
          out << "\t" << tc.At(row, col, frame);
        }
        out << endl;
      }
    }
  }
  out.close();
}
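
The GetNumBin()/GetBinCoords() iteration used throughout these examples walks a regular tiling of the chip into rowStep x colStep bins. A minimal, self-contained sketch of that layout follows; the row-major bin ordering and the sizes are assumptions for illustration, not the actual GridMesh implementation.

#include <algorithm>
#include <iostream>

int main() {
  // Illustrative sizes only; the real GridMesh is initialized from the image dimensions.
  int rows = 250, cols = 250, rowStep = 100, colStep = 100;
  int rowBins = (rows + rowStep - 1) / rowStep;  // ceiling division
  int colBins = (cols + colStep - 1) / colStep;
  for (int bIx = 0; bIx < rowBins * colBins; bIx++) {
    int rowStart = (bIx / colBins) * rowStep;          // assumed row-major bin order
    int colStart = (bIx % colBins) * colStep;
    int rowEnd = std::min(rowStart + rowStep, rows);   // edge bins are smaller
    int colEnd = std::min(colStart + colStep, cols);
    std::cout << bIx << "\t[" << rowStart << "," << rowEnd << ") x ["
              << colStart << "," << colEnd << ")" << std::endl;
  }
  return 0;
}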
Example #2
// Slice the raw acquisition into per-region TraceChunks: set each region's t0, sigma and
// time compression, then convert the VFC frame data into the chunk's data cube.
void GenerateDataChunks(TraceConfig &config, T0Calc &t0, const struct RawImage *raw,
                        int rowStep, int colStep, GridMesh<SigmaEst> &sigmaTMid,
                        GridMesh<TraceChunk> &mTraceChunks, Image &img) {
  mTraceChunks.Init(raw->rows, raw->cols, rowStep, colStep);
  int rowStart, rowEnd, colStart, colEnd;
  size_t frameStep = raw->rows * raw->cols;
  int16_t *frameBuff[raw->frames];  // variable-length array (a GCC extension); see the portable sketch after this function
  for (int i = 0; i < raw->frames; i++) {
    frameBuff[i] = raw->image + (i * frameStep);
  }
  for (size_t bIx = 0; bIx < t0.GetNumRegions(); bIx++) {
    SigmaEst &est = sigmaTMid.GetItem(bIx);
    t0.GetRegionCoords(bIx, rowStart, rowEnd, colStart, colEnd);
    TraceChunk &chunk = mTraceChunks.GetItem(bIx);
    float t0Time = t0.GetT0(bIx);
    chunk.mSigma = est.mSigma;
    chunk.mTMidNuc = est.mTMidNuc;
    t0Time -= config.time_start_slop;
    chunk.mStartDetailedTime = config.start_detailed_time;
    chunk.mStopDetailedTime = config.stop_detailed_time;
    chunk.mLeftAvg = config.left_avg;
    chunk.mOrigFrames = raw->frames;
    chunk.mT0 = t0Time/raw->baseFrameRate;
    // Old style using t0 instead of tmid nuc
    chunk.mTime.SetUpTime(raw->uncompFrames, chunk.mT0, chunk.mStartDetailedTime, chunk.mStopDetailedTime, chunk.mLeftAvg);
    chunk.mBaseFrameRate = raw->baseFrameRate;
    chunk.mTimePoints.resize(chunk.mTime.mTimePoints.size());
    copy(chunk.mTime.mTimePoints.begin(), chunk.mTime.mTimePoints.end(), chunk.mTimePoints.begin());
    chunk.mTime.SetupConvertVfcTimeSegments(raw->frames, raw->timestamps, raw->baseFrameRate, raw->rows * raw->cols);
    //    if (bIx == t0.GetNumRegions() /2) {
      // cout << "Setup Frames: ";
      // float deltaFrame = 0;
      // for (int i = 0; i < chunk.mTime.npts(); i++) {
      //   deltaFrame += chunk.mTime.deltaFrame[i];
      //   cout << "\t" << deltaFrame;
      // }
      // cout << endl;
      // chunk.mTime.WriteLinearTransformation(raw->rows * raw->cols);
      //    }
    chunk.SetChipInfo(raw->rows, raw->cols, raw->frames);
    chunk.SetDimensions(rowStart, rowEnd-rowStart, colStart, colEnd-colStart, 0, chunk.mTime.npts());
    chunk.ZeroData();
    chunk.mTime.ConvertVfcSegmentsOpt(rowStart, rowEnd, colStart, colEnd, 
                                      raw->rows, raw->cols, raw->frames,
                                      frameBuff, &chunk.mData[0]);
  }
}
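
GenerateDataChunks builds frameBuff as a variable-length array, which is a GCC extension in C++. Below is a portable sketch of the same frame-pointer table, assuming the frame-major layout implied by frameStep = raw->rows * raw->cols; MakeFrameTable is a hypothetical helper name.

#include <cstdint>
#include <vector>

// One pointer per frame into a frame-major int16 image buffer.
std::vector<int16_t *> MakeFrameTable(int16_t *image, int rows, int cols, int frames) {
  std::vector<int16_t *> frameBuff(frames);
  size_t frameStep = static_cast<size_t>(rows) * cols;  // wells per frame
  for (int i = 0; i < frames; i++) {
    frameBuff[i] = image + i * frameStep;  // frame i starts i full planes into the buffer
  }
  return frameBuff;
}

int main() {
  std::vector<int16_t> image(4 * 3 * 2, 0);  // toy image: rows=4, cols=3, frames=2
  std::vector<int16_t *> table = MakeFrameTable(image.data(), 4, 3, 2);
  return table[1] == image.data() + 12 ? 0 : 1;  // frame 1 starts one 4x3 plane in
}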
Example #3
// Compute an empty-well (MaskEmpty) reference trace for each rowStep x colStep region of the chip.
void Traces::CalcReference(int rowStep, int colStep, GridMesh<std::vector<float> > &gridReference) {
  gridReference.Init(mRow, mCol, rowStep, colStep);
  int numBin = gridReference.GetNumBin();
  int rowStart = -1, rowEnd = -1, colStart = -1, colEnd = -1;
  for (int binIx = 0; binIx < numBin; binIx++) {
    gridReference.GetBinCoords(binIx, rowStart, rowEnd, colStart, colEnd);
    vector<float> &trace = gridReference.GetItem(binIx);
    CalcRegionReference(MaskEmpty, rowStart, rowEnd, colStart, colEnd, trace);
  }
}
Example #4
// Write the per-region sigma, t_mid_nuc, t0, and rate estimates as a tab-delimited table.
void OutputSigmaTmidEstimates(GridMesh<SigmaEst> &sigmaTMid, const char *fileName) {
  ofstream out(fileName);
  int rowStart, rowEnd, colStart, colEnd;
  out << "rowStart" << "\t" << "rowEnd" << "\t" << "colStart" << "\t" << "colEnd" << "\t" << "sigma.t0.est" << "\t" << "tmidnuc.t0.est" << "\t" << "t0.est" << "\t" << "rate" << endl;
  for (size_t bIx = 0; bIx < sigmaTMid.GetNumBin(); bIx++) {            
    sigmaTMid.GetBinCoords(bIx, rowStart, rowEnd, colStart, colEnd);
    SigmaEst &est = sigmaTMid.GetItem(bIx);
    out << rowStart << "\t" << rowEnd << "\t" << colStart << "\t" << colEnd << "\t" << est.mSigma << "\t" << est.mTMidNuc << "\t" << est.mT0 << "\t" << est.mRate << endl;
  }
  out.close();
}
Example #5
// Accumulate per-region statistics of where incorporation starts across the chip.
void Traces::CalcIncorporationStartReference(int nRowStep, int nColStep,
                                             GridMesh<SampleStats<float> > &grid) {
  grid.Init(mRow, mCol, nRowStep, nColStep);

  int numBin = grid.GetNumBin();
  int rowStart = -1, rowEnd = -1, colStart = -1, colEnd = -1;
  for (int binIx = 0; binIx < numBin; binIx++) {
    SampleStats<float> &startStat = grid.GetItem(binIx);
    grid.GetBinCoords(binIx, rowStart, rowEnd, colStart, colEnd);
    CalcIncorpBreakRegionStart(rowStart, rowEnd,
                               colStart, colEnd,
                               startStat);
  }
}
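
Each region above accumulates its incorporation-start values into a SampleStats<float>. As a rough stand-in, a running mean/variance accumulator in the same spirit is sketched below; RunningStats is a hypothetical name and only an assumption about what SampleStats tracks, not its actual implementation.

#include <cassert>
#include <cmath>
#include <cstddef>

// Welford's online algorithm: numerically stable running mean and variance.
class RunningStats {
public:
  RunningStats() : mN(0), mMean(0.0), mM2(0.0) {}
  void AddValue(double x) {
    mN++;
    double delta = x - mMean;
    mMean += delta / mN;
    mM2 += delta * (x - mMean);
  }
  double GetMean() const { return mMean; }
  double GetVar() const { return mN > 1 ? mM2 / (mN - 1) : 0.0; }  // sample variance
  size_t GetCount() const { return mN; }
private:
  size_t mN;
  double mMean, mM2;
};

int main() {
  RunningStats s;
  const double vals[] = {4.0, 6.0, 8.0};
  for (double x : vals) s.AddValue(x);
  assert(std::fabs(s.GetMean() - 6.0) < 1e-12);
  assert(std::fabs(s.GetVar() - 4.0) < 1e-12);
  return 0;
}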
Example #6
// Compress each TraceChunk and fill the matching FlowChunk record (metadata, compressed
// trace data, and time points) in preparation for writing.
void TraceChunkSerializer::ArrangeDataForWriting(GridMesh<TraceChunk> &dataMesh, struct FlowChunk *chunks) {

  if (dataMesh.GetNumBin() == 0) {
    return;
  }
  // Size the scratch buffer from bin 0; mBins[1] would read out of bounds for a single-bin mesh.
  size_t maxSize = dataMesh.mBins[0].mDepth * dataMesh.mBins[0].mHeight * dataMesh.mBins[0].mWidth * 3;
  int8_t *compressed = new int8_t[maxSize];
  compressMicroSec = 0;
  for (size_t bIx = 0; bIx < dataMesh.GetNumBin(); bIx++) {
    TraceChunk &tc = dataMesh.GetItem(bIx);
    struct FlowChunk &fc = chunks[bIx];
    fc.CompressionType = mCompressor->GetCompressionType();
    fc.RowStart = tc.mRowStart;
    fc.ColStart = tc.mColStart;
    fc.FrameStart = tc.mFrameStart;
    fc.FrameStep = tc.mFrameStep;
    fc.ChipRow = tc.mChipRow;
    fc.ChipCol = tc.mChipCol;
    fc.ChipFrame = tc.mChipFrame;
    fc.StartDetailedTime = tc.mStartDetailedTime;
    fc.StopDetailedTime = tc.mStopDetailedTime;
    fc.LeftAvg = tc.mLeftAvg;
    fc.OrigFrames = tc.mOrigFrames;
    fc.T0 = tc.mT0;
    fc.Sigma = tc.mSigma;
    fc.TMidNuc = tc.mTMidNuc;
    fc.Height = tc.mHeight;
    fc.Width = tc.mWidth;
    fc.Depth = tc.mDepth;
    fc.BaseFrameRate = tc.mBaseFrameRate;
    size_t outsize;
    ClockTimer timer;
    mCompressor->Compress(tc, &compressed, &outsize, &maxSize);
    compressMicroSec += timer.GetMicroSec();
    //cout <<"Doing: " << fc.CompressionType << " Bytes per wells: " << outsize/(float) (tc.mHeight * tc.mWidth) <<" Compression ratio: "<< tc.mData.size()*2/(float)outsize << endl;
    fc.Data.p = (int8_t *)malloc(outsize*sizeof(int8_t));
    memcpy(fc.Data.p, compressed, outsize*sizeof(int8_t));
    fc.Data.len = outsize;
    if (0 == outsize) {
      cout << "Warning: chunk " << bIx << " compressed to zero bytes." << endl;
    }
    float * tmp = (float *)malloc(tc.mTimePoints.size() * sizeof(float));
    copy(tc.mTimePoints.begin(), tc.mTimePoints.end(), tmp);
    fc.DeltaFrame.p = tmp;
    fc.DeltaFrame.len = tc.mTimePoints.size() * sizeof(float);
  }
  delete [] compressed;
}
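
ArrangeDataForWriting delegates the actual encoding to mCompressor (VencoLossless, DeltaComp, and friends in the main program below). As a self-contained illustration of the plain delta idea behind the "delta" options, here is a lossless per-well round trip; DeltaEncode/DeltaDecode are conceptual sketches, not the actual compressor implementations.

#include <cassert>
#include <cstdint>
#include <vector>

// Store each sample as its difference from the previous one; the small residuals are
// what make the downstream byte packing / entropy coding effective.
std::vector<int16_t> DeltaEncode(const std::vector<int16_t> &frames) {
  std::vector<int16_t> out(frames.size());
  int16_t prev = 0;
  for (size_t i = 0; i < frames.size(); i++) {
    out[i] = static_cast<int16_t>(frames[i] - prev);
    prev = frames[i];
  }
  return out;
}

std::vector<int16_t> DeltaDecode(const std::vector<int16_t> &deltas) {
  std::vector<int16_t> out(deltas.size());
  int16_t prev = 0;
  for (size_t i = 0; i < deltas.size(); i++) {
    prev = static_cast<int16_t>(prev + deltas[i]);
    out[i] = prev;
  }
  return out;
}

int main() {
  std::vector<int16_t> well = {8204, 8210, 8231, 8290, 8375, 8441};  // toy trace
  assert(DeltaDecode(DeltaEncode(well)) == well);  // lossless round trip
  return 0;
}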
Example #7
// Inverse of ArrangeDataForWriting: restore each TraceChunk's metadata and decompress its
// trace data from the FlowChunk records read off disk.
void TraceChunkSerializer::DecompressFromReading(const struct FlowChunk *chunks, GridMesh<TraceChunk> &dataMesh) {
  compressMicroSec = 0;
  for (size_t bIx = 0; bIx < dataMesh.GetNumBin(); bIx++) {
    TraceChunk &tc = dataMesh.GetItem(bIx);
    const struct FlowChunk &fc = chunks[bIx];
    if (mCompressor == NULL) {
      if (mDebugMsg) { cout << "Got compression type: " << chunks[bIx].CompressionType << endl;}
      mCompressor = CompressorFactory::MakeCompressor((TraceCompressor::CodeType)chunks[bIx].CompressionType);
    }
    ION_ASSERT(chunks[bIx].CompressionType == (size_t)mCompressor->GetCompressionType(), "Wrong compression type: " + ToStr(chunks[bIx].CompressionType) + " vs: " + ToStr(mCompressor->GetCompressionType()));
    tc.mRowStart = fc.RowStart;
    tc.mColStart = fc.ColStart;
    tc.mFrameStart = fc.FrameStart;
    tc.mFrameStep = fc.FrameStep;
    tc.mChipRow = fc.ChipRow;
    tc.mChipCol = fc.ChipCol;
    tc.mChipFrame = fc.ChipFrame;
    tc.mStartDetailedTime = fc.StartDetailedTime;
    tc.mStopDetailedTime = fc.StopDetailedTime;
    tc.mLeftAvg = fc.LeftAvg;
    tc.mOrigFrames = fc.OrigFrames;
    tc.mT0 = fc.T0;
    tc.mSigma = fc.Sigma;
    tc.mTMidNuc = fc.TMidNuc;
    tc.mHeight = fc.Height;
    tc.mWidth = fc.Width;
    tc.mDepth = fc.Depth;
    tc.mBaseFrameRate = fc.BaseFrameRate;
    size_t outsize = fc.Height * fc.Width * fc.Depth;
    tc.mData.resize(outsize);
    tc.mTimePoints.resize(tc.mDepth);
    float * tmp = (float *)fc.DeltaFrame.p;
    copy(tmp,tmp+fc.Depth, tc.mTimePoints.begin());
    ClockTimer timer;
    mCompressor->Decompress(tc, (int8_t *)fc.Data.p, fc.Data.len);
    compressMicroSec += timer.GetMicroSec();
  }
}
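
Both serializer paths accumulate compressMicroSec with a ClockTimer around the compress/decompress call. A minimal std::chrono stand-in is sketched below; MicroTimer is a hypothetical name shown only to make the timing pattern concrete.

#include <chrono>
#include <cstddef>
#include <iostream>

class MicroTimer {
public:
  MicroTimer() : mStart(std::chrono::steady_clock::now()) {}
  size_t GetMicroSec() const {
    return static_cast<size_t>(std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - mStart).count());
  }
private:
  std::chrono::steady_clock::time_point mStart;
};

int main() {
  MicroTimer timer;                            // start just before the timed work
  volatile long sink = 0;
  for (int i = 0; i < 100000; i++) sink += i;  // stand-in for a Compress/Decompress call
  std::cout << "Took " << timer.GetMicroSec() << " usec" << std::endl;
  return 0;
}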
Example #8
// Convert a run's acq_*.dat acquisition files into compressed .sdat trace chunk files:
// estimate t0 from the beadfind flows, estimate sigma/t_mid_nuc, generate per-region
// chunks, and write them with the selected compressor.
int main(int argc, const char *argv[]) {
  OptArgs opts;  
  TraceConfig config;
  string inputDir;
  string outputDir;
  bool help;

  opts.ParseCmdLine(argc, argv);
  opts.GetOption(inputDir, "", '-', "source-dir");
  opts.GetOption(outputDir, "", '-', "output-dir");
  opts.GetOption(config.precision, "5", '-', "precision");
  opts.GetOption(config.numEvec, "7", '-', "num-evec");
  opts.GetOption(config.doDebug, "false", '-', "debug-files");
  opts.GetOption(config.compressionType, "delta", '-', "compression");
  opts.GetOption(config.numFlows, "-1", '-', "num-flows");
  opts.GetOption(config.numCores, "6", '-', "num-cores");
  opts.GetOption(config.errCon,"0",'-',"err-con");
  opts.GetOption(config.rankGood,"0",'-',"rank-good");
  opts.GetOption(config.pivot,"0",'-',"pivot");
  opts.GetOption(help, "false", 'h', "help");
  opts.GetOption(config.isThumbnail, "false", '-', "thumbnail");
  opts.GetOption(config.use_hard_est, "false",'-', "use-hard-est");
  opts.GetOption(config.t0_hard, "0", '-', "t0-hard");
  opts.GetOption(config.tmid_hard, "0", '-', "tmid-hard");
  opts.GetOption(config.sigma_hard, "0", '-', "sigma-hard");
  opts.GetOption(config.row_step, "100", '-', "row-step");
  opts.GetOption(config.col_step, "100", '-', "col-step");
  opts.GetOption(config.bg_param, "", '-', "region-param");
  opts.GetOption(config.grind_acq_0, "0", '-', "grind-acq0");
  if(help || inputDir.empty() || outputDir.empty()) {
    usage();
  }
  char *explog_path = NULL;
  explog_path = MakeExpLogPathFromDatDir(inputDir.c_str());
  int numFlows = config.numFlows;
  if (numFlows < 0) { 
    numFlows = GetTotalFlows(explog_path); 
  }

  // Check and setup our compression type
  TraceChunkSerializer serializer;
  serializer.SetRecklessAbandon(true);
  if (config.compressionType == "svd") {
    SvdDatCompress *dc = new SvdDatCompress(config.precision, config.numEvec);
    serializer.SetCompressor(dc);
    cout << "Doing lossy svd compression. (" << serializer.GetCompressionType() << ")" << endl;
  }
  // else if (config.compressionType == "svd+") {
  //   SvdDatCompressPlus *dc = new SvdDatCompressPlus();
  //   serializer.SetCompressor(dc);
  //   cout << "Doing lossy svd compression. (" << serializer.GetCompressionType() << ")" << endl;
  // }
  // else if (config.compressionType == "svd++") {
  //   SvdDatCompressPlusPlus *dc = new SvdDatCompressPlusPlus();
  //   if (config.errCon >0 )
  //     dc->SetErrCon(config.errCon);
  //   if (config.rankGood > 0 )
  //     dc->SetRankGood(config.rankGood);
  //   if (config.pivot > 0)
  //     dc->SetPivot(config.pivot);
  //   serializer.SetCompressor(dc);
  //   cout << "Doing lossy svd compression for good traces and delta for bad ones. (" << serializer.GetCompressionType() << ")" << endl;
  // }
  else if (config.compressionType == "delta") {
    VencoLossless *venco = new VencoLossless();
    serializer.SetCompressor(venco);
    cout << "Doing lossless delta compression. (" << serializer.GetCompressionType() << ")" << endl;
  }
  else if (config.compressionType == "delta-plain") {
    DeltaComp *delta = new DeltaComp();
    serializer.SetCompressor(delta);
    cout << "Doing lossless delta plain compression. (" << serializer.GetCompressionType() << ")" << endl;
  }
  else if (config.compressionType == "delta-plain-fst") {
    DeltaCompFst *delta = new DeltaCompFst();
    serializer.SetCompressor(delta);
    cout << "Doing lossless delta plain fast compression. (" << serializer.GetCompressionType() << ")" << endl;
  }
  else if (config.compressionType == "delta-plain-fst-smx") {
    DeltaCompFstSmX *delta = new DeltaCompFstSmX();
    serializer.SetCompressor(delta);
    cout << "Doing lossless delta plain fast (SmX) compression. (" << serializer.GetCompressionType() << ")" << endl;
  }
  else if (config.compressionType == "none") {
    TraceCompressor *vanilla = new TraceNoCompress();
    serializer.SetCompressor(vanilla);
    cout << "Doing no compression. (" << serializer.GetCompressionType() << ")" << endl;
  }
  else {
    ION_ABORT("Don't recognize compression type: " + config.compressionType);
  }

  const char *id = GetChipId(explog_path);
  if (explog_path) free (explog_path);
  ChipIdDecoder::SetGlobalChipId(id);
  ImageTransformer::CalibrateChannelXTCorrection(inputDir.c_str(), "lsrowimage.dat");

  Image bfImg1;
  string bfFile = inputDir + "/beadfind_pre_0003.dat";
  bfImg1.LoadRaw(bfFile.c_str());
  const RawImage *bf1raw = bfImg1.GetImage(); 
  Mask mask(bf1raw->cols, bf1raw->rows);
  ImageTransformer::XTChannelCorrect(bfImg1.raw,bfImg1.results_folder);

  bfImg1.FilterForPinned (&mask, MaskEmpty, false);

  Image bfImg2;
  string bfFile2 = inputDir + "/beadfind_pre_0001.dat";
  bfImg2.LoadRaw(bfFile2.c_str());
  ImageTransformer::XTChannelCorrect(bfImg2.raw, bfImg2.results_folder);

  bfImg2.FilterForPinned (&mask, MaskEmpty, false);
  const RawImage *bf2raw = bfImg2.GetImage(); 


  GridMesh<T0Prior> t0Prior;
  T0Calc bfT0;
  /* Calc t0 and get prior. */
  cout << "Doing beadfind t0" << endl;
  GenerateBfT0Prior(config, bf1raw->image, bf1raw->baseFrameRate, bf1raw->rows, bf1raw->cols,
                    bf1raw->frames, bf1raw->timestamps,
                    config.row_step, config.col_step, &mask, bfT0, t0Prior);

  GridMesh<T0Prior> t0Prior2;
  T0Calc bfT02;
  GenerateBfT0Prior(config, bf2raw->image, bf2raw->baseFrameRate, bf2raw->rows, bf2raw->cols,
                    bf2raw->frames, bf2raw->timestamps,
                    config.row_step, config.col_step, &mask, bfT02, t0Prior2);

  SigmaTMidNucEstimation sigmaEst;
  sigmaEst.Init(config.rate_sigma_intercept, config.rate_sigma_slope, 
                config.t0_tmid_intercept, config.t0_tmid_slope, bf1raw->baseFrameRate);
  GridMesh<SigmaEst> sigmaTMid;
  bfImg1.Close();
  bfImg2.Close();

  // Calculate individual well t0 by looking at neighboring regions
  vector<float> wellT0;
  bfT0.CalcIndividualT0(wellT0, 0);
  vector<float> wellT02;
  bfT02.CalcIndividualT0(wellT02, 0);
  for (size_t i = 0; i < wellT0.size(); i++) {
    if (wellT0[i] > 0 && wellT02[i] > 0) {
      wellT0[i] = (wellT0[i] + wellT02[i])/2.0f;
    }
    else {
      wellT0[i] = max(wellT0[i], wellT02[i]);
    }
  }

  // Average the region-level t0; should we do this first and then just do the single-well level?
  for (size_t bIx = 0; bIx < bfT0.GetNumRegions(); bIx++) {
    double t1 = bfT0.GetT0(bIx);
    double t2 = bfT02.GetT0(bIx);
    if (t1 > 0 && t2 > 0) {
      t1 = (t1 + t2)/2.0;
    }
    else {
      t1 = max(t1,t2);
    }
    bfT0.SetT0(bIx, t1);
  }

  // Single thread first dat
  for (size_t datIx = 0; datIx < 1; ++datIx) {
    cout << "Doing: " << datIx << endl;
    char buffer[2048];
    snprintf(buffer, sizeof(buffer), "%s/acq_%.4d.dat", inputDir.c_str(), (int) datIx);
    string datFile = buffer;
    /* Use prior to calculate t0 and slope. */
    Image datImg;
    T0Calc t0;
    datImg.LoadRaw(datFile.c_str());
    //    ImageTransformer::XTChannelCorrect(datImg.raw,datImg.results_folder);
    const RawImage *datRaw = datImg.GetImage(); 

    /* Estimate sigma and t_mid_nuc */
    if (datIx == 0) {
      cout << "Doing acquisition t0" << endl;

      GenerateAcqT0Prior(config, datRaw->image, datRaw->baseFrameRate, datRaw->rows, datRaw->cols,
                         datRaw->frames, datRaw->timestamps,
                         config.row_step, config.col_step, &mask, t0, t0Prior);
      
      ClockTimer timer;
      cout << "Estimating sigma." << endl;
      sigmaTMid.Init(datRaw->rows, datRaw->cols, config.row_step, config.col_step);
      for (size_t bIx = 0; bIx < t0.GetNumRegions(); bIx++) {
        t0.SetT0(bIx, bfT0.GetT0(bIx));
      }
      int neighbors = 2;
      if (config.isThumbnail) {
        cout << "Doing thumbnail version of slope." << endl;
        neighbors = 1;
      }
      EstimateSigmaValue(t0, sigmaEst, sigmaTMid, neighbors);
      timer.PrintMilliSeconds(cout,"Sigma Est took:");
      string sigmaFile = outputDir + "/sigma_tmid_est.txt";
      OutputSigmaTmidEstimates(sigmaTMid, sigmaFile.c_str());
    }

    /* For each region do shifting */
    ClockTimer timer;
    cout << "Shifting traces" << endl;
    timer.StartTimer();
    //    ShiftTraces(bfT0, wellT0, datRaw->frames, datRaw->baseFrameRate, datRaw->timestamps, datRaw->image);
    timer.PrintMilliSeconds(cout,"Shift took:");
    if (!config.bg_param.empty()) {
      DataCube<int> rowsCols;
      DataCube<float> tmidSigma;
      DataCube<float> fitTmidSigma;
      string path = config.bg_param + ":/region/region_location";
      if (!H5File::ReadDataCube(path, rowsCols)) {
        ION_ABORT("Couldn't read file: " + path);
      }
      path = config.bg_param + ":/region/region_init_param";
      if (!H5File::ReadDataCube(path, fitTmidSigma)) {
        ION_ABORT("Couldn't read file: " + path);
      }
      for (size_t i = 0; i < rowsCols.GetNumX(); i++) {
        int row = rowsCols.At(i,1,0);
        int col = rowsCols.At(i,0,0);
        SigmaEst &est = sigmaTMid.GetItemByRowCol(row, col);
        float tmid_est =  fitTmidSigma.At(i,0,0);
        float sigma_est = fitTmidSigma.At(i,1,0);
        est.mTMidNuc = tmid_est;
        est.mSigma = sigma_est;
      }
      string fitSigmaFile = outputDir + "/bg_fit_sigma_tmid_est.txt";
      OutputSigmaTmidEstimates(sigmaTMid, fitSigmaFile.c_str());

      // path = config.bg_param + ":/region/region_init_param";
      // if (!H5File::ReadMatrix(path, tmidSigma)) {
      //   ION_ABORT("Couldn't read file: " + path);
      // }
      // for (size_t i = 0; i < rowsCols.n_rows; i++) {
      //   int row = rowsCols.at(i,0);
      //   int col = rowsCols.at(i,1);
      //   SigmaEst &est = sigmaTMid.GetItemByRowCol(row, col);
      //   float tmid_est =  tmidSigma.at(i,0);
      //   float sigma_est = tmidSigma.at(i,1);
      //   est.mTMidNuc = tmid_est;
      //   est.mSigma = sigma_est;
      // }
      // string sigmaFile = outputDir + "/supplied_sigma_tmid_est.txt";
      // OutputSigmaTmidEstimates(sigmaTMid, sigmaFile.c_str());
    }
    else if (config.use_hard_est) {
      for (size_t i = 0; i < bfT0.GetNumRegions(); i++) {
        bfT0.SetT0(i,config.t0_hard * datRaw->baseFrameRate + config.time_start_slop);
      }
      for (size_t i = 0; i < sigmaTMid.GetNumBin(); i++) {
        SigmaEst &est = sigmaTMid.GetItem(i);
        est.mTMidNuc = config.tmid_hard;
        est.mSigma = config.sigma_hard;
        est.mT0 = config.t0_hard;
      }
    }
    /* Use t0 and sigma to get the time compression bkgModel wants. */
    cout << "Generating chunks" << endl;
    //    GridMesh<TraceChunk> traceChunks;
    SynchDat sdat;
    if (datIx == 0  && config.grind_acq_0 > 0) {
      int nTimes = config.grind_acq_0;
      timer.StartTimer();
      size_t processMicroSec = 0;
      size_t hdf5MicroSec = 0;
      size_t compressMicroSec = 0;
      size_t convertMicroSec = 0;
      for (int i = 0; i <nTimes; i++) {
        //GridMesh<TraceChunk> traceChunken;
        SynchDat sdatIn;
        AddMetaData(sdatIn, datRaw, datIx);
        ClockTimer convTimer;
        GenerateDataChunks(config, bfT0, datRaw, config.row_step, config.col_step, sigmaTMid, sdatIn.mChunks, datImg);
        convertMicroSec += convTimer.GetMicroSec();
        snprintf(buffer, sizeof(buffer), "%s/acq_%.4d.sdat", outputDir.c_str(), (int)datIx);
        serializer.Write(buffer, sdatIn);
        processMicroSec += serializer.computeMicroSec;
        hdf5MicroSec += serializer.ioMicroSec;
        compressMicroSec += serializer.compressMicroSec;
      }
      size_t usec = timer.GetMicroSec();
      cout << "Took: " << usec / 1.0e6 << " seconds, " << usec / (nTimes * 1.0f) << " usec per write." << endl;
      timer.PrintMilliSeconds(cout,"Chunks took:");
      cout << "Read took: " << processMicroSec / (1e3 * nTimes) << " milli seconds per sdat compute." << endl;
      cout << "Read took: " << hdf5MicroSec / (1e3 * nTimes) << " milli seconds per sdat hdf5." << endl;
      cout << "Read took: " << compressMicroSec / (1e3 * nTimes) << " milli seconds per sdat compressing." << endl;
      cout << "Read took: " << convertMicroSec / (1e3 * nTimes) << " milli seconds per sdat converting." << endl;
      exit(0);
    }
    else {
      timer.StartTimer();
      AddMetaData(sdat, datRaw, datIx);
      GenerateDataChunks(config, bfT0, datRaw, config.row_step, config.col_step, sigmaTMid, sdat.mChunks,datImg);
      timer.PrintMilliSeconds(cout,"Chunks took:");
      if (datIx == 0 && config.doDebug) {
        OutputTraceChunks(sdat.mChunks, "flow_0_data_chunks.txt");
      }
    }
    datImg.Close();    

    /* Serialize onto disk. */
    snprintf(buffer, sizeof(buffer), "%s/acq_%.4d.sdat", outputDir.c_str(), (int)datIx);
    serializer.Write(buffer, sdat);
    /* Read back in first flow for checking */
    if (datIx == 0) {
      TraceChunkSerializer readSerializer;
      readSerializer.SetRecklessAbandon(true);
      //      GridMesh<TraceChunk> traceChunksIn;  
      SynchDat sdatIn;
      readSerializer.Read(buffer, sdatIn);
      if (datIx == 0 && config.doDebug) {
        OutputTraceChunks(sdatIn.mChunks, "flow_0_data_chunks_read.txt");
      }
      SampleQuantiles<float> s(50000);
      SampleQuantiles<float> s2(50000);
      SampleQuantiles<float> sAbs(50000);
      SampleStats<double> ss;
      int diffCount = 0;
      for (size_t bIx = 0; bIx < sdatIn.mChunks.mBins.size(); bIx++) {
        if (sdatIn.mChunks.mBins[bIx].mT0 != sdat.mChunks.mBins[bIx].mT0) {
          cout << "Got: " << sdatIn.mChunks.mBins[bIx].mT0 << " vs: " << sdat.mChunks.mBins[bIx].mT0 << endl;
          exit(1);
        }
        for (size_t i = 0; i < sdatIn.mChunks.mBins[bIx].mData.size(); i++) {
          double diff = (double)sdatIn.mChunks.mBins[bIx].mData[i] - (double)sdat.mChunks.mBins[bIx].mData[i];
          if (!std::isfinite(diff)) {
            cout << "NaNs!!" << endl;
          }
          if (diffCount < 10 && fabs(diff) > .00001) {  // originally: diff != 0
            diffCount++;
            cout << "Bin: " << bIx << " well: " << i << " diff is: " << diff << endl;
          }
          s.AddValue(diff);
          sAbs.AddValue(fabs(diff));
          ss.AddValue(sqrt(diff * diff));
          s2.AddValue(sqrt(diff * diff));
        }
      }
      cout << "Median rms: " << s2.GetMedian()  << " Avg: " << ss.GetMean() << " diff: " << s.GetMedian() << endl;
      cout << "Abs(diff) Quantiles:" << endl;
      for (size_t i = 0; i <= 100; i+=10) {
        cout << i << "\t" << sAbs.GetQuantile(i/100.0) << endl;
      }
    }      
  }
  // do the next N flows multithreaded
  if (numFlows > 1) {
    PJobQueue jQueue (config.numCores, numFlows-1);  
    vector<CreateSDat> jobs(numFlows -1);
    // for (int i = 0; i < 4; i++) {
    //   char buffer[2048];
    //   snprintf(buffer, sizeof(buffer), "%s/beadfind_pre_%.4d.dat", inputDir.c_str(), (int) i);
    //   string input = buffer;
    //   snprintf(buffer, sizeof(buffer), "%s/beadfind_pre_%.4d.sdat", outputDir.c_str(), (int)i);
    //   string output = buffer;
    //   jobs[i].Init(&config, input, output, &wellT0, &bfT0, &sigmaTMid);
    //   jQueue.AddJob(jobs[i]);
    // }

    // jQueue.WaitUntilDone();
    for (int i = 1; i < numFlows; i++) {
      char buffer[2048];
      snprintf(buffer, sizeof(buffer), "%s/acq_%.4d.dat", inputDir.c_str(), (int) i);
      string input = buffer;
      snprintf(buffer, sizeof(buffer), "%s/acq_%.4d.sdat", outputDir.c_str(), (int)i);
      string output = buffer;
      jobs[i-1].Init(&config, input, output, &wellT0, &bfT0, &sigmaTMid, i);
      jQueue.AddJob(jobs[i-1]);
    }
    jQueue.WaitUntilDone();
  }
  /* Serialize into background models */
  cout << "Done." << endl;
}
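
The per-well and per-region t0 merging in main follows one rule: average the two beadfind estimates when both are valid, otherwise keep whichever one succeeded. Restated as a standalone helper for clarity; MergeT0 is a hypothetical name.

#include <algorithm>
#include <cassert>

float MergeT0(float a, float b) {
  if (a > 0 && b > 0) {
    return (a + b) / 2.0f;  // both estimates valid: average them
  }
  return std::max(a, b);    // otherwise keep whichever one is set
}

int main() {
  assert(MergeT0(10.0f, 12.0f) == 11.0f);
  assert(MergeT0(0.0f, 12.0f) == 12.0f);
  return 0;
}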