Example 1
void *FileSDatLoader ( void *arg )
{
  ImageLoadWorkInfo *master_img_loader = ( ImageLoadWorkInfo * ) arg;


  WorkerInfoQueue *loadWorkQ = new WorkerInfoQueue ( master_img_loader->flow_buffer_size );
  ImageLoadWorkInfo *n_image_loaders = new ImageLoadWorkInfo[master_img_loader->flow_buffer_size];
  SetUpIndividualImageLoaders ( n_image_loaders,master_img_loader );

  int numWorkers = numCores() /2; // @TODO - this should be subject to inception_state options
  numWorkers = ( numWorkers < 1 ? 1:numWorkers );
  fprintf ( stdout, "FileSDatLoader: numWorkers threads = %d\n", numWorkers );
  {
    int cworker;
    pthread_t work_thread;
    // spawn threads for doing background correction/fitting work
    for ( cworker = 0; cworker < numWorkers; cworker++ )
    {
      int t = pthread_create ( &work_thread, NULL, FileSDatLoadWorker,
                               loadWorkQ );
      pthread_detach(work_thread);
      if ( t )
        fprintf ( stderr, "Error starting thread\n" );
    }
  }

  WorkerInfoQueueItem item;
  int flow_buffer_size = master_img_loader->flow_buffer_size;
  for ( int i_buffer = 0; i_buffer < flow_buffer_size;i_buffer++ )
  {
    ImageLoadWorkInfo *cur_image_loader = &n_image_loaders[i_buffer];

    int cur_flow = cur_image_loader->flow; // each job is an n_image_loaders item
    DontReadAheadOfSignalProcessing (cur_image_loader, master_img_loader->lead);


    //    cur_image_loader->sdat[cur_image_loader->cur_buffer].AdjustForDrift();
    //    cur_image_loader->sdat[cur_image_loader->cur_buffer].SubDcOffset();
    item.finished = false;
    item.private_data = cur_image_loader;
    loadWorkQ->PutItem ( item );

    if (!ChipIdDecoder::IsProtonChip())  //P2 and Long compute?
      PauseForLongCompute ( cur_flow,cur_image_loader );
  }

  // wait for all of the images to be processed
  loadWorkQ->WaitTillDone();

  KillQueue ( loadWorkQ,numWorkers );

  delete loadWorkQ;

  delete[] n_image_loaders;

  master_img_loader->finished = true;

  return NULL;
}
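
Note: all of the examples on this page size their worker pools with numCores(), whose definition is not included in these excerpts. A minimal portable stand-in, assumed here only for experimentation and not the project's actual helper, could look like this:

#include <thread>

// Hypothetical replacement for the project's numCores() helper.
// std::thread::hardware_concurrency() may return 0 when the core count
// cannot be determined, so fall back to 1 in that case.
static int numCoresFallback()
{
  unsigned int n = std::thread::hardware_concurrency();
  return (n == 0) ? 1 : static_cast<int>(n);
}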
Example 2
bool BaseCallerParameters::InitContextVarsFromOptArgs(OptArgs& opts){

    assert(bc_files.options_set);
    char default_run_id[6]; // Create a run identifier from full output directory string
    ion_run_to_readname (default_run_id, (char*)bc_files.output_directory.c_str(), bc_files.output_directory.length());
    context_vars.run_id                      = opts.GetFirstString ('-', "run-id", default_run_id);
    num_threads_                             = opts.GetFirstInt    ('n', "num-threads", max(2*numCores(), 4));
    num_bamwriter_threads_                   = opts.GetFirstInt    ('-', "num-threads-bamwriter", 6);

    context_vars.flow_signals_type           = opts.GetFirstString ('-', "flow-signals-type", "none");
    context_vars.extra_trim_left             = opts.GetFirstInt    ('-', "extra-trim-left", 0);
    context_vars.only_process_unfiltered_set = opts.GetFirstBoolean('-', "only-process-unfiltered-set", false);

    // Treephaser options
    context_vars.dephaser                    = opts.GetFirstString ('-', "dephaser", "treephaser-sse");
    context_vars.keynormalizer               = opts.GetFirstString ('-', "keynormalizer", "gain");
    context_vars.windowSize                  = opts.GetFirstInt    ('-', "window-size", DPTreephaser::kWindowSizeDefault_);
    context_vars.skip_droop                  = opts.GetFirstBoolean('-', "skip-droop", true);
    context_vars.skip_recal_during_norm      = opts.GetFirstBoolean('-', "skip-recal-during-normalization", false);
    context_vars.diagonal_state_prog         = opts.GetFirstBoolean('-', "diagonal-state-prog", false);

    // Not every combination of options is possible here:
    if (context_vars.diagonal_state_prog and context_vars.dephaser != "treephaser-swan") {
      cout << " === BaseCaller Option Incompatibility: Using dephaser treephaser-swan with diagonal state progression instead of "
           << context_vars.dephaser << endl;
      context_vars.dephaser = "treephaser-swan";
    }

    context_vars.process_tfs      = true;
    context_vars.options_set      = true;
    return true;
}
Example 3
void PhaseEstimator::DoPhaseEstimation(RawWells *wells, Mask *mask, const ion::FlowOrder& flow_order, const vector<KeySequence>& keys,
    int region_size_x, int region_size_y,  bool use_single_core)
{
  flow_order_.SetFlowOrder(flow_order.str(), min(flow_order.num_flows(), 120));
  keys_ = keys;
  chip_size_x_ = mask->W();
  chip_size_y_ = mask->H();
  region_size_x_ = region_size_x;
  region_size_y_ = region_size_y;


  printf("Phase estimation mode = %s\n", phasing_estimator_.c_str());

  if (phasing_estimator_ == "override") {
    // Nothing to do!

  } else if (phasing_estimator_ == "spatial-refiner") {

    int num_workers = max(numCores(), 2);
    if (use_single_core)
      num_workers = 1;

    wells->Close();
    wells->OpenForIncrementalRead();
    SpatialRefiner(wells, mask, num_workers);


  } else if (phasing_estimator_ == "spatial-refiner-2") {

    int num_workers = max(numCores(), 2);
    if (use_single_core)
      num_workers = 1;

    wells->Close();
    wells->OpenForIncrementalRead();

    train_subset_count_ = 2;
    train_subset_cf_.resize(train_subset_count_);
    train_subset_ie_.resize(train_subset_count_);
    train_subset_dr_.resize(train_subset_count_);
    train_subset_regions_x_.resize(train_subset_count_);
    train_subset_regions_y_.resize(train_subset_count_);


    for (train_subset_ = 0; train_subset_ < train_subset_count_; ++train_subset_) {
      SpatialRefiner(wells, mask, num_workers);
      train_subset_cf_[train_subset_] = result_cf_;
      train_subset_ie_[train_subset_] = result_ie_;
      train_subset_dr_[train_subset_] = result_dr_;
      train_subset_regions_x_[train_subset_] = result_regions_x_;
      train_subset_regions_y_[train_subset_] = result_regions_y_;
    }

  } else
    ION_ABORT("Requested phase estimator is not recognized");

  // Compute mean cf, ie, dr

  average_cf_ = 0;
  average_ie_ = 0;
  average_dr_ = 0;
  int count = 0;

  for (int r = 0; r < result_regions_x_*result_regions_y_; r++) {
    if (result_cf_[r] || result_ie_[r] || result_dr_[r]) {
      average_cf_ += result_cf_[r];
      average_ie_ += result_ie_[r];
      average_dr_ += result_dr_[r];
      count++;
    }
  }
  if (count > 0) {
    average_cf_ /= count;
    average_ie_ /= count;
    average_dr_ /= count;
  }
}
Example 4
  ResourceManager::ResourceManager(bool runningInExeMgr) :
    fExeMgrStr("ExeMgr1"),
    fSystemConfigStr("SystemConfig"),
	fDMLProcStr("DMLProc"),
	fBatchInsertStr("BatchInsert"),
	fConfig(Config::makeConfig()),
	fNumCores(8),
	fHjNumThreads(defaultNumThreads),
	fJlProcessorThreadsPerScan(defaultProcessorThreadsPerScan),
	fJlNumScanReceiveThreads(defaultScanReceiveThreads),
	fTwNumThreads(defaultNumThreads),
	fHJUmMaxMemorySmallSideDistributor(fHashJoinStr,
		"UmMaxMemorySmallSide",
		getUintVal(fHashJoinStr, "TotalUmMaxMemorySmallSide", defaultTotalUmMemory),
		getUintVal(fHashJoinStr, "UmMaxMemorySmallSide", defaultHJUmMaxMemorySmallSide),
		0),
	fHJPmMaxMemorySmallSideSessionMap(
		getUintVal(fHashJoinStr, "PmMaxMemorySmallSide", defaultHJPmMaxMemorySmallSide)),
	isExeMgr(runningInExeMgr)
  {
	int temp;
	int configNumCores = -1;

	fTraceFlags = 0;
	//See if we want to override the calculated #cores
	temp = getIntVal(fJobListStr, "NumCores", -1);
	if (temp > 0)
		configNumCores = temp;

	if (configNumCores <= 0)
	{
		//count the actual #cores
		utils::CGroupConfigurator cg;
		fNumCores = cg.getNumCores();
		if (fNumCores <= 0)
			fNumCores = 8;
	}
	else
		fNumCores = configNumCores;

	//based on the #cores, calculate some thread parms
	if (fNumCores > 0)
	{
		fHjNumThreads = fNumCores;
		fJlNumScanReceiveThreads = fNumCores;
		fTwNumThreads = fNumCores;
	}

	//possibly override any calculated values
	temp = getIntVal(fHashJoinStr, "NumThreads", -1);
	if (temp > 0)
		fHjNumThreads = temp;
	temp = getIntVal(fJobListStr, "ProcessorThreadsPerScan", -1);
	if (temp > 0)
		fJlProcessorThreadsPerScan = temp;
	temp = getIntVal(fJobListStr, "NumScanReceiveThreads", -1);
	if (temp > 0)
		fJlNumScanReceiveThreads = temp;
	temp = getIntVal(fTupleWSDLStr, "NumThreads", -1);
	if (temp > 0)
		fTwNumThreads = temp;

	pmJoinMemLimit = getIntVal(fHashJoinStr, "PmMaxMemorySmallSide",
	  defaultHJPmMaxMemorySmallSide);
	// Need to use different limits if this instance isn't running on the UM,
	// or if it's an ExeMgr running on a PM node
	if (!isExeMgr)
		totalUmMemLimit = pmJoinMemLimit;
	else {
        string whichLimit = "TotalUmMemory";
        string pmWithUM = fConfig->getConfig("Installation", "PMwithUM");
        if (pmWithUM == "y" || pmWithUM == "Y") {
            oam::Oam OAM;
            oam::oamModuleInfo_t moduleInfo = OAM.getModuleInfo();
            string &moduleType = boost::get<1>(moduleInfo);

            if (moduleType == "pm" || moduleType == "PM") {
                string doesItExist = fConfig->getConfig(fHashJoinStr, "TotalPmUmMemory");
                if (!doesItExist.empty())
                    whichLimit = "TotalPmUmMemory";
            }
        }

        string umtxt = fConfig->getConfig(fHashJoinStr, whichLimit);
        if (umtxt.empty())
            totalUmMemLimit = defaultTotalUmMemory;
        else {
            // is it an absolute or a percentage?
            if (umtxt.find('%') != string::npos) {
                utils::CGroupConfigurator cg;
                uint64_t totalMem = cg.getTotalMemory();
                totalUmMemLimit = atoll(umtxt.c_str())/100.0 * (double) totalMem;

                if (totalUmMemLimit == 0 || totalUmMemLimit == LLONG_MIN ||
                  totalUmMemLimit == LLONG_MAX)  // some garbage in the xml entry
                    totalUmMemLimit = defaultTotalUmMemory;
            }
            else {  // an absolute; use the existing converter
                totalUmMemLimit = getIntVal(fHashJoinStr, whichLimit,
                    defaultTotalUmMemory);
            }
        }
	}
	configuredUmMemLimit = totalUmMemLimit;
	//cout << "RM: total UM memory = " << totalUmMemLimit << endl;

	// multi-thread aggregate
	string nt, nb, nr;
	nt = fConfig->getConfig("RowAggregation", "RowAggrThreads");
	if (nt.empty())
		fAggNumThreads = numCores();
	else
		fAggNumThreads = fConfig->uFromText(nt);

	nb = fConfig->getConfig("RowAggregation","RowAggrBuckets");
	if (nb.empty())
		fAggNumBuckets = fAggNumThreads * 4;
	else
		fAggNumBuckets = fConfig->uFromText(nb);

	nr = fConfig->getConfig("RowAggregation", "RowAggrRowGroupsPerThread");
	if (nr.empty())
		fAggNumRowGroups = 20;
	else
		fAggNumRowGroups = fConfig->uFromText(nr);

	// window function
	string wt = fConfig->getConfig("WindowFunction", "WorkThreads");
	if (wt.empty())
		fWindowFunctionThreads = numCores();
	else
		fWindowFunctionThreads = fConfig->uFromText(wt);

	// hdfs info
	string hdfs = fConfig->getConfig("SystemConfig", "DataFilePlugin");

	if ( hdfs.find("hdfs") != string::npos)
		fUseHdfs = true;
	else
		fUseHdfs = false;
  }
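
The TotalUmMemory handling above accepts either a percentage of total memory (a value containing '%') or an absolute amount. A simplified standalone sketch of that rule, with hypothetical names and without the Config/CGroup plumbing:

#include <climits>
#include <cstdint>
#include <cstdlib>
#include <string>

// Sketch of the percentage-or-absolute rule used for TotalUmMemory above.
// 'totalMem' stands in for CGroupConfigurator::getTotalMemory() and
// 'defaultLimit' for defaultTotalUmMemory; both are assumptions here.
static int64_t ParseMemLimit(const std::string& text, uint64_t totalMem,
                             int64_t defaultLimit)
{
  if (text.empty())
    return defaultLimit;
  if (text.find('%') != std::string::npos) {
    int64_t limit = static_cast<int64_t>(std::atoll(text.c_str()) / 100.0 * (double)totalMem);
    if (limit == 0 || limit == LLONG_MIN || limit == LLONG_MAX)  // garbage in the entry
      return defaultLimit;
    return limit;
  }
  return std::atoll(text.c_str());  // absolute value; the real code routes this through getIntVal()
}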
Example 5
void *FileLoader ( void *arg )
{
  ImageLoadWorkInfo *master_img_loader = ( ImageLoadWorkInfo * ) arg;

  prctl(PR_SET_NAME,"FileLoader",0,0,0);


  WorkerInfoQueue *loadWorkQ = new WorkerInfoQueue ( master_img_loader->flow_buffer_size );


  ImageLoadWorkInfo *n_image_loaders = new ImageLoadWorkInfo[master_img_loader->flow_buffer_size];
  SetUpIndividualImageLoaders ( n_image_loaders,master_img_loader );


  //   int numWorkers = 1;
  int numWorkers = numCores() /4; // @TODO - this should be subject to inception_state options
  numWorkers = ( numWorkers < 4 ? 4:numWorkers );
  fprintf ( stdout, "FileLoader: numWorkers threads = %d\n", numWorkers );
  WqInfo_t wq_info[numWorkers];

  {
    int cworker;
    pthread_t work_thread;
    // spawn threads for doing background correction/fitting work
    for ( cworker = 0; cworker < numWorkers; cworker++ )
    {
      wq_info[cworker].threadNum=cworker;
      wq_info[cworker].wq = loadWorkQ;
      int t = pthread_create ( &work_thread, NULL, FileLoadWorker,
                               &wq_info[cworker] );
      pthread_detach(work_thread);
      if ( t )
        fprintf ( stderr, "Error starting thread\n" );
    }
  }

  WorkerInfoQueueItem item;

  //time_t start, end;
  int flow_buffer_size = master_img_loader->flow_buffer_size;

  // this loop goes over the individual image loaders
  for ( int i_buffer = 0; i_buffer < flow_buffer_size; i_buffer++ )
  {
    ImageLoadWorkInfo *cur_image_loader = &n_image_loaders[i_buffer];

    int cur_flow = cur_image_loader->flow;  // each job is an n_image_loaders item

    DontReadAheadOfSignalProcessing (cur_image_loader, master_img_loader->lead);
    //*** We load on this thread so that images are read in sequential order and pinnedInFlow is updated in sequential order
    if (!cur_image_loader->inception_state->img_control.threaded_file_access) {
      JustLoadOneImageWithPinnedUpdate(cur_image_loader);
    }
    //*** Now the rest of the per-image computation, including dumping, can run in a multi-threaded fashion

    item.finished = false;
    item.private_data = cur_image_loader;
    loadWorkQ->PutItem ( item );

    if (!ChipIdDecoder::IsProtonChip())
      PauseForLongCompute ( cur_flow,cur_image_loader );
  }

  // wait for all of the images to be processed
  loadWorkQ->WaitTillDone();
  KillQueue ( loadWorkQ,numWorkers );

  delete loadWorkQ;
  delete[] n_image_loaders;

  master_img_loader->finished = true;

  return NULL;
}
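
WorkerInfoQueue itself is not shown in these excerpts. The loaders follow a plain producer/consumer pattern: the loader thread enqueues one job per flow buffer and detached worker threads drain the queue. Below is a much reduced, illustrative sketch of that pattern built on the standard library; it is not the WorkerInfoQueue API, and Job, SimpleWorkQueue and RunSketch are placeholders:

#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// Minimal producer/consumer queue in the spirit of the loaders above.
struct Job { int flow; bool finished; };

class SimpleWorkQueue {
public:
  void Put(const Job& j) {
    std::lock_guard<std::mutex> lk(m_);
    q_.push(j);
    cv_.notify_one();
  }
  Job Get() {                        // blocks until a job is available
    std::unique_lock<std::mutex> lk(m_);
    cv_.wait(lk, [this] { return !q_.empty(); });
    Job j = q_.front(); q_.pop();
    return j;
  }
private:
  std::mutex m_;
  std::condition_variable cv_;
  std::queue<Job> q_;
};

// Usage sketch: one producer enqueues jobs, numWorkers threads consume them.
// A 'finished' job acts as the kill signal, mirroring KillQueue() above.
static void RunSketch(int numWorkers, int numJobs)
{
  SimpleWorkQueue q;
  std::vector<std::thread> workers;
  for (int w = 0; w < numWorkers; ++w)
    workers.emplace_back([&q] {
      for (Job j = q.Get(); !j.finished; j = q.Get())
        /* process the image for j.flow here */;
    });
  for (int i = 0; i < numJobs; ++i)
    q.Put(Job{i, false});
  for (int w = 0; w < numWorkers; ++w)
    q.Put(Job{-1, true});            // one poison pill per worker
  for (auto& t : workers)
    t.join();                        // the originals detach and use WaitTillDone() instead
}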
Example 6
void PhaseEstimator::DoPhaseEstimation(RawWells *wells, Mask *mask, const ion::FlowOrder& flow_order,
		                               const vector<KeySequence>& keys, bool use_single_core)
{
  // We only load / process what is necessary
  flow_order_.SetFlowOrder(flow_order.str(), min(flow_order.num_flows(), phasing_end_flow_+20));
  keys_ = keys;

  // Do we have enough flows to do phase estimation?
  // Check and, if necessary, adjust flow interval for estimation,

  if (not have_phase_estimates_) {

    if (flow_order_.num_flows() < 50) {
      phasing_estimator_ = "override";
      cout << "PhaseEstimator WARNING: Not enough flows to estimate phase; using default values." << endl;
    }

    else  {

      // Make sure we have at least 30 flows to estimate over
      if (phasing_end_flow_ - phasing_start_flow_ < 30) {
        phasing_end_flow_   = min(phasing_start_flow_+30, flow_order_.num_flows());
        phasing_start_flow_ = phasing_end_flow_ - 30; // We are guaranteed to have at least 50 flows
        cout << "PhaseEstimator WARNING: Shifting phase estimation window to flows " << phasing_start_flow_ << "-" << phasing_end_flow_ << endl;
        cerr << "PhaseEstimator WARNING: Shifting phase estimation window to flows " << phasing_start_flow_ << "-" << phasing_end_flow_ << endl;
      }
      // Check boundaries of estimation window and adjust if necessary,
      // try to keep estimation window size if possible, but don't start before flow 20
      if (phasing_end_flow_ > flow_order_.num_flows()) {
        phasing_start_flow_ = max(20, (phasing_start_flow_ - phasing_end_flow_ + flow_order_.num_flows()) );
        phasing_end_flow_   = flow_order_.num_flows();
        cout << "PhaseEstimator WARNING: Shifting phase estimation window to flows " << phasing_start_flow_ << "-" << phasing_end_flow_ << endl;
        cerr << "PhaseEstimator WARNING: Shifting phase estimation window to flows " << phasing_start_flow_ << "-" << phasing_end_flow_ << endl;
      }
    }
  }

  // ------------------------------------

  if (phasing_estimator_ == "override") {
    if (not have_phase_estimates_)
      SetPhaseParameters(init_cf_, init_ie_, init_dr_);

  } else if (phasing_estimator_ == "spatial-refiner") {

    int num_workers = max(numCores(), 2);
    if (use_single_core)
      num_workers = 1;

    wells->Close();
    wells->OpenForIncrementalRead();
    SpatialRefiner(wells, mask, num_workers);


  } else if (phasing_estimator_ == "spatial-refiner-2") {

    int num_workers = max(numCores(), 2);
    if (use_single_core)
      num_workers = 1;

    wells->Close();
    wells->OpenForIncrementalRead();

    train_subset_count_ = 2;
    train_subset_cf_.resize(train_subset_count_);
    train_subset_ie_.resize(train_subset_count_);
    train_subset_dr_.resize(train_subset_count_);
    train_subset_regions_x_.resize(train_subset_count_);
    train_subset_regions_y_.resize(train_subset_count_);


    for (train_subset_ = 0; train_subset_ < train_subset_count_; ++train_subset_) {
      SpatialRefiner(wells, mask, num_workers);
      train_subset_cf_[train_subset_] = result_cf_;
      train_subset_ie_[train_subset_] = result_ie_;
      train_subset_dr_[train_subset_] = result_dr_;
      train_subset_regions_x_[train_subset_] = result_regions_x_;
      train_subset_regions_y_[train_subset_] = result_regions_y_;
    }

  } else
    ION_ABORT("Requested phase estimator is not recognized");

  // Compute mean cf, ie, dr

  average_cf_ = 0;
  average_ie_ = 0;
  average_dr_ = 0;
  int count = 0;

  for (int r = 0; r < result_regions_x_*result_regions_y_; r++) {
    if (result_cf_.at(r) || result_ie_.at(r) || result_dr_.at(r)) {
      average_cf_ += result_cf_[r];
      average_ie_ += result_ie_[r];
      average_dr_ += result_dr_[r];
      count++;
    }
  }
  if (count > 0) {
    average_cf_ /= count;
    average_ie_ /= count;
    average_dr_ /= count;
  }
  have_phase_estimates_ = true;
}
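
The window adjustment above enforces three constraints once at least 50 flows are available: the estimation window spans at least 30 flows, ends no later than the last available flow, and starts no earlier than flow 20. A compact sketch of the same rule in isolation, with illustrative names standing in for phasing_start_flow_, phasing_end_flow_ and flow_order_.num_flows():

#include <algorithm>

// Sketch of the phasing-window adjustment applied above; assumes the caller
// has already checked that num_flows >= 50, so a 30-flow window always fits.
static void AdjustPhasingWindow(int& start, int& end, int num_flows)
{
  if (end - start < 30) {            // keep at least 30 flows to estimate over
    end   = std::min(start + 30, num_flows);
    start = end - 30;
  }
  if (end > num_flows) {             // shift the window left, but not before flow 20
    start = std::max(20, start - end + num_flows);
    end   = num_flows;
  }
}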
Example 7
void *FileSDatLoader ( void *arg )
{
  ImageLoadWorkInfo *master_img_loader = ( ImageLoadWorkInfo * ) arg;


  WorkerInfoQueue *loadWorkQ = new WorkerInfoQueue ( master_img_loader->flow_buffer_size );
  ImageLoadWorkInfo *n_image_loaders = new ImageLoadWorkInfo[master_img_loader->flow_buffer_size];
  SetUpIndividualImageLoaders ( n_image_loaders,master_img_loader );

  int numWorkers = numCores() /2; // @TODO - this should be subject to inception_state options
  //int numWorkers = 1;
  numWorkers = ( numWorkers < 1 ? 1:numWorkers );
  fprintf ( stdout, "FileSDatLoader: numWorkers threads = %d\n", numWorkers );
  {
    int cworker;
    pthread_t work_thread;
    // spawn threads for doing background correction/fitting work
    for ( cworker = 0; cworker < numWorkers; cworker++ )
    {
      int t = pthread_create ( &work_thread, NULL, FileSDatLoadWorker,
                               loadWorkQ );
      pthread_detach(work_thread);
      if ( t )
        fprintf ( stderr, "Error starting thread\n" );
    }
  }

  WorkerInfoQueueItem item;
  Timer timer_file_access;
  //double file_access_time = 0;
  int flow_buffer_size = master_img_loader->flow_buffer_size;
  for ( int i_buffer = 0; i_buffer < flow_buffer_size;i_buffer++ )
  {
    ImageLoadWorkInfo *cur_image_loader = &n_image_loaders[i_buffer];

    int cur_flow = cur_image_loader->flow; // each job is an n_image_loaders item

    DontReadAheadOfSignalProcessing (cur_image_loader, master_img_loader->lead);
    TraceChunkSerializer serializer;
    timer_file_access.restart();
    bool ok = serializer.Read ( cur_image_loader->name, cur_image_loader->sdat[cur_image_loader->cur_buffer] );
    if (!ok) {
      ION_ABORT("Couldn't load file: " + ToStr(cur_image_loader->name));
    }
    // if ( ImageTransformer::gain_correction != NULL )
    //   ImageTransformer::GainCorrectImage ( &cur_image_loader->sdat[cur_image_loader->cur_buffer] );
  
    //file_access_time += timer_file_access.elapsed();
    fprintf ( stdout, "File access = %0.2lf sec.\n", timer_file_access.elapsed() );
    cur_image_loader->pinnedInFlow->Update ( cur_image_loader->flow, &cur_image_loader->sdat[cur_image_loader->cur_buffer] );
    cur_image_loader->sdat[cur_image_loader->cur_buffer].AdjustForDrift();
    cur_image_loader->sdat[cur_image_loader->cur_buffer].SubDcOffset();
    item.finished = false;
    item.private_data = cur_image_loader;
    loadWorkQ->PutItem ( item );
    
    if (ChipIdDecoder::GetGlobalChipId() != ChipId900)
      PauseForLongCompute ( cur_flow,cur_image_loader );

    /*if ( CheckFlowForWrite ( cur_flow,false ) )
    {
      fprintf (stdout, "File access Time for flow %d to %d: %.1f sec\n", ( ( cur_flow+1 ) - NUMFB ), cur_flow, file_access_time);
      file_access_time = 0;
    }*/
  }

  // wait for all of the images to be processed
  loadWorkQ->WaitTillDone();

  KillQueue ( loadWorkQ,numWorkers );

  delete loadWorkQ;

  delete[] n_image_loaders;

  master_img_loader->finished = true;

  return NULL;
}
Example 8
void *FileLoader ( void *arg )
{
  ImageLoadWorkInfo *master_img_loader = ( ImageLoadWorkInfo * ) arg;


  WorkerInfoQueue *loadWorkQ = new WorkerInfoQueue ( master_img_loader->flow_buffer_size );


  ImageLoadWorkInfo *n_image_loaders = new ImageLoadWorkInfo[master_img_loader->flow_buffer_size];
  SetUpIndividualImageLoaders ( n_image_loaders,master_img_loader );

  int numWorkers = numCores() /2; // @TODO - this should be subject to inception_state options
  // int numWorkers = 1;
  numWorkers = ( numWorkers < 1 ? 1:numWorkers );
  fprintf ( stdout, "FileLoader: numWorkers threads = %d\n", numWorkers );
  {
    int cworker;
    pthread_t work_thread;
    // spawn threads for doing background correction/fitting work
    for ( cworker = 0; cworker < numWorkers; cworker++ )
    {
      int t = pthread_create ( &work_thread, NULL, FileLoadWorker,
                               loadWorkQ );
      pthread_detach(work_thread);
      if ( t )
        fprintf ( stderr, "Error starting thread\n" );
    }
  }

  WorkerInfoQueueItem item;

  //time_t start, end;
  int flow_buffer_size = master_img_loader->flow_buffer_size;

  Timer timer_file_access;
  double file_access_time = 0;
  // this loop goes over the individual image loaders
  for ( int i_buffer = 0; i_buffer < flow_buffer_size; i_buffer++ )
  {
    ImageLoadWorkInfo *cur_image_loader = &n_image_loaders[i_buffer];

    int cur_flow = cur_image_loader->flow;  // each job is an n_image_loaders item

    DontReadAheadOfSignalProcessing (cur_image_loader, master_img_loader->lead);
    //*** We load on this thread so that images are read in sequential order and pinnedInFlow is updated in sequential order
    timer_file_access.restart();
    if (!cur_image_loader->inception_state->img_control.threaded_file_access) {
      JustLoadOneImageWithPinnedUpdate(cur_image_loader);
    }
    else {
//      JustCacheOneImage(cur_image_loader);
    }
    file_access_time += timer_file_access.elapsed();
    //*** Now the rest of the per-image computation, including dumping, can run in a multi-threaded fashion

    item.finished = false;
    item.private_data = cur_image_loader;
    loadWorkQ->PutItem ( item );

    if (ChipIdDecoder::GetGlobalChipId() != ChipId900)
      PauseForLongCompute ( cur_flow,cur_image_loader );

    if ( CheckFlowForWrite ( cur_flow,false ) )
    {
      fprintf (stdout, "File access Time for flow %d to %d: %.1f sec\n", ( ( cur_flow+1 ) - NUMFB ), cur_flow, file_access_time);
      file_access_time = 0;
    }
  }

  // wait for all of the images to be processed
  loadWorkQ->WaitTillDone();
  KillQueue ( loadWorkQ,numWorkers );

  delete loadWorkQ;
  delete[] n_image_loaders;

  master_img_loader->finished = true;

  return NULL;
}