Example #1
 Ref<Expr> Interpreter::Parse(ILineReader & reader)
 {
     InterpreterErrorReporter errorReporter(*this);
     Lexer          lexer(reader);
     LineNormalizer normalizer(lexer);
     FinchParser    parser(normalizer, errorReporter);
     
     return parser.Parse();
 }
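A minimal driver sketch for the pipeline above (Lexer feeds the LineNormalizer, which feeds the FinchParser). The FileLineReader type and the IsNull() check are illustrative assumptions, not necessarily Finch's actual API:

 // Hypothetical usage: feed a concrete ILineReader through Parse() and
 // inspect the result ("interpreter" is an existing Interpreter instance).
 FileLineReader reader("script.fin");       // assumed ILineReader implementation
 Ref<Expr> expr = interpreter.Parse(reader);
 if (expr.IsNull())
 {
     // Parse failed; errors were reported via InterpreterErrorReporter.
 }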
Example #2
// Integrated squared error between the mixture f defined by params and the
// estimate g, via the expansion (f - g)^2 = f^2 - 2*f*g + g^2.
static double squaredIntegratedError(const vector<Param> &params, const Param &estimate) {
    double left = 0;
    double middle = 0;
    double right = 0;
    for (size_t i = 0; i < params.size(); i++) {
        for (size_t j = 0; j < params.size(); j++) {
            left += normalizer(params[i], params[j]);
        }
        middle += normalizer(params[i], estimate);
    }
    right = normalizer(estimate, estimate);

    return left - 2*middle + right;
}
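The three accumulators implement the usual expansion of the integrated squared error between the mixture f = Σ_i f_i (components given by params) and the estimate g, assuming normalizer(a, b) evaluates the cross-term integral of the two weighted components:

\int (f - g)^2 \, dx \;=\; \underbrace{\sum_i \sum_j \int f_i f_j}_{\text{left}} \;-\; 2 \underbrace{\sum_i \int f_i \, g}_{\text{middle}} \;+\; \underbrace{\int g^2}_{\text{right}}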
Example #3
int main()
{
    std::cout << "[moeoChebyshevMetric] => ";

    // objective vectors
    ObjectiveVector obj0, obj1, obj2, obj4;
    obj0[0] = 3;
    obj0[1] = 3;

    obj1[0] = 2;
    obj1[1] = 2;

    obj2[0] = 1;
    obj2[1] = 1;

    obj4[0] = 0;
    obj4[1] = 0;

    
    std::vector<double> poids;
    poids.resize(2);
    
    poids[0]=2;
    poids[1]=3;
    ObjectiveVector obj_poids(poids);


    // population
    eoPop < Solution > pop;
    pop.resize(3);
    pop[0].objectiveVector(obj0); 
    pop[1].objectiveVector(obj1);
    pop[2].objectiveVector(obj2);
    Solution reference;
    reference.objectiveVector(obj4);
    unsigned int rho=2;

    moeoObjectiveVectorNormalizer<Solution> normalizer(pop,10);

    moeoAugmentedAchievementScalarizingFunctionMetricFitnessAssignment<Solution> fitness(rho,obj4,obj_poids,normalizer,eval); // eval: evaluation functor defined elsewhere in this test
    // The remaining instances merely exercise the alternative constructor overloads:
    moeoAugmentedAchievementScalarizingFunctionMetricFitnessAssignment<Solution> fitness1(rho,obj4,obj_poids);
    moeoAugmentedAchievementScalarizingFunctionMetricFitnessAssignment<Solution> fitness2(rho,obj4,obj_poids,normalizer);
    moeoAugmentedAchievementScalarizingFunctionMetricFitnessAssignment<Solution> fitness3(rho,obj4,obj_poids,eval);
    fitness(pop);
    fitness(reference);
    
    assert(pop[0].fitness()<pop[1].fitness());
    assert(pop[1].fitness()<pop[2].fitness());
 
    std::cout << "Ok" << std::endl;
    
    return EXIT_SUCCESS;
}
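For reference, the metric this test exercises is commonly defined as the augmented achievement scalarizing function (whether ParadisEO-MOEO uses exactly this variant is an assumption):

s_{z,\lambda}(f) \;=\; \max_i \, \lambda_i (f_i - z_i) \;+\; \rho \sum_i \lambda_i (f_i - z_i)

with reference point z (obj4 above), weights \lambda (obj_poids) and augmentation factor \rho (rho); the asserts then check that the resulting fitness ordering of the three population members is consistent.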
Example #4
bool SVGPathParser::parseAndNormalizePath()
{
    SVGPathNormalizer normalizer(m_consumer);

    while (m_source->hasMoreData()) {
        PathSegmentData segment = m_source->parseSegment();
        if (segment.command == PathSegUnknown)
            return false;

        normalizer.emitSegment(segment);
    }
    return true;
}
Example #5
int main(int argc, char *argv[]) {
	int	c;
	while (EOF != (c = getopt(argc, argv, "d")))
		switch (c) {
		case 'd':
			debuglevel = LOG_DEBUG;
			break;
		}

	// create a Chart factory
	CatalogPtr 	catalog = CatalogFactory::get(CatalogFactory::Combined,
					"/usr/local/starcatalogs");
	TurbulencePointSpreadFunction   psf(2);
	ChartFactory    factory(catalog, psf, 14, 100);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "chart factory created");

	// create an Image Normalizer
	ImageNormalizer normalizer(factory);

	// prepare the initial transformation
	Projection      projection(M_PI * 162 / 180, Point(838, 182), 0.98);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "projection: %s",
	projection.toString().c_str());

	// get the image from the input file
	FITSin  in("andromeda-base.fits");
	ImagePtr        imageptr = in.read();

	// apply the normalizer to the image
	debug(LOG_DEBUG, DEBUG_LOG, 0, "apply normalizer");
	RaDec   center = normalizer(imageptr, projection);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "true center: %s",
	center.toString().c_str());
	debug(LOG_DEBUG, DEBUG_LOG, 0, "transformation: %s",
	projection.toString().c_str());

	return EXIT_SUCCESS;
}
Example #6
void CowichanTBB::norm(PointVector pointsIn, PointVector pointsOut) {

  MinMaxReducer minmax(pointsIn);

  // find min/max coordinates
  parallel_reduce(Range(0, n), minmax, auto_partitioner());

  Point minPoint = minmax.getMinimum();
  Point maxPoint = minmax.getMaximum();

  // compute scaling factors
  real xfactor = (real)((maxPoint.x == minPoint.x) ?
      0.0 : 1.0 / (maxPoint.x - minPoint.x));
  real yfactor = (real)((maxPoint.y == minPoint.y) ?
      0.0 : 1.0 / (maxPoint.y - minPoint.y));

  Normalizer normalizer(pointsIn, pointsOut, minPoint.x, minPoint.y, xfactor,
      yfactor);

  // normalize the vector
  parallel_for(Range(0, n), normalizer, auto_partitioner());

}
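For context, a minimal sketch of a Normalizer body compatible with the parallel_for call above (n is presumably the point count held by the surrounding Cowichan class; the member layout here and PointVector being a plain Point array are assumptions):

class Normalizer {
  PointVector in_, out_;
  real minX_, minY_, xfactor_, yfactor_;
public:
  Normalizer(PointVector in, PointVector out, real minX, real minY,
             real xfactor, real yfactor)
    : in_(in), out_(out), minX_(minX), minY_(minY),
      xfactor_(xfactor), yfactor_(yfactor) { }
  // TBB body: scale each point of the subrange into the unit square.
  void operator()(const Range& range) const {
    for (size_t i = range.begin(); i != range.end(); ++i) {
      out_[i].x = xfactor_ * (in_[i].x - minX_);
      out_[i].y = yfactor_ * (in_[i].y - minY_);
    }
  }
};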
Example #7
void MainWindow::normalize()
{
    VolumeNormalizer normalizer( carna->model(), this );

    bool ok = false;
    normalizer.setThreshold( QInputDialog::getInt
        ( this
        , "Volume Normalization"
        , "HUV threshold:"
        , normalizer.getThreshold()
        , -3071
        , 1024
        , 1
        , &ok ) );

    if( !ok )
    {
        return;
    }

    normalizer.compute();

    QString sizeLoss = QString::number( normalizer.getSizeLoss() * 100, 'f', 2 );

    const QMessageBox::StandardButton defaultButton =
                       std::abs( normalizer.getSizeLoss() - 1. ) < 1e-4
                    || std::abs( normalizer.getSizeLoss() ) < 1e-4
            ? QMessageBox::No
            : QMessageBox::Yes;

    if( QMessageBox::question
        ( this
        , "Volume Normalization"
        , "The resolution of the normalized volume data is " + sizeLoss + "% of the original. Do you want to close your current data set and load the new one?"
        , QMessageBox::Yes | QMessageBox::No
        , defaultButton ) == QMessageBox::Yes )
    {
        Carna::base::model::Scene* const new_model = normalizer.getResult();
        this->closeRecord();
        this->init( new_model );
    }
}
Example #8
  void IsobaricQuantifier::quantify(const ConsensusMap& consensus_map_in, ConsensusMap& consensus_map_out)
  {
    // precheck incoming map
    if (consensus_map_in.empty())
    {
      LOG_WARN << "Warning: Empty iTRAQ container. No quantitative information available!" << std::endl;
      return;
    }

    // create output map based on input, we will cleanup the channels while iterating over it
    consensus_map_out = consensus_map_in;

    // init stats
    stats_.reset();
    stats_.channel_count = quant_method_->getNumberOfChannels();

    // apply isotope correction if requested by user
    if (isotope_correction_enabled_)
    {
      stats_ = IsobaricIsotopeCorrector::correctIsotopicImpurities(consensus_map_in, consensus_map_out, quant_method_);
    }
    else
    {
      LOG_WARN << "Warning: Due to deactivated isotope-correction labeling statistics will be based on raw intensities, which might give too optimistic results." << std::endl;
    }

    // compute statistics and embed into output map
    computeLabelingStatistics_(consensus_map_out);

    // apply normalization if requested
    if (normalization_enabled_)
    {
      IsobaricNormalizer normalizer(quant_method_);
      normalizer.normalize(consensus_map_out);
    }
  }
Example #9
enum charset_result
charset_utf8_to_utf8(normalizer_func_t *normalizer,
		     const unsigned char *src, size_t *src_size, buffer_t *dest)
{
	enum charset_result res = CHARSET_RET_OK;
	size_t pos;

	uni_utf8_partial_strlen_n(src, *src_size, &pos);
	if (pos < *src_size) {
		i_assert(*src_size - pos <= CHARSET_MAX_PENDING_BUF_SIZE);
		*src_size = pos;
		res = CHARSET_RET_INCOMPLETE_INPUT;
	}

	if (normalizer != NULL) {
		if (normalizer(src, *src_size, dest) < 0)
			return CHARSET_RET_INVALID_INPUT;
	} else if (!uni_utf8_get_valid_data(src, *src_size, dest)) {
		return CHARSET_RET_INVALID_INPUT;
	} else {
		buffer_append(dest, src, *src_size);
	}
	return res;
}
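For illustration, a callback with the shape expected at the normalizer(...) call above might look like the following toy ASCII-folding sketch. It is not one of Dovecot's real normalizers, and the exact normalizer_func_t typedef is an assumption; buffer_append_c is Dovecot's single-byte append:

static int ascii_casefold_normalizer(const void *data, size_t size,
				     buffer_t *dest)
{
	const unsigned char *src = data;
	size_t i;

	/* fold ASCII letters to lowercase; pass other bytes through */
	for (i = 0; i < size; i++) {
		unsigned char c = src[i];
		if (c >= 'A' && c <= 'Z')
			c += 'a' - 'A';
		buffer_append_c(dest, c);
	}
	return 0; /* a negative return reports invalid input to the caller */
}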
Example #10
int main( int argc, const char* argv[])
{
	int rt = 0;
	std::auto_ptr<strus::ErrorBufferInterface> errorBuffer( strus::createErrorBuffer_standard( 0, 2));
	if (!errorBuffer.get())
	{
		std::cerr << _TXT("failed to create error buffer") << std::endl;
		return -1;
	}
	strus::ProgramOptions opt;
	bool printUsageAndExit = false;
	try
	{
		opt = strus::ProgramOptions(
				argc, argv, 11,
				"h,help", "v,version", "license", "t,tokenizer:", "n,normalizer:",
				"m,module:", "M,moduledir:", "q,quot:", "p,plain",
				"R,resourcedir:", "T,trace:");
		if (opt( "help")) printUsageAndExit = true;
		std::auto_ptr<strus::ModuleLoaderInterface> moduleLoader( strus::createModuleLoader( errorBuffer.get()));
		if (!moduleLoader.get()) throw strus::runtime_error(_TXT("failed to create module loader"));

		if (opt("moduledir"))
		{
			std::vector<std::string> modirlist( opt.list("moduledir"));
			std::vector<std::string>::const_iterator mi = modirlist.begin(), me = modirlist.end();
			for (; mi != me; ++mi)
			{
				moduleLoader->addModulePath( *mi);
			}
			moduleLoader->addSystemModulePath();
		}
		if (opt("module"))
		{
			std::vector<std::string> modlist( opt.list("module"));
			std::vector<std::string>::const_iterator mi = modlist.begin(), me = modlist.end();
			for (; mi != me; ++mi)
			{
				if (!moduleLoader->loadModule( *mi))
				{
					throw strus::runtime_error(_TXT("error failed to load module %s"), mi->c_str());
				}
			}
		}
		if (opt("license"))
		{
			std::vector<std::string> licenses_3rdParty = moduleLoader->get3rdPartyLicenseTexts();
			std::vector<std::string>::const_iterator ti = licenses_3rdParty.begin(), te = licenses_3rdParty.end();
			if (ti != te) std::cout << _TXT("3rd party licenses:") << std::endl;
			for (; ti != te; ++ti)
			{
				std::cout << *ti << std::endl;
			}
			std::cerr << std::endl;
			if (!printUsageAndExit) return 0;
		}
		if (opt( "version"))
		{
			std::cout << _TXT("Strus utilities version ") << STRUS_UTILITIES_VERSION_STRING << std::endl;
			std::cout << _TXT("Strus module version ") << STRUS_MODULE_VERSION_STRING << std::endl;
			std::cout << _TXT("Strus rpc version ") << STRUS_RPC_VERSION_STRING << std::endl;
			std::cout << _TXT("Strus trace version ") << STRUS_TRACE_VERSION_STRING << std::endl;
			std::cout << _TXT("Strus analyzer version ") << STRUS_ANALYZER_VERSION_STRING << std::endl;
			std::cout << _TXT("Strus base version ") << STRUS_BASE_VERSION_STRING << std::endl;
			std::vector<std::string> versions_3rdParty = moduleLoader->get3rdPartyVersionTexts();
			std::vector<std::string>::const_iterator vi = versions_3rdParty.begin(), ve = versions_3rdParty.end();
			if (vi != ve) std::cout << _TXT("3rd party versions:") << std::endl;
			for (; vi != ve; ++vi)
			{
				std::cout << *vi << std::endl;
			}
			if (!printUsageAndExit) return 0;
		}
		else if (!printUsageAndExit)
		{
			if (opt.nofargs() > 1)
			{
				std::cerr << _TXT("too many arguments") << std::endl;
				printUsageAndExit = true;
				rt = 1;
			}
			if (opt.nofargs() < 1)
			{
				std::cerr << _TXT("too few arguments") << std::endl;
				printUsageAndExit = true;
				rt = 2;
			}
		}
		if (printUsageAndExit)
		{
			std::cout << _TXT("usage:") << " strusAnalyze [options] <phrasepath>" << std::endl;
			std::cout << "<phrasepath> = " << _TXT("path to phrase to analyze ('-' for stdin)") << std::endl;
			std::cout << "description: " << _TXT("tokenizes and normalizes a text segment") << std::endl;
			std::cout << "             " << _TXT("and prints the result to stdout.") << std::endl;
			std::cout << _TXT("options:") << std::endl;
			std::cout << "-h|--help" << std::endl;
			std::cout << "   " << _TXT("Print this usage and do nothing else") << std::endl;
			std::cout << "-v|--version" << std::endl;
			std::cout << "    " << _TXT("Print the program version and do nothing else") << std::endl;
			std::cout << "--license" << std::endl;
			std::cout << "    " << _TXT("Print 3rd party licences requiring reference") << std::endl;
			std::cout << "-m|--module <MOD>" << std::endl;
			std::cout << "    " << _TXT("Load components from module <MOD>") << std::endl;
			std::cout << "-M|--moduledir <DIR>" << std::endl;
			std::cout << "    " << _TXT("Search modules to load first in <DIR>") << std::endl;
			std::cout << "-R|--resourcedir <DIR>" << std::endl;
			std::cout << "    " << _TXT("Search resource files for analyzer first in <DIR>") << std::endl;
			std::cout << "-t|--tokenizer <CALL>" << std::endl;
			std::cout << "    " << _TXT("Use the tokenizer <CALL> (default 'content')") << std::endl;
			std::cout << "-n|--normalizer <CALL>" << std::endl;
			std::cout << "    " << _TXT("Use the normalizer <CALL> (default 'orig')") << std::endl;
			std::cout << "-q|--quot <STR>" << std::endl;
			std::cout << "    " << _TXT("Use the string <STR> as quote for the result (default \"\'\")") << std::endl;
			std::cout << "-p|--plain" << std::endl;
			std::cout << "    " << _TXT("Do not print position and define default quotes as empty") << std::endl;
			std::cout << "-T|--trace <CONFIG>" << std::endl;
			std::cout << "    " << _TXT("Print method call traces configured with <CONFIG>") << std::endl;
			std::cout << "    " << strus::string_format( _TXT("Example: %s"), "-T \"log=dump;file=stdout\"") << std::endl;
			return rt;
		}
		// Declare trace proxy objects:
		typedef strus::Reference<strus::TraceProxy> TraceReference;
		std::vector<TraceReference> trace;
		if (opt("trace"))
		{
			std::vector<std::string> tracecfglist( opt.list("trace"));
			std::vector<std::string>::const_iterator ti = tracecfglist.begin(), te = tracecfglist.end();
			for (; ti != te; ++ti)
			{
				trace.push_back( new strus::TraceProxy( moduleLoader.get(), *ti, errorBuffer.get()));
			}
		}

		std::string resultQuot = "'";
		bool resultPlain = false;
		if (opt( "plain"))
		{
			resultPlain = true;
			resultQuot.clear();
		}
		if (opt( "quot"))
		{
			resultQuot = opt[ "quot"];
		}
		std::string docpath = opt[0];
		std::string tokenizer( "content");
		if (opt( "tokenizer"))
		{
			tokenizer = opt[ "tokenizer"];
		}
		std::string normalizer( "orig");
		if (opt( "normalizer"))
		{
			normalizer = opt[ "normalizer"];
		}
		// Set paths for locating resources:
		if (opt("resourcedir"))
		{
			std::vector<std::string> pathlist( opt.list("resourcedir"));
			std::vector<std::string>::const_iterator
				pi = pathlist.begin(), pe = pathlist.end();
			for (; pi != pe; ++pi)
			{
				moduleLoader->addResourcePath( *pi);
			}
		}

		// Create root object for analyzer:
		std::auto_ptr<strus::AnalyzerObjectBuilderInterface>
			analyzerBuilder( moduleLoader->createAnalyzerObjectBuilder());
		if (!analyzerBuilder.get()) throw strus::runtime_error(_TXT("failed to create analyzer object builder"));

		// Create proxy objects if tracing enabled:
		{
			std::vector<TraceReference>::const_iterator ti = trace.begin(), te = trace.end();
			for (; ti != te; ++ti)
			{
				strus::AnalyzerObjectBuilderInterface* proxy = (*ti)->createProxy( analyzerBuilder.get());
				analyzerBuilder.release();
				analyzerBuilder.reset( proxy);
			}
		}
		// Create objects for analyzer:
		std::auto_ptr<strus::QueryAnalyzerInterface>
			analyzer( analyzerBuilder->createQueryAnalyzer());
		if (!analyzer.get()) throw strus::runtime_error(_TXT("failed to create analyzer"));
		const strus::TextProcessorInterface* textproc = analyzerBuilder->getTextProcessor();
		if (!textproc) throw strus::runtime_error(_TXT("failed to get text processor"));

		// Create phrase type (tokenizer and normalizer):
		std::string phraseType;
		if (!strus::loadQueryAnalyzerPhraseType(
				*analyzer, textproc, phraseType, "", normalizer, tokenizer, errorBuffer.get()))
		{
			throw strus::runtime_error(_TXT("failed to load analyze phrase type"));
		}

		// Load the phrase:
		std::string phrase;
		if (docpath == "-")
		{
			unsigned int ec = strus::readStdin( phrase);
			if (ec) throw strus::runtime_error( _TXT( "error reading input from stdin (errno %u)"), ec);
		}
		else
		{
			unsigned int ec = strus::readFile( docpath, phrase);
			if (ec) throw strus::runtime_error( _TXT( "error reading input file '%s' (errno %u)"), docpath.c_str(), ec);
		}

		// Analyze the phrase and print the result:
		std::vector<strus::analyzer::Term> terms
			= analyzer->analyzePhrase( phraseType, phrase);

		std::sort( terms.begin(), terms.end(), TermOrder());

		std::vector<strus::analyzer::Term>::const_iterator
			ti = terms.begin(), te = terms.end();

		for (; ti != te; ++ti)
		{
			if (!resultPlain)
			{
				std::cout << ti->pos() << " ";
			}
			std::cout << resultQuot << ti->value() << resultQuot << std::endl;
		}
		if (errorBuffer->hasError())
		{
			throw strus::runtime_error(_TXT("error in analyze phrase"));
		}
		return 0;
	}
	catch (const std::bad_alloc&)
	{
		std::cerr << _TXT("ERROR ") << _TXT("out of memory") << std::endl;
	}
	catch (const std::runtime_error& e)
	{
		const char* errormsg = errorBuffer->fetchError();
		if (errormsg)
		{
			std::cerr << _TXT("ERROR ") << e.what() << ": " << errormsg << std::endl;
		}
		else
		{
			std::cerr << _TXT("ERROR ") << e.what() << std::endl;
		}
	}
	catch (const std::exception& e)
	{
		std::cerr << _TXT("EXCEPTION ") << e.what() << std::endl;
	}
	return -1;
}
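A typical invocation of the resulting tool, assuming the standard strus modules provide a 'word' tokenizer and an 'lc' (lowercase) normalizer (names not confirmed by this listing), could look like:

strusAnalyze -t word -n lc phrase.txt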
Example #11
File: main.cpp  Project: LLNL/lbann
//-----------------------------------------------------------------------------
bool test_image_io(const std::string filename,
  const main_params& mp,
  const cropper_params& rp,
  const resizer_params& sp,
  const augmenter_params& ap)
{

  int transform_idx = 0;
  int mean_extractor_idx = -1;
  unsigned int num_bytes = mp.m_num_bytes; // size of image in bytes

  lbann::cv_process pp;
  { // Initialize the image processor
    if (rp.m_is_set) { // If cropper parameters are given
      // Setup a cropper
      std::unique_ptr<lbann::cv_cropper> cropper(new(lbann::cv_cropper));
      cropper->set(rp.m_crop_sz.first, rp.m_crop_sz.second, rp.m_rand_center, rp.m_roi_sz, rp.m_adaptive_interpolation);
      pp.add_transform(std::move(cropper));
      num_bytes = rp.m_crop_sz.first * rp.m_crop_sz.second * 3;
      transform_idx ++;
    }

    if (sp.m_is_set) { // If resizer parameters are given
      // Setup a resizer
      std::unique_ptr<lbann::cv_resizer> resizer(new(lbann::cv_resizer));
      resizer->set(sp.m_width, sp.m_height, rp.m_adaptive_interpolation);
      pp.add_transform(std::move(resizer));
      num_bytes = sp.m_width * sp.m_height * 3;
      transform_idx ++;
    }

    if (ap.m_is_set) { // Set up an augmenter
      std::unique_ptr<lbann::cv_augmenter> augmenter(new(lbann::cv_augmenter));
      augmenter->set(ap.m_hflip, ap.m_vflip, ap.m_rot, ap.m_hshift, ap.m_vshift, ap.m_shear);
      pp.add_transform(std::move(augmenter));
      transform_idx ++;
    }

    if (mp.m_enable_colorizer) { // Set up a colorizer
      std::unique_ptr<lbann::cv_colorizer> colorizer(new(lbann::cv_colorizer));
      pp.add_transform(std::move(colorizer));
      transform_idx ++;
    }

    if (mp.m_enable_decolorizer) { // Set up a decolorizer
      std::unique_ptr<lbann::cv_decolorizer> decolorizer(new(lbann::cv_decolorizer));
      pp.add_transform(std::move(decolorizer));
      transform_idx ++;
    }

    if (mp.m_enable_mean_extractor) { // set up a mean extractor
      mean_extractor_idx = transform_idx;
      std::unique_ptr<lbann::cv_mean_extractor> mean_extractor(new(lbann::cv_mean_extractor));
      if (rp.m_is_set)
        mean_extractor->set(rp.m_crop_sz.first, rp.m_crop_sz.second, 3, mp.m_mean_batch_size);
      else
        mean_extractor->set(mp.m_mean_batch_size);
      pp.add_transform(std::move(mean_extractor));
      transform_idx ++;
    }

    if (!mp.is_normalizer_off()) { // Set up a normalizer
      if (mp.is_channel_wise_normalizer()) {
        std::unique_ptr<lbann::cv_normalizer> normalizer(new(lbann::cv_normalizer));
        normalizer->z_score(true);
        pp.add_normalizer(std::move(normalizer));
      } else {
        std::unique_ptr<lbann::cv_subtractor> normalizer(new(lbann::cv_subtractor));
#if 0
        cv::Mat img_to_sub = cv::imread(mp.m_mean_image_name);
        if (img_to_sub.empty()) {
          std::cout << mp.m_mean_image_name << " does not exist" << std::endl;
          return false;
        }
        normalizer->set_mean(img_to_sub);
#else
        std::vector<lbann::DataType> mean = {0.40625, 0.45703, 0.48047};
        normalizer->set_mean(mean);
        std::vector<lbann::DataType> stddev = {0.3, 0.5, 0.3};
        normalizer->set_stddev(stddev);
#endif
        pp.add_normalizer(std::move(normalizer));
      }
      transform_idx ++;
    }
  }

  // Load an image bytestream into memory
  std::vector<unsigned char> buf;
  bool ok = lbann::load_file(filename, buf);
  if (!ok) {
    std::cout << "Failed to load" << std::endl;
    return false;
  }

  int width = 0;
  int height = 0;
  int type = 0;

  ::Mat Images;
  ::Mat Image_v; // matrix view
  Images.Resize(((num_bytes==0)? 1: num_bytes), 2); // minibatch

  size_t img_begin = 0;
  size_t img_end = buf.size();
  for (unsigned int i=0; i < mp.m_num_iter; ++i)
  {
    // This has nothing to do with the image type but only to create view on a block of bytes
    using InputBuf_T = lbann::cv_image_type<uint8_t>;
    // Construct a zero copying view to a portion of a preloaded data buffer
    const cv::Mat inbuf(1, (img_end - img_begin), InputBuf_T::T(1), &(buf[img_begin]));

    if (num_bytes == 0) {
      ok = lbann::image_utils::import_image(inbuf, width, height, type, pp, Images);
      num_bytes = Images.Height();
      El::View(Image_v, Images, El::IR(0, num_bytes), El::IR(0, 1));
    } else {
      El::View(Image_v, Images, El::IR(0, num_bytes), El::IR(0, 1));
      //ok = lbann::image_utils::import_image(buf, width, height, type, pp, Image_v);
      ok = lbann::image_utils::import_image(inbuf, width, height, type, pp, Image_v);
    }
    if (!ok) {
      std::cout << "Failed to import" << std::endl;
      return false;
    }
    //if ((i%3 == 0u) && (mp.m_enable_mean_extractor)) {
    //  dynamic_cast<lbann::cv_mean_extractor*>(pp.get_transform(mean_extractor_idx))->reset();
    //}
  }

  // Print out transforms
  const unsigned int num_transforms = pp.get_num_transforms();
  const std::vector<std::unique_ptr<lbann::cv_transform> >& transforms = pp.get_transforms();

  for(unsigned int i=0u; i < num_transforms; ++i) {
    std::cout << std::endl << "------------ transform " << i << "-------------" << std::endl;
    std::cout << *transforms[i] << std::endl;
  }

  if (mp.m_enable_mean_extractor) {
    // Extract the mean of images
    cv::Mat mean_image;
    mean_image = dynamic_cast<lbann::cv_mean_extractor*>(pp.get_transform(mean_extractor_idx))->extract<uint16_t>();
    cv::imwrite("mean.png", mean_image);
  }

  // Export the unnormalized image
  const std::string ext = lbann::get_ext_name(filename);
  std::vector<unsigned char> outbuf;
  ok = lbann::image_utils::export_image(ext, outbuf, width, height, type, pp, Image_v);
  write_file("copy." + ext, outbuf);
  return ok;
}
Example #12
/*
 * @param text A UText representing the text
 * @param rangeStart The start of the range of dictionary characters
 * @param rangeEnd The end of the range of dictionary characters
 * @param foundBreaks Output of C array of int32_t break positions, or 0
 * @return The number of breaks found
 */
int32_t 
CjkBreakEngine::divideUpDictionaryRange( UText *text,
        int32_t rangeStart,
        int32_t rangeEnd,
        UStack &foundBreaks ) const {
    if (rangeStart >= rangeEnd) {
        return 0;
    }

    const size_t defaultInputLength = 80;
    size_t inputLength = rangeEnd - rangeStart;
    // TODO: Replace by UnicodeString.
    AutoBuffer<UChar, defaultInputLength> charString(inputLength);

    // Normalize the input string and put it in normalizedText.
    // The map from the indices of the normalized input to the raw
    // input is kept in charPositions.
    UErrorCode status = U_ZERO_ERROR;
    utext_extract(text, rangeStart, rangeEnd, charString.elems(), inputLength, &status);
    if (U_FAILURE(status)) {
        return 0;
    }

    UnicodeString inputString(charString.elems(), inputLength);
    // TODO: Use Normalizer2.
    UNormalizationMode norm_mode = UNORM_NFKC;
    UBool isNormalized =
        Normalizer::quickCheck(inputString, norm_mode, status) == UNORM_YES ||
        Normalizer::isNormalized(inputString, norm_mode, status);

    // TODO: Replace by UVector32.
    AutoBuffer<int32_t, defaultInputLength> charPositions(inputLength + 1);
    int numChars = 0;
    UText normalizedText = UTEXT_INITIALIZER;
    // Needs to be declared here because normalizedText holds onto its buffer.
    UnicodeString normalizedString;
    if (isNormalized) {
        int32_t index = 0;
        charPositions[0] = 0;
        while(index < inputString.length()) {
            index = inputString.moveIndex32(index, 1);
            charPositions[++numChars] = index;
        }
        utext_openUnicodeString(&normalizedText, &inputString, &status);
    }
    else {
        Normalizer::normalize(inputString, norm_mode, 0, normalizedString, status);
        if (U_FAILURE(status)) {
            return 0;
        }
        charPositions.resize(normalizedString.length() + 1);
        Normalizer normalizer(charString.elems(), inputLength, norm_mode);
        int32_t index = 0;
        charPositions[0] = 0;
        while(index < normalizer.endIndex()){
            /* UChar32 uc = */ normalizer.next();
            charPositions[++numChars] = index = normalizer.getIndex();
        }
        utext_openUnicodeString(&normalizedText, &normalizedString, &status);
    }

    if (U_FAILURE(status)) {
        return 0;
    }

    // From this point on, all the indices refer to the indices of
    // the normalized input string.

    // bestSnlp[i] is the snlp of the best segmentation of the first i
    // characters in the range to be matched.
    // TODO: Replace by UVector32.
    AutoBuffer<uint32_t, defaultInputLength> bestSnlp(numChars + 1);
    bestSnlp[0] = 0;
    for(int i = 1; i <= numChars; i++) {
        bestSnlp[i] = kuint32max;
    }

    // prev[i] is the index of the last CJK character in the previous word in 
    // the best segmentation of the first i characters.
    // TODO: Replace by UVector32.
    AutoBuffer<int, defaultInputLength> prev(numChars + 1);
    for(int i = 0; i <= numChars; i++){
        prev[i] = -1;
    }

    const size_t maxWordSize = 20;
    // TODO: Replace both with UVector32.
    AutoBuffer<int32_t, maxWordSize> values(numChars);
    AutoBuffer<int32_t, maxWordSize> lengths(numChars);

    // Dynamic programming to find the best segmentation.
    bool is_prev_katakana = false;
    for (int32_t i = 0; i < numChars; ++i) {
        //utext_setNativeIndex(text, rangeStart + i);
        utext_setNativeIndex(&normalizedText, i);
        if (bestSnlp[i] == kuint32max)
            continue;

        int32_t count;
        // limit maximum word length matched to size of current substring
        int32_t maxSearchLength = (i + maxWordSize < (size_t) numChars)? maxWordSize : (numChars - i);

        fDictionary->matches(&normalizedText, maxSearchLength, lengths.elems(), count, maxSearchLength, values.elems());

        // if there are no single character matches found in the dictionary 
        // starting with this character, treat character as a 1-character word 
        // with the highest value possible, i.e. the least likely to occur.
        // Exclude Korean characters from this treatment, as they should be left
        // together by default.
        if((count == 0 || lengths[0] != 1) &&
                !fHangulWordSet.contains(utext_current32(&normalizedText))) {
            values[count] = maxSnlp;
            lengths[count++] = 1;
        }

        for (int j = 0; j < count; j++) {
            uint32_t newSnlp = bestSnlp[i] + values[j];
            if (newSnlp < bestSnlp[lengths[j] + i]) {
                bestSnlp[lengths[j] + i] = newSnlp;
                prev[lengths[j] + i] = i;
            }
        }

        // In Japanese,
        // Katakana word in single character is pretty rare. So we apply
        // the following heuristic to Katakana: any continuous run of Katakana
        // characters is considered a candidate word with a default cost
        // specified in the katakanaCost table according to its length.
        //utext_setNativeIndex(text, rangeStart + i);
        utext_setNativeIndex(&normalizedText, i);
        bool is_katakana = isKatakana(utext_current32(&normalizedText));
        if (!is_prev_katakana && is_katakana) {
            int j = i + 1;
            utext_next32(&normalizedText);
            // Find the end of the continuous run of Katakana characters
            while (j < numChars && (j - i) < kMaxKatakanaGroupLength &&
                    isKatakana(utext_current32(&normalizedText))) {
                utext_next32(&normalizedText);
                ++j;
            }
            if ((j - i) < kMaxKatakanaGroupLength) {
                uint32_t newSnlp = bestSnlp[i] + getKatakanaCost(j - i);
                if (newSnlp < bestSnlp[j]) {
                    bestSnlp[j] = newSnlp;
                    prev[j] = i;
                }
            }
        }
        is_prev_katakana = is_katakana;
    }

    // Start pushing the optimal offset index into t_boundary (t for tentative).
    // prev[numChars] is guaranteed to be meaningful.
    // We'll first push in the reverse order, i.e.,
    // t_boundary[0] = numChars, and afterwards do a swap.
    // TODO: Replace by UVector32.
    AutoBuffer<int, maxWordSize> t_boundary(numChars + 1);

    int numBreaks = 0;
    // No segmentation found, set boundary to end of range
    if (bestSnlp[numChars] == kuint32max) {
        t_boundary[numBreaks++] = numChars;
    } else {
        for (int i = numChars; i > 0; i = prev[i]) {
            t_boundary[numBreaks++] = i;
        }
        U_ASSERT(prev[t_boundary[numBreaks - 1]] == 0);
    }

    // Reverse offset index in t_boundary.
    // Don't add a break for the start of the dictionary range if there is one
    // there already.
    if (foundBreaks.size() == 0 || foundBreaks.peeki() < rangeStart) {
        t_boundary[numBreaks++] = 0;
    }

    // Now that we're done, convert positions in t_bdry[] (indices in 
    // the normalized input string) back to indices in the raw input string
    // while reversing t_bdry and pushing values to foundBreaks.
    for (int i = numBreaks-1; i >= 0; i--) {
        foundBreaks.push(charPositions[t_boundary[i]] + rangeStart, status);
    }

    utext_close(&normalizedText);
    return numBreaks;
}
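For reference, the forward pass in divideUpDictionaryRange above implements the standard lattice shortest-path recurrence, with cost(w) the dictionary SNLP value of word w:

\mathrm{bestSnlp}(0) = 0, \qquad \mathrm{bestSnlp}(k) = \min_{w\ \text{ending at}\ k} \bigl( \mathrm{bestSnlp}(k - |w|) + \mathrm{cost}(w) \bigr)

prev(k) records the minimizing start position, and the t_boundary loop reads the optimal segmentation back from prev in reverse before converting the indices to raw-input positions.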
Example #13
int main( int argc, const char* argv[])
{
	int rt = 0;
	strus::DebugTraceInterface* dbgtrace = strus::createDebugTrace_standard( 2);
	if (!dbgtrace)
	{
		std::cerr << _TXT("failed to create debug trace") << std::endl;
		return -1;
	}
	strus::local_ptr<strus::ErrorBufferInterface> errorBuffer( strus::createErrorBuffer_standard( 0, 2, dbgtrace/*passed with ownership*/));
	if (!errorBuffer.get())
	{
		std::cerr << _TXT("failed to create error buffer") << std::endl;
		return -1;
	}
	try
	{
		bool printUsageAndExit = false;
		strus::ProgramOptions opt(
				errorBuffer.get(), argc, argv, 13,
				"h,help", "v,version", "license", "G,debug:", "t,tokenizer:", "n,normalizer:",
				"m,module:", "M,moduledir:", "q,quot:", "P,plain", "F,fileinput",
				"R,resourcedir:", "T,trace:");
		if (errorBuffer->hasError())
		{
			throw strus::runtime_error(_TXT("failed to parse program arguments"));
		}
		if (opt( "help")) printUsageAndExit = true;

		// Enable debugging selected with option 'debug':
		{
			std::vector<std::string> dbglist = opt.list( "debug");
			std::vector<std::string>::const_iterator gi = dbglist.begin(), ge = dbglist.end();
			for (; gi != ge; ++gi)
			{
				if (!dbgtrace->enable( *gi))
				{
					throw strus::runtime_error(_TXT("failed to enable debug '%s'"), gi->c_str());
				}
			}
		}

		strus::local_ptr<strus::ModuleLoaderInterface> moduleLoader( strus::createModuleLoader( errorBuffer.get()));
		if (!moduleLoader.get()) throw std::runtime_error( _TXT("failed to create module loader"));

		if (opt("moduledir"))
		{
			std::vector<std::string> modirlist( opt.list("moduledir"));
			std::vector<std::string>::const_iterator mi = modirlist.begin(), me = modirlist.end();
			for (; mi != me; ++mi)
			{
				moduleLoader->addModulePath( *mi);
			}
			moduleLoader->addSystemModulePath();
		}
		if (opt("module"))
		{
			std::vector<std::string> modlist( opt.list("module"));
			std::vector<std::string>::const_iterator mi = modlist.begin(), me = modlist.end();
			for (; mi != me; ++mi)
			{
				if (!moduleLoader->loadModule( *mi))
				{
					throw strus::runtime_error(_TXT("error failed to load module %s"), mi->c_str());
				}
			}
		}
		if (opt("license"))
		{
			std::vector<std::string> licenses_3rdParty = moduleLoader->get3rdPartyLicenseTexts();
			std::vector<std::string>::const_iterator ti = licenses_3rdParty.begin(), te = licenses_3rdParty.end();
			if (ti != te) std::cout << _TXT("3rd party licenses:") << std::endl;
			for (; ti != te; ++ti)
			{
				std::cout << *ti << std::endl;
			}
			std::cout << std::endl;
			if (!printUsageAndExit) return 0;
		}
		if (opt( "version"))
		{
			std::cout << _TXT("Strus utilities version ") << STRUS_UTILITIES_VERSION_STRING << std::endl;
			std::cout << _TXT("Strus module version ") << STRUS_MODULE_VERSION_STRING << std::endl;
			std::cout << _TXT("Strus rpc version ") << STRUS_RPC_VERSION_STRING << std::endl;
			std::cout << _TXT("Strus trace version ") << STRUS_TRACE_VERSION_STRING << std::endl;
			std::cout << _TXT("Strus analyzer version ") << STRUS_ANALYZER_VERSION_STRING << std::endl;
			std::cout << _TXT("Strus base version ") << STRUS_BASE_VERSION_STRING << std::endl;
			std::vector<std::string> versions_3rdParty = moduleLoader->get3rdPartyVersionTexts();
			std::vector<std::string>::const_iterator vi = versions_3rdParty.begin(), ve = versions_3rdParty.end();
			if (vi != ve) std::cout << _TXT("3rd party versions:") << std::endl;
			for (; vi != ve; ++vi)
			{
				std::cout << *vi << std::endl;
			}
			if (!printUsageAndExit) return 0;
		}
		else if (!printUsageAndExit)
		{
			if (opt.nofargs() > 1)
			{
				std::cerr << _TXT("too many arguments") << std::endl;
				printUsageAndExit = true;
				rt = 1;
			}
			if (opt.nofargs() < 1)
			{
				std::cerr << _TXT("too few arguments") << std::endl;
				printUsageAndExit = true;
				rt = 2;
			}
		}
		if (printUsageAndExit)
		{
			std::cout << _TXT("usage:") << " strusAnalyze [options] <phrase>" << std::endl;
			std::cout << "<phrase> =   " << _TXT("path to phrase to analyze") << std::endl;
			std::cout << "             " << _TXT("(file or '-' for stdin if option -F is specified)") << std::endl;
			std::cout << "description: " << _TXT("tokenizes and normalizes a text segment") << std::endl;
			std::cout << "             " << _TXT("and prints the result to stdout.") << std::endl;
			std::cout << _TXT("options:") << std::endl;
			std::cout << "-h|--help" << std::endl;
			std::cout << "   " << _TXT("Print this usage and do nothing else") << std::endl;
			std::cout << "-v|--version" << std::endl;
			std::cout << "    " << _TXT("Print the program version and do nothing else") << std::endl;
			std::cout << "--license" << std::endl;
			std::cout << "    " << _TXT("Print 3rd party licences requiring reference") << std::endl;
			std::cout << "-G|--debug <COMP>" << std::endl;
			std::cout << "    " << _TXT("Issue debug messages for component <COMP> to stderr") << std::endl;
			std::cout << "-m|--module <MOD>" << std::endl;
			std::cout << "    " << _TXT("Load components from module <MOD>") << std::endl;
			std::cout << "-M|--moduledir <DIR>" << std::endl;
			std::cout << "    " << _TXT("Search modules to load first in <DIR>") << std::endl;
			std::cout << "-R|--resourcedir <DIR>" << std::endl;
			std::cout << "    " << _TXT("Search resource files for analyzer first in <DIR>") << std::endl;
			std::cout << "-t|--tokenizer <CALL>" << std::endl;
			std::cout << "    " << _TXT("Use the tokenizer <CALL> (default 'content')") << std::endl;
			std::cout << "-n|--normalizer <CALL>" << std::endl;
			std::cout << "    " << _TXT("Use the normalizer <CALL> (default 'orig')") << std::endl;
			std::cout << "-q|--quot <STR>" << std::endl;
			std::cout << "    " << _TXT("Use the string <STR> as quote for the result (default \"\'\")") << std::endl;
			std::cout << "-P|--plain" << std::endl;
			std::cout << "    " << _TXT("Print results without quotes and without an end of line for each result") << std::endl;
			std::cout << "-F|--fileinput" << std::endl;
			std::cout << "    " << _TXT("Interpret phrase argument as a file name containing the input") << std::endl;
			std::cout << "-T|--trace <CONFIG>" << std::endl;
			std::cout << "    " << _TXT("Print method call traces configured with <CONFIG>") << std::endl;
			std::cout << "    " << strus::string_format( _TXT("Example: %s"), "-T \"log=dump;file=stdout\"") << std::endl;
			return rt;
		}
		// Declare trace proxy objects:
		typedef strus::Reference<strus::TraceProxy> TraceReference;
		std::vector<TraceReference> trace;
		if (opt("trace"))
		{
			std::vector<std::string> tracecfglist( opt.list("trace"));
			std::vector<std::string>::const_iterator ti = tracecfglist.begin(), te = tracecfglist.end();
			for (; ti != te; ++ti)
			{
				trace.push_back( new strus::TraceProxy( moduleLoader.get(), *ti, errorBuffer.get()));
			}
		}
		std::string resultQuot = "'";
		bool resultPlain = false;
		if (opt( "plain"))
		{
			resultPlain = true;
			resultQuot.clear();
		}
		if (opt( "quot"))
		{
			resultQuot = opt[ "quot"];
		}
		std::string phrasestring = opt[0];
		std::string tokenizer( "content");
		if (opt( "tokenizer"))
		{
			tokenizer = opt[ "tokenizer"];
		}
		std::string normalizer( "orig");
		if (opt( "normalizer"))
		{
			normalizer = opt[ "normalizer"];
		}
		// Set paths for locating resources:
		if (opt("resourcedir"))
		{
			std::vector<std::string> pathlist( opt.list("resourcedir"));
			std::vector<std::string>::const_iterator
				pi = pathlist.begin(), pe = pathlist.end();
			for (; pi != pe; ++pi)
			{
				moduleLoader->addResourcePath( *pi);
			}
		}

		// Create objects for analyzer:
		strus::local_ptr<strus::RpcClientMessagingInterface> messaging;
		strus::local_ptr<strus::RpcClientInterface> rpcClient;
		strus::local_ptr<strus::AnalyzerObjectBuilderInterface> analyzerBuilder;

		if (opt("rpc"))
		{
			messaging.reset( strus::createRpcClientMessaging( opt[ "rpc"], errorBuffer.get()));
			if (!messaging.get()) throw std::runtime_error( _TXT("failed to create rpc client messaging"));
			rpcClient.reset( strus::createRpcClient( messaging.get(), errorBuffer.get()));
			if (!rpcClient.get()) throw std::runtime_error( _TXT("failed to create rpc client"));
			(void)messaging.release();
			analyzerBuilder.reset( rpcClient->createAnalyzerObjectBuilder());
			if (!analyzerBuilder.get()) throw std::runtime_error( _TXT("failed to create rpc analyzer object builder"));
		}
		else
		{
			analyzerBuilder.reset( moduleLoader->createAnalyzerObjectBuilder());
			if (!analyzerBuilder.get()) throw std::runtime_error( _TXT("failed to create analyzer object builder"));
		}

		// Create proxy objects if tracing enabled:
		{
			std::vector<TraceReference>::const_iterator ti = trace.begin(), te = trace.end();
			for (; ti != te; ++ti)
			{
				strus::AnalyzerObjectBuilderInterface* proxy = (*ti)->createProxy( analyzerBuilder.get());
				analyzerBuilder.release();
				analyzerBuilder.reset( proxy);
			}
		}
		if (errorBuffer->hasError())
		{
			throw std::runtime_error( _TXT("error in initialization"));
		}

		// Create objects for analyzer:
		strus::local_ptr<strus::QueryAnalyzerInstanceInterface>
			analyzer( analyzerBuilder->createQueryAnalyzer());
		if (!analyzer.get()) throw std::runtime_error( _TXT("failed to create analyzer"));
		const strus::TextProcessorInterface* textproc = analyzerBuilder->getTextProcessor();
		if (!textproc) throw std::runtime_error( _TXT("failed to get text processor"));

		std::string analyzerConfig = strus::string_format( "[Element]\nfeature = %s %s text", normalizer.c_str(), tokenizer.c_str());
		// Create phrase type (tokenizer and normalizer):
		if (!strus::load_QueryAnalyzer_program_std( analyzer.get(), textproc, analyzerConfig, errorBuffer.get()))
		{
			throw strus::runtime_error( _TXT("failed to load query analyzer: %s"), errorBuffer->fetchError());
		}

		// Load the phrase:
		bool queryIsFile = opt("fileinput");
		if (queryIsFile)
		{
			int ec;
			std::string ps;
			if (phrasestring == "-")
			{
				ec = strus::readStdin( ps);
				if (ec) throw strus::runtime_error( _TXT("failed to read query from stdin (errno %u)"), ec);
			}
			else
			{
				ec = strus::readFile( phrasestring, ps);
				if (ec) throw strus::runtime_error(_TXT("failed to read query from file %s (errno %u)"), phrasestring.c_str(), ec);
			}
			phrasestring = ps;
		}
		// Analyze the phrase and print the result:
		strus::local_ptr<strus::QueryAnalyzerContextInterface> qryanactx( analyzer->createContext());
		if (!qryanactx.get()) throw std::runtime_error( _TXT("failed to create query analyzer context"));
	
		qryanactx->putField( 1, "", phrasestring);
		strus::analyzer::QueryTermExpression qry = qryanactx->analyze();
		if (errorBuffer->hasError()) throw std::runtime_error( _TXT("query analysis failed"));
		std::vector<strus::analyzer::QueryTerm> terms;
		std::vector<strus::analyzer::QueryTermExpression::Instruction>::const_iterator
			ii = qry.instructions().begin(), ie = qry.instructions().end();
		for (int iidx=0; ii != ie; ++ii,++iidx)
		{
			if (ii->opCode() == strus::analyzer::QueryTermExpression::Instruction::Term)
			{
				const strus::analyzer::QueryTerm& term = qry.term( ii->idx());
				if (resultPlain)
				{
					if (iidx) std::cout << " ";
					std::cout << term.value();
				}
				else
				{
					std::cout << resultQuot << term.value() << resultQuot << std::endl;
				}
			}
		}
		if (errorBuffer->hasError())
		{
			throw std::runtime_error( _TXT("error in analyze phrase"));
		}
		std::cerr << _TXT("done.") << std::endl;
		if (!dumpDebugTrace( dbgtrace, NULL/*filename ~ NULL = stderr*/))
		{
			std::cerr << _TXT("failed to dump debug trace to file") << std::endl;
		}
		return 0;
	}
	catch (const std::bad_alloc&)
	{
		std::cerr << _TXT("ERROR ") << _TXT("out of memory") << std::endl;
		return -2;
	}
	catch (const std::runtime_error& e)
	{
		const char* errormsg = errorBuffer->fetchError();
		if (errormsg)
		{
			std::cerr << _TXT("ERROR ") << e.what() << ": " << errormsg << std::endl;
		}
		else
		{
			std::cerr << _TXT("ERROR ") << e.what() << std::endl;
		}
	}
	catch (const std::exception& e)
	{
		std::cerr << _TXT("EXCEPTION ") << e.what() << std::endl;
	}
	if (!dumpDebugTrace( dbgtrace, NULL/*filename ~ NULL = stderr*/))
	{
		std::cerr << _TXT("failed to dump debug trace to file") << std::endl;
	}
	return -1;
}
Example #14
void VertexArrayRenderer::RefreshArray() {
	SortPrimitives();

	m_vertex_data.clear();
	m_color_data.clear();
	m_texture_data.clear();
	m_index_data.clear();

	m_vertex_data.reserve( m_vertex_count );
	m_color_data.reserve( m_vertex_count );
	m_texture_data.reserve( m_vertex_count );
	m_index_data.reserve( m_index_count );

	m_batches.clear();

	m_last_vertex_count = 0;
	m_last_index_count = 0;

	auto primitives_size = m_primitives.size();

	// Default viewport
	Batch current_batch;
	current_batch.viewport = m_default_viewport;
	current_batch.atlas_page = 0;
	current_batch.start_index = 0;
	current_batch.index_count = 0;
	current_batch.min_index = 0;
	current_batch.max_index = static_cast<GLuint>( m_vertex_count - 1 );
	current_batch.custom_draw = false;

	sf::FloatRect window_viewport( 0.f, 0.f, static_cast<float>( m_window_size.x ), static_cast<float>( m_window_size.y ) );

	for( std::size_t primitive_index = 1; primitive_index != primitives_size + 1; primitive_index += 1 ) {
		Primitive* primitive = m_primitives[primitive_index - 1].get();

		primitive->SetSynced();

		if( !primitive->IsVisible() ) {
			continue;
		}

		sf::Vector2f position_transform( primitive->GetPosition() );

		auto viewport = primitive->GetViewport();

		std::size_t atlas_page = 0;

		auto viewport_rect = window_viewport;

		// Check if primitive needs to be rendered in a custom viewport.
		if( viewport && ( ( *viewport ) != ( *m_default_viewport ) ) ) {
			sf::Vector2f destination_origin( viewport->GetDestinationOrigin() );
			sf::Vector2f size( viewport->GetSize() );

			position_transform += ( destination_origin - viewport->GetSourceOrigin() );

			if( m_cull ) {
				viewport_rect.left = destination_origin.x;
				viewport_rect.top = destination_origin.y;
				viewport_rect.width = size.x;
				viewport_rect.height = size.y;
			}
		}

		const std::shared_ptr<Signal>& custom_draw_callback( primitive->GetCustomDrawCallback() );

		if( custom_draw_callback ) {
			// Start a new batch.
			current_batch.max_index = m_last_vertex_count ? ( static_cast<GLuint>( m_last_vertex_count ) - 1 ) : 0;
			m_batches.push_back( current_batch );

			// Mark current_batch custom draw batch.
			current_batch.viewport = viewport;
			current_batch.start_index = 0;
			current_batch.index_count = 0;
			current_batch.min_index = 0;
			current_batch.max_index = 0;
			current_batch.custom_draw = true;
			current_batch.custom_draw_callback = custom_draw_callback;

			// Start a new batch.
			m_batches.push_back( current_batch );

			// Reset current_batch to defaults.
			current_batch.viewport = m_default_viewport;
			current_batch.start_index = m_last_index_count;
			current_batch.index_count = 0;
			current_batch.min_index = m_last_vertex_count ? ( static_cast<GLuint>( m_last_vertex_count ) - 1 ) : 0;
			current_batch.custom_draw = false;
		}
		else {
			// Process primitive's vertices and indices
			const std::vector<Primitive::Vertex>& vertices( primitive->GetVertices() );
			const std::vector<GLuint>& indices( primitive->GetIndices() );

			sf::Vector2f position( 0.f, 0.f );

			sf::FloatRect bounding_rect( 0.f, 0.f, 0.f, 0.f );

			for( const auto& vertex : vertices ) {
				position.x = vertex.position.x + position_transform.x;
				position.y = vertex.position.y + position_transform.y;

				m_vertex_data.push_back( position );
				m_color_data.push_back( vertex.color );

				atlas_page = static_cast<unsigned int>( vertex.texture_coordinate.y ) / m_max_texture_size;

				// Used to normalize texture coordinates.
				sf::Vector2f normalizer( 1.f / static_cast<float>( m_texture_atlas[atlas_page]->getSize().x ), 1.f / static_cast<float>( m_texture_atlas[atlas_page]->getSize().y ) );

				// Normalize SFML's pixel texture coordinates.
				m_texture_data.push_back( sf::Vector2f( vertex.texture_coordinate.x * normalizer.x, static_cast<float>( static_cast<unsigned int>( vertex.texture_coordinate.y ) % m_max_texture_size ) * normalizer.y ) );

				// Update the bounding rect.
				if( m_cull ) {
					if( position.x < bounding_rect.left ) {
						bounding_rect.width += bounding_rect.left - position.x;
						bounding_rect.left = position.x;
					}
					else if( position.x > bounding_rect.left + bounding_rect.width ) {
						bounding_rect.width = position.x - bounding_rect.left;
					}

					if( position.y < bounding_rect.top ) {
						bounding_rect.height += bounding_rect.top - position.y;
						bounding_rect.top = position.y;
					}
					else if( position.y > bounding_rect.top + bounding_rect.height ) {
						bounding_rect.height = position.y - bounding_rect.top;
					}
				}
			}

			if( m_cull && !viewport_rect.intersects( bounding_rect ) ) {
				m_vertex_data.resize( m_last_vertex_count );
				m_color_data.resize( m_last_vertex_count );
				m_texture_data.resize( m_last_vertex_count );
			}
			else {
				for( const auto& index : indices ) {
					m_index_data.push_back( m_last_vertex_count + index );
				}

				// Check if we need to start a new batch.
				if( ( ( *viewport ) != ( *current_batch.viewport ) ) || ( atlas_page != current_batch.atlas_page ) ) {
					current_batch.max_index = m_last_vertex_count ? ( static_cast<GLuint>( m_last_vertex_count ) - 1 ) : 0;
					m_batches.push_back( current_batch );

					// Reset current_batch to defaults.
					current_batch.viewport = viewport;
					current_batch.atlas_page = atlas_page;
					current_batch.start_index = m_last_index_count;
					current_batch.index_count = 0;
					current_batch.min_index = m_last_vertex_count ? ( static_cast<GLuint>( m_last_vertex_count ) - 1 ) : 0;
					current_batch.custom_draw = false;
				}

				current_batch.index_count += static_cast<unsigned int>( indices.size() );

				m_last_vertex_count += static_cast<GLsizei>( vertices.size() );
				m_last_index_count += static_cast<GLsizei>( indices.size() );
			}
		}
	}

	current_batch.max_index = m_last_vertex_count ? ( static_cast<GLuint>( m_last_vertex_count ) - 1 ) : 0;
	m_batches.push_back( current_batch );
}
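The texture-coordinate computation in the vertex loop above amounts to the following mapping from atlas pixel coordinates to page-local normalized coordinates, with W_p and H_p the pixel size of atlas page p:

p = \lfloor y_{\mathrm{tex}} / \mathrm{maxTexSize} \rfloor, \qquad u = x_{\mathrm{tex}} / W_p, \qquad v = (y_{\mathrm{tex}} \bmod \mathrm{maxTexSize}) / H_p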
Example #15
void Renderer::RefreshVBO( sf::RenderWindow& window ) {
	SortPrimitives();

	m_vertex_data.clear();
	m_color_data.clear();
	m_texture_data.clear();

	m_viewport_pairs.clear();

	m_last_vertex_count = 0;

	std::size_t primitives_size = m_primitives.size();

	// Check for alpha values in all primitives.
	// Disable depth test if any found.
	for( std::size_t primitive_index = 0; primitive_index < primitives_size; ++primitive_index ) {
		const std::vector<Primitive::Vertex>& vertices( m_primitives[primitive_index]->GetVertices() );

		std::size_t vertices_size = vertices.size();

		for( std::size_t vertex_index = 0; vertex_index < vertices_size; ++vertex_index ) {
			const Primitive::Vertex& vertex( vertices[vertex_index] );

			if( m_depth_clear_strategy && ( vertex.color.a < 255 ) ) {
#ifdef SFGUI_DEBUG
				std::cerr << "Detected alpha value " << static_cast<int>( vertex.color.a ) << " disabling depth test.\n";
#endif
				m_depth_clear_strategy = NO_DEPTH;
			}
		}
	}

	// Used to normalize texture coordinates.
	sf::Vector2f normalizer( 1.f / static_cast<float>( m_texture_atlas.getWidth() ), 1.f / static_cast<float>( m_texture_atlas.getHeight() ) );

	// Depth test vars
	float depth = -4.f;
	float depth_delta = 4.f / static_cast<float>( primitives_size );
	int direction = m_depth_clear_strategy ? -1 : 1;
	int start = static_cast<int>( m_depth_clear_strategy ? primitives_size : 1 );
	std::size_t end = m_depth_clear_strategy ? 0 : primitives_size + 1;

	RendererViewport::Ptr current_viewport = m_default_viewport;
	m_viewport_pairs.push_back( ViewportPair( m_default_viewport, 0 ) );

	sf::FloatRect window_viewport( 0.f, 0.f, static_cast<float>( window.getSize().x ), static_cast<float>( window.getSize().y ) );

	for( std::size_t primitive_index = start; primitive_index != end; primitive_index += direction ) {
		Primitive* primitive = m_primitives[primitive_index - 1].get();

		primitive->SetSynced();

		if( !primitive->IsVisible() ) {
			continue;
		}

		sf::Vector2f position_transform( primitive->GetPosition() );

		// Check if primitive needs to be rendered in a custom viewport.
		RendererViewport::Ptr viewport = primitive->GetViewport();

		if( viewport != current_viewport ) {
			current_viewport = viewport;

			ViewportPair scissor_pair( viewport, m_last_vertex_count );

			m_viewport_pairs.push_back( scissor_pair );
		}

		bool cull = m_cull;

		sf::FloatRect viewport_rect = window_viewport;

		if( viewport && ( viewport != m_default_viewport ) ) {
			sf::Vector2f destination_origin( viewport->GetDestinationOrigin() );
			sf::Vector2f size( viewport->GetSize() );

			position_transform += ( destination_origin - viewport->GetSourceOrigin() );

			if( m_cull ) {
				viewport_rect.left = destination_origin.x;
				viewport_rect.top = destination_origin.y;
				viewport_rect.width = size.x;
				viewport_rect.height = size.y;
			}
		}

		// Process primitive's vertices.
		const std::vector<Primitive::Vertex>& vertices( primitive->GetVertices() );

		std::size_t vertices_size = vertices.size();

		for( std::size_t vertex_index = 0; vertex_index < vertices_size; ++vertex_index ) {
			const Primitive::Vertex& vertex( vertices[vertex_index] );

			sf::Vector3f position( vertex.position.x + position_transform.x, vertex.position.y + position_transform.y, depth );

			m_vertex_data.push_back( position );
			m_color_data.push_back( vertex.color );

			// Normalize SFML's pixel texture coordinates.
			m_texture_data.push_back( sf::Vector2f( vertex.texture_coordinate.x * normalizer.x, vertex.texture_coordinate.y * normalizer.y ) );

			if( m_cull && viewport_rect.contains( position.x, position.y ) ) {
				cull = false;
			}
		}

		if( cull ) {
			m_vertex_data.resize( m_last_vertex_count );
			m_color_data.resize( m_last_vertex_count );
			m_texture_data.resize( m_last_vertex_count );
		}
		else {
			m_last_vertex_count += vertices_size;
			depth -= depth_delta;
		}
	}

	// Sync vertex data
	glBindBuffer( GL_ARRAY_BUFFER, m_vertex_vbo );
	glBufferData( GL_ARRAY_BUFFER, m_vertex_data.size() * sizeof( sf::Vector3f ), 0, GL_DYNAMIC_DRAW );

	if( m_vertex_data.size() > 0 ) {
		glBufferSubData( GL_ARRAY_BUFFER, 0, m_vertex_data.size() * sizeof( sf::Vector3f ), &m_vertex_data[0] );
	}

	// Sync color data
	glBindBuffer( GL_ARRAY_BUFFER, m_color_vbo );
	glBufferData( GL_ARRAY_BUFFER, m_color_data.size() * sizeof( sf::Color ), 0, GL_DYNAMIC_DRAW );

	if( m_color_data.size() > 0 ) {
		glBufferSubData( GL_ARRAY_BUFFER, 0, m_color_data.size() * sizeof( sf::Color ), &m_color_data[0] );
	}

	// Sync texture coord data
	glBindBuffer( GL_ARRAY_BUFFER, m_texture_vbo );
	glBufferData( GL_ARRAY_BUFFER, m_texture_data.size() * sizeof( sf::Vector2f ), 0, GL_STATIC_DRAW );

	if( m_texture_data.size() > 0 ) {
		glBufferSubData( GL_ARRAY_BUFFER, 0, m_texture_data.size() * sizeof( sf::Vector2f ), &m_texture_data[0] );
	}
}
Example #16
int main (int argc, char** argv) {
  /* Initialize MPI */
  MPI_Init (&argc, &argv);

  /* Figure out the rank and size */
  MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size (MPI_COMM_WORLD, &mpi_size);

  /* MPI sends argc and argv everywhere --- parse everywhere */
  parse_parameters (argc,argv);

  /**
   * Now, we read the input matrix, FORCED, and PROHIBIT maps. To do this
   * we first create a partition of the total space so that we know which
   * range of KPIs is ours. Input matrix is stored per KPI and the maps 
   * are also ordered according to KPIs although not all the KPIs need to
   * be present.
   */
  pfunc::space_1D kpi_space = 
    partitioner_t<int>::create (0, 
                                int_params[NUM_KPIS_INDEX], 
                                mpi_rank, 
                                mpi_size);
  std::pair<int,int> full_kpi_range (0, int_params[NUM_KPIS_INDEX]);
  std::pair<int,int> my_kpi_range (kpi_space.begin(), kpi_space.end());
  std::vector<double> values 
         ((my_kpi_range.second-my_kpi_range.first)*
           int_params [NUM_INTERVALS_INDEX]);
  int_vec_map_t prohibit_map;
  int_vec_map_t forced_map;
  std::vector<double> kpi_weights (int_params[NUM_KPIS_INDEX], 1.0);
                                                         
  read_dense_matrix (chr_params [INPUT_MATRIX_PATH_INDEX],
                     my_kpi_range,
                     values.begin());

  if (0!=strcmp ("",chr_params[PROHIBIT_LIST_PATH_INDEX])) {
    read_map (chr_params [PROHIBIT_LIST_PATH_INDEX], 
              prohibit_map);
  }

  if (0!=strcmp ("",chr_params[FORCED_LIST_PATH_INDEX])) {
    read_map (chr_params [FORCED_LIST_PATH_INDEX], 
              forced_map);
  }

  if (0!=strcmp ("",chr_params[KPI_WEIGHTS_PATH_INDEX])) {
    read_dense_matrix (chr_params [KPI_WEIGHTS_PATH_INDEX],
                       full_kpi_range,
                       kpi_weights.begin());
  }


  if (4<int_params[DEBUG_INDEX]) {
    print_matrix (values.begin(), 
                  int_params[NUM_INTERVALS_INDEX], 
                  my_kpi_range.second- my_kpi_range.first, 
                  "A");
    
    print_map (prohibit_map.begin(),
               prohibit_map.end(),
               "PROHIBIT");

    print_map (forced_map.begin(),
               forced_map.end(),
               "FORCED");
  }

#if USE_PFUNC
  /**
   * Define the PFunc instance. Note that we HAVE TO USE PFUNC::USE_DEFAULT as
   * the type of the FUNCTOR so that we can use pfunc::parallel_reduce.
   */
  typedef
  pfunc::generator <pfunc::cilkS, /* Cilk-style scheduling */
                    pfunc::use_default, /* No task priorities needed */
                    pfunc::use_default /* any function type*/> generator_type;
  typedef generator_type::attribute attribute;
  typedef generator_type::task task;
  typedef generator_type::taskmgr taskmgr;

  /* Create an instance of PFunc if that is what is needed */
  taskmgr* global_taskmgr;
  const int n_queues = int_params [NUM_THREADS_INDEX];
  unsigned int* thds_per_q_arr = new unsigned int [n_queues];
  for (int i=0; i<n_queues; ++i) thds_per_q_arr [i] = ONE_STEP;
  global_taskmgr = new taskmgr (n_queues, thds_per_q_arr);
  delete [] thds_per_q_arr;

  /* Create a task handle for all the tasks that we will use */
  task root_task;
  attribute root_attribute (false /*nested*/, false /*grouped*/);
#endif

  /*************************************************************************/
  /*           Set the base case size for all the tasks                    */
  pfunc::space_1D::base_case_size = int_params [TASK_SIZE_INDEX];
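  /* Ranges handed to parallel_reduce are presumably split recursively until
     they are at most this size, at which point the functor runs serially
     on the subrange. */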
  
  /*************************************************************************/
  /* Create a range mapper that knows about the ownership of each column */
  std::vector<int> column_intervals (mpi_size+1);
  partitioner_t<int>::intervals (0, 
                                 int_params[NUM_KPIS_INDEX], 
                                 mpi_size, 
                                 column_intervals.begin());
  typedef interval_mapper_t<std::vector<int> > interval_mapper_t;
  interval_mapper_t interval_mapper (column_intervals);

  /* Populate the data frame with the given input matrix */
  data_frame_t<double> data_frame (my_kpi_range.first,
                                   int_params [NUM_INTERVALS_INDEX],
                                   my_kpi_range.second-my_kpi_range.first,
                                   int_params [LAG_INDEX]);
  data_frame.set (values.begin(), values.end(), true);
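  /* From the sizes used below (num_rows = NUM_INTERVALS - LAG, beta sized
     MAX_ITERS * LAG), each KPI appears to contribute LAG lagged copies of
     its series as one group of candidate regressors. */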

  /* Compute the mean and the length of each of the materialized X columns */
  double normalization_time = micro_time ();
  typedef normalizer_t <data_frame_t<double>, 
                        identity_mapper_t<int> > my_normalizer_t;
  identity_mapper_t<int> identity_mapper;
  my_normalizer_t normalizer (&data_frame, &identity_mapper);

#if USE_PFUNC
  pfunc::parallel_reduce<generator_type, my_normalizer_t, pfunc::space_1D>
    normalize (kpi_space, normalizer, *global_taskmgr);
  pfunc::spawn (*global_taskmgr, root_task, root_attribute, normalize);
  pfunc::wait (*global_taskmgr, root_task);
#else
  normalizer (kpi_space);
#endif
  normalization_time = micro_time() - normalization_time;

  /*************************************************************************/
  /*  Rule out all the candidates that have no variation in their columns  */
  double selection_time = micro_time ();
  typedef selector_t <data_frame_t<double>, 
                      int_set_t,
                      identity_mapper_t<int> > my_selector_t;
  my_selector_t selector (&data_frame, &identity_mapper);

#if USE_PFUNC
  pfunc::parallel_reduce<generator_type, my_selector_t, pfunc::space_1D>
    select (kpi_space, selector, *global_taskmgr);
  pfunc::spawn (*global_taskmgr, root_task, root_attribute, select);
  pfunc::wait (*global_taskmgr, root_task);
#else
  selector (kpi_space);
#endif
  selection_time = micro_time() - selection_time;

  /*************************************************************************/
  /*  Factorize all the columns so that Xg'Xg is formed and ready to go    */
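  /* (Factorizing each Xg'Xg + lambda*I once up front lets every per-KPI
     solve below reuse it; lambda is the ridge penalty from
     LAMBDA_RIDGE_INDEX.) */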
  double factorization_time = micro_time ();
  typedef factorizer_t <data_frame_t<double>, 
                        std::vector<double>,
                        identity_mapper_t<int>,
                        SolverType> my_factorizer_t;
  my_factorizer_t factorizer (&data_frame, 
                              &identity_mapper,
                              int_params[NUM_INTERVALS_INDEX]-
                              int_params[LAG_INDEX],
                              int_params[LAG_INDEX],
                              dbl_params[LAMBDA_RIDGE_INDEX]);

#if USE_PFUNC
  pfunc::parallel_reduce<generator_type, my_factorizer_t, pfunc::space_1D>
    factorize (kpi_space, factorizer, *global_taskmgr);
  pfunc::spawn (*global_taskmgr, root_task, root_attribute, factorize);
  pfunc::wait (*global_taskmgr, root_task);
#else
  factorizer (kpi_space);
#endif
  factorization_time = micro_time() - factorization_time;

  /*************************************************************************/
  double total_time = 0.0;
  random_filter_t<int> filter (int_params[RAND_SEED_INDEX],
                               dbl_params[SAMPLE_RATIO_INDEX]);

  /* For each KPI, build model and output it one by one */
  int num_kpis_processed = 0;
  for (int kpi=0; kpi<int_params[NUM_KPIS_INDEX]; ++kpi) { 

    /**
     * We need to figure out if this is a useless KPI, in which case we
     * will not bother trying to form a model for it. All we need is a
     * broadcast from the OWNER of this particular KPI, realized here as a
     * MAX-Allreduce of per-rank votes.
     */
    int my_vote = 0; /* 0 = process this KPI */
    int result;
    if (false==filter(kpi) ||
        (selector.get_list().end()!=selector.get_list().find(kpi)))
      my_vote = 1;
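    /* The random filter is seeded identically on every rank, so those votes
       already agree; the Allreduce mainly disseminates the zero-variance
       verdict that only the owner rank can compute locally. */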
    MPI_Allreduce (&my_vote,
                   &result, 
                   1, 
                   MPI_INT, 
                   MPI_MAX, /* if any rank votes 1, all ranks see 1 */
                   MPI_COMM_WORLD);
    if (1 == result) continue;

    /* we are processing */
    ++num_kpis_processed;

    const int num_rows = (int_params[NUM_INTERVALS_INDEX]-
                          int_params[LAG_INDEX]);

    /* Populate 'y' */
    std::vector<double> y (num_rows);
    const int owner = interval_mapper (kpi);
    if (mpi_rank == owner) data_frame.materialize_Y (kpi, y.begin());
    MPI_Bcast (&(y[0]), num_rows, MPI_DOUBLE, owner, MPI_COMM_WORLD);
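    /* Only the owner rank holds column 'kpi' locally, so it materializes the
       response vector y and broadcasts it; afterwards every rank has it. */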

    /* 
     * Create space for 'beta'. As we are modeling a normalized and centered X
     * with normalized 'Y', we do not have to worry about the intercept --- we
     * simply need enough space for the coefficients --- (M-L). The length of
     * each beta is at most MAX_ITERS * LAG 
     */
    std::vector<double> beta (int_params[MAX_ITERS_INDEX] * 
                              int_params[LAG_INDEX]);

    /* Instantiate the modeler */
    typedef std::less<double> compare_t;
    typedef modeler_t<data_frame_t<double>, /* type for the data_frame */
                      std::vector<double>,  /* type for Y and BETA */
                      std::vector<int>,     /*type for storing KPI predictors*/
                      int_set_t,            /* type for FORCED and PROHIBIT */
                      SolverType,           /* type for the solver */
                      stopper_t,            /* stopping functor */
                      compare_t,            /* comparison operator */
                      interval_mapper_t,    /* determine ownership */
                      my_factorizer_t       /* type of factorizer */
#if USE_PFUNC
                      , generator_type      /* the generator type */
#endif
                      > my_modeler_t;
                      
    const double stop_factor =
      (STOP_ON_OBJ_GAIN==int_params[STOPPING_CRITERIA_INDEX]) ?
         dbl_params[MIN_OBJ_GAIN_INDEX]:dbl_params[MIN_BIC_GAIN_INDEX];
    const stopper_t stopper (stop_factor, int_params[STOPPING_CRITERIA_INDEX]); 

    /* Create a map of the prohibited regressors for this KPI */
    int_set_t prohibit_set;
    if (prohibit_map.end() != prohibit_map.find(kpi)) {
      prohibit_set.insert ((prohibit_map[kpi]).begin(),
                           (prohibit_map[kpi]).end());
    }
    /* Also prohibit the candidates that failed screening (no variation) */
    prohibit_set.insert (selector.get_list().begin(),
                         selector.get_list().end());

    /* Create a map of the forced regressors for this KPI */
    int_set_t forced_set;
    if (forced_map.end() != forced_map.find(kpi)) {
      forced_set.insert ((forced_map[kpi]).begin(),
                         (forced_map[kpi]).end());
    }

    /* Create an instance of the modeler */
    std::vector<int> selected;
    double variance;
    double intercept;
    my_modeler_t my_modeler (data_frame,  /* data frame */
                             y,           /* regressor */
                             beta,        /* the output */
                             selected,    /* the selected KPIs in order */
                             prohibit_set,/* prohibited regressors */
                             forced_set,  /* forced regressors */
                             kpi_weights, /* weights to use for each kpi */
                             variance,    /* variance */
                             intercept,   /* intercept */
                             kpi,         /* target */
                             stopper,     /* stopping criteria */
                             interval_mapper, /* determine ownership */
                             factorizer,  /* factorizer for Xg'Xg */
                             dbl_params[LAMBDA_RIDGE_INDEX], /*ridge penalty*/
                             num_rows, /* num rows */
                             int_params[LAG_INDEX],  /* num columns */  
                             int_params[MAX_ITERS_INDEX],
                             int_params[DEBUG_INDEX]
#if USE_PFUNC
                             ,global_taskmgr /* task manager for pfunc */
#endif
                             );

    /* Let the model compute */
    double time = micro_time ();
    my_modeler ();
    time = micro_time () - time;
    total_time += time;

    /* Print out the coefficients if asked for */
    if (ROOT==mpi_rank && 1<int_params[DEBUG_INDEX]) {
      printf ("Model for KPI %d (Variance=%lf, Intercept=%lf)\n", 
                                          kpi, variance, intercept);
      for (size_t i=0;i<selected.size();++i) {
        printf("%d (",selected[i]);
        for (int j=0; j<int_params[LAG_INDEX]; ++j) {
          printf ("%lf", beta[i*int_params[LAG_INDEX]+j]); 
          if (j!=(int_params[LAG_INDEX]-1)) printf(",");
        }
        printf(")\n");
      }
    }

    /* Print out the coefficients to file if asked for */
    if (ROOT==mpi_rank && 0<int_params[WRITE_FILES_INDEX]) {
      const std::string base_dir    = chr_params[OUTPUT_FILE_PATH_INDEX];
      const std::string par_path    = base_dir + "/parents.txt";
      const std::string coeffs_path = base_dir + "/coeffs.txt";
      const std::string var_path    = base_dir + "/variance.txt";
      const std::string int_path    = base_dir + "/intercept.txt";

      std::ofstream par_file (par_path.c_str(), std::ios_base::app); 
      std::ofstream coeffs_file (coeffs_path.c_str(), std::ios_base::app); 
      std::ofstream var_file (var_path.c_str(), std::ios_base::app); 
      std::ofstream int_file (int_path.c_str(), std::ios_base::app);

      par_file << kpi << ":";
      coeffs_file << kpi << ":";
      var_file << kpi << ":";
      int_file << kpi << ":";

      for (size_t i=0;i<selected.size();++i) {
        par_file << selected[i] << " ";
        for (int j=0; j<int_params[LAG_INDEX]; ++j)
          coeffs_file << beta[i*int_params[LAG_INDEX]+j] << " ";
      }

      var_file << variance;
      int_file << intercept;

      par_file << "\n";
      coeffs_file << "\n";
      var_file << "\n";
      int_file << "\n";

      par_file.close();
      coeffs_file.close();
      var_file.close();
      int_file.close();
    }
  }

  if (ROOT==mpi_rank && 0<num_kpis_processed)
    printf ("Built %d models in %lf (secs) at rate of %lf (per sec)\n",
      num_kpis_processed, total_time, num_kpis_processed/total_time);

#if USE_PFUNC
  delete global_taskmgr;
#endif

  /* Finalize MPI */
  MPI_Finalize ();

  return 0;
}