/// Measure the trained network's accuracy on generated test batches.
///
/// Runs `iterations` batches through the network and returns the averaged
/// accuracy scaled to a percentage (0-100).
static float testNetwork(NeuralNetwork& neuralNetwork, const Image& image,
    float noiseMagnitude, size_t iterations, size_t batchSize,
    std::default_random_engine& engine)
{
    float accuracy = 0.0f;

    // Force at least one iteration so the division below cannot be by zero.
    // Use an explicit template argument: the previous literal `1UL` does not
    // match size_t on LLP64 platforms (e.g. 64-bit Windows), where
    // std::max(size_t, unsigned long) fails template deduction.
    iterations = std::max<size_t>(iterations, 1);

    lucius::util::log("TestVisualization") << "Testing the accuracy of the trained network.\n";

    for(size_t i = 0; i != iterations; ++i)
    {
        lucius::util::log("TestVisualization") << " Iteration " << i << " out of "
            << iterations << "\n";

        // A fresh batch of generated images for each iteration.
        ImageVector batch = generateBatch(image, noiseMagnitude,
            batchSize, engine);

        Matrix input = batch.convertToStandardizedMatrix(
            neuralNetwork.getInputCount(),
            neuralNetwork.getInputBlockingFactor(), image.colorComponents());

        Matrix reference = generateReference(batch);

        lucius::util::log("TestVisualization") << "  Input:     " << input.toString();
        lucius::util::log("TestVisualization") << "  Reference: " << reference.toString();

        accuracy += neuralNetwork.computeAccuracy(input, reference);
    }

    // Average the per-iteration accuracy and express it as a percentage.
    return accuracy * 100.0f / iterations;
}
/// Build a batch that mixes fully random images with noisy copies of the
/// reference image, choosing between the two with a fair coin flip per sample.
static ImageVector generateBatch(const Image& image, float noiseMagnitude,
    size_t batchSize, std::default_random_engine& engine)
{
    ImageVector batch;

    std::bernoulli_distribution coinFlip(0.5f);

    for(size_t sample = 0; sample != batchSize; ++sample)
    {
        if(coinFlip(engine))
        {
            // Heads: an entirely random image of the same dimensions.
            batch.push_back(generateRandomImage(
                image.y(), image.x(), image.colorComponents(), engine));
        }
        else
        {
            // Tails: the reference image with random noise applied.
            batch.push_back(addRandomNoiseToImage(image,
                noiseMagnitude, engine));
        }
    }

    return batch;
}
/// Build the single-column label matrix for a batch: 1.0 for images labelled
/// "reference", 0.0 for everything else.
static Matrix generateReference(const ImageVector& images)
{
    Matrix reference(images.size(), 1);

    for(size_t row = 0; row != images.size(); ++row)
    {
        bool isReference = (images[row].label() == "reference");

        reference(row, 0) = isReference ? 1.0f : 0.0f;
    }

    return reference;
}
// Esempio n. 4
// 0
/// Menu handler: gather the image from every open document of this template,
/// resample them all to the size of the first image, run PCA fusion, show the
/// elapsed time in the main window title, and display the fused result.
void CRemoteDoc::OnMENUITEMPCAFusion() 
{
	using namespace rss;

	// Collect one image per open document belonging to this template.
	POSITION pos=GetDocTemplate()->GetFirstDocPosition();

	ImageVector<image_type> images;

	while(pos) {
		CRemoteDoc *pDoc= (CRemoteDoc *) (GetDocTemplate()->GetNextDoc(pos));

		if(pDoc) 
			images.push_back(pDoc->GetImage());
		else 
			break;
	}

	// Fusion requires at least two source images.
	if(images.size()<=1) return;

	int width = images[0].width();
	int height = images[0].height();

	PCAFusion<image_type> fusion(width, height);		

	// Resample every image to the dimensions of the first one so the fusion
	// operates on uniformly sized inputs.
	BilinearInterpolation<image_type> interpolate(Size(width, height));
	for(size_t i=0; i<images.size(); i++) {
		image_type result;
		interpolate(images[i], result);
		images[i]=result;
	}

	// Run the fusion and time it.
	image_type result;
	clock_t start = clock();
	fusion(images, result);
	clock_t end = clock();
	CString title;
	title.Format("%f", static_cast<double>(end - start) / CLOCKS_PER_SEC);	
	// Pass the CString directly (implicit LPCTSTR conversion). The original
	// called GetBuffer() with no matching ReleaseBuffer(), which leaves the
	// CString's length bookkeeping in an inconsistent state.
	theApp.GetMainWnd()->GetTopLevelParent()->SetWindowText(title);

	SetImage(result);
}
// Esempio n. 5
// 0
 /// Initialize the detection algorithm with the supplied images and let every
 /// configured IO handler persist the most recent (last) image.
 void Machinery::initialize(ImageVector & images)
 {
     m_algorithm->initialize(images);

     // Guard: the save loop below dereferences the last element, which is
     // undefined behavior when the vector is empty.
     if(images.empty())
     {
         return;
     }

     // size_t matches the container's size type; the original int counter
     // mixed signed/unsigned in the comparison.
     for(size_t i = 0; i < m_ios.size(); i++)
     {
         m_ios[i]->save(*images[images.size()-1]);
     }
 }
/// Train the network for the requested number of iterations, each on a freshly
/// generated batch of images derived from the reference image.
static void trainNetwork(NeuralNetwork& neuralNetwork, const Image& image,
    float noiseMagnitude, size_t iterations, size_t batchSize,
    std::default_random_engine& engine)
{
    lucius::util::log("TestVisualization") << "Training the network.\n";

    for(size_t iteration = 0; iteration != iterations; ++iteration)
    {
        lucius::util::log("TestVisualization") << " Iteration " << iteration << " out of "
            << iterations << "\n";

        // Generate the training samples and their labels for this pass.
        auto samples = generateBatch(image, noiseMagnitude,
            batchSize, engine);

        auto networkInput = samples.convertToStandardizedMatrix(
            neuralNetwork.getInputCount(),
            neuralNetwork.getInputBlockingFactor(), image.colorComponents());

        auto expectedOutput = generateReference(samples);

        lucius::util::log("TestVisualization") << "  Input:     " << networkInput.toString();
        lucius::util::log("TestVisualization") << "  Reference: " << expectedOutput.toString();

        neuralNetwork.train(networkInput, expectedOutput);
    }
}
// Esempio n. 7
// 0
    /// Build the texture-splatting blend maps for one terrain chunk and fill
    /// in the layer list describing each used texture.
    ///
    /// @param chunkSize   Chunk extent; the chunk origin is chunkCenter minus
    ///                    chunkSize/2 on both axes.
    /// @param chunkCenter Center of the chunk in cell coordinates.
    /// @param pack        When true, pack up to four single-channel blend maps
    ///                    into the channels of one RGBA image; otherwise emit
    ///                    one GL_ALPHA image per non-base layer.
    /// @param blendmaps   Output: the generated blend map images.
    /// @param layerList   Output: layer info for every used texture, in the
    ///                    same sorted order the blend maps index them.
    ///
    /// NOTE(review): origin is truncated straight to a single (cellX, cellY) —
    /// this appears to assume a chunk never spans more than one cell; confirm
    /// with callers.
    void Storage::getBlendmaps(float chunkSize, const osg::Vec2f &chunkCenter,
        bool pack, ImageVector &blendmaps, std::vector<Terrain::LayerInfo> &layerList)
    {
        // TODO - blending isn't completely right yet; the blending radius appears to be
        // different at a cell transition (2 vertices, not 4), so we may need to create a larger blendmap
        // and interpolate the rest of the cell by hand? :/

        osg::Vec2f origin = chunkCenter - osg::Vec2f(chunkSize/2.f, chunkSize/2.f);
        int cellX = static_cast<int>(origin.x());
        int cellY = static_cast<int>(origin.y());

        // Save the used texture indices so we know the total number of textures
        // and number of required blend maps
        std::set<UniqueTextureId> textureIndices;
        // Due to the way the blending works, the base layer will always shine through in between
        // blend transitions (eg halfway between two texels, both blend values will be 0.5, so 25% of base layer visible).
        // To get a consistent look, we need to make sure to use the same base layer in all cells.
        // So we're always adding _land_default.dds as the base layer here, even if it's not referenced in this cell.
        textureIndices.insert(std::make_pair(0,0));

        // First iteration - collect every texture id referenced by the cell's
        // texture grid (LAND_TEXTURE_SIZE+1 to cover the far edge row/column).
        for (int y=0; y<ESM::Land::LAND_TEXTURE_SIZE+1; ++y)
            for (int x=0; x<ESM::Land::LAND_TEXTURE_SIZE+1; ++x)
            {
                UniqueTextureId id = getVtexIndexAt(cellX, cellY, x, y);
                textureIndices.insert(id);
            }

        // Makes sure the indices are sorted, or rather,
        // retrieved as sorted. This is important to keep the splatting order
        // consistent across cells.
        std::map<UniqueTextureId, int> textureIndicesMap;
        for (std::set<UniqueTextureId>::iterator it = textureIndices.begin(); it != textureIndices.end(); ++it)
        {
            // Assign each texture id a dense sequential layer index (0 = base
            // layer, since the set iterates in sorted order starting at (0,0)).
            int size = textureIndicesMap.size();
            textureIndicesMap[*it] = size;
            layerList.push_back(getLayerInfo(getTextureName(*it)));
        }

        int numTextures = textureIndices.size();
        // numTextures-1 since the base layer doesn't need blending
        int numBlendmaps = pack ? static_cast<int>(std::ceil((numTextures - 1) / 4.f)) : (numTextures - 1);

        // Packed maps carry four layers (RGBA channels); unpacked carry one.
        int channels = pack ? 4 : 1;

        // Second iteration - create and fill in the blend maps
        const int blendmapSize = ESM::Land::LAND_TEXTURE_SIZE+1;

        for (int i=0; i<numBlendmaps; ++i)
        {
            GLenum format = pack ? GL_RGBA : GL_ALPHA;

            osg::ref_ptr<osg::Image> image (new osg::Image);
            image->allocateImage(blendmapSize, blendmapSize, 1, format, GL_UNSIGNED_BYTE);
            unsigned char* pData = image->data();

            for (int y=0; y<blendmapSize; ++y)
            {
                for (int x=0; x<blendmapSize; ++x)
                {
                    UniqueTextureId id = getVtexIndexAt(cellX, cellY, x, y);
                    int layerIndex = textureIndicesMap.find(id)->second;
                    // Map the layer onto a blend map and channel. The base
                    // layer (layerIndex 0) yields blendIndex -1, which never
                    // matches i, so it always stays 0 in every blend map.
                    int blendIndex = (pack ? static_cast<int>(std::floor((layerIndex - 1) / 4.f)) : layerIndex - 1);
                    int channel = pack ? std::max(0, (layerIndex-1) % 4) : 0;

                    // Fully opaque where this texel belongs to blend map i,
                    // fully transparent everywhere else.
                    if (blendIndex == i)
                        pData[y*blendmapSize*channels + x*channels + channel] = 255;
                    else
                        pData[y*blendmapSize*channels + x*channels + channel] = 0;
                }
            }

            blendmaps.push_back(image);
        }
    }
// Replace the series' current contents with a copy of the given collection.
void ImageSeries::SetImageCollection(const ImageVector& images)
{
	_imageCollection.assign(images.begin(), images.end());
}
/// Grab images from the Miro video service — either one per keypress, or
/// continuously in streaming mode — and write them out as PPM files.
/// Returns 0 on success, 1 on any Miro/CORBA error.
int main(int argc, char *argv[])
{
  int rc = 0;
  bool first = true;

  // Images captured while streaming; written to disk after the capture loop
  // so disk I/O does not slow down the grab rate.
  ImageVector v;

  Client client(argc, argv);

  if ((rc = parseArgs(argc, argv)))
    return rc;

  try {
    // event handler
    Event * event = new Event();
    
    // Signal set to be handled by the event handler.
    ACE_Sig_Set sig;
    
    // register Signal handler for Ctr+C
    sig.sig_add(SIGINT);
    sig.sig_add(SIGTERM);
    
    // Reactor, misused as signal handler
    ACE_Reactor reactor;
    
    if (reactor.register_handler(sig, event) == -1) {
      throw Miro::ACE_Exception(errno, "failed to register signal handler");
    }
    
    // get reference to video service
    Video_var video = client.resolveName<Video>(interfaceName.c_str());
    Miro::VideoConnection connection(video.in());

    ACE_Time_Value start;
    while(!canceled) {

      // wait for key
      if (first || !streaming) {
	first = false;
	cout << "Press key to grab next image: " << flush;
	getchar();
	start = ACE_OS::gettimeofday();
      }
    
      // init streaming timer
      if (!first && streaming) {
	ACE_Time_Value elapsed = ACE_OS::gettimeofday();
	elapsed -= start;
	if (elapsed.msec() > stop)
	  break;
      }

      {
	// get image
	Miro::VideoAcquireImage 
	  frame(connection, 
		(interval)?
		Miro::VideoAcquireImage::Current :
		Miro::VideoAcquireImage::Next);

	// fill image structure
	Image image;
	image.fileName = path() + client.namingContextName + "_" + Miro::timeString() + ".ppm";
	image.width = connection.handle->format.width;
	image.height = connection.handle->format.height;
	
	// fill image buffer
	if (!bgr && !streaming)
	  image.buffer = (char*) frame.buffer; // borrow the frame buffer — not owned
	else {
	  // own a copy of the pixel data (allocated with new[], so it must be
	  // released with delete[])
	  image.buffer = new char[3 * image.width * image.height];

	  // byte swapping
	  if (bgr) {
	    int offset = 0;
	    for (int i = 0; i < image.width; ++i) {
	      for (int j = 0; j < image.height; ++j, offset += 3) {
		image.buffer[offset + 0] = frame.buffer[offset + 2]; // r
		image.buffer[offset + 1] = frame.buffer[offset + 1]; // g
		image.buffer[offset + 2] = frame.buffer[offset + 0]; // b
	      }
	    }
	  }
	  else
	    memcpy(image.buffer, frame.buffer, 3 * image.width * image.height);
	}
	
	// save image
	if (!streaming) {
	  image.writePpm();
	  if (bgr)
	    delete[] image.buffer;
	}
	else 
	  v.push_back(image);
      }

      // sleep
      if (interval) {
	ACE_Time_Value delta;
	delta.msec(interval);
	ACE_OS::sleep(delta);
      }
    }
    cout << "exiting on signal" << endl;

    // Flush all streamed images to disk. Their buffers were allocated with
    // new char[], so array delete is required — the original plain `delete`
    // was undefined behavior for new[] allocations.
    for (ImageVector::const_iterator i = v.begin(); i != v.end(); ++i) {
      i->writePpm();
      delete[] i->buffer;
    }
  }
  catch (const Miro::ETimeOut& e) {
    cerr << "Miro Timeout Exception: " << e << endl;
    rc = 1;
  }
  catch (const Miro::EDevIO & e) {
    cerr << "Miro Device I/O exception: " << e << endl;
    rc = 1;
  }
  catch (const Miro::EOutOfBounds & e) {
    cerr << "Miro out of bounds exception: " << e << endl;
    rc = 1;
  }
  catch (const CORBA::Exception & e) {
    cerr << "Uncaught CORBA exception: " << e << endl;
    rc = 1;
  }
  return rc;
}