void QSGSharedDistanceFieldGlyphCache::storeGlyphs(const QList<QDistanceField> &glyphs)
{
    {
        QMutexLocker locker(&m_pendingGlyphsMutex);
#if defined(QSGSHAREDDISTANCEFIELDGLYPHCACHE_DEBUG)
        qDebug("QSGSharedDistanceFieldGlyphCache::storeGlyphs() called for %s (%d glyphs)",
               m_cacheId.constData(), glyphs.size());
#endif

        int glyphCount = glyphs.size();
        QVector<quint32> glyphIds(glyphCount);
        QVector<QImage> images(glyphCount);
        for (int i = 0; i < glyphs.size(); ++i) {
            const QDistanceField &df = glyphs.at(i);
            m_requestedGlyphsThatHaveNotBeenReturned.insert(df.glyph());
            glyphIds[i] = df.glyph();
            // ### TODO: Handle QDistanceField in QPlatformSharedGraphicsCache
            images[i] = df.toImage(QImage::Format_Indexed8);
        }

        m_hasPostedEvents = true;
        QSGMainThreadInvoker *invoker = QSGMainThreadInvoker::instance();
        QCoreApplication::postEvent(invoker, new QSGInsertItemsEvent(m_sharedGraphicsCache,
                                                                     m_cacheId,
                                                                     glyphIds,
                                                                     images,
                                                                     m_isInSceneGraphUpdate));
    }

    processPendingGlyphs();
}
示例#2
0
// Process a GL texture through OpenCV's OpenCL interop path:
// acquire the texture, convert it to a UMat, run Laplacian * 10,
// then copy the result back into the same GL texture.
void procOCL_OCV(int tex, int w, int h)
{
    int64_t t = getTimeMs();
    cl::ImageGL imgIn (theContext, CL_MEM_READ_ONLY,  GL_TEXTURE_2D, 0, tex);
    std::vector < cl::Memory > images(1, imgIn);
    theQueue.enqueueAcquireGLObjects(&images);
    theQueue.finish();
    cv::UMat uIn, uOut, uTmp;
    cv::ocl::convertFromImage(imgIn(), uIn);
    LOGD("loading texture data to OpenCV UMat costs %d ms", getTimeInterval(t));
    theQueue.enqueueReleaseGLObjects(&images);

    t = getTimeMs();
    //cv::blur(uIn, uOut, cv::Size(5, 5));
    cv::Laplacian(uIn, uTmp, CV_8U);
    // BUG FIX: was "cv:multiply" — a goto label plus an unqualified call that
    // only resolved through argument-dependent lookup. Qualify it properly.
    cv::multiply(uTmp, 10, uOut);
    cv::ocl::finish();
    LOGD("OpenCV processing costs %d ms", getTimeInterval(t));

    t = getTimeMs();
    cl::ImageGL imgOut(theContext, CL_MEM_WRITE_ONLY, GL_TEXTURE_2D, 0, tex);
    images.clear();
    images.push_back(imgOut);
    theQueue.enqueueAcquireGLObjects(&images);
    // Raw cl_mem view of the result; copy it into the GL texture via the
    // queue OpenCV used, so the copy is ordered after the processing above.
    cl_mem clBuffer = (cl_mem)uOut.handle(cv::ACCESS_READ);
    cl_command_queue q = (cl_command_queue)cv::ocl::Queue::getDefault().ptr();
    size_t offset = 0;
    size_t origin[3] = { 0, 0, 0 };
    // Explicit casts avoid int -> size_t narrowing in the brace initializer.
    size_t region[3] = { (size_t)w, (size_t)h, 1 };
    CV_Assert(clEnqueueCopyBufferToImage (q, clBuffer, imgOut(), offset, origin, region, 0, NULL, NULL) == CL_SUCCESS);
    theQueue.enqueueReleaseGLObjects(&images);
    cv::ocl::finish();
    LOGD("uploading results to texture costs %d ms", getTimeInterval(t));
}
示例#3
0
    void GLTexture::prepareImpl()
    {
        if( mUsage & TU_RENDERTARGET ) return;

        String baseName, ext;
        size_t pos = mName.find_last_of(".");
        baseName = mName.substr(0, pos);
        if( pos != String::npos )
            ext = mName.substr(pos+1);

        LoadedImages loadedImages = LoadedImages(new vector<Image>::type());

        if(mTextureType == TEX_TYPE_1D || mTextureType == TEX_TYPE_2D || 
            mTextureType == TEX_TYPE_3D)
        {

            do_image_io(mName, mGroup, ext, *loadedImages, this);


            // If this is a cube map, set the texture type flag accordingly.
            if ((*loadedImages)[0].hasFlag(IF_CUBEMAP))
                mTextureType = TEX_TYPE_CUBE_MAP;
            // If this is a volumetric texture set the texture type flag accordingly.
            if((*loadedImages)[0].getDepth() > 1)
                mTextureType = TEX_TYPE_3D;

        }
        else if (mTextureType == TEX_TYPE_CUBE_MAP)
        {
            if(getSourceFileType() == "dds")
            {
                // XX HACK there should be a better way to specify whether 
                // all faces are in the same file or not
                do_image_io(mName, mGroup, ext, *loadedImages, this);
            }
            else
            {
                vector<Image>::type images(6);
                ConstImagePtrList imagePtrs;
                static const String suffixes[6] = {"_rt", "_lf", "_up", "_dn", "_fr", "_bk"};

                for(size_t i = 0; i < 6; i++)
                {
                    String fullName = baseName + suffixes[i];
                    if (!ext.empty())
                        fullName = fullName + "." + ext;
                    // find & load resource data intro stream to allow resource
                    // group changes if required
                    do_image_io(fullName,mGroup,ext,*loadedImages,this);
                }
            }
        }
        else
            OGRE_EXCEPT( Exception::ERR_NOT_IMPLEMENTED, "**** Unknown texture type ****", "GLTexture::prepare" );

        mLoadedImages = loadedImages;
    }
示例#4
0
std::vector<std::vector<cv::Mat> > cubeMapsArrayToImages(const std::vector<CubeMap> &cubes)
{
    std::vector<std::vector<cv::Mat> > images (cubes.size());
    for(unsigned int i = 0; i < cubes.size(); ++i)
    {
        images[i] = cubes[i].exportIntoImages();
//        cv::flip(images[i][5], images[i][5], 0);
    }
    return images;
}
//==============================================================================
//==============================================================================
//==============================================================================
//==============================================================================
//==============================================================================
//==============================================================================
// Train one correlation patch model per reference landmark.
//
// data     - annotated training set (images + per-image landmark points)
// ref      - reference shape (one Point2f per landmark), interleaved into
//            `reference` as a 2n x 1 matrix
// psize    - patch size; ssize - search window size (training window is the sum)
// mirror   - also train on the mirrored image/points for each sample
// var, lambda, mu_init, nsamples - passed through to patch_model::train
// visi     - print progress / visualize while training
void 
patch_models::
train(ft_data &data,
      const vector<Point2f> &ref,
      const Size psize,
      const Size ssize,
      const bool mirror,
      const float var,
      const float lambda,
      const float mu_init,
      const int nsamples,
      const bool visi)
{
  //set reference shape
  int n = ref.size(); reference = Mat(ref).reshape(1,2*n);
  Size wsize = psize + ssize;

  //train each patch model in turn
  patches.resize(n);
  for(int i = 0; i < n; i++){
    if(visi)cout << "training patch " << i << "..." << endl;
    vector<Mat> images(0);
    for(int j = 0; j < data.n_images(); j++){
      Mat im = data.get_image(j,0);
//        imshow("im",im);
      vector<Point2f> p = data.get_points(j,false);
      Mat pt = Mat(p).reshape(1,2*n);
      // Build the 2x3 affine A from the similarity transform that aligns this
      // sample to the reference shape; the translation terms center the
      // training window on landmark i.
      Mat S = this->calc_simil(pt),A(2,3,CV_32F); 
      A.fl(0,0) = S.fl(0,0); A.fl(0,1) = S.fl(0,1);
      A.fl(1,0) = S.fl(1,0); A.fl(1,1) = S.fl(1,1);
      A.fl(0,2) = pt.fl(2*i  ) - 
    (A.fl(0,0) * (wsize.width-1)/2 + A.fl(0,1)*(wsize.height-1)/2);
      A.fl(1,2) = pt.fl(2*i+1) - 
    (A.fl(1,0) * (wsize.width-1)/2 + A.fl(1,1)*(wsize.height-1)/2);
      // WARP_INVERSE_MAP: A maps window coordinates into the image, so the
      // warp samples the patch around the landmark.
      Mat I; warpAffine(im,I,A,wsize,INTER_LINEAR+WARP_INVERSE_MAP);
      images.push_back(I);
      if(mirror){
    // Same extraction on the horizontally mirrored sample.
    im = data.get_image(j,1); 
    p = data.get_points(j,true);
    pt = Mat(p).reshape(1,2*n);
    S = this->calc_simil(pt);
    A.fl(0,0) = S.fl(0,0); A.fl(0,1) = S.fl(0,1);
    A.fl(1,0) = S.fl(1,0); A.fl(1,1) = S.fl(1,1);
    A.fl(0,2) = pt.fl(2*i  ) - 
      (A.fl(0,0) * (wsize.width-1)/2 + A.fl(0,1)*(wsize.height-1)/2);
    A.fl(1,2) = pt.fl(2*i+1) - 
      (A.fl(1,0) * (wsize.width-1)/2 + A.fl(1,1)*(wsize.height-1)/2);
    warpAffine(im,I,A,wsize,INTER_LINEAR+WARP_INVERSE_MAP);
    images.push_back(I);
      }
    }
    patches[i].train(images,psize,var,lambda,mu_init,nsamples,visi);
  }
}
示例#6
0
// Fetch the swap chain's backbuffer images and create a texture view plus a
// framebuffer for each. Returns false (after logging) on any Vulkan failure.
bool SwapChain::SetupSwapChainImages()
{
  _assert_(m_swap_chain_images.empty());

  uint32_t image_count;
  VkResult res =
      vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, nullptr);
  if (res != VK_SUCCESS)
  {
    LOG_VULKAN_ERROR(res, "vkGetSwapchainImagesKHR failed: ");
    return false;
  }

  std::vector<VkImage> images(image_count);
  res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count,
                                images.data());
  // This was previously only an assert, which silently ignored the failure in
  // release builds; handle it the same way as the count query above.
  if (res != VK_SUCCESS)
  {
    LOG_VULKAN_ERROR(res, "vkGetSwapchainImagesKHR failed: ");
    return false;
  }

  m_swap_chain_images.reserve(image_count);
  for (uint32_t i = 0; i < image_count; i++)
  {
    SwapChainImage image;
    image.image = images[i];

    // Create texture object, which creates a view of the backbuffer
    image.texture = Texture2D::CreateFromExistingImage(
        m_width, m_height, 1, 1, m_surface_format.format, VK_SAMPLE_COUNT_1_BIT,
        VK_IMAGE_VIEW_TYPE_2D, image.image);

    VkImageView view = image.texture->GetView();
    VkFramebufferCreateInfo framebuffer_info = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
                                                nullptr,
                                                0,
                                                m_render_pass,
                                                1,
                                                &view,
                                                m_width,
                                                m_height,
                                                1};

    res = vkCreateFramebuffer(g_vulkan_context->GetDevice(), &framebuffer_info, nullptr,
                              &image.framebuffer);
    if (res != VK_SUCCESS)
    {
      LOG_VULKAN_ERROR(res, "vkCreateFramebuffer failed: ");
      return false;
    }

    m_swap_chain_images.emplace_back(std::move(image));
  }

  return true;
}
示例#7
0
// Run the FFLD DPM detector on `img` with the OpenCV-LSVM xml model in
// `strModelFile`. Detections scoring above `threshold` are appended to
// `result`.
// Returns 0 when the model cannot be loaded, -1 when Patchwork initialization
// fails, otherwise the number of detections produced.
int CFFLD::detector( Mat img, string strModelFile, float threshold, vector<DetectionResult> & result )
{
	// For xml model;
    CvLSVMFilterObject** filters = 0;
    int kFilters = 0;
    int kComponents = 0;
    int* kPartFilters = 0;
    float* b = 0;
    float scoreThreshold = 0.f;
    int err_code = 0;

	string images(".");
	int padding = 12;
	int interval = 10;
	double overlap = 0.5;

	// load xml file
    err_code = loadModel( strModelFile.c_str(), &filters, &kFilters, &kComponents, &kPartFilters, &b, &scoreThreshold );

    if( err_code != 0 )
    {
    	cout<<"Invalid xml model file."<<endl;
    	return 0;
    }

 	Mixture mixture;
 	mixture.importFromOpenCVModel( kFilters, kComponents, kPartFilters, b, filters );

	JPEGImage image( img );
	HOGPyramid pyramid(image, padding, padding, interval);

	// Initialize the Patchwork class (dimensions rounded up to a multiple of 16)
	if ( !Patchwork::Init((pyramid.levels()[0].rows() - padding + 15) & ~15,
						  (pyramid.levels()[0].cols() - padding + 15) & ~15) ) 
	{
		cerr << "\nCould not initialize the Patchwork class" << endl;
		return -1;
	}

	mixture.cacheFilters();
	
	// Compute the detections
	vector<Detection> detections;
	ofstream out;
	string file;
	detect(mixture, image.width(), image.height(), pyramid, threshold, overlap, file, out,
		   images, detections, result );

	// BUG FIX: the function previously fell off the end of a non-void function
	// (undefined behavior). Report how many detections were produced.
	return static_cast<int>(result.size());
}
// Populate the given texture resource with a single constant RGB pixel.
void OnePixelMaterialGenerator::loadResource(Ogre::Resource* resource)
{
	Ogre::Texture* texture = static_cast<Ogre::Texture*>(resource);

	// Static storage: the stream wraps these bytes directly, so they must
	// outlive it (presumably the stream is non-owning here — confirm).
	static Ogre::uchar data[3] = {0xFF, 0x7F, 0x7F};
	Ogre::DataStreamPtr stream(new Ogre::MemoryDataStream(&data, 3, false, true));

	// A 1x1x1 image in 24-bit RGB, loaded straight into the texture.
	Ogre::Image image;
	image.loadRawData(stream, 1, 1, 1, Ogre::PF_R8G8B8);
	Ogre::ConstImagePtrList images({&image});
	texture->_loadImages(images);
}
示例#9
0
// Enumerate all top-level windows, then dump the collected window titles,
// image (executable) names, and class names into three text files, echoing
// each entry to the debugger output along the way.
void PrintToFiles()
{
	::EnumWindows(enumWindowsProc, NULL);

	std::string handleText;
	std::string entryText;

	std::ofstream names("window-names.txt");
	for (auto &entry : g_windows)
	{
		Nena::Converter::ToString(handleText, (UINT32) entry.first);
		entryText = entry.second;

		names << handleText.c_str() << " " << entryText << "\n";

		OutputDebugStringA("-window: ");
		OutputDebugStringA(entryText.c_str());
		OutputDebugStringA("\n");
	}
	names.close();

	std::ofstream images("window-images.txt");
	for (auto &entry : g_images)
	{
		Nena::Converter::ToString(handleText, (UINT32) entry.first);
		entryText = entry.second;

		images << handleText.c_str() << " " << entryText.c_str() << "\n";

		OutputDebugStringA("-image: ");
		OutputDebugStringA(entryText.c_str());
		OutputDebugStringA("\n");
	}
	images.close();

	std::ofstream classes("window-classes.txt");
	for (auto &entry : g_classes)
	{
		Nena::Converter::ToString(handleText, (UINT32) entry.first);
		entryText = entry.second;

		classes << handleText.c_str() << " " << entryText.c_str() << "\n";

		OutputDebugStringA("-class: ");
		OutputDebugStringA(entryText.c_str());
		OutputDebugStringA("\n");
	}
	classes.close();
}
示例#10
0
void Renderer::loadTextures()
{
    {
        QVector<QString> files;
        files << "dirt.png" << "sand.png" << "grass.png" << "mountain.png";
        QVector<QImage> images(files.count(), QImage(QSize(700,700), QImage::Format_RGBA8888));

        for (int i=0; i<images.count(); i++)
        {
            QImage &image = images[i];
            if (!image.load(gDefaultPathTextures + files[i]))
                qDebug() << "Error loading texture " << gDefaultPathTextures + files[i];
        }

        int imageSize = images.first().width();	//for now, assume all images are the same width and height

        glGenTextures(1, &mTextures.terrain);
        qDebug() << "I am " << mTextures.terrain;
        glBindTexture(GL_TEXTURE_2D_ARRAY, mTextures.terrain);

        GLenum format = GL_BGRA;

        int mipLevels = 8;
        glTexStorage3D(GL_TEXTURE_2D_ARRAY, mipLevels, GL_RGBA8, imageSize, imageSize, 4);

        glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0,
                        0, 0, 0,
                        imageSize, imageSize, 1,
                        format, GL_UNSIGNED_BYTE, images[0].bits());

        glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0,
                        0, 0, 1,
                        imageSize, imageSize, 1,
                        format, GL_UNSIGNED_BYTE, images[1].bits());

        glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0,
                        0, 0, 2,
                        imageSize, imageSize, 1,
                        format, GL_UNSIGNED_BYTE, images[2].bits());

        glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0,
                        0, 0, 3,
                        imageSize, imageSize, 1,
                        format, GL_UNSIGNED_BYTE, images[3].bits());

        glGenerateMipmap(GL_TEXTURE_2D_ARRAY);
        glSamplerParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
        glSamplerParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    }
}
示例#11
0
// Resolve `count` symbol names to addresses, either within a single Mach-O
// image or, when image == NULL, by scanning every image loaded by dyld.
// On return, values[i] holds the resolved address (0 when not found).
static void MSFindSymbols(MSImageRef image, size_t count, const char *names[], void *values[]) {
    // NOTE: variable-length array — non-standard C++, but supported by the
    // compilers this code targets.
    MSSymbolData items[count];

    // Seed the lookup table with the requested names and cleared results.
    for (size_t index(0); index != count; ++index) {
        MSSymbolData &item(items[index]);

        item.name_ = names[index];
        item.type_ = 0;
        item.sect_ = 0;
        item.desc_ = 0;
        item.value_ = 0;
    }

    if (image != NULL)
        MSMachONameList_(image, items, count);
    else {
        size_t remain(count);

        // No target image: walk every loaded image until all symbols resolve.
        for (uint32_t image(0), images(_dyld_image_count()); image != images; ++image) {
            //fprintf(stderr, ":: %s\n", _dyld_get_image_name(image));

            ssize_t result(MSMachONameList_(_dyld_get_image_header(image), items, count));
            if (result == -1)
                continue;

            // XXX: maybe avoid this happening at all? a flag to NSMachONameList_?
            // MSMachONameList_ clears the name on unresolved entries; restore it
            // so later images can still try, and count it back into `result`.
            for (size_t index(0); index != count; ++index) {
                MSSymbolData &item(items[index]);
                if (item.name_ == NULL && item.value_ == 0) {
                    ++result;
                    item.name_ = names[index];
                }
            }

            // `count - result` symbols were newly resolved by this image.
            remain -= count - result;
            if (remain == 0)
                break;
        }
    }

    // Copy the resolved addresses out, tagging Thumb symbols on ARM.
    for (size_t index(0); index != count; ++index) {
        MSSymbolData &item(items[index]);
        uintptr_t value(item.value_);
#ifdef __arm__
        if ((item.desc_ & N_ARM_THUMB_DEF) != 0)
            value |= 0x00000001;
#endif
        values[index] = reinterpret_cast<void *>(value);
    }
}
// Retrieve the swapchain's VkImages and wrap each in a non-owning
// image::image_type (the swapchain itself owns the underlying images).
std::vector<image::image_type> get_images(swapchain_type &swapchain) {
	uint32_t count;
	VKCHECK(vkGetSwapchainImagesKHR(
		internal::get_instance(*internal::get_parent(swapchain)),
		internal::get_instance(swapchain), &count, NULL));
	std::vector<VkImage> images(count);
	if (count != 0) {
		// BUG FIX: &images.front() is undefined on an empty vector; data() is
		// always valid.
		VKCHECK(vkGetSwapchainImagesKHR(internal::get_instance(*internal::get_parent(swapchain)), internal::get_instance(swapchain), &count, images.data()));
	}
	std::vector<image::image_type> converted_images;
	converted_images.reserve(images.size());
	for (VkImage image : images) {
		converted_images.push_back(image::image_type(image, internal::get_parent(swapchain), false, VK_IMAGE_TYPE_2D, get_format(swapchain), 1, 1));
	}
	// Plain return enables NRVO; `return std::move(...)` pessimizes it.
	return converted_images;
}
void CGLWorkspace::StopAnimations()
{
	QListIterator<CGLImage*> images(iImages);
	images.toBack ();
	while (images.hasPrevious())
	{
		CGLImage* obj = images.previous();
		if(obj)
		{
			if(obj->GetAnimation())
			{
				obj->GetAnimation()->Stop();
			}
		}
	}
}
示例#14
0
// Build the directory-listing page model for m_resourceUrl: one navigation
// item per subdirectory (including "..") followed by one image item per
// matching file.
void PageModel::buildDirectoryModel(){
    IwsConfig *cfg = IwsConfig::getInstance();

    m_css=cfg->dirViewCss();

    QDir dir(cfg->webRoot()+m_resourceUrl);
    if (dir.exists()){
        m_title = QString("Images in : ")+m_resourceUrl;
        dir.setFilter(QDir::Dirs);

        // Subdirectories first: each becomes a navigation item. "." is always
        // skipped; ".." is skipped only at the web root.
        QFileInfoList dirInfoList=  dir.entryInfoList(QDir::Dirs ,QDir::Name);
        for (int i=0; i < dirInfoList.size() ; i++){
            QString curDirName = dirInfoList.at(i).fileName();
            if(curDirName!="." && !(m_resourceUrl=="/" && curDirName=="..")){
                Item item;
                QString iconFileName;
                if (curDirName==".."){
                    iconFileName = "back-folder.png";
                    item.m_description ="back";
                }
                else{
                    iconFileName ="folder.png";
                    item.m_description = dirInfoList.at(i).fileName();
                }
                // NOTE(review): QString::remove() mutates m_resourceUrl in
                // place (strips a trailing '/'), so the member itself changes
                // on the first call here — confirm this side effect is intended.
                item.m_href = m_resourceUrl.remove(QRegExp("/$"))+"/"+dirInfoList.at(i).fileName();
                item.m_imgSrc = QString("?icon-file=")+iconFileName;
                item.m_itemType = Item::ItemNav;
                m_items.append(item);
            }
        }
        // Then image files in the same directory, filtered by the configured
        // image-name patterns.
        QDir images(cfg->webRoot()+m_resourceUrl);
        images.setFilter(QDir::Files);

        images.setNameFilters(cfg->imageFileFilter());
        QFileInfoList imageInfoList=  images.entryInfoList(QDir::Files,QDir::Name);
        for (int i=0; i < imageInfoList.size() ; i++){

            Item item;
            item.m_description = imageInfoList.at(i).fileName();
            item.m_href = "?single-image-view="+m_resourceUrl.remove(QRegExp("/$"))+"/"+imageInfoList.at(i).fileName();
            item.m_imgSrc ="?thumbnail="+m_resourceUrl.remove(QRegExp("/$"))+"/"+imageInfoList.at(i).fileName();
            item.m_itemType = Item::ItemImage;
            m_items.append(item);
        }
    }
}
示例#15
0
文件: Map.cpp 项目: stevens-crag/crag
// Compose two maps: the result sends x to secondMap(firstMap(x)).
// The intermediate alphabets must agree, i.e. firstMap's range size must equal
// secondMap's domain size; otherwise an error is reported.
Map composition( const Map& firstMap, const Map& secondMap )
{
  if (secondMap.domainSize() != firstMap.rangeSize())
    msgs::error("tried to compose differing domain and range in"
	  " ::composition(const Map&, const Map&)");
  
  // Map each generating image of firstMap through secondMap.
  // (size_t index fixes the old signed/unsigned loop comparison.)
  vector<Word> images(firstMap.generatingImages().size());
  for (size_t i = 0; i < images.size(); i++) {
    Word w = firstMap.generatingImages(i);
    images[i] = secondMap.imageOf( w );
  }
  
  return Map(firstMap.domainAlphabet(),secondMap.rangeAlphabet(),images);
}
示例#16
0
// Return the (lazily created) image list used by the configuration listbook.
// On first call, builds the list at the DPI-scaled icon size, loading each
// icon either from the active theme directory or from the embedded resources.
wxImageList& Pcsx2App::GetImgList_Config()
{
	ScopedPtr<wxImageList>& images( GetResourceCache().ConfigImages );
	if( !images )
	{
		int image_size = MSW_GetDPIScale() * g_Conf->Listbook_ImageSize;
		images = new wxImageList(image_size, image_size);
		wxFileName mess;
		bool useTheme = (g_Conf->DeskTheme != L"default");

		if( useTheme )
		{
			wxDirName theme( PathDefs::GetThemes() + g_Conf->DeskTheme );
			mess = theme.ToString();
		}

		wxImage img;

		// GCC Specific: wxT() macro is required when using string token pasting.  For some
		// reason L generates syntax errors. >_<
		// TODO: This can be fixed with something like
		// #define L_STR(x) L_STR2(x)
		// #define L_STR2(x) L ## x
		// but it's probably best to do it everywhere at once. wxWidgets
		// recommends not to use it since v2.9.0.

		// Loads icon "ConfigIcon_<name>" (theme file or embedded fallback),
		// rescales it to the DPI-scaled size, and records its list index in
		// m_Resources->ImageId.Config.<name>.
		#undef  FancyLoadMacro
		#define FancyLoadMacro( name ) \
		{ \
			EmbeddedImage<res_ConfigIcon_##name> temp; \
			LoadImageAny(img, useTheme, mess, L"ConfigIcon_" wxT(#name), temp); \
			img.Rescale(image_size, image_size, wxIMAGE_QUALITY_HIGH); \
			m_Resources->ImageId.Config.name = images->Add(img); \
		}

		FancyLoadMacro( Paths );
		FancyLoadMacro( Plugins );
		FancyLoadMacro( Gamefixes );
		FancyLoadMacro( Speedhacks );
		FancyLoadMacro( MemoryCard );
		FancyLoadMacro( Video );
		FancyLoadMacro( Cpu );
		FancyLoadMacro( Appearance );
	}
	return *images;
}
示例#17
0
// Tear down the explorer: clear the singleton handle, then delete the owned
// scroll bar and every owned image.
CImageExplorer::~CImageExplorer()
{
	instance = NULL;
	// delete on a null pointer is a no-op, so no guard is needed.
	delete iScrollBar;
	// Delete images back to front (mirrors the traversal order used elsewhere).
	QListIterator<CImage*> images(iImages);
	images.toBack();
	while (images.hasPrevious())
		delete images.previous();
}
// Exercise the image table: insert a fully populated record (plus ten
// uniquely named extras), read the first one back by id, and verify every
// field round-trips unchanged.
void	ImageRepoTablesTest::testImageRepoTable() {
	debug(LOG_DEBUG, DEBUG_LOG, 0, "testImageRepoTable() begin");
	ImageTable	images(database);
	ImageRecord	imageinfo1;
	imageinfo1.filename = "testfile.fits";
	imageinfo1.project = "testproject";
	imageinfo1.created = time(NULL);
	imageinfo1.width = 360;
	imageinfo1.height = 240;
	imageinfo1.depth = 1;
	imageinfo1.pixeltype = 8;
	imageinfo1.exposuretime = 47.11;
	imageinfo1.temperature = -47.11;
	imageinfo1.purpose = "light";
	imageinfo1.bayer = "RGGB";
	imageinfo1.observation = "1962-02-14T12:34:56.777";
	long	id = images.add(imageinfo1);
	// Add ten more rows with distinct filenames/uuids so byid() must pick the
	// right record among several.
	for (int count = 0; count < 10; count++) {
		imageinfo1.filename = stringprintf("test%d.fits", count);
		imageinfo1.uuid = astro::UUID();
		images.add(imageinfo1);
	}
	debug(LOG_DEBUG, DEBUG_LOG, 0, "added object %ld", id);
	ImageRecord imageinfo2 = images.byid(id);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "filenames: '%s' ?= '%s'",
		imageinfo1.filename.c_str(),
		imageinfo2.filename.c_str());
	// Restore the filename the loop above overwrote before comparing.
	imageinfo1.filename = "testfile.fits";
	CPPUNIT_ASSERT(imageinfo1.filename == imageinfo2.filename);
	CPPUNIT_ASSERT(imageinfo1.project == imageinfo2.project);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "created1 = %d, created2 = %d",
		imageinfo1.created, imageinfo2.created);
	CPPUNIT_ASSERT(imageinfo1.created == imageinfo2.created);
	CPPUNIT_ASSERT(imageinfo1.width == imageinfo2.width);
	CPPUNIT_ASSERT(imageinfo1.height == imageinfo2.height);
	CPPUNIT_ASSERT(imageinfo1.depth == imageinfo2.depth);
	CPPUNIT_ASSERT(imageinfo1.pixeltype == imageinfo2.pixeltype);
	CPPUNIT_ASSERT(imageinfo1.exposuretime == imageinfo2.exposuretime);
	CPPUNIT_ASSERT(imageinfo1.temperature == imageinfo2.temperature);
	CPPUNIT_ASSERT(imageinfo1.purpose == imageinfo2.purpose);
	CPPUNIT_ASSERT(imageinfo1.bayer == imageinfo2.bayer);
	CPPUNIT_ASSERT(imageinfo1.observation == imageinfo2.observation);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "testImageRepoTable() end");
}
示例#19
0
std::vector<VkImage> getSwapchainImages (const DeviceInterface&			vkd,
										 VkDevice						device,
										 VkSwapchainKHR					swapchain)
{
	deUint32	numImages	= 0;

	VK_CHECK(vkd.getSwapchainImagesKHR(device, swapchain, &numImages, DE_NULL));

	if (numImages > 0)
	{
		std::vector<VkImage>	images	(numImages);

		VK_CHECK(vkd.getSwapchainImagesKHR(device, swapchain, &numImages, &images[0]));

		return images;
	}
	else
		return std::vector<VkImage>();
}
示例#20
0
// Paint the explorer: apply the scroll translation, draw every image that
// intersects the visible vertical range, then the border and the selection
// highlight of the active image.
void CImageExplorer::paint(QPainter *painter)
{
	Translate();
	// (Removed `imageHeightSum`, which was accumulated but never used.)
	QListIterator<CImage*> images(iImages);
	while (images.hasNext())
	{
		CImage *cimage = images.next();
		// Cull images entirely above or below the viewport.
		if(cimage->GetPosition().y()+cimage->GetSize().y()>0 
			&& cimage->GetPosition().y()<=iSize.y())
		{
			cimage->paint(painter);
		}
	}
	DrawBorderRect(painter);
	if(iActiveImage){
		iActiveImage->DrawSelection(painter);
	}
}
// Exercise the metadata table: insert one image record, then attach three
// FITS-style key/value/comment rows to it in sequence.
void	ImageRepoTablesTest::testMetadataTable() {
	debug(LOG_DEBUG, DEBUG_LOG, 0, "testMetadataTable() begin");
	ImageTable	images(database);
	ImageRecord	imageinfo1;
	imageinfo1.filename = "metatest.fits";
	imageinfo1.project = "testproject";
	imageinfo1.created = time(NULL);
	imageinfo1.width = 360;
	imageinfo1.height = 240;
	imageinfo1.depth = 1;
	imageinfo1.pixeltype = 8;
	imageinfo1.exposuretime = 47.11;
	imageinfo1.temperature = -47.11;
	imageinfo1.purpose = "light";
	imageinfo1.bayer = "RGGB";
	imageinfo1.observation = "1962-02-14T12:34:56.777";
	imageinfo1.uuid = astro::UUID();
	long	id = images.add(imageinfo1);
	// Metadata rows reference the image row inserted above via its id;
	// the -1 means "no record id yet" for the metadata row itself.
	MetadataTable	metadata(database);
	MetadataRecord	meta(-1, id);

	meta.seqno = 0;
	meta.key = "EXPTIME";
	meta.value = "47.11";
	meta.comment = "exposure time in seconds";
	metadata.add(meta);

	meta.seqno = 1;
	meta.key = "BAYER";
	meta.value = "'RGGB'";
	meta.comment = "Bayer matrix layout";
	metadata.add(meta);

	meta.seqno = 2;
	meta.key = "SET-TEMP";
	meta.value = "-50.000";
	meta.comment = "set temperature";
	metadata.add(meta);

	debug(LOG_DEBUG, DEBUG_LOG, 0, "testMetadataTable() end");
}
示例#22
0
    // Shift-map inpainting: fill the masked-out region of `src` by finding the
    // dominant self-similar transforms of the image, warping the image under
    // each of them, and stitching the candidates together via photomontage.
    // `mask` is non-zero on pixels to keep; the result is written to `dst` in
    // dst's existing type. (`cn` is the channel count from the enclosing
    // template.)
    static void shiftMapInpaint(const Mat &src, const Mat &mask, Mat &dst,
        const int nTransform = 60, const int psize = 8)
    {
        /** Preparing input **/
        cv::Mat img;
        src.convertTo( img, CV_32F );
        // Zero out the unknown region so it cannot influence transform search.
        img.setTo(0, 255 - mask);

        /** ANNF computation **/
        std::vector <Matx33f> transforms( nTransform );
        xphotoInternal::dominantTransforms(img,
                    transforms, nTransform, psize);

        /** Warping **/
        std::vector <Mat> images( nTransform + 1 ); // source image transformed with transforms
        std::vector <Mat> masks( nTransform + 1 );  // definition domain for current shift

        // Slightly grow the hole so candidate shifts also cover its border.
        Mat_<uchar> invMask = 255 - mask;
        dilate(invMask, invMask, Mat(), Point(-1,-1), 2);

        // Slot 0 holds the untransformed image/mask; slots 1..n the warps.
        img.copyTo( images[0] );
        mask.copyTo( masks[0] );

        for (int i = 0; i < nTransform; ++i)
        {
            warpPerspective( images[0], images[i + 1], transforms[i],
                             images[0].size(), INTER_LINEAR );

            warpPerspective( masks[0], masks[i + 1], transforms[i],
                             masks[0].size(), INTER_NEAREST);
            // A warped pixel is only usable where it lands inside the hole area.
            masks[i + 1] &= invMask;
        }

        /** Stitching **/
        Mat photomontageResult;
        xphotoInternal::Photomontage < cv::Vec <float, cn> >( images, masks )
            .assignResImage(photomontageResult);

        /** Writing result **/
        photomontageResult.convertTo( dst, dst.type() );
    }
// Hit-test the images from topmost (back of the list) downwards and forward
// the press to the first image under the cursor, raising and selecting it.
void CGLWorkspace::mousePressEvent(QMouseEvent *event){
	iActiveImage = NULL;

	QListIterator<CGLImage*> it(iImages);
	it.toBack();
	while (it.hasPrevious())
	{
		CGLImage *image = it.previous();
		if (!image->IsPointOnObject(event->x(), event->y()))
			continue;

		iActiveImage = image;
		// Raise the hit image to the top of the stacking order.
		iImages.move(iImages.indexOf(image), iImages.count() - 1);
		image->mousePressEvent(event);
		SelectImage(image);
		UpdateTexture();
		//updateGL ();
		break;
	}
	CInfoPanel::GetInstance()->SetWorkspaceInfoView();
}
示例#24
0
// Place a newly added image in the free layout: when the workspace already
// holds at least one image, anchor the new one at the workspace's top-left
// content corner, then stretch it to fill the content area. Always succeeds.
bool CFreeLayout::PrepareNewImageGeometry(CImage *image)
{
    QListIterator<CImage*> images(iParentWorkspace->GetImages());
    images.toBack();
    if(images.hasPrevious())
    {
        // (Removed the unused `im` local; only the existence of a previous
        // image matters here.)
        image->SetPosition(QPointF(iParentWorkspace->GetPosition().x()+iParentWorkspace->GetBorders().left,
                                   iParentWorkspace->GetPosition().y()+iParentWorkspace->GetBorders().top));
    }

    //if(image->GetSize().x() > iParentWorkspace->GetSize().x())
    {
        image->SetGeometry(image->GetPosition().x(),
                           image->GetPosition().y(),
                           iParentWorkspace->GetSize().x()-iParentWorkspace->GetBorders().right-iParentWorkspace->GetBorders().left,
                           iParentWorkspace->GetSize().y()-iParentWorkspace->GetBorders().top-iParentWorkspace->GetBorders().bottom);
    }
    return true;
}
示例#25
0
文件: AppRes.cpp 项目: tsiru/pcsx2
// Return the (lazily created) 32x32 image list used by the configuration
// listbook, loading each icon from the active theme or from the embedded
// resources on first use.
wxImageList& Pcsx2App::GetImgList_Config()
{
    ScopedPtr<wxImageList>& images( GetResourceCache().ConfigImages );
    if( !images )
    {
        images = new wxImageList(32, 32);
        wxFileName mess;
        bool useTheme = (g_Conf->DeskTheme != L"default");

        if( useTheme )
        {
            wxDirName theme( PathDefs::GetThemes() + g_Conf->DeskTheme );
            mess = theme.ToString();
        }

        wxImage img;

        // GCC Specific: wxT() macro is required when using string token pasting.  For some
        // reason L generates syntax errors. >_<

        // Loads icon "ConfigIcon_<name>" (theme file or embedded fallback) at
        // the configured listbook size and records its list index in
        // m_Resources->ImageId.Config.<name>.
#undef  FancyLoadMacro
#define FancyLoadMacro( name ) \
		{ \
			EmbeddedImage<res_ConfigIcon_##name> temp( g_Conf->Listbook_ImageSize, g_Conf->Listbook_ImageSize ); \
			m_Resources->ImageId.Config.name = images->Add( LoadImageAny( \
				img, useTheme, mess, L"ConfigIcon_" wxT(#name), temp ) \
			); \
		}

        FancyLoadMacro( Paths );
        FancyLoadMacro( Plugins );
        FancyLoadMacro( Gamefixes );
        FancyLoadMacro( Speedhacks );
        FancyLoadMacro( MemoryCard );
        FancyLoadMacro( Video );
        FancyLoadMacro( Cpu );
        FancyLoadMacro( Appearance );
    }
    return *images;
}
// Resize/move the workspace and rescale every contained image so it keeps its
// relative position and size within the workspace's inner (border-less) area.
void CGLWorkspace::SetGeometry(float x, float y, float w, float h)
{
	WorkLog.write("Begin: CGLWorkspace::SetGeometry");	
	QPointF oldPos = iPosition;
	// First call: seed the cached inner size from the current geometry so the
	// scale ratios below start at 1.
	if(iLastInnerHeight ==0)
	{
		iLastInnerWidth = iSize.x()-GetBorders().left-GetBorders().right;
		iLastInnerHeight = iSize.y()-GetBorders().top-GetBorders().bottom;
	}
	int posxdif = x - oldPos.x();
	int posydif = y - oldPos.y();
	int innerWidth = w - GetBorders().left-GetBorders().right;
	int innerHeight = h- GetBorders().top-GetBorders().bottom;
	// Ratios between the new and previous inner areas drive the rescale.
	float sizeRatioX = (float)(innerWidth)/((float)iLastInnerWidth);
	float sizeRatioY = (float)(innerHeight)/((float)iLastInnerHeight);
	iLastInnerWidth = innerWidth;
	iLastInnerHeight = innerHeight;

	CGLObject::SetGeometry(x,y,w,h);

	// Rescale each image: positions are taken relative to the inner origin,
	// scaled, shifted by the workspace move, then mapped back.
	QListIterator<CGLImage*> images(iImages);
	images.toFront();
	while(images.hasNext())
	{
		CGLImage* im = images.next();
		QPointF pos(im->GetPosition().x()-GetBorders().left,im->GetPosition().y()-GetBorders().top);
		QPointF size = im->GetSize();
		QPointF newPos;
		newPos.setX(GetBorders().left+pos.x()*sizeRatioX+posxdif*sizeRatioX);
		newPos.setY(GetBorders().top+pos.y()*sizeRatioY+posydif*sizeRatioY);
		QPointF newSize;
		newSize.setX(size.x()*sizeRatioX);
		newSize.setY(size.y()*sizeRatioY);
		im->SetGeometry(newPos.x(),newPos.y(), newSize.x(),newSize.y());
	}
	UpdateTexture();
	// BUG FIX: the closing log line previously said "Begin:" again.
	WorkLog.write("End: CGLWorkspace::SetGeometry");	
}
示例#27
0
文件: _tiff.cpp 项目: qbbian/imread
// Read a MetaMorph STK (stacked TIFF) file: a single TIFF directory whose
// strips repeat once per plane. Returns one Image per plane.
std::auto_ptr<image_list> STKFormat::read_multi(byte_source* src, ImageFactory* factory) {
    shift_source moved(src);
    stk_extend ext;
    tiff_warn_error twe;

    tif_holder t = read_client(&moved);
    std::auto_ptr<image_list> images(new image_list);
    const uint32 h = tiff_get<uint32>(t, TIFFTAG_IMAGELENGTH);
    const uint32 w = tiff_get<uint32>(t, TIFFTAG_IMAGEWIDTH);

    const uint16 nr_samples = tiff_get<uint16>(t, TIFFTAG_SAMPLESPERPIXEL, 1);
    const uint16 bits_per_sample = tiff_get<uint16>(t, TIFFTAG_BITSPERSAMPLE, 8);
    // -1 signals a single-channel image to the factory.
    const int depth = nr_samples > 1 ? nr_samples : -1;

    const int strip_size = TIFFStripSize(t.tif);
    const int n_strips = TIFFNumberOfStrips(t.tif);
    // UIC3Tag carries the plane count in STK files.
    int32_t n_planes;
    void* data;
    TIFFGetField(t.tif, UIC3Tag, &n_planes, &data);
    // Total compressed size of one plane's strips; used as the per-plane
    // offset into the file.
    int raw_strip_size = 0;
    for (int st = 0; st != n_strips; ++st) {
        raw_strip_size += TIFFRawStripSize(t.tif, st);
    }
    for (int z = 0; z < n_planes; ++z) {
        // Monkey patch strip offsets. This is very hacky, but it seems to work!
        moved.shift(z * raw_strip_size);

        std::auto_ptr<Image> output(factory->create(bits_per_sample, h, w, depth));
        // Decode this plane's strips back-to-back into the image buffer.
        uint8_t* start = output->rowp_as<uint8_t>(0);
        for (int st = 0; st != n_strips; ++st) {
            const int offset = TIFFReadEncodedStrip(t.tif, st, start, strip_size);
            if (offset == -1) {
                throw CannotReadError("imread.imread._tiff.stk: Error reading strip");
            }
            start += offset;
        }
        images->push_back(output);
    }
    return images;
}
示例#28
0
void CImageExplorer::mousePressEvent(QMouseEvent *event)
{
	// Hit-test images from the back of the list (topmost) towards the front;
	// only the first image under the cursor receives the press.
	iActiveImage = NULL;
	for (int idx = iImages.count() - 1; idx >= 0; --idx)
	{
		CImage* hit = iImages.at(idx);
		if (!hit->IsPointOnObject(event->x(), event->y()))
			continue;
		// Selection bookkeeping is delegated to SelectImage(); the direct
		// iActiveImage / iImages.move(...) approach stays disabled:
		//iActiveImage = hit;
		//iImages.move(iImages.indexOf(hit),iImages.count()-1);
		hit->mousePressEvent(event);
		SelectImage(hit);
		CWidget::GetInstance()->paint();
		break;
	}
	// Refresh the info panel regardless of whether anything was hit.
	if (CInfoPanel::GetInstance())
	{
		CInfoPanel::GetInstance()->SetImageExplorerInfoView();
	}
}
// Renders every workspace image into the workspace's offscreen FBO texture.
// Skips the (expensive) re-render when nothing changed and no snapshot was
// requested; otherwise draws each image's border, content, selection marker
// and overlays, then optionally saves the texture to disk.
void CGLWorkspace::DrawToTexture()
{
	WorkLog.write("Begin: CGLWorkspace::DrawToTexture");
	// Fast path: texture is up to date and no snapshot pending.
	if(!iWorkspaceChanged && !iSaveSnapshot){
		WorkLog.write("End: CGLWorkspace::DrawToTexture");
		return;
	}
	iWorkspaceChanged = false;

	//Set up rendering to texture
	//render to ext buffer (texture behind)
	iActualTextureFBO->bind();


	//glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, iFBO);
	// clear buffers
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	//glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
	//draw 3D texture slice to texture
	QListIterator<CGLImage*> images(iImages);
	int imageHeightSum = 0;
	glDisable(GL_BLEND);

	while (images.hasNext())
	{
		glColor4f(1,1,1,1);
		CGLImage *im = images.next();
		imageHeightSum += im->GetSize().y();
		//im->PrepareActualTexture();
		//glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, iFBO);
		// Projection is rebuilt for every image; viewport covers the whole
		// offscreen texture each time.
		glViewport(0, 0,iActualTextureInfo.width, iActualTextureInfo.height);					// Reset The Current Viewport
		glMatrixMode(GL_PROJECTION);						// Select The Projection Matrix
		glLoadIdentity();							// Reset The Projection Matrix
		//map to texture as to workspace (in widget)

		// Ortho bounds map widget-space workspace coordinates (minus borders)
		// onto the texture; Y is flipped via the widget height.
		int left = GetPosition().x()+GetBorders().left;
		int right = 	GetSize().x()+GetPosition().x()-GetBorders().right;
		int bottom=CGLWidget::GetInstance()->GetSize().y()-GetPosition().y()-GetSize().y()+GetBorders().bottom;
		int top = CGLWidget::GetInstance()->GetSize().y()-GetPosition().y()-GetBorders().top;
		glOrtho(left, 
			right ,
			bottom,
			top,
			0.1, 90);

		glMatrixMode(GL_MODELVIEW); 
		glLoadIdentity();						// Reset The Modelview Matrix
		im->Translate();
		glColor4f(1,1,1,1);
		// NOTE(review): blending is disabled here (again — also before the
		// loop) yet a blend func is set; presumably one of the Draw* calls
		// re-enables GL_BLEND internally — confirm.
		glDisable(GL_BLEND);
		glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA);
		im->DrawBorderRect();
		im->DrawInnerRect();

		// Selection highlight only for the active image.
		if(iActiveImage==im)
		{
			im->DrawSelection();
		}
		glColor4f(1,1,1,1);
		im->DrawImage();
		im->DrawManipulation();
		im->DrawIcons();
		im->DrawSlider();
		im->DrawTexts();
	}
	// One-shot snapshot: save the freshly rendered texture, then clear the flag.
	if(iSaveSnapshot)
	{
		iSaveSnapshot = false;
		SaveTexture(*iSaveSnapshotFileName);

	}
	//glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
	iActualTextureFBO->release();
	WorkLog.write("End: CGLWorkspace::DrawToTexture");
	return;
}
示例#30
0
/** Grabbing worker loop: captures frames from the Ensenso camera and publishes
 *  point clouds and/or stereo image pairs to whichever signals have listeners.
 *  Runs until running_ becomes false; NxLib errors are reported and the loop
 *  continues. NxLib command order (Capture -> RectifyImages -> CollectPattern
 *  -> ComputeDisparityMap -> ComputePointMap) is significant. */
void
pcl::EnsensoGrabber::processGrabbing ()
{
  bool continue_grabbing = running_;
  while (continue_grabbing)
  {
    try
    {
      // Publish cloud / images
      // Skip the capture entirely when nobody is listening.
      if (num_slots<sig_cb_ensenso_point_cloud> () > 0 || num_slots<sig_cb_ensenso_images> () > 0 || num_slots<sig_cb_ensenso_point_cloud_images> () > 0)
      {
        pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
        boost::shared_ptr<PairOfImages> images (new PairOfImages);

        // Record a frame event for the FPS counter.
        fps_mutex_.lock ();
        frequency_.event ();
        fps_mutex_.unlock ();

        NxLibCommand (cmdCapture).execute ();
        // Timestamp of the raw left image; stamps both images and the cloud.
        double timestamp;
        camera_[itmImages][itmRaw][itmLeft].getBinaryDataInfo (0, 0, 0, 0, 0, &timestamp);

        // Gather images
        if (num_slots<sig_cb_ensenso_images> () > 0 || num_slots<sig_cb_ensenso_point_cloud_images> () > 0)
        {
          // Rectify images
          NxLibCommand (cmdRectifyImages).execute ();
          int width, height, channels, bpe;
          bool isFlt, collected_pattern = false;

          try  // Try to collect calibration pattern, if not possible, publish RAW images instead
          {
            NxLibCommand collect_pattern (cmdCollectPattern);
            collect_pattern.parameters ()[itmBuffer].set (false);  // Do NOT store the pattern into the buffer!
            collect_pattern.execute ();
            collected_pattern = true;
          }
          catch (const NxLibException &ex)
          {
            // We failed to collect the pattern but the RAW images are available!
          }

          // NOTE(review): the two branches below are identical except for the
          // image node (itmWithOverlay vs itmRaw) — candidate for a shared
          // helper; left duplicated here to keep this doc-only change safe.
          if (collected_pattern)
          {
            // Pattern found: publish the rectified images with the pattern overlay.
            camera_[itmImages][itmWithOverlay][itmLeft].getBinaryDataInfo (&width, &height, &channels, &bpe, &isFlt, 0);
            images->first.header.stamp = images->second.header.stamp = getPCLStamp (timestamp);
            images->first.width = images->second.width = width;
            images->first.height = images->second.height = height;
            // NOTE(review): buffers are sized width*height*sizeof(float)
            // regardless of channels/bpe — presumably an upper bound large
            // enough for every encoding; confirm against getBinaryData.
            images->first.data.resize (width * height * sizeof(float));
            images->second.data.resize (width * height * sizeof(float));
            images->first.encoding = images->second.encoding = getOpenCVType (channels, bpe, isFlt);

            camera_[itmImages][itmWithOverlay][itmLeft].getBinaryData (images->first.data.data (), images->first.data.size (), 0, 0);
            camera_[itmImages][itmWithOverlay][itmRight].getBinaryData (images->second.data.data (), images->second.data.size (), 0, 0);
          }
          else
          {
            // No pattern: fall back to the raw stereo pair.
            camera_[itmImages][itmRaw][itmLeft].getBinaryDataInfo (&width, &height, &channels, &bpe, &isFlt, 0);
            images->first.header.stamp = images->second.header.stamp = getPCLStamp (timestamp);
            images->first.width = images->second.width = width;
            images->first.height = images->second.height = height;
            images->first.data.resize (width * height * sizeof(float));
            images->second.data.resize (width * height * sizeof(float));
            images->first.encoding = images->second.encoding = getOpenCVType (channels, bpe, isFlt);

            camera_[itmImages][itmRaw][itmLeft].getBinaryData (images->first.data.data (), images->first.data.size (), 0, 0);
            camera_[itmImages][itmRaw][itmRight].getBinaryData (images->second.data.data (), images->second.data.size (), 0, 0);
          }
        }

        // Gather point cloud
        if (num_slots<sig_cb_ensenso_point_cloud> () > 0 || num_slots<sig_cb_ensenso_point_cloud_images> () > 0)
        {
          // Stereo matching task
          NxLibCommand (cmdComputeDisparityMap).execute ();

          // Convert disparity map into XYZ data for each pixel
          NxLibCommand (cmdComputePointMap).execute ();

          // Get info about the computed point map and copy it into a std::vector
          std::vector<float> pointMap;
          int width, height;
          camera_[itmImages][itmPointMap].getBinaryDataInfo (&width, &height, 0, 0, 0, 0);
          camera_[itmImages][itmPointMap].getBinaryData (pointMap, 0);

          // Copy point cloud and convert in meters
          cloud->header.stamp = getPCLStamp (timestamp);
          cloud->points.resize (height * width);
          cloud->width = width;
          cloud->height = height;
          cloud->is_dense = false;  // point map may contain NaNs where matching failed

          // Copy data in point cloud (and convert milimeters in meters)
          // pointMap is a flat x,y,z triple per pixel.
          for (size_t i = 0; i < pointMap.size (); i += 3)
          {
            cloud->points[i / 3].x = pointMap[i] / 1000.0;
            cloud->points[i / 3].y = pointMap[i + 1] / 1000.0;
            cloud->points[i / 3].z = pointMap[i + 2] / 1000.0;
          }
        }

        // Publish signals
        // Precedence: combined signal wins; otherwise cloud-only, then images-only.
        if (num_slots<sig_cb_ensenso_point_cloud_images> () > 0)
          point_cloud_images_signal_->operator () (cloud, images);
        else if (num_slots<sig_cb_ensenso_point_cloud> () > 0)
          point_cloud_signal_->operator () (cloud);
        else if (num_slots<sig_cb_ensenso_images> () > 0)
          images_signal_->operator () (images);
      }
      continue_grabbing = running_;
    }
    catch (NxLibException &ex)
    {
      // Report and keep grabbing; a single failed frame should not kill the loop.
      ensensoExceptionHandling (ex, "processGrabbing");
    }
  }
}