// Make colors from file
//
// Builds the block-id -> colour lookup table (colorData) from a terrain
// atlas image: most entries sample one tile of the atlas via
// GetColorFromImage(column, row, image); a few use fixed colours or alias
// another entry.  If the image cannot be loaded, the table is left
// untouched and the function returns early.
void DataValues::MakeColor(STRING file)
{
    Image * image = new Image();
    // Bail out (and free the image) if the atlas file cannot be loaded.
    if (!image->Load(file)) { delete image; return; }
    // Clear the whole table so any id without an explicit entry stays zeroed.
    // NOTE(review): memset assumes Color is trivially copyable — confirm.
    memset(&colorData, 0, sizeof(Color) * DATAVALUES_AMOUNT);
    // --- Basic terrain blocks ---
    colorData[AIR] = Color(255,255,255,0);
    colorData[STONE] = GetColorFromImage(1, 0, image);
    colorData[GRASS] = GetColorFromImage(0, 0, image);
    colorData[DIRT] = GetColorFromImage(3, 0, image);
    colorData[COBBLESTONE] = GetColorFromImage(0, 1, image);
    colorData[WOOD] = GetColorFromImage(4, 0, image);
    colorData[SAPLING] = Color(120,120,120,0);
    colorData[BEDROCK] = GetColorFromImage(1, 1, image);
    // Flowing and still liquids share a colour.
    colorData[WATER] = GetColorFromImage(13, 12, image);
    colorData[STATIONARYWATER] = colorData[WATER];
    colorData[LAVA] = GetColorFromImage(13, 14, image);
    colorData[STATIONARYLAVA] = colorData[LAVA];
    colorData[SAND] = GetColorFromImage(2, 1, image);
    colorData[GRAVEL] = GetColorFromImage(3, 1, image);
    colorData[GOLDORE] = GetColorFromImage(0, 2, image);
    colorData[IRONORE] = GetColorFromImage(1, 2, image);
    colorData[COALORE] = GetColorFromImage(2, 2, image);
    colorData[LOG] = GetColorFromImage(4, 1, image);
    colorData[LEAVES] = GetColorFromImage(4, 3, image);
    colorData[GLASS] = GetColorFromImage(1, 3, image);
    colorData[WOOL] = GetColorFromImage(0, 4, image);
    colorData[YELLOWFLOWER] = GetColorFromImage(13, 0, image);
    colorData[REDROSE] = GetColorFromImage(12, 0, image);
    colorData[GOLDBLOCK] = GetColorFromImage(7, 1, image);
    colorData[IRONBLOCK] = GetColorFromImage(6, 1, image);
    colorData[DOUBLESLAB_STONE] = GetColorFromImage(6, 0, image);
    colorData[SLAB_STONE] = colorData[DOUBLESLAB_STONE];
    colorData[BRICK] = GetColorFromImage(7, 0, image);
    colorData[TNT] = GetColorFromImage(8, 0, image);
    colorData[MOSSYCOBBLESTONE] = GetColorFromImage(4, 2, image);
    colorData[OBSIDIAN] = GetColorFromImage(5, 2, image);
    colorData[TORCH] = GetColorFromImage(0, 5, image);
    // Fire has no atlas tile; use a fixed translucent orange.
    colorData[FIRE] = Color(255,170,30,200);
    colorData[WOODENSTAIRS] = GetColorFromImage(4, 0, image);
    colorData[CHEST] = GetColorFromImage(9, 1, image);
    colorData[DIAMONDORE] = GetColorFromImage(2, 3, image);
    colorData[DIAMONDBLOCK] = GetColorFromImage(8, 1, image);
    colorData[WORKBENCH] = GetColorFromImage(11, 2, image);
    colorData[CROPS] = GetColorFromImage(15, 5, image);
    colorData[SOIL] = GetColorFromImage(6, 5, image);
    colorData[FURNACE] = GetColorFromImage(12, 2, image);
    colorData[BURNINGFURNACE] = colorData[FURNACE];
    colorData[SIGNPOST] = GetColorFromImage(4, 0, image);
    colorData[WOODENDOOR] = GetColorFromImage(1, 6, image);
    colorData[LADDER] = GetColorFromImage(3, 5, image);
    colorData[MINECARTTRACK] = GetColorFromImage(0, 8, image);
    colorData[COBBLESTONESTAIRS] = GetColorFromImage(0, 1, image);
    colorData[IRONDOOR] = GetColorFromImage(2, 6, image);
    colorData[REDSTONEORE] = GetColorFromImage(3, 3, image);
    colorData[GLOWINGREDSTONEORE] = colorData[REDSTONEORE];
    colorData[REDSTONETORCHON] = GetColorFromImage(3, 6, image);
    colorData[REDSTONETORCHOFF] = GetColorFromImage(3, 7, image);
    colorData[SNOW] = GetColorFromImage(2, 4, image);
    colorData[ICE] = GetColorFromImage(3, 4, image);
    colorData[SNOWBLOCK] = GetColorFromImage(2, 4, image);
    colorData[CACTUS] = GetColorFromImage(5, 4, image);
    colorData[CLAY] = GetColorFromImage(8, 4, image);
    colorData[REED] = GetColorFromImage(9, 4, image);
    colorData[JUKEBOX] = GetColorFromImage(11, 4, image);
    colorData[PUMPKIN] = GetColorFromImage(6, 6, image);
    colorData[BLOODSTONE] = GetColorFromImage(7, 6, image);
    colorData[SLOWSAND] = GetColorFromImage(8, 6, image);
    colorData[LIGHTSTONE] = GetColorFromImage(9, 6, image);
    colorData[PORTAL] = GetColorFromImage(0, 14, image); // Temporarily
    colorData[JACKOLANTERN] = colorData[PUMPKIN];
    colorData[LAPIZLAZULIORE] = GetColorFromImage(0, 10, image);
    colorData[LAPIZLAZULIBLOCK] = GetColorFromImage(0, 9, image);
    colorData[DISPENSER] = colorData[FURNACE];
    colorData[SANDSTONE] = GetColorFromImage(0, 11, image);
    colorData[NOTEBLOCK] = GetColorFromImage(10, 4, image);
    colorData[CAKE] = GetColorFromImage(9, 7, image);
    colorData[BED] = GetColorFromImage(6, 8, image);
    // colorData[NEWORE] = GetColorFromImage(6, 2, image);
    // WOOLs color — per-dye wool variants; white and unknown fall back to plain WOOL.
    colorData[WOOLWHITE] = colorData[WOOL];
    colorData[WOOLORANGE] = GetColorFromImage(2, 13, image);
    colorData[WOOLMAGENTA] = GetColorFromImage(2, 12, image);
    colorData[WOOLLIGHTBLUE] = GetColorFromImage(2, 11, image);
    colorData[WOOLYELLOW] = GetColorFromImage(2, 10, image);
    colorData[WOOLLIMEGREEN] = GetColorFromImage(2, 9, image);
    colorData[WOOLPINK] = GetColorFromImage(2, 8, image);
    colorData[WOOLGRAY] = GetColorFromImage(2, 7, image);
    colorData[WOOLLIGHTGRAY] = GetColorFromImage(1, 14, image);
    colorData[WOOLCYAN] = GetColorFromImage(1, 13, image);
    colorData[WOOLBLUE] = GetColorFromImage(1, 11, image);
    colorData[WOOLBROWN] = GetColorFromImage(1, 10, image);
    colorData[WOOLGREEN] = GetColorFromImage(1, 9, image);
    colorData[WOOLRED] = GetColorFromImage(1, 8, image);
    colorData[WOOLBLACK] = GetColorFromImage(1, 7, image);
    colorData[WOOLPURPLE] = GetColorFromImage(1, 12, image);
    colorData[WOOLUNKNOWN] = colorData[WOOL];
    // Log color — species variants of LOG.
    colorData[LOGNORMAL] = colorData[LOG];
    colorData[LOGREDWOOD] = GetColorFromImage(4, 7, image);
    colorData[LOGBIRCH] = GetColorFromImage(5, 7, image);
    // Leaf color — species variants of LEAVES.
    colorData[LEAFNORMAL] = colorData[LEAVES];
    colorData[LEAFREDWOOD] = GetColorFromImage(4, 8, image);
    colorData[LEAFBIRCH] = GetColorFromImage(5, 8, image);
    // Slab color — slab variants alias their full-block colours.
    colorData[SLABSTONE] = colorData[STONE];
    colorData[SLABSAND] = colorData[SAND];
    colorData[SLABWOOD] = colorData[WOOD];
    colorData[SLABCOBBLE] = colorData[COBBLESTONE];
    delete image;
}
// Rebuilds the recent-projects list UI from the editor settings.
//
// Reads every "projects/*" and "favorite_projects/*" property, applies the
// current name/path filter, sorts the entries (favorites first), and creates
// one row widget per project inside scroll_childs.  Also refreshes the
// enabled state of the erase/open/run buttons from the current selection.
void ProjectManager::_load_recent_projects()
{
    ProjectListFilter::FilterOption filter_option = project_filter->get_filter_option();
    String search_term = project_filter->get_search_term();
    // Throw away all previously built rows before rebuilding.
    while(scroll_childs->get_child_count()>0) {
        memdelete( scroll_childs->get_child(0));
    }
    List<PropertyInfo> properties;
    EditorSettings::get_singleton()->get_property_list(&properties);
    Color font_color = get_color("font_color","Tree");
    List<ProjectItem> projects;
    List<ProjectItem> favorite_projects;
    // Collect project entries from the editor settings.
    for(List<PropertyInfo>::Element *E=properties.front();E;E=E->next()) {
        String _name = E->get().name;
        // Only project entries are of interest here.
        if (!_name.begins_with("projects/") && !_name.begins_with("favorite_projects/"))
            continue;
        String path = EditorSettings::get_singleton()->get(_name);
        // Path filter: skip entries whose path doesn't contain the search term.
        if (filter_option == ProjectListFilter::FILTER_PATH && search_term!="" && path.findn(search_term)==-1)
            continue;
        String project = _name.get_slice("/",1);
        String conf=path.plus_file("engine.cfg");
        bool favorite = (_name.begins_with("favorite_projects/"))?true:false;
        // Use the newest of engine.cfg / .fscache modification times for sorting.
        uint64_t last_modified = 0;
        if (FileAccess::exists(conf))
            last_modified = FileAccess::get_modified_time(conf);
        String fscache = path.plus_file(".fscache");
        if (FileAccess::exists(fscache)) {
            uint64_t cache_modified = FileAccess::get_modified_time(fscache);
            if ( cache_modified > last_modified )
                last_modified = cache_modified;
        }
        ProjectItem item(project, path, conf, last_modified, favorite);
        if (favorite)
            favorite_projects.push_back(item);
        else
            projects.push_back(item);
    }
    projects.sort();
    favorite_projects.sort();
    // Drop from the normal list any project that is also a favorite
    // (iterate with a saved `next` because erase invalidates the element).
    for(List<ProjectItem>::Element *E=projects.front();E;) {
        List<ProjectItem>::Element *next = E->next();
        if (favorite_projects.find(E->get()) != NULL)
            projects.erase(E->get());
        E=next;
    }
    // Prepend favorites (back-to-front so their sorted order is preserved).
    for(List<ProjectItem>::Element *E=favorite_projects.back();E;E=E->prev()) {
        projects.push_front(E->get());
    }
    Ref<Texture> favorite_icon = get_icon("Favorites","EditorIcons");
    // Build one row widget per project.
    for(List<ProjectItem>::Element *E=projects.front();E;E=E->next()) {
        ProjectItem &item = E->get();
        String project = item.project;
        String path = item.path;
        String conf = item.conf;
        bool is_favorite = item.favorite;
        Ref<ConfigFile> cf = memnew( ConfigFile );
        Error err = cf->load(conf);
        // Skip (with an error report) projects whose engine.cfg can't load.
        ERR_CONTINUE(err!=OK);
        String project_name="Unnamed Project";
        if (cf->has_section_key("application","name")) {
            project_name = cf->get_value("application","name");
        }
        // Name filter is applied after the config is read.
        if (filter_option==ProjectListFilter::FILTER_NAME && search_term!="" && project_name.findn(search_term)==-1)
            continue;
        // Load the project's icon if configured, falling back to the default.
        Ref<Texture> icon;
        if (cf->has_section_key("application","icon")) {
            String appicon = cf->get_value("application","icon");
            if (appicon!="") {
                Image img;
                Error err = img.load(appicon.replace_first("res://",path+"/"));
                if (err==OK) {
                    img.resize(64,64);
                    Ref<ImageTexture> it = memnew( ImageTexture );
                    it->create_from_image(img);
                    icon=it;
                }
            }
        }
        if (icon.is_null()) {
            icon=get_icon("DefaultProjectIcon","EditorIcons");
        }
        String main_scene;
        if (cf->has_section_key("application","main_scene")) {
            main_scene = cf->get_value("application","main_scene");
        }
        // Row container; metadata is read back by the panel callbacks.
        HBoxContainer *hb = memnew( HBoxContainer );
        hb->set_meta("name",project);
        hb->set_meta("main_scene",main_scene);
        hb->set_meta("favorite",is_favorite);
        hb->connect("draw",this,"_panel_draw",varray(hb));
        hb->connect("input_event",this,"_panel_input",varray(hb));
        // Favorite toggle (dimmed when not a favorite).
        VBoxContainer *favorite_box = memnew( VBoxContainer );
        TextureButton *favorite = memnew( TextureButton );
        favorite->set_normal_texture(favorite_icon);
        if (!is_favorite)
            favorite->set_opacity(0.2);
        favorite->set_v_size_flags(SIZE_EXPAND);
        favorite->connect("pressed",this,"_favorite_pressed",varray(hb));
        favorite_box->add_child(favorite);
        hb->add_child(favorite_box);
        // Project icon.
        TextureFrame *tf = memnew( TextureFrame );
        tf->set_texture(icon);
        hb->add_child(tf);
        // Name + path column (ec is a 1px spacer above the title).
        VBoxContainer *vb = memnew(VBoxContainer);
        hb->add_child(vb);
        Control *ec = memnew( Control );
        ec->set_custom_minimum_size(Size2(0,1));
        vb->add_child(ec);
        Label *title = memnew( Label(project_name) );
        title->add_font_override("font",get_font("large","Fonts"));
        title->add_color_override("font_color",font_color);
        vb->add_child(title);
        Label *fpath = memnew( Label(path) );
        vb->add_child(fpath);
        fpath->set_opacity(0.5);
        fpath->add_color_override("font_color",font_color);
        scroll_childs->add_child(hb);
    }
    scroll->set_v_scroll(0);
    // Buttons only make sense with at least one selection; "run" additionally
    // requires the single selected project to have a main scene.
    erase_btn->set_disabled(selected_list.size()<1);
    open_btn->set_disabled(selected_list.size()<1);
    run_btn->set_disabled(selected_list.size()<1 || (selected_list.size()==1 && single_selected_main==""));
}
int main( int argc, char* argv[]) { // On déclare notre pointeur sur SourceVideo VideoSource *src; CvVideoWriter *writer = 0; int isColor = 1; int fps = 30; // or 30 int frameW = 640; // 744 for firewire cameras int frameH = 480; // 480 for firewire cameras writer=cvCreateVideoWriter("out.avi",CV_FOURCC('P','I','M','1'), fps,cvSize(frameW,frameH),isColor); if( argc > 1 ) { // Initialisation : fichier vidéo string path(argv[1]); src = new VideoFile( path, (argc > 2) ); } else { // Initialisation : webcam src = new Camera( 0 ); } // Initialisation du flux vidéo try { src->open(); } catch( Exception &e ) { // Si une exception se produit, on l'affiche et on quitte. cout << e.what() << endl; delete src; return 10; } // Si tout va bien, on affiche les informations du flux vidéo. cout << src->getInfos() << endl; cvNamedWindow( "video", CV_WINDOW_AUTOSIZE ); Image img; char key = 'a'; // Début de la mesure du frame rate debut_mesure = getTimeMillis(); while( key != 'q' ) { try { src->getFrame( img ); } catch(Exception &e) { cout << "\n" << e.what() << endl; break; } /*CvScalar scalaire; scalaire.val[0] = 120; scalaire.val[1] = scalaire.val[2] = 0; img.colorFilter(scalaire);*/ img.colorPaint2(top_left,bottom_right); if (bottom_right.x < 720) { bottom_right.x++; } if (bottom_right.y < 576) { bottom_right.y++; } if (top_left.x > 0) { top_left.x--; } if (top_left.y > 0) { top_left.y--; } //img.colorBlacknWhite(); cvShowImage( "video", img ); cvWriteFrame(writer,img); key = cvWaitKey( 10 ); // Affichage du frame rate cout << "\rFrame Rate : " << setw(5); cout << left << setprecision(4); cout << calculFrameRate() << " FPS" << flush; } cout << endl; cvDestroyWindow( "video" ); delete src; return 0; }
// Scheduler callback run on the render thread: consumes one entry from the
// queue of images decoded by the async loading thread, turns it into a
// Texture2D, caches it, and fires the caller's completion callback.
//
// @param dt  Scheduler delta time (unused by the logic here).
void TextureCache::addImageAsyncCallBack(float dt)
{
    // the image is generated in loading thread
    std::deque<ImageInfo*> *imagesQueue = _imageInfoQueue;
    _imageInfoMutex.lock();
    if (imagesQueue->empty()) {
        _imageInfoMutex.unlock();
    } else {
        // Pop one item under the lock, then release it before the heavy work.
        ImageInfo *imageInfo = imagesQueue->front();
        imagesQueue->pop_front();
        _imageInfoMutex.unlock();
        AsyncStruct *asyncStruct = imageInfo->asyncStruct;
        Image *image = imageInfo->image;
        const std::string& filename = asyncStruct->filename;
        Texture2D *texture = nullptr;
        if (image) {
            // generate texture in render thread
            texture = new (std::nothrow) Texture2D();
            texture->initWithImage(image);
#if CC_ENABLE_CACHE_TEXTURE_DATA
            // cache the texture file name
            VolatileTextureMgr::addImageTexture(texture, filename);
#endif
            // cache the texture. retain it, since it is added in the map
            _textures.insert( std::make_pair(filename, texture) );
            texture->retain();
            texture->autorelease();
        } else {
            // No decoded image: the texture may already be in the cache
            // (e.g. a duplicate request) — look it up instead.
            auto it = _textures.find(asyncStruct->filename);
            if(it != _textures.end())
                texture = it->second;
        }
        // Notify the original requester (texture may be nullptr on failure).
        if (asyncStruct->callback) {
            asyncStruct->callback(texture);
        }
        // Drop the decode-thread's reference to the raw image data.
        if(image) {
            image->release();
        }
        delete asyncStruct;
        delete imageInfo;
        // When the last pending request completes, stop scheduling this callback.
        --_asyncRefCount;
        if (0 == _asyncRefCount) {
            Director::getInstance()->getScheduler()->unschedule(schedule_selector(TextureCache::addImageAsyncCallBack), this);
        }
    }
}
// Composites i2 over a copy of i1 at the origin and returns the result.
Image sImageAdd::Make() const
{
    Image composed = i1;
    Over(composed, Point(0, 0), i2, i2.GetSize());
    return composed;
}
/** Element-wise addition: returns a new image whose pixel data is the sum
    of this image's data and image2's data; all other properties are copied
    from this image. */
Image operator+(const Image& image2)
{
    return Image(*this, _data + image2.data());
}
// Draws the layer's current image with D3D10.
//
// Supports three image formats: CAIRO_SURFACE and REMOTE_IMAGE_BITMAP are
// uploaded (once, then cached as backend data on the Image) as an RGB(A)
// texture; PLANAR_YCBCR uses three per-plane textures and a YCbCr shader
// technique.  The method bails out early whenever a required resource is
// missing or belongs to a different device.
void ImageLayerD3D10::RenderLayer()
{
  ImageContainer *container = GetContainer();
  if (!container) {
    return;
  }
  // Holds the container's current image locked for the duration of the draw.
  AutoLockImage autoLock(container);
  Image *image = autoLock.GetImage();
  if (!image) {
    return;
  }
  gfxIntSize size = mScaleMode == SCALE_NONE ? image->GetSize() : mScaleToSize;
  SetEffectTransformAndOpacity();
  ID3D10EffectTechnique *technique;
  if (image->GetFormat() == Image::CAIRO_SURFACE || image->GetFormat() == Image::REMOTE_IMAGE_BITMAP) {
    bool hasAlpha = false;
    if (image->GetFormat() == Image::REMOTE_IMAGE_BITMAP) {
      RemoteBitmapImage *remoteImage = static_cast<RemoteBitmapImage*>(image);
      // Upload the bitmap to a texture once; subsequent frames reuse the
      // cached backend data stored on the image.
      if (!image->GetBackendData(LayerManager::LAYERS_D3D10)) {
        nsAutoPtr<TextureD3D10BackendData> dat = new TextureD3D10BackendData();
        dat->mTexture = DataToTexture(device(), remoteImage->mData, remoteImage->mStride, remoteImage->mSize);
        if (dat->mTexture) {
          device()->CreateShaderResourceView(dat->mTexture, NULL, getter_AddRefs(dat->mSRView));
          image->SetBackendData(LayerManager::LAYERS_D3D10, dat.forget());
        }
      }
      hasAlpha = remoteImage->mFormat == RemoteImageData::BGRA32;
    } else {
      CairoImage *cairoImage = static_cast<CairoImage*>(image);
      if (!cairoImage->mSurface) {
        return;
      }
      // Same lazy-upload pattern for cairo surfaces.
      if (!image->GetBackendData(LayerManager::LAYERS_D3D10)) {
        nsAutoPtr<TextureD3D10BackendData> dat = new TextureD3D10BackendData();
        dat->mTexture = SurfaceToTexture(device(), cairoImage->mSurface, cairoImage->mSize);
        if (dat->mTexture) {
          device()->CreateShaderResourceView(dat->mTexture, NULL, getter_AddRefs(dat->mSRView));
          image->SetBackendData(LayerManager::LAYERS_D3D10, dat.forget());
        }
      }
      hasAlpha = cairoImage->mSurface->GetContentType() == gfxASurface::CONTENT_COLOR_ALPHA;
    }
    TextureD3D10BackendData *data = static_cast<TextureD3D10BackendData*>(image->GetBackendData(LayerManager::LAYERS_D3D10));
    if (!data) {
      return;
    }
    // The cached texture must live on our device (it may have been created
    // by another device, e.g. after a device reset).
    nsRefPtr<ID3D10Device> dev;
    data->mTexture->GetDevice(getter_AddRefs(dev));
    if (dev != device()) {
      return;
    }
    // Pick the technique by alpha presence and filtering mode.
    if (hasAlpha) {
      if (mFilter == gfxPattern::FILTER_NEAREST) {
        technique = effect()->GetTechniqueByName("RenderRGBALayerPremulPoint");
      } else {
        technique = effect()->GetTechniqueByName("RenderRGBALayerPremul");
      }
    } else {
      if (mFilter == gfxPattern::FILTER_NEAREST) {
        technique = effect()->GetTechniqueByName("RenderRGBLayerPremulPoint");
      } else {
        technique = effect()->GetTechniqueByName("RenderRGBLayerPremul");
      }
    }
    effect()->GetVariableByName("tRGB")->AsShaderResource()->SetResource(data->mSRView);
    effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector(
      ShaderConstantRectD3D10(
        (float)0,
        (float)0,
        (float)size.width,
        (float)size.height)
      );
  } else if (image->GetFormat() == Image::PLANAR_YCBCR) {
    PlanarYCbCrImage *yuvImage = static_cast<PlanarYCbCrImage*>(image);
    if (!yuvImage->mBufferSize) {
      return;
    }
    // Lazily allocate the three plane textures on first use.
    if (!yuvImage->GetBackendData(LayerManager::LAYERS_D3D10)) {
      AllocateTexturesYCbCr(yuvImage);
    }
    PlanarYCbCrD3D10BackendData *data = static_cast<PlanarYCbCrD3D10BackendData*>(yuvImage->GetBackendData(LayerManager::LAYERS_D3D10));
    if (!data) {
      return;
    }
    nsRefPtr<ID3D10Device> dev;
    data->mYTexture->GetDevice(getter_AddRefs(dev));
    if (dev != device()) {
      return;
    }
    // TODO: At some point we should try to deal with mFilter here, you don't
    // really want to use point filtering in the case of NEAREST, since that
    // would also use point filtering for Chroma upsampling. Where most likely
    // the user would only want point filtering for final RGB image upsampling.
    technique = effect()->GetTechniqueByName("RenderYCbCrLayer");
    effect()->GetVariableByName("tY")->AsShaderResource()->SetResource(data->mYView);
    effect()->GetVariableByName("tCb")->AsShaderResource()->SetResource(data->mCbView);
    effect()->GetVariableByName("tCr")->AsShaderResource()->SetResource(data->mCrView);
    /*
     * Send 3d control data and metadata to NV3DVUtils
     */
    if (GetNv3DVUtils()) {
      // NOTE(review): `mode` is left uninitialized if mStereoMode holds an
      // unexpected value — confirm the enum is exhaustive here.
      Nv_Stereo_Mode mode;
      switch (yuvImage->mData.mStereoMode) {
      case STEREO_MODE_LEFT_RIGHT:
        mode = NV_STEREO_MODE_LEFT_RIGHT;
        break;
      case STEREO_MODE_RIGHT_LEFT:
        mode = NV_STEREO_MODE_RIGHT_LEFT;
        break;
      case STEREO_MODE_BOTTOM_TOP:
        mode = NV_STEREO_MODE_BOTTOM_TOP;
        break;
      case STEREO_MODE_TOP_BOTTOM:
        mode = NV_STEREO_MODE_TOP_BOTTOM;
        break;
      case STEREO_MODE_MONO:
        mode = NV_STEREO_MODE_MONO;
        break;
      }
      // Send control data even in mono case so driver knows to leave stereo mode.
      GetNv3DVUtils()->SendNv3DVControl(mode, true, FIREFOX_3DV_APP_HANDLE);
      if (yuvImage->mData.mStereoMode != STEREO_MODE_MONO) {
        // Dst resource is optional
        GetNv3DVUtils()->SendNv3DVMetaData((unsigned int)yuvImage->mSize.width,
                                           (unsigned int)yuvImage->mSize.height, (HANDLE)(data->mYTexture), (HANDLE)(NULL));
      }
    }
    effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector(
      ShaderConstantRectD3D10(
        (float)0,
        (float)0,
        (float)size.width,
        (float)size.height)
      );
    // Map only the picture rectangle of the Y plane into texture coordinates.
    effect()->GetVariableByName("vTextureCoords")->AsVector()->SetFloatVector(
      ShaderConstantRectD3D10(
        (float)yuvImage->mData.mPicX / yuvImage->mData.mYSize.width,
        (float)yuvImage->mData.mPicY / yuvImage->mData.mYSize.height,
        (float)yuvImage->mData.mPicSize.width / yuvImage->mData.mYSize.width,
        (float)yuvImage->mData.mPicSize.height / yuvImage->mData.mYSize.height)
       );
  }
  // Remember whether we must restore default texture coords after drawing.
  bool resetTexCoords = image->GetFormat() == Image::PLANAR_YCBCR;
  image = nsnull;
  // Release the image lock before touching the GPU pipeline.
  autoLock.Unlock();
  technique->GetPassByIndex(0)->Apply(0);
  device()->Draw(4, 0);
  if (resetTexCoords) {
    effect()->GetVariableByName("vTextureCoords")->AsVector()->
      SetFloatVector(ShaderConstantRectD3D10(0, 0, 1.0f, 1.0f));
  }
  // NOTE(review): `image` was set to nsnull above, so this passes null to
  // NotifyPaintedImage — confirm whether the null argument is intended.
  GetContainer()->NotifyPaintedImage(image);
}
// GLUT display callback for the KFusion synthetic-scene test.
//
// Renders three rows of three images: (1) a ground-truth raycast of the
// reference volume, (2) the tracking inputs/results, and (3) a raycast of
// the integrated model from the tracked pose.  Also feeds the synthetic
// depth into the KFusion pipeline (Track / Integrate / Raycast) and logs
// per-stage timings through Stats.  Statement order matters: each GPU stage
// is followed by cudaDeviceSynchronize() so the Stats samples are accurate.
void display(void)
{
    // Whether the last Track succeeded; integration is skipped on failure.
    static bool integrate = true;
    const uint2 imageSize = kfusion.configuration.inputSize;
    const double start = Stats.start();
    // Row 1: raycast the ground-truth reference volume from the synthetic pose.
    renderInput(vertex.getDeviceImage(), normal.getDeviceImage(), depth.getDeviceImage(), reference,
                toMatrix4( trans * rot * preTrans ) * getInverseCameraMatrix(kfusion.configuration.camera),
                kfusion.configuration.nearPlane, kfusion.configuration.farPlane, kfusion.configuration.stepSize(), 0.01 );
    cudaDeviceSynchronize();
    Stats.sample("ground raycast");
    Stats.sample("ground copy");
    glRasterPos2i(0,0);
    glDrawPixels(vertex);
    glRasterPos2i(imageSize.x, 0);
    glDrawPixels(normal);
    glRasterPos2i(imageSize.x * 2, 0);
    glDrawPixels(depth);
    Stats.sample("ground draw");
    // Feed the synthetic depth map into the pipeline.
    kfusion.setDepth( depth.getDeviceImage() );
    cudaDeviceSynchronize();
    const double track_start = Stats.sample("process depth");
    // Skip tracking on the very first frame (nothing integrated yet).
    if(counter > 1){
        integrate = kfusion.Track();
        cudaDeviceSynchronize();
        Stats.sample("track");
    }
    renderTrackResult(rgb.getDeviceImage(), kfusion.reduction);
    cudaDeviceSynchronize();
    Stats.sample("track render");
    Stats.sample("track copy");
    // Only integrate and re-raycast when tracking succeeded.
    if(integrate){
        kfusion.Integrate();
        cudaDeviceSynchronize();
        Stats.sample("integration");
        kfusion.Raycast();
        cudaDeviceSynchronize();
        Stats.sample("raycast");
        vertex = kfusion.vertex;
        normal = kfusion.normal;
        Stats.sample("raycast get");
    }
    // Row 2: model raycast outputs and the tracking-result visualization.
    glRasterPos2i(0,imageSize.y * 1);
    glDrawPixels(vertex);
    glRasterPos2i(imageSize.x, imageSize.y * 1);
    glDrawPixels(normal);
    glRasterPos2i(2 * imageSize.x, imageSize.y * 1);
    glDrawPixels(rgb);
    Stats.sample("track draw");
    Stats.sample("total track", Stats.get_time() - track_start, PerfStats::TIME);
    // Row 3: raycast the integrated volume from the current tracked pose.
    renderInput(vertex.getDeviceImage(), normal.getDeviceImage(), depth.getDeviceImage(), kfusion.integration,
                kfusion.pose * getInverseCameraMatrix(kfusion.configuration.camera),
                kfusion.configuration.nearPlane, kfusion.configuration.farPlane, kfusion.configuration.stepSize(),
                0.7 * kfusion.configuration.mu );
    cudaDeviceSynchronize();
    Stats.sample("view raycast");
    Stats.sample("view copy");
    glRasterPos2i(0,imageSize.y * 2);
    glDrawPixels(vertex);
    glRasterPos2i(imageSize.x, imageSize.y * 2);
    glDrawPixels(normal);
    glRasterPos2i(imageSize.x * 2, imageSize.y * 2);
    glDrawPixels(depth);
    Stats.sample("view draw");
    Stats.sample("events");
    Stats.sample("total all", Stats.get_time() - start, PerfStats::TIME);
    // Dump and reset the timing statistics every 30 frames.
    if(counter % 30 == 0){
        Stats.print();
        Stats.reset();
        cout << endl;
    }
    ++counter;
    printCUDAError();
    glutSwapBuffers();
}
// Loads this GL texture's pixel data.
//
// Render targets are created directly; otherwise the image file(s) named by
// mName are loaded from the resource system.  1D/2D/3D textures load a
// single image (promoting the type for cubemap-flagged or volumetric
// images); cube maps load either one .dds file or six per-face files built
// from the base name plus the _rt/_lf/_up/_dn/_fr/_bk suffixes.
// Throws ERR_INVALIDPARAMS for a name with no extension and
// ERR_NOT_IMPLEMENTED for unsupported texture types.
void GLTexture::loadImpl()
{
    if( mUsage & TU_RENDERTARGET )
    {
        createRenderTexture();
    }
    else
    {
        // Split the resource name into base name and extension; the
        // extension selects the image codec below.
        String baseName, ext;
        size_t pos = mName.find_last_of(".");
        if( pos == String::npos )
            OGRE_EXCEPT( Exception::ERR_INVALIDPARAMS,
                "Unable to load image file '"+ mName + "' - invalid extension.",
                "GLTexture::loadImpl" );
        baseName = mName.substr(0, pos);
        ext = mName.substr(pos+1);
        if(mTextureType == TEX_TYPE_1D || mTextureType == TEX_TYPE_2D || mTextureType == TEX_TYPE_3D)
        {
            Image img;
            // find & load resource data intro stream to allow resource
            // group changes if required
            DataStreamPtr dstream = ResourceGroupManager::getSingleton().openResource(
                mName, mGroup, true, this);
            img.load(dstream, ext);
            // If this is a cube map, set the texture type flag accordingly.
            if (img.hasFlag(IF_CUBEMAP))
                mTextureType = TEX_TYPE_CUBE_MAP;
            // If this is a volumetric texture set the texture type flag accordingly.
            if(img.getDepth() > 1)
                mTextureType = TEX_TYPE_3D;
            // Call internal _loadImages, not loadImage since that's external and
            // will determine load status etc again
            ConstImagePtrList imagePtrs;
            imagePtrs.push_back(&img);
            _loadImages( imagePtrs );
        }
        else if (mTextureType == TEX_TYPE_CUBE_MAP)
        {
            if(StringUtil::endsWith(getName(), ".dds"))
            {
                // XX HACK there should be a better way to specify whether
                // all faces are in the same file or not
                Image img;
                // find & load resource data intro stream to allow resource
                // group changes if required
                DataStreamPtr dstream = ResourceGroupManager::getSingleton().openResource(
                    mName, mGroup, true, this);
                img.load(dstream, ext);
                // Call internal _loadImages, not loadImage since that's external and
                // will determine load status etc again
                ConstImagePtrList imagePtrs;
                imagePtrs.push_back(&img);
                _loadImages( imagePtrs );
            }
            else
            {
                // Six separate images, one per cube face, named by suffix.
                std::vector<Image> images(6);
                ConstImagePtrList imagePtrs;
                static const String suffixes[6] = {"_rt", "_lf", "_up", "_dn", "_fr", "_bk"};
                for(size_t i = 0; i < 6; i++)
                {
                    String fullName = baseName + suffixes[i] + "." + ext;
                    // find & load resource data intro stream to allow resource
                    // group changes if required
                    DataStreamPtr dstream = ResourceGroupManager::getSingleton().openResource(
                        fullName, mGroup, true, this);
                    images[i].load(dstream, ext);
                    imagePtrs.push_back(&images[i]);
                }
                _loadImages( imagePtrs );
            }
        }
        else
            OGRE_EXCEPT( Exception::ERR_NOT_IMPLEMENTED, "**** Unknown texture type ****", "GLTexture::load" );
    }
}
// Builds the Reload client login screen.
//
// Computes widget sizes/positions from the screen extent (with tweaks for
// tablets — screenHeight > 1000 and non-Android — and for Windows), then
// creates the logo, the "connect" layout (IP box + connect button), the
// "disconnect" layout (shown only while connected), the reload-last-app
// button, info icon, MoSync logo and background, all inside one relative
// layout assigned to mLoginScreen.
//
// @param os  Platform name string used for per-OS layout tweaks.
void LoginScreen::initializeScreen(MAUtil::String &os)
{
    maScreenSetFullscreen(1);
    MAExtent ex = maGetScrSize();
    int screenWidth = EXTENT_X(ex);
    int screenHeight = EXTENT_Y(ex);
    int centerH = screenWidth / 2;
    // Tablet-sized non-Android screens get narrower/shorter widgets.
    // NOTE(review): relies on os.find(...) returning a negative value when
    // absent — confirm MAUtil::String::find semantics.
    int buttonWidth = (int)((float)screenWidth * 0.75);
    if(screenHeight > 1000 && os.find("Android", 0) < 0)
    {
        buttonWidth = (int)((float)screenWidth * 0.4);
    }
    int buttonHeight = (int)((float)screenWidth * 0.15);
    if(screenHeight > 1000 && os.find("Android", 0) < 0)
    {
        buttonHeight = (int)((float)screenWidth * 0.07);
    }
    int buttonSpacing = (int)((float)buttonHeight * 0.3);
    if(os.find("Windows", 0) >= 0)
    {
        buttonSpacing = (int)((float)buttonHeight * 0.1);
    }
    int editBoxHeight = (int)((float)screenHeight * 0.07);
    if(screenHeight > 1000 && os.find("Android", 0) < 0)
    {
        editBoxHeight = (int)((float)screenHeight * 0.02);
    }
    int logoWidth = (int)((float)screenWidth * 0.75);
    int layoutTop = (int)((float)screenHeight * 0.3);
    if(screenHeight > 1000 && os.find("Android", 0) < 0)
    {
        layoutTop = (int)((float)screenHeight * 0.25);
    }
    int labelHeight = (int)((float)screenHeight * 0.05);
    if(screenHeight > 1000 && os.find("Android", 0) < 0)
    {
        labelHeight = (int)((float)screenHeight * 0.025);
    }
    int labelWidth = screenWidth;
    if(os.find("Android", 0) >= 0)
    {
        labelWidth = buttonWidth;
    }
    int labelSpacing = (int)((float)screenHeight * 0.02);
    if(screenHeight > 1000 && os.find("Android", 0) < 0)
    {
        // NOTE(review): this scales the already-computed labelSpacing by 0.01
        // (yielding ~0) — looks like it may have been meant to scale
        // screenHeight instead; confirm intent.
        labelSpacing = (int)((float)labelSpacing * 0.01);
    }
    int layoutHeight = (buttonHeight + buttonSpacing) * 2;
    int ipBoxButtonSpacing = (int)((float)screenHeight * 0.03);
    mLoginScreen = new Screen();
    //The reload Logo
    Image* logo = new Image();
    logo->setImage(LOGO_IMAGE);
    logo->wrapContentHorizontally();
    logo->wrapContentVertically();
    logo->setWidth(logoWidth);
    logo->setScaleMode(IMAGE_SCALE_PRESERVE_ASPECT);
    logo->setPosition(centerH - logoWidth/2, screenHeight / 12);
    //The connect to server button
    if(os == "iPhone OS") //Android image buttons do not support text
    {
        mServerConnectButton = new ImageButton();
        ((ImageButton*)mServerConnectButton)->addButtonListener(this);
        ((ImageButton*)mServerConnectButton)->setBackgroundImage(CONNECT_BG);
        mServerConnectButton->setFontColor(0x000000);
    }
    else
    {
        mServerConnectButton = new Button();
        ((Button*)mServerConnectButton)->addButtonListener(this);
    }
    mServerConnectButton->setText("Connect");
    mServerConnectButton->setTextHorizontalAlignment(MAW_ALIGNMENT_CENTER);
    mServerConnectButton->setTextVerticalAlignment(MAW_ALIGNMENT_CENTER);
    mServerConnectButton->setWidth(buttonWidth);
    mServerConnectButton->setHeight(buttonHeight);
    mServerConnectButton->setPosition(centerH - buttonWidth/2, layoutHeight - buttonHeight);
    //The edit box that receives the server IP
    mServerIPBox = new EditBox();
    mServerIPBox->setWidth(buttonWidth);
    //mServerIPBox->setHeight(editBoxHeight);
    mServerIPBox->addEditBoxListener(this);
    mServerIPBox->setPosition(centerH - buttonWidth/2,layoutHeight - buttonHeight - editBoxHeight - ipBoxButtonSpacing);
    //Label for the server IP edit box
    Label *serverIPLabel = new Label();
    serverIPLabel->setText("Server IP:");
    serverIPLabel->setFontColor(0xFFFFFF);
    serverIPLabel->setTextHorizontalAlignment(MAW_ALIGNMENT_CENTER);
    serverIPLabel->setTextVerticalAlignment(MAW_ALIGNMENT_CENTER);
    serverIPLabel->setWidth(labelWidth);
    serverIPLabel->setPosition(centerH - labelWidth/2, layoutHeight - buttonHeight - labelHeight - editBoxHeight - ipBoxButtonSpacing);
    /*
     * The mConnectLayout and mDisconnectLayout are placed
     * on top of each other inside a relative layout, and
     * each is only shown when needed.
     */
    mConnectLayout = new RelativeLayout();
    mConnectLayout->setWidth(screenWidth);
    mConnectLayout->setHeight(layoutHeight);
    mConnectLayout->addChild(serverIPLabel);
    mConnectLayout->addChild(mServerIPBox);
    mConnectLayout->addChild(mServerConnectButton);
    mConnectLayout->setPosition(0, layoutTop);
    //The disconnect button
    if(os == "iPhone OS")
    {
        mServerDisconnectButton = new ImageButton();
        ((ImageButton*)mServerDisconnectButton)->addButtonListener(this);
        ((ImageButton*)mServerDisconnectButton)->setBackgroundImage(CONNECT_BG);
        mServerDisconnectButton->setFontColor(0x000000);
    }
    else
    {
        mServerDisconnectButton = new Button();
        ((Button*)mServerDisconnectButton)->addButtonListener(this);
    }
    mServerDisconnectButton->setText("Disconnect");
    mServerDisconnectButton->setTextHorizontalAlignment(MAW_ALIGNMENT_CENTER);
    mServerDisconnectButton->setTextVerticalAlignment(MAW_ALIGNMENT_CENTER);
    mServerDisconnectButton->setWidth(buttonWidth);
    mServerDisconnectButton->setHeight(buttonHeight);
    mServerDisconnectButton->setPosition(centerH - buttonWidth/2, layoutHeight - buttonHeight);
    //Some instructions for the user
    Label *instructionsLabel = new Label();
    instructionsLabel->setText("Use the Reload Web UI to load an app");
    instructionsLabel->setFontColor(0xFFFFFF);
    instructionsLabel->setWidth(labelWidth);
    instructionsLabel->setMaxNumberOfLines(2);
    instructionsLabel->setTextHorizontalAlignment(MAW_ALIGNMENT_CENTER);
    instructionsLabel->setTextVerticalAlignment(MAW_ALIGNMENT_CENTER);
    instructionsLabel->setPosition(centerH - labelWidth/2, layoutHeight - buttonHeight - labelHeight - ipBoxButtonSpacing);
    //Label with the Server IP
    mConnectedToLabel = new Label();
    mConnectedToLabel->setFontColor(0xFFFFFF);
    mConnectedToLabel->setWidth(labelWidth);
    mConnectedToLabel->setTextHorizontalAlignment(MAW_ALIGNMENT_CENTER);
    mConnectedToLabel->setTextVerticalAlignment(MAW_ALIGNMENT_CENTER);
    mConnectedToLabel->setPosition(centerH - labelWidth/2, layoutHeight - buttonHeight - labelHeight * 2 - labelSpacing - ipBoxButtonSpacing);
    /*
     * The mConnectLayout and mDisconnectLayout are placed
     * on top of each other inside a relative layout, and
     * each is only shown when needed.
     */
    mDisconnectLayout = new RelativeLayout();
    mDisconnectLayout->setWidth(screenWidth);
    mDisconnectLayout->setHeight(layoutHeight);
    mDisconnectLayout->addChild(mConnectedToLabel);
    mDisconnectLayout->addChild(instructionsLabel);
    mDisconnectLayout->addChild(mServerDisconnectButton);
    mDisconnectLayout->setPosition(0, layoutTop);
    //The layout that appears when the client is connected
    //is hidden on startup
    mDisconnectLayout->setVisible(false);
    //Button that loads the last loaded app
    if(os == "iPhone OS")
    {
        mLoadLastAppButton = new ImageButton();
        ((ImageButton*)mLoadLastAppButton)->addButtonListener(this);
        ((ImageButton*)mLoadLastAppButton)->setBackgroundImage(RELOAD_BG);
        mLoadLastAppButton->setFontColor(0x000000);
    }
    else
    {
        mLoadLastAppButton = new Button();
        ((Button*)mLoadLastAppButton)->addButtonListener(this);
    }
    mLoadLastAppButton->setText("Reload last app");
    mLoadLastAppButton->setTextHorizontalAlignment(MAW_ALIGNMENT_CENTER);
    mLoadLastAppButton->setTextVerticalAlignment(MAW_ALIGNMENT_CENTER);
    mLoadLastAppButton->setWidth(buttonWidth);
    mLoadLastAppButton->setHeight(buttonHeight);
    mLoadLastAppButton->setPosition(centerH - buttonWidth/2, layoutTop + layoutHeight + buttonSpacing);
    //The info icon
    mInfoIcon = new ImageButton();
    mInfoIcon->addButtonListener(this);
    mInfoIcon->setBackgroundImage(INFO_ICON);
    mInfoIcon->setSize((int)(screenWidth * 0.1),(int)(screenWidth * 0.1));
    //mInfoIcon->setScaleMode(IMAGE_SCALE_PRESERVE_ASPECT);
    mInfoIcon->setPosition((int)(screenWidth * 0.85), (int)(screenHeight * 0.95) - (int)(screenWidth * 0.1) / 2);
    //A little MoSync logo at the lower right of the screen
    Image* mosynclogo = new Image();
    mosynclogo->setImage(MOSYNC_IMAGE);
    mosynclogo->setHeight((int)(screenWidth * 0.1));
    mosynclogo->setScaleMode(IMAGE_SCALE_PRESERVE_ASPECT);
    mosynclogo->setPosition((int)(screenWidth * 0.05),(int)(screenHeight * 0.95) - (int)(screenWidth * 0.1) / 2);
    // Full-screen stretched background (skipped on Windows).
    Image *background = new Image();
    background->setSize(screenWidth, screenHeight);
    background->setImage(BACKGROUND);
    background->setScaleMode(IMAGE_SCALE_XY);
    RelativeLayout *mainLayout = new RelativeLayout();
    mainLayout->setSize(screenWidth, screenHeight);
    if(os.find("Windows", 0) < 0)
    {
        mainLayout->addChild(background);
    }
    mainLayout->addChild(logo);
    mainLayout->addChild(mConnectLayout);
    mainLayout->addChild(mDisconnectLayout);
    mainLayout->addChild(mLoadLastAppButton);
    mainLayout->addChild(mosynclogo);
    mainLayout->addChild(mInfoIcon);
    mLoginScreen->setMainWidget(mainLayout);
}
// Entry point for the KFusion synthetic test: configures the pipeline,
// builds a synthetic reference volume (three boxes and a sphere carved out
// of empty space), allocates the image buffers and hands control to GLUT.
// Pass "-b" as the first argument to enable benchmark mode.
// Statement order matters: kfusion.Init must precede setPose and the
// buffer allocations, and all GL setup precedes glutMainLoop.
int main(int argc, char ** argv)
{
    benchmark = argc > 1 && string(argv[1]) == "-b";
    // Pipeline configuration (volume, iteration counts, camera intrinsics...).
    KFusionConfig config;
    config.volumeSize = make_uint3(128);
    config.combinedTrackAndReduce = false;
    // ICP iterations per pyramid level (coarse to fine).
    config.iterations[0] = 10;
    config.iterations[1] = 5;
    config.iterations[2] = 5;
    config.inputSize = make_uint2(320, 240);
    config.camera = make_float4(100, 100, 160, 120);
    config.nearPlane = 0.001;
    config.maxweight = 100;
    config.mu = 0.1;
    config.dist_threshold = 0.2f;
    config.normal_threshold = 0.8f;
    kfusion.Init(config);
    // Abort if CUDA initialization already produced an error.
    if(printCUDAError()) {
        cudaDeviceReset();
        exit(1);
    }
    // Build the synthetic ground-truth scene in the reference volume.
    reference.init(config.volumeSize, config.volumeDimensions);
    initVolumeWrap(reference, 1.0f);
    setBoxWrap(reference, make_float3(0.1f,0.1f,0.8f), make_float3(0.9f, 0.9f, 0.9f), -1.0f);
    setBoxWrap(reference, make_float3(0.1f,0.8f,0.1f), make_float3(0.9f, 0.9f, 0.9f), -1.0f);
    setBoxWrap(reference, make_float3(0.8f,0.1f,0.1f), make_float3(0.9f, 0.9f, 0.9f), -1.0f);
    setSphereWrap(reference, make_float3(0.5f), 0.2f, -1.0f);
    kfusion.setPose( toMatrix4( trans * rot * preTrans ));
    // Per-frame image buffers at input resolution.
    vertex.alloc(config.inputSize);
    normal.alloc(config.inputSize);
    depth.alloc(config.inputSize);
    rgb.alloc(config.inputSize);
    // GLUT window is 3x3 tiles of the input image size (see display()).
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE );
    glutInitWindowSize(config.inputSize.x * 3, config.inputSize.y * 3);
    glutCreateWindow("kfusion test");
    glutDisplayFunc(display);
    glutKeyboardFunc(keys);
    glutSpecialFunc(specials);
    glutReshapeFunc(reshape);
    glutIdleFunc(idle);
    glutMainLoop();
    // NOTE(review): classic GLUT's glutMainLoop typically never returns, so
    // this cleanup may be unreachable — confirm the GLUT variant in use.
    cudaDeviceReset();
    return 0;
}
// Paints the button's current image, optionally scaled to fit the component
// bounds (with or without preserved aspect ratio), and applies the overlay
// colour/opacity matching the button state. Disabled buttons are drawn in
// their normal state regardless of mouse/press flags.
void ImageButton::paintButton (Graphics& g, bool isMouseOverButton, bool isButtonDown)
{
    if (! isEnabled())
    {
        // A disabled button never shows hover/pressed feedback.
        isMouseOverButton = false;
        isButtonDown = false;
    }

    Image im (getCurrentImage());

    if (! im.isValid())
        return;

    const int imageW = im.getWidth();
    const int imageH = im.getHeight();
    int destW = getWidth();
    int destH = getHeight();
    int destX = (destW - imageW) / 2;
    int destY = (destH - imageH) / 2;

    if (scaleImageToFit)
    {
        if (preserveProportions)
        {
            // Fit the image inside the button, letterboxing on the longer axis.
            const float imRatio   = imageH / (float) imageW;
            const float destRatio = destH  / (float) destW;
            int fittedW, fittedH;

            if (imRatio > destRatio)
            {
                fittedW = roundToInt (destH / imRatio);
                fittedH = destH;
            }
            else
            {
                fittedW = destW;
                fittedH = roundToInt (destW * imRatio);
            }

            destX = (destW - fittedW) / 2;
            destY = (destH - fittedH) / 2;
            destW = fittedW;
            destH = fittedH;
        }
        else
        {
            // Stretch to fill the whole component.
            destX = 0;
            destY = 0;
        }
    }
    else
    {
        // No scaling: draw at the image's natural size, centred.
        destW = imageW;
        destH = imageH;
    }

    imageBounds.setBounds (destX, destY, destW, destH);

    const bool useDownImage = isButtonDown || getToggleState();

    getLookAndFeel().drawImageButton (g, &im, destX, destY, destW, destH,
                                      useDownImage ? downOverlay
                                                   : (isMouseOverButton ? overOverlay
                                                                        : normalOverlay),
                                      useDownImage ? downOpacity
                                                   : (isMouseOverButton ? overOpacity
                                                                        : normalOpacity),
                                      *this);
}
void Director::createStatsLabel() { Texture2D *texture = nullptr; TextureCache *textureCache = TextureCache::getInstance(); if (_FPSLabel && _SPFLabel) { CC_SAFE_RELEASE_NULL(_FPSLabel); CC_SAFE_RELEASE_NULL(_SPFLabel); CC_SAFE_RELEASE_NULL(_drawsLabel); textureCache->removeTextureForKey("/cc_fps_images"); FileUtils::getInstance()->purgeCachedEntries(); } Texture2D::PixelFormat currentFormat = Texture2D::getDefaultAlphaPixelFormat(); Texture2D::setDefaultAlphaPixelFormat(Texture2D::PixelFormat::RGBA4444); unsigned char *data = nullptr; unsigned int dataLength = 0; getFPSImageData(&data, &dataLength); Image* image = new Image(); bool isOK = image->initWithImageData(data, dataLength); if (! isOK) { CCLOGERROR("%s", "Fails: init fps_images"); return; } texture = textureCache->addImage(image, "/cc_fps_images"); CC_SAFE_RELEASE(image); /* We want to use an image which is stored in the file named ccFPSImage.c for any design resolutions and all resource resolutions. To achieve this, Firstly, we need to ignore 'contentScaleFactor' in 'AtlasNode' and 'LabelAtlas'. So I added a new method called 'setIgnoreContentScaleFactor' for 'AtlasNode', this is not exposed to game developers, it's only used for displaying FPS now. Secondly, the size of this image is 480*320, to display the FPS label with correct size, a factor of design resolution ratio of 480x320 is also needed. 
*/ float factor = EGLView::getInstance()->getDesignResolutionSize().height / 320.0f; _FPSLabel = new LabelAtlas(); _FPSLabel->setIgnoreContentScaleFactor(true); _FPSLabel->initWithString("00.0", texture, 12, 32 , '.'); _FPSLabel->setScale(factor); _SPFLabel = new LabelAtlas(); _SPFLabel->setIgnoreContentScaleFactor(true); _SPFLabel->initWithString("0.000", texture, 12, 32, '.'); _SPFLabel->setScale(factor); _drawsLabel = new LabelAtlas(); _drawsLabel->setIgnoreContentScaleFactor(true); _drawsLabel->initWithString("000", texture, 12, 32, '.'); _drawsLabel->setScale(factor); Texture2D::setDefaultAlphaPixelFormat(currentFormat); _drawsLabel->setPosition(Point(0, 34*factor) + CC_DIRECTOR_STATS_POSITION); _SPFLabel->setPosition(Point(0, 17*factor) + CC_DIRECTOR_STATS_POSITION); _FPSLabel->setPosition(CC_DIRECTOR_STATS_POSITION); }
// One-time application startup: loads GL extensions, fonts, terrain, shaders,
// creates all render/frame buffers and render-target textures, and loads the
// water/sky/sun textures. Must run with a valid GL context (m_hDC).
void BaseApp::Init()
{
    m_wireframe = false;
    lockMouse = true;
    m_freezeFrustum = false;
    blurOffset = 0.01;

    LoadOpenGLExt();
    //importer3DS.Import3DS(&model, "data\\models\\3ds__tree2.3ds");

    // Show a "Loading..." splash immediately, before the heavy work below.
    m_pFont = new Font("data\\textures\\fonts\\tahoma_10.tga");
    glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
    m_pFont->Enable(m_width, m_height);
    m_pFont->SetAlignment(FONT_HORIZ_ALIGN_LEFT, FONT_VERT_ALIGN_TOP);
    m_pFont->Print(10, 10, "Loading...");
    m_pFont->Disable();
    SwapBuffers(m_hDC);
    ShowCursor(false);

    InitGUI();
    InitSkyDome();

    // Terrain: heightmap -> vertex arrays -> quadtree for culling.
    // "lixo" (Portuguese for "garbage") are throwaway output arrays from BuildArray.
    Vec2 *lixo, *lixo3;
    Vec3 *lixo2;
    g_heightMap = new HeightMap();
    g_heightMap->SetWidthRatio(15.0f);
    g_heightMap->SetHeightRatio(5.0f);
    g_heightMap->SetYOffset(-150.0f);
    g_heightMap->LoadFromFile("data\\heightmap.png");
    int numVerts = 0;
    numVerts = g_heightMap->BuildArray(&lixo2, &lixo, &lixo3);
    g_quadtree = new Quadtree();
    g_quadtree->Build(g_heightMap, g_heightMap->vertices, lixo, numVerts);

    // Shader programs used by the renderer.
    m_shdWater = new Shader("data\\shaders\\water");
    m_shdWaterSpecular = new Shader("data\\shaders\\water_specular");
    m_shdBlur = new Shader("data\\shaders\\blur");
    m_shdHDR = new Shader("data\\shaders\\hdr");
    m_shdSky = new Shader("data\\shaders\\sky");
    m_shdUnderwater = new Shader("data\\shaders\\underwater");
    m_shdSimpleColor = new Shader("data\\shaders\\simple_color");

    // Fixed sun direction.
    m_sunPos.Set(0.0f, 1.0f, 0.0f);
    m_sunPos.x = cosf(0.35f);
    m_sunPos.y = 1.0 - sinf(0.35f);

    // Camera start position/look-at/up.
    m_camera.PositionCamera( -527.03f, 13.39f, -606.4f, -526.3f, 13.4f, -605.75f, 0.0f, 1.0f, 0.0f );

    // Fixed-function GL state + projection matrix.
    glViewport(0, 0, m_width, m_height);
    glClearColor(0.5f, 0.8f, 1.0f, 1.0f);
    glEnable(GL_CULL_FACE);
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
    glMatrixMode(GL_PROJECTION);
    projMat.BuildProjection(50.0f, (float)m_width / (float)m_height, 0.1f, 50000.0f);
    glLoadMatrixf(projMat);
    glMatrixMode(GL_MODELVIEW);

    // Depth renderbuffers for the various offscreen passes.
    glGenRenderbuffersEXT(1, &g_renderBuffer[0]);
    glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, g_renderBuffer[0]);
    glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, m_width, m_height);
    glGenRenderbuffersEXT(1, &g_renderBuffer[1]);
    glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, g_renderBuffer[1]);
    glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, m_width, m_height);

    glGenFramebuffersEXT(1, &g_frameBuffer);
    glGenRenderbuffersEXT(1, &g_depthRenderBuffer);
    glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, g_depthRenderBuffer);
    glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, m_width, m_height);

    // NOTE(review): the matching framebuffer generations are commented out here;
    // presumably g_frameBuffer is reused for these depth buffers — confirm.
    //glGenFramebuffersEXT(1, &g_frameBuffer2);
    glGenRenderbuffersEXT(1, &g_depthRenderBuffer2);
    glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, g_depthRenderBuffer2);
    glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, 512, 512);

    //glGenFramebuffersEXT(1, &g_frameBufferHDR);
    glGenRenderbuffersEXT(1, &g_depthRenderBufferHDR);
    glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, g_depthRenderBufferHDR);
    glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, m_width / 1, m_height / 1);

    // Quarter-resolution depth buffer for the blur passes.
    glGenRenderbuffersEXT(1, &g_renderBufferBlurdHDR);
    glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, g_renderBufferBlurdHDR);
    glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, m_width / 4, m_height / 4);

    // Bail out if the FBO setup is not usable on this driver.
    GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
    switch(status)
    {
    case GL_FRAMEBUFFER_COMPLETE_EXT:
        //MessageBox(NULL,"GL_FRAMEBUFFER_COMPLETE_EXT!","SUCCESS",MB_OK|MB_ICONEXCLAMATION);
        break;
    case GL_FRAMEBUFFER_UNSUPPORTED_EXT:
        MessageBox(NULL,"GL_FRAMEBUFFER_UNSUPPORTED_EXT!","ERROR",MB_OK|MB_ICONEXCLAMATION);
        exit(0);
        break;
    default:
        exit(0);
    }

    // Floating-point render targets for the HDR pipeline.
    g_texFP16.CreateRenderTarget(m_width, m_height, 3, GL_RGB16F_ARB, true);
    g_texFP162.CreateRenderTarget(m_width, m_height, 3, GL_RGB16F_ARB, true);
    g_texHDR.CreateRenderTarget(m_width / 1, m_height / 1, 3, GL_RGB16F_ARB, true);
    g_texHBluredHDR.CreateRenderTarget(m_width / 4, m_height / 4, 3, GL_RGB16F_ARB, true);
    g_texVBluredHDR.CreateRenderTarget(m_width / 4, m_height / 4, 3, GL_RGB16F_ARB, true);
    g_texWaterReflect.CreateRenderTarget(512, 512, 3, GL_RGB16F_ARB, true);

    // Water normal map (generated from a height image), sky gradient and sun textures.
    Image image;
    image.Load("data\\textures\\water_nmap.png");
    image.ToNormalMap(2);
    water.Load2D(image, GL_REPEAT, GL_REPEAT, GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR_MIPMAP_LINEAR, true);
    image.Load("data\\textures\\teste.bmp");
    m_skyGradient.Load1D(image, GL_CLAMP, GL_LINEAR, GL_LINEAR);
    image.Load("data\\textures\\sun.png");
    m_texSun.Load2D(image, GL_REPEAT, GL_REPEAT, GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR_MIPMAP_LINEAR, true);

    // Underwater distortion cube map — same normal map on all six faces.
    CubeMapFilePath cubeMapFilePath;
    strcpy(cubeMapFilePath.posX, "data\\textures\\underwater_nmap.png");
    strcpy(cubeMapFilePath.negX, "data\\textures\\underwater_nmap.png");
    strcpy(cubeMapFilePath.posY, "data\\textures\\underwater_nmap.png");
    strcpy(cubeMapFilePath.negY, "data\\textures\\underwater_nmap.png");
    strcpy(cubeMapFilePath.posZ, "data\\textures\\underwater_nmap.png");
    strcpy(cubeMapFilePath.negZ, "data\\textures\\underwater_nmap.png");
    m_texCubeUnderwaterNormMap.LoadCubeMap(cubeMapFilePath, GL_REPEAT, GL_REPEAT, GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR_MIPMAP_LINEAR, 0);

    m_texViewPort.CreateRenderTarget(m_width, m_height, 3, GL_RGB, true);

    // Start with the mouse locked to the window center.
    m_mouseLockedX = m_width * 0.5;
    m_mouseLockedY = m_height * 0.5;
    CenterMouse();
}
// OCR test driver for the second bill image ("testOCR-bill2.raw", 316x901 RGB):
// converts to gray, thresholds to binary (threshold read from stdin), segments
// and extracts characters, writes per-character images and a feature CSV, then
// locates the "TOTAL" line (via T/O/L detection) and reads the digits to its right.
void Problem_2_B_b()
{
    string strName("testOCR-bill2.raw");
    Image imTrainingBill(strName,316,901,3), *pimGray, *pimBin;
    int iThresh, iRows = 316, iCols = 901;

    cout<<"\n\n ** ** ** ** ** ** ** ** ** ** ** ** **\n";
    cout<<"\n Test IMage 2: \n";

    // Load and preprocess: gray conversion, histogram, interactive binarization.
    imTrainingBill.AllocImageMem();
    imTrainingBill.ReadImageData();
    pimGray = imTrainingBill.Convert2Gray();
    pimGray->WriteImageData();
    pimGray->PlotHistogram();
    cout<<"\n Enter Threshold for Binary Conversion: ";
    cin>>iThresh;
    pimBin = pimGray->Convert2Bin(iThresh);
    pimBin->WriteImageData();

    //Charachter Segmentation
    // NOTE(review): iLabelCount is declared but never assigned or used.
    int **iaLabels, iLabelCount;
    list<int> plistLabels;
    cout<<"\n* Segmenting Test Charachters..";
    iaLabels = SegmentCharachters(pimBin,&plistLabels);

    //Paint Labeled Image
    // Visualize the label map: background -> 255, labeled pixels -> their label value.
    for(int i = 0;i<iRows;i++)
    {
        for(int j = 0;j<iCols;j++)
        {
            Pixel pxP;
            if(iaLabels[i][j] == 0)
            {
                pxP.iBW = (255);
            }
            else
            {
                pxP.iBW = (iaLabels[i][j]);
            }
            pimGray->SetPixel(i,j,pxP);
        }
    }
    pimGray->WriteImageData();

    //Charachter Seperation
    // Extract each labeled component into its own image file "Test_2Char_<n>.raw".
    // NOTE(review): fixed-size arrays of 60 assume at most 60 characters — confirm.
    cout<<"\n* Extracting Test Charachters..";
    int i = 1;
    Image *pimCharachter[60];
    int *iaLabelArr = new int[60];
    for(list<int>::iterator it = plistLabels.begin();it != plistLabels.end();++it)
    {
        int iLabel = *it;
        pimCharachter[i-1] = ExtractCharachter(iaLabels, iRows, iCols, iLabel);
        iaLabelArr[i-1] = iLabel;
        // Shadows the outer strName; unusual but valid explicit basic_string call.
        string strName;
        char szName[30];
        sprintf(szName,"Test_2Char_%d.raw",i);
        strName = string::basic_string(szName);
        pimCharachter[i-1]->SetImageName(strName);
        pimCharachter[i-1]->WriteImageData();
        i++;
    }
    cout<<"\n*Total Number of Charachters in Test2 image: "<<i-1;

    //Features
    // Compute a feature vector per extracted character and tag it with its label.
    Feature *dfFeatures[60];
    for(int j = 0;j<i-1;j++)
    {
        dfFeatures[j] = ComputeFeatures(pimCharachter[j]);
        dfFeatures[j]->iLabel = iaLabelArr[j];
    }
    //CSV File
    WriteFeatureFile(dfFeatures,i-1,"P2A_FeatureSet_test1.csv");

    //Identify T and L
    // Scan the label map in raster order until the letters T, O and L have all been
    // seen; the row band and start column of the amount are derived from that hit.
    // NOTE(review): iXmin/iXmax/iYmin stay uninitialized if T/O/L are never all
    // found, yet are used below — confirm the input guarantees a hit. iYmax unused.
    char cT;
    bool fT = false;
    bool fL = false;
    bool fO = false;
    Image *pimChar;
    int iXmin,iXmax,iYmin,iYmax;
    list<int> pLabelList;
    bool bDone = false;
    for(int i = 0;i<iRows;i++)
    {
        for(int j = 0;j<iCols;j++)
        {
            int iCurrLabel = iaLabels[i][j];
            if(iCurrLabel == 0)
                continue;
            //if label done before
            for(list<int>::iterator it1 = pLabelList.begin();it1 != pLabelList.end();it1++)
            {
                if(*it1 == iCurrLabel)
                {
                    bDone = true;
                    break;
                }
            }
            if(bDone == true)
            {
                bDone = false;
                continue;
            }
            else
            {
                pLabelList.push_back(iCurrLabel);
            }
            pimChar = ExtractCharachter(iaLabels, iRows, iCols, iCurrLabel);
            cT = DecisionTreeChar(pimChar);
            if(cT == 'T') { cout<<"T("<<iCurrLabel<<") "; fT = true; }
            if(cT == 'O') { cout<<"O "; fO = true; }
            if(cT == 'L') { cout<<"L("<<iCurrLabel<<") "; fL = true; }
            // All three letters seen: window starts just past this character.
            if(( fL && fT && fO) == true)
            {
                iXmin = i - 2;
                iYmin = j+ pimChar->GetImageCols() + 1;
                iXmax = iXmin + pimChar->GetImageRows() + 2;
                break;
            }
        }
        if(( fL && fT && fO) == true)
        {
            break;
        }
    }

    //Read the amount digits to the right of the detected "TOTAL" row band.
    cout<<"\n*Ammount:\n";
    pLabelList.clear();
    for(int i = iXmin;i<iXmax;i++)
    {
        for(int j = iYmin;j<iCols;j++)
        {
            int iCurrLabel = iaLabels[i][j];
            if(iCurrLabel == 0)
                continue;
            //Check for repeat label
            for(list<int>::iterator it1 = pLabelList.begin();it1 != pLabelList.end();it1++)
            {
                if(*it1 == iCurrLabel)
                {
                    bDone = true;
                    break;
                }
            }
            if(bDone == true)
            {
                bDone = false;
                continue;
            }
            else
            {
                pLabelList.push_back(iCurrLabel);
            }
            pimChar = ExtractCharachter(iaLabels, iRows, iCols, iCurrLabel);
            cT = DecisionTreeInt(pimChar);
            if(cT == '\n')
                continue;
            cout<<cT<<"("<<iCurrLabel<<")";
        }
    }
    cout<<"\n Press 'C' to continue..";
    getch();
}
void Southfall::renderBirmingham() { Image *BirminghamIM = &imageLibrary->BirminghamIM; if(BirminghamIM->getScale() < .1) currentState = GAME; BirminghamIM->setX(SCREEN_WIDTH/2-BirminghamIM->getWidth()*BirminghamIM->getScale()/2); BirminghamIM->setY(SCREEN_HEIGHT/2-BirminghamIM->getHeight()*BirminghamIM->getScale()/2); BirminghamIM->draw(); BirminghamIM->setScale(BirminghamIM->getScale()*.996); BirminghamIM->setRadians(BirminghamIM->getRadians()+birminghamRot); birminghamRot += .0001; }
// [working thread]
// Executes the sprite-resize job inside a transaction: rescales every unique
// cel (image or reference bounds), the selection mask, all slices, and finally
// the sprite canvas itself. Cancellation mid-way relies on the Transaction
// destructor undoing everything already executed.
void onJob() override {
  DocumentApi api = writer().document()->getApi(transaction());

  // Count cels up-front so progress can be reported as a fraction.
  int cels_count = 0;
  for (Cel* cel : sprite()->uniqueCels()) { // TODO add size() member function to CelsRange
    (void)cel;
    ++cels_count;
  }
  // NOTE(review): if cels_count is 0 the progress division below is 0/0 — confirm
  // sprites always have at least one cel here.

  // For each cel...
  int progress = 0;
  for (Cel* cel : sprite()->uniqueCels()) {
    // Get cel's image
    Image* image = cel->image();
    if (image && !cel->link()) {
      // Resize the cel bounds only if it's from a reference layer
      if (cel->layer()->isReference()) {
        gfx::RectF newBounds = scale_rect<double>(cel->boundsF());
        transaction().execute(new cmd::SetCelBoundsF(cel, newBounds));
      }
      else {
        // Change its location
        api.setCelPosition(sprite(), cel, scale_x(cel->x()), scale_y(cel->y()));

        // Resize the image (never below 1x1)
        int w = scale_x(image->width());
        int h = scale_y(image->height());
        ImageRef new_image(Image::create(image->pixelFormat(), MAX(1, w), MAX(1, h)));
        new_image->setMaskColor(image->maskColor());

        doc::algorithm::fixup_image_transparent_colors(image);
        doc::algorithm::resize_image(
          image, new_image.get(),
          m_resize_method,
          sprite()->palette(cel->frame()),
          sprite()->rgbMap(cel->frame()),
          (cel->layer()->isBackground() ? -1: sprite()->transparentColor()));
        api.replaceImage(sprite(), cel->imageRef(), new_image);
      }
    }

    jobProgress((float)progress / cels_count);
    ++progress;

    // Cancel all the operation?
    if (isCanceled())
      return;        // Transaction destructor will undo all operations
  }

  // Resize mask: pad the bitmap by 1px on every side before scaling so edge
  // pixels survive the resampling, then re-shrink to the mask bounds.
  if (document()->isMaskVisible()) {
    ImageRef old_bitmap
      (crop_image(document()->mask()->bitmap(),
                  -1, -1,
                  document()->mask()->bitmap()->width()+2,
                  document()->mask()->bitmap()->height()+2, 0));

    int w = scale_x(old_bitmap->width());
    int h = scale_y(old_bitmap->height());
    base::UniquePtr<Mask> new_mask(new Mask);
    new_mask->replace(
      gfx::Rect(
        scale_x(document()->mask()->bounds().x-1),
        scale_y(document()->mask()->bounds().y-1), MAX(1, w), MAX(1, h)));
    algorithm::resize_image(
      old_bitmap.get(), new_mask->bitmap(),
      m_resize_method,
      sprite()->palette(0), // Ignored
      sprite()->rgbMap(0),  // Ignored
      -1);                  // Ignored

    // Reshrink
    new_mask->intersect(new_mask->bounds());

    // Copy new mask
    api.copyToCurrentMask(new_mask);

    // Regenerate mask
    document()->resetTransformation();
    document()->generateMaskBoundaries();
  }

  // Resize slices: bounds, optional 9-slice center and pivot point.
  for (auto& slice : sprite()->slices()) {
    for (auto& k : *slice) {
      const SliceKey& key = *k.value();
      if (key.isEmpty())
        continue;
      SliceKey newKey = key;
      newKey.setBounds(scale_rect(newKey.bounds()));
      if (newKey.hasCenter())
        newKey.setCenter(scale_rect(newKey.center()));
      if (newKey.hasPivot())
        newKey.setPivot(gfx::Point(scale_x(newKey.pivot().x),
                                   scale_y(newKey.pivot().y)));
      transaction().execute(
        new cmd::SetSliceKey(slice, k.frame(), newKey));
    }
  }

  // Resize Sprite
  api.setSpriteSize(sprite(), m_new_width, m_new_height);
}
/**
*  @brief
*    Constructor
*
*  Creates an OpenGL rectangle texture buffer from the first part/mipmap of the
*  given image. Chooses the internal format via the renderer, uploads either
*  pre-compressed or uncompressed data, sets up depth-compare state for depth
*  formats, and fills in any missing mipmap levels with white (with a warning).
*/
TextureBufferRectangle::TextureBufferRectangle(PLRenderer::Renderer &cRenderer, Image &cImage, EPixelFormat nInternalFormat, uint32 nFlags) :
	PLRenderer::TextureBufferRectangle(cRenderer, nFlags),
	m_nOpenGLTexture(0)
{
	// Get the OpenGL renderer instance
	Renderer &cRendererOpenGL = static_cast<Renderer&>(GetRenderer());

	// Update renderer statistics
	cRendererOpenGL.GetWritableStatistics().nTextureBuffersNum++;

	// Initialize sampler states
	MemoryManager::Set(m_nSamplerState, PLRenderer::Sampler::Unknown, sizeof(uint32)*PLRenderer::Sampler::Number);

	// Choose the texture buffer pixel formats which should be used
	EPixelFormat nImageFormat;
	bool bUsePreCompressedData;
	m_nFormat = cRendererOpenGL.ChooseFormats(cImage, nInternalFormat, nFlags, nImageFormat, bUsePreCompressedData);

	// Get the first image part
	const ImagePart *pImagePart = cImage.GetPart(0);
	if (pImagePart) {
		// Get the first image buffer
		const ImageBuffer *pImageBuffer = pImagePart->GetMipmap(0);
		if (pImageBuffer) {
			// Get API pixel format
			const uint32 *pAPIPixelFormat = cRendererOpenGL.GetAPIPixelFormat(m_nFormat);
			if (pAPIPixelFormat) {
				// Is this a compressed texture buffer pixel format?
				const bool bCompressedFormat = IsCompressedFormat();

				// Get uncompressed image format information
				EPixelFormat nImageFormatUncompressed     = GetFormatFromImage(cImage, true);
				uint32       nAPIImageFormatUncompressed  = cRendererOpenGL.GetOpenGLPixelFormat(nImageFormatUncompressed);
				uint32       nImageDataFormatUncompressed = cRendererOpenGL.GetOpenGLDataFormat(nImageFormatUncompressed);

				// Get the size
				m_vSize.x = pImageBuffer->GetSize().x;
				m_vSize.y = pImageBuffer->GetSize().y;

				// Create OpenGL texture buffer
				glGenTextures(1, &m_nOpenGLTexture);
				glBindTexture(GL_TEXTURE_RECTANGLE_ARB, m_nOpenGLTexture);

				// Setup depth format stuff
				if (m_nFormat == D16 || m_nFormat == D24 || m_nFormat == D32) {
					// Enable shadow comparison
					glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_COMPARE_MODE_ARB, GL_COMPARE_R_TO_TEXTURE);

					// Shadow comparison should be true (e.g. not in shadow) if r<=texture buffer
					glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_COMPARE_FUNC_ARB, GL_LEQUAL);

					// THIS is really important, if we choose other filtering there may be a crash...
					// BUGFIX: these two calls used the GL_TEXTURE_2D target while the texture is
					// bound to GL_TEXTURE_RECTANGLE_ARB, so the filters were applied to the wrong
					// binding point (copy-paste from the 2D texture buffer implementation).
					glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
					glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
				}

				// Upload the texture buffer
				m_nNumOfMipmaps = pImagePart->GetNumOfMipmaps() - 1;
				// [TODO] Rectangle texture buffers with mipmaps possible?
				// bool bMipmaps = (nFlags & Mipmaps);
				bool bMipmaps = false;
				m_nNumOfMipmaps = 0;
				if (!m_nNumOfMipmaps && bMipmaps) {
					// Calculate the number of mipmaps
					m_nNumOfMipmaps = static_cast<uint32>(Math::Log2(static_cast<float>(Math::Max(m_vSize.x, m_vSize.y))));

					// Build mipmaps automatically, no pre compressed image data can be used
					gluBuild2DMipmaps(GL_TEXTURE_RECTANGLE_ARB, *pAPIPixelFormat, m_vSize.x, m_vSize.y, nAPIImageFormatUncompressed, nImageDataFormatUncompressed, pImageBuffer->HasAnyData() ? pImageBuffer->GetData() : nullptr);

					// If compressed internal format, check whether all went fine
					if (bCompressedFormat) {
						GLint nCompressed;
						glGetTexLevelParameteriv(GL_TEXTURE_RECTANGLE_ARB, 0, GL_TEXTURE_COMPRESSED_ARB, &nCompressed);
						if (!nCompressed) {
							// There was an error, use no compression
							m_nFormat = nImageFormat;
							const uint32 *pAPIPixelFormatFallback = cRendererOpenGL.GetAPIPixelFormat(m_nFormat);
							if (pAPIPixelFormatFallback)
								gluBuild2DMipmaps(GL_TEXTURE_RECTANGLE_ARB, *pAPIPixelFormatFallback, m_vSize.x, m_vSize.y, nAPIImageFormatUncompressed, nImageDataFormatUncompressed, pImageBuffer->HasAnyData() ? pImageBuffer->GetData() : nullptr);
						}
					}

					// Calculate the total number of bytes this texture buffer requires
					for (uint32 nLevel=0; nLevel<=m_nNumOfMipmaps; nLevel++)
						m_nTotalNumOfBytes += GetNumOfBytes(nLevel);
				} else {
					// Ignore mipmaps?
					if (!bMipmaps)
						m_nNumOfMipmaps = 0; // Jep, we do not want to have any mipmaps for this texture buffer

					// Use given mipmaps
					Vector2i vSize;
					uint32 nLevel = 0;
					for (; nLevel<m_nNumOfMipmaps+1; nLevel++) {
						// Get the mipmap image buffer
						const ImageBuffer *pMipmapImageBuffer = pImagePart->GetMipmap(nLevel);
						if (pMipmapImageBuffer) {
							// Get the size of this mipmap level
							vSize.x = pMipmapImageBuffer->GetSize().x;
							vSize.y = pMipmapImageBuffer->GetSize().y;

							// Upload the texture buffer
							if (bUsePreCompressedData && pMipmapImageBuffer->HasCompressedData())
								glCompressedTexImage2DARB(GL_TEXTURE_RECTANGLE_ARB, nLevel, *pAPIPixelFormat, vSize.x, vSize.y, 0, pMipmapImageBuffer->GetCompressedDataSize(), pMipmapImageBuffer->GetCompressedData());
							else
								glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, nLevel, *pAPIPixelFormat, vSize.x, vSize.y, 0, nAPIImageFormatUncompressed, nImageDataFormatUncompressed, pMipmapImageBuffer->HasAnyData() ? pMipmapImageBuffer->GetData() : nullptr);

							// If compressed internal format, check whether all went fine
							if (bCompressedFormat) {
								GLint nCompressed;
								glGetTexLevelParameteriv(GL_TEXTURE_RECTANGLE_ARB, nLevel, GL_TEXTURE_COMPRESSED_ARB, &nCompressed);
								if (!nCompressed) {
									// There was an error, use no compression as fallback
									m_nFormat = nImageFormat;
									const uint32 *pAPIPixelFormatFallback = cRendererOpenGL.GetAPIPixelFormat(m_nFormat);
									if (pAPIPixelFormatFallback)
										glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, nLevel, *pAPIPixelFormatFallback, vSize.x, vSize.y, 0, nAPIImageFormatUncompressed, nImageDataFormatUncompressed, pMipmapImageBuffer->HasAnyData() ? pMipmapImageBuffer->GetData() : nullptr);
								}
							}
						}

						// Update the total number of bytes this texture buffer requires
						m_nTotalNumOfBytes += GetNumOfBytes(nLevel);
					}

					// We have to define all mipmap levels down to 1x1 otherwise the texture buffer is invalid when we try to use any min
					// filter that uses mipmaps. OpenGL "normally" uses white color when invalid/incomplete texture buffer is enabled.
					if (bMipmaps && (vSize.x != 1 || vSize.y != 1)) {
						// Write a warning into the log
						PL_LOG(Warning, String::Format("Lowest mipmap is %dx%d, but should be 1x1! Missing mipmap levels will be white!", vSize.x, vSize.y))

						// If we don't define all mipmap levels down to 1x1 'mysterious' graphics bugs may occur were it is not
						// always easy to pinpoint the problem directly to the mipmaps. So, to avoid frustration during bug finding,
						// we just create the missing mipmap levels down to 1x1 with a white color - so it's possible to 'see' which texture
						// isn't correct without reading the log message from above. (for some people it appears to be hard to read logs *g*)
						char *pszBuffer = nullptr;
						do {
							// First at all, 'half' the current dimension
							if (vSize.x > 1)
								vSize.x /= 2;
							if (vSize.y > 1)
								vSize.y /= 2;

							// Update the total number of mipmap levels
							m_nNumOfMipmaps++;

							// Update the total number of bytes this texture buffer requires
							const uint32 nNumOfBytes = GetNumOfBytes(nLevel);
							m_nTotalNumOfBytes += nNumOfBytes;

							// Allocate memory for your white buffer and set it to "white" (we only do this once for the larges one)
							if (!pszBuffer) {
								pszBuffer = new char[nNumOfBytes];
								MemoryManager::Set(pszBuffer, 255, nNumOfBytes);
							}

							// Upload the texture buffer
							glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, nLevel, *pAPIPixelFormat, vSize.x, vSize.y, 0, nAPIImageFormatUncompressed, nImageDataFormatUncompressed, pszBuffer);

							// Update the mipmap level counter
							nLevel++;
						} while (vSize.x != 1 || vSize.y != 1);

						// Cleanup your white buffer
						if (pszBuffer)
							delete [] pszBuffer;
					}
				}

				// Update renderer statistics
				cRendererOpenGL.GetWritableStatistics().nTextureBuffersMem += GetTotalNumOfBytes();
			}
		}
	}
}
// Draws a rectangle with a soft, fading border: a textured triangle strip runs
// around the edge (fading out toward the outside via a lazily-created 32x32
// radial-falloff corner texture) and a solid quad fills the center. Both are
// tinted by the widget color scaled by colScale/transScale.
void Widget::drawSoftBorderQuad(Renderer *renderer, const SamplerStateID linearClamp, const BlendStateID blendSrcAlpha, const DepthStateID depthState,
	const float x0, const float y0, const float x1, const float y1, const float borderWidth, const float colScale, const float transScale){

	// Build the shared corner fade texture the first time any widget needs it.
	if (corner == TEXTURE_NONE){
		ubyte pixels[32][32][4];

		for (int y = 0; y < 32; y++){
			for (int x = 0; x < 32; x++){
				// Radial falloff from the (0,0) corner, clamped at 0.
				// BUGFIX: the original wrapped this expression in powf(..., 1.0f),
				// which is an identity operation — the redundant call is removed
				// (pow(x, 1) == x, so the generated texture is unchanged).
				int r = 255 - int(sqrtf(float(x * x + y * y)) * (255.0f / 31.0f));
				if (r < 0) r = 0;

				pixels[y][x][0] = r;
				pixels[y][x][1] = r;
				pixels[y][x][2] = r;
				pixels[y][x][3] = r;
			}
		}
		Image img;
		img.loadFromMemory(pixels, FORMAT_RGBA8, 32, 32, 1, 1, false);
		corner = renderer->addTexture(img, false, linearClamp);
	}

	// Inner rectangle; the band between it and the outer rectangle fades out.
	float x0bw = x0 + borderWidth;
	float y0bw = y0 + borderWidth;
	float x1bw = x1 - borderWidth;
	float y1bw = y1 - borderWidth;

	// One triangle strip around the whole border; texcoord u runs across the band
	// (1 = outer edge, 0 = inner edge), degenerate vertices turn the corners.
	TexVertex border[] = {
		TexVertex(vec2(x0,   y0bw), vec2(1, 0)),
		TexVertex(vec2(x0,   y0  ), vec2(1, 1)),
		TexVertex(vec2(x0bw, y0bw), vec2(0, 0)),
		TexVertex(vec2(x0bw, y0  ), vec2(0, 1)),
		TexVertex(vec2(x1bw, y0bw), vec2(0, 0)),
		TexVertex(vec2(x1bw, y0  ), vec2(0, 1)),
		TexVertex(vec2(x1bw, y0  ), vec2(0, 1)),
		TexVertex(vec2(x1,   y0  ), vec2(1, 1)),
		TexVertex(vec2(x1bw, y0bw), vec2(0, 0)),
		TexVertex(vec2(x1,   y0bw), vec2(1, 0)),
		TexVertex(vec2(x1bw, y1bw), vec2(0, 0)),
		TexVertex(vec2(x1,   y1bw), vec2(1, 0)),
		TexVertex(vec2(x1,   y1bw), vec2(1, 0)),
		TexVertex(vec2(x1,   y1  ), vec2(1, 1)),
		TexVertex(vec2(x1bw, y1bw), vec2(0, 0)),
		TexVertex(vec2(x1bw, y1  ), vec2(0, 1)),
		TexVertex(vec2(x0bw, y1bw), vec2(0, 0)),
		TexVertex(vec2(x0bw, y1  ), vec2(0, 1)),
		TexVertex(vec2(x0bw, y1  ), vec2(0, 1)),
		TexVertex(vec2(x0,   y1  ), vec2(1, 1)),
		TexVertex(vec2(x0bw, y1bw), vec2(0, 0)),
		TexVertex(vec2(x0,   y1bw), vec2(1, 0)),
		TexVertex(vec2(x0bw, y0bw), vec2(0, 0)),
		TexVertex(vec2(x0,   y0bw), vec2(1, 0)),
	};

	vec4 col = color * vec4(colScale, colScale, colScale, transScale);
	renderer->drawTextured(PRIM_TRIANGLE_STRIP, border, elementsOf(border), corner, linearClamp, blendSrcAlpha, depthState, &col);

	// Solid center quad.
	vec2 center[] = { vec2(x0bw, y0bw), vec2(x1bw, y0bw), vec2(x0bw, y1bw), vec2(x1bw, y1bw) };
	renderer->drawPlain(PRIM_TRIANGLE_STRIP, center, 4, blendSrcAlpha, depthState, &col);
}
bool handle_encode_arguments(int argc, char **argv, Images &images, int palette_size, int acb, flifEncodingOptional method, int lookback, int learn_repeats, int frame_delay) { int nb_input_images = argc-1; while(argc>1) { Image image; v_printf(2,"\r"); if (!image.load(argv[0])) { e_printf("Could not read input file: %s\n", argv[0]); return 2; }; images.push_back(std::move(image)); const Image& last_image = images.back(); if (last_image.rows() != images[0].rows() || last_image.cols() != images[0].cols() || last_image.numPlanes() != images[0].numPlanes()) { e_printf("Dimensions of all input images should be the same!\n"); e_printf(" First image is %ux%u, %i channels.\n",images[0].cols(),images[0].rows(),images[0].numPlanes()); e_printf(" This image is %ux%u, %i channels: %s\n",last_image.cols(),last_image.rows(),last_image.numPlanes(),argv[0]); return 2; } argc--; argv++; if (nb_input_images>1) {v_printf(2," (%i/%i) ",(int)images.size(),nb_input_images); v_printf(4,"\n");} } v_printf(2,"\n"); bool flat=true; for (Image &image : images) if (image.uses_alpha()) flat=false; if (flat && images[0].numPlanes() == 4) { v_printf(2,"Alpha channel not actually used, dropping it.\n"); for (Image &image : images) image.drop_alpha(); } uint64_t nb_pixels = (uint64_t)images[0].rows() * images[0].cols(); std::vector<std::string> desc; desc.push_back("YIQ"); // convert RGB(A) to YIQ(A) desc.push_back("BND"); // get the bounds of the color spaces if (palette_size > 0) desc.push_back("PLA"); // try palette (including alpha) if (palette_size > 0) desc.push_back("PLT"); // try palette (without alpha) if (acb == -1) { // not specified if ACB should be used if (nb_pixels > 10000) desc.push_back("ACB"); // try auto color buckets on large images } else if (acb) desc.push_back("ACB"); // try auto color buckets if forced if (method.o == Optional::undefined) { // no method specified, pick one heuristically if (nb_pixels < 10000) method.encoding=flifEncoding::nonInterlaced; // if the image is 
small, not much point in doing interlacing else method.encoding=flifEncoding::interlaced; // default method: interlacing } if (images.size() > 1) { desc.push_back("DUP"); // find duplicate frames desc.push_back("FRS"); // get the shapes of the frames if (lookback != 0) desc.push_back("FRA"); // make a "deep" alpha channel (negative values are transparent to some previous frame) } if (learn_repeats < 0) { // no number of repeats specified, pick a number heuristically learn_repeats = TREE_LEARN_REPEATS; if (nb_pixels < 5000) learn_repeats--; // avoid large trees for small images if (learn_repeats < 0) learn_repeats=0; } FILE *file = fopen(argv[0],"wb"); if (!file) return false; FileIO fio(file, argv[0]); return flif_encode(fio, images, desc, method.encoding, learn_repeats, acb, frame_delay, palette_size, lookback); }
// Worker-thread loop of the async texture loader: pops AsyncStruct requests
// from _asyncStructQueue, decodes the image file off the GL thread, and pushes
// an ImageInfo for the main thread to turn into a Texture2D. Sleeps on
// _sleepCondition when idle; exits (and frees both queues) when _needQuit is set.
void TextureCache::loadImage()
{
    AsyncStruct *asyncStruct = nullptr;

    while (true)
    {
        std::queue<AsyncStruct*> *pQueue = _asyncStructQueue;
        _asyncStructQueueMutex.lock();
        if (pQueue->empty())
        {
            _asyncStructQueueMutex.unlock();
            if (_needQuit) {
                break;
            }
            else {
                // No pending requests: block until addImageAsync signals new work.
                std::unique_lock<std::mutex> lk(_sleepMutex);
                _sleepCondition.wait(lk);
                continue;
            }
        }
        else
        {
            asyncStruct = pQueue->front();
            pQueue->pop();
            _asyncStructQueueMutex.unlock();
        }

        Image *image = nullptr;
        bool generateImage = false;

        auto it = _textures.find(asyncStruct->filename);
        if( it == _textures.end() )
        {
           // Not cached yet — but skip decoding if the same file is already
           // waiting in the ImageInfo queue (avoids decoding it twice).
           _imageInfoMutex.lock();
           ImageInfo *imageInfo;
           size_t pos = 0;
           size_t infoSize = _imageInfoQueue->size();
           for (; pos < infoSize; pos++)
           {
               imageInfo = (*_imageInfoQueue)[pos];
               if(imageInfo->asyncStruct->filename.compare(asyncStruct->filename) == 0)
                   break;
           }
           _imageInfoMutex.unlock();
           if(infoSize == 0 || pos == infoSize)
               generateImage = true;
        }

        if (generateImage)
        {
            const std::string& filename = asyncStruct->filename;
            // generate image
            image = new (std::nothrow) Image();
            if (image && !image->initWithImageFileThreadSafe(filename))
            {
                CC_SAFE_RELEASE(image);
                CCLOG("can not load %s", filename.c_str());
                // NOTE(review): asyncStruct is not freed on this path and its
                // callback is never invoked — looks like a leak; confirm.
                continue;
            }
        }

        // generate image info
        // (image stays nullptr when the texture was cached or already queued;
        // the consumer handles that case.)
        ImageInfo *imageInfo = new (std::nothrow) ImageInfo();
        imageInfo->asyncStruct = asyncStruct;
        imageInfo->image = image;

        // put the image info into the queue
        _imageInfoMutex.lock();
        _imageInfoQueue->push_back(imageInfo);
        _imageInfoMutex.unlock();
    }

    // Thread is quitting: dispose of both queues.
    if(_asyncStructQueue != nullptr)
    {
        delete _asyncStructQueue;
        _asyncStructQueue = nullptr;
        delete _imageInfoQueue;
        _imageInfoQueue = nullptr;
    }
}
// Resizes the drawing surface to w x h pixels (3 channels).
// Non-positive dimensions are ignored and leave the current size untouched.
void set_size(int w, int h)
{
    if (w <= 0 || h <= 0)
        return;
    _w = w;
    _h = h;
    _img.resize(w, h, 3);
}
// Recreates every cached texture after a GL context loss (e.g. Android app
// resume): first releases all GL texture ids to avoid id collisions, then
// re-initializes each texture from whatever source it was originally created
// from (file, raw data, string, or Image object).
void VolatileTextureMgr::reloadAllTextures()
{
    _isReloading = true;

    // we need to release all of the glTextures to avoid collisions of texture id's when reloading the textures onto the GPU
    for(auto iter = _textures.begin(); iter != _textures.end(); ++iter)
    {
        (*iter)->_texture->releaseGLTexture();
    }

    CCLOG("reload all texture");
    auto iter = _textures.begin();

    while (iter != _textures.end())
    {
        VolatileTexture *vt = *iter++;

        switch (vt->_cashedImageType)
        {
        case VolatileTexture::kImageFile:
            {
                // Re-decode the image file from disk.
                Image* image = new (std::nothrow) Image();
                Data data = FileUtils::getInstance()->getDataFromFile(vt->_fileName);

                if (image && image->initWithImageData(data.getBytes(), data.getSize()))
                {
                    // Recreate under the pixel format the texture was originally created with.
                    Texture2D::PixelFormat oldPixelFormat = Texture2D::getDefaultAlphaPixelFormat();
                    Texture2D::setDefaultAlphaPixelFormat(vt->_pixelFormat);
                    vt->_texture->initWithImage(image);
                    Texture2D::setDefaultAlphaPixelFormat(oldPixelFormat);
                }

                CC_SAFE_RELEASE(image);
            }
            break;
        case VolatileTexture::kImageData:
            {
                // Re-upload the raw pixel data kept by the VolatileTexture.
                vt->_texture->initWithData(vt->_textureData,
                                           vt->_dataLen,
                                          vt->_pixelFormat,
                                          vt->_textureSize.width,
                                          vt->_textureSize.height,
                                          vt->_textureSize);
            }
            break;
        case VolatileTexture::kString:
            {
                // Re-render the cached text with its original font definition.
                vt->_texture->initWithString(vt->_text.c_str(), vt->_fontDefinition);
            }
            break;
        case VolatileTexture::kImage:
            {
                // Re-upload from the retained Image object.
                vt->_texture->initWithImage(vt->_uiImage);
            }
            break;
        default:
            break;
        }
        if (vt->_hasMipmaps) {
            vt->_texture->generateMipmap();
        }
        vt->_texture->setTexParameters(vt->_texParams);
    }

    _isReloading = false;
}
// Double-buffered paint of the checkbox control: draws the parent background,
// one frame from an 8-frame image strip, then the caption text.
//
// The strip holds 8 equally sized frames, laid out horizontally or vertically
// depending on m_bkimState: frames [0..3] are the unchecked states and
// [4..7] the checked states, indexed by m_nCtrlState within each group.
void CDrawCheckbox::Draw(HDC hDC)
{
    RECT rcWnd = {};
    GetClientRect(m_hWnd, &rcWnd);
    int nWidth = rcWnd.right - rcWnd.left;
    int nHeight = rcWnd.bottom - rcWnd.top;

    // Off-screen buffer to avoid flicker.
    HDC hMemDC = CreateCompatibleDC(hDC);
    HBITMAP hBmpMem = CreateCompatibleBitmap(hDC, nWidth, nHeight);
    HBITMAP hOldBmpMem = (HBITMAP)SelectObject(hMemDC, hBmpMem);

    // Pick the highlighted or normal image strip.
    Image *pDrawImg = NULL;
    if(!m_fLight)
    {
        pDrawImg = m_pNormalStateImg;
    }
    else
    {
        pDrawImg = m_pLightStateImg;
    }

    // Robustness: no strip assigned — release GDI resources and bail out
    // instead of dereferencing a null image.
    if (pDrawImg == NULL)
    {
        SelectObject(hMemDC, hOldBmpMem);
        DeleteObject(hBmpMem);
        DeleteDC(hMemDC);
        return;
    }

    // Size of a single frame within the 8-frame strip.
    int nBmpWidth;
    int nBmpHeight;
    if (m_bkimState == BKLS_HORIZONTAL)
    {
        nBmpWidth = pDrawImg->GetWidth() / 8;
        nBmpHeight = pDrawImg->GetHeight();
    }
    else
    {
        nBmpWidth = pDrawImg->GetWidth();
        nBmpHeight = pDrawImg->GetHeight() / 8;
    }

    // Paint the parent window's background into the buffer first.
    BitBlt(hMemDC, 0, 0, nWidth, nHeight, m_DrawBackgroundDC.m_hDC, 0, 0, SRCCOPY);

    SetBkMode(hMemDC, TRANSPARENT);
    Graphics graphics(hMemDC); // Create a GDI+ graphics object

    // The check glyph is drawn as an nHeight x nHeight square at the left
    // edge of the control.
    RectF gRect;
    gRect.X = (REAL)0;
    gRect.Y = (REAL)0;
    gRect.Width = (REAL)nHeight;
    gRect.Height = (REAL)nHeight;

    if (m_bkimState == BKLS_HORIZONTAL)
    {
        // Source X offset: current state frame, plus 4 frames when checked.
        graphics.DrawImage(pDrawImg, gRect,
            (REAL)nBmpWidth * m_nCtrlState + m_bCheck * 4 * nBmpWidth, 0,
            (REAL)nBmpWidth, (REAL)nBmpHeight, UnitPixel);
    }
    else
    {
        // Source Y offset.  BUGFIX: the checked-group offset must be measured
        // in frame heights for a vertical strip; it previously used
        // m_bCheck * 4 * nBmpWidth, selecting the wrong frame.
        graphics.DrawImage(pDrawImg, gRect,
            0, (REAL)nBmpHeight * m_nCtrlState + m_bCheck * 4 * nBmpHeight,
            (REAL)nBmpWidth, (REAL)nBmpHeight, UnitPixel);
    }

    // Draw the caption text.  Currently single-line only; adjust the
    // DrawText flags here to support multi-line captions.
    TCHAR szCaption[g_nCaptionLen] = {};
    GetWindowText(m_hWnd, szCaption, g_nCaptionLen - 1);
    if(_tcslen(szCaption) > 0)
    {
        HFONT hOldFont = (HFONT)SelectObject(hMemDC, m_hFont);
        rcWnd.left += nHeight + 2; // text starts just right of the glyph
        rcWnd.top ++;
        SetTextColor(hMemDC, m_colorText);
        DrawText(hMemDC, szCaption, _tcslen(szCaption), &rcWnd, DT_VCENTER|DT_SINGLELINE);
        SelectObject(hMemDC, hOldFont);
    }

    // Flip the buffer to the screen, then tear down GDI/GDI+ state.
    BitBlt(hDC, 0, 0, nWidth, nHeight, hMemDC, 0, 0, SRCCOPY);
    SetBkMode(hMemDC, OPAQUE);
    graphics.ReleaseHDC(hMemDC);
    SelectObject(hMemDC, hOldBmpMem);
    DeleteObject(hBmpMem);
    DeleteDC(hMemDC);
}
///////////////////////////////////////////////////////////////////////////////
// register: compare a reference image to the current (``test'') image.
//
// The reference image must be no larger than the current image, in
// both dimensions.  Type doesn't matter, as both images will be
// converted to RGBA.
//
// The reference image will be slid into all possible positions over
// the current image, and the sum of the mean absolute errors for all
// four color channels computed at each position.
//
// Returns an Image::Registration struct that specifies the position at
// which the sum of mean absolute errors was minimal, plus the statistics
// at that position.
///////////////////////////////////////////////////////////////////////////////
Image::Registration
Image::reg(Image& ref) {
	int wt = width();	// Width of test image, in pixels.
	int ht = height();	// Height of test image, in pixels.
	int wr = ref.width();	// Width of reference image, in pixels.
	int hr = ref.height();	// Height of reference image, in pixels.
	int dh = ht - hr;	// Difference in heights, in pixels.
	int dw = wt - wr;	// Difference in widths, in pixels.
	int i;

	if (dh < 0 || dw < 0)
		throw RefImageTooLarge();

	int wt4 = 4 * wt;	// Width of test image, in RGBA samples.
	int wr4 = 4 * wr;	// Width of ref image, in RGBA samples.
	int dw4 = 4 * dw;	// Difference in widths, in samples.

	double** testPix;	// Buffers containing all the rows of
				// the test image that need to be
				// accessed concurrently.
	// XXX sure would be nice to use auto_ptr to allocate this stuff,
	// but it isn't supported in the STL that came with egcs 1.1.2.
	// XXX testPix = new (double*) [dh + 1]; // VC 6 seems to misinterpret this as a c-style cast
	testPix = new double* [dh + 1];
	for (/*int */i = 0; i <= dh; ++i)
		testPix[i] = new double [wt4];

	double* refPix = new double [wr4];	// Buffer containing the one row of
						// the reference image that's accessed
						// at any given time.
	BasicStats** stats;	// Buffers containing a statistics-
				// gathering structure for each of
				// the possible reference image
				// positions.
	// XXX stats = new (BasicStats*) [dh + 1]; // VC 6 seems to misinterpret this as a c-style cast
	stats = new BasicStats * [dh + 1];
	for (/*int*/ i = 0; i <= dh; ++i)
		stats[i] = new BasicStats [dw4 + 4];

	// Prime the pump by unpacking the first few rows of the test image:
	// (testPix[0..dh-1] get test rows 0..dh-1; testPix[dh] is filled
	// inside the main loop below.)
	char* testRow = pixels();
	for (/*int*/ i = 0; i < dh; ++i) {
		unpack(wt, testPix[i], testRow);
		testRow += rowSizeInBytes();
	}

	// Now accumulate statistics for one row of the reference image
	// at a time, in all possible positions:
	//
	// NOTE(review): testPix rows are never rotated/shifted as refRow
	// advances, so for dh > 0 every vertical position j < dh keeps
	// comparing against the initially primed test rows rather than rows
	// i+j.  This looks like a lost row-rotation step — verify against the
	// upstream sources before relying on the vertical-offset result.
	char* refRow = ref.pixels();
	for (/*int*/ i = 0; i < hr; ++i) {
		// Get the next row of the reference image:
		ref.unpack(wr, refPix, refRow);
		refRow += ref.rowSizeInBytes();
		// Get the next row of the test image:
		unpack(wt, testPix[dh], testRow);
		testRow += rowSizeInBytes();
		// Accumulate absolute error for R,G,B,A in all positions:
		// (j = vertical offset; k = horizontal offset in samples;
		// m walks the reference row, sample by sample)
		for (int j = 0; j <= dh; ++j)
			for (int k = 0; k <= dw4; k += 4)
				for (int m = 0; m < wr4; m += 4) {
					stats[j][k+0].sample( fabs( refPix[m+0]-testPix[j][m+k+0]));
					stats[j][k+1].sample( fabs( refPix[m+1]-testPix[j][m+k+1]));
					stats[j][k+2].sample( fabs( refPix[m+2]-testPix[j][m+k+2]));
					stats[j][k+3].sample( fabs( refPix[m+3]-testPix[j][m+k+3]));
				}
	}

	// Now find the position for which the sum of the mean absolute errors
	// is minimal:
	double minErrorSum = DBL_MAX;
	int minI = 0;	// best vertical offset, in pixels
	int minJ = 0;	// best horizontal offset, in samples
	for (/*int*/ i = 0; i <= dh; ++i)
		for (int j = 0; j <= dw4; j += 4) {
			double errorSum = stats[i][j+0].mean() + stats[i][j+1].mean()
				+ stats[i][j+2].mean() + stats[i][j+3].mean();
			if (errorSum < minErrorSum) {
				minErrorSum = errorSum;
				minI = i;
				minJ = j;
			}
		}

	Registration r;
	r.wOffset = minJ / 4;	// convert sample offset back to pixels
	r.hOffset = minI;
	r.stats[0] = stats[minI][minJ+0];
	r.stats[1] = stats[minI][minJ+1];
	r.stats[2] = stats[minI][minJ+2];
	r.stats[3] = stats[minI][minJ+3];

	// Clean up:
	for (/*int*/ i = 0; i <= dh; ++i)
		delete[] testPix[i];
	delete[] testPix;
	delete[] refPix;
	for (/*int*/ i = 0; i <= dh; ++i)
		delete[] stats[i];
	delete[] stats;

	return r;
} // Image::register
// FlyCapture2 extended-shutter example: connects to the first camera,
// disables the auto frame rate (or, where FRAME_RATE is unsupported —
// Dragonfly models — flips the extended-shutter register), sets a long
// manual shutter time, grabs a few timestamped frames, then restores the
// camera's original state.  Returns 0 on success, -1 on any SDK error.
int main(int /*argc*/, char** /*argv*/)
{
    PrintBuildInfo();

    const int k_numImages = 5;  // number of frames to capture
    Error error;

    BusManager busMgr;
    unsigned int numCameras;
    error = busMgr.GetNumOfCameras(&numCameras);
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }

    printf( "Number of cameras detected: %u\n", numCameras );

    if ( numCameras < 1 )
    {
        printf( "Insufficient number of cameras... exiting\n" );
        return -1;
    }

    PGRGuid guid;
    error = busMgr.GetCameraFromIndex(0, &guid);
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }

    Camera cam;

    // Connect to a camera
    error = cam.Connect(&guid);
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }

    // Get the camera information
    CameraInfo camInfo;
    error = cam.GetCameraInfo(&camInfo);
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }

    PrintCameraInfo(&camInfo);

    // Check if the camera supports the FRAME_RATE property
    PropertyInfo propInfo;
    propInfo.type = FRAME_RATE;
    error = cam.GetPropertyInfo( &propInfo );
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }

    // Remember which mechanism was used so it can be undone at the end.
    ExtendedShutterType shutterType = NO_EXTENDED_SHUTTER;
    if ( propInfo.present == true )
    {
        // Turn off frame rate
        Property prop;
        prop.type = FRAME_RATE;
        error = cam.GetProperty( &prop );
        if (error != PGRERROR_OK)
        {
            PrintError( error );
            return -1;
        }
        prop.autoManualMode = false;
        prop.onOff = false;
        error = cam.SetProperty( &prop );
        if (error != PGRERROR_OK)
        {
            PrintError( error );
            return -1;
        }
        shutterType = GENERAL_EXTENDED_SHUTTER;
    }
    else
    {
        // Frame rate property does not appear to be supported.
        // Disable the extended shutter register instead.
        // This is only applicable for Dragonfly.
        const unsigned int k_extendedShutter = 0x1028;
        unsigned int extendedShutterRegVal = 0;
        error = cam.ReadRegister( k_extendedShutter, &extendedShutterRegVal );
        if (error != PGRERROR_OK)
        {
            PrintError( error );
            return -1;
        }

        // Bit 31 indicates the extended-shutter feature is present.
        std::bitset<32> extendedShutterBS( extendedShutterRegVal );
        if ( extendedShutterBS[31] == true )
        {
            // Set the camera into extended shutter mode
            error = cam.WriteRegister( k_extendedShutter, 0x80020000 );
            if (error != PGRERROR_OK)
            {
                PrintError( error );
                return -1;
            }
        }
        else
        {
            printf( "Frame rate and extended shutter are not supported... exiting\n" );
            return -1;
        }
        shutterType = DRAGONFLY_EXTENDED_SHUTTER;
    }

    // Set the shutter property of the camera (manual, absolute value in ms)
    Property prop;
    prop.type = SHUTTER;
    error = cam.GetProperty( &prop );
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }
    prop.autoManualMode = false;
    prop.absControl = true;
    const float k_shutterVal = 3000.0;
    prop.absValue = k_shutterVal;
    error = cam.SetProperty( &prop );
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }
    printf( "Shutter time set to %.2fms\n", k_shutterVal );

    // Enable timestamping so frames can be verified below
    EmbeddedImageInfo embeddedInfo;
    error = cam.GetEmbeddedImageInfo( &embeddedInfo );
    if ( error != PGRERROR_OK )
    {
        PrintError( error );
        return -1;
    }
    if ( embeddedInfo.timestamp.available != 0 )
    {
        embeddedInfo.timestamp.onOff = true;
    }
    error = cam.SetEmbeddedImageInfo( &embeddedInfo );
    if ( error != PGRERROR_OK )
    {
        PrintError( error );
        return -1;
    }

    // Start the camera
    error = cam.StartCapture();
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }

    // Grab k_numImages frames, printing each embedded timestamp.
    for ( int i=0; i < k_numImages; i++ )
    {
        Image image;
        error = cam.RetrieveBuffer( &image );
        if (error != PGRERROR_OK)
        {
            PrintError( error );
            return -1;
        }
        TimeStamp timestamp = image.GetTimeStamp();
        printf( "TimeStamp [%d %d]\n", timestamp.cycleSeconds, timestamp.cycleCount);
    }

    // Stop capturing images
    error = cam.StopCapture();
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }

    // Set the camera back to its original state: shutter back to auto...
    prop.type = SHUTTER;
    error = cam.GetProperty( &prop );
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }
    prop.autoManualMode = true;
    error = cam.SetProperty( &prop );
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }

    // ...then undo whichever extended-shutter mechanism was engaged above.
    if ( shutterType == GENERAL_EXTENDED_SHUTTER )
    {
        Property prop;
        prop.type = FRAME_RATE;
        error = cam.GetProperty( &prop );
        if (error != PGRERROR_OK)
        {
            PrintError( error );
            return -1;
        }
        prop.autoManualMode = true;
        prop.onOff = true;
        error = cam.SetProperty( &prop );
        if (error != PGRERROR_OK)
        {
            PrintError( error );
            return -1;
        }
    }
    else if ( shutterType == DRAGONFLY_EXTENDED_SHUTTER )
    {
        const unsigned int k_extendedShutter = 0x1028;
        unsigned int extendedShutterRegVal = 0;
        error = cam.ReadRegister( k_extendedShutter, &extendedShutterRegVal );
        if (error != PGRERROR_OK)
        {
            PrintError( error );
            return -1;
        }
        std::bitset<32> extendedShutterBS( extendedShutterRegVal );
        if ( extendedShutterBS[31] == true )
        {
            // Set the camera into extended shutter mode
            error = cam.WriteRegister( k_extendedShutter, 0x80000000 );
            if (error != PGRERROR_OK)
            {
                PrintError( error );
                return -1;
            }
        }
    }

    // Disconnect the camera
    error = cam.Disconnect();
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }

    printf( "Done! Press Enter to exit...\n" );
    getchar();

    return 0;
}
int main( int argc, char** argv ) { // parse command line ---------------------------------------------- po::options_description general_opt("Allowed options are: "); general_opt.add_options() ("help,h", "display this message") ("input-file,i", po::value<std::string>(), "vol file (.vol) , pgm3d (.p3d or .pgm3d) file or sdp (sequence of discrete points)" ) ("thresholdMin,m", po::value<int>()->default_value(0), "threshold min to define binary shape" ) ("thresholdMax,M", po::value<int>()->default_value(255), "threshold max to define binary shape" ) ("transparency,t", po::value<uint>()->default_value(255), "transparency") ; bool parseOK=true; po::variables_map vm; try { po::store(po::parse_command_line(argc, argv, general_opt), vm); } catch(const std::exception& ex) { parseOK=false; trace.info()<< "Error checking program options: "<< ex.what()<< endl; } po::notify(vm); if( !parseOK || vm.count("help")||argc<=1) { std::cout << "Usage: " << argv[0] << " [input-file]\n" << "Display volume file as a voxel set by using QGLviewer" << general_opt << "\n"; return 0; } if(! vm.count("input-file")) { trace.error() << " The file name was defined" << endl; return 0; } string inputFilename = vm["input-file"].as<std::string>(); int thresholdMin = vm["thresholdMin"].as<int>(); int thresholdMax = vm["thresholdMax"].as<int>(); unsigned char transp = vm["transparency"].as<uint>(); QApplication application(argc,argv); Viewer3D viewer; viewer.setWindowTitle("simple Volume Viewer"); viewer.show(); typedef ImageSelector<Domain, unsigned char>::Type Image; string extension = inputFilename.substr(inputFilename.find_last_of(".") + 1); if(extension!="vol" && extension != "p3d" && extension != "pgm3D" && extension != "pgm3d" && extension != "sdp") { trace.info() << "File extension not recognized: "<< extension << std::endl; return 0; } if(extension=="vol" || extension=="pgm3d" || extension=="pgm3D") { Image image = (extension=="vol")? 
VolReader<Image>::importVol( inputFilename ): PNMReader<Image>::importPGM3D( inputFilename ); trace.info() << "Image loaded: "<<image<< std::endl; Domain domain = image.domain(); GradientColorMap<long> gradient( thresholdMin, thresholdMax); gradient.addColor(Color::Blue); gradient.addColor(Color::Green); gradient.addColor(Color::Yellow); gradient.addColor(Color::Red); for(Domain::ConstIterator it = domain.begin(), itend=domain.end(); it!=itend; ++it) { unsigned char val= image( (*it) ); Color c= gradient(val); if(val<=thresholdMax && val >=thresholdMin) { viewer << CustomColors3D(Color((float)(c.red()), (float)(c.green()),(float)(c.blue()), transp), Color((float)(c.red()), (float)(c.green()),(float)(c.blue()), transp)); viewer << *it; } } } else if(extension=="sdp") { vector<Z3i::Point> vectVoxels = PointListReader<Z3i::Point>::getPointsFromFile(inputFilename); for(int i=0; i< vectVoxels.size(); i++) { viewer << vectVoxels.at(i); } } viewer << Viewer3D::updateDisplay; return application.exec(); }
// Loads a grayscale image, wraps it in an OpenVX vx_image (by copying or by
// handing OpenVX the cv::Mat's own memory, per `mode`), runs it through the
// processing graph, and shows the result.  Modes in use here are COPY, MAP
// and USER_MEM.  Returns 0 on success, a vx_status / -1 on failure.
int ovxDemo(std::string inputPath, UserMemoryMode mode)
{
    using namespace cv;
    using namespace ivx;

    Mat image = imread(inputPath, IMREAD_GRAYSCALE);
    if (image.empty()) return -1;

    //check image format
    if (image.depth() != CV_8U || image.channels() != 1) return -1;

    try
    {
        Context context = Context::create();
        //put user data from cv::Mat to vx_image
        vx_df_image color = Image::matTypeToFormat(image.type());
        vx_uint32 width = image.cols, height = image.rows;
        Image ivxImage;
        if (mode == COPY)
        {
            ivxImage = Image::create(context, width, height, color);
            ivxImage.copyFrom(0, image);
        }
        else
        {
            // USER_MEM or MAP: OpenVX works directly on the Mat's buffer.
            ivxImage = Image::createFromHandle(context, color, Image::createAddressing(image), image.data);
        }

        Image ivxResult;
        Image::Patch resultPatch;
        Mat output;
        if (mode == COPY || mode == MAP)
        {
            //we will copy or map data from vx_image to cv::Mat
            ivxResult = ivx::Image::create(context, width, height, VX_DF_IMAGE_U8);
        }
        else // mode == USER_MEM (comment previously referenced a
             // nonexistent MAP_TO_VX mode)
        {
            //create vx_image based on user data, no copying required
            output = cv::Mat(height, width, CV_8U, cv::Scalar(0));
            ivxResult = Image::createFromHandle(context, Image::matTypeToFormat(CV_8U),
                                                Image::createAddressing(output), output.data);
        }

        Graph graph = createProcessingGraph(ivxImage, ivxResult);

        // Graph execution
        graph.process();

        //getting resulting image in cv::Mat
        if (mode == COPY)
        {
            ivxResult.copyTo(0, output);
        }
        else if (mode == MAP)
        {
            //create cv::Mat based on vx_image mapped data
            resultPatch.map(ivxResult, 0, ivxResult.getValidRegion());
            //generally this is very bad idea!
            //but in our case unmap() won't happen until output is in use
            output = resultPatch.getMat();
        }
        else // mode == USER_MEM (comment previously referenced a
             // nonexistent MAP_TO_VX mode)
        {
#ifdef VX_VERSION_1_1
            //we should take user memory back from vx_image before using it (even before reading)
            ivxResult.swapHandle();
#endif
        }

        //here output goes
        cv::imshow("processing result", output);
        cv::waitKey(0);
        cv::destroyAllWindows();

#ifdef VX_VERSION_1_1
        if (mode != COPY)
        {
            //we should take user memory back before release
            //(it's not done automatically according to standard)
            ivxImage.swapHandle();
            if (mode == USER_MEM) ivxResult.swapHandle();
        }
#endif

        //the line is unnecessary since unmapping is done on destruction of patch
        //resultPatch.unmap();
    }
    catch (const ivx::RuntimeError& e)
    {
        std::cerr << "Error: code = " << e.status() << ", message = " << e.what() << std::endl;
        return e.status();
    }
    catch (const ivx::WrapperError& e)
    {
        std::cerr << "Error: message = " << e.what() << std::endl;
        return -1;
    }

    return 0;
}
/// @copydoc ResourceHandler::CacheResource() bool Texture2dResourceHandler::CacheResource( ObjectPreprocessor* pObjectPreprocessor, Resource* pResource, const String& rSourceFilePath ) { HELIUM_ASSERT( pObjectPreprocessor ); HELIUM_ASSERT( pResource ); Texture2d* pTexture = Reflect::AssertCast< Texture2d >( pResource ); // Load the source texture data. FileStream* pSourceFileStream = FileStream::OpenFileStream( rSourceFilePath, FileStream::MODE_READ ); if( !pSourceFileStream ) { HELIUM_TRACE( TraceLevels::Error, ( TXT( "Texture2dResourceHandler::CacheResource(): Failed to open source texture file \"%s\" for " ) TXT( "reading.\n" ) ), *rSourceFilePath ); return false; } Image sourceImage; bool bLoadSuccess; { BufferedStream sourceStream( pSourceFileStream ); // Determine the proper image loader to used based on the image extension. FilePath sourceFilePath = *rSourceFilePath; String extension( sourceFilePath.Extension().c_str() ); if( extension == TXT( "png" ) ) { bLoadSuccess = PngImageLoader::Load( sourceImage, &sourceStream ); } else { bLoadSuccess = TgaImageLoader::Load( sourceImage, &sourceStream ); } } delete pSourceFileStream; if( !bLoadSuccess ) { HELIUM_TRACE( TraceLevels::Error, TXT( "Texture2dResourceHandler::CacheResource(): Failed to load source texture image \"%s\".\n" ), *rSourceFilePath ); } // Convert the source image to a 32-bit BGRA image for the NVIDIA texture tools library to process. 
Image::Format bgraFormat; bgraFormat.SetBytesPerPixel( 4 ); bgraFormat.SetChannelBitCount( Image::CHANNEL_RED, 8 ); bgraFormat.SetChannelBitCount( Image::CHANNEL_GREEN, 8 ); bgraFormat.SetChannelBitCount( Image::CHANNEL_BLUE, 8 ); bgraFormat.SetChannelBitCount( Image::CHANNEL_ALPHA, 8 ); #if HELIUM_ENDIAN_LITTLE bgraFormat.SetChannelBitOffset( Image::CHANNEL_RED, 16 ); bgraFormat.SetChannelBitOffset( Image::CHANNEL_GREEN, 8 ); bgraFormat.SetChannelBitOffset( Image::CHANNEL_BLUE, 0 ); bgraFormat.SetChannelBitOffset( Image::CHANNEL_ALPHA, 24 ); #else bgraFormat.SetChannelBitOffset( Image::CHANNEL_RED, 8 ); bgraFormat.SetChannelBitOffset( Image::CHANNEL_GREEN, 16 ); bgraFormat.SetChannelBitOffset( Image::CHANNEL_BLUE, 24 ); bgraFormat.SetChannelBitOffset( Image::CHANNEL_ALPHA, 0 ); #endif Image bgraImage; HELIUM_VERIFY( sourceImage.Convert( bgraImage, bgraFormat ) ); sourceImage.Unload(); // If the texture is flagged to ignore alpha data, set the alpha channel to fully opaque for each pixel in the // image. Otherwise, check if the image is fully opaque (in which case alpha data can be ignored during // compression, and we can potentially use cheaper compressed formats). uint32_t imageWidth = bgraImage.GetWidth(); uint32_t imageHeight = bgraImage.GetHeight(); uint32_t pixelCount = imageWidth * imageHeight; void* pImagePixelData = bgraImage.GetPixelData(); HELIUM_ASSERT( pImagePixelData ); uint8_t* pPixelAlpha = static_cast< uint8_t* >( pImagePixelData ) + 3; bool bIgnoreAlpha = pTexture->GetIgnoreAlpha(); if( bIgnoreAlpha ) { for( uint32_t pixelIndex = 0; pixelIndex < pixelCount; ++pixelIndex, pPixelAlpha += 4 ) { *pPixelAlpha = 0xff; } } else { uint32_t pixelIndex; for( pixelIndex = 0; pixelIndex < pixelCount; ++pixelIndex, pPixelAlpha += 4 ) { if( *pPixelAlpha != 0xff ) { break; } } if( pixelIndex >= pixelCount ) { bIgnoreAlpha = true; } } // Set up the input options for the texture compressor. 
Texture::ECompression compression = pTexture->GetCompression(); HELIUM_ASSERT( static_cast< size_t >( compression ) < static_cast< size_t >( Texture::ECompression::MAX ) ); bool bIsNormalMap = Texture::IsNormalMapCompression( compression ); bool bSrgb = pTexture->GetSrgb(); bool bCreateMipmaps = pTexture->GetCreateMipmaps(); nvtt::InputOptions inputOptions; inputOptions.setTextureLayout( nvtt::TextureType_2D, imageWidth, imageHeight ); inputOptions.setMipmapData( pImagePixelData, imageWidth, imageHeight ); inputOptions.setMipmapGeneration( bCreateMipmaps ); inputOptions.setMipmapFilter( nvtt::MipmapFilter_Box ); inputOptions.setWrapMode( nvtt::WrapMode_Repeat ); float gamma = ( bSrgb ? 2.2f : 1.0f ); inputOptions.setGamma( gamma, gamma ); inputOptions.setNormalMap( bIsNormalMap ); inputOptions.setNormalizeMipmaps( bIsNormalMap ); // Set up the output options for the texture compressor. MemoryTextureOutputHandler outputHandler( imageWidth, imageHeight, false, bCreateMipmaps ); nvtt::OutputOptions outputOptions; outputOptions.setOutputHandler( &outputHandler ); outputOptions.setOutputHeader( false ); // Set up the compression options for the texture compressor. nvtt::CompressionOptions compressionOptions; nvtt::Format outputFormat = nvtt::Format_BC1; ERendererPixelFormat pixelFormat = RENDERER_PIXEL_FORMAT_BC1; switch( compression ) { case Texture::ECompression::NONE: { outputFormat = nvtt::Format_RGBA; #if HELIUM_ENDIAN_LITTLE compressionOptions.setPixelFormat( 32, 0xff000000, 0x00ff0000, 0x0000ff00, 0x000000ff ); #else compressionOptions.setPixelFormat( 32, 0x000000ff, 0x0000ff00, 0x00ff0000, 0xff000000 ); #endif pixelFormat = ( bSrgb ? RENDERER_PIXEL_FORMAT_R8G8B8A8_SRGB : RENDERER_PIXEL_FORMAT_R8G8B8A8 ); break; } case Texture::ECompression::COLOR: { outputFormat = ( bIgnoreAlpha ? nvtt::Format_BC1 : nvtt::Format_BC1a ); pixelFormat = ( bSrgb ? 
RENDERER_PIXEL_FORMAT_BC1_SRGB : RENDERER_PIXEL_FORMAT_BC1 ); break; } case Texture::ECompression::COLOR_SHARP_ALPHA: { if( bIgnoreAlpha ) { outputFormat = nvtt::Format_BC1; pixelFormat = ( bSrgb ? RENDERER_PIXEL_FORMAT_BC1_SRGB : RENDERER_PIXEL_FORMAT_BC1 ); } else { outputFormat = nvtt::Format_BC2; pixelFormat = ( bSrgb ? RENDERER_PIXEL_FORMAT_BC2_SRGB : RENDERER_PIXEL_FORMAT_BC2 ); } break; } case Texture::ECompression::COLOR_SMOOTH_ALPHA: { if( bIgnoreAlpha ) { outputFormat = nvtt::Format_BC1; pixelFormat = ( bSrgb ? RENDERER_PIXEL_FORMAT_BC1_SRGB : RENDERER_PIXEL_FORMAT_BC1 ); } else { outputFormat = nvtt::Format_BC3; pixelFormat = ( bSrgb ? RENDERER_PIXEL_FORMAT_BC3_SRGB : RENDERER_PIXEL_FORMAT_BC3 ); } break; } case Texture::ECompression::NORMAL_MAP: { outputFormat = nvtt::Format_BC3n; pixelFormat = RENDERER_PIXEL_FORMAT_BC3; break; } case Texture::ECompression::NORMAL_MAP_COMPACT: { outputFormat = nvtt::Format_BC1; pixelFormat = RENDERER_PIXEL_FORMAT_BC1; break; } } compressionOptions.setFormat( outputFormat ); compressionOptions.setQuality( nvtt::Quality_Normal ); // Compress the texture. nvtt::Compressor compressor; bool bCompressSuccess = compressor.process( inputOptions, compressionOptions, outputOptions ); HELIUM_ASSERT( bCompressSuccess ); if( !bCompressSuccess ) { HELIUM_TRACE( TraceLevels::Error, ( TXT( "Texture2dResourceHandler::CacheResource(): Texture compression failed for texture image " ) TXT( "\"%s\".\n" ) ), *rSourceFilePath ); return false; } // Cache the data for each supported platform. 
const MemoryTextureOutputHandler::MipLevelArray& rMipLevels = outputHandler.GetFace( 0 ); uint32_t mipLevelCount = static_cast< uint32_t >( rMipLevels.GetSize() ); HELIUM_ASSERT( mipLevelCount != 0 ); int32_t pixelFormatIndex = static_cast< int32_t >( pixelFormat ); //PMDTODO: Implement this //BinarySerializer serializer; //for( size_t platformIndex = 0; platformIndex < static_cast< size_t >( Cache::PLATFORM_MAX ); ++platformIndex ) //{ // PlatformPreprocessor* pPreprocessor = pObjectPreprocessor->GetPlatformPreprocessor( // static_cast< Cache::EPlatform >( platformIndex ) ); // if( !pPreprocessor ) // { // continue; // } // Resource::PreprocessedData& rPreprocessedData = pTexture->GetPreprocessedData( // static_cast< Cache::EPlatform >( platformIndex ) ); // // Serialize the persistent data about the texture first. // serializer.SetByteSwapping( pPreprocessor->SwapBytes() ); // serializer.BeginSerialize(); // serializer << imageWidth; // serializer << imageHeight; // serializer << mipLevelCount; // serializer << pixelFormatIndex; // serializer.EndSerialize(); // rPreprocessedData.persistentDataBuffer = serializer.GetPropertyStreamBuffer(); // // Serialize each mip level. // DynamicArray< DynamicArray< uint8_t > >& rSubDataBuffers = rPreprocessedData.subDataBuffers; // rSubDataBuffers.Reserve( mipLevelCount ); // rSubDataBuffers.Resize( mipLevelCount ); // rSubDataBuffers.Trim(); // for( uint32_t mipLevelIndex = 0; mipLevelIndex < mipLevelCount; ++mipLevelIndex ) // { // rSubDataBuffers[ mipLevelIndex ] = rMipLevels[ mipLevelIndex ]; // } // // Platform data is now loaded. // rPreprocessedData.bLoaded = true; //} return true; }
int _tmain(int argc, _TCHAR* argv[]) { FILE *fp = fopen("test.nds", "rb"); if (NULL != fp) { printf("found file\n"); // find banner fseek(fp, 0x68, SEEK_SET); unsigned int bannerOffset = 0; fread(&bannerOffset, sizeof(unsigned int), 1, fp); if (0 != bannerOffset) { // read version fseek(fp, bannerOffset, SEEK_SET); unsigned short version; fread(&version, sizeof(unsigned short), 1, fp); // read english banner fseek(fp, bannerOffset + 0x340, SEEK_SET); wchar_t banner[0x100 + 1]; fread(banner, sizeof(wchar_t), 0x100, fp); banner[0x100] = '\0'; // output info printf("- version: %d\n", version); wprintf(L"- english banner:\n%s\n\n", banner); // read bitmap fseek(fp, bannerOffset + 0x20, SEEK_SET); unsigned char bitmap[(32 * 32) / 2]; fread(bitmap, 1, sizeof(bitmap), fp); // read palette fseek(fp, bannerOffset + 0x220, SEEK_SET); unsigned short palette[16]; fread(palette, 1, sizeof(palette), fp); #if 1 Image output; output.create(32, 32, 24); for (int yblock = 0; yblock < 4; ++yblock) for (int xblock = 0; xblock < 4; ++xblock) { int blockIndex = xblock + yblock * 4; const int blockSize = (8 * 8) / 2; unsigned char *src = &bitmap[blockSize * blockIndex]; for (int y = 0; y < 8; ++y) for (int x = 0; x < 8; ++x) { unsigned char palIndex = src[(y * 8 + x) / 2]; palIndex >>= (x & 1) * 4; unsigned short color = palette[palIndex & 0xf]; unsigned int r = color & ((1 << 5) - 1); unsigned int g = (color >> 5) & ((1 << 5) - 1); unsigned int b = (color >> 10) & ((1 << 5) - 1); r = int((float(r) / 31) * 255); g = int((float(g) / 31) * 255); b = int((float(b) / 31) * 255); RGBQUAD rgbColor; rgbColor.rgbRed = r; rgbColor.rgbGreen = g; rgbColor.rgbBlue = b; output.setPixelColor( xblock * 8 + x, yblock * 8 + y, rgbColor ); } } output.save("icon.png"); #endif }