stereoview::image_id_type stereoview::add_image(const image_type& img, const option<float>& focal_length)
{
    if (!stored_image_db.empty() && (img.width() != width() || img.height() != height()))
        throw localized_invalid_argument(HERE(nfmt<4>("image size is %1 x %2 while expected size is %3 x %4")
                                              (img.width()) (img.height()) (width()) (height())));

    const unsigned int id = fresh_int();
    add_stored_image(id, img, focal_length);
    return id;
}
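add_image rejects any image whose dimensions differ from those of the images already stored, so callers that ingest images of unknown size may want to handle that case explicitly. A minimal calling sketch, assuming a stereoview instance view, an image img and an option<float> focal_length already exist (these names are placeholders, not part of the shown code):

try
{
    // Sketch only: store the image and keep its id for later lookup.
    const stereoview::image_id_type id = view.add_image(img, focal_length);
    // ... use 'id' to refer to the stored image later ...
}
catch (const localized_invalid_argument&)
{
    // the image dimensions do not match the images already stored
}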
TestRegionGrowing( ) : image( 10, 10 )
{
    // Fill the interior of the 10 x 10 test image (excluding the 1-pixel border) with 1.
    for( size_type j = 1 ; j < image.height( ) - 1 ; j++ )
    {
        for( size_type i = 1 ; i < image.width( ) - 1 ; i++ )
        {
            image( i, j ) = 1;
        }
    }
}
void Image2DIB(const image_type &image, BYTE *dib)
{
    using namespace rss;

    // BITMAPFILEHEADER (14) + BITMAPINFOHEADER (40) + 256-entry palette (1024).
    const int all_header_size = 14 + 40 + 1024;
    // Each DIB scanline is padded to a multiple of 4 bytes.
    const size_t padding = (image.width() % 4) ? (4 - image.width() % 4) : 0;
    const size_t dib_size = all_header_size + (image.width() + padding) * image.height();

    std::ostrstream output(reinterpret_cast<char *>(dib), dib_size);
    BMPImageIO<image_type> image_io;
    if(!image_io.write(output, image))
        throw rss::Exception("library cannot write this dib");
}
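The caller has to allocate the DIB buffer itself, using the same header-plus-padded-rows arithmetic that Image2DIB applies internally. A minimal allocation sketch, assuming an 8-bit image_type named image whose width() and height() accessors match the ones used above:

// Sketch only: size the buffer exactly as Image2DIB expects (8 bpp, 256-entry palette).
const size_t header_size = 14 + 40 + 1024;
const size_t row_stride  = image.width() + ((image.width() % 4) ? (4 - image.width() % 4) : 0);
std::vector<BYTE> dib(header_size + row_stride * image.height());
Image2DIB(image, &dib[0]);   // writes headers, palette and pixel data into the buffer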
void RSS_MultiSensor_FFT_Register(const image_type &VLImage, const image_type &IRImage, float para[6], image_type &result)
{
    using namespace rss;

    image_type temp;
    if(para)
    {
        // Pre-scale the IR image according to the sensor parameters, keeping the scaled image centred.
        double scale1 = para[0] / para[3] * para[4] / para[1] * VLImage.width()  / IRImage.width();
        double scale2 = para[0] / para[3] * para[5] / para[2] * VLImage.height() / IRImage.height();
        double scale  = sqrt(scale1 * scale2);

        HomoModel pre_trans_model;
        pre_trans_model.SetSimilarity(scale, 0,
                                      IRImage.width()  * (1 - scale) / 2.0,
                                      IRImage.height() * (1 - scale) / 2.0);
        HomoTrans<image_type> pre_trans(pre_trans_model, VLImage.size());
        pre_trans(IRImage, temp);
    }
    else
    {
        temp = IRImage;
    }

    // Estimate the similarity transform between the two images and warp the IR image onto the VL image.
    HomoModel model;
    SimilarityEstimation(true, true, false, SimilarityEstimation::OP_NONE, SimilarityEstimation::FILTER_NONE)(VLImage, temp, model);
    HomoTrans<image_type> homo_trans(model, VLImage.size());
    homo_trans(temp, result);
}
void RSS_MultiSensor_PCA_Fusion(const image_type &image1, const image_type &image2, image_type &result)
{
    using namespace rss;

    rss::ImageVector<image_type> input_vector;
    input_vector.push_back(image1);

    // Resample image2 to the size of image1 if the two inputs do not match.
    if(image1.size() != image2.size())
    {
        BilinearInterpolation<image_type> interpolate(image1.size());
        image_type temp;
        interpolate(image2, temp);
        input_vector.push_back(temp);
    }
    else
    {
        input_vector.push_back(image2);
    }

    PCAFusion<image_type> fusion(image1.width(), image1.height());
    fusion(input_vector, result);
}
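The registration and fusion routines above are naturally chained: the IR image is first registered onto the visible-light image, and the registered pair is then fused. A minimal driver sketch, assuming the two source images have already been loaded into image_type objects (the loading step is omitted, and passing a null parameter array simply skips the sensor-based pre-scaling inside the register call):

// Sketch only: align the IR image to the VL image, then fuse the aligned pair.
image_type vl, ir;                 // assumed to be filled elsewhere
image_type registered, fused;

RSS_MultiSensor_FFT_Register(vl, ir, NULL, registered);   // geometric alignment
RSS_MultiSensor_PCA_Fusion(vl, registered, fused);        // PCA-based fusion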
void RSS_MultiSensor_Fleet_Region(const image_type &VLImage, const image_type &IRImage, image_type &result1, image_type &result2, float regions[61])
{
    using namespace rss;

    rss::FleetEdgeDetector detector;
    detector(IRImage, result1);

    // fuse the two inputs; result1 then holds the fused image
    RSS_MultiSensor_Wavelet_Fusion(VLImage, IRImage, result1);

    // copy the fused image into a memory DC so that GDI can draw on it
    HDC hScreenDC = ::GetDC(NULL);
    HDC m_hDC = CreateCompatibleDC(hScreenDC);
    HBITMAP m_hBitmap = CreateCompatibleBitmap(hScreenDC, result1.width(), result1.height());
    ::ReleaseDC(NULL, hScreenDC);
    HPEN m_hPen = CreatePen(PS_SOLID, 1, RGB(255,255,255));
    HBRUSH m_hBrush = (HBRUSH)GetStockObject(NULL_BRUSH);
    SelectObject(m_hDC, m_hBitmap);
    SelectObject(m_hDC, m_hPen);
    SelectObject(m_hDC, m_hBrush);
    SetBkMode(m_hDC, TRANSPARENT);
    SetTextColor(m_hDC, RGB(255,255,255));
    for (int x = 0; x < result1.width(); x++)
    {
        for (int y = 0; y < result1.height(); y++)
        {
            int gray = rss::pixel_cast<rss::GrayPixel>(result1(x, y));
            COLORREF color = RGB(gray, gray, gray);
            SetPixel(m_hDC, x, y, color);
        }
    }

    // export the detected regions (at most 20): count, then (x, y, reliability) triples
    ObjectiveRegions r = detector.objective_region();
    const size_t region_count = min(r.size(), static_cast<size_t>(20));
    regions[0] = static_cast<float>(region_count);
    for (size_t i = 0; i < region_count; i++)
    {
        regions[i*3 + 1] = r[i].center.x();
        regions[i*3 + 2] = r[i].center.y();
        regions[i*3 + 3] = r[i].reliability;
    }

    // fill the background
    result2.resize(result1.size());
    for (int x = 0; x < result2.width(); x++)
        for (int y = 0; y < result2.height(); y++)
            result2(x, y) = 0;

    // fill the sensitive regions
    for (size_t i = 0; i < region_count; i++)
    {
        // outline and label the region in the memory DC
        ::Rectangle(m_hDC, r[i].region.left(), r[i].region.top(), r[i].region.right(), r[i].region.bottom());
        int dx = (r[i].region.right() - r[i].region.left()) / 2;
        int dy = (r[i].region.bottom() - r[i].region.top()) / 2;
        char* str = RSS_MultiSensor_Get_String(i + 1);
        TextOut(m_hDC, r[i].region.left() + dx, r[i].region.top() + dy, str, strlen(str));

        // crop the region from the visible-light image
        image_type region;
        region.resize(r[i].region.size());
        for (int x = r[i].region.left(); x < r[i].region.right(); x++)
            for (int y = r[i].region.top(); y < r[i].region.bottom(); y++)
                region(x - r[i].region.left(), y - r[i].region.top()) = VLImage(x, y);

        // segment the cropped region and paste the segmentation into result2
        RegionGrow<image_type> regionGrow(1.0/5.0, 10);
        image_type region_result;
        regionGrow(region, region_result);
        for (int x = r[i].region.left(); x < r[i].region.right(); x++)
            for (int y = r[i].region.top(); y < r[i].region.bottom(); y++)
                result2(x, y) = region_result(x - r[i].region.left(), y - r[i].region.top());
    }

    // copy the annotated image in the memory DC back into result1
    for (int x = 0; x < VLImage.width(); x++)
        for (int y = 0; y < VLImage.height(); y++)
            result1(x, y) = rss::pixel_cast<rss::RealPixel>(GetRValue(GetPixel(m_hDC, x, y)));

    DeleteObject(m_hBitmap);
    DeleteObject(m_hPen);
    DeleteObject(m_hBrush);   // stock brush; deletion is harmless
    DeleteDC(m_hDC);
}
void stereoview::stored_image::image_to_gray_raw(const image_type& img, unsigned char* raw)
{
    // Flatten the image row by row into an 8-bit grayscale buffer.
    for (int y = 0; y < img.height(); ++y)
        for (int x = 0; x < img.width(); ++x)
            *raw++ = qGray(img.pixel(x, y));
}
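The caller owns the output buffer, which must hold exactly width() * height() bytes. A minimal sketch of that allocation, assuming the call is made from code with access to this member and that img is an image_type instance:

// Sketch only: allocate a tight grayscale buffer and convert into it.
std::vector<unsigned char> gray(img.width() * img.height());
image_to_gray_raw(img, &gray[0]);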
int main( int argc , char *argv[ ] )
{
    // Build the input volume data: a scalar field around two centres offset by +-8 along the x axis
    const vector_type c( ( va.width( ) - 1 ) / 2.0, ( va.height( ) - 1 ) / 2.0, ( va.depth( ) - 1 ) / 2.0 );
    vector_type c0( c ), c1( c );
    c0.x -= 8.0;
    c1.x += 8.0;
    for( size_t k = 0 ; k < va.depth( ) ; k++ )
    {
        for( size_t j = 0 ; j < va.height( ) ; j++ )
        {
            for( size_t i = 0 ; i < va.width( ) ; i++ )
            {
                const vector_type p( static_cast< double >( i ), static_cast< double >( j ), static_cast< double >( k ) );
                const double d0 = distance( p, c0 );
                const double d1 = distance( p, c1 );
                va( i, j, k ) = 31.5 - minimum( d0, d1 );
                if( va( i, j, k ) < 0.0 )
                {
                    va( i, j, k ) = 0.0;
                }
            }
        }
    }

    // Preprocessing for isosurface generation (pass the input volume data).
    // When the threshold is changed dynamically at every draw, as in this sample,
    // preprocessing shortens the generation time; comparing runs with and without
    // this line shows the difference in processing speed.
    mcs.preprocess( va );

    // Isosurface generation parameters
    mcs.offset( -31.5, -15.5, -15.5 );
    mcs.scale( 0.1, 0.1, 0.1 );

    // Reserve generously sized storage for the isosurface results to suppress
    // memory reallocations as the vectors grow
    pv.reserve( 32768 );
    nv.reserve( 32768 );
    sv.reserve( 32768 );

    glutInit( &argc, argv );
    glutInitWindowPosition( 100, 100 );
    glutInitWindowSize( 400, 400 );
    glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH );
    glutCreateWindow( "mist::marching_cubes" );
    glutDisplayFunc( disp );
    glutIdleFunc( idle );

    glMatrixMode( GL_PROJECTION );
    glFrustum( -1, 1, -1, 1, 1, 5 );
    gluLookAt( 0.0, 0.0, 4.0, 0.0, 0.0, -1.0, 0.0, 1.0, 0.0 );

    GLfloat lpos[ ] = { 0, 0, 4, 1 };
    glLightfv( GL_LIGHT0, GL_POSITION, lpos );
    glEnable( GL_LIGHTING );
    glEnable( GL_LIGHT0 );
    glEnable( GL_DEPTH_TEST );
    glEnable( GL_CULL_FACE );
    glCullFace( GL_BACK );
    glMatrixMode( GL_MODELVIEW );

    glutMainLoop( );

    return 0;
}