// TODO should this go into the Logger class? void FeaturePointsRANSAC::drawFull3DMMProjection( cv::Mat img, cv::Point3f centerIn3dmm, cv::Point2f centerInImage, cv::Mat s, cv::Mat t ) { std::vector<std::pair<std::string, cv::Point2f> > pointsInImage; cv::Mat zbuffer(img.rows, img.cols, CV_32FC1, cv::Scalar::all(0.0f)); cv::Mat pointsIn3dCentered(this->modelMeanShp.size()/3, 3, CV_32F); // As many rows as Ffp's, and 3 cols (x, y, z) for (unsigned int i=0; i<this->modelMeanShp.size()/3; ++i) { int theVertexToGet = i; pointsIn3dCentered.at<float>(i, 0) = this->modelMeanShp[3*theVertexToGet+0]-centerIn3dmm.x; pointsIn3dCentered.at<float>(i, 1) = this->modelMeanShp[3*theVertexToGet+1]-centerIn3dmm.y; pointsIn3dCentered.at<float>(i, 2) = this->modelMeanShp[3*theVertexToGet+2]-centerIn3dmm.z; float x = pointsIn3dCentered.row(i).t().dot(s) + centerInImage.x; float y = pointsIn3dCentered.row(i).t().dot(t) + centerInImage.y; if(x>=0 && y>=0 && x<img.cols && y<img.rows) { std::stringstream sstm; sstm << i; std::string result = sstm.str(); pointsInImage.push_back(std::make_pair(result, cv::Point2f(x, y))); img.at<cv::Vec3b>(y,x)[0] = (uchar)(255.0f * this->modelMeanTex[3*theVertexToGet+2]); img.at<cv::Vec3b>(y,x)[1] = (uchar)(255.0f * this->modelMeanTex[3*theVertexToGet+1]); img.at<cv::Vec3b>(y,x)[2] = (uchar)(255.0f * this->modelMeanTex[3*theVertexToGet+0]); } } }
/* Forward a z-buffer enable/disable request to the underlying layer.
 * ctx    - graphics context; only used for trace output here.
 * status - new z-buffer state, passed straight through to zbuffer().
 * ENTRY/EXIT look like trace-logging macros; their exact behavior is not
 * visible from this file - confirm before restructuring. */
static void _dxf_SET_ZBUFFER_STATUS (void *ctx, int status)
{
  ENTRY(("_dxf_SET_ZBUFFER_STATUS(0x%x, %d)",ctx,status));
  zbuffer(status) ;
  EXIT((""));
}
// Projects a colored point cloud into a per-pixel z-buffer.
//
// Returns a CV_32FC3 matrix the size of frame.depth_image_ whose each cell
// holds the camera-space (x, y, z) of the point mapped there; cells no point
// reaches stay at +infinity. Uses a pinhole model with the principal point
// assumed at the image center (cx, cy) -- TODO confirm against Frame3D.
//
// Pass 1 keeps, per pixel, the point with the smallest z (nearest to camera).
// Pass 2 "splats" each surviving pixel over a (2*window_size+1)^2 window:
// any neighbor whose stored z is more than 'threshold' farther away is
// overwritten, which fills small holes and suppresses points that should be
// occluded. NOTE(review): pass 2 reads and writes the same buffer, so its
// result depends on point-cloud iteration order; keep the loop order intact.
//
// point.z <= 0 is not guarded: z == 0 divides by zero and negative z projects
// behind the camera -- presumably the cloud only contains points in front of
// the camera; verify at the call site.
cv::Mat computeZbuffer(const pcl::PointCloud<pcl::PointXYZRGB>& point_cloud, const Frame3D& frame, int window_size = 2, double threshold = 0.2)
{
    const double inf = std::numeric_limits< double >::infinity();
    // Every cell starts at "infinitely far away".
    cv::Mat zbuffer(frame.depth_image_.rows, frame.depth_image_.cols, CV_32FC3, cv::Vec3f(inf, inf, inf));
    const double focal_length = frame.focal_length_;
    const double sizeX = frame.depth_image_.cols;
    const double sizeY = frame.depth_image_.rows;
    const double cx = sizeX / 2.0;
    const double cy = sizeY / 2.0;

    // Pass 1: nearest point wins each pixel.
    for (const pcl::PointXYZRGB& point : point_cloud) {
        const int u_unscaled = std::round(focal_length * (point.x / point.z) + cx);
        const int v_unscaled = std::round(focal_length * (point.y / point.z) + cy);
        if (u_unscaled < 0 || v_unscaled < 0 || u_unscaled >= sizeX || v_unscaled >= sizeY)
            continue;
        cv::Vec3f& uv_point = zbuffer.at<cv::Vec3f>(v_unscaled, u_unscaled);
        if (uv_point[2] > point.z) {
            uv_point[0] = point.x;
            uv_point[1] = point.y;
            uv_point[2] = point.z;
        }
    }

    // Pass 2: smear each pixel's winner over its neighborhood when it is
    // closer by more than 'threshold' (occlusion handling / hole filling).
    for (const pcl::PointXYZRGB& point : point_cloud) {
        const int u_unscaled = std::round(focal_length * (point.x / point.z) + cx);
        const int v_unscaled = std::round(focal_length * (point.y / point.z) + cy);
        if (u_unscaled < 0 || v_unscaled < 0 || u_unscaled >= sizeX || v_unscaled >= sizeY)
            continue;
        const cv::Vec3f& uv_point = zbuffer.at<cv::Vec3f>(v_unscaled, u_unscaled);
        // Window clamped to the image bounds (k_max/l_max are exclusive).
        const int k_min = std::max(0, v_unscaled - window_size);
        const int k_max = std::min(frame.depth_image_.rows, v_unscaled + window_size + 1);
        const int l_min = std::max(0, u_unscaled - window_size);
        const int l_max = std::min(frame.depth_image_.cols, u_unscaled + window_size + 1);
        for (int k = k_min; k < k_max; ++k) {
            for (int l = l_min; l < l_max; ++l) {
                cv::Vec3f& second_point = zbuffer.at<cv::Vec3f>(k, l);
                if (uv_point[2] + threshold < second_point[2]) {
                    second_point = uv_point;
                }
            }
        }
    }
    return zbuffer;
}
/// Deflate-compresses 'data' with miniz.
/// @param data              Input bytes.
/// @param raw               When true, emit a raw deflate stream (negative
///                          window bits suppress the zlib header/footer).
/// @param compression_level Forwarded to mz_deflateInit2.
/// @param mode              Compression strategy selector.
/// @return The compressed bytes.
/// @throws Exception on any zlib/miniz failure.
DataBuffer ZLibCompression::compress(const DataBuffer &data, bool raw, int compression_level, CompressionMode mode)
{
	const int window_bits = 15;
	DataBuffer zbuffer(1024*1024); // 1 MiB staging buffer for deflate output
	IODevice_Memory output;

	// Map our strategy enum onto miniz's constants.
	int strategy;
	switch (mode)
	{
	case filtered:     strategy = MZ_FILTERED;     break;
	case huffman_only: strategy = MZ_HUFFMAN_ONLY; break;
	case rle:          strategy = MZ_RLE;          break;
	case fixed:        strategy = MZ_FIXED;        break;
	case default_strategy:
	default:           strategy = MZ_DEFAULT_STRATEGY; break;
	}

	mz_stream zs = { nullptr };
	// Undocumented: if wbits is negative, zlib skips header check
	if (mz_deflateInit2(&zs, compression_level, MZ_DEFLATED, raw ? -window_bits : window_bits, 8, strategy) != MZ_OK)
		throw Exception("Zlib deflateInit failed");

	try
	{
		zs.next_in = (unsigned char *) data.get_data();
		zs.avail_in = data.get_size();

		// Drain the deflater until the whole input has been consumed.
		for (;;)
		{
			zs.next_out = (unsigned char *) zbuffer.get_data();
			zs.avail_out = zbuffer.get_size();

			const int status = mz_deflate(&zs, MZ_FINISH);
			switch (status)
			{
			case MZ_NEED_DICT:    throw Exception("Zlib deflate wants a dictionary!");
			case MZ_DATA_ERROR:   throw Exception("Zip data stream is corrupted");
			case MZ_STREAM_ERROR: throw Exception("Zip stream structure was inconsistent!");
			case MZ_MEM_ERROR:    throw Exception("Zlib did not have enough memory to compress file!");
			case MZ_BUF_ERROR:    throw Exception("Not enough data in buffer when Z_FINISH was used");
			case MZ_OK:
			case MZ_STREAM_END:   break;
			default:              throw Exception("Zlib deflate failed while compressing zip file!");
			}

			const int written = zbuffer.get_size() - zs.avail_out;
			if (written == 0)
				break;
			output.write(zbuffer.get_data(), written);
			if (status == MZ_STREAM_END)
				break;
		}
		mz_deflateEnd(&zs);
	}
	catch (...)
	{
		// Always release the deflate state, even on error.
		mz_deflateEnd(&zs);
		throw;
	}
	return output.get_data();
}
/// Inflate-decompresses 'data' with miniz.
/// @param data Compressed input bytes.
/// @param raw  When true, treat the input as a raw deflate stream (negative
///             window bits make zlib skip the header check).
/// @return The decompressed bytes.
/// @throws Exception on any zlib/miniz failure.
DataBuffer ZLibCompression::decompress(const DataBuffer &data, bool raw)
{
	const int window_bits = 15;
	DataBuffer zbuffer(1024*1024); // 1 MiB staging buffer for inflate output
	IODevice_Memory output;

	mz_stream zs = { nullptr };
	int result = mz_inflateInit2(&zs, raw ? -window_bits : window_bits);
	if (result != MZ_OK)
		throw Exception("Zlib inflateInit failed");

	// FIX: mirror compress() and wrap the loop in try/catch so the inflate
	// state is always released; previously any throw below leaked the stream.
	try
	{
		zs.next_in = (unsigned char *) data.get_data();
		zs.avail_in = data.get_size();

		// Continue feeding zlib data until we get our data:
		while (true)
		{
			zs.next_out = (unsigned char *) zbuffer.get_data();
			zs.avail_out = zbuffer.get_size();

			// Decompress data:
			int result = mz_inflate(&zs, 0);
			if (result == MZ_NEED_DICT) throw Exception("Zlib inflate wants a dictionary!");
			if (result == MZ_DATA_ERROR) throw Exception("Zip data stream is corrupted");
			if (result == MZ_STREAM_ERROR) throw Exception("Zip stream structure was inconsistent!");
			if (result == MZ_MEM_ERROR) throw Exception("Zlib did not have enough memory to decompress file!");
			if (result == MZ_BUF_ERROR) throw Exception("Not enough data in buffer when Z_FINISH was used");
			if (result != MZ_OK && result != MZ_STREAM_END) throw Exception("Zlib inflate failed while decompressing zip file!");

			// Flush whatever this iteration produced.
			output.write(zbuffer.get_data(), zbuffer.get_size() - zs.avail_out);
			if (result == MZ_STREAM_END)
				break;
		}
		mz_inflateEnd(&zs);
	}
	catch (...)
	{
		mz_inflateEnd(&zs);
		throw;
	}
	return output.get_data();
}
void ZBWidget::paintEvent(QPaintEvent *event) { ASSERT_MSG(m_model, "ZBWidget: failed to load model!"); QPainter painter(this); const int width = this->width(); const int height = this->height(); QImage img(width, height, QImage::Format_ARGB32); img.fill(Qt::darkGray); Eigen::MatrixXd zbuffer(width, height); zbuffer.fill(1.0); QRgb *img_data[height]; for ( int i=0; i < height; i++ ) { img_data[i] = (QRgb*)img.scanLine(i); } #if 0 for ( int i=0; i < height; i++ ) for ( int j=0; j < width; j++ ) { double d = std::sqrt((i-height/2)*(i-height/2)+(j-width/2)*(j-width/2)); if ( d < std::min(height/2, width/2) ) { img_data[i][j] = 0xffff0000; } } #endif #if 1 /* This is how our algorithms work: * 0. Setup model matrix * 1. Setup view matrix (camera) * 2. Setup projection matrix * 3. Find out all triangles in the viewing frustum * 4. Filter out all triangles facing backward to camera * 5. Rasterize each triangle into pixels * 6. Set each pixel of the image to its nearest triangle pixel's color */ Matrix4 transform(Matrix4::Identity()); transform *= perspective(60.0f, (float)width/height, 1.0f, 1000.0f); transform *= lookAt(0.0f, 0.0f, m_cameraDistance, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f); transform *= rotateX(m_cameraAngleX); transform *= rotateY(m_cameraAngleY); INFO("transform = (%.2f, %.2f, %.2f, %.2f,\n" " %.2f, %.2f, %.2f, %.2f,\n" " %.2f, %.2f, %.2f, %.2f,\n" " %.2f, %.2f, %.2f, %.2f)", transform(0,0), transform(0,1), transform(0,2), transform(0,3), transform(1,0), transform(1,1), transform(1,2), transform(1,3), transform(2,0), transform(2,1), transform(2,2), transform(2,3), transform(3,0), transform(3,1), transform(3,2), transform(3,3)); std::vector<Triangle> triangles; m_model->getTriangles(triangles, transform); for ( size_t i=0; i < triangles.size(); i++ ) { std::vector<Pixel> pixels; triangles[i].raster(pixels, width, height); for ( size_t j=0; j < pixels.size(); j++ ) { const Pixel &p = pixels[j]; if ( p.x < 0 || p.x >= width || p.y < 0 || p.y >= height ) 
continue; float depth = triangles[i].getDepth(p); if ( depth < zbuffer(p.x, p.y) ) { zbuffer(p.x, p.y) = depth; img_data[height-p.y-1][p.x] = triangles[i].getColor(p); } } } #endif painter.drawImage(QPoint(), img); }
// Computes a new viewport (vp_out) that "zooms" into the user's pick
// rectangle by dollying the camera sideways and shrinking the view frustum,
// without changing the camera direction.
//
// vport        - source Rhino viewport (read-only).
// pick_rect    - user-dragged screen rectangle to zoom into.
// target_point - in/out: repositioned onto the plane through the old target
//                perpendicular to the camera z axis.
// vp_out       - receives the adjusted viewport.
// Returns false if viewport data cannot be read or the z-buffer capture fails.
bool CCommandSampleZoomRotateWindow::DollyWindow( const CRhinoViewport& vport, CRect pick_rect, ON_3dPoint& target_point, ON_Viewport& vp_out )
{
  const ON_Viewport& vp_in = vport.VP();
  vp_out = vp_in;

  // screen port values
  int sleft, sright, stop, sbottom;
  if( !vp_in.GetScreenPort(&sleft, &sright, &sbottom, &stop) )
    return false;

  // frustum values
  double fleft, fright, ftop, fbottom, fnear, ffar;
  if( !vp_in.GetFrustum(&fleft, &fright, &fbottom, &ftop, &fnear, &ffar) )
    return false;

  // camera coordinate system
  ON_3dPoint cam_loc = vp_in.CameraLocation();
  ON_3dVector cam_z = vp_in.CameraZ();

  // capture Depth Buffer
  CRect screen_rect( sleft, stop, sright, sbottom );

  // user-specified rectangle, clipped to the visible screen port
  CRect zoom_rect;
  zoom_rect.IntersectRect( &pick_rect, screen_rect );

  // First capture attempt draws everything except annotations.
  CRhinoZBuffer zbuffer( vport );
  zbuffer.ShowIsocurves( true );
  zbuffer.ShowMeshWires( true );
  zbuffer.ShowCurves( true );
  zbuffer.ShowPoints( true );
  zbuffer.ShowText( true );
  zbuffer.ShowAnnotations( false );

  // In x-ray/ghosted modes shaded surfaces would pollute the depth capture,
  // so shading is disabled for the first attempt.
  bool bSeeThrough = ( vport.XrayShade() || vport.GhostedShade() );
  if( bSeeThrough )
    zbuffer.EnableShading( false );

  bool rc = zbuffer.Capture( zoom_rect );
  if( rc && bSeeThrough && 0 == zbuffer.HitCount() )
  {
    // Nothing hit: retry with shading only (wires/curves/points/text off).
    zbuffer.EnableShading( true );
    zbuffer.ShowIsocurves( false );
    zbuffer.ShowMeshWires( false );
    zbuffer.ShowCurves( false );
    zbuffer.ShowPoints( false );
    zbuffer.ShowText( false );
    rc = zbuffer.Capture( zoom_rect );
  }

  // Screen -> camera transform of the capture (obtained but not used below;
  // presumably needed by the caller or a later revision - confirm).
  ON_Xform s2c;
  if( rc )
    rc = zbuffer.VP().GetXform( ON::screen_cs, ON::camera_cs, s2c );

  if( rc )
  {
    // dolly sideways so zoom rectangle is centered
    ON_3dPoint near_rect[4];
    vp_in.GetNearRect( near_rect[0], near_rect[1], near_rect[2], near_rect[3] );
    double port_dx = fabs( double(sright - sleft) );
    double port_dy = fabs( double(stop - sbottom) );
    // Normalized (0..1) position of the pick rectangle's center in the port.
    ON_Interval lr( sleft, sright);
    double nx = lr.NormalizedParameterAt( 0.5 * (pick_rect.left + pick_rect.right) );
    ON_Interval bt( sbottom, stop);
    double ny = bt.NormalizedParameterAt( 0.5 * (pick_rect.bottom + pick_rect.top) );
    // Bilinear interpolation of the near-clipping rectangle's corners gives
    // the 3d point under the pick center on the near plane.
    ON_3dPoint zoom_center = (1-nx) * (1-ny) * near_rect[0] + nx * (1-ny) * near_rect[1] + (1-nx) * ny * near_rect[2] + nx *ny * near_rect[3];
    ON_3dVector dolly_vec = zoom_center - cam_loc;
    // dolly perpendicular to cam_z (subtract the component along cam_z;
    // ON_3dVector operator* here is the dot product)
    dolly_vec -= dolly_vec * cam_z * cam_z;
    vp_out.DollyCamera( dolly_vec );
    double pick_rect_dx = fabs(double (pick_rect.right - pick_rect.left));
    double pick_rect_dy = fabs(double(pick_rect.top - pick_rect.bottom));
    // expand pick_rect to have the aspect ratio of the viewport:
    // scale the frustum by the larger of the two relative extents
    double d = 1.0;
    if( pick_rect_dx/port_dx < pick_rect_dy/port_dy)
      d = pick_rect_dy / port_dy;
    else
      d = pick_rect_dx / port_dx ;
    fleft *= d;
    fright *= d;
    fbottom *= d;
    ftop *=d ;
    vp_out.SetFrustum( fleft, fright, fbottom, ftop, fnear, ffar );
    // target point on plane perpendicular to cam_z: keep only the cam_z
    // component of the old camera->target offset from the new location
    cam_loc = vp_out.CameraLocation();
    target_point = cam_loc + (target_point - cam_loc) *cam_z * cam_z;
  }
  return rc;
}