/**
 * \brief Constructor.
 * \param img The image for the sprite.
 */
bear::visual::sprite::sprite( const image& img )
  : bitmap_rendering_attributes(img.size()), m_image(img),
    m_clip_rectangle(0, 0, img.width(), img.height()),
    m_opaque_rectangle( 0, 0, 0, 0 )
{

} // sprite::sprite()
static void image_insert_tile_row(image& target, int32_t x, int32_t y,
                                  uint32_t width, uint32_t height,
                                  const image& other, uint32_t offx, uint32_t offy)
{
  if (offx + width <= other.width) {
    target.insert_sub(x, y, other, offx, offy, width, height);
    return;
  }
  if (offx != 0) {
    target.insert_sub(x, y, other, offx, offy, other.width - offx, height);
    x += other.width - offx;
    width -= other.width - offx;
  }
  uint32_t xx = 0;
  if (width >= other.width) {
    for (xx = 0; xx < width - other.width; xx += other.width) {
      target.insert_sub(x + xx, y, other, 0, offy, other.width, height);
    }
  }
  if (xx < width) {
    target.insert_sub(x + xx, y, other, 0, offy, width - xx, height);
  }
}
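// Hypothetical companion sketch (not part of the original sources): tiles `other`
// over a full rectangle of `target` by calling the row helper above once per row.
// It assumes `other.height` exists alongside `other.width` and that offy < other.height.
static void image_insert_tile(image& target, int32_t x, int32_t y,
                              uint32_t width, uint32_t height,
                              const image& other, uint32_t offx, uint32_t offy)
{
  uint32_t yy = 0;
  // First (possibly partial) row when a vertical offset into the tile is requested.
  if (offy != 0 && height > 0) {
    uint32_t row_h = other.height - offy;
    if (row_h > height)
      row_h = height;
    image_insert_tile_row(target, x, y, width, row_h, other, offx, offy);
    yy += row_h;
  }
  // Full tile rows.
  while (yy + other.height <= height) {
    image_insert_tile_row(target, x, y + yy, width, other.height, other, offx, 0);
    yy += other.height;
  }
  // Remaining partial row at the bottom.
  if (yy < height)
    image_insert_tile_row(target, x, y + yy, width, height - yy, other, offx, 0);
}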
void save_single_frame()
{
  Setting conf("application.cfg");
  string base = conf.getString("application.res_dir");
  string vid1 = base + "/cities/Travel by Drone- Gang Depot Es, Pasar Minggu, Jakarta 12560, Indonesia (1-59).mp4";
  Mat img = extract(vid1, 1659);
  save(img, base + "/out.jpg");
}
virtual BOOL on_draw(HELEMENT he, UINT draw_type, HDC hdc, const RECT& rc)
{
  if ((DRAW_EVENTS)draw_type != where)
    return FALSE; // do default draw

  int w = rc.right - rc.left;
  int h = rc.bottom - rc.top;

  if (!surface) {
    surface = image::create(w, h);
    redraw = true;
  } else if (w != surface->width() || h != surface->height()) {
    delete surface;
    surface = image::create(w, h);
    redraw = true;
  } else if (redraw) {
    surface->clear();
  }

  if (redraw) {
    graphics gx(surface);
    draw(he, gx, w, h);
    redraw = false;
  }

  surface->blit(hdc, rc.left, rc.top);
  return default_draw ? TRUE : FALSE;
}
// merge float channels
bool mergeOCPToImage::apply(const matrix<float>& c1,
                            const matrix<float>& c2,
                            const matrix<float>& c3,
                            image& img) const {

  point p;           // coordinates
  float r, g, b;     // unnormed RGB channels
  float RG, BY, WB;  // opponent colour channels

  if ((c1.size() != c2.size()) || (c1.size() != c3.size())) {
    setStatusString("sizes of channels do not match");
    return false;
  }

  img.resize(c1.size(), rgbPixel(), false, false);

  for (p.y = 0; p.y < img.rows(); p.y++) {
    for (p.x = 0; p.x < img.columns(); p.x++) {

      RG = c1.at(p);
      BY = c2.at(p);
      WB = c3.at(p);

      b = BY*0.666666666667f;
      r = WB + RG - b;
      g = WB - RG - b;
      b = WB + BY*1.3333333333333f;

      // truncate r, g and b if the value is not in the interval [0..1];
      // this can happen due to rounding errors in the split operation
      if (r < 0.0f) {
        r = 0.0f;
      } else if (r > 1.0f) {
        r = 1.0f;
      }

      if (g < 0.0f) {
        g = 0.0f;
      } else if (g > 1.0f) {
        g = 1.0f;
      }

      if (b < 0.0f) {
        b = 0.0f;
      } else if (b > 1.0f) {
        b = 1.0f;
      }

      img.at(p).set(static_cast<ubyte>(255.0f*r),
                    static_cast<ubyte>(255.0f*g),
                    static_cast<ubyte>(255.0f*b),
                    0);
    }
  }

  return true;
}
/*! */
image max(const image& lhs, const image& rhs)
{
  CvSize lhs_sz = lhs.dim();
  CvSize rhs_sz = rhs.dim();
  if ((lhs_sz.height != rhs_sz.height) || (lhs_sz.width != rhs_sz.width))
    throw exception();
  image res = lhs;
  cvMax(lhs, rhs, res);
  return res;
}
size_t cnt(const image& img)
{
  size_t sum = 0;
  img.reset();
  while (img.move_next()) {
    if (img.element())
      sum++;
  }
  return sum;
}
image<vec4f> display_diff(const image<vec4f>& diff) {
  auto display = image{diff.size(), zero4f};
  for (auto j = 0; j < diff.size().y; j++) {
    for (auto i = 0; i < diff.size().x; i++) {
      auto diff_value = max(diff[{i, j}]);
      display[{i, j}] = {diff_value, diff_value, diff_value, 1};
    }
  }
  return display;
}
/*! */
image operator /(const image& lhs, const image& rhs)
{
  CvSize lhs_sz = lhs.dim();
  CvSize rhs_sz = rhs.dim();
  if ((lhs_sz.width != rhs_sz.width) || (lhs_sz.height != rhs_sz.height))
    throw exception();
  image result = lhs;
  cvDiv(lhs, rhs, result);
  return result;
}
image_view::image_view(const image& image, const vk::Format format,
                       vk::ImageAspectFlags aspectFlags)
    : device_(image.parent_device())
{
  handle_ = device_.create_image_view(
      vk::ImageViewCreateInfo()
          .viewType(vk::ImageViewType::e2D)
          .format(format)
          .subresourceRange(vk::ImageSubresourceRange(aspectFlags, 0, 1, 0, 1))
          .image(image.handle()));
}
void imshow_impl(
    const image<T1, D1>& im1, const char* title1,
    const image<T2, D2>& im2, const char* title2,
    const image<T3, D3>& im3, const char* title3,
    const image<T4, D4>& im4, const char* title4,
    const image<T5, D5>& im5, const char* title5)
{
  IplImage* ipl1;
  IplImage* ipl2;
  IplImage* ipl3;
  IplImage* ipl4;
  IplImage* ipl5;

  ipl1 = im1.ipl();
  cvNamedWindow(title1, 1);
  cvShowImage(title1, ipl1);

  if (title2) {
    ipl2 = im2.ipl();
    cvNamedWindow(title2, 2);
    cvShowImage(title2, ipl2);
  }
  if (title3) {
    ipl3 = im3.ipl();
    cvNamedWindow(title3, 3);
    cvShowImage(title3, ipl3);
  }
  if (title4) {
    ipl4 = im4.ipl();
    cvNamedWindow(title4, 4);
    cvShowImage(title4, ipl4);
  }
  if (title5) {
    ipl5 = im5.ipl();
    cvNamedWindow(title5, 5);
    cvShowImage(title5, ipl5);
  }

  cvWaitKey(0); // very important, contains event processing loop inside

  cvDestroyWindow(title1);
  if (title2) cvDestroyWindow(title2);
  if (title3) cvDestroyWindow(title3);
  if (title4) cvDestroyWindow(title4);
  if (title5) cvDestroyWindow(title5);

  cvReleaseImage(&ipl1);
  if (title2) cvReleaseImage(&ipl2);
  if (title3) cvReleaseImage(&ipl3);
  if (title4) cvReleaseImage(&ipl4);
  if (title5) cvReleaseImage(&ipl5);
}
typename image<T, D>::create_new median_blur(const image<T, D>& a, int neighbourhood)
{
  IplImage* src = a.ipl();
  IplImage* dst = cvCreateImage(cvGetSize(src),
                                image_details::ipl_depth<T>(),
                                int(a.channels()));
  cvSmooth(src, dst, CV_MEDIAN, neighbourhood);
  typename image<T, D>::create_new r(dst);
  cvReleaseImage(&src);
  cvReleaseImage(&dst);
  return r;
}
image<vec4f> compute_diff_image(const image<vec4f>& a, const image<vec4f>& b) {
  auto diff = image{a.size(), zero4f};
  for (auto j = 0; j < a.size().y; j++) {
    for (auto i = 0; i < a.size().x; i++) {
      diff[{i, j}] = {abs(a[{i, j}].x - b[{i, j}].x),
          abs(a[{i, j}].y - b[{i, j}].y),
          abs(a[{i, j}].z - b[{i, j}].z),
          abs(a[{i, j}].w - b[{i, j}].w)};
    }
  }
  return diff;
}
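// Minimal usage sketch (not from the original sources): reduce the per-pixel
// differences produced by compute_diff_image to a single mean value. It only
// relies on the image<vec4f> interface already used above (size(), operator[],
// and the max() reduction over a vec4f seen in display_diff).
float mean_diff_value(const image<vec4f>& a, const image<vec4f>& b) {
  auto diff = compute_diff_image(a, b);
  auto sum  = 0.0f;
  for (auto j = 0; j < diff.size().y; j++) {
    for (auto i = 0; i < diff.size().x; i++) {
      sum += max(diff[{i, j}]);
    }
  }
  return sum / float(diff.size().x * diff.size().y);
}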
void find_in_image()
{
  Setting conf("application.cfg");
  string base = conf.getString("application.res_dir");
  Mat haystack = read(base + "/car_features/01.jpg");
  Mat needle = toGrayscale(cv::imread(base + "/car_features/01.jpg"));
  Mat needle64x128;
  cv::resize(needle, needle64x128, Size(64, 128));
  vector<float> hog = computeHog(needle64x128);
  cout << hog.size() << endl;
}
typename image<T, D>::create_new laplace(const image<T, D>& a, int aperture_size)
{
  IplImage* src = a.ipl();
  IplImage* dst = cvCreateImage(cvGetSize(src),
                                image_details::ipl_depth<T>(),
                                int(a.channels()));
  cvLaplace(src, dst, aperture_size);
  typename image<T, D>::create_new r(dst);
  cvReleaseImage(&src);
  cvReleaseImage(&dst);
  return r;
}
typename image<T, D>::create_new blur_no_scale(const image<T, D>& a,
                                               int neighbourhood_rows,
                                               int neighbourhood_cols)
{
  IplImage* src = a.ipl();
  IplImage* dst = cvCreateImage(cvGetSize(src),
                                image_details::ipl_depth<T>(),
                                int(a.channels()));
  cvSmooth(src, dst, CV_BLUR_NO_SCALE, neighbourhood_cols, neighbourhood_rows);
  typename image<T, D>::create_new r(dst);
  cvReleaseImage(&src);
  cvReleaseImage(&dst);
  return r;
}
typename image<T, D>::create_new sobel(const image<T, D>& a,
                                       int y_order, int x_order,
                                       int aperture_size)
{
  IplImage* src = a.ipl();
  IplImage* dst = cvCreateImage(cvGetSize(src),
                                image_details::ipl_depth<T>(),
                                (int)a.channels());
  cvSobel(src, dst, x_order, y_order, aperture_size);
  typename image<T, D>::create_new r(dst);
  cvReleaseImage(&src);
  cvReleaseImage(&dst);
  return r;
}
typename image<T, D>::create_new pyramid_down(const image<T, D>& a)
{
  IplImage* src = a.ipl();
  IplImage* dst = cvCreateImage(
      image_details::cv_size(a.width() / 2, a.height() / 2),
      image_details::ipl_depth<T>(),
      int(a.channels()));
  cvPyrDown(src, dst);
  typename image<T, D>::create_new r(dst);
  cvReleaseImage(&src);
  cvReleaseImage(&dst);
  return r;
}
image image::operator/(const image& img) const
{
  // sanity check
  assert(width() == img.width() && height() == img.height());

  // create result
  image result(width(), height());
  std::transform(begin(), end(), img.begin(), result.begin(),
                 std::divides<color>());

  // Done.
  return result;
}
typename image<T, D>::create_new bilateral_blur(const image<T, D>& a,
                                                int color_sigma, int space_sigma)
{
  IplImage* src = a.ipl();
  IplImage* dst = cvCreateImage(cvGetSize(src),
                                image_details::ipl_depth<T>(),
                                int(a.channels()));
  cvSmooth(src, dst, CV_BILATERAL, color_sigma, space_sigma);
  typename image<T, D>::create_new r(dst);
  cvReleaseImage(&src);
  cvReleaseImage(&dst);
  return r;
}
void image::insert_image(Pos& pos, image& image_to_insert)
{
  // Check dimensions
  if (pos.x + image_to_insert.size().width > this->size_.width ||
      pos.y + image_to_insert.size().height > this->size_.height) {
    cout << "Can't insert image: size mismatch!" << endl;
    return;
  }

  for (int y = 0; y < image_to_insert.size().height; ++y)
    for (int x = 0; x < image_to_insert.size().width; ++x)
      this->pixels_[y + pos.y][x + pos.x] = image_to_insert.pixels().at(y).at(x);
}
typename image<T, D>::create_new corner_eigenvals(const image<T, D>& a,
                                                  int block_size,
                                                  int aperture_size)
{
  IplImage* src = a.ipl();
  IplImage* dst = cvCreateImage(
      image_details::cv_size(a.width() * 6, a.height()),
      image_details::ipl_depth<T>(),
      1);
  cvCornerEigenValsAndVecs(src, dst, block_size, aperture_size);
  typename image<T, D>::create_new r(dst);
  cvReleaseImage(&src);
  cvReleaseImage(&dst);
  return r;
}
typename image<T, D>::create_new gaussian_blur(const image<T, D>& a,
                                               int neighbourhood_rows,
                                               int neighbourhood_cols,
                                               int vertical_sigma,
                                               int horizontal_sigma)
{
  IplImage* src = a.ipl();
  IplImage* dst = cvCreateImage(cvGetSize(src),
                                image_details::ipl_depth<T>(),
                                int(a.channels()));
  cvSmooth(src, dst, CV_GAUSSIAN, neighbourhood_cols, neighbourhood_rows,
           horizontal_sigma, vertical_sigma);
  typename image<T, D>::create_new r(dst);
  cvReleaseImage(&src);
  cvReleaseImage(&dst);
  return r;
}
typename image<T, D>::create_new resample(const image<T, D>& a,
                                          size_t rows, size_t cols,
                                          interpolation interp)
{
  IplImage* src = a.ipl();
  IplImage* dst = cvCreateImage(
      image_details::cv_size(cols, rows),
      image_details::ipl_depth<T>(),
      int(a.channels()));
  cvResize(src, dst, image_details::interpolation_map[interp]);
  typename image<T, D>::create_new r(dst);
  cvReleaseImage(&src);
  cvReleaseImage(&dst);
  return r;
}
void jsonwriter::append_image(const image& img)
{
  rapidjson::Value frame("frame");
  frame.SetObject();

  // name value
  rapidjson::Value name(rapidjson::StringRef(img.name().c_str()));
  frame.AddMember("name", name, document_.GetAllocator());

  // size value which contains actual width and height
  rapidjson::Value size;
  size.SetObject();
  size.AddMember("width", img.size().width, document_.GetAllocator());
  size.AddMember("height", img.size().height, document_.GetAllocator());
  frame.AddMember("size", size, document_.GetAllocator());

  // position value on spritesheet
  rapidjson::Value position;
  position.SetObject();
  position.AddMember("x", img.position().x, document_.GetAllocator());
  position.AddMember("y", img.position().y, document_.GetAllocator());
  frame.AddMember("position", position, document_.GetAllocator());

  // offset value that defines how much space was trimmed
  rapidjson::Value offset;
  offset.SetObject();
  offset.AddMember("x", img.offset().x, document_.GetAllocator());
  offset.AddMember("y", img.offset().y, document_.GetAllocator());
  frame.AddMember("offset", offset, document_.GetAllocator());

  // source value which contains the original image size
  rapidjson::Value source;
  source.SetObject();
  source.AddMember("width", img.o_size().width, document_.GetAllocator());
  source.AddMember("height", img.o_size().height, document_.GetAllocator());
  frame.AddMember("source", source, document_.GetAllocator());

  // add finished frame to frames array
  document_["frames"].PushBack(frame, document_.GetAllocator());
}
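// For reference, one frame appended by jsonwriter::append_image has the shape
// below inside the "frames" array (field values are purely illustrative):
//
// {
//   "name":     "sprite_01",
//   "size":     { "width": 64,  "height": 64 },
//   "position": { "x": 128, "y": 0 },
//   "offset":   { "x": 2,   "y": 3 },
//   "source":   { "width": 68,  "height": 70 }
// }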
void image::append_image_right(image& image_to_append)
{
  // Bring both images to the same height
  if (this->size_.height < image_to_append.size().height)
    append_height(image_to_append.size().height);
  else if (this->size_.height > image_to_append.size().height)
    image_to_append.append_height(this->size_.height);

  // Append new rows from other image to the right of each row
  for (int y = 0; y < (int) image_to_append.size().height; ++y)
    for (int x = 0; x < (int) image_to_append.size().width; ++x)
      this->pixels_.at(y).push_back(image_to_append.pixels().at(y).at(x));

  this->size_.width += image_to_append.size().width;

#if DEBUG
  cout << "images merged. array sizes: " << this->pixels_.at(1).size()
       << " width, " << this->pixels_.size() << " height. image size: "
       << this->size_.width << ", " << this->size_.height << endl;
#endif
}
// this one requires height <= other.height
static void image_insert_tile_row(image& target, int32_t x, int32_t y,
                                  uint32_t width, uint32_t height,
                                  const image& other)
{
  uint32_t xx = 0;
  if (width >= other.width) {
    for (xx = 0; xx < width - other.width; xx += other.width) {
      target.insert_sub(x + xx, y, other, 0, 0, other.width, height);
    }
  }
  if (xx < width) {
    target.insert_sub(x + xx, y, other, 0, 0, width - xx, height);
  }
}
bool brightRGB::getMax(const image& img, dvector& dest) const {

  // image empty?
  if (img.empty()) {
    setStatusString("image empty");
    dest.resize(0);
    return false;
  }

  const rgbPixel transColor = getParameters().transColor;
  ivector maxV(3, -1);
  image::const_iterator it = img.begin();

  if (getParameters().transparent) {
    while (it != img.end()) {
      if (*it != transColor) {
        if ((*it).getRed() > maxV.at(0))
          maxV.at(0) = (*it).getRed();
        if ((*it).getGreen() > maxV.at(1))
          maxV.at(1) = (*it).getGreen();
        if ((*it).getBlue() > maxV.at(2))
          maxV.at(2) = (*it).getBlue();
      }
      it++;
    }
    // only transparent pixels?
    if (maxV.at(0) == -1) {
      setStatusString("only transparent pixels");
      dest.resize(0);
      return false;
    }
  } else { // no transparent color
    while (it != img.end()) {
      if ((*it).getRed() > maxV.at(0))
        maxV.at(0) = (*it).getRed();
      if ((*it).getGreen() > maxV.at(1))
        maxV.at(1) = (*it).getGreen();
      if ((*it).getBlue() > maxV.at(2))
        maxV.at(2) = (*it).getBlue();
      it++;
    }
  }

  if (maxV.at(0) == -1)
    return false;

  dest.castFrom(maxV);
  // normalize to 0..1
  dest.divide(255);

  return true;
}
inline void cvt_oniimage(openni::VideoFrameRef src, image& to, const MemOp& m)
{
  const void* data = src.getData();
  void* datab = const_cast<void*>(data);
  to = image(src.getWidth(), src.getHeight(), src.getStrideInBytes(), datab, m);
  to.set_format(image::FORMAT_DEPTH_16);
}
void read_dem_header(image<short>& dem)
{
  char file[1000];
  GDALDataset* df;
  GDALRasterBand* bd;
  double trans[6];
  int rows, cols;

  strcpy(file, p.value("dem_file").c_str());
  if (strlen(file) == 0) {
    fprintf(stderr, "Need to specify a dem file\n");
    exit(1);
  }

  df = (GDALDataset*) GDALOpen(file, GA_ReadOnly);
  if (df == NULL) {
    fprintf(stderr, "Could not open dem file: %s\n", file);
    exit(1);
  }

  rows = df->GetRasterYSize();
  cols = df->GetRasterXSize();
  dem.create(rows, cols);

  if (df->GetGeoTransform(trans) == CE_None) {
    dem_east = trans[0];
    dem_north = trans[3];
    dem_wid = trans[1];
  } else {
    fprintf(stderr, "Dem file: %s has no geographic metadata\n", file);
    exit(1);
  }

  delete df;
}
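// Hedged companion sketch (not part of the original sources, requires <vector>):
// read the elevation samples of band 1 into a row-major buffer with GDAL's
// RasterIO. Copying the buffer into image<short> depends on that class's API,
// so it is left to the caller.
void read_dem_data(const char* file, std::vector<short>& buffer, int& rows, int& cols)
{
  GDALDataset* df = (GDALDataset*) GDALOpen(file, GA_ReadOnly);
  if (df == NULL) {
    fprintf(stderr, "Could not open dem file: %s\n", file);
    exit(1);
  }

  rows = df->GetRasterYSize();
  cols = df->GetRasterXSize();
  buffer.resize((size_t)rows * cols);

  GDALRasterBand* bd = df->GetRasterBand(1);
  if (bd->RasterIO(GF_Read, 0, 0, cols, rows, buffer.data(),
                   cols, rows, GDT_Int16, 0, 0) != CE_None) {
    fprintf(stderr, "Could not read dem file: %s\n", file);
    exit(1);
  }

  delete df;
}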