void procOCL_OCV(int tex, int w, int h) { int64_t t = getTimeMs(); cl::ImageGL imgIn (theContext, CL_MEM_READ_ONLY, GL_TEXTURE_2D, 0, tex); std::vector < cl::Memory > images(1, imgIn); theQueue.enqueueAcquireGLObjects(&images); theQueue.finish(); cv::UMat uIn, uOut, uTmp; cv::ocl::convertFromImage(imgIn(), uIn); LOGD("loading texture data to OpenCV UMat costs %d ms", getTimeInterval(t)); theQueue.enqueueReleaseGLObjects(&images); t = getTimeMs(); //cv::blur(uIn, uOut, cv::Size(5, 5)); cv::Laplacian(uIn, uTmp, CV_8U); cv:multiply(uTmp, 10, uOut); cv::ocl::finish(); LOGD("OpenCV processing costs %d ms", getTimeInterval(t)); t = getTimeMs(); cl::ImageGL imgOut(theContext, CL_MEM_WRITE_ONLY, GL_TEXTURE_2D, 0, tex); images.clear(); images.push_back(imgOut); theQueue.enqueueAcquireGLObjects(&images); cl_mem clBuffer = (cl_mem)uOut.handle(cv::ACCESS_READ); cl_command_queue q = (cl_command_queue)cv::ocl::Queue::getDefault().ptr(); size_t offset = 0; size_t origin[3] = { 0, 0, 0 }; size_t region[3] = { w, h, 1 }; CV_Assert(clEnqueueCopyBufferToImage (q, clBuffer, imgOut(), offset, origin, region, 0, NULL, NULL) == CL_SUCCESS); theQueue.enqueueReleaseGLObjects(&images); cv::ocl::finish(); LOGD("uploading results to texture costs %d ms", getTimeInterval(t)); }
// Copies the region `roi` of `imgIn` into a freshly created buffer of the
// frame timeline `frameTL` at `timestamp`, then notifies listeners that a
// new object was pushed.
void SCropVideoQuad::pushFrameInTimeline(::cv::Mat& imgIn, const ::cv::Rect& roi, ::arData::FrameTL::sptr& frameTL, ::fwCore::HiResClock::HiResClockType timestamp)
{
    const size_t tlWidth  = frameTL->getWidth();
    const size_t tlHeight = frameTL->getHeight();

    // Allocate the destination buffer of the timeline for this timestamp.
    SPTR(::arData::FrameTL::BufferType) dstBuffer = frameTL->createBuffer(timestamp);
    std::uint8_t* dstPixels = dstBuffer->addElement(0);

    // Wrap the timeline buffer in a cv::Mat header — no pixel data is copied here.
    ::cv::Mat wrappedOut(tlHeight, tlWidth, imgIn.type(), (void*)dstPixels, ::cv::Mat::AUTO_STEP);

    // imgIn(roi) is only a view onto the source; copyTo performs the crop copy
    // into the timeline buffer.
    imgIn(roi).copyTo(wrappedOut);

    // Publish the new frame and emit the "object pushed" signal asynchronously.
    frameTL->pushObject(dstBuffer);
    ::arData::TimeLine::ObjectPushedSignalType::sptr pushedSig =
        frameTL->signal< ::arData::TimeLine::ObjectPushedSignalType >(::arData::TimeLine::s_OBJECT_PUSHED_SIG );
    pushedSig->asyncEmit(timestamp);
}
void procOCL_I2I(int texIn, int texOut, int w, int h) { if(!haveOpenCL) return; LOGD("procOCL_I2I(%d, %d, %d, %d)", texIn, texOut, w, h); cl::ImageGL imgIn (theContext, CL_MEM_READ_ONLY, GL_TEXTURE_2D, 0, texIn); cl::ImageGL imgOut(theContext, CL_MEM_WRITE_ONLY, GL_TEXTURE_2D, 0, texOut); std::vector < cl::Memory > images; images.push_back(imgIn); images.push_back(imgOut); int64_t t = getTimeMs(); theQueue.enqueueAcquireGLObjects(&images); theQueue.finish(); LOGD("enqueueAcquireGLObjects() costs %d ms", getTimeInterval(t)); t = getTimeMs(); cl::Kernel Laplacian(theProgI2I, "Laplacian"); //TODO: may be done once Laplacian.setArg(0, imgIn); Laplacian.setArg(1, imgOut); theQueue.finish(); LOGD("Kernel() costs %d ms", getTimeInterval(t)); t = getTimeMs(); theQueue.enqueueNDRangeKernel(Laplacian, cl::NullRange, cl::NDRange(w, h), cl::NullRange); theQueue.finish(); LOGD("enqueueNDRangeKernel() costs %d ms", getTimeInterval(t)); t = getTimeMs(); theQueue.enqueueReleaseGLObjects(&images); theQueue.finish(); LOGD("enqueueReleaseGLObjects() costs %d ms", getTimeInterval(t)); }
// Computes a gradient-magnitude image of imgIn using a 3x3 Sobel-style
// operator, implemented with raw pointer walks over the pixel data.
// NOTE(review): the `percent` parameter is unused in this body — possibly a
// leftover from a thresholding variant; confirm against callers.
template <class PixTyp> GaImageT<PixTyp> gradient(GaImageT<PixTyp>& imgIn, unsigned short percent)
{
  // Output image: same dimensions and image type as the input.
  GaImageT<PixTyp> imgOut(imgIn.sizeX(), imgIn.sizeY());
  imgOut.typeImage(imgIn.typeImage());
  int i, j ,k ,l, n, m;
  // Build a vector of pointers to the image data:
  //PixTyp* in_data = imgIn.Data();
  PixTyp* vec[9];
  // vec[0..8] address the 3x3 neighbourhood anchored one pixel inside the
  // border. Assumes imgIn stores pixels contiguously so that incrementing a
  // pointer steps to the adjacent pixel within a row — TODO confirm layout.
  for (i = 0; i < 3; i++)
    for (j = 0; j < 3; j++)
      vec[(i*3) + j] = &imgIn(i+1, j+1 );
  for(k = 0; k < imgIn.sizeY()-3; k++) {
    for(l = 0; l < imgIn.sizeX()-3; l++) {
      double hor = 0;
      double ver = 0;
      // Weighted 1-2-1 difference between one edge of the window
      // (vec[0..2]) and the opposite edge (vec[6..8]).
      hor = + (double) (*vec[0]) + (double) (*vec[1])*2 + (double) (*vec[2])
            - (double) (*vec[6]) - (double) (*vec[7])*2 - (double) (*vec[8]);
      // Weighted 1-2-1 difference between the remaining two opposite edges
      // (vec[0],vec[3],vec[6] versus vec[2],vec[5],vec[8]).
      ver = + (double) (*vec[0]) + (double) (*vec[3])*2 + (double) (*vec[6])
            - (double) (*vec[2]) - (double) (*vec[5])*2 - (double) (*vec[8]);
      // Magnitude written one pixel inside the border. The double sqrt takes
      // the fourth root of ver^2 + hor^2, compressing the dynamic range —
      // NOTE(review): presumably intentional scaling; confirm.
      imgOut(l+1,k+1) = (PixTyp) sqrt(sqrt(ver*ver + hor*hor));
      // Advance all window pointers by one pixel
      for (m = 0; m < 9; m++)
        vec[m]++;
    }
    // At the end of each row, step all pointers 3 further to jump the window
    // onto the next row — assumes row stride == sizeX(); TODO confirm the
    // loop bounds (a 3-pixel border of the image is left untouched).
    for (n = 0; n < 3; n++)
      for (m = 0; m < 9; m++)
        vec[m]++;
  }
  return imgOut;
}
// Renders each of the two datasets as a colour-mapped BMP (one file per
// dataset) and returns a space-separated list of the file names written.
// Returns "Improper Datasets" when either dataset is not indexed_points.
string colmap::compare(){
    string ret("");
    indexed_points *ds1_ip = dynamic_cast<indexed_points*>(dataset1);
    indexed_points *ds2_ip = dynamic_cast<indexed_points*>(dataset2);
    if(ds1_ip == NULL || ds2_ip == NULL)
        return string("Improper Datasets");

    // j == 1 renders dataset1, j == 2 renders dataset2.
    for(int j=1; j<=2; j++){
        long double max = get_max_value(j);
        long double min = get_min_value(j);
        long double range = max - min;

        int x_size = 0;
        int y_size = 0;
        int z_size = 0;
        if(j == 1){
            x_size = ds1_ip->get_dim().sizes[0];
            y_size = ds1_ip->get_dim().sizes[1];
            z_size = ds1_ip->get_dim().sizes[2];
        }
        else{
            x_size = ds2_ip->get_dim().sizes[0];
            y_size = ds2_ip->get_dim().sizes[1];
            z_size = ds2_ip->get_dim().sizes[2];
        }

        CImg<unsigned char> imgOut(x_size, y_size, z_size, 3, 0);

        // FIX: the original allocated `new int[3]` for every voxel and never
        // freed it, leaking x*y*z allocations per dataset. Allocate the index
        // array once, reuse it, and release it after the loops.
        // (Assumes `layout` does not own/free `sizes` — the original's leak
        // implies it does not; confirm if `layout` ever gains a destructor.)
        layout loc;
        loc.arr_size = 3;
        loc.sizes = new int[3];
        for(int x=0; x<x_size; x++){
            for(int y=0; y<y_size; y++){
                for(int z=0; z<z_size; z++){
                    loc.sizes[0] = x;
                    loc.sizes[1] = y;
                    loc.sizes[2] = z;
                    long double pt_val;
                    if(j == 1)
                        pt_val = ds1_ip->get_indexed_point(loc).vals[var_ds1];
                    else // j == 2 (loop guarantees j is 1 or 2; avoids an
                         // "uninitialized pt_val" path the compiler warns on)
                        pt_val = ds2_ip->get_indexed_point(loc).vals[var_ds2];
                    rgb_color col = get_color_single_sided(pt_val, range, min);
                    // Flip y so the image's bottom row is y == 0.
                    imgOut(x, y_size - y - 1, z, 0) = col.r;
                    imgOut(x, y_size - y - 1, z, 1) = col.g;
                    imgOut(x, y_size - y - 1, z, 2) = col.b;
                }
            }
        }
        delete[] loc.sizes;

        //Construct file name
        string output_name(outprefix);
        output_name += "_colmap";
        if(j == 1){
            output_name += "_FIRST_";
            output_name += "var1_";
            output_name += itoa(var_ds1);
        }
        else if(j == 2){
            output_name += "_SECOND_";
            output_name += "var2_";
            output_name += itoa(var_ds2);
        }
        output_name += ".bmp";
        ret += output_name + " ";
        imgOut.save_bmp(output_name.c_str());
    }
    return ret;
}
// Produces a difference-map image |ds1 - ds2| with a border and a colour-key
// column appended on the right, writes <name>.bmp plus a <name>.txt info
// file, and returns the output base name. Returns "Improper Datasets" or
// "No Output" on the corresponding failure.
string difmap_wkey::compare(){
    indexed_points *ds1_ip = dynamic_cast<indexed_points*>(dataset1);
    indexed_points *ds2_ip = dynamic_cast<indexed_points*>(dataset2);
    if(ds1_ip == NULL || ds2_ip == NULL)
        return string("Improper Datasets");
    string ret("");
    if(!check_card_match()){
        ret = "No Output";
        return ret;
    }

    long double max = get_max_dif();
    long double min = get_min_dif();
    long double range = fabs( max - min );

    // Image is dataset-width plus the border and key columns.
    CImg<unsigned char> imgOut( ds1_ip->get_dim().sizes[0] + border_width + key_width,
                                ds1_ip->get_dim().sizes[1],
                                ds1_ip->get_dim().sizes[2], 3, 0 );

    // FIX: the original leaked a fresh `new int[3]` for every voxel (and one
    // more in the large_ds branch). Allocate the index array once, reuse it,
    // and release it when the lookups are done.
    // (Assumes `layout` does not own/free `sizes` — the original's leak
    // implies it does not; confirm if `layout` ever gains a destructor.)
    layout loc;
    loc.arr_size = 3;
    loc.sizes = new int[3];

    //Write difference map to image
    for(int x=0; x<ds1_ip->get_dim().sizes[0]; x++){
        for(int y=0; y<ds1_ip->get_dim().sizes[1]; y++){
            for(int z=0; z<ds1_ip->get_dim().sizes[2]; z++){
                //TODO: loop over every dep_var
                loc.sizes[0] = x;
                loc.sizes[1] = y;
                loc.sizes[2] = z;
                long double pt_val = fabs( ds1_ip->get_indexed_point(loc).vals[var_ds1] -
                                           ds2_ip->get_indexed_point(loc).vals[var_ds2] );
                rgb_color col = get_color_double_sided(pt_val, range, min, 0);
                // Flip y so the image's bottom row is y == 0.
                imgOut(x, ds1_ip->get_dim().sizes[1] - y - 1, z, 0) = col.r;
                imgOut(x, ds1_ip->get_dim().sizes[1] - y - 1, z, 1) = col.g;
                imgOut(x, ds1_ip->get_dim().sizes[1] - y - 1, z, 2) = col.b;
            }
        }
    }

    //Determine if one dataset is always larger: -1 when the differences span
    //zero, otherwise 1 or 2 depending on which dataset is larger at origin.
    int large_ds;
    if( 0 >= min && 0 <= max )
        large_ds = -1;
    else{
        loc.sizes[0] = 0;
        loc.sizes[1] = 0;
        loc.sizes[2] = 0;
        if(ds1_ip->get_indexed_point(loc).vals[var_ds1] > ds2_ip->get_indexed_point(loc).vals[var_ds2]){
            large_ds = 1;
        }
        else
            large_ds = 2;
    }
    delete[] loc.sizes; // FIX: was leaked

    //Write border
    int border_start = ds1_ip->get_dim().sizes[0];
    for(int y=0; y < ds1_ip->get_dim().sizes[1]; y++){
        for(int x=border_start; x < border_start + border_width; x++){
            for(int z=0; z<ds1_ip->get_dim().sizes[2]; z++){
                imgOut( x, y, z, 0 ) = border_color.r;
                imgOut( x, y, z, 1 ) = border_color.g;
                imgOut( x, y, z, 2 ) = border_color.b;
            }
        }
    }

    //Write key: one colour step per pixel row, spanning [min, max].
    int x_start = ds1_ip->get_dim().sizes[0] + border_width;
    long double k_height = ds1_ip->get_dim().sizes[1];
    long double unit = range / (k_height-1.0);
    for(int y=0; y<ds1_ip->get_dim().sizes[1]; y++){
        // The key colour depends only on y — hoisted out of the x/z loops.
        rgb_color col = get_color_double_sided(min+unit*y, range, min, 0);
        for(int x=x_start; x < x_start + key_width; x++){
            for(int z=0; z<ds1_ip->get_dim().sizes[2]; z++){
                imgOut( x, ds1_ip->get_dim().sizes[1] - y - 1, z, 0 ) = col.r;
                imgOut( x, ds1_ip->get_dim().sizes[1] - y - 1, z, 1 ) = col.g;
                imgOut( x, ds1_ip->get_dim().sizes[1] - y - 1, z, 2 ) = col.b;
            }
        }
    }

    //Construct file name
    string output_name(outprefix);
    output_name += "_difmap_wkey_";
    output_name += "var1_";
    output_name += itoa(var_ds1);
    output_name += "_var2_";
    output_name += itoa(var_ds2);

    //Write info to txt file
    output_info(output_name + ".txt", max, min, large_ds);

    //Write image
    imgOut.save_bmp((output_name + ".bmp").c_str());

    ret = output_name;
    return ret;
}
// Rebuilds the layer's GPU texture from its text document.
// Runs only when the cached texture is flagged dirty. Renders the document
// (optionally with a stroke/outline, selected at compile time via
// STROKE_TECHNIQUE) into a transparent ARGB32 QImage sized to the document
// plus the stroke margin, then uploads it with vidgfx_context_new_tex().
void TextLayer::recreateTexture(VidgfxContext *gfx)
{
	if(!m_isTexDirty)
		return; // Don't waste any time if it hasn't changed
	m_isTexDirty = false;

	// Delete existing texture if one exists
	if(m_texture != NULL)
		vidgfx_context_destroy_tex(gfx, m_texture);
	m_texture = NULL;

	// Determine texture size. We need to keep in mind that the text in the
	// document might extend outside of the layer's bounds.
	m_document.setTextWidth(m_rect.width());
	QSize size(
		(int)ceilf(m_document.size().width()),
		(int)ceilf(m_document.size().height()));

	if(m_document.isEmpty() || size.isEmpty()) {
		// Nothing to display
		return;
	}

	// Create temporary canvas. We need to be careful here as text is rendered
	// differently on premultiplied vs non-premultiplied pixel formats. On a
	// premultiplied format text is rendered with subpixel rendering enabled
	// while on a non-premultiplied format it is not. As we don't want subpixel
	// rendering we use the standard ARGB32 format.
	// The canvas is enlarged by the stroke size on every side so the outline
	// is never clipped.
	QSize imgSize(
		size.width() + m_strokeSize * 2, size.height() + m_strokeSize * 2);
	QImage img(imgSize, QImage::Format_ARGB32);
	img.fill(Qt::transparent);
	QPainter p(&img);
	p.setRenderHint(QPainter::Antialiasing, true);

	// Render text
	//m_document.drawContents(&p);

	// Render stroke
	if(m_strokeSize > 0) {
#define STROKE_TECHNIQUE 0
#if STROKE_TECHNIQUE == 0
		// Technique 0: Use QTextDocument's built-in text outliner
		//quint64 timeStart = App->getUsecSinceExec();

		// Clone the document and merge an outline pen into every character
		// format, then draw the clone offset by the stroke margin.
		QTextDocument *outlineDoc = m_document.clone(this);

		QTextCharFormat format;
		QPen pen(m_strokeColor, (double)(m_strokeSize * 2));
		pen.setJoinStyle(Qt::RoundJoin);
		format.setTextOutline(pen);
		QTextCursor cursor(outlineDoc);
		cursor.select(QTextCursor::Document);
		cursor.mergeCharFormat(format);

		// Take into account the stroke offset
		p.translate(m_strokeSize, m_strokeSize);

		//quint64 timePath = App->getUsecSinceExec();

		outlineDoc->drawContents(&p);
		delete outlineDoc;

		//quint64 timeEnd = App->getUsecSinceExec();
		//appLog() << "Path time = " << (timePath - timeStart) << " usec";
		//appLog() << "Render time = " << (timeEnd - timePath) << " usec";
		//appLog() << "Full time = " << (timeEnd - timeStart) << " usec";
#elif STROKE_TECHNIQUE == 1
		// Technique 1: Create a text QPainterPath and stroke it
		quint64 timeStart = App->getUsecSinceExec();

		// Create the path for the text's stroke
		// NOTE(review): binding non-const references to the temporaries
		// returned by firstBlock()/lineAt() relies on a compiler extension —
		// confirm this branch still builds on the supported toolchains.
		QPainterPath path;
		QTextBlock &block = m_document.firstBlock();
		int numBlocks = m_document.blockCount();
		for(int i = 0; i < numBlocks; i++) {
			QTextLayout *layout = block.layout();
			for(int j = 0; j < layout->lineCount(); j++) {
				QTextLine &line = layout->lineAt(j);
				const QString text = block.text().mid(
					line.textStart(), line.textLength());
				QPointF pos = layout->position() + line.position();
				pos.ry() += line.ascent();
				//appLog() << pos << ": " << text;
				path.addText(pos, block.charFormat().font(), text);
			}
			block = block.next();
		}
		quint64 timePath = App->getUsecSinceExec();

		path = path.simplified(); // Fixes gaps with large stroke sizes
		quint64 timeSimplify = App->getUsecSinceExec();

		// Render the path
		//p.strokePath(path, QPen(m_strokeColor, m_strokeSize));

		// Convert it to a stroke
		QPainterPathStroker stroker;
		stroker.setWidth(m_strokeSize);
		//stroker.setCurveThreshold(2.0);
		stroker.setJoinStyle(Qt::RoundJoin);
		path = stroker.createStroke(path);

		// Render the path
		p.fillPath(path, m_strokeColor);

		quint64 timeEnd = App->getUsecSinceExec();
		appLog() << "Path time = " << (timePath - timeStart) << " usec";
		appLog() << "Simplify time = " << (timeSimplify - timePath) << " usec";
		appLog() << "Render time = " << (timeEnd - timeSimplify) << " usec";
		appLog() << "Full time = " << (timeEnd - timeStart) << " usec";
#elif STROKE_TECHNIQUE == 2
		// Technique 2: Similar to technique 1 but do each block separately
		quint64 timeStart = App->getUsecSinceExec();
		quint64 timeTotalSimplify = 0;
		quint64 timeTotalRender = 0;

		// Create the path for the text's stroke
		QTextBlock &block = m_document.firstBlock();
		int numBlocks = m_document.blockCount();
		for(int i = 0; i < numBlocks; i++) {
			// Convert this block to a painter path
			QPainterPath path;
			QTextLayout *layout = block.layout();
			for(int j = 0; j < layout->lineCount(); j++) {
				QTextLine &line = layout->lineAt(j);
				const QString text = block.text().mid(
					line.textStart(), line.textLength());
				QPointF pos = layout->position() + line.position() +
					QPointF(m_strokeSize, m_strokeSize);
				pos.ry() += line.ascent();
				//appLog() << pos << ": " << text;
				path.addText(pos, block.charFormat().font(), text);
			}

			// Prevent gaps appearing at larger stroke sizes
			quint64 timeA = App->getUsecSinceExec();
			path = path.simplified();
			quint64 timeB = App->getUsecSinceExec();
			timeTotalSimplify += timeB - timeA;

			// Render the path
			QPen pen(m_strokeColor, m_strokeSize * 2);
			pen.setJoinStyle(Qt::RoundJoin);
			p.strokePath(path, pen);
			timeA = App->getUsecSinceExec();
			timeTotalRender += timeA - timeB;

			// Iterate
			block = block.next();
		}

		// Make the final draw take into account the stroke offset
		p.translate(m_strokeSize, m_strokeSize);

		quint64 timeEnd = App->getUsecSinceExec();
		appLog() << "Simplify time = " << timeTotalSimplify << " usec";
		appLog() << "Render time = " << timeTotalRender << " usec";
		appLog() << "Full time = " << (timeEnd - timeStart) << " usec";
#elif STROKE_TECHNIQUE == 3
		// Technique 3: Raster brute-force where for each destination pixel
		// we measure the distance to the closest opaque source pixel
		quint64 timeStart = App->getUsecSinceExec();

		// Get bounding region based on text line bounding rects
		QRegion region;
		QTextBlock &block = m_document.firstBlock();
		int numBlocks = m_document.blockCount();
		for(int i = 0; i < numBlocks; i++) {
			QTextLayout *layout = block.layout();
			for(int j = 0; j < layout->lineCount(); j++) {
				QTextLine &line = layout->lineAt(j);
				const QString text = block.text().mid(
					line.textStart(), line.textLength());
				QRect rect = line.naturalTextRect()
					.translated(layout->position()).toAlignedRect();
				if(rect.isEmpty())
					continue; // Don't add empty rectangles
				rect.adjust(0, 0, 1, 0); // QTextLine is incorrect?
				rect.adjust(
					-m_strokeSize, -m_strokeSize,
					m_strokeSize, m_strokeSize);
				//appLog() << rect;
				region += rect;
			}

			// Iterate
			block = block.next();
		}
		quint64 timeRegion = App->getUsecSinceExec();

#if 0
		// Debug bounding region
		QPainterPath regionPath;
		regionPath.addRegion(region);
		regionPath.setFillRule(Qt::WindingFill);
		p.fillPath(regionPath, QColor(255, 0, 0, 128));
#endif // 0

		// We cannot read and write to the same image at the same time so
		// create a second one. Note that this is not premultiplied.
		QImage imgOut(size, QImage::Format_ARGB32);
		imgOut.fill(Qt::transparent);

		// Do distance calculation. We assume that non-fully transparent
		// pixels are always next to a fully opaque one so if the closest
		// "covered" pixel is not fully opaque then we can use that pixel's
		// opacity to determine the distance to the shape's edge.
		for(int y = 0; y < img.height(); y++) {
			for(int x = 0; x < img.width(); x++) {
				if(!region.contains(QPoint(x, y)))
					continue;
				float dist = getDistance(img, x, y, m_strokeSize);

				// We fake antialiasing by blurring the edge by 1px
				float outEdge = (float)m_strokeSize;
				if(dist >= outEdge)
					continue; // Outside stroke completely
				float opacity = qMin(1.0f, outEdge - dist);
				QColor col = m_strokeColor;
				col.setAlphaF(col.alphaF() * opacity);

				// Blend the stroke so that it appears under the existing
				// pixel data
				QRgb origRgb = img.pixel(x, y);
				QColor origCol(origRgb);
				origCol.setAlpha(qAlpha(origRgb));
				col = blendColors(col, origCol, 1.0f);

				imgOut.setPixel(x, y, col.rgba());
			}
		}
		quint64 timeRender = App->getUsecSinceExec();

		// Swap image data
		p.end();
		img = imgOut;
		p.begin(&img);

		quint64 timeEnd = App->getUsecSinceExec();
		appLog() << "Region time = " << (timeRegion - timeStart) << " usec";
		appLog() << "Render time = " << (timeRender - timeRegion) << " usec";
		appLog() << "Swap time = " << (timeEnd - timeRender) << " usec";
		appLog() << "Full time = " << (timeEnd - timeStart) << " usec";
#endif // STROKE_TECHNIQUE
	}

	// Render text
	m_document.drawContents(&p);

	// Convert the image to a GPU texture
	m_texture = vidgfx_context_new_tex(gfx, img);

	// Preview texture for debugging
	//img.save(App->getDataDirectory().filePath("Preview.png"));
}