Mat ImageTransformation::cropImage(Mat originalImage, Point coordinate, Size rectSize, int offset)
{
    // cv::Mat copies share pixel data, so this header copy is cheap.
    cv::Mat tempImage(originalImage);
    // Shift the crop origin by the offset; the caller must keep the
    // rectangle inside the image or operator() will throw.
    cv::Rect rect = cv::Rect(coordinate.x - offset, coordinate.y - offset, rectSize.width, rectSize.height);
    // The result is a view into the original pixel data, not a deep copy.
    cv::Mat croppedImage = tempImage(rect);
    return croppedImage;
}
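Since cropImage does no range checking, a caller-side sketch (helper name hypothetical) can clamp the rectangle to the image bounds and detach the result:

// Hypothetical helper: clamp the crop window so the ROI cannot throw.
cv::Mat safeCrop(const cv::Mat& img, cv::Point center, cv::Size sz, int offset)
{
    cv::Rect rect(center.x - offset, center.y - offset, sz.width, sz.height);
    rect &= cv::Rect(0, 0, img.cols, img.rows);  // intersect with image bounds
    return img(rect).clone();                    // clone() detaches from img's data
}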
Example #2
void ImageViewer::loadFile(const QString &fileName, const QString& extension, bool loadInfo) {
    fileName_ = fileName;
    extension_ = extension.trimmed().toLower();

    clearWidgets();
    
    if(fileName.isEmpty() || extension.isEmpty()) return;
    
    if (!QFileInfo(fileName).exists()) {
        setText(notFoundMessage_); 
        return;
    }
    
    if (loadInfo) {
        if (extension_ == "mrc" || extension_ == "map" || extension_ == "mrcs") {
            mrcHeader header(fileName);
            mrcInfo->setHeader(fileName, header);
            widgets->setCurrentWidget(mrcInfo);
        } else {
            fileInfo->setFile(fileName);
            widgets->setCurrentWidget(fileInfo);
        }
    } else {
        QImage image;
        if (extension_ == "mrc") {
            
            //Check if a png preview is available
            if(QFileInfo(fileName+".png").exists()) {
                if(QFileInfo(fileName).lastModified().toMSecsSinceEpoch() <= QFileInfo(fileName+".png").lastModified().toMSecsSinceEpoch()) {
                    image = QImage(fileName+".png");
                } else {
                    qDebug() << fileName << "has a PNG preview, but it is stale; Time(MRC, PNG): " << QFileInfo(fileName).lastModified().toMSecsSinceEpoch() << QFileInfo(fileName+".png").lastModified().toMSecsSinceEpoch();
                    mrcImage tempImage(fileName);
                    image = *(tempImage.getImage());
                }
            } else {
                mrcImage tempImage(fileName);
                image = *(tempImage.getImage());
            }
        } else {
            image = QImage(fileName);
        }

        if (image.isNull()) {
            setNotSupportedText();
            return;
        }
        imageLabel->setPixmap(QPixmap::fromImage(image));
        resizeWidgets();
    }
}
Example #3
cv::Mat ThermoCam::generateBin(const cv::Mat temperature)
{
   // Input pixels are 16-bit temperatures in tenths of a degree, offset by +1000.
   const unsigned short tempMin = static_cast<unsigned short>(33 * 10);

   cv::Mat tempImage(temperature.rows, temperature.cols, CV_8UC1);

   for (int row = 0; row < temperature.rows; row++)
   {
       const uint16_t* dataTemperature = reinterpret_cast<const uint16_t*>(temperature.ptr(row));
       unsigned char* dataTempImage = tempImage.ptr(row);

       for (int col = 0; col < temperature.cols; col++, dataTemperature++) {
           // Note: the subtraction wraps for raw values below 1000, which then
           // compare as large and map to 0x00.
           const unsigned short temp = (*dataTemperature - 1000);
           if (temp < tempMin) *dataTempImage++ = 0xff;
           else                *dataTempImage++ = 0x00;
       }
   }

   if(tempImage.rows != _bin.rows || tempImage.cols != _bin.cols) {
      _bin = cv::Mat(tempImage.size(), CV_8UC1);
   }

   return tempImage;
}
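The loop is effectively a fixed threshold; a sketch of the same binarization with an OpenCV primitive, assuming CV_16UC1 input and that raw values never fall below the 1000 offset:

// Binarize with cv::compare instead of a hand-rolled loop (assumes CV_16UC1
// input in tenths of a degree with a +1000 offset, and raw values >= 1000).
cv::Mat binarize(const cv::Mat& temperature, unsigned short tempMin = 330)
{
    cv::Mat mask;
    cv::compare(temperature, cv::Scalar(1000 + tempMin), mask, cv::CMP_LT);
    return mask;  // CV_8UC1: 255 where the temperature is below the threshold
}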
Example #4
bool Object::exportFrames( int frameStart, int frameEnd,
                           Layer* currentLayer,
                           QSize exportSize, QString filePath,
                           const char* format,
                           int quality,
                           bool background,
                           bool antialiasing,
                           QProgressDialog* progress = NULL,
                           int progressMax = 50 )
{
    QSettings settings( "Pencil", "Pencil" );

    QString extension = "";
    QString formatStr = format;
    if ( formatStr == "PNG" || formatStr == "png" )
    {
        format = "PNG";
        extension = ".png";
    }
    if ( formatStr == "JPG" || formatStr == "jpg" || formatStr == "JPEG" || formatStr == "jpeg" )
    {
        format = "JPG";
        extension = ".jpg";
        background = true; // JPG doesn't support transparency so we have to include the background
    }
    if ( filePath.endsWith( extension, Qt::CaseInsensitive ) )
    {
        filePath.chop( extension.size() );
    }
    //qDebug() << "format =" << format << "extension = " << extension;

    qDebug() << "Exporting frames from " << frameStart << "to" << frameEnd << "at size " << exportSize;
    for ( int currentFrame = frameStart; currentFrame <= frameEnd; currentFrame++ )
    {
        if ( progress != NULL ) progress->setValue( ( currentFrame - frameStart )*progressMax / qMax( 1, frameEnd - frameStart ) ); // guard against frameEnd == frameStart
        QImage tempImage( exportSize, QImage::Format_ARGB32_Premultiplied );
        QPainter painter( &tempImage );

        // Make sure that old frame is erased before exporting a new one
        tempImage.fill( 0x00000000 );

        QRect viewRect = ( ( LayerCamera* )currentLayer )->getViewRect();
        QTransform mapView = RectMapTransform( viewRect, QRectF( QPointF( 0, 0 ), exportSize ) );
        mapView = ( ( LayerCamera* )currentLayer )->getViewAtFrame( currentFrame ) * mapView;
        painter.setWorldTransform( mapView );

        paintImage( painter, currentFrame, background, antialiasing );

        QString frameNumberString = QString::number( currentFrame );
        while ( frameNumberString.length() < 4 )
        {
            frameNumberString.prepend( "0" );
        }
        tempImage.save( filePath + frameNumberString + extension, format, quality );
    }

    return true;
}
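The manual prepend loop used for zero-padding can be replaced with QString's field-width formatting; an equivalent one-liner:

// Zero-pad a frame number to four digits, e.g. 7 -> "0007".
QString frameNumberString = QString( "%1" ).arg( currentFrame, 4, 10, QChar( '0' ) );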
Example #5
bool Object::exportFrames(int frameStart, int frameEnd, QMatrix view, Layer* currentLayer, QSize exportSize, QString filePath, const char* format, int quality, bool background, bool antialiasing, int gradients, QProgressDialog* progress=NULL, int progressMax=50)
{

    QSettings settings("Pencil","Pencil");
    qreal curveOpacity = (100-settings.value("curveOpacity").toInt())/100.0; // default value is 1.0

    QString extension = "";
    QString formatStr = format;
    if ( formatStr == "PNG" || formatStr == "png")
    {
        format = "PNG";
        extension = ".png";
    }
    if ( formatStr == "JPG" || formatStr == "jpg" || formatStr == "JPEG" || formatStr == "jpeg")
    {
        format = "JPG";
        extension = ".jpg";
        background = true; // JPG doesn't support transparency so we have to include the background
    }
    if (filePath.endsWith(extension, Qt::CaseInsensitive))
    {
        filePath.chop(extension.size());
    }
    //qDebug() << "format =" << format << "extension = " << extension;

    qDebug() << "Exporting frames from " << frameStart << "to" << frameEnd << "at size " << exportSize;
    for(int currentFrame = frameStart; currentFrame <= frameEnd ; currentFrame++)
    {
        if ( progress != NULL ) progress->setValue((currentFrame-frameStart)*progressMax/qMax(1, frameEnd-frameStart)); // guard against frameEnd == frameStart
        QImage tempImage(exportSize, QImage::Format_ARGB32_Premultiplied);
        QPainter painter(&tempImage);

        // Make sure that old frame is erased before exporting a new one
        tempImage.fill(0x00000000);

        if (currentLayer->type == Layer::CAMERA)
        {
            QRect viewRect = ((LayerCamera*)currentLayer)->getViewRect();
            QMatrix mapView = Editor::map( viewRect, QRectF(QPointF(0,0), exportSize) );
            mapView = ((LayerCamera*)currentLayer)->getViewAtFrame(currentFrame) * mapView;
            painter.setWorldMatrix(mapView);
        }
        else
        {
            painter.setWorldMatrix(view);
        }
        paintImage(painter, currentFrame, background, curveOpacity, antialiasing, gradients);

        QString frameNumberString = QString::number(currentFrame);
        while ( frameNumberString.length() < 4) frameNumberString.prepend("0");
        tempImage.save(filePath+frameNumberString+extension, format, quality);
    }

    // XXX no error handling done yet
    return true;
}
Example #6
void MainWindow::on_actionImport_Image_triggered()
{
    imageFileName = QFileDialog::getOpenFileName(this, tr("Open File"),"/home/matt/Desktop",tr("Images (*.jpg *.bmp)"));
    if(imageFileName!="")
    {
        QPixmap tempImage(imageFileName);
        ui->label->loadImage(tempImage);

    }
}
Example #7
void HalfwayImage::paintEvent(QPaintEvent *event)
{
	if(!_image_loaded)
		return;

	QPainter painter(this);
	QPixmap pixmaptoshow;	
	
		
	if(_flag_error)
		pixmaptoshow=QPixmap::fromImage(_image_error.scaled(this->size(),Qt::KeepAspectRatio));
	else
	{
		QImage tempImage(_image);

		if (!_flag_multilayer)
		{
			QPoint MouseP(_mouse_pos.x()*_image.width(),_mouse_pos.y()*_image.height());
			int radius;
			if (_pressed)		
				radius=_image.width()/8;
			else
				radius=_image.width()/16;
			QRect rect(MouseP-QPoint(radius,radius),MouseP+QPoint(radius,radius));

			for(int y=rect.top();y<=rect.bottom();y++)
				for(int x=rect.left();x<=rect.right();x++)
				{
					if (tempImage.rect().contains(QPoint(x,y))&&(y-MouseP.y())*(y-MouseP.y())+(x-MouseP.x())*(x-MouseP.x())<radius*radius)
					{
						if (_pressed)					
							tempImage.setPixel(QPoint(x,y),_imageR.pixel(QPoint(x,y)));					
						else					
							tempImage.setPixel(QPoint(x,y),_imageL.pixel(QPoint(x,y)));	
					}
				}

			QPainter img_painter(&tempImage);
			QPen blackPen(qRgba(0, 0, 0, 255));
			img_painter.setPen(blackPen);
			QBrush EmptyBrush(Qt::NoBrush);
			img_painter.setBrush(EmptyBrush);
			img_painter.drawEllipse(MouseP,radius,radius);
		}
		
		pixmaptoshow=QPixmap::fromImage(tempImage.scaled(this->size(),Qt::KeepAspectRatio));
	}

		
	painter.drawPixmap(0,0, pixmaptoshow);
	_real_size=pixmaptoshow.size();		
}
Example #8
void PreviewColorPickerTool::CalcCorrectionForImage(unsigned int i,vigra::Point2D pos)
{
    const HuginBase::SrcPanoImage & img = helper->GetPanoramaPtr()->getImage(i);
    HuginBase::ImageCache::ImageCacheRGB8Ptr cacheImage8 = HuginBase::ImageCache::getInstance().getImage(img.getFilename())->get8BitImage();

    //copy only region to be inspected
    vigra::BRGBImage tempImage(2*ColorPickerSize,2*ColorPickerSize);
    vigra::copyImage(vigra::make_triple((*cacheImage8).upperLeft() + pos + vigra::Point2D(-ColorPickerSize,-ColorPickerSize),
                                        (*cacheImage8).upperLeft() + pos + vigra::Point2D( ColorPickerSize, ColorPickerSize),
                                        vigra::BRGBImage::Accessor()),
                     destImage(tempImage) );

    //now apply photometric corrections
    HuginBase::Photometric::InvResponseTransform<vigra::UInt8, double> invResponse(img);
    if (helper->GetPanoramaPtr()->getOptions().outputMode == HuginBase::PanoramaOptions::OUTPUT_LDR)
    {
        // select exposure and response curve for LDR output
        std::vector<double> outLut;
        vigra_ext::EMoR::createEMoRLUT(helper->GetPanoramaPtr()->getImage(0).getEMoRParams(), outLut);
        vigra_ext::enforceMonotonicity(outLut);
        invResponse.setOutput(1.0/pow(2.0,helper->GetPanoramaPtr()->getOptions().outputExposureValue), outLut,
                              255.0);
    }
    else
    {
        invResponse.setHDROutput(true,1.0/pow(2.0,helper->GetPanoramaPtr()->getOptions().outputExposureValue));
    }
    vigra::DRGBImage floatTemp(tempImage.size());
    vigra_ext::transformImageSpatial(srcImageRange(tempImage), destImage(floatTemp), invResponse, vigra::Diff2D(pos.x-ColorPickerSize,pos.y-ColorPickerSize));

    //calculate average
    vigra::FindAverage<vigra::DRGBImage::PixelType> average;
    vigra::inspectImage(srcImageRange(floatTemp), average);
    //range check
    vigra::RGBValue<double> RGBaverage=average.average();
    if(RGBaverage[0]>2 && RGBaverage[0]<253 &&
       RGBaverage[1]>2 && RGBaverage[1]<253 &&
       RGBaverage[2]>2 && RGBaverage[2]<253)
    {
        m_red+=RGBaverage[0]/RGBaverage[1];
        m_blue+=RGBaverage[2]/RGBaverage[1];
        m_count++;
    }
}
Example #9
// ImageLayerNode
ImageLayerNode::ImageLayerNode(QQuickWindow *window, const QString file, bool mirroredType)
{
    QImage image(file);

    // NOTE this is a workaround to get the mirrored effect at the end of the image
    // ideally, do it using the shader program
    if (mirroredType) {
        QImage tempImage(image.width() * 2, image.height(), QImage::Format_ARGB32);
        QPainter p(&tempImage);
        p.drawImage(0, 0, image);
        p.drawImage(image.width(), 0, image.mirrored(true, false));
        p.end();

        image = tempImage;
    }

    QSGTexture *texture = window->createTextureFromImage(image);

    texture->setHorizontalWrapMode(QSGTexture::Repeat);
    texture->setVerticalWrapMode(QSGTexture::Repeat);
    texture->setFiltering(QSGTexture::Linear);

    m_width = texture->textureSize().width();
    m_height = texture->textureSize().height();

    QSGSimpleMaterial<ImageLayerState> *m = ImageLayerShader::createMaterial();
    m->state()->texture = texture;
    setMaterial(m);
    setFlag(OwnsMaterial, true);

    updateXPos(0);
    updateYPos(0);

    QSGGeometry *g = new QSGGeometry(QSGGeometry::defaultAttributes_TexturedPoint2D(), 4);
    QSGGeometry::updateTexturedRectGeometry(g, QRect(), QRect());
    setGeometry(g);

    setFlag(OwnsGeometry, true);
}
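If the project can require Qt 5.10 or newer (an assumption, not stated in the original), the doubled-image workaround may be replaceable by the texture's own mirrored wrap mode:

// Assumes Qt >= 5.10, where QSGTexture::MirroredRepeat is available.
texture->setHorizontalWrapMode(QSGTexture::MirroredRepeat);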
Example #10
void Ffmpeg::createIntermediateImage(const TImageP &img, int frameIndex) {
  m_frameCount++;
  if (m_frameNumberOffset == -1) m_frameNumberOffset = frameIndex - 1;
  QString tempPath = getFfmpegCache().getQString() + "/" +
                     QString::fromStdString(m_path.getName()) + "tempOut" +
                     QString::number(frameIndex - m_frameNumberOffset) + "." +
                     m_intermediateFormat;
  TRasterImageP tempImage(img);
  TRasterImage *image = (TRasterImage *)tempImage->cloneImage();

  m_lx           = image->getRaster()->getLx();
  m_ly           = image->getRaster()->getLy();
  m_bpp          = image->getRaster()->getPixelSize();
  int totalBytes = m_lx * m_ly * m_bpp;
  image->getRaster()->yMirror();

  // lock raster to get data
  image->getRaster()->lock();
  void *buffin = image->getRaster()->getRawData();
  assert(buffin);
  void *buffer = malloc(totalBytes);
  memcpy(buffer, buffin, totalBytes);

  image->getRaster()->unlock();

  // create QImage save format
  QByteArray ba      = m_intermediateFormat.toUpper().toLatin1();
  const char *format = ba.data();

  // Wrap the copied pixels; assumes a tightly packed buffer (m_lx * 4 bytes per row).
  QImage *qi = new QImage((uint8_t *)buffer, m_lx, m_ly, QImage::Format_ARGB32);
  qi->save(tempPath, format, -1);
  free(buffer);
  m_cleanUpList.push_back(tempPath);

  delete qi;
  delete image;
}
Example #11
void NineSlicesPainter::LoadImage(const wxString& filePath, bool forceAlpha) {
  filePath_ = filePath;
  wxDELETE(sourceBitmap_);
  sourceBitmap_ = new wxBitmap(filePath_, wxBITMAP_TYPE_PNG);  

  if (!sourceBitmap_->HasAlpha() && forceAlpha) {
    // @hack: the bitmap MUST have an alpha channel
    // See ImageButton::LoadImage
    wxImage tempImage(filePath_, wxBITMAP_TYPE_PNG);
    if (tempImage.GetWidth() > 0 && tempImage.GetHeight() > 0) {
      tempImage.InitAlpha();

      // Nudge the corner pixel's alpha away from fully transparent/opaque so
      // the alpha channel survives the wxBitmap conversion.
      unsigned char pixelAlpha = tempImage.GetAlpha(0, 0);
      if (pixelAlpha == 0) {
        pixelAlpha = 1;
      } else if (pixelAlpha == 255) {
        pixelAlpha = 254;
      }
      tempImage.SetAlpha(0, 0, pixelAlpha);
      
      wxDELETE(sourceBitmap_);
      sourceBitmap_ = new wxBitmap(tempImage);
    }
  }  
}
Example #12
void imageCallback(const sensor_msgs::ImageConstPtr & msg){

#ifdef PRINT_ROS_INFO
  ROS_INFO("Got image message.");
#endif

  // get the compressed image, and convert it to Opencv format.
  cv::Mat img;
  try{
   img =  cv_bridge::toCvShare(msg, "bgr8")->image;
  }
  catch(cv_bridge::Exception & e){
    ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
    return; // img would be empty; bail out instead of processing it
  }

#ifdef PRINT_ROS_INFO
  ROS_INFO("Converting image done.");
#endif  

  //std::cout << "image size = ( " << img.rows << " X " << img.cols << " )." << std::endl;
  //printf("image data address 0x%x\n", img.data);

  if( startTracking ){

    trackerMutex.lock();

#ifdef PRINT_ROS_INFO

    ROS_INFO("Tracker: Reading Frame ... ");
#endif    

    // update the tracking status, and draw the result.
    tracker->readFrame(img);
    
#ifdef PRINT_ROS_INFO
    ROS_INFO("Tracker: Updating status ... ");
#endif    

    tracker->updateTrackerStatus();

#ifdef PRINT_ROS_INFO
    ROS_INFO("Tracker: status updated ... ");
    ROS_INFO("Tracker: drawing ... ");
#endif    

    cv::Mat temp;
    img.copyTo(temp);
    tracker->drawTrackers(temp);
    
#ifdef PRINT_ROS_INFO
    ROS_INFO("Tracker: Publishing ... ");
#endif    

    // republish this image.
    sensor_msgs::ImagePtr outMsg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", temp).toImageMsg(); // don't shadow the callback's msg
    pub.publish(outMsg);

    // publish to topic -- object_location
    cv::Point2i location = tracker->getWeightedAverageLocation();
    std::stringstream locationStrStream;

    int currentNum = tracker->getSampleNum();
    int numWithHighConfidence = tracker->getNumOfSamplesHasProbLargerThan(PROB_THRESHOD);

    
    float highConfidenceSampleRatio;
    if( currentNum <= 0){
      highConfidenceSampleRatio = 0;
    }else{
      highConfidenceSampleRatio = numWithHighConfidence * 1.0f / currentNum;
    }

    std::cout << "High confidence sample ratio = " << highConfidenceSampleRatio << std::endl; 
    
    if( location.x < 0 || location.y < 0 || highConfidenceSampleRatio <= HIGH_CONFID_NUM_RATIO_THRESHOLD ){
      //locationStrStream << "object_x " << "0" << " , " << "object_y " << "0" << " , ";
      
      locationStrStream << "object_x " << img.cols /2   << ", " << "object_y " << img.rows / 2 << ", ";
      
      // make offsets to the samples:
      
      ROS_INFO("Tracker offset!");
      if( lastMovementDirection == TRACKER_UP)
        tracker->offsetTracker(TRACKER_DOWN);
      else if( lastMovementDirection == TRACKER_DOWN)
        tracker->offsetTracker(TRACKER_UP);
      else if( lastMovementDirection == TRACKER_LEFT)
        tracker->offsetTracker(TRACKER_RIGHT);
      else if( lastMovementDirection == TRACKER_RIGHT)
        tracker->offsetTracker(TRACKER_LEFT);
      
      
    }else{
      // "x 10, y 10, width 360, height 640"
      locationStrStream << "object_x " << location.x << ", " << "object_y " << location.y << ", ";
      lastMovementDirection = -1;
    }

    locationStrStream << "width " << img.cols << ", " << "height " << img.rows << ", ";

    locationStrStream << "direction follow" ;

    std_msgs::String locationMsg;
    locationMsg.data = locationStrStream.str();
    location_pub.publish(locationMsg);
        
    // release the lock
    trackerMutex.unlock();

  }

  else if (! objectSpecified ) {
    // show the image and let the user specify the initial object.

    //std::cout << img.rows << "," << img.cols << std::endl;
    
    cv::Mat tempImage(img.rows, img.cols, img.type());
    img.copyTo(tempImage);

    
    if( trackerMaxSize < 0){
      trackerMaxSize = MIN(img.rows, img.cols) - 1;
    }
    
    ROS_INFO("Drawing tracker ... ");

    cv::rectangle(tempImage, cv::Rect(tempImage.cols / 2 - trackerSize / 2, tempImage.rows / 2 - trackerSize / 2, trackerSize, trackerSize), cv::Scalar(0,0,255));
    
    // republish this image.
    sensor_msgs::ImagePtr outMsg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", tempImage).toImageMsg(); // don't shadow the callback's msg
    pub.publish(outMsg);

  }
  
  else{

    trackerMutex.lock();
    
    // haven't started tracking, but the initial object is specified.
    // create a tracker;
    if( tracker != NULL) delete tracker;

    tracker = new Tracker2D();

    tracker->setLimit(5 , img.cols - 5, 5 , img.rows - 5);
    tracker->initialize(cv::Point2f(img.cols / 2, img.rows / 2), trackerSize);
    
    // set object feature.
    cv::Mat objImage = img(cv::Rect(img.cols / 2 - trackerSize /2, img.rows / 2 - trackerSize /2, trackerSize, trackerSize));
    
    tracker->setObjectFeature(objImage);
    
    ROS_INFO("Starting Tracking ... " );

    startTracking = true;

    trackerMutex.unlock();
  }
}
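The callback relies on globals (pub, location_pub, tracker, the flags and sizes) defined elsewhere in the node; a minimal wiring sketch, with topic names assumed:

// Hypothetical node setup for imageCallback; topic names are assumptions.
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <std_msgs/String.h>

image_transport::Publisher pub;   // republished, annotated frames
ros::Publisher location_pub;      // "object_location" strings

int main(int argc, char** argv)
{
  ros::init(argc, argv, "tracker_node");
  ros::NodeHandle nh;
  image_transport::ImageTransport it(nh);
  pub = it.advertise("tracker/image", 1);
  location_pub = nh.advertise<std_msgs::String>("object_location", 1);
  image_transport::Subscriber sub = it.subscribe("camera/image", 1, imageCallback);
  ros::spin();
  return 0;
}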
Example #13
/**
* @brief 
*
* @param imageCount
* @param imageFormat
* @param imageWidth
* @param imageHeight
*/
void MemoryIOV1::attachMemory( int imageCount, PixelFormat imageFormat, uint16_t imageWidth, uint16_t imageHeight )
{
    if ( mMemPtr )
        Fatal( "Unable to attach to shared memory, already attached" );
    mImageCount = imageCount;
    Image tempImage( imageFormat, imageWidth, imageHeight, 0 );
    Info( "Pixelformat converted from %d to %d", imageFormat, tempImage.pixelFormat() );
    size_t imageSize = tempImage.size();

    mMemSize = sizeof(SharedData)
             + sizeof(TriggerData)
             + (mImageCount*sizeof(struct timeval))
             + (mImageCount*imageSize);

    Debug( 1, "mem.size=%d", mMemSize );
#if OZ_MEM_MAPPED
    //snprintf( mMemFile, sizeof(mMemFile), "%s/oz.mmap.%d", config.path_map, mMemoryKey );
    snprintf( mMemFile, sizeof(mMemFile), "%s/oz.mmap.%d", mLocation.c_str(), mMemoryKey );
    mMapFd = open( mMemFile, O_RDWR|O_CREAT, (mode_t)0600 );
    if ( mMapFd < 0 )
        Fatal( "Can't open memory map file %s, probably not enough space free: %s", mMemFile, strerror(errno) );
    struct stat mapStat;
    if ( fstat( mMapFd, &mapStat ) < 0 )
        Fatal( "Can't stat memory map file %s: %s", mMemFile, strerror(errno) );
    if ( mapStat.st_size == 0 )
    {
        // Allocate the size
        if ( ftruncate( mMapFd, mMemSize ) < 0 )
            Fatal( "Can't extend memory map file %s to %d bytes: %s", mMemFile, mMemSize, strerror(errno) );
    }
    else if ( mapStat.st_size != mMemSize )
    {
        Error( "Got unexpected memory map file size %ld, expected %d", mapStat.st_size, mMemSize );
        close( mMapFd );
        if ( mOwner )
            unlink( mMemFile );
    }

    mMemPtr = (unsigned char *)mmap( NULL, mMemSize, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_LOCKED, mMapFd, 0 );
    if ( mMemPtr == MAP_FAILED )
    {
        if ( errno == EAGAIN )
        {
            Debug( 1, "Unable to map file %s (%d bytes) to locked memory, trying unlocked", mMemFile, mMemSize );
            mMemPtr = (unsigned char *)mmap( NULL, mMemSize, PROT_READ|PROT_WRITE, MAP_SHARED, mMapFd, 0 );
        }
    }
    if ( mMemPtr == MAP_FAILED )
        Fatal( "Can't map file %s (%d bytes) to memory: %s(%d)", mMemFile, mMemSize, strerror(errno), errno );
#else // OZ_MEM_MAPPED
    mShmId = shmget( (config.shm_key&0xffff0000)|mMemoryKey, mMemSize, IPC_CREAT|0700 );
    if ( mShmId < 0 )
    {
        Error( "Can't shmget, probably not enough shared memory space free: %s", strerror(errno));
        exit( -1 );
    }
    mMemPtr = (unsigned char *)shmat( mShmId, 0, 0 );
    if ( mMemPtr == (unsigned char *)-1 ) // shmat returns (void *)-1 on failure
    {
        Error( "Can't shmat: %s", strerror(errno));
        exit( -1 );
    }
#endif // OZ_MEM_MAPPED

    mSharedData = (SharedData *)mMemPtr;
    mTriggerData = (TriggerData *)((char *)mSharedData + sizeof(SharedData));
    struct timeval *sharedTimestamps = (struct timeval *)((char *)mTriggerData + sizeof(TriggerData));
    unsigned char *sharedImages = (unsigned char *)((char *)sharedTimestamps + (mImageCount*sizeof(struct timeval)));


    if ( mOwner )
    {
        memset( mMemPtr, 0, mMemSize );
        mSharedData->size = sizeof(SharedData);
        mSharedData->valid = true;
        mSharedData->active = true;
        mSharedData->signal = false;
        mSharedData->state = IDLE;
        mSharedData->last_write_index = mImageCount;
        mSharedData->last_read_index = mImageCount;
        mSharedData->last_write_time = 0;
        mSharedData->last_event = 0;
        mSharedData->action = 0;
        mSharedData->brightness = -1;
        mSharedData->hue = -1;
        mSharedData->colour = -1;
        mSharedData->contrast = -1;
        mSharedData->alarm_x = -1;
        mSharedData->alarm_y = -1;
        mTriggerData->size = sizeof(TriggerData);
        mTriggerData->trigger_state = TRIGGER_CANCEL;
        mTriggerData->trigger_score = 0;
        mTriggerData->trigger_cause[0] = 0;
        mTriggerData->trigger_text[0] = 0;
        mTriggerData->trigger_showtext[0] = 0;
    }
    else
    {
        if ( !mSharedData->valid )
        {
            Error( "Shared data not initialised by capture daemon" );
            exit( -1 );
        }
    }

    mImageBuffer = new Snapshot[mImageCount];
    for ( int i = 0; i < imageCount; i++ )
    {
        mImageBuffer[i].timestamp = &(sharedTimestamps[i]);
        mImageBuffer[i].image = new Image( tempImage.format(), imageWidth, imageHeight, &(sharedImages[i*imageSize]), true );
    }
}
Example #14
bool Object::exportFrames1(int frameStart, int frameEnd, QMatrix view, Layer* currentLayer, QSize exportSize, QString filePath, const char* format, int quality, bool background, bool antialiasing, int gradients, QProgressDialog* progress, int progressMax, int fps, int exportFps)
{

    int frameRepeat;
    int frameReminder, frameReminder1;
    int framePutEvery, framePutEvery1;
    int frameSkipEvery, frameSkipEvery1;
    int frameNumber;
    int framePerSecond;

    QSettings settings("Pencil","Pencil");
    qreal curveOpacity = (100-settings.value("curveOpacity").toInt())/100.0; // default value is 1.0

    QString extension = "";
    QString formatStr = format;
    if ( formatStr == "PNG" || formatStr == "png")
    {
        format = "PNG";
        extension = ".png";
    }
    if ( formatStr == "JPG" || formatStr == "jpg" || formatStr == "JPEG")
    {
        format = "JPG";
        extension = ".jpg";
        background = true; // JPG doesn't support transparency so we have to include the background
    }
    if (filePath.endsWith(extension, Qt::CaseInsensitive))
    {
        filePath.chop(extension.size());
    }
    //qDebug() << "format =" << format << "extension = " << extension;

    qDebug() << "Exporting frames from " << frameStart << "to" << frameEnd << "at size " << exportSize;
    convertNFrames(fps,exportFps,&frameRepeat,&frameReminder,&framePutEvery,&frameSkipEvery);
    qDebug() << "fps " << fps << " exportFps " << exportFps << " frameRepeat " << frameRepeat << " frameReminder " << frameReminder << " framePutEvery " << framePutEvery << " frameSkipEvery " << frameSkipEvery;
    frameNumber = 0;
    framePerSecond = 0;
    frameReminder1 = frameReminder;
    framePutEvery1 = framePutEvery;
    frameSkipEvery1 = frameSkipEvery;
    for(int currentFrame = frameStart; currentFrame <= frameEnd ; currentFrame++)
    {
        if ( progress != NULL ) progress->setValue((currentFrame-frameStart)*progressMax/qMax(1, frameEnd-frameStart)); // guard against frameEnd == frameStart
        QImage tempImage(exportSize, QImage::Format_ARGB32_Premultiplied);
        QPainter painter(&tempImage);

        // Make sure that old frame is erased before exporting a new one
        tempImage.fill(0x00000000);

        if (currentLayer->type == Layer::CAMERA)
        {
            QRect viewRect = ((LayerCamera*)currentLayer)->getViewRect();
            QMatrix mapView = Editor::map( viewRect, QRectF(QPointF(0,0), exportSize) );
            mapView = ((LayerCamera*)currentLayer)->getViewAtFrame(currentFrame) * mapView;
            painter.setWorldMatrix(mapView);
        }
        else
        {
            painter.setWorldMatrix(view);
        }
        paintImage(painter, currentFrame, background, curveOpacity, antialiasing, gradients);

        frameNumber++;
        framePerSecond++;
        QString frameNumberString = QString::number(frameNumber);
        while ( frameNumberString.length() < 4) frameNumberString.prepend("0");

        tempImage.save(filePath+frameNumberString+extension, format, quality);
        int delta = 0;
        if (framePutEvery)
        {
            framePutEvery1--;
            if (framePutEvery1)
            {delta = 1;}
            else
            {framePutEvery1 = framePutEvery;}
        }
        if (frameSkipEvery)
        {
            frameSkipEvery1--;
            if (!frameSkipEvery1)
            {delta = 1;}
            else
            {frameSkipEvery1 = frameSkipEvery;}
        }
        if (frameReminder1)
        {frameReminder1 -= delta;}
        else
        {delta = 0;}
        for (int i=0; (i < frameRepeat-1+delta) && (framePerSecond < exportFps); i++)
        {
            frameNumber++;
            framePerSecond++;
            QString frameNumberLink = QString::number(frameNumber);
            while ( frameNumberLink.length() < 4) frameNumberLink.prepend("0");
//                    QFile::link(filePath+frameNumberString+extension, filePath+frameNumberLink+extension+".lnk");
            tempImage.save(filePath+frameNumberLink+extension, format, quality);
        }
        if (framePerSecond == exportFps)
        {
            framePerSecond = 0;
            frameReminder1 = frameReminder;
            framePutEvery1 = framePutEvery;
            frameSkipEvery1 = frameSkipEvery;
        }
    }

    // XXX no error handling yet
    return true;
}
Example #15
bool Object::exportFrames1( ExportFrames1Parameters par )
{
    int frameStart = par.frameStart;
    int frameEnd = par.frameEnd;
    QTransform view = par.view;
    Layer* currentLayer = par.currentLayer;
    QSize exportSize = par.exportSize;
    QString filePath = par.filePath;
    const char* format = par.format;
    int quality = par.quality;
    bool background = par.background;
    bool antialiasing = par.antialiasing;
    QProgressDialog* progress = par.progress;
    int progressMax = par.progressMax;
    int fps = par.fps;
    int exportFps = par.exportFps;

    int frameRepeat;
    int frameReminder, frameReminder1;
    int framePutEvery, framePutEvery1;
    int frameSkipEvery, frameSkipEvery1;
    int frameNumber;
    int framePerSecond;

    QSettings settings( "Pencil", "Pencil" );

    QString extension = "";
    QString formatStr = format;
    if ( formatStr == "PNG" || formatStr == "png" )
    {
        format = "PNG";
        extension = ".png";
    }
    if ( formatStr == "JPG" || formatStr == "jpg" || formatStr == "JPEG" )
    {
        format = "JPG";
        extension = ".jpg";
        background = true; // JPG doesn't support transparency so we have to include the background
    }
    if ( filePath.endsWith( extension, Qt::CaseInsensitive ) )
    {
        filePath.chop( extension.size() );
    }
    //qDebug() << "format =" << format << "extension = " << extension;

    qDebug() << "Exporting frames from " << frameStart << "to" << frameEnd << "at size " << exportSize;
    convertNFrames( fps, exportFps, &frameRepeat, &frameReminder, &framePutEvery, &frameSkipEvery );
    qDebug() << "fps " << fps << " exportFps " << exportFps << " frameRepeat " << frameRepeat << " frameReminder " << frameReminder << " framePutEvery " << framePutEvery << " frameSkipEvery " << frameSkipEvery;
    frameNumber = 0;
    framePerSecond = 0;
    frameReminder1 = frameReminder;
    framePutEvery1 = framePutEvery;
    frameSkipEvery1 = frameSkipEvery;
    for ( int currentFrame = frameStart; currentFrame <= frameEnd; currentFrame++ )
    {
        if ( progress != NULL ) progress->setValue( ( currentFrame - frameStart )*progressMax / qMax( 1, frameEnd - frameStart ) ); // guard against frameEnd == frameStart
        QImage tempImage( exportSize, QImage::Format_ARGB32_Premultiplied );
        QPainter painter( &tempImage );

        // Make sure that old frame is erased before exporting a new one
        tempImage.fill( 0x00000000 );

        if ( currentLayer->type() == Layer::CAMERA )
        {
            QRect viewRect = ( ( LayerCamera* )currentLayer )->getViewRect();
            QTransform mapView = RectMapTransform( viewRect, QRectF( QPointF( 0, 0 ), exportSize ) );
            mapView = ( ( LayerCamera* )currentLayer )->getViewAtFrame( currentFrame ) * mapView;
            painter.setWorldTransform( mapView );
        }
        else
        {
            painter.setTransform( view );
        }
        paintImage( painter, currentFrame, background, antialiasing );

        frameNumber++;
        framePerSecond++;
        QString frameNumberString = QString::number( frameNumber );
        while ( frameNumberString.length() < 4 ) frameNumberString.prepend( "0" );

        tempImage.save( filePath + frameNumberString + extension, format, quality );
        int delta = 0;
        if ( framePutEvery )
        {
            framePutEvery1--;
            qDebug() << "-->framePutEvery1" << framePutEvery1;
            if ( framePutEvery1 )
            {
                delta = 0;
            }
            else
            {
                delta = 1; framePutEvery1 = framePutEvery;
            }
        }
        if ( frameSkipEvery )
        {
            frameSkipEvery1--;
            qDebug() << "-->frameSkipEvery1" << frameSkipEvery1;
            if ( frameSkipEvery1 )
            {
                delta = 1;
            }
            else
            {
                delta = 0; frameSkipEvery1 = frameSkipEvery;
            }
        }
        if ( frameReminder1 )
        {
            frameReminder1 -= delta;
        }
        else
        {
            delta = 0;
        }
        for ( int i = 0; ( i < frameRepeat - 1 + delta ) && ( framePerSecond < exportFps ); i++ )
        {
            frameNumber++;
            framePerSecond++;
            QString frameNumberLink = QString::number( frameNumber );
            while ( frameNumberLink.length() < 4 ) frameNumberLink.prepend( "0" );
            tempImage.save( filePath + frameNumberLink + extension, format, quality );
        }
        if ( framePerSecond == exportFps )
        {
            framePerSecond = 0;
            frameReminder1 = frameReminder;
            framePutEvery1 = framePutEvery;
            frameSkipEvery1 = frameSkipEvery;
        }
    }

    // XXX no error handling yet
    return true;
}
Example #16
	bool ErosionKernel<T, U>::convolve(const BitImage<U>& iImage, BitImage<U>& oImage) const
	{	
		//LOG1("Begin Convolve %d.", &iImage);
		int r, c, cc, rr, center;            

		center = m_dimension / 2;

		long rows=0, cols=0;
		if (!iImage.GetDimensions(rows, cols))
		{
			return false;
		}

		
		BitImage<U> tempImage(rows, cols);
		//tempImage = iImage;

		//const U* iBuf	= (const U*)iImage.GetBuffer();
		//U* tempBuf = (U *)tempImage.GetBuffer();
		bool okErode=false;
		long totalDim = rows*cols;
		//LOG0("Blurring the image in the X-direction.");
		for(r=0;r<rows;r++)
		{
			for(c=0;c<cols;c++)
			{
				okErode=false;
				for(rr=(-center);rr<=center;rr++)
				{
					for(cc=(-center);cc<=center;cc++)
					{
						if(((c+cc) >= 0) && ((c+cc) < cols) && ((r+rr) >= 0) && ((r+rr) < rows))
						{
							//long iIndex = (r+rr)*cols+(c+cc);							
							//if (iIndex>=0 && iIndex<totalDim)
							{
								if (m_buffer[(center+rr)*m_dimension + center+cc]>0 
									&& iImage.GetValue(r+rr, c+cc)==false)
								{
									okErode=true;
									break;
								}
							}
						}
					}
				}

				// it is not separable

				if (okErode)
				{
					for(rr=(-center);rr<=center;rr++)
					{
						for(cc=(-center);cc<=center;cc++)
						{
							//long iIndex = (r+rr)*cols+(c+cc);							
							//if (iIndex>=0 && iIndex<totalDim)
							if(((c+cc) >= 0) && ((c+cc) < cols) && ((r+rr) >= 0) && ((r+rr) < rows))
							{
								if (m_buffer[(center+rr)*m_dimension + center+cc] )
									//tempBuf[iIndex] = minVal;
									tempImage.SetValue(r+rr, c+cc, false);
							}
						}
					}
				} else 
				{
					tempImage.SetValue(r, c, iImage.GetValue(r, c));					
					//tempImage.SetValue(r, c, false);
					//tempBuf[(r)*cols+(c)] = iBuf[(r)*cols+(c)];
				}
			}
		}		

		// copy
		oImage = tempImage;

		//LOG1("End Convolve %d.", &iImage);
		return true;
	}
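For 8-bit mask images, the same morphological erosion is available as a single OpenCV call; a sketch in OpenCV types rather than the BitImage class used above:

// Erosion with OpenCV instead of the hand-rolled kernel loop;
// 'dimension' plays the role of m_dimension above.
cv::Mat erodeMask(const cv::Mat& input, int dimension)
{
    cv::Mat kernel = cv::getStructuringElement(
        cv::MORPH_RECT, cv::Size(dimension, dimension));
    cv::Mat output;
    cv::erode(input, output, kernel);
    return output;
}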
Example #17
void Videostreaming::capFrame()
{
    __u32 buftype = m_buftype;
    v4l2_plane planes[VIDEO_MAX_PLANES];
    v4l2_buffer buf;
    unsigned char *tempSrcBuffer = NULL, *tempDestBuffer = NULL, *copyDestBuffer = NULL;
    unsigned char *tempCu130DestBuffer = NULL, *tempCu130SrcBuffer = NULL;
    unsigned char *tempCu40DestBuffer = NULL, *irBuffer = NULL;
    unsigned char *tempLogtechSrcBuffer = NULL, *tempLogtechDestBuffer = NULL;
    unsigned char *displaybuf = NULL;
    unsigned short int *tempCu40SrcBuffer = NULL;
    //Modified by Nithyesh
    //Previously it was int err = 0, x, y;
    int err = 0;
    __u32 x, y;
    bool again, v4l2convert = false;

    memset(planes, 0, sizeof(planes));
    buf.length = VIDEO_MAX_PLANES;
    buf.m.planes = planes;
    if (!dqbuf_mmap(buf, buftype, again)) {
        closeDevice();
        // A null buffer yields a null image/pixmap, which clears the view
        // before signalling the unplug.
        unsigned char *m_data = NULL;
        QImage tempImage(m_data, 320, 240, QImage::Format_RGB888);
        qImage = QPixmap::fromImage(tempImage);
        update();
        emit deviceUnplugged("Disconnected","Device Not Found");
        emit logCriticalHandle("Device disconnected");
        return;
    }
    if (again) {
        return;
    }
    if (buf.flags & V4L2_BUF_FLAG_ERROR) {        
        qbuf(buf);
        return;
    }
#if 0
    switch(m_capSrcFormat.fmt.pix.pixelformat) {
        case V4L2_PIX_FMT_YUYV: {
            if((width*height*2) == buf.bytesused){
                validFrame = true;
            }

        }
        break;
        case V4L2_PIX_FMT_SGRBG8:{
            // if bayer - 8 bit camera
            // {
                if ((width*height) == buf.bytesused)
                    validFrame = true;
            // }
            // if bayer - 8 bit + pad camera
            // {
                if ((width*height*2) == buf.bytesused)
                    validFrame = true;
            // }
        }
        break;
        case V4L2_PIX_FMT_MJPEG:{
            validFrame = true;
            break;
        }
        default:
        // To do: for other color spaces
        break;

    }

    if (validFrame != true){
        qbuf(buf);
        qDebug()<<"validFrame != true";
     //   return;
    }
#endif

    if (camDeviceName == "e-con's CX3 RDK with M\nT9P031" || camDeviceName == "See3CAM_12CUNIR" || camDeviceName == "See3CAM_CU51")
    {
        tempSrcBuffer = (unsigned char *)malloc(width * height * 2);
        tempDestBuffer = (unsigned char *)malloc(width * height << 1);
        copyDestBuffer = tempDestBuffer;

        memcpy(tempSrcBuffer, m_buffers[buf.index].start[0], buf.bytesused);

        for(__u32 l=0; l<(width*height*2); l=l+2) /* Y16 to YUYV conversion */
        {
            *tempDestBuffer++ = (((tempSrcBuffer[l] & 0xF0) >> 4) | (tempSrcBuffer[l+1] & 0x0F) << 4);
            *tempDestBuffer++ = 0x80;
        }
        m_capSrcFormat.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
        err = v4lconvert_convert(m_convertData, &m_capSrcFormat, &m_capDestFormat,
                                 (unsigned char *)copyDestBuffer, buf.bytesused,
                                 m_capImage->bits(), m_capDestFormat.fmt.pix.sizeimage);
        v4l2convert = true;

    }else if (camDeviceName == "See3CAM_CU40")    {
Example #18
////////////////////////////////////////////////////////////////////////////
// Function: LocateSomas
//
// Find the location of the Somas
void LocateSomas3()
{
	int iSlices = The3DImage->m_iSlices;
	int iRows = The3DImage->m_iRows;
	int iCols = The3DImage->m_iCols;
	register int i,j,k;

	CImage tempImage(iRows, iCols);
	CImage tempImageXZ(iSlices, iCols);
	CImage tempImageYZ(iSlices, iRows);

	int iWidth = (int) (gfWidthSum / giNumOfWidthSumMembers + 0.5);

	int StructElemSize = iWidth + 7; // it was + 2 11-5-99

	CPoint* aDiskPoints = new CPoint[4 * StructElemSize* StructElemSize];
	int iNumOfPoints = 0;

	ConstructStructElem(StructElemSize, aDiskPoints, iNumOfPoints);

	// DetectSomas(ProjectionImage, &tempImage, StructElemSize);
	DetectSomas(ProjectionImage, & tempImage, aDiskPoints, iNumOfPoints);	

	// estimate the threshold from the brightest region in the image
	int Threshold = static_cast<int>(giForegroundMedian + gfForegroundStdDev);
	int aiHistogram[256];
	tempImage.Histogram(aiHistogram);

	for (i = 255; i > giForegroundMedian; i--)
	{
		if (aiHistogram[i] >= iNumOfPoints)
		{
			Threshold = static_cast<int>(0.6 * i);
			break;
		}
	}

	// use the histogram of seed points to estimate the median and the stddev of 
	// the foreground pixels

	if (Threshold > 250)
		Threshold = 250;


	ProjectionImage->ThresholdImage(Threshold, 0);

	tempImage.ThresholdImage(Threshold, 0);

	CImage tempImageXY(tempImage);

	// The soma is defined as the region of intersection between the three images

	int SomaVolume = 0;
	int minX = iCols + 1, minY = iRows + 1, minZ = iSlices + 1;
	int maxX = - 1;
	int maxY = - 1;
	int maxZ = - 1;

	int* aiPlaneSum = new int[iSlices];
	
	int iDenom = iRows;
	if (iCols < iRows)
		iDenom = iCols;
	if (2 * iSlices < iDenom)
		StructElemSize = static_cast<int>(StructElemSize * (2.0 * iSlices / iDenom));

	if (StructElemSize < iWidth)
		StructElemSize = iWidth + 1;

	ConstructStructElem(StructElemSize, aDiskPoints, iNumOfPoints);


	DetectSomas(TrackImageYZ, & tempImageYZ, aDiskPoints, iNumOfPoints);	
	tempImageYZ.Histogram(aiHistogram);
	for (i = 255; i > giForegroundMedian; i--)
	{
		if (aiHistogram[i] >= iNumOfPoints)
		{
			Threshold = static_cast<int>(0.6 * i);
			break;
		}
	}
	tempImageYZ.ThresholdImage(Threshold, 0);

	DetectSomas(TrackImageXZ, & tempImageXZ, aDiskPoints, iNumOfPoints);	
	tempImageXZ.Histogram(aiHistogram);
	for (i = 255; i > giForegroundMedian; i--)
	{
		if (aiHistogram[i] >= iNumOfPoints)
		{
			Threshold = static_cast<int>(0.6 * i);
			break;
		}
	}			

	delete [] aDiskPoints;
	delete [] aiPlaneSum;

	
	// count the number of pixels in the soma
	giSomaVolume = 0;
	for (k = 0; k < iSlices; k++)
	{
		for (i = 0; i < iRows; i++)
		{
			for (j = 0; j < iCols; j++)
			{
				if (tempImageXY.data[i][j] &&
					tempImageXZ.data[k][j] &&
					tempImageYZ.data[k][i])
				{
					giSomaVolume++;
				}
			}
		}
	}

	gaSomaPoints = new CPoint[giSomaVolume];

	register int index = 0;
	for (k = 0; k < iSlices; k++)
	{
		for (i = 0; i < iRows; i++)
		{
			for (j = 0; j < iCols; j++)
			{
				if (tempImageXY.data[i][j] &&
					tempImageXZ.data[k][j] &&
					tempImageYZ.data[k][i])
				{
					TrackImageXY->data[i][j] = SomaColor;
					TrackImageXZ->data[k][j] = SomaColor;
					TrackImageYZ->data[k][i] = SomaColor;
					The3DImage->data[k][i][j] = SomaColor;

					gaSomaPoints[index].m_iX = j;
					gaSomaPoints[index].m_iY = i;
					gaSomaPoints[index].m_iZ = k;
					index++;
					//						Empty3DImage->data[k][i][j] = StartingSomaColor+1;


					SomaVolume++;
					if (k < minZ)
						minZ = k;
					if (k > maxZ)
						maxZ = k;
					if (i < minY)
						minY = i;
					if (i > maxY)
						maxY = i;
					if (j < minX)
						minX = j;
					if (j > maxX)
						maxX = j;
				}
			}
		}
	}

	// temp stuff to write all soma points that are exterior to a file for 
	// visualization with ASAD's program
	//	register int ii, jj, kk;
	//	register int iFlag;

	CPoint centerPoint;
	centerPoint.m_iX = static_cast<int>(static_cast<float>(minX + maxX) / 2.0 + 0.5);
	centerPoint.m_iY = static_cast<int>(static_cast<float>(minY + maxY) / 2.0 + 0.5);
	centerPoint.m_iZ = static_cast<int>(static_cast<float>(minZ + maxZ) / 2.0 + 0.5); 

	double radius = (maxX - minX) * (maxX - minX) +
		(maxY - minY) * (maxY - minY);
	radius = static_cast<int>(sqrt(radius) / 2.0 + 0.5);

	// create the soma collection
	giNumOfSomas = 1;
	gTheSomas = new CSomas(giNumOfSomas);
	gTheSomas->m_aData[0].m_iVolume = SomaVolume;
	gTheSomas->m_aData[0].m_Center = centerPoint;
}
Example #19
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
//
//
// Transformation
//
//
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
void colorTransform::doTransform()
{
	PUNTOEXE_FUNCTION_START(L"colorTransform::doTransform");

	// Process all the input images
	///////////////////////////////////////////////////////////
	for(int scanImages = 0; ; ++scanImages)
	{
		// Get the input image
		///////////////////////////////////////////////////////////
		ptr<image> pInputImage=getInputImage(scanImages);

		// If the input image doesn't exist, then exit
		///////////////////////////////////////////////////////////
		if(pInputImage == 0)
			break;

		// Check the input color space
		///////////////////////////////////////////////////////////
		if(pInputImage->getColorSpace()!=getInitialColorSpace())
		{
			PUNTOEXE_THROW(colorTransformExceptionWrongColorSpace, "the image's color space cannot be handled by the transform");
		}

		// Get the output image
		///////////////////////////////////////////////////////////
		ptr<image> pOutputImage=getOutputImage(scanImages);
		if(pOutputImage == 0)
		{
			ptr<image> tempImage(new image);
			pOutputImage=tempImage;
			declareOutputImage(scanImages, pOutputImage);
		}

		// Get the input image's attributes and the data handler
		///////////////////////////////////////////////////////////
		imbxUint32 sizeX, sizeY;
		pInputImage->getSize(&sizeX, &sizeY);

		double sizeMmX, sizeMmY;
		pInputImage->getSizeMm(&sizeMmX, &sizeMmY);
		pOutputImage->setSizeMm(sizeMmX, sizeMmY);

		image::bitDepth inputDepth=pInputImage->getDepth();
		image::bitDepth outputDepth=inputDepth;
		imbxUint32  highBit=pInputImage->getHighBit();
		imbxUint32 totalPixelsNumber=sizeX*sizeY;
		
		imbxInt32 inputMinValue = 0;
		imbxInt32 inputMaxValue = (1L<<(highBit+1))-1L;
		imbxInt32 outputMinValue = inputMinValue;
		imbxInt32 outputMaxValue = inputMaxValue;

		if(inputDepth==image::depthS8 || inputDepth==image::depthS16)
		{
			inputMinValue-=1L<<highBit;
			inputMaxValue-=1L<<highBit;

			std::wstring outputColorSpace = getFinalColorSpace();
			if(outputColorSpace == L"MONOCHROME2" || outputColorSpace == L"MONOCHROME1")
			{
				outputMinValue-=1L<<highBit;
				outputMaxValue-=1L<<highBit;
			}
			else
			{
				if(inputDepth==image::depthS8)
					outputDepth = image::depthU8;
				if(inputDepth==image::depthS16)
					outputDepth = image::depthU16;
			}
		}

		// Get the data handler for the input and the output
		//  images
		///////////////////////////////////////////////////////////
		imbxUint32 rowSize, channelPixelSize, channelsNumber;
		ptr<handlers::imageHandler> pInputDataHandler = pInputImage->getDataHandler(false, &rowSize, &channelPixelSize, &channelsNumber);
		if(pInputDataHandler->getSize() < totalPixelsNumber * channelsNumber)
		{
			PUNTOEXE_THROW(colorTransformExceptionWrongColorSpace, "the input image's size doesn't match the requested size");
		}
		
		ptr<handlers::imageHandler> pOutputDataHandler = pOutputImage->create(sizeX, sizeY, outputDepth, getFinalColorSpace(), (imbxUint8)highBit);
		channelsNumber = pOutputImage->getChannelsNumber();
		if(pOutputDataHandler->getSize() < totalPixelsNumber * channelsNumber)
		{
			PUNTOEXE_THROW(colorTransformExceptionWrongColorSpace, "the output image's size doesn't match the requested size");
		}

		doColorTransform(pInputDataHandler->getMemoryBuffer(), pOutputDataHandler->getMemoryBuffer(), totalPixelsNumber, inputMinValue, inputMaxValue, outputMinValue, outputMaxValue);
	}
	
	PUNTOEXE_FUNCTION_END();
}
Example #20
//By Yousef
//Try this
void LocateSomas3_v2()
{
	cout << "\tDetecting Somas ... ";	


	int iSlices = The3DImage->m_iSlices;
	int iRows = The3DImage->m_iRows;
	int iCols = The3DImage->m_iCols;
	register int i,j,k;

	CImage tempImage(iRows, iCols);
	CImage tempImageXZ(iSlices, iCols);
	CImage tempImageYZ(iSlices, iRows);

	int iWidth = (int) (gfWidthSum / giNumOfWidthSumMembers + 0.5);

	int StructElemSize = iWidth + 7; // it was + 2 11-5-99

	CPoint* aDiskPoints = new CPoint[4 * StructElemSize* StructElemSize];
	int iNumOfPoints = 0;

	ConstructStructElem(StructElemSize, aDiskPoints, iNumOfPoints);

	// DetectSomas(ProjectionImage, &tempImage, StructElemSize);
	DetectSomas(ProjectionImage, & tempImage, aDiskPoints, iNumOfPoints);	

	// estimate the threshold from the brightest region in the image
	int Threshold = static_cast<int>(giForegroundMedian + gfForegroundStdDev);
	int aiHistogram[256];
	tempImage.Histogram(aiHistogram);

	for (i = 255; i > giForegroundMedian; i--)
	{
		if (aiHistogram[i] >= iNumOfPoints)
		{
			Threshold = static_cast<int>(0.6 * i);
			break;
		}
	}

	// use the histogram of seed points to estimate the median and the stddev of 
	// the foreground pixels

	if (Threshold > 250)
		Threshold = 250;


	ProjectionImage->ThresholdImage(Threshold, 0);

	tempImage.ThresholdImage(Threshold, 0);

	CImage tempImageXY(tempImage);
	//Yousef: Try this
	//tempImage.Write("somaXY.pgm");

	// The soma is defined as the region of intersection between the three images
	int SomaVolume = 0;
	
	int* aiPlaneSum = new int[iSlices];
	
	int iDenom = iRows;
	if (iCols < iRows)
		iDenom = iCols;

	StructElemSize = static_cast<int>(StructElemSize * (static_cast<double>(iSlices) / iDenom)); // avoid integer division truncating to zero
	/*if (2 * iSlices < iDenom)
		StructElemSize = static_cast<int>(StructElemSize * (2.0 * iSlices / iDenom));

	if (StructElemSize < iWidth)
		StructElemSize = iWidth + 1;*/

	ConstructStructElem(StructElemSize, aDiskPoints, iNumOfPoints);


	DetectSomas(TrackImageYZ, & tempImageYZ, aDiskPoints, iNumOfPoints);	
	tempImageYZ.Histogram(aiHistogram);
	for (i = 255; i > giForegroundMedian; i--)
	{
		if (aiHistogram[i] >= iNumOfPoints)
		{
			Threshold = static_cast<int>(0.6 * i);
			break;
		}
	}
	tempImageYZ.ThresholdImage(Threshold, 0);

	//Yousef: Try this
	//tempImageYZ.Write("somaYZ.pgm");

	DetectSomas(TrackImageXZ, & tempImageXZ, aDiskPoints, iNumOfPoints);	
	tempImageXZ.Histogram(aiHistogram);
	for (i = 255; i > giForegroundMedian; i--)
	{
		if (aiHistogram[i] >= iNumOfPoints)
		{
			Threshold = static_cast<int>(0.6 * i);
			break;
		}
	}			

	delete [] aDiskPoints;
	delete [] aiPlaneSum;

	//Yousef: Try this
	//tempImageXZ.Write("somaXZ.pgm");

	// count the number of pixels in the soma
	giSomaVolume = 0;
	for (k = 0; k < iSlices; k++)
	{
		for (i = 0; i < iRows; i++)
		{
			for (j = 0; j < iCols; j++)
			{
				if (tempImageXY.data[i][j] &&
					tempImageXZ.data[k][j] &&
					tempImageYZ.data[k][i])
				{
					giSomaVolume++;
				}
			}
		}
	}
	

	gaSomaPoints = new CPoint[giSomaVolume];

	register int index = 0;
	for (k = 0; k < iSlices; k++)
	{
		for (i = 0; i < iRows; i++)
		{
			for (j = 0; j < iCols; j++)
			{
				//if (Soma3DImage->data[k][i][j])
				if (tempImageXY.data[i][j] &&
					tempImageXZ.data[k][j] &&
					tempImageYZ.data[k][i])
				{					
					gaSomaPoints[index].m_iX = j;
					gaSomaPoints[index].m_iY = i;
					gaSomaPoints[index].m_iZ = k;						
					TrackImageXY->data[i][j] = SomaColor;
					TrackImageXZ->data[k][j] = SomaColor;
					TrackImageYZ->data[k][i] = SomaColor;
					The3DImage->data[k][i][j] = SomaColor;
					SomaVolume++;
					index++;
				}
			}
		}
	}
	
	if(SomaVolume == 0)
	{
		int minX = iCols + 1, minY = iRows + 1, minZ = iSlices + 1;
		int maxX = - 1;
		int maxY = - 1;
		int maxZ = - 1;
		CPoint centerPoint;
		centerPoint.m_iX = static_cast<int>(static_cast<float>(minX + maxX) / 2.0 + 0.5);
		centerPoint.m_iY = static_cast<int>(static_cast<float>(minY + maxY) / 2.0 + 0.5);
		centerPoint.m_iZ = static_cast<int>(static_cast<float>(minZ + maxZ) / 2.0 + 0.5); 

		double radius = (maxX - minX) * (maxX - minX) +
			(maxY - minY) * (maxY - minY);
		radius = static_cast<int>(sqrt(radius) / 2.0 + 0.5);

		// create the soma collection
		giNumOfSomas = 1;
		gTheSomas = new CSomas(giNumOfSomas);
		gTheSomas->m_aData[0].m_iVolume = SomaVolume;
		gTheSomas->m_aData[0].m_Center = centerPoint;
		giNumOfSomas = 0;
		cout<<giNumOfSomas<<" somas detected"<<endl;
		return;
	}
	//Yousef: 01-26-2006
	//Apply connected components to separate somas
	int x, y, z, x_min, y_min, z_min, x_max, y_max, z_max, More_Somas, Soma_Not_Full, soma_ID, br;
	More_Somas = soma_ID = br = 1;
	giNumOfSomas = 1;
	SomaLabelsImage->data[gaSomaPoints[0].m_iZ][gaSomaPoints[0].m_iY][gaSomaPoints[0].m_iX] = 1;

	
	while(More_Somas)
	{
		More_Somas = 0;
		Soma_Not_Full = 1;		
		while(Soma_Not_Full)
		{
			Soma_Not_Full = 0;
			for (i=0; i<index; i++)
			{
				if(SomaLabelsImage->data[gaSomaPoints[i].m_iZ][gaSomaPoints[i].m_iY][gaSomaPoints[i].m_iX] != 0)
					continue;
				x_min = max(0,gaSomaPoints[i].m_iX-1);
				x_max = min(iCols,gaSomaPoints[i].m_iX+1);
				y_min = max(0,gaSomaPoints[i].m_iY-1);
				y_max = min(iRows,gaSomaPoints[i].m_iY+1);
				z_min = max(0,gaSomaPoints[i].m_iZ-1);
				z_max = min(iSlices,gaSomaPoints[i].m_iZ+1);
				
				for(z=z_min; z<=z_max; z++)
				{
					br = 0;
					for(y=y_min; y<=y_max; y++)
					{
						for(x=x_min; x<=x_max; x++)
						{
							if(SomaLabelsImage->data[z][y][x] == soma_ID)
							{
								SomaLabelsImage->data[gaSomaPoints[i].m_iZ][gaSomaPoints[i].m_iY][gaSomaPoints[i].m_iX] = soma_ID;
								Soma_Not_Full = 1;
								br = 1;
								break;
							}
						}
						if(br == 1)
							break;
					}
					if(br == 1)
						break;
				}
			}			
		}
		//See if there is any soma point that is not assigned a label
		 for (i=0; i<index; i++)
		{
			if(SomaLabelsImage->data[gaSomaPoints[i].m_iZ][gaSomaPoints[i].m_iY][gaSomaPoints[i].m_iX] == 0)
			{
				soma_ID++;
				giNumOfSomas++;
				SomaLabelsImage->data[gaSomaPoints[i].m_iZ][gaSomaPoints[i].m_iY][gaSomaPoints[i].m_iX] = soma_ID;
				More_Somas = 1;
				break;
			}
		}
	}

	int* SomaVolumes  = new int[giNumOfSomas];
	int* SomaCentersX = new int[giNumOfSomas];
	int* SomaCentersY = new int[giNumOfSomas];
	int* SomaCentersZ = new int[giNumOfSomas];
	for (i=0; i<soma_ID; i++)
	{
		SomaVolumes[i] = SomaCentersX[i] = SomaCentersY[i] = SomaCentersZ[i] = 0;
	}
	for( i=0; i<index; i++)
	{
		SomaVolumes[SomaLabelsImage->data[gaSomaPoints[i].m_iZ][gaSomaPoints[i].m_iY][gaSomaPoints[i].m_iX]-1]++;
		SomaCentersX[SomaLabelsImage->data[gaSomaPoints[i].m_iZ][gaSomaPoints[i].m_iY][gaSomaPoints[i].m_iX]-1]+=gaSomaPoints[i].m_iX;
		SomaCentersY[SomaLabelsImage->data[gaSomaPoints[i].m_iZ][gaSomaPoints[i].m_iY][gaSomaPoints[i].m_iX]-1]+=gaSomaPoints[i].m_iY;
		SomaCentersZ[SomaLabelsImage->data[gaSomaPoints[i].m_iZ][gaSomaPoints[i].m_iY][gaSomaPoints[i].m_iX]-1]+=gaSomaPoints[i].m_iZ;
	}
	for (i=0; i<soma_ID; i++)
	{
		SomaCentersX[i] = (int) (SomaCentersX[i] / SomaVolumes[i]);
		SomaCentersY[i] = (int) (SomaCentersY[i] / SomaVolumes[i]);
		SomaCentersZ[i] = (int) (SomaCentersZ[i] / SomaVolumes[i]);
	}

	gTheSomas = new CSomas(giNumOfSomas);	
	for (i=0; i<soma_ID; i++)
	{
		gTheSomas->m_aData[i].m_iVolume = SomaVolumes[i];
		gTheSomas->m_aData[i].m_Center.m_iX = SomaCentersX[i];
		gTheSomas->m_aData[i].m_Center.m_iY = SomaCentersY[i];
		gTheSomas->m_aData[i].m_Center.m_iZ = SomaCentersZ[i];
	}


	
	cout<<giNumOfSomas<<" somas detected"<<endl;
	TrackImageXY->Write("somamm.pgm");	
}
Example #21
////////////////////////////////////////////////////////////////////////////
// Function: LocateSomas
//
// Find the location of the Somas
void LocateSomas2()
{
	int iSlices = The3DImage->m_iSlices;
	int iRows = The3DImage->m_iRows;
	int iCols = The3DImage->m_iCols;
	CImage tempImage(iRows, iCols);

	DetectSomas(ProjectionImage->data, tempImage.data);

	// use the histogram of seed points to estimate the median and the stddev of 
	// the foreground pixels

	int Threshold = static_cast<int>(giForegroundMedian + 2.0 * gfForegroundStdDev);

	ProjectionImage->ThresholdImage(Threshold, 0);
	tempImage.ThresholdImage(Threshold, 0);

	//tempImage.Write("tempImage.pgm");

	///////////////////////////////////////////////////////
	// NOTE: What follows does not apply to images with multiple somas;
	// change it in a manner similar to the 2dTrack programs to handle
	// multiple somas
	// 3-4-1999

	int* PlaneSum = new int[iSlices];
	memset(PlaneSum, 0, sizeof(int) * iSlices);

	register int i, j, k;
	int minX, minY, maxX, maxY;
	minX = minY = 9999;
	maxX = maxY = 0;

	for (k = 0; k < iSlices; k++)
	{
		for (j = giMARGIN; j < iRows - giMARGIN; j++)
		{
			for (i = giMARGIN; i < iCols - giMARGIN; i++)
			{
				// if the point belongs to the soma, add its value
				// to the soma pixel sum in this image plane
				if (tempImage.data[j][i])
				{
					PlaneSum[k] += The3DImage->data[k][j][i];

					// figure out the boundaries of the region
					if (i < minX)
						minX = i;
					if (i > maxX)
						maxX = i;
					if (j < minY)
						minY = j;
					if (j > maxY)
						maxY = j;
				}
			}
		}
	}

	// find the plane with the higest sum
	int max = 0;
	int PlaneIndex = 0;
	for (i = 0; i < iSlices; i++)
	{
		if (PlaneSum[i] > max)
		{
			max = PlaneSum[i];
			PlaneIndex = i;
		}
	}
	int FromPlane = PlaneIndex;
	int ToPlane = PlaneIndex;
	int threshold = static_cast<int>(0.2 * max);
	while (PlaneSum[FromPlane] >= threshold)
	{
		if (FromPlane == 0)
			break;
		FromPlane -= 1;
	}
	while (PlaneSum[ToPlane] >= threshold)
	{
		if (ToPlane == iSlices - 1)
			break;
		ToPlane += 1;
	}

	// for now, we fill the region between FromPlane and ToPlane as the single
	// one soma in the image.	
	int SomaVolume = 0;
	int pixelValue = 0;
	int SomaID = 1;
	for (j = minY; j <= maxY; j++)
	{
		for (i = minX; i <= maxX; i++)
		{
			// if the point belongs to the soma, mark it in the 3D image and
			// update the soma volume
			if (tempImage.data[j][i] != 0)
			{
				TrackImageXY->data[j][i] = SomaColor;
				for (k = FromPlane; k <= ToPlane; k++)
				{
					pixelValue = The3DImage->data[k][j][i];
					The3DImage->data[k][j][i] = SomaColor;
					Empty3DImage->data[k][j][i] = static_cast<unsigned char>(SomaID);

					if (pixelValue >= Threshold)
					{
						TrackImageXZ->data[k][i] = SomaColor;
						TrackImageYZ->data[k][j] = SomaColor;

						SomaVolume++;
					}
				}
			}
		}
	}

	//TrackImageXY->Write("somaImage.pgm");

	//exit(0);
	CPoint centerPoint;
	int x = static_cast<int>(static_cast<float>(minX + maxX) / 2.0 + 0.5);
	int y = static_cast<int>(static_cast<float>(minY + maxY) / 2.0 + 0.5);
	int z = PlaneIndex;
	centerPoint.m_iX = x;
	centerPoint.m_iY = y;
	centerPoint.m_iZ = z;

	// radius estimate: half the XY bounding-box diagonal, or half the
	// Z extent of the soma, whichever is larger
	double radius = (maxX - minX) * (maxX - minX) +
		(maxY - minY) * (maxY - minY);
	radius = static_cast<int>(std::sqrt( radius) );
	if ((ToPlane - FromPlane) > radius)
		radius = ToPlane - FromPlane;

	radius = radius / 2;

	delete [] PlaneSum;

	// create the soma collection
	giNumOfSomas = 1;
	gTheSomas = new CSomas(giNumOfSomas);
	gTheSomas->m_aData[0].m_iVolume = SomaVolume;
	gTheSomas->m_aData[0].m_Center = centerPoint;
}
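The slice-window logic above (take the slice with the highest soma-pixel sum, then grow the window while neighboring slices stay above 20% of that maximum) is the heart of the function and is independent of the image types. A minimal sketch of just that step, assuming the per-slice sums are already computed:

#include <cstddef>
#include <utility>
#include <vector>

// Return the [from, to] slice range around the strongest slice; neighbors are
// kept while their sum stays at or above `fraction` of the maximum.
std::pair<int, int> SliceWindow(const std::vector<int>& planeSum, double fraction)
{
	if (planeSum.empty()) return std::make_pair(0, -1);	// empty range

	int best = 0;
	for (size_t i = 1; i < planeSum.size(); i++)
		if (planeSum[i] > planeSum[best]) best = (int)i;

	const int threshold = (int)(fraction * planeSum[best]);
	int from = best, to = best;
	while (from > 0 && planeSum[from] >= threshold) from--;
	while (to + 1 < (int)planeSum.size() && planeSum[to] >= threshold) to++;
	return std::make_pair(from, to);
}

Called as SliceWindow(planeSums, 0.2), this reproduces the 0.2 * max cutoff used above.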
Example #22
void ImageHandler::CreateImage(const CDC& dc, const CRect& clientRect, std::shared_ptr<BackgroundImage>& bkImage)
{
	CriticalSectionLock	lock(bkImage->updateCritSec);

	bkImage->dwImageWidth = clientRect.Width();
	bkImage->dwImageHeight = clientRect.Height();

	// create background DC
	if (bkImage->dcImage.IsNull()) bkImage->dcImage.CreateCompatibleDC(NULL);

	// create background bitmap
	if (!bkImage->image.IsNull()) bkImage->image.DeleteObject();
	Helpers::CreateBitmap(dc, bkImage->dwImageWidth, bkImage->dwImageHeight, bkImage->image);
	bkImage->dcImage.SelectBitmap(bkImage->image);

	// paint background
	CBrush backgroundBrush(::CreateSolidBrush(bkImage->imageData.crBackground));
	bkImage->dcImage.FillRect(&clientRect, backgroundBrush);

	// originalImage is null only for desktop backgrounds with no wallpaper image
	if (bkImage->originalImage.get() != NULL)
	{
		// create template image
		CDC		dcTemplate;
		CBitmap	bmpTemplate;

		dcTemplate.CreateCompatibleDC(NULL);

		DWORD dwNewWidth  = bkImage->dwImageWidth;
		DWORD dwNewHeight = bkImage->dwImageHeight;

		if ( (bkImage->imageData.imagePosition == imagePositionStretch ||
		      bkImage->imageData.imagePosition == imagePositionFit     ||
		      bkImage->imageData.imagePosition == imagePositionFill)
		     &&
		     (bkImage->originalImage->getWidth()  != dwNewWidth        ||
		      bkImage->originalImage->getHeight() != dwNewHeight) )
		{
			// resize background image
			ImageHandler::CalcRescale(dwNewWidth, dwNewHeight, bkImage);
			fipImage tempImage(*(bkImage->originalImage));
			tempImage.rescale(dwNewWidth, dwNewHeight, FILTER_BILINEAR);

			bmpTemplate.CreateDIBitmap(
							dc,
							tempImage.getInfoHeader(),
							CBM_INIT,
							tempImage.accessPixels(),
							tempImage.getInfo(),
							DIB_RGB_COLORS);
		}
		else
		{
			bmpTemplate.CreateDIBitmap(
							dc,
							bkImage->originalImage->getInfoHeader(),
							CBM_INIT,
							bkImage->originalImage->accessPixels(),
							bkImage->originalImage->getInfo(),
							DIB_RGB_COLORS);
		}

		dcTemplate.SelectBitmap(bmpTemplate);

		if (bkImage->imageData.imagePosition == imagePositionTile)
		{
			TileTemplateImage(
				dcTemplate,
				0,
				0,
				bkImage);

		}
		else
		{
			PaintTemplateImage(
				dcTemplate,
				0,
				0,
				dwNewWidth,
				dwNewHeight,
				bkImage->dwImageWidth,
				bkImage->dwImageHeight,
				bkImage);
		}
	}

	if (bkImage->imageData.byTintOpacity > 0) TintImage(dc, bkImage);
}
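Both branches above end in the same CreateDIBitmap call; the only difference is whether the FreeImagePlus bitmap is rescaled first. A minimal sketch of that rescale step in isolation, assuming FreeImagePlus is available; the file name is a placeholder:

#include "FreeImagePlus.h"

// Load an image and, if needed, rescale it to the target size with the same
// bilinear filter used above. "background.png" is a placeholder path.
bool LoadRescaled(fipImage& img, unsigned width, unsigned height)
{
	if (!img.load("background.png")) return false;
	if (img.getWidth() == width && img.getHeight() == height) return true;
	return img.rescale(width, height, FILTER_BILINEAR) != FALSE;
}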
Example #23
void ImageHandler::PaintRelativeImage(const CDC& dc, CBitmap& bmpTemplate, std::shared_ptr<BackgroundImage>& bkImage, DWORD& dwDisplayWidth, DWORD& dwDisplayHeight)
{
  // set template bitmap dimensions
  DWORD	dwTemplateWidth  = bkImage->originalImage->getWidth();
  DWORD	dwTemplateHeight = bkImage->originalImage->getHeight();

  if (bkImage->imageData.imagePosition == imagePositionStretch ||
      bkImage->imageData.imagePosition == imagePositionFit     ||
      bkImage->imageData.imagePosition == imagePositionFill)
  {
    if (bkImage->imageData.bExtend)
    {
      dwTemplateWidth  = bkImage->dwImageWidth;
      dwTemplateHeight = bkImage->dwImageHeight;
    }
    else
    {
      dwTemplateWidth  = dwDisplayWidth;
      dwTemplateHeight = dwDisplayHeight;
    }
  }

  DWORD dwNewWidth  = dwTemplateWidth;
  DWORD dwNewHeight = dwTemplateHeight;

  if ( bkImage->originalImage->getWidth()  != dwNewWidth ||
       bkImage->originalImage->getHeight() != dwNewHeight )
  {
    // resize background image
    ImageHandler::CalcRescale(dwNewWidth, dwNewHeight, bkImage);
    fipImage tempImage(*(bkImage->originalImage));

#ifdef _DEBUG
    DWORD dwGetTickCount = ::GetTickCount();
#endif
    tempImage.rescale(dwNewWidth, dwNewHeight, FILTER_BILINEAR);
#ifdef _DEBUG
    TRACE(L"rescale in %lu ms\n", ::GetTickCount() - dwGetTickCount);
#endif

    bmpTemplate.CreateDIBitmap(
      dc,
      tempImage.getInfoHeader(),
      CBM_INIT,
      tempImage.accessPixels(),
      tempImage.getInfo(),
      DIB_RGB_COLORS);
  }
  else
  {
    bmpTemplate.CreateDIBitmap(
      dc,
      bkImage->originalImage->getInfoHeader(),
      CBM_INIT,
      bkImage->originalImage->accessPixels(),
      bkImage->originalImage->getInfo(),
      DIB_RGB_COLORS);
  }

  dwDisplayWidth  = dwNewWidth;
  dwDisplayHeight = dwNewHeight;
}
Example #24
	// init application and run
	int Run()
	{
		CBitmapCapture capture(DEMO_IMAGE);

		// open camera
		if (!capture.OpenCamera())
		{
			printf("error: could not open camera\n");
			return 1;
		}
		
		const int width = capture.GetWidth();
		const int height = capture.GetHeight();

		// create temp image for the image processing
		CByteImage image(width, height, capture.GetType());
		CByteImage grayImage(width, height, CByteImage::eGrayScale);
		CByteImage tempImage(width, height, CByteImage::eGrayScale);
		CByteImage visualizationImage(width, height, CByteImage::eRGB24);
		CByteImage *pImage = &image;


		// create an application handler
		CApplicationHandlerInterface *pApplicationHandler = CreateApplicationHandler();
		pApplicationHandler->Reset();
		
		// create a main window
		m_pMainWindow = CreateMainWindow(0, 0, width, height + 190, "Hough Circle Detection Demo");

		// events are sent to this class, hence this class needs to have the CMainWindowEventInterface
		m_pMainWindow->SetEventCallback(this);

		// create an image widget to display the images in the window
		WIDGET_HANDLE pImageWidget = m_pMainWindow->AddImage(0, 190, width, height);

		// add a label and a slider for the low threshold
		WIDGET_HANDLE pLabelCannyLow = m_pMainWindow->AddLabel(15, 15, 200, 30, "Canny low threshold: 0");
		m_pSliderCannyLow = m_pMainWindow->AddSlider(15, 30, 200, 40, 0, 1020, 102, m_nCannyLowThreshold);

		// add a label and a slider for the high threshold
		WIDGET_HANDLE pLabelCannyHigh = m_pMainWindow->AddLabel(15, 70, 200, 30, "Canny high threshold: 0");
		m_pSliderCannyHigh = m_pMainWindow->AddSlider(15, 85, 200, 40, 0, 1020, 102, m_nCannyHighThreshold);
		
		// add a label and a slider for the number of circles to extract
		WIDGET_HANDLE pLabelLines = m_pMainWindow->AddLabel(260, 15, 200, 30, "Circles to extract: 0");
		m_pSliderLinesToExtract = m_pMainWindow->AddSlider(260, 30, 200, 40, 0, 30, 5, m_nCirclesToExtract);
		
		// add labels/sliders for specifying the radius interval of interest
		WIDGET_HANDLE pLabelMinRadius = m_pMainWindow->AddLabel(260, 70, 200, 30, "Min radius: 0");
		m_pSliderMinRadius = m_pMainWindow->AddSlider(260, 85, 200, 40, 1, 200, 5, m_nMinRadius);
		WIDGET_HANDLE pLabelMaxRadius = m_pMainWindow->AddLabel(260, 125, 200, 30, "Max radius: 0");
		m_pSliderMaxRadius = m_pMainWindow->AddSlider(260, 140, 200, 40, 1, 200, 5, m_nMaxRadius);
		
		// add a button to toggle between the original image and the processed one
		m_pButton = m_pMainWindow->AddButton(510, 80, 110, 35, "Show Edges");

		// add labels to display processing stats
		WIDGET_HANDLE pLabelMS = m_pMainWindow->AddLabel(560, 15, 70, 20, "0 ms");
		WIDGET_HANDLE pLabelFPS = m_pMainWindow->AddLabel(560, 45, 70, 20, "0 fps");

		// make the window visible
		m_pMainWindow->Show();
		

		char buffer[1024];
		
		CVec3dArray resultListCircles(50);
		CDynamicArrayTemplate<int> resultHits(50);
		CVec2dArray edgePoints(10000), edgeDirections(10000);
		
		// main loop
		while (!pApplicationHandler->ProcessEventsAndGetExit())
		{
			if (!capture.CaptureImage(&pImage))
				break;
			
			// this is for visualization purposes only
			ImageProcessor::ConvertImage(pImage, &visualizationImage);
			
			get_timer_value(true);
			
			// convert the input image to grayscale
			ImageProcessor::ConvertImage(&image, &tempImage, true);
			
			// smooth image
			ImageProcessor::GaussianSmooth3x3(&tempImage, &grayImage);

			// detect edges with Canny edge detector
			ImageProcessor::Canny(&grayImage, edgePoints, edgeDirections, m_nCannyLowThreshold, m_nCannyHighThreshold);
			
			// detect circles with the Hough transform
			ImageProcessor::HoughTransformCircles(edgePoints, edgeDirections, width, height, m_nMinRadius, m_nMaxRadius, m_nCirclesToExtract, 1, resultListCircles, resultHits, &visualizationImage);

			const unsigned int t = get_timer_value();
			
			// display the speed stats
			sprintf(buffer, "%2.2f ms", t / 1000.0f);
			m_pMainWindow->SetText(pLabelMS, buffer);
			sprintf(buffer, "%3.2f fps", 1000000.0f / t);
			m_pMainWindow->SetText(pLabelFPS, buffer);
			sprintf(buffer, "Canny low threshold: %i", m_nCannyLowThreshold);
			m_pMainWindow->SetText(pLabelCannyLow, buffer);
			sprintf(buffer, "Canny high threshold: %i", m_nCannyHighThreshold);
			m_pMainWindow->SetText(pLabelCannyHigh, buffer);
			sprintf(buffer, "Min radius: %i", m_nMinRadius);
			m_pMainWindow->SetText(pLabelMinRadius, buffer);
			sprintf(buffer, "Max radius: %i", m_nMaxRadius);
			m_pMainWindow->SetText(pLabelMaxRadius, buffer);
			sprintf(buffer, "Circles to extract: %i", m_nCirclesToExtract);
			m_pMainWindow->SetText(pLabelLines, buffer);

			// display either the original image or the processed image
			if (m_bShowEdges)
			{
				ImageProcessor::Canny(&grayImage, &grayImage, m_nCannyLowThreshold, m_nCannyHighThreshold);
				m_pMainWindow->SetImage(pImageWidget, &grayImage);
			}
			else
				m_pMainWindow->SetImage(pImageWidget, &visualizationImage);
		}
		
		delete m_pMainWindow;
		delete pApplicationHandler;
		
		return 0;
	}
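Stripped of the GUI plumbing, the per-frame work in the loop above is a four-stage pipeline: grayscale conversion, 3x3 Gaussian smoothing, Canny edge extraction, and Hough circle voting. A sketch of just that core, using only the IVT calls with the same signatures as above (the header paths are assumptions):

#include "Image/ByteImage.h"		// assumed IVT header locations
#include "Image/ImageProcessor.h"

// One frame of the processing pipeline; detected circles and their vote counts
// land in resultCircles/resultHits, overlays are drawn into pVis.
void DetectCircles(const CByteImage* pInput, CByteImage* pTemp, CByteImage* pGray,
                   CByteImage* pVis, int width, int height, int cannyLow, int cannyHigh,
                   int minRadius, int maxRadius, int circlesToExtract,
                   CVec3dArray& resultCircles, CDynamicArrayTemplate<int>& resultHits)
{
	CVec2dArray edgePoints(10000), edgeDirections(10000);

	ImageProcessor::ConvertImage(pInput, pTemp, true);	// to grayscale
	ImageProcessor::GaussianSmooth3x3(pTemp, pGray);	// suppress noise before edge detection
	ImageProcessor::Canny(pGray, edgePoints, edgeDirections, cannyLow, cannyHigh);
	ImageProcessor::HoughTransformCircles(edgePoints, edgeDirections, width, height,
		minRadius, maxRadius, circlesToExtract, 1, resultCircles, resultHits, pVis);
}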
Example #25
	bool Kernel<T, U>::convolve(const Image<T>& iImage, Image<U>& oImage) const
	{
		// perform 2D convolution
		//LOG1("Begin Convolve %d.", &iImage);
		int r, c, rr, cc, center;            
		long double dot, sum;               

		center = m_dimension / 2;

		long rows=0, cols=0;
		if (!iImage.GetDimensions(rows, cols))
		{
			return false;
		}

		Image<double> tempImage(rows, cols);
		//oImage = iImage;

		oImage.SetDimensions(rows, cols);

		const T* iBuf	= (const T*)iImage.GetBuffer();
		U* oBuf			= (U*)oImage.GetBuffer();	
		double* tempBuf = (double *)tempImage.GetBuffer();

		if (!tempBuf) return false;

		float kernelValue = 0;
		double imageValue = 0;
		double maxVal = -9999, minVal = 9999;

		//LOG0("Blurring the image in the X-direction.");
		for(r=0;r<rows;r++)
		{
			for(c=0;c<cols;c++)
			{
				dot = 0.0;
				sum = 0.0;
				for(rr=(-center);rr<=center;rr++)
				{
					if(((r+rr) >= 0) && ((r+rr) < rows))
					{
						for(cc=(-center);cc<=center;cc++)
						{
							if(((c+cc) >= 0) && ((c+cc) < cols))
							{
								kernelValue = GetValue((center+rr), (center+cc));
								imageValue = (double)(iImage.GetValue((r+rr), (c+cc)));
								dot += imageValue * kernelValue;
								sum += kernelValue;
							}
						}
					}
				}

				long double oValue = dot;

				// renormalize by the accumulated kernel weight
				// (the kernel is truncated at the image borders)
				if (sum > kPgCoarseEpsilon)
				{
					oValue /= sum;
				}

				tempBuf[r*cols+c] = oValue;
				maxVal = maxVal<oValue ? oValue : maxVal;
				minVal = minVal>oValue ? oValue : minVal;

			}
		}

		//LOG0("The filter coefficients after derivative unquantized are:");
		//for(int i=0;i<(cols);i++)
		//	LOG2("Kernel[%d] = %f", i, (double)tempBuf[i]);

		// quantize values to between 0 & 255 ? 
		long imCount=0;
		double oRange = (maxVal-minVal);
		if (oRange) oRange = 255.0f/oRange;
		else oRange = 255.0f;

		while (imCount < rows*cols)
		{
			double outVal = (tempBuf[imCount]);//-minVal)*oRange;
			oBuf[imCount] = (U)(outVal+0.5f);
			imCount++;
		}

		return true;
	}
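The running `sum` in convolve() is what keeps the borders correct: near an edge, part of the kernel falls outside the image, and dividing by the weight that actually landed on pixels renormalizes the truncated kernel instead of darkening the border. The same idea in one dimension, as a minimal self-contained sketch:

#include <vector>

// 1D convolution with border renormalization: taps falling outside the signal
// are skipped, and the result is divided by the summed weight of the taps
// that did contribute, so edge samples keep the same scale as interior ones.
std::vector<double> ConvolveRenorm(const std::vector<double>& in,
                                   const std::vector<double>& kernel)
{
	const int n = (int)in.size();
	const int center = (int)kernel.size() / 2;
	std::vector<double> out(n, 0.0);

	for (int i = 0; i < n; i++)
	{
		double dot = 0.0, sum = 0.0;
		for (int k = -center; k <= center; k++)
		{
			if (i + k < 0 || i + k >= n) continue;	// tap outside the signal
			dot += in[i + k] * kernel[center + k];
			sum += kernel[center + k];
		}
		out[i] = (sum != 0.0) ? dot / sum : dot;
	}
	return out;
}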
Example #26
	bool Kernel<T, U>::convolveSeparable(const Image<T>& iImage, Image<U>& oImage) const
	{	
		//LOG1("Begin Convolve %d.", &iImage);
		int r, c, rr, cc, center;            
		long double dot, sum;               

		center = m_dimension / 2;

		long rows=0, cols=0;
		if (!iImage.GetDimensions(rows, cols))
		{
			return false;
		}

		Image<double> tempImage(rows, cols);
		//oImage = iImage;
		oImage.SetDimensions(rows, cols);

		const T* iBuf	= (const T*)iImage.GetBuffer();
		U* oBuf			= (U*)oImage.GetBuffer();
		double* tempBuf = (double *)tempImage.GetBuffer();

		if (!tempBuf) return false;
		
		//LOG0("Blurring the image in the X-direction.");
		for(r=0;r<rows;r++)
		{
			for(c=0;c<cols;c++)
			{
				dot = 0.0f;
				sum = 0.0f;
				for(cc=(-center);cc<=center;cc++)
				{
					if(((c+cc) >= 0) && ((c+cc) < cols))
					{
						dot += iBuf[r*cols+(c+cc)] * m_buffer[center+cc];
						sum += m_buffer[center+cc];
					}
				}
				if (sum>kPgCoarseEpsilon)
					tempBuf[r*cols+c] = dot/sum;
				else 
					tempBuf[r*cols+c] = dot;
			}
		}

		Image<double> tempImage1(rows, cols);
		double* tempBuf1 = (double *)tempImage1.GetBuffer();
		double maxVal = -9999, minVal = 9999;

		//LOG0("Blurring the image in the Y-direction.");
		for(c=0;c<cols;c++)
		{
			for(r=0;r<rows;r++)
			{
				sum = 0.0f;
				dot = 0.0f;
				for(rr=(-center);rr<=center;rr++)
				{
					if(((r+rr) >= 0) && ((r+rr) < rows))
					{
						dot += tempBuf[(r+rr)*cols+c] * m_buffer[center+rr];
						sum += m_buffer[center+rr];
					}
				}
				if (sum>kPgCoarseEpsilon)
					tempBuf1[r*cols+c] =(dot/sum);
				else
					tempBuf1[r*cols+c] =(dot);

				double oValue = tempBuf1[r*cols+c];
				maxVal = maxVal<oValue ? oValue : maxVal;
				minVal = minVal>oValue ? oValue : minVal;
			}
		}  

		// copy the second-pass buffer to the output type; as in convolve(),
		// the 0..255 requantization is currently disabled
		long imCount = 0;
		double oRange = (maxVal - minVal);
		if (oRange) oRange = 255.0f / oRange;
		else oRange = 255.0f;

		while (imCount < rows * cols)
		{
			double outVal = tempBuf1[imCount];	// rescale disabled: ((tempBuf1[imCount] - minVal) * oRange)
			oBuf[imCount] = (U)(outVal + 0.5f);
			imCount++;
		}
		//LOG1("End Convolve %d.", &iImage);
		return true;
	}
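convolveSeparable() relies on the kernel factoring as K(r,c) = k(r)*k(c): the horizontal pass applies k along each row and the vertical pass along each column, replacing dimension-squared multiplies per pixel with 2*dimension (a 5x5 kernel drops from 25 to 10). A sketch of building a matching normalized 1D kernel, assuming a Gaussian profile; the class's actual m_buffer contents are not shown in this listing, so this is illustrative only:

#include <cmath>
#include <vector>

// Normalized 1D Gaussian, usable for both the row and column passes above.
std::vector<float> Gaussian1D(int dimension, double sigma)
{
	std::vector<float> k(dimension);
	const int center = dimension / 2;
	double sum = 0.0;
	for (int i = 0; i < dimension; i++)
	{
		const double x = i - center;
		k[i] = (float)std::exp(-(x * x) / (2.0 * sigma * sigma));
		sum += k[i];
	}
	for (int i = 0; i < dimension; i++)
		k[i] = (float)(k[i] / sum);	// weights sum to 1, so dot/sum == dot away from borders
	return k;
}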