Example #1
IplImage *dc1394_frame_get_iplimage(dc1394video_frame_t *frame)
{
    g_return_val_if_fail(frame != NULL, NULL);
    g_return_val_if_fail(frame->padding_bytes == 0, NULL);

    IplImage *img;
    unsigned char *imdata;
    dc1394video_mode_t video_mode = frame->video_mode;
    CvSize size = cvSize(frame->size[0], frame->size[1]);

    if (video_mode == DC1394_VIDEO_MODE_640x480_MONO8) {

        g_return_val_if_fail(
            (size.width * size.height * 1 * sizeof(unsigned char)) == frame->image_bytes,
            NULL);

        IplImage *tmp = cvCreateImageHeader(size, IPL_DEPTH_8U, 1);
        cvSetData(tmp, frame->image, size.width);

        img = cvCreateImage(size, IPL_DEPTH_8U, tmp->nChannels);
        cvCopy(tmp, img, 0);

        cvReleaseImageHeader(&tmp);

    } else if (video_mode == DC1394_VIDEO_MODE_640x480_MONO16) {

        g_return_val_if_fail(
            (size.width * size.height * 2 * sizeof(unsigned char)) == frame->image_bytes,
            NULL);

        IplImage *tmp = cvCreateImageHeader(size, IPL_DEPTH_16U, 1);
        cvSetData(tmp, frame->image, size.width*2);

        img = cvCreateImage(size, IPL_DEPTH_16U, tmp->nChannels);
        cvCopy(tmp, img, 0);

        cvReleaseImageHeader(&tmp);

    } else if ((video_mode == DC1394_VIDEO_MODE_FORMAT7_0) ||
               (video_mode == DC1394_VIDEO_MODE_FORMAT7_1)) {
            dc1394error_t err;
            dc1394video_frame_t dest;
            IplImage *tmp;

            img = cvCreateImageHeader(size, IPL_DEPTH_8U, 3);

            /* debayer frame into RGB8 */
            imdata = (unsigned char *)malloc(frame->size[0]*frame->size[1]*3*sizeof(unsigned char));
            dest.image = imdata;
            dest.color_coding = DC1394_COLOR_CODING_RGB8;
            err = dc1394_debayer_frames(frame, &dest, DC1394_BAYER_METHOD_NEAREST);
            if (err != DC1394_SUCCESS)
                dc1394_log_error("Could not convert/debayer frames");

            /* convert from RGB to BGR */
            tmp = cvCreateImageHeader(cvSize(frame->size[0], frame->size[1]), IPL_DEPTH_8U, 3);
            cvSetData(tmp, imdata, frame->size[0]*3);

            cvCvtColor(tmp, img, CV_RGB2BGR);

            free(imdata);
            cvReleaseImageHeader(&tmp);
    } else {
        g_assert_not_reached();
    }

    return img;
}
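The MONO8 and MONO16 branches above share a common OpenCV 1.x idiom: wrap an externally owned buffer in a temporary header, deep-copy it, then release only the header. A minimal sketch of that idiom (the function name and packed-MONO8 assumption are illustrative):

/* Wrap an external 8-bit mono buffer without copying, deep-copy it,
 * then release only the header -- buf itself is never freed here. */
IplImage *wrap_and_copy(unsigned char *buf, int width, int height)
{
    IplImage *hdr = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 1);
    cvSetData(hdr, buf, width);          /* step == width for packed MONO8 */

    IplImage *copy = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
    cvCopy(hdr, copy, NULL);             /* copy owns its own pixel data */

    cvReleaseImageHeader(&hdr);          /* frees the header only, not buf */
    return copy;
}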
Example #2
JNIEXPORT void JNICALL Java_io_card_payment_CardScanner_nScanFrame(JNIEnv *env, jobject thiz,
    jbyteArray jb, jint width, jint height, jint orientation, jobject dinfo,
    jobject jCardResultBitmap, jboolean jScanExpiry) {
  dmz_trace_log("Java_io_card_payment_CardScanner_nScanFrame ... width:%i height:%i orientation:%i", width, height, orientation);

  if (orientation == 0) {
    dmz_error_log("orientation is 0. Nothing good can come from this.");
    return;
  }

  if (flipped) {
    orientation = dmz_opposite_orientation(orientation);
  }

  FrameScanResult result;

  IplImage *image = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 1);
  jbyte *jBytes = env->GetByteArrayElements(jb, 0);
  image->imageData = (char *)jBytes;

  float focusScore = dmz_focus_score(image, false);
  env->SetFloatField(dinfo, detectionInfoId.focusScore, focusScore);
  dmz_trace_log("focus score: %f", focusScore);
  if (focusScore >= minFocusScore) {

    IplImage *cbcr = cvCreateImageHeader(cvSize(width / 2, height / 2), IPL_DEPTH_8U, 2);
    cbcr->imageData = ((char *)jBytes) + width * height;
    IplImage *cb, *cr;

    // Note: cr and cb are reversed here because Android uses android.graphics.ImageFormat.NV21. This is actually YCrCb rather than YCbCr!
    dmz_deinterleave_uint8_c2(cbcr, &cr, &cb);

    cvReleaseImageHeader(&cbcr);

    dmz_edges found_edges;
    dmz_corner_points corner_points;
    bool cardDetected = dmz_detect_edges(image, cb, cr,
                                         orientation,
                                         &found_edges, &corner_points
                                        );

    updateEdgeDetectDisplay(env, thiz, dinfo, found_edges);

    if (cardDetected) {
      IplImage *cardY = NULL;
      dmz_transform_card(NULL, image, corner_points, orientation, false, &cardY);

      if (!detectOnly) {
        result.focus_score = focusScore;
        result.flipped = flipped;
        scanner_add_frame_with_expiry(&scannerState, cardY, jScanExpiry, &result);
        if (result.usable) {
          ScannerResult scanResult;
          scanner_result(&scannerState, &scanResult);

          if (scanResult.complete) {
            setScanCardNumberResult(env, dinfo, &scanResult);
            logDinfo(env, dinfo);
          }
        }
        else if (result.upside_down) {
          flipped = !flipped;
        }
      }

      setDetectedCardImage(env, jCardResultBitmap, cardY, cb, cr, corner_points, orientation);
      cvReleaseImage(&cardY);
    }

    cvReleaseImage(&cb);
    cvReleaseImage(&cr);
  }

  cvReleaseImageHeader(&image);
  env->ReleaseByteArrayElements(jb, jBytes, 0);
}
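The NV21 note in the function above is the key to its pointer arithmetic: the frame is a full-resolution Y plane followed by an interleaved half-resolution Cr/Cb plane, with Cr first. A small sketch of the layout (function and parameter names are illustrative):

/* NV21: [ Y (w*h bytes) ][ interleaved Cr/Cb (w*h/2 bytes) ].
 * Chroma is subsampled 2x2 and Cr precedes Cb (unlike NV12). */
static void nv21_sample(const unsigned char *frame, int w, int h, int x, int y,
                        unsigned char *Y, unsigned char *Cr, unsigned char *Cb)
{
    const unsigned char *chroma = frame + w * h; /* start of the Cr/Cb plane */
    int ci = (y / 2) * w + (x / 2) * 2;          /* offset of the Cr byte */

    *Y  = frame[y * w + x];
    *Cr = chroma[ci];
    *Cb = chroma[ci + 1];
}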
Example #3
/*!

  Computes the SURF points in the current image I and tries to match
  them with the points in the reference list. Only the matched points
  are stored.

  \param I : The grayscale image where the points are computed.

  \return the number of points that have been matched.
*/
unsigned int vpKeyPointSurf::matchPoint(const vpImage<unsigned char> &I)
{
  IplImage* currentImage = NULL;

  if((I.getWidth() % 8) == 0){
    int height = (int)I.getHeight();
    int width  = (int)I.getWidth();
    CvSize size = cvSize(width, height);
    currentImage = cvCreateImageHeader(size, IPL_DEPTH_8U, 1);
    currentImage->imageData = (char*)I.bitmap;
  }else{
    vpImageConvert::convert(I,currentImage);
  }
  
  /* we release the memory storage for the current points (it has to be kept 
      allocated for the get descriptor points, ...) */
  if(storage_cur != NULL){
    cvReleaseMemStorage(&storage_cur);
    storage_cur = NULL;
  }
  storage_cur = cvCreateMemStorage(0);

  cvExtractSURF( currentImage, 0, &image_keypoints, &image_descriptors,
     storage_cur, params );

  CvSeqReader reader, kreader;
  cvStartReadSeq( ref_keypoints, &kreader );
  cvStartReadSeq( ref_descriptors, &reader );


  std::list<int> indexImagePair;
  std::list<int> indexReferencePair;


  unsigned int nbrPair = 0;

  for(int i = 0; i < ref_descriptors->total; i++ )
  {
    const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
    const float* descriptor = (const float*)reader.ptr;
    CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
    CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
    int nearest_neighbor = naiveNearestNeighbor( descriptor,
						 kp->laplacian,
						 image_keypoints,
						 image_descriptors );
    if( nearest_neighbor >= 0 )
    {
      indexReferencePair.push_back(i);
      indexImagePair.push_back(nearest_neighbor);
      nbrPair++;
    }
  }

  std::list<int>::const_iterator indexImagePairIter = indexImagePair.begin();
  std::list<int>::const_iterator indexReferencePairIter = indexReferencePair.begin();

  matchedReferencePoints.resize(0);

  if (nbrPair == 0) {
    // release the current image before the early return to avoid a leak
    if((I.getWidth() % 8) == 0){
      currentImage->imageData = NULL;
      cvReleaseImageHeader(&currentImage);
    }else{
      cvReleaseImage(&currentImage);
    }
    return (0);
  }

  currentImagePointsList.resize(nbrPair);
  matchedReferencePoints.resize(nbrPair);

  for (unsigned int i = 0; i < nbrPair; i++)
  {
      int index = *indexImagePairIter;

      CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem(image_keypoints, index);

      currentImagePointsList[i].set_i(r1->pt.y);
      currentImagePointsList[i].set_j(r1->pt.x);

      matchedReferencePoints[i] = (unsigned int)*indexReferencePairIter;


      ++indexImagePairIter;
      ++indexReferencePairIter;
  }

  if((I.getWidth() % 8) == 0){
    currentImage->imageData = NULL;
    cvReleaseImageHeader(&currentImage);
  }else{
    cvReleaseImage(&currentImage);
  }

  return nbrPair;
}
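matchPoint relies on naiveNearestNeighbor, which is not shown here. A hedged sketch in the style of OpenCV's classic find_obj.cpp sample (the actual ViSP helper may differ in names and thresholds):

// Find the nearest SURF descriptor with a matching Laplacian sign and a
// Lowe-style ratio test; returns -1 when the best match is ambiguous.
static int naiveNearestNeighborSketch(const float *vec, int laplacian,
                                      const CvSeq *keypoints,
                                      const CvSeq *descriptors)
{
  int length = descriptors->elem_size / (int)sizeof(float);
  double best = 1e30, second = 1e30;
  int bestIndex = -1;

  CvSeqReader kreader, reader;
  cvStartReadSeq(keypoints, &kreader, 0);
  cvStartReadSeq(descriptors, &reader, 0);

  for (int i = 0; i < descriptors->total; i++) {
    const CvSURFPoint *kp = (const CvSURFPoint *)kreader.ptr;
    const float *mvec = (const float *)reader.ptr;
    CV_NEXT_SEQ_ELEM(kreader.seq->elem_size, kreader);
    CV_NEXT_SEQ_ELEM(reader.seq->elem_size, reader);

    if (laplacian != kp->laplacian) // Laplacian signs must agree
      continue;

    double d = 0;
    for (int j = 0; j < length; j++) {
      double t = vec[j] - mvec[j];
      d += t * t;
    }
    if (d < best) { second = best; best = d; bestIndex = i; }
    else if (d < second) { second = d; }
  }
  // accept only clearly unambiguous matches
  return (best < 0.6 * second) ? bestIndex : -1;
}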
Example #4
HRESULT SaveSampleFilter::myTransform(void* self, IMediaSample *pInSample, CMediaType* pInMT, IMediaSample *pOutSample, CMediaType* pOutMT)
{
	if (self == NULL || pInSample == NULL || pInMT == NULL || pOutSample == NULL || pOutMT == NULL)
	{
		return E_FAIL;
	}
	SaveSampleFilter* pSelf = (SaveSampleFilter*)(GSMuxFilter*)self;

	if (IsEqualGUID(*pInMT->Type(), *pOutMT->Type()) && IsEqualGUID(*pInMT->Subtype(), *pOutMT->Subtype()) 
		&& pInMT->IsTemporalCompressed() == pOutMT->IsTemporalCompressed())
	{
		if (pInMT->FormatType() == NULL || pOutMT->FormatType() == NULL || 
			!IsEqualGUID(*pInMT->FormatType(), *pOutMT->FormatType()))
		{
			return E_FAIL;
		}
		if (IsEqualGUID(*pInMT->FormatType(), FORMAT_VideoInfo))
		{
			VIDEOINFOHEADER* pInFormat = (VIDEOINFOHEADER*)pInMT->Format();
			VIDEOINFOHEADER* pOutFormat = (VIDEOINFOHEADER*)pOutMT->Format();
			if (pInFormat == NULL || pOutFormat == NULL)
				return E_FAIL;
			if (pInFormat->bmiHeader.biWidth != pOutFormat->bmiHeader.biWidth || 
				pInFormat->bmiHeader.biHeight != pOutFormat->bmiHeader.biHeight)
			{
				return E_FAIL;
			}
		}
		else
		{
			return E_FAIL;
		}


		int camChannel;
		GUID guidSubType = pInMT->subtype;
		if (IsEqualGUID(guidSubType, MEDIASUBTYPE_RGB24))
		{
			camChannel = 3;
		}
		else if(IsEqualGUID(guidSubType, MEDIASUBTYPE_RGB32) || IsEqualGUID(guidSubType, MEDIASUBTYPE_ARGB32))
		{
			camChannel = 4;
		}
		else
		{
			// unsupported subtype: camChannel would otherwise be used uninitialized
			return E_FAIL;
		}

		BYTE* pInBuffer = NULL;
		BYTE* pOutBuffer = NULL;
		pInSample->GetPointer(&pInBuffer);
		pOutSample->GetPointer(&pOutBuffer);
		if (pInBuffer == NULL || pOutBuffer == NULL)
			return E_FAIL;
		//memcpy((void*)pOutBuffer, (void*)pInBuffer, pOutSample->GetSize());

		VIDEOINFOHEADER* pInFormat = (VIDEOINFOHEADER*)pInMT->Format();

		IplImage* cvImgSrc = cvCreateImageHeader(cvSize(pInFormat->bmiHeader.biWidth , pInFormat->bmiHeader.biHeight), 8, camChannel);
		cvImgSrc->imageData = (char*)pInBuffer;

		IplImage* cvImgDst = cvCreateImageHeader(cvSize(pInFormat->bmiHeader.biWidth , pInFormat->bmiHeader.biHeight), 8, camChannel);
		cvImgDst->imageData = (char*)pOutBuffer;

		cvCopy(cvImgSrc,cvImgDst);
		
		if(pSelf->m_bSaveFlag)
		{
			cvSaveImage(pSelf->m_saveSampleName,cvImgDst);
			pSelf->m_bSaveFlag = false;
		}
		
		cvReleaseImageHeader(&cvImgSrc);
		cvReleaseImageHeader(&cvImgDst);


		return S_OK;
	}
	else
	{
		return E_FAIL;
	}

}
Example #5
HRESULT SaveFrameFilter::myTransform(void* self, IMediaSample *pInSample, CMediaType* pInMT, IMediaSample *pOutSample, CMediaType* pOutMT)
{
	if (self == NULL || pInSample == NULL || pInMT == NULL || pOutSample == NULL || pOutMT == NULL)
	{
		return E_FAIL;
	}
	SaveFrameFilter* pSelf = (SaveFrameFilter*)(GSMuxFilter*)self;

	if (IsEqualGUID(*pInMT->Type(), *pOutMT->Type()) && IsEqualGUID(*pInMT->Subtype(), *pOutMT->Subtype()) 
		&& pInMT->IsTemporalCompressed() == pOutMT->IsTemporalCompressed())
	{
		if (pInMT->FormatType() == NULL || pOutMT->FormatType() == NULL || 
			!IsEqualGUID(*pInMT->FormatType(), *pOutMT->FormatType()))
		{
			return E_FAIL;
		}
		if (IsEqualGUID(*pInMT->FormatType(), FORMAT_VideoInfo))
		{
			VIDEOINFOHEADER* pInFormat = (VIDEOINFOHEADER*)pInMT->Format();
			VIDEOINFOHEADER* pOutFormat = (VIDEOINFOHEADER*)pOutMT->Format();
			if (pInFormat == NULL || pOutFormat == NULL)
				return E_FAIL;
			if (pInFormat->bmiHeader.biWidth != pOutFormat->bmiHeader.biWidth || 
				pInFormat->bmiHeader.biHeight != pOutFormat->bmiHeader.biHeight)
			{
				return E_FAIL;
			}
		}
		else
		{
			return E_FAIL;
		}


		int camChannel;
		GUID guidSubType = pInMT->subtype;
		if (IsEqualGUID(guidSubType, MEDIASUBTYPE_RGB24))
		{
			camChannel = 3;
		}
		else if(IsEqualGUID(guidSubType, MEDIASUBTYPE_RGB32) || IsEqualGUID(guidSubType, MEDIASUBTYPE_ARGB32))
		{
			camChannel = 4;
		}
		else
		{
			// unsupported subtype: camChannel would otherwise be used uninitialized
			return E_FAIL;
		}

		BYTE* pInBuffer = NULL;
		BYTE* pOutBuffer = NULL;
		pInSample->GetPointer(&pInBuffer);
		pOutSample->GetPointer(&pOutBuffer);
		if (pInBuffer == NULL || pOutBuffer == NULL)
			return E_FAIL;
		//memcpy((void*)pOutBuffer, (void*)pInBuffer, pOutSample->GetSize());

		VIDEOINFOHEADER* pInFormat = (VIDEOINFOHEADER*)pInMT->Format();

		IplImage* cvImgSrc = cvCreateImageHeader(cvSize(pInFormat->bmiHeader.biWidth , pInFormat->bmiHeader.biHeight), 8, camChannel);
		cvImgSrc->imageData = (char*)pInBuffer;

		IplImage* cvImgDst = cvCreateImageHeader(cvSize(pInFormat->bmiHeader.biWidth , pInFormat->bmiHeader.biHeight), 8, camChannel);
		cvImgDst->imageData = (char*)pOutBuffer;


		cvCopy(cvImgSrc,cvImgDst);

		CAutoLock lck1(&pSelf->m_csRenderPara);
		if(pSelf->m_bIsSave)
		{
			char filename[MAX_PATH];
			sprintf(filename,"C://SaveFrame//%s_%d.bmp",pSelf->m_saveFrameName,pSelf->m_nFrameCnt);
			if(pSelf->m_bIsTouch)
			{
				cvCircle(cvImgSrc,cvPoint(cvImgSrc->width/2,cvImgSrc->height/2),20,CV_RGB(255,255,255),5);
			}
			cvSaveImage(filename,cvImgSrc);
			
		}
		
		if(pSelf->m_bIsOneTouch)
		{
			char filename2[MAX_PATH];
			sprintf(filename2,"C://SaveFrame//One_%s_%d.bmp",pSelf->m_saveFrameName,pSelf->m_nFrameCnt);
			if(pSelf->m_bIsTouch)
			{
				int rectSize = 20;
				cvRectangle(cvImgSrc,cvPoint(cvImgSrc->width/2,cvImgSrc->height/2),cvPoint(cvImgSrc->width/2+rectSize,cvImgSrc->height/2+rectSize),CV_RGB(255,255,255),5);
				
			}
			cvSaveImage(filename2,cvImgSrc);

			pSelf->m_bIsOneTouch = false;
		}

		

		cvReleaseImageHeader(&cvImgSrc);
		cvReleaseImageHeader(&cvImgDst);


		pSelf->m_nFrameCnt++;

		return S_OK;
	}
	else
	{
		return E_FAIL;
	}

}
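Examples #4 and #5 duplicate the mapping from media subtype to channel count. A small shared helper (the name is illustrative) keeps the unsupported-subtype case explicit:

// Illustrative helper: map a DirectShow RGB subtype to an OpenCV channel
// count; returns 0 for subtypes these filters do not handle.
static int ChannelsForSubtype(const GUID &subtype)
{
	if (IsEqualGUID(subtype, MEDIASUBTYPE_RGB24))
		return 3;
	if (IsEqualGUID(subtype, MEDIASUBTYPE_RGB32) || IsEqualGUID(subtype, MEDIASUBTYPE_ARGB32))
		return 4;
	return 0; // caller should treat this as E_FAIL
}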
Example #6
void CleanRetinex(GstRetinex *retinex)
{
  if (retinex->pFrame)  cvReleaseImageHeader(&retinex->pFrame);
}
Example #7
void BOCV_IplImage_detach(IplImage *img)
{
	cvReleaseImageHeader(&img);
}
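Releasing only the header makes sense because the pixel buffer is owned elsewhere. A hypothetical attach counterpart (this function is an assumption, not part of the original project) shows the pairing:

/* Hypothetical counterpart to BOCV_IplImage_detach: wrap an externally
 * owned buffer. No pixel memory is allocated here, which is exactly why
 * detach releases only the header. */
IplImage *BOCV_IplImage_attach(unsigned char *data, int width, int height,
                               int channels)
{
	IplImage *img = cvCreateImageHeader(cvSize(width, height),
	                                    IPL_DEPTH_8U, channels);
	cvSetData(img, data, width * channels);
	return img;
}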
Example #8
void OpenCVImage::setPixelFormat(Image::PixelFormat format)
{
    assert(format != PF_START && "No format specified");
    assert(format != PF_END   && "No format specified");
    assert(m_own && "Must have ownership of the image to change its format");

    /**
     * Lookup table for conversion codes
     * Invalid conversions labeled with -1
     * Conversions to the same colorspace labeled with -2
     * Conversions on a sentinel value are ALWAYS invalid
     * Macros are defined in cv.h
     *
     * Table is constructed so the row is the current colorspace,
     * column is the colorspace to convert to.
     */
    static const int lookupTable[11][11] = {
        /* Sentinel value */
        {-1,         -1,         -1,-1,         -1,        -1,        -1,        -1, -1, -1, -1},
        /* RGB */
        {-1,         -2, CV_RGB2BGR,-1,CV_RGB2GRAY,CV_RGB2HSV,CV_RGB2Luv, RGB2LCHUV, -1, -1, -1},
        /* BGR */
        {-1, CV_BGR2RGB,         -2,-1,CV_BGR2GRAY,CV_BGR2HSV,CV_BGR2Luv,        -1, -1, -1, -1},
        /* YUV */
        {-1,         -1,         -1,-2,         -1,        -1,        -1,        -1, -1, -1, -1},
        /* Grayscale */
        {-1,CV_GRAY2RGB,CV_GRAY2BGR,-1,         -2,        -1,        -1,        -1, -1, -1, -1},
        /* HSV */ 
        {-1, CV_HSV2RGB, CV_HSV2BGR,-1,         -1,        -2,        -1,        -1, -1, -1, -1},
        /* CIELUV */
        {-1, CV_Luv2RGB, CV_Luv2BGR,-1,         -1,        -1,        -2,        -1, -1, -1, -1},
        /* CIELCh_uv */
        {-1,         -1,         -1,-1,         -1,        -1,        -1,        -2, -1, -1, -1},
        /* CIELAB */
        {-1,         -1,         -1,-1,         -1,        -1,        -1,        -1, -2, -1, -1},
        /* CIELCh_ab */
        {-1,         -1,         -1,-1,         -1,        -1,        -1,        -1, -1, -2, -1},
        /* Sentinel value */
        {-1,         -1,         -1,-1,         -1,        -1,        -1,        -1, -1, -1, -2}
    };

    int code = lookupTable[m_fmt][format];
    if(code == RGB2LCHUV) {
        LCHConverter::convert(this);
        m_fmt = Image::PF_LCHUV_8;
    } else if (code == -1) {
        throw ImageConversionException(m_fmt, format);
    } else if (code != -2) {
        // If the number of channels or depth change, we need a new image
        int depth = getDepth();
        int channels = getNumChannels();

        int newDepth = getFormatDepth(format);
        int newChannels = getFormatNumChannels(format);

        if (depth != newDepth || channels != newChannels) {
            // Create a new image with the new depth/channels
            IplImage* newImg = cvCreateImage(cvGetSize(m_img),
                                             newDepth, newChannels);
            cvCvtColor(m_img, newImg, code);

            // Delete old image data
            if (m_data) {
                cvReleaseImageHeader(&m_img);
                delete[] m_data;
                m_data = NULL;
            } else {
                cvReleaseImage(&m_img);
            }

            // Assign modified image as current image
            m_img = newImg;
        } else {
            cvCvtColor(m_img, m_img, code);
        }

        // Change the format flag
        m_fmt = format;
    }
}
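A usage sketch of the table-driven conversion (the constructor and the enumerator names PF_RGB_8 and PF_GRAY_8 are assumptions chosen to match PF_LCHUV_8 above): an RGB-to-grayscale request looks up CV_RGB2GRAY and reallocates because the channel count drops from 3 to 1.

// Hedged usage sketch; constructor and PF_* names are assumptions.
OpenCVImage img("frame.png");          // owning image, format PF_RGB_8
img.setPixelFormat(Image::PF_GRAY_8);  // table yields CV_RGB2GRAY; 3 -> 1 channels forces a reallocation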
Example #9
void ImageProcessorCV::CalculateGradientImage(CByteImage *pInputImage, CByteImage *pOutputImage)
{
	if (pInputImage->width != pOutputImage->width || pInputImage->height != pOutputImage->height ||
		pOutputImage->type != CByteImage::eGrayScale)
		return;

	IplImage *pIplInputImage = IplImageAdaptor::Adapt(pInputImage);
	IplImage *pIplOutputImage = IplImageAdaptor::Adapt(pOutputImage);

	if (pInputImage->type == CByteImage::eGrayScale)
	{
		IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
		IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);
		
		cvSmooth(pIplInputImage, abs, CV_GAUSSIAN, 3, 3);
		cvSobel(abs, diff, 1, 0, 3);
		cvConvertScaleAbs(diff, pIplOutputImage);
		cvSobel(abs, diff, 0, 1, 3);
		cvConvertScaleAbs(diff, abs);
		cvAdd(abs, pIplOutputImage, pIplOutputImage, 0);
		
		cvReleaseImage(&diff);
		cvReleaseImage(&abs);
	}
	else if (pInputImage->type == CByteImage::eRGB24)
	{
		//	Gradient image computation by Irina Wächter:
		//	instead of the exact norm sqrt(x*x + y*y), use |x|+|y| because it is much faster
		IplImage *singleChannel0 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
		IplImage *singleChannel1 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
		IplImage *singleChannel2 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
		IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
		IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);
		
		cvCvtPixToPlane(pIplInputImage, singleChannel0, singleChannel1, singleChannel2, NULL);
	
		cvSmooth(singleChannel0, singleChannel0, CV_GAUSSIAN, 3, 3);
		cvSobel(singleChannel0, diff, 1, 0, 3);
		cvConvertScaleAbs(diff, abs);
		cvSobel(singleChannel0, diff, 0, 1, 3);
		cvConvertScaleAbs(diff, singleChannel0);
		cvAdd(abs, singleChannel0, pIplOutputImage, 0);
	
		cvSmooth(singleChannel1, singleChannel1, CV_GAUSSIAN, 3, 3);
		cvSobel(singleChannel1, diff, 1, 0, 3);
		cvConvertScaleAbs(diff, abs);
		cvSobel(singleChannel1, diff, 0, 1, 3);
		cvConvertScaleAbs(diff, singleChannel1);
		cvAdd(abs, singleChannel1, singleChannel1, 0);
		cvMax(pIplOutputImage, singleChannel1, pIplOutputImage);
	
		cvSmooth(singleChannel2, singleChannel2, CV_GAUSSIAN, 3, 3);
		cvSobel(singleChannel2, diff, 1, 0, 3);
		cvConvertScaleAbs(diff, abs);
		cvSobel(singleChannel2, diff, 0, 1, 3);
		cvConvertScaleAbs(diff, singleChannel2);
		cvAdd(abs, singleChannel2, singleChannel2, 0);
		cvMax(pIplOutputImage, singleChannel2, pIplOutputImage);
	
		cvReleaseImage(&singleChannel0);
		cvReleaseImage(&singleChannel1);
		cvReleaseImage(&singleChannel2);
		cvReleaseImage(&diff);
		cvReleaseImage(&abs);
	}
	
	cvReleaseImageHeader(&pIplInputImage);
	cvReleaseImageHeader(&pIplOutputImage);
}
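The RGB branch repeats the same five-call sequence once per channel. The |dx|+|dy| approximation could be factored into one helper; a sketch using the same scratch images the function already allocates:

// Sketch: |dI/dx| + |dI/dy| for one 8-bit channel, written into dst.
// diff (IPL_DEPTH_16S) and abs (IPL_DEPTH_8U) are caller-provided scratch;
// channel is smoothed and overwritten in the process, as in the original.
static void AbsGradient(IplImage *channel, IplImage *dst, IplImage *diff, IplImage *abs)
{
	cvSmooth(channel, channel, CV_GAUSSIAN, 3, 3);
	cvSobel(channel, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(channel, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, channel);
	cvAdd(abs, channel, dst, 0);
}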
Example #10
DEFINE_THREAD_ROUTINE( control_system, data )
{
    IplImage *current_frame, *color_pos, *src_hsi,*Connection;
    control_data_t l_control, prev_control;
    structure_COM CntrMass, Prev_COM;
    current_frame = cvCreateImage( cvSize(QVGA_WIDTH,QVGA_HEIGHT),8,3 );
    color_pos = cvCreateImage( cvSize(QVGA_WIDTH,QVGA_HEIGHT),8,1 );
    Connection = cvCreateImage(cvGetSize(color_pos),8,1);
    Prev_COM.n = -1;
  
    PRINT("STARTING CONTROL SYSTEM \n");

    while( end_all_threads==0 )
    {
    	vp_os_mutex_lock( &control_video_lock );
        if( global_video_feed==NULL )
        {
            vp_os_delay(20);
            vp_os_mutex_unlock( &control_video_lock );
            continue;
        }
    	cvCopy( global_video_feed,current_frame,NULL );
    	vp_os_mutex_unlock( &control_video_lock );

    	vp_os_mutex_lock( &control_data_lock );
        	prev_control = control_data; 
    	vp_os_mutex_unlock( &control_data_lock ); 
   #ifdef Runtime	
        cvZero( color_pos );
	BackProjectHSI( current_frame,color_pos,cvScalar( COLOR2DETECT ), 20 ); // RED: 175,220,160,0 
	cvMorphologyEx( color_pos,color_pos,NULL,NULL,CV_MOP_OPEN,1 );
        
        CntrMass = calcCenterOfMass( color_pos );
        if( Prev_COM.n == -1 )
        {
                if( CntrMass.n > Pix_Count_thresh  )
                        Prev_COM = CntrMass;
        }
        else if( CntrMass.n>Pix_Count_thresh )
        {
            uchar *ptr, *ptr_src;
	    ptr_src = (uchar *)color_pos->imageData + Prev_COM.y * color_pos->widthStep + Prev_COM.x * color_pos->nChannels;
            if( *ptr_src > 127 )
            {
                  CalcConnectedComponents( color_pos, Connection, 5 );
            	  ptr = (uchar *)Connection->imageData + Prev_COM.y * Connection->widthStep + Prev_COM.x * Connection->nChannels;
             	  RemoveAllComponentExcept( Connection,color_pos,*ptr );
            	  CntrMass = calcCenterOfMass( color_pos );
	          Prev_COM = CntrMass;
            }
	    else
		  Prev_COM.n = -1;

            //CalcConnectedComponents( color_pos, Connection, 5 );
            //ptr = (uchar *)Connection->imageData + Prev_COM.y * Connection->widthStep + Prev_COM.x * Connection->nChannels;
            //RemoveAllComponentExcept( Connection,color_pos,*ptr );
            //CntrMass = calcCenterOfMass( color_pos );
	    //Prev_COM = CntrMass;
        }
	else
	    Prev_COM.n = -1;

	MakeControlZero( &l_control );
        if( CntrMass.n>Pix_Count_thresh )
        {
		CvMemStorage* storage = cvCreateMemStorage(0);
		float *p;

		cvCircle( current_frame, cvPoint(CntrMass.x,CntrMass.y), 5, CV_RGB(255,255,255), 1, 8, 0 );
		l_control.yaw = (float)(CntrMass.x-(QVGA_WIDTH/2))/(QVGA_WIDTH/2);
		l_control.gaz = -(float)(CntrMass.y-(QVGA_HEIGHT/2))/(QVGA_HEIGHT/2);
		if( CntrMass.n > 700 && CntrMass.n < 20000 )
		{
			if(CntrMass.n > 10000 && CntrMass.n<50000)
			{
				CntrMass.n = 10000;
				//l_control.pitch = -0.33;
			}
			//else
				//l_control.pitch = (float)(( (float)CntrMass.n - (float)5000 )/20000);
		}

		PRINT( "pitch: %f\n\n",l_control.pitch );
		/*cvCanny( color_pos, color_pos, 10, 100, 3 );
		CvSeq* circles =cvHoughCircles(color_pos, storage, CV_HOUGH_GRADIENT, 1, 30, 100, 10, 5, 25 );
		if( circles->total > 0 )
		{
			p = (float *)cvGetSeqElem( circles, 0 );
			cvCircle( color_pos, cvPoint((int) p[0],(int) p[1]), (int) p[2], cvScalar(255,0,0,0), 1, 8, 0 );
			l_control.pitch = (  p[2]-15 )/20; 
			//PRINT( "Radius: %f\n\n",p[2] );
		}
		cvShowImage("Image", color_pos);
		cvWaitKey(20);*/
		cvReleaseMemStorage( &storage );	// storage feeds only the commented-out Hough code; release it to avoid a per-frame leak
        }
        //cvShowImage("Image", current_frame);
	//cvWaitKey(20);
        //cvShowImage( "BackProjection",color_pos );
        //cvWaitKey(20);
    #endif

    #ifdef Calc
    {
    	uchar *ptr_src;
        CvScalar current_sclr;
        src_hsi = cvCreateImage( cvGetSize(current_frame),current_frame->depth,3 );
        cvCvtColor( current_frame,src_hsi,CV_BGR2HSV );   // Red Ball:170,200,130
         //cvShowImage("Image", current_frame);
        //cvShowImage( "BackProjection",src_hsi );
        //cvWaitKey(30);
        ptr_src = (uchar *)src_hsi->imageData + (QVGA_HEIGHT/4) * current_frame->widthStep + (QVGA_WIDTH/4) * current_frame->nChannels;
        current_sclr = cvScalar( ptr_src[0],ptr_src[1],ptr_src[2],0  );
        PRINT( "Pix :- %d,%d,%d\n",ptr_src[0],ptr_src[1],ptr_src[2] );
        //if( calcDist( current_sclr,cvScalar(170,200,130,0) )<100  ) 
        //    PRINT("CENTER EQUAL\n\n");
            //l_control.yaw = -1;
       
        cvReleaseImage( &src_hsi );	// src_hsi is reallocated every iteration; release it before leaving the block
    }
    #endif
    
    #ifdef Runtime
    	vp_os_mutex_lock( &control_data_lock );
		//control_data.yaw = -1;
        	control_data = GradualChangeOfControl( l_control,prev_control,20 ); 
		//PRINT( "YAW: %f\n\n",control_data.yaw );
    	vp_os_mutex_unlock( &control_data_lock );
    #endif

    	vp_os_delay(10);
    }

    cvReleaseImage(&current_frame);	// created with cvCreateImage, so release the data, not just the header
    cvReleaseImage(&color_pos);
    cvReleaseImage(&Connection);
    return C_OK;
}
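calcCenterOfMass is referenced but not shown. A hedged sketch using image moments (it assumes structure_COM carries the centroid x, y and the pixel count n, as the usage above suggests):

/* Centroid and foreground-pixel count of a binary mask via cvMoments. */
structure_COM calcCenterOfMassSketch(IplImage *mask)
{
    structure_COM com;
    CvMoments m;
    cvMoments(mask, &m, 1);                 /* binary flag: any nonzero pixel counts as 1 */
    double m00 = cvGetSpatialMoment(&m, 0, 0);

    com.n = (int)m00;                       /* foreground pixel count */
    if (m00 > 0) {
        com.x = (int)(cvGetSpatialMoment(&m, 1, 0) / m00);
        com.y = (int)(cvGetSpatialMoment(&m, 0, 1) / m00);
    } else {
        com.x = com.y = 0;
    }
    return com;
}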
Example #11
vector<VisionRecognitionResult> IPEL_Haar2FaceEyeDetectionComp::Recognize(vector<unsigned char> image,int width,int height,int pixelBytes)
{
	//PrintMessage("SUCCESS:IPEL_Haar2FaceEyeDetectionComp::Recognize()\n");

	vector<VisionRecognitionResult> _recognitionResult(0);

	IplImage *cvImage = cvCreateImageHeader( cvSize(width, height), 8, pixelBytes );
	cvImage->imageData = (char *)&image[0];

	if( _storage ) cvClearMemStorage( _storage );

	if( _cascade_f ) {
		/* detect faces */
		CvSeq *faces = cvHaarDetectObjects(cvImage, _cascade_f, _storage,
			1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize( 30, 30 ) );

		if( faces && faces->total>0) {
			/* Get region of face */
			int nfaces = faces->total; // the value of faces->total can change, so cache it
			_recognitionResult.resize (nfaces);
			CvRect *fr = new CvRect[nfaces];

			for( int i = 0; i < (faces ? nfaces : 0); i++ ) {
				/* draw a rectangle */
				CvRect *r = (CvRect*)cvGetSeqElem(faces, i);
				memcpy(&fr[i],r,sizeof(CvRect));					

				//rec.type = 1;
				_recognitionResult[i].name = "Face";
				/*- Get Upper left rectangle corner coordinate -*/
				_recognitionResult[i].point1X = (int)((r->x) + 0.5);
				_recognitionResult[i].point1Y = (int)((r->y) + 0.5);
				/*- Get Upper right rectangle corner coordinate -*/
				_recognitionResult[i].point2X = (int)((r->x + r->width) + 0.5);
				_recognitionResult[i].point2Y = (int)((r->y) + 0.5);
				/*- Get Lower right rectangle corner coordinate -*/
				_recognitionResult[i].point3X = (int)((r->x + r->width) + 0.5);
				_recognitionResult[i].point3Y = (int)((r->y + r->height) + 0.5);
				/*- Get Lower left rectangle corner coordinate -*/
				_recognitionResult[i].point4X = (int)((r->x) + 0.5);
				_recognitionResult[i].point4Y = (int)((r->y + r->height) + 0.5);
			}

			// Running the Haar detector a second time can produce different results.
			for( int i = 0; i < (faces ? nfaces : 0); i++ ) {
				/* reset buffer for the next object detection */
				cvClearMemStorage(_storage);

				/* Set the Region of Interest: estimate the eyes' position */
				cvSetImageROI(cvImage, cvRect(fr[i].x, fr[i].y + (int)(fr[i].height/5.5), fr[i].width, (int)(fr[i].height/3.0) ) );

				/* detect eyes */
				CvSeq* eyes = cvHaarDetectObjects(cvImage, _cascade_e, _storage,
					1.15, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize(25, 15));

				/* draw a rectangle for each eye found */
				for(int j = 0; j < (eyes ? eyes->total : 0); j++ ) {
					if(j>1) break;
					CvRect *er = (CvRect*) cvGetSeqElem( eyes, j );
					cvRectangle(cvImage,
						cvPoint(er->x, er->y), 
						cvPoint(er->x + er->width, er->y + er->height),
						CV_RGB(255, 0, 0), 1, 8, 0);

				}

				cvResetImageROI(cvImage);
			}

			delete [] fr;
		}
	}

#if 0
	if( _recognitionResult.size() ) {
		for( std::vector<VisionRecognitionResult>::iterator it = _recognitionResult.begin()  ;  it != _recognitionResult.end()  ;  it++ ) {
			cvLine(cvImage,
				cvPoint(it->point1X,it->point1Y),
				cvPoint(it->point2X,it->point2Y),
				CV_RGB(0, 255, 0));
			cvLine(cvImage,
				cvPoint(it->point2X,it->point2Y),
				cvPoint(it->point3X,it->point3Y),
				CV_RGB(0, 255, 0));
			cvLine(cvImage,
				cvPoint(it->point3X,it->point3Y),
				cvPoint(it->point4X,it->point4Y),
				CV_RGB(0, 255, 0));
			cvLine(cvImage,
				cvPoint(it->point4X,it->point4Y),
				cvPoint(it->point1X,it->point1Y),
				CV_RGB(0, 255, 0));
		}
	}
#endif

	cvReleaseImageHeader( &cvImage );

	return _recognitionResult;
}
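Recognize assumes _cascade_f, _cascade_e and _storage are already initialized. A hedged setup sketch (the enclosing method and the cascade file paths are placeholders):

// Typical OpenCV 1.x initialization for the members used above.
void IPEL_Haar2FaceEyeDetectionComp::initDetectors()
{
	_cascade_f = (CvHaarClassifierCascade *)cvLoad("haarcascade_frontalface_alt.xml", 0, 0, 0);
	_cascade_e = (CvHaarClassifierCascade *)cvLoad("haarcascade_eye.xml", 0, 0, 0);
	_storage   = cvCreateMemStorage(0);
}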
Example #12
//==============================================================================
//Binary image median filtering with a (2ts+1)^2 mask
void Cdlbk::Connect_Filter(int mWid, int mHei, unsigned char* pInput)
{	
	
	if (FT)
	{
		float flg_f=float(0.3);
		int ts = (int)(sqrt((float)FILTER_SIZE));
		unsigned int mask=0;
		unsigned int tem_sum=0;
		int i,j,m,n,x1,x2,y1,y2;
		unsigned char* ptem = new unsigned char[mWid*mHei];
		memset(ptem,0,sizeof(unsigned char)*mWid*mHei);
		unsigned char* pslipmast = new unsigned char[2*ts+1];
		memset(pslipmast,0,sizeof(unsigned char)*(2*ts+1));

		for( i=0; i<mHei; i+=1)
		{
			for( j=0; j<mWid; j+=1)
			{
				x1 = ( (j-ts) < 0    ) ? 0    : (j-ts);
				x2 = ( (j+ts) > mWid ) ? mWid : (j+ts);
				y1 = ( (i-ts) < 0    ) ? 0    : (i-ts);
				y2 = ( (i+ts) > mHei ) ? mHei : (i+ts);
				mask = (x2-x1+1)*(y2-y1+1);
				tem_sum=0;

				//for (n=y1; n<=y2; n++)
				//for (m=x1; m<=x2; m++)
				//	tem_sum+=pInput[n*mWid+m]/255;					
				//
				//float rst = (float)tem_sum/mask;
				//if(rst>flg_f)
				//ptem[i*mWid+j]=255;	
				
				if( mask!=(ts+ts+1)*(ts+ts+1) )
				{
					for (n=y1; n<=y2; n++)
					for (m=x1; m<=x2; m++)
						tem_sum+=pInput[n*mWid+m]/255;					

					float rst = (float)tem_sum/mask;
					if(rst>flg_f)
					ptem[i*mWid+j]=255;	
				}
				else
				{
					if(x1==0)//new row
					{
						for (m=x1; m<=x2; m++)
						{
							pslipmast[m] = 0;						
							for (n=y1; n<=y2; n++)					//cal every pslipmast element
								pslipmast[m]+=pInput[n*mWid+m]/255;					
						}
						
						tem_sum = 0;								//cal rst
						for (int k=0; k<=x2-x1; k++)
							tem_sum += pslipmast[k];
						float rst = (float)tem_sum/mask;
						if(rst>flg_f)
							ptem[i*mWid+j]=255;	
					}
					else
					{
						for (int q=0; q<x2-x1; q++)					 //slip buffer 
							pslipmast[q] = pslipmast[q+1];

						m = x2;										//cal last element of the slip buffer
						pslipmast[x2-x1]=0;
						for (n=y1; n<=y2; n++)
							pslipmast[x2-x1] += pInput[n*mWid+m]/255;					
						
						tem_sum = 0;								//cal rst
						for (int k=0; k<=x2-x1; k++)
							tem_sum += pslipmast[k];
						float rst = (float)tem_sum/mask;
						if(rst>flg_f)
							ptem[i*mWid+j]=255;	
					}			
				}						
			}
		}

		//update input data
		memcpy(pInput,ptem,sizeof(unsigned char)*mWid*mHei);
		delete [] ptem;
		delete [] pslipmast;
	}
	else
	{
		IplImage* pFore = cvCreateImageHeader(cvSize(mWid,mHei), 8, 1);
		cvSetData(pFore, pInput, mWid);	//4x width
		CvMemStorage* storage = cvCreateMemStorage(0);
		CvSeq *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
		cvFindContours( pFore, storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
		for (seq = first_seq; seq; seq = seq->h_next)
		{
			CvContour* cnt = (CvContour*)seq;
			double area = cvContourArea( cnt, CV_WHOLE_SEQ );
			if (fabs(area) <= FILTER_SIZE)
			{
				prev_seq = seq->h_prev;
				if( prev_seq )
				{
					prev_seq->h_next = seq->h_next;
					if( seq->h_next ) seq->h_next->h_prev = prev_seq;
				}
				else
				{
					first_seq = seq->h_next;
					if( seq->h_next ) seq->h_next->h_prev = NULL;
				}
			}
		}
		cvZero(pFore);
		cvDrawContours(pFore, first_seq, CV_RGB(255, 255, 255), CV_RGB(255, 255, 255), 10, -1);
		cvReleaseImageHeader(&pFore);
		cvReleaseMemStorage(&storage);
	}
}
Example #13
ICVConsumer::~ICVConsumer () {

	if (_image_in) {
		cvReleaseImageHeader (&_image_in);
	}
}
Example #14
void cveReleaseImageHeader(IplImage** image)
{
   cvReleaseImageHeader(image);
}
Example #15
int main()
{
	// important variables used in most applications
	int rc;
	Fg_Struct *fg = NULL;
	int img_nr;
	TrackingWindow cur;
	int seq[] = SEQ;

	// following lines are for displaying images only!  See OpenCV doc for more info.
	// they can be left out, if speed is important.
	IplImage *cvDisplay = NULL;

	cvDisplay = cvCreateImageHeader(cvSize(ROI_BOX, ROI_BOX), 
		BITS_PER_PIXEL, NUM_CHANNELS);
	cvNamedWindow(DISPLAY, CV_WINDOW_AUTOSIZE);
	
	// initialize the tracking window (i.e. blob and ROI positions)
	memset(&cur, 0, sizeof(TrackingWindow));
	set_initial_positions(&cur);

	// initialize the camera
	rc = init_cam(&fg, MEMSIZE(cur.roi_w, cur.roi_h), NUM_BUFFERS, CAMLINK);
	if(rc != FG_OK) {
		printf("init: %s\n", Fg_getLastErrorDescription(fg));
		Fg_FreeGrabber(fg);
		return rc;
	}

	// start acquiring images (this function also writes any buffered ROIs to the camera)
	rc = acquire_imgs(fg, (int *) &seq, SEQ_LEN);
	if(rc != FG_OK) {
		printf("init: %s\n", Fg_getLastErrorDescription(fg));
		Fg_FreeGrabber(fg);
		return rc;
	}

	// initialize parameters
	img_nr = 1;

	// start image loop and don't stop until the user presses 'q'
	printf("press 'q' at any time to quit this demo.\n");
	while(!(_kbhit() && _getch() == 'q')) {
		img_nr = Fg_getLastPicNumberBlocking(fg, img_nr, PORT_A, TIMEOUT);
		cur.img = (unsigned char *) Fg_getImagePtr(fg, img_nr, PORT_A);

		// make sure that camera returned a valid image
		if(cur.img != NULL) {
			// increment to the next desired frame.  This has to be at least
			// +2, because the camera's ROI will not be active until the second
			// frame (see Silicon Software FastConfig doc)
			img_nr += NEXT_IMAGE;

			// process image
			threshold(&cur, THRESHOLD);
			erode(&cur);

			// update ROI position
			position(&cur);

			// at this point position(...) has updated the ROI, but it only stores
			// the updated values internal to the code.  The next step is to flush
			// the ROI to the camera (see position(...) documentation).

			// write ROI position to camera to be updated on frame "img_nr"
			write_roi(fg, cur.roi, img_nr, !DO_INIT);

			// show image on screen
			display_tracking(&cur, cvDisplay);
		}
		else {
			// typically this state only occurs if an invalid ROI has been programmed
			// into the camera (e.g. roi_w == 4).
			printf("img is null: %d\n", img_nr);
			break;
		}
	}

	// free viewer resources
	cvReleaseImageHeader(&cvDisplay);

	// free camera resources
	rc = deinit_cam(fg);
	if(rc != FG_OK) {
		printf("deinit: %s\n", Fg_getLastErrorDescription(fg));
		return rc;
	}

	return FG_OK;
}
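display_tracking is external to this listing. A hedged sketch of what it plausibly does with the preallocated header (TrackingWindow field names follow the usage above):

/* Point the reusable display header at the grabber's current buffer and
 * show it; no pixel data is copied or owned by cvDisplay. */
static void display_tracking_sketch(TrackingWindow *cur, IplImage *disp)
{
	cvSetData(disp, cur->img, CV_AUTOSTEP);
	cvShowImage(DISPLAY, disp);
	cvWaitKey(1);	/* let HighGUI repaint */
}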
Example #16
File: cd.c Project: j0sh/thesis
static void process(kd_tree *kdt, IplImage *bkg, IplImage *diff,
    IplImage *img, char *outname)
{
    //int *imgc = block_coeffs(img, plane_coeffs);
    cvAbsDiff(img, bkg, diff_g);
    int *imgc = block_coeffs(diff_g, plane_coeffs);
    CvSize blksz = {(img->width/8)+7, (img->height/8)+7};
    IplImage *xy = prop_match_complete(kdt, imgc, bkg, blksz);
    IplImage *rev = splat(imgc, cvGetSize(img), plane_coeffs);
    xy2blks_special(xy, diff, recon_g, 8);
    cvAbsDiff(rev, recon_g, diff_g);
    cvReleaseImage(&rev);
    /*int *imgc;
    prop_coeffs(diff_g, plane_coeffs, &imgc);
    CvSize blksz = cvGetSize(bkg);
    IplImage *xy = prop_match_complete(kdt, imgc, bkg, blksz);
    xy2img(xy, diff, recon_g);
    cvAbsDiff(diff_g, recon_g, diff_g);*/
    //cvShowImage("diff_g before mul", diff_g);
    //cvAbsDiff(diff_g, diff, diff_g);
    //cvMul(diff_g, idiff, diff_g, 1);
    cvCvtColor(diff_g, gray_g, CV_BGR2GRAY);

    // full pixel dc
    //IplImage *dc = make_dc(gray_g);
    IplImage *mask = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    IplImage *dc_f = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 1);
    int *graydc = gck_calc_2d((uint8_t*)gray_g->imageData, img->width, img->height, KERNS, 1);
    IplImage *dc = cvCreateImageHeader(cvGetSize(img), IPL_DEPTH_32S, 1);
    int step = img->width + KERNS - 1;
    cvSetData(dc, graydc, step*sizeof(int));
    cvConvertScale(dc, dc_f, 1/255.0, 0);
    cvThreshold(gray_g, mask, 25, 255.0, CV_THRESH_BINARY);

    mask->width = orig_w;
    mask->height = orig_h;
    if (*outname) cvSaveImage(outname, mask, 0);

    /*double min = 0, max = 0;
    cvMinMaxLoc(dc, &min, &max, NULL, NULL, NULL);
    printf("min: %3f max: %3f\n", min, max);
    CvScalar scalar = cvRealScalar(-min);
    cvAddS(dc, scalar, dc_f, NULL);
    cvConvertScale(dc_f, dc_f, 1.0/(max - min), 0);*/

    // macroblock based counts
    //cvAdaptiveThreshold(gray_g, mask_g, 255, CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 3, 0);

    //cvSmooth(mask_g, mask_g, CV_GAUSSIAN, 9, 9, 0, 0);
    //cvSmooth(diff_g, diff_g, CV_GAUSSIAN, 5, 5, 0, 0);
    //cvLaplace(diff_g, lap_g, 3);
    //IplImage *xy = prop_match(bkg, img);
    //xy2img(xy, bkg, recon_g);
    //cvAbsDiff(recon_g, img, diff_g);
    /*cvShowImage("recon", recon_g);
    //cvShowImage("diff", rev);
    cvShowImage("diff_g", diff_g);
    cvShowImage("img", img);
    cvShowImage("gray", gray_g);
    cvShowImage("dc float", dc_f);
    cvShowImage("mask", mask);
    cvShowImage("dc", dc);*/
    free(imgc);
    cvReleaseImage(&xy);
    cvReleaseImageHeader(&dc);
    free(graydc);
    cvReleaseImage(&mask);
    cvReleaseImage(&dc_f);
}
Example #17
/* Function: main
 * 
 * Description: Main function; extracts frames from 2 video files and runs the
 *     rest of the program using them. Takes at least 10 command-line arguments,
 *     in the order:
 *        <number of camera pairs>
 *        <pair 1 camera 1 filename>
 *        <pair 1 camera 1 frame number>
 *        <pair 1 camera 2 filename>
 *        <pair 1 camera 2 frame number>
 *        <pair 1 view name>
 *        <pair 1 camera coefficients filename>
 *        ...
 *        <TPS smoothing parameter>
 *        <feature detector>
 *        <output directory>
 * 
 * Parameters:
 *     argc: number of commandline arguments
 *     argv: string array of commandline arguments
 * 
 * Returns: 0 on success, 1 on error.
 */
int main (int argc, char *argv[])
{    
    // check for minimum number of commandline arguments
    if (argc < 11)
    {
        printf("Usage:\nvideos\n\t<number of camera pairs>\n\t<pair 1 camera 1 filename>\n\t<pair 1 camera 1 frame number>\n\t<pair 1 camera 2 filename>\n\t<pair 1 camera 2 frame number>\n\t<pair 1 view name>\n\t<pair 1 camera coefficients filename>\n\t...\n\t<TPS smoothing parameter>\n\t<feature detector>\n\t<output directory>\n");
        exit(1);
    }
    
    // get the number of camera pairs
    int numCameraPairs = atoi(argv[1]);
    
    if (numCameraPairs <= 0)
    {
        printf("Invalid number of camera pairs.\n");
        exit(1);
    }
    
    // number of commandline arguments should be numCameraPairs*6 + 5
    if (argc != numCameraPairs*6 + 5)
    {
        printf("Usage:\nvideos\n\t<number of camera pairs>\n\t<pair 1 camera 1 filename>\n\t<pair 1 camera 1 frame number>\n\t<pair 1 camera 2 filename>\n\t<pair 1 camera 2 frame number>\n\t<pair 1 view name>\n\t<pair 1 camera coefficients filename>\n\t...\n\t<TPS smoothing parameter>\n\t<feature detector>\n\t<output directory>\n");
        exit(1);
    }
    
    // allocate memory to store information for camera pairs
    char **camera1Filenames = (char **)malloc(numCameraPairs * sizeof(char *));
    int *camera1Frames = (int *)malloc(numCameraPairs * sizeof(int));
    
    if (camera1Filenames == NULL || camera1Frames == NULL)
    {
        printf("Out of memory error.\n");
        exit(1);
    }
    
    char **camera2Filenames = (char **)malloc(numCameraPairs * sizeof(char *));
    int *camera2Frames = (int *)malloc(numCameraPairs * sizeof(int));
    
    if (camera2Filenames == NULL || camera2Frames == NULL)
    {
        printf("Out of memory error.\n");
        exit(1);
    }
    
    char **cameraNames = (char **)malloc(numCameraPairs * sizeof(char *));
    
    if (cameraNames == NULL)
    {
        printf("Out of memory error.\n");
        exit(1);
    }
    
    char **cameraCoefficientsFilenames = (char **)malloc(numCameraPairs * sizeof(char *));
    
    if (cameraCoefficientsFilenames == NULL)
    {
        printf("Out of memory error.\n");
        exit(1);
    }
    
    int argIndex = 2;
    
    for (int i = 0; i < numCameraPairs; i++)
    {        
        camera1Filenames[i] = argv[argIndex];    
        camera1Frames[i] = atoi(argv[argIndex+1]);
        camera2Filenames[i] = argv[argIndex+2];
        camera2Frames[i] = atoi(argv[argIndex+3]);
        cameraNames[i] = argv[argIndex+4];
        cameraCoefficientsFilenames[i] = argv[argIndex+5];
        
        // make sure input video frames are valid
        if (camera1Frames[i] <= 0)
        {
            printf("Invalid frame number for pair %d camera 1.\n", i+1);
            exit(1);
        }
        
        if (camera2Frames[i] <= 0)
        {
            printf("Invalid frame number for pair %d camera 2.\n", i+1);
            exit(1);
        }
        
        // make sure input filenames are valid
        if (!fileExists(camera1Filenames[i]))
        {
            printf("Could not open pair %d camera 1 video file.\n", i+1);
            exit(1);
        }
        
        if (!fileExists(camera2Filenames[i]))
        {
            printf("Could not open pair %d camera 2 video file.\n", i+1);
            exit(1);
        }
        
        if (!fileExists(cameraCoefficientsFilenames[i]))
        {
            printf("Could not open pair %d camera coefficients file.\n", i+1);
            exit(1);
        }
        
        argIndex += 6;
    }
    
    double regularization = atof(argv[argIndex]);
    char *featureDetector = argv[argIndex+1];
    char *outputDirectory = argv[argIndex+2];
            
    // make sure the input feature detector is recognized
    if (strcasecmp(featureDetector, FAST_FEATURE_DETECTOR) &&        
        strcasecmp(featureDetector, GFTT_FEATURE_DETECTOR) &&      
        strcasecmp(featureDetector, SURF_FEATURE_DETECTOR) &&
        strcasecmp(featureDetector, SIFT_FEATURE_DETECTOR) &&
        strcasecmp(featureDetector, SPEEDSIFT_FEATURE_DETECTOR))
    {
        printf("Feature Detector not recognized. Please select from the following:\n\t%s\n\t%s\n\t%s\n\t%s\n\t%s\n",
               FAST_FEATURE_DETECTOR,
               GFTT_FEATURE_DETECTOR,
               SURF_FEATURE_DETECTOR,
               SIFT_FEATURE_DETECTOR,
               SPEEDSIFT_FEATURE_DETECTOR);
        
        exit(1);
    }
    
    // make sure regularization parameter for TPS is valid
    if (regularization <= 0.0 || regularization == HUGE_VAL)
    {
        printf("Invalid smoothing parameter value.\n");
        exit(1);
    }
    
    // if output directory doesn't end with '/' char, append '/' to the string.
    // this is so we can later append a filename to the directory when we want 
    // to write the file to that directory
    if (outputDirectory[strlen(outputDirectory)-1] != '/')
    {
        strcat(outputDirectory, "/");
    }
    
    DIR *dir = opendir(outputDirectory);
    
    // if output directory does not exist, create it with correct permissions
    if (dir == NULL)
    {
        printf("Output directory does not exist.\n");
        
        if (mkdir(outputDirectory, S_IRWXO | S_IRWXG | S_IRWXU))
        {
            printf("Could not create output directory.\n");
            exit(1);
        }
        else
        {
            printf("Created output directory.\n");
        }
    }
    else
    {
        closedir(dir);
    }    
    
    // string for the MATLAB commands
    char command[500]; 
    
    Engine *matlabEngine;
    
    // open MATLAB engine
    if (!(matlabEngine = engOpen("\0")))
    {
        printf("Can't start MATLAB engine\n");        
        exit(1);
    }
    
    // create MATLAB arrays to retrieve values from MATLAB workspace
    mxArray **c1ImageData = (mxArray **)malloc(numCameraPairs * sizeof(mxArray *));
    mxArray **c1ImageDimensions = (mxArray **)malloc(numCameraPairs * sizeof(mxArray *));
    mxArray **c1ImagePaddedWidths = (mxArray **)malloc(numCameraPairs * sizeof(mxArray *));
    
    if (c1ImageData == NULL || c1ImageDimensions == NULL || c1ImagePaddedWidths == NULL)
    {
        printf("Out of memory error.\n");
        exit(1);
    }
    
    mxArray **c2ImageData = (mxArray **)malloc(numCameraPairs * sizeof(mxArray *));
    mxArray **c2ImageDimensions = (mxArray **)malloc(numCameraPairs * sizeof(mxArray *));
    mxArray **c2ImagePaddedWidths = (mxArray **)malloc(numCameraPairs * sizeof(mxArray *));
    
    if (c2ImageData == NULL || c2ImageDimensions == NULL || c2ImagePaddedWidths == NULL)
    {
        printf("Out of memory error.\n");
        exit(1);
    }
    
    // create IplImage arrays for camera 1 and 2 images for all camera pairs
    IplImage **c1Images = (IplImage **)malloc(numCameraPairs * sizeof(IplImage *));
    IplImage **c2Images = (IplImage **)malloc(numCameraPairs * sizeof(IplImage *));
    
    if (c1Images == NULL || c2Images == NULL)
    {
        printf("Out of memory error.\n");
        exit(1);
    }
    
    // for each camera pair, get the specified frames from cameras 1 and 2, using
    // MATLAB functions
    for (int i = 0; i < numCameraPairs; i++)
    {
        char video1Extension[6];
        
        // get the video file extension for the first video file
        if (getVideoFileExtension(camera1Filenames[i], video1Extension) == INVALID_VIDEO_FILE_EXTENSION_ERROR)
        {
            printf("Video files must be of extension .mrf or .cine.\n");
            exit(1);
        }
        
        // call appropriate MATLAB function depending on whether video file is .cine 
        // or .mrf to extract the frame as a MATLAB image. If neither, error.
        if ((strcasecmp(video1Extension, ".cine") == 0) || (strcasecmp(video1Extension, ".cin") == 0))
        {
            sprintf(command, "c1 = cineRead('%s', %d);", camera1Filenames[i], camera1Frames[i]);
            engEvalString(matlabEngine, command);
        }
        else if (strcasecmp(video1Extension, ".mrf") == 0)
        {
            sprintf(command, "c1 = mrfRead('%s', %d);", camera1Filenames[i], camera1Frames[i]);
            engEvalString(matlabEngine, command);
        }
        else
        {
            printf("Videos must be of extension .mrf or .cine.\n");
            exit(1);
        }
        
        char video2Extension[6];
        
        // get the video file extension for the second video file
        if (getVideoFileExtension(camera2Filenames[i], video2Extension) == INVALID_VIDEO_FILE_EXTENSION_ERROR)
        {
            printf("Video files must be of extension .mrf or .cine.\n");
            exit(1);
        }
        
        // call appropriate MATLAB function depending on whether video file is .cine 
        // or .mrf to extract the frame as a MATLAB image. If neither, error.
        if ((strcasecmp(video2Extension, ".cine") == 0) || (strcasecmp(video2Extension, ".cin") == 0))
        {
            sprintf(command, "c2 = cineRead('%s', %d);", camera2Filenames[i], camera2Frames[i]);
            engEvalString(matlabEngine, command);
        }
        else if (strcasecmp(video2Extension, ".mrf") == 0)
        {
            sprintf(command, "c2 = mrfRead('%s', %d);", camera2Filenames[i], camera2Frames[i]);
            engEvalString(matlabEngine, command);
        }
        else
        {
            printf("Videos must be of extension .mrf or .cine.\n");
            exit(1);
        }
        
        // call MATLAB function convert_image_matlab2cv_gs on MATLAB images to convert
        // them into a format that will be compatible with the IplImages of OpenCV
        sprintf(command, "[c1_img c1_dim c1_padded_width] = convert_image_matlab2cv_gs(c1);");    
        engEvalString(matlabEngine, command);
        
        sprintf(command, "[c2_img c2_dim c2_padded_width] = convert_image_matlab2cv_gs(c2);");
        engEvalString(matlabEngine, command);
        
        // retrieve the image data, image dimensions, and image padded width variables 
        // from MATLAB for both camera images
        c1ImageData[i] = engGetVariable(matlabEngine, "c1_img");
        c1ImageDimensions[i] = engGetVariable(matlabEngine, "c1_dim");
        c1ImagePaddedWidths[i] = engGetVariable(matlabEngine, "c1_padded_width");
        
        c2ImageData[i] = engGetVariable(matlabEngine, "c2_img");
        c2ImageDimensions[i] = engGetVariable(matlabEngine, "c2_dim");
        c2ImagePaddedWidths[i] = engGetVariable(matlabEngine, "c2_padded_width");    
        
        if (c1ImageData[i] == NULL || 
            c1ImageDimensions[i] == NULL || 
            c1ImagePaddedWidths[i] == NULL)
        {        
            printf("Could not retrieve all necessary information for pair %d camera 1 frame %d from MATLAB.\n", i+1, camera1Frames[i]);
            exit(1);
        }
        
        if (c2ImageData[i] == NULL || 
            c2ImageDimensions[i] == NULL || 
            c2ImagePaddedWidths[i] == NULL)
        {        
            printf("Could not retrieve all necessary information for pair %d camera 2 frame %d from MATLAB.\n", i+1, camera2Frames[i]);
            exit(1);
        }
        
        int c1Status, c2Status;
        
        ImageInfo c1ImageInfo, c2ImageInfo;            
        
        // extract the image information from the MATLAB variables in the form of 
        // mxArrays, and store in ImageInfo structs
        c1Status = getInputImageInfo(&c1ImageInfo, c1ImageData[i], c1ImageDimensions[i], c1ImagePaddedWidths[i]);
        c2Status = getInputImageInfo(&c2ImageInfo, c2ImageData[i], c2ImageDimensions[i], c2ImagePaddedWidths[i]);
        
        if (c1Status == IMAGE_INFO_DATA_ERROR)
        {
            printf("Pair %d camera 1: Images must have two dimensions.\n", i+1);
            exit(1);
        }
        
        if (c2Status == IMAGE_INFO_DATA_ERROR)
        {
            printf("Pair %d camera 2: Images must have two dimensions.\n", i+1);
            exit(1);
        }
        
        if (c1Status == IMAGE_INFO_DIMENSIONS_ERROR)
        {
            printf("Pair %d camera 1: Image dimension vectors must contain two elements: [width, height].\n", i+1);
            exit(1);
        }
        
        if (c2Status == IMAGE_INFO_DIMENSIONS_ERROR)
        {
            printf("Pair %d camera 2: Image dimension vectors must contain two elements: [width, height].\n", i+1);
            exit(1);
        }
        
        if (c1Status == IMAGE_INFO_PADDED_WIDTH_ERROR)
        {
            printf("Pair %d camera 1: Padded image widths must be scalars.\n", i+1);
            exit(1);
        }
        
        if (c2Status == IMAGE_INFO_PADDED_WIDTH_ERROR)
        {
            printf("Pair %d camera 2: Padded image widths must be scalars.\n", i+1);
            exit(1);
        }
        
        if (c1Status == IMAGE_DEPTH_ERROR)
        {
            printf("Pair %d camera 1: Images must be represented by 8 or 16-bit integers.\n", i+1);
            exit(1);
        }
        
        if (c2Status == IMAGE_DEPTH_ERROR)
        {
            printf("Pair %d camera 2: Images must be represented by 8 or 16-bit integers.\n", i+1);
            exit(1);
        }
        
        // create IplImages using values in ImageInfo structs
        c1Status = createIplImageFromImageInfo(&(c1Images[i]), c1ImageInfo);
        c2Status = createIplImageFromImageInfo(&(c2Images[i]), c2ImageInfo);
        
        if (c1Status == OUT_OF_MEMORY_ERROR ||
            c2Status == OUT_OF_MEMORY_ERROR)
        {
            printf("Out of memory error.\n");
            exit(1);
        }
        
        // flip the images over the y-axis to compensate for the differences in axial
        // labels between MATLAB and OpenCV (camera coefficients would not correctly
        // correspond to image otherwise)
        cvFlip(c1Images[i], NULL, 1);
        cvFlip(c2Images[i], NULL, 1);
    }
    
    char errorMessage[500];
    
    int numContours;
    char **contourNames;
    CvPoint3D32f **features3D;
    char **validFeatureIndicator;
    int *numFeaturesInContours;
    
    char contoursFilename[MAX_FILENAME_LENGTH];
    
    // for each camera pair, run features and triangulation
    for (int i = 0; i < numCameraPairs; i++)
    {
        // create the output 2D features filename as "frame<frame number>_features2D_<camera name>.txt"
        char features2DFilename[MAX_FILENAME_LENGTH];    
        sprintf(features2DFilename, "%sframe%d_features2D_%s.txt", outputDirectory, camera1Frames[i], cameraNames[i]);
        
        // create the output contours filename as "frame<frame number>_contours_<camera name>.txt"
        char tempContoursFilename[MAX_FILENAME_LENGTH];    
        sprintf(tempContoursFilename, "%sframe%d_contours_%s.txt", outputDirectory, camera1Frames[i], cameraNames[i]);
        
        printf("Camera pair for %s view:\n", cameraNames[i]);
        
        // run the features program to extract matching 2D features from the 2 
        // images within user defined contour
        if (features(c1Images[i], c2Images[i], features2DFilename, tempContoursFilename, featureDetector, errorMessage))
        {
            printf("Features: %s\n", errorMessage);
            exit(1);
        }
        
        // we only need to save the contour(s) for the first camera pair, as that 
        // is the one we will use to create the meshes, and we only use the contours
        // with the same name(s) in subsequent camera pairs
        if (i == 0)
        {
            strcpy(contoursFilename, tempContoursFilename);
            
            // get the contour names of the contours selected in features function for
            // output file naming and contour matching in other camera pairs
            int status = readContourNamesFromInputFile(&numContours, &contourNames, contoursFilename);
            
            if (status == INPUT_FILE_OPEN_ERROR)
            {
                printf("Could not open contour vertices file.\n");
                exit(1);
            }
            
            if (status == INCORRECT_INPUT_FILE_FORMAT_ERROR)
            {
                printf("Contour vertices file has incorrect format.\n");
                exit(1);
            }
            
            if (status == OUT_OF_MEMORY_ERROR)
            {
                printf("Out of memory error.\n");
                exit(1);
            }
            
            // allocate memory for 3D features
            features3D = (CvPoint3D32f **)malloc(numContours * sizeof(CvPoint3D32f *));
            validFeatureIndicator = (char **)malloc(numContours * sizeof(char *));
            numFeaturesInContours = (int *)malloc(numContours * sizeof(int));
            
            if (features3D == NULL || numFeaturesInContours == NULL || validFeatureIndicator == NULL)
            {
                printf("Out of memory error.\n");
                exit(1);
            }
            
            for (int j = 0; j < numContours; j++)
            {
                features3D[j] = (CvPoint3D32f *)malloc(MAX_FEATURES_IN_CONTOUR * sizeof(CvPoint3D32f));
                validFeatureIndicator[j] = (char *)malloc(MAX_FEATURES_IN_CONTOUR * sizeof(char));
                
                if (features3D[j] == NULL || validFeatureIndicator[j] == NULL)
                {
                    printf("Out of memory error.\n");
                    exit(1);
                }
                
                numFeaturesInContours[j] = 0;
            }
        }
        
        // create the output 3D features filename as "frame<frame number>_features3D_<camera name>.txt"
        char features3DFilename[MAX_FILENAME_LENGTH];    
        sprintf(features3DFilename, "%sframe%d_features3D_%s.txt", outputDirectory, camera1Frames[i], cameraNames[i]);
        
        // triangulate the matching 2D features between cameras to find the 3D coordinates 
        // of the features, and remove invalid features
        if (triangulation(cameraCoefficientsFilenames[i], features2DFilename, features3DFilename, c1Images[i], errorMessage))
        {
            printf("Triangulation: %s\n", errorMessage);
            exit(1);
        }
        
        // if features from triangulation lie within contours that have the same
        // names as those defined for the first camera pair, add them to the
        // 3D features array for mesh creation
        int status = read3DFeaturesFromFileForMatchingContours(features3DFilename, features3D, numFeaturesInContours, numContours, contourNames);
        
        if (status == INPUT_FILE_OPEN_ERROR)
        {
            printf("Could not open 3D features file.\n");
            exit(1);
        }
        
        if (status == INVALID_NUM_CONTOURS_ERROR)
        {
            printf("At least 1 contour region required.\n");
            exit(1);
        }
        
        if (status == INCORRECT_INPUT_FILE_FORMAT_ERROR)
        {
            printf("3D features file has incorrect format.\n");
            exit(1);
        }
    }        
    
    // for each contour (defined for the first camera pair), perform RANSAC on
    // the cumulative 3D features from all camera pairs that lie within the contour
    for (int i = 0; i < numContours; i++)
    {    
        memset(validFeatureIndicator[i], 1, numFeaturesInContours[i] * sizeof(char));

        // perform RANSAC to remove points that lie too far off a best-fit surface
        if (ransac(features3D[i], validFeatureIndicator[i], numFeaturesInContours[i], errorMessage))
        {
            printf("RANSAC: %s\n", errorMessage);
            exit(1);
        }
        
        int numValidFeatures = 0;
        
        for (int j = 0; j < numFeaturesInContours[i]; j++)
        {
            if (validFeatureIndicator[i][j])
            {
                numValidFeatures++;
            }
        }
        
        printf("Total valid features after RANSAC for contour %s: %d\n", contourNames[i], numValidFeatures);

    }
    
    // create the output 3D features filename for all camera pairs as 
    // "frame<frame number>_features3D.txt", and write the result of RANSAC to
    // the file
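    // (e.g. "out/frame7_features3D.txt" for the same hypothetical values as above)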
    char features3DFilename[MAX_FILENAME_LENGTH];    
    sprintf(features3DFilename, "%sframe%d_features3D.txt", outputDirectory, camera1Frames[0]);
    
    int status = write3DFeaturesToFile(features3D, validFeatureIndicator, numFeaturesInContours, contourNames, numContours, features3DFilename);
    
    if (status == OUTPUT_FILE_OPEN_ERROR)
    {
        printf("Could not open output file.\n");
        exit(1);
    }
    
    char **meshFilenames = (char **)malloc(numContours * sizeof(char *));
    
    if (meshFilenames == NULL)
    {
        printf("Out of memory error.\n");
        exit(1);
    }
    
    // for each contour, create a different mesh output file
    for (int i = 0; i < numContours; i++)
    {
        meshFilenames[i] = (char *)malloc(MAX_FILENAME_LENGTH * sizeof(char));
        
        if (meshFilenames[i] == NULL)
        {
            printf("Out of memory error.\n");
            exit(1);
        }
        
        // create the output mesh filename as "frame<frame number>_mesh_<contour name>.txt"
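        // (e.g. "out/frame7_mesh_wing.txt" for a hypothetical contour named "wing")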
        sprintf(meshFilenames[i], "%sframe%d_mesh_%s.txt", outputDirectory, camera1Frames[0], contourNames[i]);
    }
    
    // create the wing meshes from the triangulated 3D points and the user-selected
    // contours, and write each mesh to a different file for each contour
    if (mesh(features3DFilename, contoursFilename, cameraCoefficientsFilenames[0], meshFilenames, numContours, regularization, errorMessage))
    {
        printf("Mesh: %s\n", errorMessage);
        exit(1);
    }
    
    // we only calculate the flow of a wing mesh if there is a mesh file with the
    // same contour name in the output directory for the previous video frame
    char **flowFilenames = (char **)malloc(numContours * sizeof(char *));
    
    if (flowFilenames == NULL)
    {
        printf("Out of memory error.\n");
        exit(1);
    }
    
    for (int i = 0; i < numContours; i++)
    {
        flowFilenames[i] = NULL;
    }
    
    int numFilesInDirectory;
    char **filenamesInDirectory = (char **)malloc(MAX_FILES_IN_DIRECTORY * sizeof(char *));
    
    if (filenamesInDirectory == NULL)
    {
        printf("Out of memory error.\n");
        exit(1);
    }
    
    for (int i = 0; i < MAX_FILES_IN_DIRECTORY; i++)
    {
        filenamesInDirectory[i] = (char *)malloc(MAX_FILENAME_LENGTH * sizeof(char));
        
        if (filenamesInDirectory[i] == NULL)
        {
            printf("Out of memory error.\n");
            exit(1);
        }
    }
    
    // get all files in the output directory
    getAllFilenamesInDirectory(outputDirectory, &numFilesInDirectory, filenamesInDirectory);
     
    // for each contour, check whether a mesh file for the same contour from the
    // previous frame exists in the output directory
    for (int i = 0; i < numContours; i++)
    {
        // the substring indicating a match is "frame<previous frame number>_mesh_<contour name>.txt"
        char filenameToMatch[MAX_FILENAME_LENGTH];
        sprintf(filenameToMatch, "frame%d_mesh_%s.txt", camera1Frames[0]-1, contourNames[i]);
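        // (e.g. "frame6_mesh_wing.txt" when the current frame is 7; note there is
        // no directory prefix, since the directory listing returns bare filenames)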
        
        // look for a filename in the output directory containing the substring,
        // which indicates a previous-frame mesh exists for the same contour
        int matchIndex = getIndexOfMatchingString(filenamesInDirectory, numFilesInDirectory, filenameToMatch);
        
        // if a matching filename was found, create a flow output file for the
        // current contour and call flow to calculate the flow between the
        // previous and current contour meshes
        if (matchIndex != -1)
        {
            flowFilenames[i] = (char *)malloc(MAX_FILENAME_LENGTH * sizeof(char));
            
            if (flowFilenames[i] == NULL)
            {
                printf("Out of memory error.\n");
                exit(1);
            }
            
            // create the output flow filename as "frame<frame number>_flow_<contour name>.txt"
            sprintf(flowFilenames[i], "%sframe%d_flow_%s.txt", outputDirectory, camera1Frames[0], contourNames[i]);
            
            // add the output directory name to the beginning of the previous mesh
            // filename
            char prevFrameMeshFile[MAX_FILENAME_LENGTH];
            sprintf(prevFrameMeshFile, "%s%s", outputDirectory, filenameToMatch);
            
            // call flow to find the flow between the previous mesh file and the
            // current mesh file for each mesh point in the current contour
            if (flow(prevFrameMeshFile, meshFilenames[i], flowFilenames[i], errorMessage))
            {
                printf("Flow: %s\n", errorMessage);
                exit(1);
            }
        }
        else
        {
            printf("Mesh points file for previous frame not found for contour %s. Unable to calculate flow.\n", contourNames[i]);
        }
    }
    
    sprintf(command, "hold on;");
    engEvalString(matlabEngine, command);
    
    // for each contour, display MATLAB 3D plot of the mesh, as well as the flow 
    // for the mesh, if applicable
    for (int i = 0; i < numContours; i++)
    {        
        if (flowFilenames[i] != NULL)
        {
            sprintf(command, "flows = load('%s');", flowFilenames[i]);
            engEvalString(matlabEngine, command);
            
            // plot the flow of each mesh point: columns 1-3 of flows hold the
            // point positions, columns 4-6 the flow vectors (quiver3 x,y,z,u,v,w)
            sprintf(command, "quiver3(flows(:,1), flows(:,2), flows(:,3), flows(:,4), flows(:,5), flows(:,6), 4, 'r-');");
            engEvalString(matlabEngine, command);
            
        }
        
        sprintf(command, "mesh = importdata('%s', ' ', 1);", meshFilenames[i]);
        engEvalString(matlabEngine, command);
        
        // plot the mesh points
        sprintf(command, "plot3(mesh.data(:,1), mesh.data(:,2), mesh.data(:,3), 'b.');");
        engEvalString(matlabEngine, command);
    }
    
    // reverse the z and y axis directions in the display
    sprintf(command, "set(gca,'zdir','reverse','ydir','reverse');");
    engEvalString(matlabEngine, command);
    
    // scale the axes to be equal
    sprintf(command, "axis equal");
    engEvalString(matlabEngine, command);
    
    // wait for the user to hit enter
    printf("Hit return to continue.\n");
    fgetc(stdin);
    
    // close MATLAB engine
    engClose(matlabEngine);
    
    // cleanup
    free(camera1Filenames);
    free(camera1Frames);
    free(camera2Filenames);
    free(camera2Frames);
    free(cameraNames);
    free(cameraCoefficientsFilenames);
    
    for (int i = 0; i < numCameraPairs; i++)
    {
        mxDestroyArray(c1ImageData[i]);
        mxDestroyArray(c1ImageDimensions[i]);
        mxDestroyArray(c1ImagePaddedWidths[i]);
        
        mxDestroyArray(c2ImageData[i]);
        mxDestroyArray(c2ImageDimensions[i]);
        mxDestroyArray(c2ImagePaddedWidths[i]);
        
        free(c1Images[i]->imageData);
        cvReleaseImageHeader(&c1Images[i]);
        
        free(c2Images[i]->imageData);
        cvReleaseImageHeader(&c2Images[i]);
    }
    
    free(c1ImageData);
    free(c1ImageDimensions);
    free(c1ImagePaddedWidths);
    
    free(c2ImageData);
    free(c2ImageDimensions);
    free(c2ImagePaddedWidths);
    
    free(c1Images);
    free(c2Images);
    
    for (int i = 0; i < MAX_FILES_IN_DIRECTORY; i++)
    {
        free(filenamesInDirectory[i]);
    }
    
    free(filenamesInDirectory);
    
    for (int i = 0; i < numContours; i++)
    {
        free(contourNames[i]);
        free(features3D[i]);
        free(validFeatureIndicator[i]);
                
        free(meshFilenames[i]);
        
        if (flowFilenames[i] != NULL)
        {
            free(flowFilenames[i]);
        }
    }
    
    free(contourNames);
    free(features3D);
    free(validFeatureIndicator);
    free(numFeaturesInContours);
    
    free(meshFilenames);
    free(flowFilenames);
    
    exit(0);
}
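The directory helpers getAllFilenamesInDirectory and getIndexOfMatchingString are called above but their definitions are not part of this listing. A minimal sketch of what they might look like, assuming POSIX dirent.h and the same MAX_FILES_IN_DIRECTORY and MAX_FILENAME_LENGTH constants used above (the real signatures and error handling may differ):

#include <dirent.h>
#include <string.h>

// sketch: copy the name of every entry in the given directory into the
// caller's preallocated filenames array, and report how many were found
void getAllFilenamesInDirectory(const char *directory, int *numFiles, char **filenames)
{
    DIR *dir = opendir(directory);
    struct dirent *entry;
    int count = 0;

    if (dir == NULL)
    {
        *numFiles = 0;
        return;
    }

    while ((entry = readdir(dir)) != NULL && count < MAX_FILES_IN_DIRECTORY)
    {
        strncpy(filenames[count], entry->d_name, MAX_FILENAME_LENGTH - 1);
        filenames[count][MAX_FILENAME_LENGTH - 1] = '\0';
        count++;
    }

    closedir(dir);
    *numFiles = count;
}

// sketch: return the index of the first string containing stringToMatch
// as a substring, or -1 if no string matches
int getIndexOfMatchingString(char **strings, int numStrings, const char *stringToMatch)
{
    for (int i = 0; i < numStrings; i++)
    {
        if (strstr(strings[i], stringToMatch) != NULL)
        {
            return i;
        }
    }

    return -1;
}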