Example #1
/** 
 * setInput	-	set the name of the expected video file
 *
 * @param fileName	-	the name of the video file
 *
 * @return True if success. False otherwise
 */
bool VideoProcessor::setInput(const std::string &fileName)
{
    fnumber = 0;
    tempFile = fileName;

    // In case a resource was already
    // associated with the VideoCapture instance
    if (isOpened()){
        capture.release();
    }

    // Open the video file
    if(capture.open(fileName)){
        // read parameters
        length = capture.get(CV_CAP_PROP_FRAME_COUNT);
        rate = getFrameRate();
        cv::Mat input;
        // show first frame
        getNextFrame(input);
        emit showFrame(input);
        emit updateBtn();
        return true;
    } else {
        return false;
    }
}
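A minimal caller sketch (hypothetical helper and file name; VideoProcessor as used throughout these examples): setInput() returns false when the file cannot be opened, so the result should be checked before starting playback.

#include <iostream>

// Hypothetical usage: open a clip and start playback only if it opened successfully.
void openAndPlay(VideoProcessor &processor)
{
    if (processor.setInput("clip.avi")) {   // emits showFrame()/updateBtn() on success
        processor.playIt();                 // see Example #19
    } else {
        std::cerr << "could not open clip.avi" << std::endl;
    }
}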
Example #2
QImage *AcquisitionModule::getNextFrame() {

  if (m_currentImageIndex >= m_numberOfFiles) {
    if (m_parseDirectory) {
      // We have to change directory
      m_currentDirIndex++;
      openFiles();
    } else {
      // We reached the end of the sequences!
      std::cout << "CDiskInput::getNextFrame() : End of the sequence reached.\n";
      return NULL;
    }
  }
        
  if (m_parseDirectory && m_currentDirIndex >= m_numberOfDirs) {
    // We reached the end of the sequences!
    std::cout << "CDiskInput::getNextFrame() : End of the sequence reached.\n";
    return NULL;
  }
  
  int retValue;
  QImage *tmpImage = NULL;
  retValue = readImageOnDisk(&tmpImage);

  if (retValue != 1) {
    // The image is corrupted, we attempt to read the following one...
    std::cout << "CDiskInput::getNextFrame() : ERROR : error reading an image, skipping...\n";
    return getNextFrame();
  } else 
    readTimeStamp(tmpImage);

  m_currentImageIndex++;
  m_NumFrame++;
  return tmpImage;
}
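On a corrupted image the method above retries by calling itself recursively, so a long run of unreadable files deepens the call stack by one frame per failure. Below is a hypothetical loop-based sketch with the same member names and the same skip behaviour; like the original, it assumes readImageOnDisk() eventually moves past a bad file so the retry terminates.

QImage *AcquisitionModule::getNextFrame() {
  while (true) {
    if (m_currentImageIndex >= m_numberOfFiles) {
      if (!m_parseDirectory) {
        // We reached the end of the sequence
        return NULL;
      }
      // We have to change directory
      m_currentDirIndex++;
      openFiles();
    }
    if (m_parseDirectory && m_currentDirIndex >= m_numberOfDirs) {
      // We reached the end of the sequences
      return NULL;
    }

    QImage *tmpImage = NULL;
    if (readImageOnDisk(&tmpImage) != 1) {
      // The image is corrupted: skip it and try again instead of recursing
      continue;
    }
    readTimeStamp(tmpImage);
    m_currentImageIndex++;
    m_NumFrame++;
    return tmpImage;
  }
}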
Example #3
/** 
 * writeOutput	-	write the processed result
 *
 */
void VideoProcessor::writeOutput()
{
    cv::Mat input;

    // if no capture device has been set
    if (!isOpened() || !writer.isOpened())
        return;

    // save the current position
    long pos = curPos;
    
    // jump to the first frame
    jumpTo(0);

    while (getNextFrame(input)) {

        // write output sequence
        if (outputFile.length()!=0)
            writeNextFrame(input);
    }

    // set the modify flag to false
    modify = false;

    // release the writer
    writer.release();

    // jump back to the original position
    jumpTo(pos);
}
Example #4
/**
 * @brief Video::convertToEvent
 * @param path
 * @return
 */
EventPtr Video::convertToEvent(std::string path){
	cv::Mat shot;
	FramePtr frame;
	EventPtr event;

	unsigned int j=0;
	int framecount=0;

	double tmpPos = getFramePos();
	setFramePos(0);

	emit startProgress(0, (uint) getLengthFrames());

	while(getNextFrame(shot)){
		emit progressChanged(j);

		if (event.isNull()){
			event = EventPtr(new Event(this));
		}
		// create new frame
		frame = FramePtr(new Frame(this, shot, path));
		// add frame to event
		event->addFrame(frame);
		framecount ++;
		j++;
	}

	setFramePos(tmpPos);

	return event;
}
Example #5
void Cube(double time){
   float tm[16], rm[16], mm[16];
   GLuint loc;
   static double prev_time = 0.0;

   translate(tm, 0.5, 0.5, -3.0 - time*1.5);
   rotate(rm, sin(time) / M_PI, cos(time) / M_PI, M_PI);
   matrixMultiply4x4(rm, tm, mm);

   /* 30fps video playback */
   if(time - prev_time > 0.03){
      getNextFrame(matat);
      updateBigCube(matat.buffer, &bigcube);
      prev_time = time;
      matatpos++;
   }

   glUseProgram(bigcube_shader);
   loc = glGetUniformLocation(bigcube_shader, "camera");
   glUniformMatrix4fv(loc, 1, GL_FALSE, mm);
   loc = glGetUniformLocation(bigcube_shader, "pmatrix");
   glUniformMatrix4fv(loc, 1, GL_FALSE, pmatrix);

   drawBigCube(bigcube);

   if(matatpos > 55){
      rewindDepthVideo(matat);
      matatpos = 1;
   }
}
Example #6
void Matatas(double time){
   static double prev_time = 0.0;
   GLuint loc;
   float tmatrix[16];
   float rmatrix[16];
   float mmatrix[16];

   translate(tmatrix, -0.4, 3.0, -7.0);
   rotate(rmatrix, 0.0, 0.0, 4.1);
   matrixMultiply4x4(rmatrix, tmatrix, mmatrix);
   /* 30fps video playback */
   if(time - prev_time > 0.03){
      getNextFrame(matat);
      updateFace(matat.buffer, armface);
      prev_time = time;
      matatpos++;
   }

   glUseProgram(armface->shader);
   loc = glGetUniformLocation(armface->shader, "pmatrix");
   glUniformMatrix4fv(loc, 1, GL_FALSE, pmatrix);
   loc = glGetUniformLocation(armface->shader, "mmatrix");
   glUniformMatrix4fv(loc, 1, GL_FALSE, mmatrix);
   drawFace(armface);

   if(matatpos > 55){
      rewindDepthVideo(matat);
      matatpos = 1;
   }
}
Example #7
/* The last scene. I have made it first. :-)*/
void Headbreak(double time){
   static int i = 0;
   static float rot = 0.0;
   static double prev_time;
   GLuint loc;
   float tmatrix[16];
   float rmatrix[16];
   float mmatrix[16];

   translate(tmatrix, -0.4, 3.0, -7.0);
   rotate(rmatrix, 0.0, 0.0, 4.1);
   matrixMultiply4x4(rmatrix, tmatrix, mmatrix);
   /* 30fps video playback */
   if(time - prev_time > 0.03){
      getNextFrame(head);
      updateFace(head.buffer, face);
      prev_time = time;
      i++;
   }

   glUseProgram(face->shader);
   loc = glGetUniformLocation(face->shader, "pmatrix");
   glUniformMatrix4fv(loc, 1, GL_FALSE, pmatrix);
   loc = glGetUniformLocation(face->shader, "mmatrix");
   glUniformMatrix4fv(loc, 1, GL_FALSE, mmatrix);
   drawFace(face);

   rot += 0.05;
   if(i > 87){
      rewindDepthVideo(head);
      i = 1;
   }
}
Example #8
	void Sprite::animate()
	{
		if ( !isAnimated() )
		{
			return;
		}

		if ( ( timer->getCurrentTime() - previousTime ) > frameTime )
		{
			getNextFrame();
		}
	}
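Note that previousTime is not advanced in this method; if getNextFrame() does not update it internally, the elapsed-time check fires on every call once the first interval has passed. A hedged sketch of the usual timer pattern, assuming previousTime is a member this method is allowed to reset (the original project may handle this elsewhere):

	void Sprite::animate()
	{
		if ( !isAnimated() )
		{
			return;
		}

		// assumed: the reference point is advanced here, not inside getNextFrame()
		const auto now = timer->getCurrentTime();
		if ( ( now - previousTime ) > frameTime )
		{
			previousTime = now;
			getNextFrame();
		}
	}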
Example #9
void CameraContainer::run()
{
    if (running)
        return;

    running = true;

    while (running)
    {
        getNextFrame();
    }
}
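The loop above spins until running is cleared. If that flag is cleared from another thread (a stop button, for instance), it needs to be safe for concurrent access; a minimal sketch of that shape, with assumed member and method names:

#include <atomic>

// Hypothetical container shape; only the flag handling is the point here.
class CameraContainer {
public:
    void run() {
        if (running.exchange(true))   // already running
            return;
        while (running)
            getNextFrame();
    }
    void stop() { running = false; }  // safe to call from another thread

private:
    void getNextFrame();              // grabs one frame, as in the example above
    std::atomic<bool> running{false};
};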
Example #10
QImage *AcquisitionModule::getFrame(int i_numFrame) {

  m_startFrame = i_numFrame;
    
  if (m_parseDirectory)
    openDir();
    
  if (!goToFirstFrame())
    return NULL;
					
  return getNextFrame();
}
Example #11
void JpegReceiver::createNextPicture()
{
	JpegWrap frame = getNextFrame();
	if (frame.length == 0) return;
	DWORD tm = GetTickCount();
	CxImage* image = new CxImage(frame.pic.get(), frame.length, CXIMAGE_FORMAT_JPG);
	Picture pic = Picture::createFromCxImage(image);
	delete image;
//	Capture_Log::getInstance()->log_write("Time is %d\n", GetTickCount() - tm);
	lockImage();
	curPic = pic;
	releaseImage();
}
Example #12
const Graphics::Surface *RlfAnimation::getFrameData(uint frameNumber) {
	assert(!_stream);
	assert(frameNumber < _frameCount);

	// Since this method is so expensive, first check to see if we can use
	// getNextFrame(), which is cheap.
	if ((int)frameNumber == _currentFrame) {
		return &_currentFrameBuffer;
	} else if (_currentFrame + 1 == (int)frameNumber) {
		return getNextFrame();
	}

	seekToFrame(frameNumber);
	return &_currentFrameBuffer;
}
Example #13
bool InputProcessing::saveDerivativeFeatures(ofstream & file, int x, int y, Size screenSize) {

    Mat frame = getNextFrame(frameCount++);
    Mat gray;

    cvtColor(frame, gray, CV_BGR2GRAY);

    vector<Point> features = getFeatures(gray);
    if (features.size() != 6)
        return false;

    if (DEBUG_MODE) {
        circle(frame, features[0], 2, Scalar(10, 255, 255), -1, 8, 0);
        circle(frame, features[1], 2, Scalar(20, 210, 21), -1, 8, 0);
        circle(frame, features[2], 2, Scalar(10, 255, 255), -1, 8, 0);
        circle(frame, features[3], 2, Scalar(10, 255, 255), -1, 8, 0);
        circle(frame, features[4], 2, Scalar(20, 210, 21), -1, 8, 0);
        circle(frame, features[5], 2, Scalar(10, 255, 255), -1, 8, 0);

        imshow(WINDOW_NAME, frame);
    }
    vector<double> derf = getDerivativeFeatures(features, gray.size());

    if (derf[0] < 0 || derf[0] > 5) {
        return false;
    }

    if (derf[1] < 0 || derf[1] > 5) {
        return false;
    }

    file << derf[0] << ' '
         << derf[1] << ' '
         << derf[2] << ' '
         << derf[3] << ' '
         << derf[4] << ' '
         << derf[5] << ' '
         << derf[6] << ' '
         << derf[7] << ' '
         << derf[8] << ' '
         << x / (double) screenSize.width << ' '
         << y / (double) screenSize.height
         << endl;


    return true;
}
Example #14
 bool CmpParser::load( std::string fileName, MoMa::Track *tr, bool hasRotation ){
     this->openFile(fileName);
     unsigned int dim=hdr.sampSize/4/3;
     arma::cube trackCont(3,dim,hdr.nSamples);
     arma::Col<double> frameTemp;
     unsigned int t=0;
     while (getNextFrame(frameTemp,0)){
         trackCont.slice(t)=arma::reshape(frameTemp,3,frameTemp.n_elem/3);
         t++;
     }
     tr->position.setData(1.0E7/hdr.sampPeriod, trackCont);
     tr->setFrameRate( 1.0E7/hdr.sampPeriod );
     
     return true;
 }
Example #15
QImage VideoDecoder::getNextFrame(qint64 time)
{
    //if(init)
    {
        //   //qDebug() << "out" << i << "\n";
        if(av_read_frame(videoFormatContext, &videoPacket)>=0) {
            //  qDebug() << "PTS" << videoPacket.pts;
            //qDebug() << "DTS" << videoPacket.dts;
            QImage res =  getNextFrame(videoPacket, time);
            //av_free_packet(&videoPacket);
            return res;
        }
        else return QImage(10,10,QImage::Format_RGB888);
        //  imggg.save("F:/kaka" + QString::number(i) + ".png");
        //return imggg;
    }
    // else return QImage(800,600,QImage::Format_RGB888);
}
Example #16
/** \brief this function implements the acquisition thread */
static void *acquisitionThreadFunc(void* val) {
    std::string basename=*(static_cast<std::string*>(val));
	// write basename.txt file
	FILE* ftxt=fopen(std::string(basename+".txt").c_str(), "w");
	if (ftxt) {
	    fprintf(ftxt, "example_image_server.cpp demo acquisition");
		fclose(ftxt);
	}
	// start basename.tif file for acquired frames
	uint16_t* frame=(uint16_t*)malloc(img_width*img_height*sizeof(uint16_t));
	TinyTIFFFile* tif=TinyTIFFWriter_open(std::string(basename+".tif").c_str(), 16, img_width, img_height);	
	maxWriteFrames=floor(acquisition_duration/exposure);
    framesCompleted=0;
    if (tif) {
		HighResTimer timer;
		timer.start();
		double seconds=timer.get_time()/1e6;
		double lastt=seconds;
		// perform acquisition
		bool canceled=!cam_acquisition_running;
		while (seconds<acquisition_duration && !canceled) {			
			seconds = timer.get_time()/1e6;
			if (fabs(seconds-lastt)>=exposure) {
				getNextFrame(framesCompleted, frame, img_width, img_height);
				TinyTIFFWriter_writeImage(tif, frame);
				pthread_mutex_lock(&mutexframesCompleted);
				// count acquired frames
				framesCompleted++;
				// cancel acquisition when cam_acquisition_running is reset
				if (!cam_acquisition_running) canceled=true;				
				pthread_mutex_unlock(&mutexframesCompleted);
				lastt=seconds;
				printf("*** acquired frame %d/%d @ %fs ***", framesCompleted, maxWriteFrames, float(seconds));
			}
	    }     
	    TinyTIFFWriter_close(tif);
    }
    free(frame);
	pthread_mutex_lock(&mutexframesCompleted);
    cam_acquisition_running=false;
	pthread_mutex_unlock(&mutexframesCompleted);
    return NULL;
}
Example #17
void DoBrk(UserContext *context) {

    void * addr = (void*)context->regs[0];
    int newPageBrk = (UP_TO_PAGE(addr-VMEM_1_BASE)>>PAGESHIFT);
    int spPage = (DOWN_TO_PAGE((context->sp)-VMEM_1_BASE)>>PAGESHIFT) - 1; // ( - 1 to account for page in between stack and heap)

    TracePrintf(2, "DoBrk: Called with brk addr %p, page %d\n", addr, newPageBrk);
    if(newPageBrk>=spPage){
        TracePrintf(1, "User Brk error: addr %p is above allocatable region - interferes with the stack\n", addr);
        context->regs[0]=ERROR;
        return;
    }
    TracePrintf(1, "DoBrk: sp page is set at %d\n", spPage); 
    for(int i = 0; i<spPage; i++){
        //map pages before new brk
        if(i<newPageBrk && current_process->cow.pageTable[i].valid==0){
            current_process->cow.pageTable[i].valid=1;
            current_process->cow.pageTable[i].pfn = getNextFrame();
            current_process->cow.pageTable[i].prot = (PROT_READ | PROT_WRITE);
            TracePrintf(2, "===============DoBrk: page %d mapped with pfn %d and read and write prots\n",\
                    i, current_process->cow.pageTable[i].pfn);
        }
        //unmap pages after new brk
        if(i>=newPageBrk && (current_process->cow.pageTable[i].valid == 1)){
            TracePrintf(2, "===============DoBrk: page %d was valid with pfn %d\n", i, current_process->cow.pageTable[i].pfn); 
            current_process->cow.pageTable[i].valid=0;
            if(ERROR == addFrame(current_process->cow.pageTable[i].pfn)){
                TracePrintf(1, "Too Many Frames\n");
                context->regs[0]=ERROR;
                return;
            }
            current_process->cow.pageTable[i].pfn = -1;

            TracePrintf(2, "===============DoBrk: page %d unmapped\n", i);
        }
    }
    context->regs[0]= SUCCESS;
    return;
}
Example #18
//Function executed at each frame
bool AcquisitionModule::run() {
    if(firstTime) {
        firstTime = false;
        m_NumFrame = m_startFrame;

        if (m_parseDirectory)
            openDir();

        if (!goToFirstFrame())
            std::cout << "Start frame does not exist in any given directory, or file corrupted" << std::endl;
    }

    QImage *anImage = getNextFrame();

    if (anImage == NULL)
        return false;
  
    if(m_data->currentImage != NULL)
        delete m_data->currentImage;
    m_data->currentImage = anImage;

    return true;
}
Example #19
/** 
 * playIt	-	play the frames of the sequence
 *
 */
void VideoProcessor::playIt()
{
    // current frame
    cv::Mat input;

    // if no capture device has been set
    if (!isOpened())
        return;

    // is playing
    stop = false;

    // update buttons
    emit updateBtn();

    while (!isStop()) {

        // read next frame if any
        if (!getNextFrame(input))
            break;

        curPos = capture.get(CV_CAP_PROP_POS_FRAMES);

        // display input frame
        emit showFrame(input);

        // update the progress bar
        emit updateProgressBar();

        // introduce a delay
        emit sleep(delay);
    }
    if (!isStop()){
        emit revert();
    }
}
Example #20
/** 
 * motionMagnify	-	eulerian motion magnification
 *
 */
void VideoProcessor::motionMagnify()
{
    // set filter
    setSpatialFilter(LAPLACIAN);
    setTemporalFilter(IIR);

    // create a temp file
    createTemp();

    // current frame
    cv::Mat input;
    // output frame
    cv::Mat output;

    // motion image
    cv::Mat motion;

    std::vector<cv::Mat> pyramid;
    std::vector<cv::Mat> filtered;

    // if no capture device has been set
    if (!isOpened())
        return;

    // set the modify flag to be true
    modify = true;

    // is processing
    stop = false;

    // save the current position
    long pos = curPos;
    // jump to the first frame
    jumpTo(0);

    while (!isStop()) {

        // read next frame if any
        if (!getNextFrame(input))
            break;

        input.convertTo(input, CV_32FC3, 1.0/255.0f);

        // 1. convert to Lab color space
        cv::cvtColor(input, input, CV_BGR2Lab);

        // 2. spatial filtering one frame
        cv::Mat s = input.clone();
        spatialFilter(s, pyramid);

        // 3. temporal filtering one frame's pyramid
        // and amplify the motion
        if (fnumber == 0){      // is first frame
            lowpass1 = pyramid;
            lowpass2 = pyramid;
            filtered = pyramid;
        } else {
            for (int i=0; i<levels; ++i) {
                curLevel = i;
                temporalFilter(pyramid.at(i), filtered.at(i));
            }

            // amplify each spatial frequency bands
            // according to Figure 6 of paper            
            cv::Size filterSize = filtered.at(0).size();
            int w = filterSize.width;
            int h = filterSize.height;

            delta = lambda_c/8.0/(1.0+alpha);
            // the factor to boost alpha above the bound
            // (for better visualization)
            exaggeration_factor = 2.0;

            // compute the representative wavelength lambda
            // for the lowest spatial frequency band of Laplacian pyramid
            lambda = sqrt(w*w + h*h)/3;  // 3 is experimental constant

            for (int i=levels; i>=0; i--) {
                curLevel = i;

                amplify(filtered.at(i), filtered.at(i));

                // go one level down on pyramid
                // representative lambda will reduce by factor of 2
                lambda /= 2.0;
            }
        }

        // 4. reconstruct motion image from filtered pyramid
        reconImgFromLaplacianPyramid(filtered, levels, motion);

        // 5. attenuate I, Q channels
        attenuate(motion, motion);

        // 6. combine source frame and motion image
        if (fnumber > 0)    // don't amplify first frame
            s += motion;

        // 7. convert back to rgb color space and CV_8UC3
        output = s.clone();
        cv::cvtColor(output, output, CV_Lab2BGR);
        output.convertTo(output, CV_8UC3, 255.0, 1.0/255.0);

        // write the frame to the temp file
        tempWriter.write(output);

        // update process
        std::string msg= "Processing...";
        emit updateProcessProgress(msg, floor((fnumber++) * 100.0 / length));
    }
    if (!isStop()){
        emit revert();
    }
    emit closeProgressDialog();

    // release the temp writer
    tempWriter.release();

    // change the video to the processed video 
    setInput(tempFile);

    // jump back to the original position
    jumpTo(pos);
}
Example #21
/**
 * @brief Automatically splits the video into several events provided the
 * given parameters.
 * @param threshold
 * @param maxcount
 * @param mincount
 * @param history
 * @param varThreshold
 * @param bShadowDetection
 * @param path
 * @return
 */
std::deque<EventPtr> Video::autoDetectEvents(double threshold,
										   double maxcount,
										   double mincount,
										   int history,
										   int varThreshold,
										   bool bShadowDetection,
										   std::string path){
	cv::Mat shot;
	FramePtr frame;
	SnapshotPtr snap;
	EventPtr event;
	std::deque<EventPtr> events;

	unsigned int j=0;
	int emptycount=0;
	int framecount=0;
	int value;
	int absoluteThreshold = threshold/100*resolution.width*resolution.height;
	int i;


	// Initialization of background subtraction
	bgSubInit(history, varThreshold, bShadowDetection);

	setFramePos(0);

	emit startProgress(0, (uint) getLengthFrames());

	while(getNextFrame(shot)){
		QCoreApplication::processEvents();
		if (toCancel){
			events.clear();
			canceled();
			return events;
		}
		bg->NewFrame(shot);
		bg->Denoise();
		emit progressChanged(j);
		value = cv::countNonZero(bg->Foreground());

		// Detected change
		if ( value > absoluteThreshold ){
			if (event.isNull()){
				event = EventPtr(new Event(this));
			}
			// create new frame
			frame = FramePtr(new Frame(this, shot, path));
			snap = SnapshotPtr(new Snapshot(frame, bg->Foreground(), path));
			// add frame to event
			event->addFrame(frame);
			event->addSnapshot(snap);
			framecount ++;
			emptycount = 0;
		}
		// Did not detect change
		else if (!event.isNull()){
			emptycount ++;
			// create new frame
			frame = FramePtr(new Frame(this, shot, path));
			snap = SnapshotPtr(new Snapshot(frame, bg->Foreground(), path));
			// add frame to event
			event->addFrame(frame);
			event->addSnapshot(snap);
			framecount ++;
			if(emptycount > maxcount){
				if (framecount - emptycount > mincount){
					// remove extra frames with no movement
					for (i = 0; i < maxcount; i++){
						event->remLastFrame();
						event->remLastSnapshot();
					}
					events.push_back(event);
				}
				event.clear();
				emptycount = 0;
				framecount = 0;
			}
		}
		j++;
	}
	// Check if Video ended in the middle of an Event.
	if (!event.isNull()){
		if (framecount > mincount){
			events.push_back(event);
		} else {
			event.clear();
		}
	}
	return events;
}
Example #22
QVector<Data*> Input::run(QVector<Data*>)
{
    return getNextFrame();
}
Example #23
int main (void) {
    // HELLO HERE I AM
    printf("example CAM_SERVER TCP/IP camera server ...\n\nfloat-format=%f\n\n", float(M_PI));
    
    /////////////////////////////////////////////////////////////////////////////
    // INITIALIZING the TCPIP SERVER CLASS
    /////////////////////////////////////////////////////////////////////////////
    printf("starting TCP/IP server ...\n");
    TCPIPserver* server=new TCPIPserver;
    server->set_port(PORT);
    server->open_socket();
    printf("%s\n", server->get_version().c_str());
    bool running=true;
    
    /////////////////////////////////////////////////////////////////////////////
    // initializing basic variables
    /////////////////////////////////////////////////////////////////////////////
    uint16_t* frame=(uint16_t*)malloc(img_width*img_height*sizeof(uint16_t));
    int img_byte_size=img_width*img_height*sizeof(uint16_t);
    double t=0;
    getNextFrame(t, frame, img_width, img_height);
	std::string basename="example_image_server_output";
    
    /////////////////////////////////////////////////////////////////////////////
    // initializing the acquisition thread structures + mutex
    /////////////////////////////////////////////////////////////////////////////
	pthread_t thread1;
	pthread_mutex_init(&mutexframesCompleted, NULL);
	
    
    /////////////////////////////////////////////////////////////////////////////
    // MAIN LOOP
    /////////////////////////////////////////////////////////////////////////////
    // in this loop, instructions are read from the TCPIP connection
    // and the camera (in this case only a simulated device) is controlled
    // accordingly. The function getNextFrame() is used to retrieve frames
    // with a sinusoidal test pattern.
    /////////////////////////////////////////////////////////////////////////////

    while(running) {
        // accept any incoming connection
        int connection=server->accept_connection();
        printfMessage("  accepted connection %d from %s\n", connection, server->get_client_name(connection).c_str());
        while(server->is_online(connection)) {
            // if we have an open connection, read instructions (as strings):
            std::string instruction=server->read_str_until(connection, '\n');
            
            // decode instructions:
            if (instruction.size()>0) {
                
                if (instruction=="CONNECT") {
				    // read remaining \n from "INSTRUCTION\n\n"
                    server->read_str_until(connection, '\n');
					
					
                    // CONNECT TO CAMERA
                    cam_connected=true;
                    // SEND ANSWER TO CLIENT/QF3
                    server->write(connection, std::string("ACK_CONNECT\n\n"));
                    // DONE CONNECTING TO CAMERA
                    
                    
                    printfMessage("CAMERA CONNECTED!\n");
                } else if (instruction=="DISCONNECT") {
				    // read remaining \n from "INSTRUCTION\n\n"
                    server->read_str_until(connection, '\n');

                    // DISCONNECT FROM CAMERA
                    cam_connected=false;
                    // SEND ANSWER TO CLIENT/QF3
                    server->write(connection, std::string("ACK_DISCONNECT\n\n"));
                    // DONE DISCONNECTING FROM CAMERA


                    printfMessage("CAMERA DISCONNECTED!\n");
                } else if (instruction=="LIVE_START") {
				    // read remaining \n from "INSTRUCTION\n\n"
                    server->read_str_until(connection, '\n');
				
				    // START THE LIVE_VIEW MODE
					// INIT/CONFIG CAMERA for LIVE-VIEW
                    cam_liveview=true;
                    // SEND ANSWER TO CLIENT/QF3
                    server->write(connection, std::string("ACK_LIVE_START\n\n"));
				    // DONE STARTING THE LIVE_VIEW MODE


                    printfMessage("LIVE VIEW STARTED!\n");
                } else if (instruction=="LIVE_STOP") {
				    // read remaining \n from "INSTRUCTION\n\n"
                    server->read_str_until(connection, '\n');
				
				    // STOP THE LIVE-VIEW MODE
                    cam_liveview=false;
                    // SEND ANSWER TO CLIENT/QF3
                    server->write(connection, std::string("ACK_LIVE_STOP\n\n"));
					
					
                    printfMessage("LIVE VIEW STOPED!\n");
                } else if (instruction=="SIZE_X_GET") {
				    // read remaining \n from "INSTRUCTION\n\n"
                    server->read_str_until(connection, '\n');

				    // RETURNS THE width OF A FRAME IN LIVE_VIEW MODE
                    // SEND ANSWER TO CLIENT/QF3
                    server->write(connection, format("%d\n\n", img_width));
					
					
                    printfMessage("GET FRAME WIDTH!\n");
                } else if (instruction=="SIZE_Y_GET") {
					// read remaining \n from "INSTRUCTION\n\n"
                    server->read_str_until(connection, '\n');

				    // RETURNS THE height OF A FRAME IN LIVE_VIEW MODE
                    // SEND ANSWER TO CLIENT/QF3
                    server->write(connection, format("%d\n\n", img_height));
					
					
                    printfMessage("GET FRAME HEIGHT!\n");
                } else if (instruction=="GET_EXPOSURE") {
					// read remaining \n from "INSTRUCTION\n\n"
                    server->read_str_until(connection, '\n');
				
				    // RETURNS THE exposure time [seconds] OF A FRAME IN LIVE_VIEW MODE
                    // SEND ANSWER TO CLIENT/QF3
                    server->write(connection, format("%f\n\n", exposure));
					
					
                    printfMessage("GET EXPOSURE TIME!\n");
                } else if (instruction=="PARAMETERS_GET") {
					// read remaining \n from "INSTRUCTION\n\n"
                    server->read_str_until(connection, '\n');
				
				    // RETURNS A LIST OF THE AVAILABLE CAMERA PARAMETERS
                    // SEND ANSWER TO CLIENT/QF3
					//        { (PARAM_FLOAT | PARAM_INT | PARAM_BOOL | PARAM_STRING);<parameter_name>;<parameter_value_as_string>;<parameter_description_as_string>;[<param_range_min>];[<param_range_max>];[(RW|RO)}\n }* \n
					writeParameters(server, connection);
                    server->write(connection, "\n\n");
					
					
                    printfMessage("GET CAMERA PARAMETERS!\n");
               } else if (instruction=="PARAMETER_GET") {
				
				    // RETURNS A SINGLE PARAMETER
                    // SEND ANSWER TO CLIENT/QF3
					//        { (PARAM_FLOAT | PARAM_INT | PARAM_BOOL | PARAM_STRING);<parameter_name>;<parameter_value_as_string>;<parameter_description_as_string>;[<param_range_min>];[<param_range_max>];[(RW|RO)}\n }{1} \n
					std::string param_name=server->read_str_until(connection, "\n\n");
					writeParameters(server, connection, param_name);
                    server->write(connection, "\n\n");
					
					
                    printfMessage("GET CAMERA PARAMETER '%s'!\n", param_name.c_str());
                } else if (instruction=="PARAMETERS_SET") {
				
				    // SET A SINGLE IMAGE PARAMETER
                    //   instruction has the form PARAMETERS_SET\n<name>;<value>\n\n
					std::string param_name=server->read_str_until(connection, ';');
					std::string param_value=server->read_str_until(connection, "\n\n");
					setParameter(param_name, param_value);
					
					
                    printfMessage("SET CAMERA PARAMETERS ('%s' = '%s')!\n", param_name.c_str(), param_value.c_str());
                } else if (instruction=="IMAGE_NEXT_GET") {
					// read remaining \n from "INSTRUCTION\n\n"
                    server->read_str_until(connection, '\n');
				
				    // GET A NEW FRAME AND SEND IT TO THE CLIENT
                    t++;
                    getNextFrame(t, frame, img_width, img_height);
                    // SEND ANSWER TO CLIENT/QF3:
					//   1. (IMAGE8 | IMAGE16 | IMAGE32 | IMAGE64) \n
					//   2. <image_width_in_pixels>\n
					//   3. <image_height_in_pixels>\n
					//   5. <image raw data of size image_width_in_pixels*image_height_in_pixels*pixel_data_size>
					//   6. METADATA RECORDS, DESCRIBING THE IMAGE: (from writeParameters())
					//        { (PARAM_FLOAT | PARAM_INT | PARAM_BOOL | PARAM_STRING);<parameter_name>;<parameter_value_as_string>[;<parameter_description_as_string>]\n }* \n
                    server->write(connection, format("IMAGE%d\n%d\n%d\n", int(sizeof(frame[0])*8), img_width, img_height));
                    server->write(connection, (char*)frame, img_byte_size);
					writeParameters(server, connection);
                    server->write(connection, "\n\n");
					
					
                    printfMessage("GET FRAME! t=%lf\n", t);
                } else if (instruction=="RECORD") {
				
				    // START AN ACQUISITION AND SAVE DATA TO A FILE WITH THE BASENAME filename
                    basename=server->read_str_until(connection, "\n\n");
					
					if (!cam_acquisition_running) {
						printfMessage("RECORD FRAME START! basename=%s\n", basename.c_str());
						// SEND ANSWER TO CLIENT/QF3
						//   1. the written filenames (format FILE;<TYPE>;<FILENAME>;<DESCRIPTION>\n
						server->write(connection, format("FILE;TIFF;%s.tif;acquired frames\n", basename.c_str()));
						server->write(connection, format("FILE;TXT;%s.txt;additional text output\n", basename.c_str()));
						//   2. the used camera config parameters
						writeParameters(server, connection);
                        server->write(connection, "\n\n");
						cam_acquisition_running=true;
						int rc = pthread_create( &thread1, NULL, &acquisitionThreadFunc, (void*)(&basename) );
						if( rc != 0 ) {
							cam_acquisition_running=false;
							printfMessage("COULD NOT START RECORD FRAME!\n");
						}
						
					}
                    printfMessage("RECORD FRAME STARTED!");
                } else if (instruction=="CANCEL_ACQUISITION") {
				    // read remaining \n from "INSTRUCTION\n\n"
                    server->read_str_until(connection, '\n');
				
				    // STOP THE LIVE-VIEW MODE
					pthread_mutex_lock(&mutexframesCompleted);
					cam_acquisition_running=false;
					pthread_mutex_unlock(&mutexframesCompleted);

                    
                    // SEND ANSWER TO CLIENT/QF3
                    server->write(connection, std::string("ACK_CANCEL_ACQUISITION\n\n"));
					
					
                    printfMessage("ACQUISITION CANCELED!\n");
                } else if (instruction!="\n" && instruction!="\n\n") {
                    // PRINT ERROR MESSAGE AND IGNORE UNKNOWN INSTRUCTION
                    printfMessage("read(%d) unknown instruction %s\n", connection, instruction.c_str());
                }
            }
        }
        printfMessage("  connection %d closed\n", connection);
    }

    printf("stopping/freeing TCP/IP server ...\n");
    free(frame);
    delete server;
	pthread_mutex_destroy(&mutexframesCompleted);
    pthread_exit(NULL);
}
Example #24
/**
 * colorMagnify	-	color magnification
 *
 */
void VideoProcessor::colorMagnify()
{
    // set filter
    setSpatialFilter(GAUSSIAN);
    setTemporalFilter(IDEAL);

    // create a temp file
    createTemp();

    // current frame
    cv::Mat input;
    // output frame
    cv::Mat output;
    // motion image

    cv::Mat motion;
    // temp image
    cv::Mat temp;

    // video frames
    std::vector<cv::Mat> frames;
    // down-sampled frames
    std::vector<cv::Mat> downSampledFrames;
    // filtered frames
    std::vector<cv::Mat> filteredFrames;

    // concatenate image of all the down-sample frames
    cv::Mat videoMat;
    // concatenate filtered image
    cv::Mat filtered;

    // if no capture device has been set
    if (!isOpened())
        return;

    // set the modify flag to be true
    modify = true;

    // is processing
    stop = false;

    // save the current position
    long pos = curPos;

    // jump to the first frame
    jumpTo(0);

    // 1. spatial filtering
    while (getNextFrame(input) && !isStop()) {
        input.convertTo(temp, CV_32FC3);
        frames.push_back(temp.clone());
        // spatial filtering
        std::vector<cv::Mat> pyramid;
        spatialFilter(temp, pyramid);
        downSampledFrames.push_back(pyramid.at(levels-1));
        // update process
        std::string msg= "Spatial Filtering...";
        emit updateProcessProgress(msg, floor((fnumber++) * 100.0 / length));
    }
    if (isStop()){
        emit closeProgressDialog();
        fnumber = 0;
        return;
    }
    emit closeProgressDialog();

    // 2. concat all the frames into a single large Mat
    // where each column is a reshaped single frame
    // (for processing convenience)
    concat(downSampledFrames, videoMat);

    // 3. temporal filtering
    temporalFilter(videoMat, filtered);

    // 4. amplify color motion
    amplify(filtered, filtered);

    // 5. de-concat the filtered image into filtered frames
    deConcat(filtered, downSampledFrames.at(0).size(), filteredFrames);

    // 6. amplify each frame
    // by adding frame image and motions
    // and write into video
    fnumber = 0;
    for (int i=0; i<length-1 && !isStop(); ++i) {
        // up-sample the motion image        
        upsamplingFromGaussianPyramid(filteredFrames.at(i), levels, motion);
        resize(motion, motion, frames.at(i).size());
        temp = frames.at(i) + motion;
        output = temp.clone();
        double minVal, maxVal;
        minMaxLoc(output, &minVal, &maxVal); //find minimum and maximum intensities
        output.convertTo(output, CV_8UC3, 255.0/(maxVal - minVal),
                  -minVal * 255.0/(maxVal - minVal));
        tempWriter.write(output);
        std::string msg= "Amplifying...";
        emit updateProcessProgress(msg, floor((fnumber++) * 100.0 / length));
    }
    if (!isStop()) {
        emit revert();
    }
    emit closeProgressDialog();

    // release the temp writer
    tempWriter.release();

    // change the video to the processed video
    setInput(tempFile);

    // jump back to the original position
    jumpTo(pos);
}
Example #25
void Composer::movitRender( Frame *dst, bool update )
{
	int i, j, start=0;
	Frame *f;
	//QTime time;
	//time.start();
	
	Profile projectProfile = sampler->getProfile();

	// find the lowest frame to process
	for ( j = 0 ; j < dst->sample->frames.count(); ++j ) {
		if ( (f = dst->sample->frames[j]->frame) ) {
			start = j;
			break;
		}
	}

	// build a "description" of the required chain
	// processing frames from bottom to top
	i = start;
	double pts = sampler->currentPTS();
	QStringList currentDescriptor;
	int ow = projectProfile.getVideoWidth();
	int oh = projectProfile.getVideoHeight();
	while ( (f = getNextFrame( dst, i )) ) {
		FrameSample *sample = dst->sample->frames[i - 1];
		// input and filters
		movitFrameDescriptor( "-", f, &sample->videoFilters, currentDescriptor, &projectProfile );
		// transition
		if ( sample->transitionFrame.frame && !sample->transitionFrame.videoTransitionFilter.isNull() ) {
			// filters applied on first transition frame, if any
			currentDescriptor.append( "--" + sample->transitionFrame.videoTransitionFilter->getDescriptorFirst( pts, f, &projectProfile ) );
			movitFrameDescriptor( "->", sample->transitionFrame.frame, &sample->transitionFrame.videoFilters, currentDescriptor, &projectProfile );
			// filters applied on second transition frame, if any
			currentDescriptor.append( "-->" + sample->transitionFrame.videoTransitionFilter->getDescriptorSecond( pts, sample->transitionFrame.frame, &projectProfile ) );
			currentDescriptor.append( "-<" + sample->transitionFrame.videoTransitionFilter->getDescriptor( pts, sample->transitionFrame.frame, &projectProfile ) );
		}
		// overlay
		if ( (i - 1) > start )
			currentDescriptor.append( GLOverlay().getDescriptor( pts, f, &projectProfile ) );
		ow = f->glWidth;
		oh = f->glHeight;
	}
	// background
	currentDescriptor.append( movitBackground.getDescriptor( pts, NULL, &projectProfile ) );
	// output
	if (outputResize.width() > 0) {
		currentDescriptor.append( QString("Resized output %1 %2").arg( outputResize.width() ).arg( outputResize.height() ) );
	}
	else {
		currentDescriptor.append( QString("OUTPUT %1 %2").arg( ow ).arg( oh ) );
	}

	// rebuild the chain if necessary
	if ( currentDescriptor !=  movitChain.descriptor ) {
		for ( int k = 0; k < currentDescriptor.count(); k++ )
			printf("%s\n", currentDescriptor[k].toLocal8Bit().data());
		movitChain.descriptor = currentDescriptor;
		movitChain.reset();
		movitChain.chain = new EffectChain( projectProfile.getVideoSAR() * projectProfile.getVideoWidth(), projectProfile.getVideoHeight(), movitPool );

		i = start;
		Effect *last, *current = NULL;
		
		while ( (f = getNextFrame( dst, i )) ) {
			last = current;
			// input and filters
			MovitBranch *branch;
			FrameSample *sample = dst->sample->frames[i - 1];
			current = movitFrameBuild( f, &sample->videoFilters, &branch );
			// transition
			if ( sample->transitionFrame.frame && !sample->transitionFrame.videoTransitionFilter.isNull() ) {
				QList<Effect*> el = sample->transitionFrame.videoTransitionFilter->getMovitEffects();
				
				// filters applied on first transition frame, if any
				QList<Effect*> first = sample->transitionFrame.videoTransitionFilter->getMovitEffectsFirst();
				for ( int l = 0; l < first.count(); ++l )
					current = movitChain.chain->add_effect( first.at( l ) );
				
				MovitBranch *branchTrans;
				Effect *currentTrans = movitFrameBuild( sample->transitionFrame.frame, &sample->transitionFrame.videoFilters, &branchTrans );
				// filters applied on second transition frame, if any
				QList<Effect*> second = sample->transitionFrame.videoTransitionFilter->getMovitEffectsSecond();
				for ( int l = 0; l < second.count(); ++l )
					currentTrans = movitChain.chain->add_effect( second.at( l ) );
				
				branchTrans->filters.append( new MovitFilter( el ) );
				for ( int l = 0; l < el.count(); ++l )
					current = movitChain.chain->add_effect( el.at( l ), current, currentTrans );
			}
			// overlay
			if ( last ) {
				GLOverlay *overlay = new GLOverlay();
				QList<Effect*> el = overlay->getMovitEffects();
				branch->overlay = new MovitFilter( el, overlay );
				for ( int l = 0; l < el.count(); ++l )
					current = movitChain.chain->add_effect( el.at( l ), last, current );
			}
		}
		// background
		QList<Effect*> el = movitBackground.getMovitEffects();
		movitChain.chain->add_effect( el[0] );
		// output resizer
		if (outputResize.width() > 0) {
			Effect *e = new ResampleEffect();
			e->set_int( "width", outputResize.width() );
			e->set_int( "height", outputResize.height() );
			movitChain.chain->add_effect( e );
		}
		// output
		movitChain.chain->set_dither_bits( 8 );
		ImageFormat output_format;
		output_format.color_space = COLORSPACE_sRGB;
		output_format.gamma_curve = GAMMA_REC_709;
		movitChain.chain->add_output( output_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED );
		movitChain.chain->finalize();
	}

	// update inputs data and filters parameters
	i = start, j = 0;
	int w = projectProfile.getVideoWidth();
	int h = projectProfile.getVideoHeight();
	while ( (f = getNextFrame( dst, i )) ) {
		f->glWidth = f->profile.getVideoWidth();
		f->glHeight = f->profile.getVideoHeight();
		f->glSAR = f->profile.getVideoSAR();
		f->glOVD = 0;
		f->glOVDTransformList.clear();
		
		// input and filters
		MovitBranch *branch = movitChain.branches[ j++ ];
		branch->input->process( f, &gl );
		int vf = 0;
		FrameSample *sample = dst->sample->frames[i - 1];
		for ( int k = 0; k < branch->filters.count(); ++k ) { 
			if ( !branch->filters[k]->filter )
				sample->videoFilters[vf++]->process( branch->filters[k]->effects, pts, f, &projectProfile );
			else
				branch->filters[k]->filter->process( branch->filters[k]->effects, pts, f, &projectProfile );
		}
		// transition
		if ( sample->transitionFrame.frame && !sample->transitionFrame.videoTransitionFilter.isNull() ) {
			sample->transitionFrame.frame->glWidth = sample->transitionFrame.frame->profile.getVideoWidth();
			sample->transitionFrame.frame->glHeight = sample->transitionFrame.frame->profile.getVideoHeight();
			sample->transitionFrame.frame->glSAR = sample->transitionFrame.frame->profile.getVideoSAR();
			MovitBranch *branchTrans = movitChain.branches[ j++ ];
			branchTrans->input->process( sample->transitionFrame.frame, &gl );
			int tvf = 0;
			int k;
			for ( k = 0; k < branchTrans->filters.count() - 1; ++k ) { 
				if ( !branchTrans->filters[k]->filter )
					sample->transitionFrame.videoFilters[tvf++]->process( branchTrans->filters[k]->effects, pts, sample->transitionFrame.frame, &projectProfile );
				else
					branchTrans->filters[k]->filter->process( branchTrans->filters[k]->effects, pts, sample->transitionFrame.frame, &projectProfile );
			}
			sample->transitionFrame.videoTransitionFilter->process( branchTrans->filters[k]->effects, pts, f, sample->transitionFrame.frame, &projectProfile );
		}
		// overlay
		if ( branch->overlay && branch->overlay->filter )
			branch->overlay->filter->process( branch->overlay->effects, pts, f, f, &projectProfile );
		
		w = f->glWidth;
		h = f->glHeight;
	}

	// render
	waitFence();
	// output resizer
	if (outputResize.width() > 0) {
		w = outputResize.width();
		h = outputResize.height();
	}
	FBO *fbo = gl.getFBO( w, h, GL_RGBA );
	movitChain.chain->render_to_fbo( fbo->fbo(), w, h );
	
	dst->glWidth = w;
	dst->glHeight = h;
	dst->glSAR = projectProfile.getVideoSAR();
	if ( !update ) {
		dst->setVideoFrame( Frame::GLTEXTURE, w, h, dst->glSAR,
						projectProfile.getVideoInterlaced(), projectProfile.getVideoTopFieldFirst(),
						pts, projectProfile.getVideoFrameDuration() );
	}
	dst->setFBO( fbo );
	dst->setFence( gl.getFence() );
	composerFence = gl.getFence();
	glFlush();
	
	//qDebug() << "elapsed" << time.elapsed();
}
Example #26
bool Composer::renderVideoFrame( Frame *dst )
{
	int i = 0;
	sampler->getVideoTracks( dst );
	if ( !getNextFrame( dst, i ) ) {
		Profile projectProfile = sampler->getProfile();
		int w = projectProfile.getVideoWidth();
		int h = projectProfile.getVideoHeight();
		if (outputResize.width() > 0) {
			w = outputResize.width();
			h = outputResize.height();
		}
		
		if ( skipFrame > 0 ) {
			//qDebug() << "skipFrame" << sampler->currentPTS();
			--skipFrame;
			dst->setVideoFrame( Frame::NONE, w, h, projectProfile.getVideoSAR(),
								false, false, sampler->currentPTS(), projectProfile.getVideoFrameDuration() );
			return true;
		}
		
		// make black
		dst->setVideoFrame( Frame::GLTEXTURE, w, h, projectProfile.getVideoSAR(),
							false, false, sampler->currentPTS(), projectProfile.getVideoFrameDuration() );
		waitFence();
		if ( !gl.black( dst ) )
			return false;
		dst->glWidth = dst->profile.getVideoWidth();
		dst->glHeight = dst->profile.getVideoHeight();
		dst->glSAR = dst->profile.getVideoSAR();
		dst->setFence( gl.getFence() );
		composerFence = gl.getFence();
		glFlush();
		return true;
	}
	
	if ( skipFrame > 0 ) {
		Frame *f;
		bool skip = true;
		for ( int i = 0 ; i < dst->sample->frames.count(); ++i) {
			if ( (f = dst->sample->frames[i]->frame) ) {
				if ( f->mmi == 0 ) {
					skip = false;
					break;
				}
			}
		}
		if ( skip ) {
			//qDebug() << "skipFrame" << sampler->currentPTS();
			--skipFrame;
			Profile projectProfile = sampler->getProfile();
			int w = projectProfile.getVideoWidth();
			int h = projectProfile.getVideoHeight();
			if (outputResize.width() > 0) {
				w = outputResize.width();
				h = outputResize.height();
			}
			dst->setVideoFrame( Frame::NONE, w, h, projectProfile.getVideoSAR(),
								false, false, sampler->currentPTS(), projectProfile.getVideoFrameDuration() );
			return true;
		}
	}

	movitRender( dst );

	return true;
}
Example #27
/*****************************************************************************
 * Timer has fired, load the next frame
 *****************************************************************************/
void MainWindow::timerupdate()
{
    getNextFrame();
}
Example #28
/*****************************************************************************
 * Load a single next frame
 *****************************************************************************/
void MainWindow::on_btnNextFrame_clicked()
{
    getNextFrame();
}