Example #1
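MediaBridgeSession::SendVideo() drains the queue of incoming RTMP video frames (Sorenson/FLV1), decodes each one, re-encodes it with the negotiated RTP video codec, and hands the result to the smoother for paced sending.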
int MediaBridgeSession::SendVideo()
{
	VideoDecoder *decoder = VideoCodecFactory::CreateDecoder(VideoCodec::SORENSON);
	VideoEncoder *encoder = VideoCodecFactory::CreateEncoder(rtpVideoCodec);
	DWORD width = 0;
	DWORD height = 0;
	DWORD numpixels = 0;

	QWORD	lastVideoTs = 0;

	Log(">SendVideo\n");

	//Set video format
	if (!rtpVideo.SetSendingCodec(rtpVideoCodec))
		//Error
		return Error("Peer do not support [%d,%s]\n",rtpVideoCodec,VideoCodec::GetNameFor(rtpVideoCodec));

	//While sending video
	while (sendingVideo)
	{
		//Wait for next video
		if (!videoFrames.Wait(0))
			//Check again
			continue;

		//Get video frame
		RTMPVideoFrame* video = videoFrames.Pop();
		//check
		if (!video)
			//Again
			continue;

		//Get time difference
		DWORD diff = 0;
		//Get timestamp
		QWORD ts = video->GetTimestamp();
		//If it is not the first frame
		if (lastVideoTs)
			//Calculate it
			diff = ts - lastVideoTs;
		//Set the last video timestamp
		lastVideoTs = ts;

		//Check
		if (video->GetVideoCodec()!=RTMPVideoFrame::FLV1)
			//Error
			continue;

		//Decode frame
		if (!decoder->Decode(video->GetMediaData(),video->GetMediaSize()))
		{
			Error("decode packet error");
			//Next
			continue;
		}

		//Check size
		if (decoder->GetWidth()!=width || decoder->GetHeight()!=height)
		{
			//Get dimension
			width = decoder->GetWidth();
			height = decoder->GetHeight();

			//Set size
			numpixels = width*height*3/2;

			//Set also frame rate and bps
			encoder->SetFrameRate(25,300,500);

			//Set them in the encoder
			encoder->SetSize(width,height);
		}
		//Check size
		if (!numpixels)
		{
			Error("numpixels equals 0");
			//Next
			continue;
		}
		//Check fpu
		if (sendFPU)
		{
			//Send it
			encoder->FastPictureUpdate();
			//Reset
			sendFPU = false;
		}

		//Encode it
		VideoFrame *videoFrame = encoder->EncodeFrame(decoder->GetFrame(),numpixels);

		//If it failed
		if (!videoFrame)
		{
			Log("No video frame\n");
			//Next
			continue;
		}

		//Set frame time
		videoFrame->SetTimestamp(diff);

		//Send it smoothly
		smoother.SendFrame(videoFrame,diff);

		//Delete video frame
		delete(video);
	}

	Log("<SendVideo\n");

	return 1;
}
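Note that each encoded frame is stamped with the inter-frame delta (diff) rather than an absolute timestamp, so the smoother paces output according to the spacing of the incoming RTMP frames.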
Example #2
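The opposite direction of the same bridge: MediaBridgeSession::RecVideo() pulls RTP packets, decodes them with a decoder created from the payload type, re-encodes the picture as Sorenson (FLV1), and wraps it in an RTMPVideoFrame for SendMediaFrame().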
/****************************************
* RecVideo
*	Gets the packets and displays them
*****************************************/
int MediaBridgeSession::RecVideo()
{
	//Coders
	VideoDecoder* decoder = NULL;
	VideoEncoder* encoder = VideoCodecFactory::CreateEncoder(VideoCodec::SORENSON);
	//Create new video frame
	RTMPVideoFrame  frame(0,262143);
	//Set codec
	frame.SetVideoCodec(RTMPVideoFrame::FLV1);

	int 	width=0;
	int 	height=0;
	DWORD	numpixels=0;

	Log(">RecVideo\n");

	//While we have to capture
	while(receivingVideo)
	{
		//Get the packet
		RTPPacket* packet = rtpVideo.GetPacket();

		//Check
		if (!packet)
			//Next
			continue;

		//Get type
		VideoCodec::Type type = (VideoCodec::Type)packet->GetCodec();


		if ((decoder==NULL) || (type!=decoder->type))
		{
			//If we already had one, delete it
			if (decoder!=NULL)
				delete decoder;

			//Create one depending on the type
			decoder = VideoCodecFactory::CreateDecoder(type);

			//Check
			if (!decoder)
			{
				delete(packet);
				continue;
			}
		}

		//Decode it
		if(!decoder->DecodePacket(packet->GetMediaData(),packet->GetMediaLength(),0,packet->GetMark()))
		{
			delete(packet);
			continue;
		}
		//Get mark
		bool mark = packet->GetMark();

		//Delete packet
		delete(packet);

		//Check if it is last one
		if(!mark)
			continue;

		//Check size
		if (decoder->GetWidth()!=width || decoder->GetHeight()!=height)
		{
			//Get dimension
			width = decoder->GetWidth();
			height = decoder->GetHeight();

			//Set size
			numpixels = width*height*3/2;

			//Set also frame rate and bps
			encoder->SetFrameRate(25,300,500);

			//Set them in the encoder
			encoder->SetSize(width,height);
		}

		//Encode next frame
		VideoFrame *encoded = encoder->EncodeFrame(decoder->GetFrame(),numpixels);

		//Check
		if (!encoded)
			break;

		//Check size
		if (frame.GetMaxMediaSize()<encoded->GetLength())
			//Not enough space
			return Error("Not enough space to copy FLV encoded frame [frame:%d,encoded:%d]\n",frame.GetMaxMediaSize(),encoded->GetLength());

		//Get full frame
		frame.SetVideoFrame(encoded->GetData(),encoded->GetLength());

		//Set buffer size
		frame.SetMediaSize(encoded->GetLength());

		//Check type
		if (encoded->IsIntra())
			//Set type
			frame.SetFrameType(RTMPVideoFrame::INTRA);
		else
			//Set type
			frame.SetFrameType(RTMPVideoFrame::INTER);

		//Let the connection set the timestamp
		frame.SetTimestamp(getDifTime(&first)/1000);

		//Send it
		SendMediaFrame(&frame);
	}

	//Check
	if (decoder)
		//Delete
		delete(decoder);
	//Check
	if (encoder)
		//Delete
		delete(encoder);

	Log("<RecVideo\n");
}
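Unlike Example #1, the decoder here is created lazily and re-created whenever the RTP payload type changes, so the peer may switch codecs mid-stream; the encoder settings are refreshed only when the decoded picture size changes.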
Example #3
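VideoStream::RecVideo() is a receive-and-display loop: it tracks extended sequence numbers to detect packet loss, unwraps RED redundant payloads, requests fast picture updates (FPU) when packets are lost or decoding fails, and pushes each completed frame to videoOutput.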
/****************************************
* RecVideo
*	Gets the packets and displays them
*****************************************/
int VideoStream::RecVideo()
{
	VideoDecoder*	videoDecoder = NULL;
	VideoCodec::Type type;
	timeval 	before;
	timeval		lastFPURequest;
	DWORD		lostCount=0;
	DWORD		frameTime = (DWORD)-1;
	DWORD		lastSeq = RTPPacket::MaxExtSeqNum;
	bool		waitIntra = false;
	
	
	Log(">RecVideo\n");
	
	//Get now
	gettimeofday(&before,NULL);

	//Not sent FPU yet
	setZeroTime(&lastFPURequest);

	//While we have to capture
	while(receivingVideo)
	{
		//Get RTP packet
		RTPPacket* packet = rtp.GetPacket();

		//Check
		if (!packet)
			//Next
			continue;
		
		//Get extended sequence number and timestamp
		DWORD seq = packet->GetExtSeqNum();
		DWORD ts = packet->GetTimestamp();

		//Get packet data
		BYTE* buffer = packet->GetMediaData();
		DWORD size = packet->GetMediaLength();

		//Get type
		type = (VideoCodec::Type)packet->GetCodec();

		//Lost packets since last
		DWORD lost = 0;

		//If not first
		if (lastSeq!=RTPPacket::MaxExtSeqNum)
			//Calculate losts
			lost = seq-lastSeq-1;

		//Increase total lost count
		lostCount += lost;

		//Update last sequence number
		lastSeq = seq;

		//If lost some packets or still have not got an iframe
		if(lostCount || waitIntra)
		{
			//Check if we got a listener and more than minFPUPeriod has elapsed since the last request
			if (listener && getDifTime(&lastFPURequest)>minFPUPeriod)
			{
				//Debug
				Debug("-Requesting FPU lost %d\n",lostCount);
				//Reset count
				lostCount = 0;
				//Request it
				listener->onRequestFPU();
				//Request also over rtp
				rtp.RequestFPU();
				//Update time
				getUpdDifTime(&lastFPURequest);
				//Waiting for refresh
				waitIntra = true;
			}
		}

		//Check if it is a redundant packet
		if (type==VideoCodec::RED)
		{
			//Get redundant packet
			RTPRedundantPacket* red = (RTPRedundantPacket*)packet;
			//Get primary codec
			type = (VideoCodec::Type)red->GetPrimaryCodec();
			//Check it is not ULPFEC redundant packet
			if (type==VideoCodec::ULPFEC)
			{
				//Delete packet
				delete(packet);
				//Skip
				continue;
			}
			//Update primary redundant payload
			buffer = red->GetPrimaryPayloadData();
			size = red->GetPrimaryPayloadSize();
		}
		
		//Check codecs
		if ((videoDecoder==NULL) || (type!=videoDecoder->type))
		{
			//If we already got one
			if (videoDecoder!=NULL)
				//Delete it
				delete videoDecoder;

			//Create video decoder for this codec
			videoDecoder = VideoCodecFactory::CreateDecoder(type);

			//Check
			if (videoDecoder==NULL)
			{
				Error("Error creando nuevo decodificador de video [%d]\n",type);
				//Delete packet
				delete(packet);
				//Next
				continue;
			}
		}

		//Check if we have lost the last packet from the previous frame by comparing both timestamps
		if (ts>frameTime)
		{
			Debug("-lost mark packet ts:%u frameTime:%u\n",ts,frameTime);
			//Try to decode what is in the buffer
			videoDecoder->DecodePacket(NULL,0,1,1);
			//Get picture
			BYTE *frame = videoDecoder->GetFrame();
			DWORD width = videoDecoder->GetWidth();
			DWORD height = videoDecoder->GetHeight();
			//Check values
			if (frame && width && height)
			{
				//Set frame size
				videoOutput->SetVideoSize(width,height);

				//Check if muted
				if (!muted)
					//Send it
					videoOutput->NextFrame(frame);
			}
		}
		
		//Update frame time
		frameTime = ts;
		
		//Decode packet
		if(!videoDecoder->DecodePacket(buffer,size,lost,packet->GetMark()))
		{
			//Check if we got a listener and more than minFPUPeriod has elapsed since the last request
			if (listener && getDifTime(&lastFPURequest)>minFPUPeriod)
			{
				//Debug
				Log("-Requesting FPU decoder error\n");
				//Reset count
				lostCount = 0;
				//Request it
				listener->onRequestFPU();
				//Request also over rtp
				rtp.RequestFPU();
				//Update time
				getUpdDifTime(&lastFPURequest);
				//Waiting for refresh
				waitIntra = true;
			}
		}

		//Check if it is the last packet of a frame
		if(packet->GetMark())
		{
			if (videoDecoder->IsKeyFrame())
				Debug("-Got Intra\n");
			
			//No frame time yet for next frame
			frameTime = (DWORD)-1;

			//Get picture
			BYTE *frame = videoDecoder->GetFrame();
			DWORD width = videoDecoder->GetWidth();
			DWORD height = videoDecoder->GetHeight();
			//Check values
			if (frame && width && height)
			{
				//Set frame size
				videoOutput->SetVideoSize(width,height);
				
				//Check if muted
				if (!muted)
					//Send it
					videoOutput->NextFrame(frame);
			}
			//Check if we got the waiting refresh
			if (waitIntra && videoDecoder->IsKeyFrame())
				//Do not wait anymore
				waitIntra = false;
		}
		//Delete packet
		delete(packet);
	}

	//Delete decoder
	delete videoDecoder;

	Log("<RecVideo\n");

	return 1;
}
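Loss is handled at two levels: a sequence-number gap or a decoder error triggers a rate-limited FPU request, while a timestamp jump without a marker bit makes the loop flush the partially assembled frame before starting on the next one.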
Example #4
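A standalone file-to-file tool: it decodes a video, finds flesh-colored regions, extracts hand candidates and classifies their posture, draws the detected geometry and posture boxes onto each frame, and encodes the annotated frames to the output video.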
int main(int argc, char* argv[])
{
  int i, j, k;
  int width, height;
  int numFleshRegions, numHands, xScale, yScale;
  int left, right, top, bottom;
  Image* image;
  Image outlineImage;
  FleshDetector* fleshDetector;
  vector<ConnectedRegion*>* fleshRegionVector;
  vector<Hand*> hands;
  Hand* hand;
  vector<HandCandidate*> handCandidates;
  HandCandidate* candidate;
  unsigned char angledBoxColor[] = {255, 255, 0};
  unsigned char longColor[] = {0, 255, 0};
  unsigned char shortColor[] = {0, 0, 255};
  unsigned char offsetColor[] = {0, 255, 255};
  unsigned char pointColor[] = {255, 0, 0};
  unsigned char farPointColor[] = {255, 0, 255};
  int numLargeRegions;
  string basename;
  DoublePoint centroid, center, nearEdge, farEdge;
  LineSegment shortLine, longLine, offsetLine;
  Rect angledBox;
  double edgeAngle, offsetAngle;
  CompositeClassifier postureDetector;
  string features;
  Matrix input;
  int classIndex;
  SubImage handImage;
  vector<Point> farPoints;
  int numFarPoints;
  string inputFilename, outputFilename;
  VideoDecoder decoder;
  VideoEncoder encoder;
  bool needInit = true;

  if ( argc < 5 )
  {
    printf("Usage: %s <flesh classifier file> <hand classifier file> <input file> <output file>\n", argv[0]);
    return 1;
  }

  // Either loads a real detector or gets a dummy detector if arg is "DUMMY"
  fleshDetector = FleshDetector::Get(argv[1]);
  if ( !fleshDetector )
  {
    fprintf(stderr, "Error loading flesh detector %s\n", argv[1]);
    return 1;
  }

  if ( !postureDetector.Load(argv[2]) )
  {
    fprintf(stderr, "Error loading hand detector %s\n", argv[2]);
    return 1;
  }
  features = postureDetector.GetFeatureString();

  inputFilename = argv[3];
  outputFilename = argv[4];

  decoder.SetFilename(inputFilename);
  if ( !decoder.Load() )
  {
    fprintf(stderr, "Error loading video %s\n", inputFilename.c_str());
    return 1;
  }

  while ( decoder.UpdateFrame() )
  {
    image = decoder.GetFrame();

    if ( needInit )
    {
      needInit = false;
      width = image->GetWidth();
      height = image->GetHeight();

      if ( !encoder.Open(outputFilename.c_str(), width, height, 10) )
      {
        fprintf(stderr, "Failed opening %s\n", outputFilename.c_str());
        return 1;
      }
    }

    hands.clear();
    outlineImage = *image;
    fleshRegionVector = fleshDetector->GetFleshRegions(image, xScale, yScale);
    if ( fleshRegionVector )
    {
      numFleshRegions = fleshRegionVector->size();
      numLargeRegions = 0;
      for (i = 0; i < numFleshRegions; i++)
      {
        if ( !(*fleshRegionVector)[i]->GetBounds(left, right, top, bottom) )
        {
          fprintf(stderr, "Error getting flesh block %d bounds\n", i);
          return 1;
        }
        left *= xScale;
        right = (right + 1) * xScale - 1;
        top *= yScale;
        bottom = (bottom + 1) * yScale - 1;
        if ( (right - left + 1 < FLESH_REGION_MIN_DIMENSION) || (bottom - top + 1 < FLESH_REGION_MIN_DIMENSION) )
          continue;
        numLargeRegions++;

        handImage.CreateFromParent(image, left, right, top, bottom);
        vector<ConnectedRegion*>* fullResRegions;
        fullResRegions = fleshDetector->GetFleshRegions(&handImage);
        int numFullResRegions = 0;
        if ( fullResRegions )
          numFullResRegions = fullResRegions->size();
        if ( !numFullResRegions )
        {
          fprintf(stderr, "Failed getting full resolution hand candidate\n");
          return 1;
        }
        int regionIndex = 0;
        if ( numFullResRegions > 1 )
        {
          for (k = 1; k < numFullResRegions; k++)
            if ( (*fullResRegions)[k]->HasMorePixels( *((*fullResRegions)[regionIndex]) ) )
              regionIndex = k;
        }

        candidate = new HandCandidate( (*fullResRegions)[regionIndex] );
        if ( !candidate->GetScaledFeatures(1, 1, centroid, center, nearEdge, farEdge,
          shortLine, longLine, offsetLine, edgeAngle, offsetAngle) )
        {
          fprintf(stderr, "Error getting hand candidate features for flesh block %d\n", i);
          return 1;
        }
        angledBox = candidate->GetAngledBoundingBox(longLine);
        farPoints.clear();
        if ( !candidate->GetFarPoints(farPoints) )
          fprintf(stderr, "Error getting far points for flesh block %d\n", i);
        numFarPoints = farPoints.size();

        centroid = handImage.GetTopLevelCoords(centroid);
        center = handImage.GetTopLevelCoords(center);
        nearEdge = handImage.GetTopLevelCoords(nearEdge);
        farEdge = handImage.GetTopLevelCoords(farEdge);
        shortLine.Translate(left, top);
        longLine.Translate(left, top);
        offsetLine.Translate(left, top);
        angledBox.Translate(left, top);
        for (k = 0; k < numFarPoints; k++)
          farPoints[k] = handImage.GetTopLevelCoords(farPoints[k]);

        if ( !candidate->GetFeatureVector(features, input) )
        {
          fprintf(stderr, "Error getting hand candidate features for flesh block %d\n", i);
          return 1;
        }

        classIndex = postureDetector.Classify(input);
        if ( classIndex != -1 )
        {
          hand = new Hand;
          hand->SetBounds(left, right, top, bottom);
          hand->SetPostureString(postureDetector.GetClassName(classIndex));
          hands.push_back(hand);
        }

        delete candidate;

        outlineImage.DrawLine(longColor, 1, longLine);
        outlineImage.DrawLine(shortColor, 1, shortLine);
        outlineImage.DrawLine(offsetColor, 1, offsetLine);
        outlineImage.DrawLine(pointColor, 1, centroid, centroid);
        outlineImage.DrawLine(pointColor, 1, center, center);
        outlineImage.DrawLine(pointColor, 1, nearEdge, nearEdge);
        outlineImage.DrawLine(pointColor, 1, farEdge, farEdge);
        outlineImage.DrawRect(angledBoxColor, 1, angledBox);
        for (k = 0; k < numFarPoints; k++)
          outlineImage.DrawLine(farPointColor, 1, centroid, farPoints[k]);
      }
      numHands = hands.size();
      for (j = 0; j < numHands; j++)
      {
        hands[j]->GetBounds(left, right, top, bottom);
        outlineImage.DrawBox(hands[j]->GetPostureColor(0),
                             hands[j]->GetPostureColor(1),
                             hands[j]->GetPostureColor(2),
                             hands[j]->GetPostureColor(3),
                             3, left, top, right, bottom);
        delete hands[j];
      }
      hands.clear();
    }

    if ( !encoder.AddFrame(&outlineImage) )
    {
      fprintf(stderr, "Error inserting video frame\n");
      return 1;
    }
  }
  encoder.Close();

  return 0;
}
Example #5
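A hand-tracking driver: it loads open- and closed-hand Adaboost classifiers into a HandyTracker, then decodes the input video frame by frame, processes and annotates each frame, and writes the result to the output video.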
int main(int argc, char *argv[])
{
#if 0
  QCoreApplication a(argc, argv);
  return a.exec();
#endif
  VideoDecoder* videoDecoder = new VideoDecoder;
  VideoEncoder* videoEncoder = 0;
  AdaboostClassifier* openClassifier = new AdaboostClassifier;
  AdaboostClassifier* closedClassifier = new AdaboostClassifier;
  HandyTracker tracker;

  if ( argc != 5 )
  {
    printf("Usage: %s <open classifier> <closed classifier> <input video> <output video>\n", argv[0]);
    return 1;
  }

  if ( !openClassifier->Load(argv[1]) )
  {
    fprintf(stderr, "Failed loading open classifier\n", argv[1]);
    return 1;
  }

  if ( !tracker.SetOpenClassifier(openClassifier) )
  {
    fprintf(stderr, "Failed setting open classifier\n");
    return 1;
  }

  if ( !closedClassifier->Load(argv[2]) )
  {
    fprintf(stderr, "Failed loading closed classifier\n", argv[2]);
    return 1;
  }

  if ( !tracker.SetClosedClassifier(closedClassifier) )
  {
    fprintf(stderr, "Failed setting closed classifier\n");
    return 1;
  }

  videoDecoder->SetFilename(argv[3]);
  if ( !videoDecoder->Load() )
  {
    fprintf(stderr, "Failed loading video <%s>\n", argv[3]);
    return 1;
  }

  if ( !videoDecoder->UpdateFrame() )
  {
    fprintf(stderr, "Failed updating frame\n");
    return 1;
  }

  int frameNumber = 0;
  bool trackingInitialized = false;
  Image* img = videoDecoder->GetFrame();
  while ( img )
  {
    if ( !videoEncoder )
    {
      videoEncoder = new VideoEncoder;
      if ( !videoEncoder->Open(argv[4], img->GetWidth(), img->GetHeight(), 25) )
      {
        fprintf(stderr, "Failed opening output video <%s>\n", argv[4]);
        return 1;
      }
    }

    ProcessFrame(img, &tracker, trackingInitialized, frameNumber);
    if ( trackingInitialized )
      DrawResults(img, &tracker, frameNumber);

    videoEncoder->AddFrame(img);

    if ( frameNumber > 1 )
      tracker.PurgeRegion(frameNumber - 2);
    frameNumber++;

    videoDecoder->UpdateFrame();
    img = videoDecoder->GetFrame();
  }
  if ( videoEncoder )
    videoEncoder->Close();

  return 0;
}
Example #6
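A batch flesh-detection tool: it runs FleshDetector::Process() on every decoded frame, saves the resulting flesh, outline, and confidence images as numbered PPM files, and prints timing statistics when the video ends.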
int main(int argc, char* argv[])
{
  VideoDecoder decoder;
  FleshDetector fleshDetector;
  Image* inputImage;
  Image* fleshImage;
  Image* outlineImage;
  Image* confidenceImage;
  int frameNumber = 0;
  string vidFilename;
  char outputFilename[1024];

  if ( argc < 4 )
  {
    printf("Usage: %s <classifier file> <video file> <output directory>\n", argv[0]);
    return 1;
  }

  if ( !fleshDetector.Load(argv[1]) )
  {
    fprintf(stderr, "Error loading flesh detector %s\n", argv[1]);
    return 1;
  }

  vidFilename = argv[2];
  decoder.SetFilename(vidFilename);
  if ( !decoder.Load() )
  {
    fprintf(stderr, "Error loading video %s\n", argv[2]);
    return 1;
  }

  while ( decoder.UpdateFrame() )
  {
    inputImage = decoder.GetFrame();

    TimingAnalyzer_Start(0);
    if ( fleshDetector.Process(inputImage, &outlineImage, &fleshImage, &confidenceImage) )
    {
      TimingAnalyzer_Stop(0);

      snprintf(outputFilename, sizeof(outputFilename), "%s/flesh%05d.ppm", argv[3], frameNumber);
      fleshImage->Save(outputFilename);
      snprintf(outputFilename, sizeof(outputFilename), "%s/frame%05d.ppm", argv[3], frameNumber);
      outlineImage->Save(outputFilename);
      snprintf(outputFilename, sizeof(outputFilename), "%s/confidence%05d.ppm", argv[3], frameNumber);
      confidenceImage->Save(outputFilename);
    }

    frameNumber++;
  }
  printf("FleshDetector Process Time Min: %d\tMax: %d\tMean: %d\n",
         TimingAnalyzer_Min(0), TimingAnalyzer_Max(0), TimingAnalyzer_Mean(0));
  printf("FleshDetector GetFleshImage Time Min: %d\tMax: %d\tMean: %d\n",
         TimingAnalyzer_Min(1), TimingAnalyzer_Max(1), TimingAnalyzer_Mean(1));
  printf("FleshDetector GetOutlineImage Time Min: %d\tMax: %d\tMean: %d\n",
         TimingAnalyzer_Min(2), TimingAnalyzer_Max(2), TimingAnalyzer_Mean(2));
  printf("FleshDetector GetFleshConfidenceImage Time Min: %d\tMax: %d\tMean: %d\n",
         TimingAnalyzer_Min(3), TimingAnalyzer_Max(3), TimingAnalyzer_Mean(3));
  printf("FleshDetector CalcConfidence Time Min: %d\tMax: %d\tMean: %d\n",
         TimingAnalyzer_Min(4), TimingAnalyzer_Max(4), TimingAnalyzer_Mean(4));

  return 0;
}
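Across Examples #4-#6 the VideoDecoder/VideoEncoder pair is driven the same way: load the input, pump frames, open the encoder lazily once the first frame reveals the dimensions, and close it at the end. The sketch below distills that pattern into a minimal skeleton; it is not part of the original sources, uses only the calls already seen above (SetFilename/Load/UpdateFrame/GetFrame and Open/AddFrame/Close), and the frame rate of 25 is a placeholder.

// Minimal skeleton distilled from Examples #4-#6 (hypothetical helper, not from the original code)
int TranscodePattern(const char* inputFile, const char* outputFile)
{
  VideoDecoder decoder;
  VideoEncoder encoder;
  bool encoderOpen = false;

  decoder.SetFilename(inputFile);
  if ( !decoder.Load() )
    return 1;

  // Pump frames until the decoder runs out
  while ( decoder.UpdateFrame() )
  {
    Image* frame = decoder.GetFrame();

    // Open the encoder lazily, once the first frame reveals the dimensions
    if ( !encoderOpen )
    {
      if ( !encoder.Open(outputFile, frame->GetWidth(), frame->GetHeight(), 25) )
        return 1;
      encoderOpen = true;
    }

    // ... per-frame processing (detection, drawing, ...) would go here ...

    if ( !encoder.AddFrame(frame) )
      return 1;
  }

  if ( encoderOpen )
    encoder.Close();

  return 0;
}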