Example #1
int queue_picture(VideoState *is, AVFrame *pFrame, double pts) 
{
	VideoPicture *vp;
	//int dst_pix_fmt;
	AVPicture pict;
	/* wait until we have space for a new pic */
	SDL_LockMutex(is->pictq_mutex);
	while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit) 
	{
		SDL_CondWait(is->pictq_cond, is->pictq_mutex);
	}
	SDL_UnlockMutex(is->pictq_mutex);
	if(is->quit)
		return -1;
	// windex is set to 0 initially
	vp = &is->pictq[is->pictq_windex];
	/* allocate or resize the buffer! */
	if(!vp->bmp || vp->width != is->video_st->codec->width || vp->height != is->video_st->codec->height) 
	{
		SDL_Event event;
		vp->allocated = 0;
		/* we have to do it in the main thread */
		event.type = FF_ALLOC_EVENT;
		event.user.data1 = is;
		SDL_PushEvent(&event);
		/* wait until we have a picture allocated */
		SDL_LockMutex(is->pictq_mutex);
		while(!vp->allocated && !is->quit) 
		{
			SDL_CondWait(is->pictq_cond, is->pictq_mutex);
		}
		SDL_UnlockMutex(is->pictq_mutex);
		if(is->quit) 
		{
			return -1;
		}
	}
	/* We have a place to put our picture on the queue */
	/* If we are skipping a frame, do we set this to null
	but still return vp->allocated = 1? */
	if(vp->bmp) 
	{
		SDL_LockYUVOverlay(vp->bmp);
		//dst_pix_fmt = PIX_FMT_YUV420P;
		/* point pict at the queue */
		pict.data[0] = vp->bmp->pixels[0];
		pict.data[1] = vp->bmp->pixels[2];
		pict.data[2] = vp->bmp->pixels[1];
		pict.linesize[0] = vp->bmp->pitches[0];
		pict.linesize[1] = vp->bmp->pitches[2];
		pict.linesize[2] = vp->bmp->pitches[1];
		// Convert the image into YUV format that SDL uses
		sws_scale(is->sws_ctx, (uint8_t const * const *)pFrame->data,
					pFrame->linesize,
					0,
					is->video_st->codec->height,
					pict.data,
					pict.linesize
				);
		SDL_UnlockYUVOverlay(vp->bmp);

		AVPicture dstPict = pict;
		//avpicture_alloc(&dstPict, PIX_FMT_YUV420P, 640, 320);
		/* Crop 104 rows from the top and 80 columns from the left.
		 * The SDL overlay is YUV420P, so pass that pixel format. */
		if (av_picture_crop(&dstPict, &pict, AV_PIX_FMT_YUV420P, 104, 80) < 0)
			printf("av_picture_crop failed\n");
		//avpicture_free(&dstPict);
		//// screenshot operation
		//save_frame(pict, vp->width, vp->height);

		vp->pts = pts;
		/* now we inform our display thread that we have a pic ready */
		if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) 
		{
			is->pictq_windex = 0;
		}
		SDL_LockMutex(is->pictq_mutex);
		is->pictq_size++;
		SDL_UnlockMutex(is->pictq_mutex);
	}
	return 0;
}
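
Note: av_picture_crop() copies no pixels; the destination AVPicture only aliases the source planes, so the source buffer must outlive the crop. A minimal standalone sketch of a guarded helper (hypothetical name, assuming YUV420P and the legacy AVPicture API):

#include <libavcodec/avcodec.h>

/* Crop top/left bands off a YUV420P picture. dst only aliases src's
 * buffers, so src must remain valid while dst is in use. Bands are
 * rounded down to even values to keep the subsampled chroma aligned. */
static int crop_yuv420p_top_left(AVPicture *dst, const AVPicture *src,
                                 int top_band, int left_band)
{
    return av_picture_crop(dst, src, AV_PIX_FMT_YUV420P,
                           top_band & ~1, left_band & ~1);
}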
Example #2
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVFrame         *pFrameRGB = NULL;
  AVPacket        packet;
  int             frameFinished;
  int             numBytes;
  uint8_t         *buffer= NULL;
  int ret, got_frame;
  
  if(argc < 2) {
    printf("Please provide a movie file\n");
    return -1;
  }
  // Register all formats and codecs
  //  avcodec_register_all();
  av_register_all();
  

  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) < 0 ) {
    av_log(NULL, AV_LOG_ERROR,"파일을 열 수 없습니다.\n");
    return -1;
  }

  
    
  // Retrieve stream information
  if((ret = avformat_find_stream_info(pFormatCtx,NULL)) < 0 ) {
    av_log(NULL, AV_LOG_ERROR,"stream 정보을 찾을 수 없습니다.\n");
    return ret; // Couldn't find stream information
  }
  
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream = av_find_best_stream(pFormatCtx,AVMEDIA_TYPE_VIDEO,-1,-1,&pCodec,0);

  
  if(videoStream < 0 ) {
    av_log(NULL,AV_LOG_ERROR,"Cannot find a video stream in the input file\n");
    return videoStream;
  }

  
  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  /* pCodec=avcodec_find_decoder(pCodecCtx->codec_id); */
  /* if(pCodec==NULL) { */
  /*   av_log(NULL, AV_LOG_ERROR,"지원되지 않는 코덱입니다.\n"); */
  /*   return -1; // Codec not found */
  /* } */
  // Open codec


  if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=avcodec_alloc_frame();
  
  // Allocate an AVFrame structure
  pFrameRGB=avcodec_alloc_frame();
  if(pFrameRGB==NULL)
    return -1;

  
  // Determine required buffer size and allocate buffer
  numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, 
         		      pCodecCtx->height); 
  buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t)); 
  
  /* // Assign appropriate parts of buffer to image planes in pFrameRGB */
  /* // Note that pFrameRGB is an AVFrame, but AVFrame is a superset */
  /* // of AVPicture */
   avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, 
         	 pCodecCtx->width, pCodecCtx->height); 


  av_init_packet(&packet);
  packet.data = NULL;
  packet.size = 0;

  // Read frames and save the first 100 frames to disk
  i=0;

  
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
	// Crop the decoded frame. av_picture_crop() performs no pixel-format
	// conversion; it only offsets the plane pointers, so pass the frame's
	// native format and the top/left bands to remove (0 keeps everything).
	av_picture_crop((AVPicture *)pFrameRGB, (AVPicture*)pFrame, 
                        pCodecCtx->pix_fmt, 0, 0);
	
	// Save the frame to disk, stopping after the first 100
	if(++i<=100)
	  SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
	else
	  break;
      }
    }
    
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);

    
  }
  
  // Free the RGB image
  av_free(buffer);
  
  printf(" av_free ");
  av_free(pFrameRGB);
  
  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
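
This example calls SaveFrame() without defining it. In the tutorial code it derives from, SaveFrame() writes one packed RGB24 frame as a binary PPM file (the usual pipeline converts with sws_scale() first); a minimal sketch under that assumption:

#include <stdio.h>

void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
    char szFilename[32];
    FILE *pFile;
    int  y;

    sprintf(szFilename, "frame%d.ppm", iFrame);
    pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
        return;

    /* PPM header: magic number, dimensions, maximum color value */
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    /* Write row by row; linesize may include padding beyond width*3 */
    for (y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

    fclose(pFile);
}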
Example #3
int dc_video_scaler_scale(VideoInputData *video_input_data, VideoScaledData *video_scaled_data)
{
	int ret, index, src_height;
	VideoDataNode *video_data_node;
	VideoScaledDataNode *video_scaled_data_node;
	AVFrame *src_vframe;

	//step 1: try to lock an output slot; if none is available, return
	if (video_input_data->circular_buf.size > 1)
		dc_consumer_unlock_previous(&video_scaled_data->consumer, &video_input_data->circular_buf);

	ret = dc_producer_lock(&video_scaled_data->producer, &video_scaled_data->circular_buf);
	//not ready
	if (ret<0) {
		return -1;
	}
	dc_producer_unlock_previous(&video_scaled_data->producer, &video_scaled_data->circular_buf);

	//step 2: lock input
	ret = dc_consumer_lock(&video_scaled_data->consumer, &video_input_data->circular_buf);
	if (ret < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Video scaler got an end of input tbuffer!\n"));
		return -2;
	}

	//step 3: grab source and destination images
	video_data_node = (VideoDataNode*)dc_consumer_consume(&video_scaled_data->consumer, &video_input_data->circular_buf);
	video_scaled_data_node = (VideoScaledDataNode*)dc_producer_produce(&video_scaled_data->producer, &video_scaled_data->circular_buf);
	index = video_data_node->source_number;

	video_scaled_data->frame_duration = video_input_data->frame_duration;

	//crop if necessary
	if (video_input_data->vprop[index].crop_x || video_input_data->vprop[index].crop_y) {
#if 0
		av_frame_copy_props(video_scaled_data_node->cropped_frame, video_data_node->vframe);
		video_scaled_data_node->cropped_frame->width  = video_input_data->vprop[index].width  - video_input_data->vprop[index].crop_x;
		video_scaled_data_node->cropped_frame->height = video_input_data->vprop[index].height - video_input_data->vprop[index].crop_y;
#endif
		if (av_picture_crop((AVPicture*)video_scaled_data_node->cropped_frame, (AVPicture*)video_data_node->vframe, PIX_FMT_YUV420P, video_input_data->vprop[index].crop_y, video_input_data->vprop[index].crop_x) < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Video scaler: error while cropping picture.\n"));
			return -1;
		}
		src_vframe = video_scaled_data_node->cropped_frame;
		src_height = video_input_data->vprop[index].height - video_input_data->vprop[index].crop_y;
	} else {
		assert(!video_scaled_data_node->cropped_frame);
		src_vframe = video_data_node->vframe;
		src_height = video_input_data->vprop[index].height;
	}


	//rescale the cropped frame
	ret = sws_scale(video_scaled_data->vsprop[index].sws_ctx,
	                (const uint8_t * const *)src_vframe->data, src_vframe->linesize, 0, src_height,
	                video_scaled_data_node->v_frame->data, video_scaled_data_node->v_frame->linesize);

	if (!ret) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Video scaler: error while resizing picture.\n"));
		return -1;
	}
	video_scaled_data_node->v_frame->pts = video_data_node->vframe->pts;

	if (video_data_node->nb_raw_frames_ref) {
		if (video_data_node->nb_raw_frames_ref==1) {
#ifndef GPAC_USE_LIBAV
			av_frame_unref(video_data_node->vframe);
#endif
			av_free_packet(&video_data_node->raw_packet);
		}
		video_data_node->nb_raw_frames_ref--;
	}

	dc_consumer_advance(&video_scaled_data->consumer);
	dc_producer_advance(&video_scaled_data->producer, &video_scaled_data->circular_buf);

	if (video_input_data->circular_buf.size == 1)
		dc_consumer_unlock_previous(&video_scaled_data->consumer, &video_input_data->circular_buf);
	return 0;
}
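
av_picture_crop() leaves every width/height field untouched, which is why the scaler above tracks src_height by hand. For YUV420P the call reduces to re-pointing the three planes; a sketch of the equivalent arithmetic (hypothetical helper, assuming even offsets):

static void crop_yuv420p_manual(AVPicture *dst, const AVPicture *src,
                                int top, int left)
{
    /* Luma plane: full-resolution offsets. */
    dst->data[0] = src->data[0] + top * src->linesize[0] + left;
    /* Chroma planes: subsampled by 2 in each direction for YUV420P. */
    dst->data[1] = src->data[1] + (top / 2) * src->linesize[1] + left / 2;
    dst->data[2] = src->data[2] + (top / 2) * src->linesize[2] + left / 2;
    /* Strides are unchanged; only the starting pointers move. */
    dst->linesize[0] = src->linesize[0];
    dst->linesize[1] = src->linesize[1];
    dst->linesize[2] = src->linesize[2];
}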
Example #4
int main(int argc, char** argv){
	int quadrant_line, quadrant_column;
	
	char *videoFileName = argv[1];
	char quadFileName[64];

	int i = 0, k, j;

	long unsigned int inc = 0;
	long unsigned int incaudio = 0;

	int videoStreamIndex;
	int audioStreamIndex= -1;
	int frameFinished, gotPacket;

	AVDictionary	*codecOptions = NULL;
	
	UDP_PTSframe_t PTS_frame;

	struct tm *start_time_tm;
	char start_time_str[64];
	long unsigned int start_time;
	time_t start_timer_t;
	
	//Crop env
	int tam_quad;
	int first = 1, marginLeft = 0, marginTop = 0;
	int width, height;

    if(argc < 5){
        usage();    
        return -1;
    }

    signal (SIGTERM, handlerToFinish);
	signal (SIGINT, handlerToFinish);

    quadrant_line = atoi(argv[2]);
    quadrant_column = atoi(argv[3]);
    amount_of_quadrants = (quadrant_line * quadrant_column) + 1;
    tam_quad = sqrt(amount_of_quadrants);

    strcpy (quadFileName, argv[4]);

    //Allocate output stream contexts
    ff_output = malloc (sizeof(ff_output_t) * amount_of_quadrants);

	av_register_all();
	avformat_network_init();

	//Initialize Input
	if (avformat_open_input (&ff_input.formatCtx, videoFileName, NULL, NULL) != 0) {
		printf ("Cold not open input video file at %s\n", videoFileName);
		return -1;
	}

	if (avformat_find_stream_info(ff_input.formatCtx, NULL) < 0) {
		printf ("Cold not get stream info\n");
		return -1;
	}

	av_dump_format(ff_input.formatCtx, 0, videoFileName, 0);

	videoStreamIndex = av_find_best_stream(ff_input.formatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &ff_input.encoder, 0);
	if (videoStreamIndex < 0) {
		printf ("no video streams found\n");
		return -1;
	}

	audioStreamIndex = av_find_best_stream(ff_input.formatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, &ff_input.audioencoder, 0);
    if (audioStreamIndex < 0) {
        printf ("no audio streams found\n");
        return -1;
    }
    printf ("VIDEO ST %d, AUDIO ST %d\n", videoStreamIndex, audioStreamIndex);

    ff_input.audiocodecCtx = ff_input.formatCtx->streams[audioStreamIndex]->codec;
	ff_input.codecCtx = ff_input.formatCtx->streams[videoStreamIndex]->codec;

	if (avcodec_open2 (ff_input.audiocodecCtx, ff_input.audioencoder, NULL) < 0) {
        printf ("Could not open input codec\n");
        return -1;
    }

	if (avcodec_open2 (ff_input.codecCtx, ff_input.encoder, NULL) < 0) {
		printf ("Could not open input codec\n");
		return -1;
	}

	//Get system time and append as metadata
	getSystemTime (&PTS_frame.frameTimeVal); //Must be the same for all output contexts
	start_time = PTS_frame.frameTimeVal.tv_sec;
	start_timer_t = (time_t) start_time;
	start_time_tm = localtime (&start_timer_t);
	strftime(start_time_str, sizeof start_time_str, "%Y-%m-%d %H:%M:%S", start_time_tm);

	if (avformat_alloc_output_context2(&formatCtx, NULL, AV_OUTPUT_FORMAT, quadFileName) < 0) {
			printf ("could not create output context\n");
			return -1;
	}

	//Initialize Video Output Streams
	for (i = 0; i < amount_of_quadrants - 1; i++) {

		ff_output[i].outStream = avformat_new_stream (formatCtx, NULL);
		if (ff_output[i].outStream == NULL) {
			printf ("Could not create output stream\n");
			return -1;
		}

		ff_output[i].outStream->id = formatCtx->nb_streams - 1;

		ff_output[i].codecCtx = ff_output[i].outStream->codec;
		ff_output[i].encoder = avcodec_find_encoder_by_name (AV_OUTPUT_CODEC);
		if (ff_output[i].encoder == NULL) {
			printf ("Codec %s not found..\n", AV_OUTPUT_CODEC);
			return -1;
		}

		//Sliced sizes
		width = ff_input.codecCtx->width/quadrant_column;
		height = ff_input.codecCtx->height/quadrant_line;

		ff_output[i].codecCtx->codec_type 	= AVMEDIA_TYPE_VIDEO;
		ff_output[i].codecCtx->height 		= height;
		ff_output[i].codecCtx->width 		= width;
		ff_output[i].codecCtx->pix_fmt		= ff_input.codecCtx->pix_fmt;

		if (strcmp (AV_OUTPUT_CODEC, "libvpx") == 0) {
			//Maintain input aspect ratio for codec and stream info, and b_frames for codec info
			ff_output[i].codecCtx->sample_aspect_ratio = ff_input.codecCtx->sample_aspect_ratio;
			ff_output[i].codecCtx->max_b_frames = ff_input.codecCtx->max_b_frames;
			ff_output[i].outStream->sample_aspect_ratio = ff_output[i].codecCtx->sample_aspect_ratio;

			//Set custom BIT RATE and THREADs 
			ff_output[i].codecCtx->bit_rate 	= AV_OUTPUT_BITRATE;
			ff_output[i].codecCtx->thread_count = AV_OUTPUT_THREADS;
			ff_output[i].codecCtx->thread_type  = AV_OUTPUT_THREAD_TYPE;

			//Set custom timebase for codec and streams
			ff_output[i].codecCtx->time_base.num = 1;
			ff_output[i].codecCtx->time_base.den = AV_FRAMERATE;
			ff_output[i].outStream->time_base.num = 1;
			ff_output[i].outStream->time_base.den = 10000;			
		}

		if (strcmp (AV_OUTPUT_CODEC, "libx264") == 0) {
			// ff_output[i].codecCtx->profile = FF_PROFILE_H264_MAIN;
			// av_dict_set(&codecOptions, "profile","main",0);

			//Set custom BIT RATE and THREADs 
			ff_output[i].codecCtx->bit_rate 	= AV_OUTPUT_BITRATE;
			ff_output[i].codecCtx->thread_count = AV_OUTPUT_THREADS;
			ff_output[i].codecCtx->thread_type  = AV_OUTPUT_THREAD_TYPE;

			ff_output[i].codecCtx->bit_rate_tolerance = 0;
			ff_output[i].codecCtx->rc_max_rate = 0;
			ff_output[i].codecCtx->rc_buffer_size = 0;
			ff_output[i].codecCtx->gop_size = 40;
			ff_output[i].codecCtx->max_b_frames = 3;
			ff_output[i].codecCtx->b_frame_strategy = 1;
			ff_output[i].codecCtx->coder_type = 1;
			ff_output[i].codecCtx->me_cmp = 1;
			ff_output[i].codecCtx->me_range = 16;
			ff_output[i].codecCtx->qmin = 10;
			ff_output[i].codecCtx->qmax = 51;
			ff_output[i].codecCtx->scenechange_threshold = 40;
			ff_output[i].codecCtx->flags |= CODEC_FLAG_LOOP_FILTER;
			ff_output[i].codecCtx->me_method = ME_HEX;
			ff_output[i].codecCtx->me_subpel_quality = 5;
			ff_output[i].codecCtx->i_quant_factor = 0.71;
			ff_output[i].codecCtx->qcompress = 0.6;
			ff_output[i].codecCtx->max_qdiff = 4;

			//Set custom timebase for codec and streams
			ff_output[i].codecCtx->time_base.num = 1;
			ff_output[i].codecCtx->time_base.den = 24;
			ff_output[i].outStream->time_base.num = 1;
			ff_output[i].outStream->time_base.den = 90000;		
		}

		formatCtx->start_time_realtime = (int64_t)start_time * 1000000; //field is in microseconds since the Unix epoch
		av_dict_set (&formatCtx->metadata, "service_name", start_time_str, 0);
		av_dict_set (&formatCtx->metadata, "creation_time", start_time_str, 0);

		//Open codec
		if (avcodec_open2(ff_output[i].codecCtx, ff_output[i].encoder, &codecOptions)) {
			printf ("Could not open output codec...\n");
			return -1;
		}
	}

	//Initializing Audio Output
	i = amount_of_quadrants-1; //Last stream
	ff_output[i].outStream = avformat_new_stream (formatCtx, NULL);
	if (ff_output[i].outStream == NULL) {
		printf ("Could not create output stream\n");
		return -1;
	}

	ff_output[i].outStream->id = formatCtx->nb_streams - 1;

	ff_output[i].codecCtx = ff_output[i].outStream->codec;
	ff_output[i].encoder = avcodec_find_encoder (ff_input.audiocodecCtx->codec_id);
	if (ff_output[i].encoder == NULL) {
		printf ("Codec %s not found..\n", AUDIO_OUTPUT_CODEC);
		return -1;
	}
  
    ff_output[i].codecCtx = ff_output[amount_of_quadrants-1].outStream->codec;
    ff_output[i].codecCtx->codec_id = ff_input.audiocodecCtx->codec_id;
    ff_output[i].codecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
    ff_output[i].codecCtx->sample_fmt = ff_input.audiocodecCtx->sample_fmt;
    ff_output[i].codecCtx->sample_rate = ff_input.audiocodecCtx->sample_rate;
    ff_output[i].codecCtx->channel_layout = ff_input.audiocodecCtx->channel_layout;
    ff_output[i].codecCtx->channels = av_get_channel_layout_nb_channels(ff_output[amount_of_quadrants-1].codecCtx->channel_layout);
    ff_output[i].codecCtx->bit_rate = ff_input.audiocodecCtx->bit_rate;  
    ff_output[i].codecCtx->sample_aspect_ratio = ff_input.audiocodecCtx->sample_aspect_ratio;
    ff_output[i].codecCtx->max_b_frames = ff_input.audiocodecCtx->max_b_frames;
    ff_output[i].outStream->sample_aspect_ratio = ff_output[i].codecCtx->sample_aspect_ratio;

    ff_output[i].outStream->time_base.num = ff_input.formatCtx->streams[audioStreamIndex]->time_base.num;
	ff_output[i].outStream->time_base.den = ff_input.formatCtx->streams[audioStreamIndex]->time_base.den;

	ff_output[i].codecCtx->time_base.num = ff_input.audiocodecCtx->time_base.num;
	ff_output[i].codecCtx->time_base.den = ff_input.audiocodecCtx->time_base.den;

	printf("sample_rate %d\n", ff_input.audiocodecCtx->sample_rate);

	//Open codec
	if (avcodec_open2(ff_output[i].codecCtx, ff_output[i].encoder, &codecOptions)) {
		printf ("Could not open output codec...\n");
		return -1;
	}

	av_dump_format (formatCtx, 0, quadFileName, 1);

	//Open output context
	if (avio_open (&formatCtx->pb, quadFileName, AVIO_FLAG_WRITE)) {
		printf ("avio_open failed %s\n", quadFileName);
		return -1;
	}
	
	//Write format context header
	if (avformat_write_header (formatCtx, &formatCtx->metadata)) {
		printf ("fail to write outstream header\n");
		return -1;
	}

	printf ("OUTPUT TO %s, at %lu\n", quadFileName, start_time);


	incaudio = 0;
	printf("Generating video streams...\n");
	while(av_read_frame (ff_input.formatCtx, &ff_input.packet) >= 0 && _keepEncoder) {
		if (ff_input.packet.stream_index == audioStreamIndex)
		{
			av_packet_ref (&ff_output[amount_of_quadrants-1].packet, &ff_input.packet);
			ff_output[amount_of_quadrants-1].packet.stream_index = amount_of_quadrants-1;
			ff_output[amount_of_quadrants-1].packet.pts = incaudio;

			if (av_write_frame (formatCtx, &ff_output[amount_of_quadrants-1].packet) < 0) {
				printf ("Unable to write to output stream..\n");
				pthread_exit(NULL);
			}
			incaudio += 2880;
		}

		if (ff_input.packet.stream_index == videoStreamIndex) {

			ff_input.frame = av_frame_alloc();
			avcodec_decode_video2 (ff_input.codecCtx, ff_input.frame, &frameFinished, &ff_input.packet);

			if (frameFinished) {
				//TODO: Slice inputFrame and fill avQuadFrames[quadrant]
				//By now, inputFrame are replicated to all quadrants

				ff_input.frame->pts = av_frame_get_best_effort_timestamp (ff_input.frame);
				
				i = 0;
				for ( k = 0; k < quadrant_line; ++k) {
                    for (j = 0; j < quadrant_column; ++j) {
            			ff_output[i].frame = av_frame_alloc();

            			//make the cut quadrant ff_output[i]!
            			av_picture_crop((AVPicture *)ff_output[i].frame, (AVPicture *)ff_input.frame,       
            							ff_input.formatCtx->streams[videoStreamIndex]->codec->pix_fmt, marginTop, marginLeft);
            			
            			ff_output[i].frame->width = width;   // width of the sliced quadrant
						ff_output[i].frame->height = height; // height of the sliced quadrant
						ff_output[i].frame->format = ff_input.frame->format;

						ff_output[i].frame->pts = inc;

						ff_output[i].packet.data = NULL;
						ff_output[i].packet.size = 0;
						av_init_packet (&ff_output[i].packet);

						avcodec_encode_video2 (ff_output[i].codecCtx, &ff_output[i].packet, ff_output[i].frame, &gotPacket);

						if (gotPacket) {
							ff_output[i].packet.stream_index = i;
							av_packet_rescale_ts (&ff_output[i].packet,
													ff_output[i].codecCtx->time_base,
													ff_output[i].outStream->time_base);

							if (av_write_frame (formatCtx, &ff_output[i].packet) < 0) {
								printf ("Unable to write to output stream..\n");
								pthread_exit(NULL);
							}

						}

						av_frame_free (&ff_output[i].frame);	

						i++;
						marginLeft += width;	

            		}
            		marginLeft = 0;
            		marginTop += height;
            	}
            	marginTop = 0; 
            	i = 0;
            	inc++;
			}
			av_frame_free (&ff_input.frame);
		}
	}

	av_write_trailer (formatCtx);

	return 0;
}
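
All four examples use the legacy AVPicture API, which FFmpeg has since deprecated. On newer builds the usual replacement is the AVFrame crop fields plus av_frame_apply_cropping(); a minimal sketch (assuming an FFmpeg recent enough to ship that function):

#include <libavutil/frame.h>

/* Crop an AVFrame in place: set the crop_* fields, then let
 * av_frame_apply_cropping() adjust the data pointers and dimensions. */
static int crop_frame(AVFrame *frame, int top, int left,
                      int bottom, int right)
{
    frame->crop_top    = top;
    frame->crop_left   = left;
    frame->crop_bottom = bottom;
    frame->crop_right  = right;
    /* Pass AV_FRAME_CROP_UNALIGNED instead of 0 if unaligned plane
     * pointers are acceptable. */
    return av_frame_apply_cropping(frame, 0);
}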