Example #1
void *video_thread(void *arg) {
  JNIEnv *env;
  if((*g_jvm)->AttachCurrentThread(g_jvm, &env, NULL) != JNI_OK) {
       LOGE(1, "### start video thead error");
	   return;
  }
  VideoState *is = (VideoState*)arg;
  AVPacket pkt1, *packet = &pkt1;
  int len1, frameFinished;
  AVFrame *pFrame;
  double pts;
  pFrame = avcodec_alloc_frame();
  for(;;) {
	if(is->quit == 1 || is->quit == 2) {
		break;
	}
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
	  if(debug) LOGI(10,"video_thread get packet exit");
      break;
    }
    pts = 0;
    global_video_pkt_pts = packet->pts;
    len1 = avcodec_decode_video2(is->video_st->codec,
				pFrame,
				&frameFinished,
				packet);	
    if(packet->dts == AV_NOPTS_VALUE
       && pFrame->opaque
       && *(uint64_t*)pFrame->opaque
       != AV_NOPTS_VALUE) {
      pts = *(uint64_t*) pFrame->opaque;
    } else if (packet->dts != AV_NOPTS_VALUE) {
      pts = packet->dts;
    } else {
      pts = 0;
    }
    pts *= av_q2d(is->video_st->time_base);
	//pts *= av_q2d(pCodecCtx->time_base);
    if (frameFinished) {
       pts = synchronize_video(is, pFrame, pts);
       if (queue_picture(is, pFrame, pts) < 0) {
			break;
       }
    }
    av_free_packet(packet);
  }
  av_free(pFrame);
  if((*g_jvm)->DetachCurrentThread(g_jvm) != JNI_OK) {
	LOGE(1,"### detach video thread error");
  }
  if(debug) {
		LOGI(1,"### video_thread exit");
  }
  pthread_exit(0);
  return ((void *)0);
}
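Note: several of the examples on this page read a PTS back out of pFrame->opaque. That only works if the codec context was given custom buffer callbacks that stash global_video_pkt_pts into every frame the decoder allocates. None of the examples include that part; below is a minimal sketch of the idiom as it appears in the classic dranger ffmpeg tutorial, targeting the same pre-2.0 FFmpeg API as these examples (the callback names are the tutorial's, not necessarily those used by each example).

uint64_t global_video_pkt_pts = AV_NOPTS_VALUE;

/* get_buffer callback: allocate the frame as usual, then stash the PTS
   of the packet that started this frame into pic->opaque */
int our_get_buffer(struct AVCodecContext *c, AVFrame *pic) {
  int ret = avcodec_default_get_buffer(c, pic);
  uint64_t *pts = av_malloc(sizeof(uint64_t));
  *pts = global_video_pkt_pts;
  pic->opaque = pts;
  return ret;
}

/* matching release callback: free the stashed PTS before releasing the frame */
void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) {
  if(pic)
    av_freep(&pic->opaque);
  avcodec_default_release_buffer(c, pic);
}

/* installed once after avcodec_open2():
     codecCtx->get_buffer = our_get_buffer;
     codecCtx->release_buffer = our_release_buffer;       */

Newer FFmpeg removed these callbacks; Examples #5 and #8 show the replacement, av_frame_get_best_effort_timestamp().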
Example #2
int video_thread(void *arg) {
    VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int frameFinished;
    AVFrame *pFrame;
    double pts;

    pFrame = avcodec_alloc_frame();

    for(;;) {
        if(packet_queue_get(&is->videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }

        if(packet->data == flush_pkt.data) {
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        pts = 0;

        // Save global pts to be stored in pFrame in first call
        global_video_pkt_pts = packet->pts;
        // Decode video frame
        avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
                              packet);

        if(packet->dts == AV_NOPTS_VALUE
                && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
            pts = *(uint64_t *)pFrame->opaque;

        } else if(packet->dts != AV_NOPTS_VALUE) {
            pts = packet->dts;

        } else {
            pts = 0;
        }

        pts *= av_q2d(is->video_st->time_base);

        // Did we get a video frame?
        if(frameFinished) {
            pts = synchronize_video(is, pFrame, pts);

            if(queue_picture(is, pFrame, pts) < 0) {
                break;
            }
        }

        av_free_packet(packet);
    }

    av_free(pFrame);
    return 0;
}
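Nearly every example on this page forwards the decoded pts through a synchronize_video() helper before queueing the picture, but none of them define it. For reference, a minimal sketch in the spirit of the dranger tutorial version, assuming VideoState carries a video_clock field (names follow the tutorial, not necessarily each example's own VideoState):

double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) {
  double frame_delay;

  if(pts != 0) {
    /* if we have a pts, set the video clock to it */
    is->video_clock = pts;
  } else {
    /* if we are not given a pts, fall back to the running video clock */
    pts = is->video_clock;
  }
  /* advance the clock by one frame duration */
  frame_delay = av_q2d(is->video_st->codec->time_base);
  /* if we are repeating a frame, adjust the clock accordingly */
  frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
  is->video_clock += frame_delay;
  return pts;
}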
Example #3
int video_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket pkt1, *packet = &pkt1;
  int len1, frameFinished;
  AVFrame *pFrame;
  double pts;

  pFrame = avcodec_alloc_frame();

  is->rgbaFrame = avcodec_alloc_frame();
  avpicture_alloc ((AVPicture *)is->rgbaFrame, PIX_FMT_RGBA, is->video_st->codec->width, is->video_st->codec->height);


  for(;;) {
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
      // means we quit getting packets
      break;
    }
    pts = 0;

    // Save global pts to be stored in pFrame
    global_video_pkt_pts = packet->pts;
    // Decode video frame
    len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
        packet);
    if(packet->dts == AV_NOPTS_VALUE
       && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
      pts = (double)(*(uint64_t *)pFrame->opaque);
    } else if(packet->dts != AV_NOPTS_VALUE) {
		pts = (double)packet->dts;
    } else {
      pts = 0;
    }
    pts *= av_q2d(is->video_st->time_base);


    // Did we get a video frame?
    if(frameFinished) {
      pts = synchronize_video(is, pFrame, pts);
      if(queue_picture(is, pFrame, pts) < 0) {
        break;
      }
    }
    av_free_packet(packet);
  }

  SDL_CloseAudio();

  av_free(pFrame);

  avpicture_free((AVPicture *)is->rgbaFrame);
  av_free(is->rgbaFrame);

  return 0;
}
Example #4
int video_thread(void *arg) {
	VideoState *is = (VideoState*)arg;
	AVPacket pkt1, *packet = &pkt1;
	int len1, frameFinished;
	AVFrame *pFrame;
	double pts;

	pFrame = avcodec_alloc_frame();

	for(;;) {
		//printf("video_thread loop 1\n");
		if(packet_queue_get(&is->videoq, packet, 1) < 0) {
			fprintf(stderr, "%d: packet_queue_get error\n", __LINE__);
			break;
		}
		//printf("video_thread loop 2\n");
		if(packet->data == flush_pkt.data) {
			avcodec_flush_buffers(is->video_st->codec);
			continue;
		}
		//printf("video_thread loop 3\n");
		pts = 0;
		global_video_pkt_pts = packet->pts;
		//printf("video_thread loop 4\n");
		len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished, packet);
/*
		if(packet->dts == AV_NOPTS_VALUE && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
			pts = *(uint64_t*)pFrame->opaque;
		} else if(packet->dts != AV_NOPTS_VALUE) {
			pts = packet->dts;
		} else {
			pts = 0;
		}

		pts *= av_q2d(is->video_st->time_base);
*/
		//printf("video_thread loop 5\n");
		if(frameFinished) {
			//printf("video_thread loop 6\n");
			pts = synchronize_video(is, pFrame, pts);
			//printf("video_thread loop 7\n");
			if(queue_picture(is, pFrame, pts) < 0) 
			{
				//printf("video_thread loop 8\n");
				break;
			}
		}
		//printf("video_thread loop 6\n");
		av_free_packet(packet); 
	}
	av_free(pFrame);
	//printf("video_thread loop end\n");
	return 0;
}
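Examples #2, #4 and #5 compare packet->data against a special flush_pkt to detect that a seek just happened. The sentinel is prepared once at startup and pushed by the demux thread after a seek. A minimal sketch of the idiom, again after the dranger tutorial; the two wrapper functions here are hypothetical names for illustration, and packet_queue_flush is assumed to be the examples' own queue-emptying helper:

AVPacket flush_pkt;

/* one-time setup, before the decode threads start */
static void init_flush_pkt(void) {
  av_init_packet(&flush_pkt);
  flush_pkt.data = (uint8_t *)"FLUSH";
}

/* demux thread, after a successful av_seek_frame(): empty the video queue
   and push the sentinel so video_thread() calls avcodec_flush_buffers() */
static void signal_video_flush(VideoState *is) {
  packet_queue_flush(&is->videoq);
  packet_queue_put(&is->videoq, &flush_pkt);
}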
Example #5
int video_thread(void* arg) {
	VideoState* is = (VideoState*) arg;
	AVPacket pkt1, *packet = &pkt1;
	int frameFinished;
	AVFrame* pFrame, *pFrameYUV;
	uint8_t *out_buffer;
	double pts;

	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();

	out_buffer = (uint8_t*) av_malloc(
			avpicture_get_size(AV_PIX_FMT_YUV420P, is->video_ctx->width,
					is->video_ctx->height));
	avpicture_fill((AVPicture*) pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P,
			is->video_ctx->width, is->video_ctx->height);

	for (;;) {
		if (packet_queue_get(&is->videoq, packet, 1, 1) < 0) {
			break;
		}

		if (packet->data == flush_pkt.data) {
			avcodec_flush_buffers(is->video_ctx);
			continue;
		}

		pts = 0;
		avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, packet);

		if ((pts = av_frame_get_best_effort_timestamp(pFrame)) == AV_NOPTS_VALUE) {
			pts = 0;
		}
		pts *= av_q2d(is->video_st->time_base);

		printf("queue_picture frameFinished=%d  packet->size=%d  pts=%lf\n",
				frameFinished, packet->size, pts);
		if (frameFinished) {
			pts = synchronize_video(is, pFrame, pts);
			if (queue_picture(is, pFrame, pFrameYUV, pts) < 0) {
				break;
			}
		}
		av_free_packet(packet);
	}

	av_frame_free(&pFrame);
	av_frame_free(&pFrameYUV);
	return 0;
}
Example #6
int video_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket pkt1, *packet = &pkt1;
  int len1, frameFinished;
  AVFrame *pFrame;
  double pts;

  pFrame = avcodec_alloc_frame();

  for(;;) {
    // if we stopped getting packets
    if( packet_queue_get(&is->videoq, packet, 1) < 0 ) {
      break;
    }

    pts = 0;
    // Save global pts to be stored in pFrame in first call
    global_video_pkt_pts = packet->pts;

    // Decode video frame
    //frameCount++;
    len1 = avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, packet );

    if(packet->dts != AV_NOPTS_VALUE) {
      pts = packet->dts;
    } else {
      pts = 0;
    }
    pts *= av_q2d(is->video_ctx->time_base);


    // Frame is decoded, queue it to be played
    if(frameFinished) {
      pts = synchronize_video( is, pFrame, pts);
      if(queue_picture(is, pFrame, pts) < 0) {
        break;
      }
    }

    // Cleanup...
    av_free_packet(packet);
  }
  av_free(pFrame);
  return 0;
}
Example #7
int video_thread(void *arg)
{
    VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int frameFinished;
    AVFrame *pFrame;

    pFrame = av_frame_alloc();
    double pts;
    for(;;) {
        if(packet_queue_get(&is->videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        pts = 0;
        global_video_pkt_pts = packet->pts;
        // Decode video frame
        avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
                packet);
        /* if(packet->dts == AV_NOPTS_VALUE */
        /*         && pFrame->opaque && *(uint64_t*)pFrame->opaque != (uint64_t)AV_NOPTS_VALUE) { */
        /*     pts = *(uint64_t *)pFrame->opaque; */
        /* } else if(packet->dts != AV_NOPTS_VALUE) { */
        /*     pts = packet->dts; */
        /* } else { */
        /*     pts = 0; */
        /* } */
        if(packet->dts != AV_NOPTS_VALUE)
            pts = packet->dts;
        // convert pts to seconds for display
        pts *= av_q2d(is->video_st->time_base);
        // Did we get a video frame?
        if(frameFinished) {
            pts = synchronize_video(is, pFrame, pts);
            if(queue_picture(is, pFrame,pts) < 0) {
                break;
            }
        }
        av_free_packet(packet);
    }
    av_free(pFrame);
    return 0;
}
Example #8
int video_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket pkt1, *packet = &pkt1;
  int frameFinished;
  AVFrame *pFrame;
  double pts;

  pFrame = av_frame_alloc();

  for(;;) {
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
      // means we quit getting packets
      break;
    }
    pts = 0;

    // Decode video frame
    avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, packet);

    if((pts = av_frame_get_best_effort_timestamp(pFrame)) == AV_NOPTS_VALUE) {
      pts = 0;
    }
    pts *= av_q2d(is->video_st->time_base);

    // Did we get a video frame?
    if(frameFinished) {
      pts = synchronize_video(is, pFrame, pts);
      if(queue_picture(is, pFrame, pts) < 0) {
        break;
      }
    }
    av_free_packet(packet);
  }
  av_frame_free(&pFrame);
  return 0;
}
Example #9
bool VideoPlayer::playback()
{
    clock.restart();

    while (!stopPlayback && pFormatCtx && av_read_frame(pFormatCtx, &(packet))>=0)
    {
        if (!stopPlayback && (packet.stream_index == streamIndex))
        {
            AVPacket avpkt;
            av_init_packet(&avpkt);
            avpkt.data = packet.data;
            avpkt.size = packet.size;
            avcodec_decode_video2(pCodecCtx, pFrame, &(frameFinished), &avpkt);

            double pts = 0;

            if (packet.dts == AV_NOPTS_VALUE && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE)
            {
                pts = *(uint64_t *)pFrame->opaque;
            }
            else if (packet.dts != AV_NOPTS_VALUE)
            {
                pts = packet.dts;
            }
            else
            {
                pts = 0;
            }

            pts *= av_q2d(video_st->time_base);

            if (frameFinished)
            {
                dd->boundingWidth = dd->boundingRect().width();
                dd->boundingHeight = dd->boundingRect().height();

                if (dd->boundingWidth > screenWidth)
                {
                    dd->boundingWidth = screenWidth;
                }

                if (dd->boundingHeight > screenHeight)
                {
                    dd->boundingHeight = screenHeight;
                }

                int useFilter = SWS_FAST_BILINEAR;

                switch (dd->m_swsFilter)
                {
                    case DD_F_FAST_BILINEAR: useFilter = SWS_FAST_BILINEAR; break;
                    case DD_F_BILINEAR: useFilter = SWS_BILINEAR; break;
                    case DD_F_BICUBIC: useFilter = SWS_BICUBIC; break;
                    case DD_F_X: useFilter = SWS_X; break;
                    case DD_F_POINT: useFilter = SWS_POINT; break;
                    case DD_F_AREA: useFilter = SWS_AREA; break;
                    case DD_F_BICUBLIN: useFilter = SWS_BICUBLIN; break;
                    case DD_F_GAUSS: useFilter = SWS_GAUSS; break;
                    case DD_F_SINC: useFilter = SWS_SINC; break;
                    case DD_F_LANCZOS: useFilter = SWS_LANCZOS; break;
                    case DD_F_SPLINE: useFilter = SWS_SPLINE; break;
                }

                SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, dd->boundingWidth, dd->boundingHeight, PIX_FMT_RGB32, useFilter, NULL, NULL, NULL);

                dd->mutex->lock();

                sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, dd->pFrameRGB->data, dd->pFrameRGB->linesize);
                sws_freeContext(img_convert_ctx);

                dd->mutex->unlock();

                pts = synchronize_video(pts);
                double delay = 0;

                switch (dd->m_fpsRate)
                {
                    case DD_FPS_AUTO: delay = (pts - last_pts); break;
                    case DD_FPS_LIMIT_30: delay = 0.0333; break;
                    case DD_FPS_LIMIT_25: delay = 0.04; break;
                    case DD_FPS_LIMIT_20: delay = 0.05; break;
                    case DD_FPS_LIMIT_15: delay = 0.0666; break;
                    case DD_FPS_LIMIT_10: delay = 0.1; break;
                }

                if (delay <= 0 || delay >= 1.0)
                {
                    delay = last_delay;
                }

                last_pts = pts;
                last_delay = delay;

                int elapsed = clock.restart();

                int wait = (delay*1000)-elapsed;

                dd->updateFrame();

                if (wait > 0)
                {
                    QThread::msleep(wait);
                }

                clock.restart();

            }
        }

        av_free_packet(&(packet));
    }

    if (pFormatCtx)
    { 
        av_seek_frame(pFormatCtx, streamIndex, 0,  AVSEEK_FLAG_FRAME);
        return true;
    } else
    {
        return false;
    }
}
Example #10
int video_thread(void *arg) {
	VideoState *is = (VideoState *) arg;
	AVPacket pkt1, *packet = &pkt1;
	int frameFinished;
	AVFrame *pFrame;
	AVFrame *pFrameRGB;
	double pts;
	int numBytes;
	uint8_t *buffer;

	// Allocate an AVFrame structure
	pFrameRGB = avcodec_alloc_frame();
	if (pFrameRGB == NULL)
		return -1;

	// Determine required buffer size and allocate buffer
	numBytes = avpicture_get_size(PIX_FMT_RGB24, is->video_st->codec->width,
			is->video_st->codec->height);
	buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24,
			is->video_st->codec->width, is->video_st->codec->height);


	pFrame = avcodec_alloc_frame();

	for (;;) {
		if (packet_queue_get(&is->videoq, packet, 1) < 0) {
			// means we quit getting packets
			break;
		}

		pts = 0;

		// Save global pts to be stored in pFrame in first call
		global_video_pkt_pts = packet->pts;

		// Decode video frame

		avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
				packet);

		if (packet->dts == AV_NOPTS_VALUE && pFrame->opaque
				&& *(uint64_t*) pFrame->opaque != AV_NOPTS_VALUE) {
			pts = *(uint64_t *) pFrame->opaque;
		} else if (packet->dts != AV_NOPTS_VALUE) {
			pts = packet->dts;
		} else {
			pts = 0;
		}
		pts *= av_q2d(is->video_st->time_base);

		// Did we get a video frame?
		if (frameFinished) {
			pts = synchronize_video(is, pFrame, pts);
			if (queue_picture(is, pFrame, pFrameRGB, pts) < 0) {
				break;
			}
		}
		av_free_packet(packet);
	}
	av_free(pFrame);
	av_free(buffer);
	av_free(pFrameRGB);
	return 0;
}
Example #11
int video_thread(void *arg)
{
    ZW_LOG_WARNING(QString("TTTTTTTV"));
    VideoState *is = (VideoState *) arg;
    AVPacket pkt1, *packet = &pkt1;

    int ret, got_picture, numBytes;

    double video_pts = 0; // current video pts
    double audio_pts = 0; // audio pts


    /// video decoding state
    AVFrame *pFrame, *pFrameRGB;
    uint8_t *out_buffer_rgb; // decoded RGB data
    struct SwsContext *img_convert_ctx;  // for converting the decoded video format

    AVCodecContext *pCodecCtx = is->video_st->codec; // video decoder context

    pFrame = av_frame_alloc();
    pFrameRGB = av_frame_alloc();

    /// here we convert the decoded YUV data to RGB32
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
            pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
            AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);

    numBytes = avpicture_get_size(AV_PIX_FMT_RGB32, pCodecCtx->width,pCodecCtx->height);

    out_buffer_rgb = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
    avpicture_fill((AVPicture *) pFrameRGB, out_buffer_rgb, AV_PIX_FMT_RGB32,
            pCodecCtx->width, pCodecCtx->height);

    while(1)
    {
        if (is->quit)
        {
            ZW_LOG_WARNING(QString("TTTTTTTV"));
            break;
        }

        if (is->isPause == true) // check for pause
        {
            ZW_LOG_WARNING(QString("TTTTTTTV"));
            SDL_Delay(10);
            continue;
        }

        if (packet_queue_get(&is->videoq, packet, 0) <= 0)
        {
            ZW_LOG_WARNING(QString("TTTTTTTV"));
            if (is->readFinished)
            { // the queue is empty and reading has finished
                ZW_LOG_WARNING(QString("TTTTTTTV"));
                break;
            }
            else
            {
                ZW_LOG_WARNING(QString("TTTTTTTV"));
                SDL_Delay(1); // the queue is only temporarily empty
                continue;
            }
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        // receiving this packet means a seek was just performed; the decoder buffers must be flushed
        if(strcmp((char*)packet->data,FLUSH_DATA) == 0)
        {
            avcodec_flush_buffers(is->video_st->codec);
            av_free_packet(packet);
            continue;
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,packet);

        if (ret < 0) {
            qDebug()<<"decode error.\n";
            av_free_packet(packet);
            continue;
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        if (packet->dts == AV_NOPTS_VALUE && pFrame->opaque&& *(uint64_t*) pFrame->opaque != AV_NOPTS_VALUE)
        {
            video_pts = *(uint64_t *) pFrame->opaque;
        }
        else if (packet->dts != AV_NOPTS_VALUE)
        {
            video_pts = packet->dts;
        }
        else
        {
            video_pts = 0;
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        video_pts *= av_q2d(is->video_st->time_base);
        video_pts = synchronize_video(is, pFrame, video_pts);

        if (is->seek_flag_video)
        {
            // a seek happened: skip the frames between the keyframe and the target time
           if (video_pts < is->seek_time)
           {
               ZW_LOG_WARNING(QString("TTTTTTTV"));
               av_free_packet(packet);
               continue;
           }
           else
           {
               ZW_LOG_WARNING(QString("TTTTTTTV"));
               is->seek_flag_video = 0;
           }
        }

        while(1)
        {
            if (is->quit)
            {
                ZW_LOG_WARNING(QString("TTTTTTTV"));
                break;
            }

            audio_pts = is->audio_clock;

            // mainly: on seek we reset video_clock to 0,
            // so video_pts must be refreshed here; otherwise
            // seeking backwards would get stuck in this loop
            video_pts = is->video_clock;
            ZW_LOG_WARNING(QString("TTTTTTTVaudio_pts=%1,video_pts=%2").arg(audio_pts).arg(video_pts));


            if (video_pts <= audio_pts) break;
//            if (video_pts >= audio_pts) break;

            int delayTime = (video_pts - audio_pts) * 1000;

            delayTime = delayTime > 5 ? 5:delayTime;
            ZW_LOG_WARNING(QString("TTTTTTTV"));

            SDL_Delay(delayTime);
        }

        if (got_picture) {
            sws_scale(img_convert_ctx,
                    (uint8_t const * const *) pFrame->data,
                    pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data,
                    pFrameRGB->linesize);

            // load this RGB data into a QImage
            QImage tmpImg((uchar *)out_buffer_rgb,pCodecCtx->width,pCodecCtx->height,QImage::Format_RGB32);
            QImage image = tmpImg.copy(); // copy the image to hand to the UI for display
            ZW_LOG_WARNING(QString("TTTTTTTV"));
            is->player->disPlayVideo(image); // call the function that emits the display signal
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        av_free_packet(packet);
        ZW_LOG_WARNING(QString("TTTTTTTV"));

    }

    av_free(pFrame);
    av_free(pFrameRGB);
    av_free(out_buffer_rgb);

    if (!is->quit)
    {
        ZW_LOG_WARNING(QString("TTTTTTTV"));
        is->quit = true;
    }

    ZW_LOG_WARNING(QString("TTTTTTTV"));
    is->videoThreadFinished = true;

    return 0;
}
Example #12
void VideoThread::run(){

	/* allocate the YUV and RGB frames */
	pFrame = avcodec_alloc_frame();
	pFrameRGB = avcodec_alloc_frame();

	/* from this point on, allow the window to refresh */
	_is->window->startdisplay();

	//Calculate the size in bytes that a picture of the given width and height would occupy if stored in the given picture format.
	bytes = avpicture_get_size(CONV_FORMAT, _is->video_st->codec->width, _is->video_st->codec->height);

	uint8_t *video_buffer = (uint8_t*)av_malloc( bytes * sizeof(uint8_t) );
	
	avpicture_fill((AVPicture *)pFrameRGB, video_buffer, CONV_FORMAT, _is->video_st->codec->width, _is->video_st->codec->height);

	/*
	frame reading loop:
	fetch packets from the queue
	decode the YUV frame
	convert the frame to RGB
	push the RGB frame onto the new queue
	*/
	while(1) {

		if(_is->ut.getPauseValue() && !_is->ut.getStopValue()){
			continue;
			//this->usleep(10000);
		}

		// read packets from the queue
		if(_is->videoq.Get(packet, 1) < 0){
			// means we quit getting packets
			
			//qDebug() << "quitting getting packets - videothread";
			break;
		}

		// check whether we read a FLUSH packet
		if(packet->data == _is->flush_pkt->data){
			
			//qDebug() << "VideoThread - read FLUSH PKT";
			
			avcodec_flush_buffers(_is->video_st->codec);

			_is->pictq.Flush();

			_is->frame_last_pts = AV_NOPTS_VALUE;
			_is->frame_last_delay = 0;
			_is->frame_timer = (double)av_gettime() / 1000000.0;

			continue;
		}

		pts = 0;									// reset pts to 0, i.e. not yet found

		//Save global pts to be stored in pFrame in first call
		_is->global_video_pkt_pts = packet->pts;
		
		// Decode video frame
		avcodec_decode_video2(_is->video_st->codec, pFrame, &frameFinished, packet);

		// note: opaque is a field inside pFrame left free
		// for the user to stash auxiliary data
		
		/* case where we CANNOT get the DTS, but we did allocate the buffer */
		if (packet->dts == (int64_t)AV_NOPTS_VALUE && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE)
        {
			// fetch the PTS of the first packet, stored in opaque by our
			// buffer-allocation function
            pts = *(uint64_t *) pFrame->opaque;	
        }
		/* case where we CAN get the DTS */
        else if (packet->dts != (int64_t)AV_NOPTS_VALUE)
        {
            pts = packet->dts;
        }
        else
        {
            pts = 0;
        }

		/**
		PTS = PTS * (time_base converted to double)
		this gives the PTS in seconds
		*/
        pts *= av_q2d(_is->video_st->time_base);

		// Did we get a video frame?
		if(frameFinished) {

			synchronize_video();								// synchronize the PTS

			/* convert pFrame -> pFrameRGB */
			sws_scale(_is->sws_ctx, (uint8_t const * const *)pFrame->data,
				pFrame->linesize, 0, _is->video_st->codec->height, pFrameRGB->data, 
						pFrameRGB->linesize);

			while(_is->pictq.getSize() > VIDEO_PICTURE_QUEUE_SIZE && (_is->ut.getStopValue() == false)){
				this->usleep(1000);
			}

			/* push the RGB frame onto the new queue */
			if(_is->pictq.Put(pFrameRGB, pts) < 0) {
				
				//qDebug() << "quitting putting frame - videothread";
				
				break;
			}

		}
		av_free_packet(packet);
	}

	av_free(pFrame);
	av_free(pFrameRGB);

	return;
}