Example #1
static int video_read_packet(FFMovie *movie, AVPacket *pkt)
{
/*DECODE THREAD*/
    unsigned char *ptr;
    int len, len1, got_picture;
    AVFrame frame;
    double pts;

    ptr = pkt->data;
    if (movie->video_st->codec.codec_id == CODEC_ID_RAWVIDEO) {
        avpicture_fill((AVPicture *)&frame, ptr,
                       movie->video_st->codec.pix_fmt,
                       movie->video_st->codec.width,
                       movie->video_st->codec.height);
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = (double)pkt->pts * movie->context->pts_num / movie->context->pts_den;
        else
            pts = 0;
        frame.pict_type = FF_I_TYPE;
        update_video_clock(movie, &frame, pts);
        movie->frame_count++; /*this should probably represent displayed frames, not decoded*/
        if (queue_picture(movie, &frame) < 0)
            return -1;
    } else {
        len = pkt->size;
        while (len > 0) {
            if (movie->vidpkt_start) {
                movie->vidpkt_start = 0;
                movie->vidpkt_timestamp = pkt->pts;
            }
            len1 = avcodec_decode_video(&movie->video_st->codec,
                                        &frame, &got_picture, ptr, len);
            if (len1 < 0)
                break;
            if (got_picture) {
                movie->frame_count++; /*this should probably represent displayed frames, not decoded*/
                if (movie->vidpkt_timestamp != AV_NOPTS_VALUE)
                    pts = (double)movie->vidpkt_timestamp * movie->context->pts_num / movie->context->pts_den;
                else
                    pts = 0;
                update_video_clock(movie, &frame, pts);
                if (queue_picture(movie, &frame) < 0)
                    return -1;
                movie->vidpkt_start = 1;
            }
            ptr += len1;
            len -= len1;
        }
    }
    return 0;
}
Example #2
int video_thread(void *arg) {
        VideoState *is = (VideoState *) arg;
        AVPacket pkt1, *packet = &pkt1;
        //int len1;
        int frameFinished;
        AVFrame *pFrame;

        pFrame = av_frame_alloc();

        for (;;) {
                if (packet_queue_get(&is->videoq, packet, 1) < 0) {
                        // means we quit getting packets
                        break;
                }
                // Decode video frame
                //len1 =
                avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
                                packet);

                // Did we get a video frame?
                if (frameFinished) {
                        if (queue_picture(is, pFrame) < 0) {
                                break;
                        }
                }
                av_free_packet(packet);
        }
        av_frame_free(&pFrame);
        return 0;
}
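Example #2 (like most of the examples below) relies on avcodec_decode_video2(), which FFmpeg deprecated in 3.1 in favor of the send/receive API. A minimal sketch of the same decode loop on the newer API, assuming the VideoState, packet_queue_get() and queue_picture() helpers from the example above plus an AVCodecContext stored in a hypothetical is->video_ctx field, might look like this:

/* Sketch only: assumes the VideoState, packet_queue_get() and
 * queue_picture() helpers above, and an AVCodecContext *video_ctx
 * opened with avcodec_open2(). */
int video_thread(void *arg) {
        VideoState *is = (VideoState *) arg;
        AVPacket pkt;
        AVFrame *pFrame = av_frame_alloc();

        if (!pFrame)
                return -1;

        for (;;) {
                if (packet_queue_get(&is->videoq, &pkt, 1) < 0)
                        break; // queue signalled quit
                if (avcodec_send_packet(is->video_ctx, &pkt) < 0) {
                        av_packet_unref(&pkt);
                        break; // hard decode error
                }
                av_packet_unref(&pkt);
                // one packet can yield zero or more frames
                while (avcodec_receive_frame(is->video_ctx, pFrame) == 0) {
                        if (queue_picture(is, pFrame) < 0)
                                goto done;
                }
        }
done:
        av_frame_free(&pFrame);
        return 0;
}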
Example #3
static void *video_thread(ALLEGRO_THREAD * t, void *arg)
{
   VideoState *is = (VideoState *) arg;
   AVPacket pkt1, *packet = &pkt1;
   int len1, frameFinished;
   AVFrame *pFrame;
   double pts;
   (void)t;

   pFrame = avcodec_alloc_frame();

   for (;;) {
      if (packet_queue_get(is, &is->videoq, packet, 1) < 0) {
         // means we quit getting packets
         break;
      }
      if (packet->data == flush_pkt.data) {
         avcodec_flush_buffers(is->video_st->codec);
         continue;
      }
      pts = 0;

      // Save global pts to be stored in pFrame
      FIXME_global_video_pkt_pts = packet->pts;
   
      // Decode video frame
#ifdef FFMPEG_0_8
      len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
                                  packet);
#else
      len1 = avcodec_decode_video(is->video_st->codec, pFrame, &frameFinished,
                                  packet->data, packet->size);
#endif
                                  

      if (packet->dts == NOPTS_VALUE
          && pFrame->opaque && *(int64_t *) pFrame->opaque != NOPTS_VALUE) {
         pts = 0;//*(uint64_t *) pFrame->opaque;
      }
      else if (packet->dts != NOPTS_VALUE) {
         pts = packet->dts;
      }
      else {
         pts = 0;
      }
      pts *= av_q2d(is->video_st->time_base);

      // Did we get a video frame?
      if (frameFinished) {
         //pts = synchronize_video(is, pFrame, pts);
         if (queue_picture(is, pFrame, pts) < 0) {
            break;
         }
      }
      av_free_packet(packet);
   }
   av_free(pFrame);
   return NULL;
}
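The flush_pkt sentinel tested in Example #3 (and in several examples below) comes from the dranger tutorial's seek handling: a dummy packet whose data pointer marks "reset decoder state". A sketch of how it is typically set up and pushed, assuming the packet_queue_flush() and packet_queue_put() helpers of that tutorial:

/* Sketch of the flush-packet convention (dranger tutorial style).
 * packet_queue_flush() is assumed to drop all queued packets. */
static AVPacket flush_pkt;

av_init_packet(&flush_pkt);
flush_pkt.data = (uint8_t *) "FLUSH";

/* after a successful av_seek_frame(), the demux thread does: */
packet_queue_flush(&is->videoq);
packet_queue_put(&is->videoq, &flush_pkt);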
Example #4
void *video_thread(void *arg) {
  JNIEnv *env;
  if((*g_jvm)->AttachCurrentThread(g_jvm, &env, NULL) != JNI_OK) {
       LOGE(1, "### start video thread error");
       return NULL;
  }
  VideoState *is = (VideoState*)arg;
  AVPacket pkt1, *packet = &pkt1;
  int len1, frameFinished;
  AVFrame *pFrame;
  double pts;
  int numBytes;
  pFrame=avcodec_alloc_frame();
  int ret;
  for(;;) {
	if(is->quit == 1 || is->quit == 2) {
		break;
	}
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
	  if(debug) LOGI(10,"video_thread get packet exit");
      break;
    }
    pts = 0;
    global_video_pkt_pts = packet->pts;
    len1 = avcodec_decode_video2(is->video_st->codec,
				pFrame,
				&frameFinished,
				packet);	
    if(packet->dts == AV_NOPTS_VALUE
       && pFrame->opaque
       && *(uint64_t*)pFrame->opaque
       != AV_NOPTS_VALUE) {
      pts = *(uint64_t*) pFrame->opaque;
    } else if (packet->dts != AV_NOPTS_VALUE) {
      pts = packet->dts;
    } else {
      pts = 0;
    }
    pts *= av_q2d(is->video_st->time_base);
	//pts *= av_q2d(pCodecCtx->time_base);
    if (frameFinished) {
       pts = synchronize_video(is, pFrame, pts);
       if (queue_picture(is, pFrame, pts) < 0) {
			break;
       }
    }
    av_free_packet(packet);
  }
  av_free(pFrame);
  if((*g_jvm)->DetachCurrentThread(g_jvm) != JNI_OK) {
	LOGE(1,"### detach video thread error");
  }
  if(debug) {
    LOGI(1, "### video_thread exit");
  }
  pthread_exit(0);
  return NULL; /* not reached: pthread_exit() does not return */
}
Example #5
int video_thread(void *arg) {
    VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int frameFinished;
    AVFrame *pFrame;
    double pts;

    pFrame = avcodec_alloc_frame();

    for(;;) {
        if(packet_queue_get(&is->videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }

        if(packet->data == flush_pkt.data) {
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        pts = 0;

        // Save global pts to be stored in pFrame in first call
        global_video_pkt_pts = packet->pts;
        // Decode video frame
        avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
                              packet);

        if(packet->dts == AV_NOPTS_VALUE
                && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
            pts = *(uint64_t *)pFrame->opaque;

        } else if(packet->dts != AV_NOPTS_VALUE) {
            pts = packet->dts;

        } else {
            pts = 0;
        }

        pts *= av_q2d(is->video_st->time_base);

        // Did we get a video frame?
        if(frameFinished) {
            pts = synchronize_video(is, pFrame, pts);

            if(queue_picture(is, pFrame, pts) < 0) {
                break;
            }
        }

        av_free_packet(packet);
    }

    av_free(pFrame);
    return 0;
}
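The global_video_pkt_pts / pFrame->opaque dance in Examples #4 and #5 is a workaround for old libavcodec versions that did not carry packet timestamps through to decoded frames. Newer libavcodec fills in AVFrame.best_effort_timestamp itself (Examples #8 and #11 use its accessor), so the whole dts fallback chain can shrink to roughly this sketch:

/* Sketch, assuming a libavcodec recent enough to provide
 * best_effort_timestamp (also readable through
 * av_frame_get_best_effort_timestamp()): */
int64_t ts = pFrame->best_effort_timestamp;
pts = (ts == AV_NOPTS_VALUE) ? 0 : ts * av_q2d(is->video_st->time_base);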
Example #6
int video_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket pkt1, *packet = &pkt1;
  int len1, frameFinished;
  AVFrame *pFrame;
  double pts;

  pFrame = avcodec_alloc_frame();

  is->rgbaFrame = avcodec_alloc_frame();
  avpicture_alloc ((AVPicture *)is->rgbaFrame, PIX_FMT_RGBA, is->video_st->codec->width, is->video_st->codec->height);


  for(;;) {
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
      // means we quit getting packets
      break;
    }
    pts = 0;

    // Save global pts to be stored in pFrame
    global_video_pkt_pts = packet->pts;
    // Decode video frame
    len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
        packet);
    if(packet->dts == AV_NOPTS_VALUE
       && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
      pts = (double)(*(uint64_t *)pFrame->opaque);
    } else if(packet->dts != AV_NOPTS_VALUE) {
		pts = (double)packet->dts;
    } else {
      pts = 0;
    }
    pts *= av_q2d(is->video_st->time_base);


    // Did we get a video frame?
    if(frameFinished) {
      pts = synchronize_video(is, pFrame, pts);
      if(queue_picture(is, pFrame, pts) < 0) {
        break;
      }
    }
    av_free_packet(packet);
  }

  SDL_CloseAudio();

  av_free(pFrame);

  avpicture_free((AVPicture *)is->rgbaFrame);
  av_free(is->rgbaFrame);

  return 0;
}
Example #7
int video_thread(void *arg) {
	VideoState *is = (VideoState*)arg;
	AVPacket pkt1, *packet = &pkt1;
	int len1, frameFinished;
	AVFrame *pFrame;
	double pts;

	pFrame = avcodec_alloc_frame();

	for(;;) {
		//printf("video_thread loop 1\n");
		if(packet_queue_get(&is->videoq, packet, 1) < 0) {
			fprintf(stderr, "%d: packet_queue_get error\n", __LINE__);
			break;
		}
		//printf("video_thread loop 2\n");
		if(packet->data == flush_pkt.data) {
			avcodec_flush_buffers(is->video_st->codec);
			continue;
		}
		//printf("video_thread loop 3\n");
		pts = 0;
		global_video_pkt_pts = packet->pts;
		//printf("video_thread loop 4\n");
		len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished, packet);
/*
		if(packet->dts == AV_NOPTS_VALUE && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
			pts = *(uint64_t*)pFrame->opaque;
		} else if(packet->dts != AV_NOPTS_VALUE) {
			pts = packet->dts;
		} else {
			pts = 0;
		}

		pts *= av_q2d(is->video_st->time_base);
*/
		//printf("video_thread loop 5\n");
		if(frameFinished) {
			//printf("video_thread loop 6\n");
			pts = synchronize_video(is, pFrame, pts);
			//printf("video_thread loop 7\n");
			if(queue_picture(is, pFrame, pts) < 0) 
			{
				//printf("video_thread loop 8\n");
				break;
			}
		}
		//printf("video_thread loop 6\n");
		av_free_packet(packet); 
	}
	av_free(pFrame);
	//printf("video_thread loop end\n");
	return 0;
}
Example #8
int video_thread(void* arg) {
	VideoState* is = (VideoState*) arg;
	AVPacket pkt1, *packet = &pkt1;
	int frameFinished;
	AVFrame* pFrame, *pFrameYUV;
	uint8_t *out_buffer;
	double pts;

	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();

	out_buffer = (uint8_t*) av_malloc(
			avpicture_get_size(AV_PIX_FMT_YUV420P, is->video_ctx->width,
					is->video_ctx->height));
	avpicture_fill((AVPicture*) pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P,
			is->video_ctx->width, is->video_ctx->height);

	for (;;) {
		if (packet_queue_get(&is->videoq, packet, 1, 1) < 0) {
			break;
		}

		if (packet->data == flush_pkt.data) {
			avcodec_flush_buffers(is->video_ctx);
			continue;
		}

		pts = 0;
		avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, packet);

		if ((pts = av_frame_get_best_effort_timestamp(pFrame)) == AV_NOPTS_VALUE) {
			pts = 0;
		}
		pts *= av_q2d(is->video_st->time_base);

		printf("queue_picture frameFinished=%d  packet->size=%d  pts=%lf\n",
				frameFinished, packet->size, pts);
		if (frameFinished) {
			pts = synchronize_video(is, pFrame, pts);
			if (queue_picture(is, pFrame, pFrameYUV, pts) < 0) {
				break;
			}
		}
		av_free_packet(packet);
	}

	av_frame_free(&pFrame);
	av_frame_free(&pFrameYUV);
	av_free(out_buffer);
	return 0;
}
Example #9
int video_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket pkt1, *packet = &pkt1;
  int len1, frameFinished;
  AVFrame *pFrame;
  double pts;

  pFrame = avcodec_alloc_frame();

  for(;;) {
    // if we stopped getting packets
    if( packet_queue_get(&is->videoq, packet, 1) < 0 ) {
      break;
    }

    pts = 0;
    // Save global pts to be stored in pFrame in first call
    global_video_pkt_pts = packet->pts;

    // Decode video frame
    //frameCount++;
    len1 = avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, packet );

    if(packet->dts != AV_NOPTS_VALUE) {
      pts = packet->dts;
    } else {
      pts = 0;
    }
    pts *= av_q2d(is->video_ctx->time_base);


    // Frame is decoded, queue it to be played
    if(frameFinished) {
      pts = synchronize_video( is, pFrame, pts);
      if(queue_picture(is, pFrame, pts) < 0) {
        break;
      }
    }

    // Cleanup...
    av_free_packet(packet);
  }
  av_free(pFrame);
  return 0;
}
Example #10
int video_thread(void *arg)
{
    VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int frameFinished;
    AVFrame *pFrame;

    pFrame = av_frame_alloc();
    double pts;
    for(;;) {
        if(packet_queue_get(&is->videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        pts = 0;
        global_video_pkt_pts = packet->pts;
        // Decode video frame
        avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
                packet);
        /* if(packet->dts == AV_NOPTS_VALUE */
        /*         && pFrame->opaque && *(uint64_t*)pFrame->opaque != (uint64_t)AV_NOPTS_VALUE) { */
        /*     pts = *(uint64_t *)pFrame->opaque; */
        /* } else if(packet->dts != AV_NOPTS_VALUE) { */
        /*     pts = packet->dts; */
        /* } else { */
        /*     pts = 0; */
        /* } */
        if(packet->dts !=AV_NOPTS_VALUE)
            pts = packet->dts;
        // convert pts to seconds for display
        pts *= av_q2d(is->video_st->time_base);
        // Did we get a video frame?
        if(frameFinished) {
            pts = synchronize_video(is, pFrame, pts);
            if(queue_picture(is, pFrame, pts) < 0) {
                break;
            }
        }
        av_free_packet(packet);
    }
    av_frame_free(&pFrame);
    return 0;
}
Example #11
int video_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket pkt1, *packet = &pkt1;
  int frameFinished;
  AVFrame *pFrame;
  double pts;

  pFrame = av_frame_alloc();

  for(;;) {
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
      // means we quit getting packets
      break;
    }
    pts = 0;

    // Decode video frame
    avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, packet);

    if((pts = av_frame_get_best_effort_timestamp(pFrame)) == AV_NOPTS_VALUE) {
      pts = 0;
    }
    pts *= av_q2d(is->video_st->time_base);

    // Did we get a video frame?
    if(frameFinished) {
      pts = synchronize_video(is, pFrame, pts);
      if(queue_picture(is, pFrame, pts) < 0) {
        break;
      }
    }
    av_free_packet(packet);
  }
  av_frame_free(&pFrame);
  return 0;
}
Example #12
int VideoDecoder::video_thread(void *arg)
{
    VideoState *is = (VideoState *) arg;
    AVStreamsParser* ps = is->getAVStreamsParser();
    AVFrame *frame = av_frame_alloc();
    double pts;
    double duration;
    int ret;
    AVRational tb = ps->video_st->time_base;
    AVRational frame_rate = av_guess_frame_rate(ps->ic, ps->video_st, NULL);

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL, *filt_in = NULL;
    int last_w = 0;
    int last_h = 0;
    enum AVPixelFormat last_format = (AVPixelFormat) (-2);
    int last_serial = -1;
    int last_vfilter_idx = 0;
    if (!graph) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }

#endif

    if (!frame) {
#if CONFIG_AVFILTER
        avfilter_graph_free(&graph);
#endif
        return AVERROR(ENOMEM);
    }

    for (;;) {
        ret = is->viddec().get_video_frame(is, frame);
        if (ret < 0)
            goto the_end;
        if (!ret)
            continue;

#if CONFIG_AVFILTER
        if (   last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format
            || last_serial != is->viddec().pkt_serial
            || last_vfilter_idx != is->vfilter_idx) {
            av_log(NULL, AV_LOG_DEBUG,
                   "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
                   last_w, last_h,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
                   frame->width, frame->height,
                   (const char *)av_x_if_null(av_get_pix_fmt_name((AVPixelFormat)frame->format), "none"), is->viddec().pkt_serial);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(graph, is,gOptions.vfilters_list ? gOptions.vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
                SDL_Event event;
                event.type = FF_QUIT_EVENT;
                event.user.data1 = is;
                SDL_PushEvent(&event);
                goto the_end;
            }
            filt_in  = is->in_video_filter;
            filt_out = is->out_video_filter;
            last_w = frame->width;
            last_h = frame->height;
            last_format = (AVPixelFormat) frame->format;
            last_serial = is->viddec().pkt_serial;
            last_vfilter_idx = is->vfilter_idx;
            frame_rate = filt_out->inputs[0]->frame_rate;
        }

        ret = av_buffersrc_add_frame(filt_in, frame);
        if (ret < 0)
            goto the_end;

        while (ret >= 0) {
            is->frame_last_returned_time = av_gettime_relative() / 1000000.0;

            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {
                if (ret == AVERROR_EOF)
                    is->viddec().finished = is->viddec().pkt_serial;
                ret = 0;
                break;
            }

            is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;
            tb = filt_out->inputs[0]->time_base;
#endif
            duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec().pkt_serial);
            av_frame_unref(frame);
#if CONFIG_AVFILTER
        }
#endif

        if (ret < 0)
            goto the_end;
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_frame_free(&frame);
    return 0;
}
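Example #12 leans on a configure_video_filters() helper that is not shown. This is a rough sketch of the minimal buffer-source/buffer-sink wiring such a helper has to perform; error paths and the user filter chain are elided, the function name is hypothetical, and only the AVStreamsParser plus the in_video_filter/out_video_filter fields from the code above are assumed:

/* Sketch only. 'ps' is the AVStreamsParser from the example above. */
static int configure_video_filters_sketch(AVFilterGraph *graph,
                                          VideoState *is,
                                          AVStreamsParser *ps,
                                          AVFrame *frame)
{
    char args[256];
    AVFilterContext *src = NULL, *sink = NULL;
    AVRational tb = ps->video_st->time_base;

    /* describe the decoded frames to the "buffer" source */
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             frame->width, frame->height, frame->format,
             tb.num, tb.den,
             frame->sample_aspect_ratio.num,
             FFMAX(frame->sample_aspect_ratio.den, 1));

    if (avfilter_graph_create_filter(&src, avfilter_get_by_name("buffer"),
                                     "in", args, NULL, graph) < 0)
        return -1;
    if (avfilter_graph_create_filter(&sink, avfilter_get_by_name("buffersink"),
                                     "out", NULL, NULL, graph) < 0)
        return -1;
    if (avfilter_link(src, 0, sink, 0) < 0)
        return -1;
    if (avfilter_graph_config(graph, NULL) < 0)
        return -1;

    is->in_video_filter  = src;
    is->out_video_filter = sink;
    return 0;
}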
Example #13
int video_thread(void *arg) {
	VideoState *is = (VideoState *) arg;
	AVPacket pkt1, *packet = &pkt1;
	int frameFinished;
	AVFrame *pFrame;
	AVFrame *pFrameRGB;
	double pts;
	int numBytes;
	uint8_t *buffer;

	

	// Allocate an AVFrame structure
	pFrameRGB = avcodec_alloc_frame();
	if (pFrameRGB == NULL)
		return -1;

	// Determine required buffer size and allocate buffer
	numBytes = avpicture_get_size(PIX_FMT_RGB24, is->video_st->codec->width,
			is->video_st->codec->height);
	buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24,
			is->video_st->codec->width, is->video_st->codec->height);


	pFrame = avcodec_alloc_frame();

	for (;;) {
		if (packet_queue_get(&is->videoq, packet, 1) < 0) {
			// means we quit getting packets
			break;
		}

		pts = 0;

		// Save global pts to be stored in pFrame in first call
		global_video_pkt_pts = packet->pts;

		// Decode video frame

		avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
				packet);

		if (packet->dts == AV_NOPTS_VALUE && pFrame->opaque
				&& *(uint64_t*) pFrame->opaque != AV_NOPTS_VALUE) {
			pts = *(uint64_t *) pFrame->opaque;
		} else if (packet->dts != AV_NOPTS_VALUE) {
			pts = packet->dts;
		} else {
			pts = 0;
		}
		pts *= av_q2d(is->video_st->time_base);

		// Did we get a video frame?
		if (frameFinished) {
			pts = synchronize_video(is, pFrame, pts);
			if (queue_picture(is, pFrame, pFrameRGB, pts) < 0) {
				break;
			}
		}
		av_free_packet(packet);
	}
	av_free(pFrame);
	av_free(buffer);
	av_free(pFrameRGB);
	return 0;
}
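Example #13 queues pFrameRGB alongside the decoded frame, but the actual pixel conversion is left to queue_picture(). With libswscale it would look roughly like this sketch, assuming an SwsContext created once per stream and the headers from <libswscale/swscale.h>:

/* Sketch: convert pFrame (decoder pix_fmt) into the RGB24 buffer that
 * avpicture_fill() attached to pFrameRGB above. sws_ctx is assumed to be
 * created once, e.g. when the video stream is opened. */
struct SwsContext *sws_ctx = sws_getContext(
		is->video_st->codec->width, is->video_st->codec->height,
		is->video_st->codec->pix_fmt,
		is->video_st->codec->width, is->video_st->codec->height,
		PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL);

sws_scale(sws_ctx, (const uint8_t * const *) pFrame->data, pFrame->linesize,
		0, is->video_st->codec->height,
		pFrameRGB->data, pFrameRGB->linesize);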