// Set ffmpeg's debug output level
__declspec(dllexport) void __stdcall UCIDebug(int level)
{
	av_log_set_level(level);
}
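A note on the argument: the level passed through to av_log_set_level() is one of the AV_LOG_* constants from libavutil/log.h. A hypothetical host-side caller of the export above might look like this:

#include <libavutil/log.h>

/* Hypothetical caller of the UCIDebug export above.
 * Levels from libavutil/log.h: AV_LOG_QUIET (-8), AV_LOG_PANIC (0),
 * AV_LOG_FATAL (8), AV_LOG_ERROR (16), AV_LOG_WARNING (24),
 * AV_LOG_INFO (32), AV_LOG_VERBOSE (40), AV_LOG_DEBUG (48). */
static void enable_ffmpeg_debug_output(void)
{
    UCIDebug(AV_LOG_DEBUG); /* the most verbose level commonly used */
}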
Example #2
int main(int argc, char* argv[])
{ 
  musicd_start_time = time(NULL);

  config_init();

  config_set_hook("log-level", log_level_changed);
  config_set_hook("log-time-format", log_time_format_changed);
  config_set("log-level", "debug");

  config_set_hook("directory", directory_changed);
  
  config_set("config", "~/.musicd.conf");
  config_set("directory", "~/.musicd");
  config_set("bind", "any");
  config_set("port", "6800");
  
  config_set_hook("image-prefix", scan_image_prefix_changed);
  config_set("image-prefix", "front,cover,jacket");

  config_set("server-name", "musicd server");

  if (config_load_args(argc, argv)) {
    musicd_log(LOG_FATAL, "main", "invalid command line arguments");
    print_usage(argv[0]);
    return -1;
  }
  
  if (config_get_value("help")) {
    print_usage(argv[0]);
    return 0;
  }
  if (config_get_value("version")) {
    print_version();
    return 0;
  }
  
  if (!config_to_bool("no-config")
   && config_load_file(config_to_path("config"))) {
    musicd_log(LOG_FATAL, "main", "could not read config file");
    return -1;
  }
  
  /* Reload command line arguments - this is because the config file might have
   * overwritten them, and the command line has the highest priority. */
  config_load_args(argc, argv);
  
  confirm_directory();
  
  musicd_log(LOG_INFO, "main", "musicd version %s", MUSICD_VERSION_STRING);
  
  srand(time(NULL));

  av_register_all();
  avcodec_register_all();
  av_lockmgr_register(&musicd_av_lockmgr);
  
  av_log_set_level(AV_LOG_QUIET);

  if (db_open()) {
    musicd_log(LOG_FATAL, "library", "can't open database");
    return -1;
  }

  if (library_open()) {
    musicd_log(LOG_FATAL, "main", "could not open library");
    return -1;
  }
  
  if (cache_open()) {
    musicd_log(LOG_FATAL, "main", "could not open cache");
    return -1;
  }
  
  if (server_start()) {
    musicd_log(LOG_FATAL, "main", "could not start server");
    return -1;
  }
  
  signal(SIGUSR1, start_scan_signal);
  scan_start();
  
  while (1) {
    sleep(1);
  }
  
  return 0;
}
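The musicd_av_lockmgr callback registered above is not shown; a minimal pthread-based lock manager for the old av_lockmgr_register() API would be a sketch along these lines (not musicd's actual implementation):

#include <pthread.h>
#include <stdlib.h>
#include <libavcodec/avcodec.h>

static int musicd_av_lockmgr(void **mutex, enum AVLockOp op)
{
    pthread_mutex_t *m = *mutex;
    switch (op) {
    case AV_LOCK_CREATE:
        m = malloc(sizeof(*m));
        if (!m)
            return 1;
        if (pthread_mutex_init(m, NULL)) {
            free(m);
            return 1;
        }
        *mutex = m;
        return 0;
    case AV_LOCK_OBTAIN:
        return pthread_mutex_lock(m) != 0;
    case AV_LOCK_RELEASE:
        return pthread_mutex_unlock(m) != 0;
    case AV_LOCK_DESTROY:
        if (m) {
            pthread_mutex_destroy(m);
            free(m);
        }
        *mutex = NULL;
        return 0;
    }
    return 1;
}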
Example #3
int main(int argc, char **argv)
{
    const char *outfilename = NULL;
    const char *infilename  = NULL;
    FILE *outfile           = NULL;
    FILE *infile            = NULL;
    char *graph_string      = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    int c; /* getopt() returns int; "char c" breaks the != -1 test where char is unsigned */

    av_log_set_level(AV_LOG_DEBUG);

    while ((c = getopt(argc, argv, "hi:o:")) != -1) {
        switch (c) {
        case 'h':
            usage();
            return 0;
        case 'i':
            infilename = optarg;
            break;
        case 'o':
            outfilename = optarg;
            break;
        case '?':
            return 1;
        }
    }

    if (!infilename || !strcmp(infilename, "-"))
        infilename = "/dev/stdin";
    infile = fopen(infilename, "r");
    if (!infile) {
        fprintf(stderr, "Impossible to open input file '%s': %s\n",
                infilename, strerror(errno));
        return 1;
    }

    if (!outfilename || !strcmp(outfilename, "-"))
        outfilename = "/dev/stdout";
    outfile = fopen(outfilename, "w");
    if (!outfile) {
        fprintf(stderr, "Impossible to open output file '%s': %s\n",
                outfilename, strerror(errno));
        return 1;
    }

    /* read from infile and put it in a buffer */
    {
        unsigned int count = 0;
        struct line *line, *last_line, *first_line;
        char *p;
        last_line = first_line = av_malloc(sizeof(struct line));

        while (fgets(last_line->data, sizeof(last_line->data), infile)) {
            struct line *new_line = av_malloc(sizeof(struct line));
            count += strlen(last_line->data);
            last_line->next = new_line;
            last_line       = new_line;
        }
        last_line->next = NULL;

        graph_string = av_malloc(count + 1);
        p = graph_string;
        for (line = first_line; line->next; line = line->next) {
            unsigned int l = strlen(line->data);
            memcpy(p, line->data, l);
            p += l;
        }
        *p = '\0';
    }

    avfilter_register_all();

    if (avfilter_graph_parse(graph, graph_string, NULL, NULL, NULL) < 0) {
        fprintf(stderr, "Impossible to parse the graph description\n");
        return 1;
    }

    if (avfilter_graph_config(graph, NULL) < 0)
        return 1;

    print_digraph(outfile, graph);
    fflush(outfile);

    return 0;
}
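The struct line type used in the buffer-filling block is defined elsewhere in the original tool; judging from the fgets() calls it is presumably something like:

/* presumed definition; the 256-byte line size is an assumption */
struct line {
    char data[256];      /* one line of the textual graph description */
    struct line *next;   /* singly linked list of input lines */
};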
Example #4
int main(int argc, char **argv)
{
    int i, j;
    AVAES b;
    static const uint8_t rkey[2][16] = {
        { 0 },
        { 0x10, 0xa5, 0x88, 0x69, 0xd7, 0x4b, 0xe5, 0xa3,
          0x74, 0xcf, 0x86, 0x7c, 0xfb, 0x47, 0x38, 0x59 }
    };
    static const uint8_t rpt[2][16] = {
        { 0x6a, 0x84, 0x86, 0x7c, 0xd7, 0x7e, 0x12, 0xad,
          0x07, 0xea, 0x1b, 0xe8, 0x95, 0xc5, 0x3f, 0xa3 },
        { 0 }
    };
    static const uint8_t rct[2][16] = {
        { 0x73, 0x22, 0x81, 0xc0, 0xa0, 0xaa, 0xb8, 0xf7,
          0xa5, 0x4a, 0x0c, 0x67, 0xa0, 0xc4, 0x5e, 0xcf },
        { 0x6d, 0x25, 0x1e, 0x69, 0x44, 0xb0, 0x51, 0xe0,
          0x4e, 0xaa, 0x6f, 0xb4, 0xdb, 0xf7, 0x84, 0x65 }
    };
    uint8_t pt[16], temp[16];
    int err = 0;

    av_log_set_level(AV_LOG_DEBUG);
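
    /* known-answer self-test: decrypt two reference ciphertexts and
     * compare the results against the expected plaintexts */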

    for (i = 0; i < 2; i++) {
        av_aes_init(&b, rkey[i], 128, 1);
        av_aes_crypt(&b, temp, rct[i], 1, NULL, 1);
        for (j = 0; j < 16; j++) {
            if (rpt[i][j] != temp[j]) {
                av_log(NULL, AV_LOG_ERROR, "%d %02X %02X\n",
                       j, rpt[i][j], temp[j]);
                err = 1;
            }
        }
    }

    if (argc > 1 && !strcmp(argv[1], "-t")) {
        AVAES ae, ad;
        AVLFG prng;

        av_aes_init(&ae, "PI=3.141592654..", 128, 0);
        av_aes_init(&ad, "PI=3.141592654..", 128, 1);
        av_lfg_init(&prng, 1);

        for (i = 0; i < 10000; i++) {
            for (j = 0; j < 16; j++)
                pt[j] = av_lfg_get(&prng);
            {
                START_TIMER;
                av_aes_crypt(&ae, temp, pt, 1, NULL, 0);
                if (!(i & (i - 1)))
                    av_log(NULL, AV_LOG_ERROR, "%02X %02X %02X %02X\n",
                           temp[0], temp[5], temp[10], temp[15]);
                av_aes_crypt(&ad, temp, temp, 1, NULL, 1);
                STOP_TIMER("aes");
            }
            for (j = 0; j < 16; j++) {
                if (pt[j] != temp[j]) {
                    av_log(NULL, AV_LOG_ERROR, "%d %d %02X %02X\n",
                           i, j, pt[j], temp[j]);
                }
            }
        }
    }
    return err;
}
Example #5
bool
FFmpegInput::open (const std::string &name, ImageSpec &spec)
{
    static boost::once_flag init_flag = BOOST_ONCE_INIT;
    boost::call_once (&av_register_all, init_flag);
    const char *file_name = name.c_str();
    av_log_set_level (AV_LOG_FATAL);
    if (avformat_open_input (&m_format_context, file_name, NULL, NULL) != 0) // avformat_open_input allocs format_context
    {
        error ("\"%s\" could not open input", file_name);
        return false;
    }
    if (avformat_find_stream_info (m_format_context, NULL) < 0)
    {
        error ("\"%s\" could not find stream info", file_name);
        return false;
    }
    m_video_stream = -1;
    for (unsigned int i=0; i<m_format_context->nb_streams; i++) {
        if (m_format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (m_video_stream < 0) {
                m_video_stream=i;
            }
            m_video_indexes.push_back (i); // needed for later use; record every video stream
        }
    }  
    if (m_video_stream == -1) {
        error ("\"%s\" could not find a valid videostream", file_name);
        return false;
    }
    m_codec_context = m_format_context->streams[m_video_stream]->codec; // codec context for videostream
    m_codec = avcodec_find_decoder (m_codec_context->codec_id);
    if (!m_codec) {
        error ("\"%s\" unsupported codec", file_name);
        return false;
    }
    if (avcodec_open2 (m_codec_context, m_codec, NULL) < 0) {
        error ("\"%s\" could not open codec", file_name);
        return false;
    }
    if (!strcmp (m_codec_context->codec->name, "mjpeg") ||
        !strcmp (m_codec_context->codec->name, "dvvideo")) {
        m_offset_time = false;
    }
    AVStream *stream = m_format_context->streams[m_video_stream];
    if (stream->r_frame_rate.num != 0 && stream->r_frame_rate.den != 0) {
        m_frame_rate = stream->r_frame_rate;
    }
    if (static_cast<int64_t> (m_format_context->duration) != AV_NOPTS_VALUE) {
        m_frames = static_cast<uint64_t> ((fps() * static_cast<double>(m_format_context->duration) / 
                                                   static_cast<uint64_t>(AV_TIME_BASE)));
    } else {
        m_frames = 1 << 29;
    }
    AVPacket pkt;
    if (!m_frames) {
        seek (0);
        av_init_packet (&pkt);
        av_read_frame (m_format_context, &pkt);
        uint64_t first_pts = pkt.pts;
        uint64_t max_pts = first_pts;
        seek (1 << 29);
        av_init_packet (&pkt);
        while (stream && av_read_frame (m_format_context, &pkt) >= 0) {
            uint64_t current_pts = static_cast<uint64_t> (av_q2d(stream->time_base) * (pkt.pts - first_pts) * fps());
            if (current_pts > max_pts) {
                max_pts = current_pts;
            }
        }
        m_frames = max_pts;
    }
    m_frame = av_frame_alloc();
    m_rgb_frame = av_frame_alloc();
    m_rgb_buffer.resize(
        avpicture_get_size (PIX_FMT_RGB24,
        m_codec_context->width,
        m_codec_context->height),
        0
    );
    AVPixelFormat pixFormat;
    switch (m_codec_context->pix_fmt) { // deprecation warning for YUV formats
        case AV_PIX_FMT_YUVJ420P:
            pixFormat = AV_PIX_FMT_YUV420P;
            break;
        case AV_PIX_FMT_YUVJ422P:
            pixFormat = AV_PIX_FMT_YUV422P;
            break;
        case AV_PIX_FMT_YUVJ444P:
            pixFormat = AV_PIX_FMT_YUV444P;
            break;
        case AV_PIX_FMT_YUVJ440P:
            pixFormat = AV_PIX_FMT_YUV440P;
            break;
        default:
            pixFormat = m_codec_context->pix_fmt;
            break;
    }
    m_sws_rgb_context = sws_getContext(
        m_codec_context->width,
        m_codec_context->height,
        pixFormat,
        m_codec_context->width,
        m_codec_context->height,
        PIX_FMT_RGB24,
        SWS_AREA,
        NULL,
        NULL,
        NULL
    );
    m_spec = ImageSpec (m_codec_context->width, m_codec_context->height, 3, TypeDesc::UINT8);
    AVDictionaryEntry *tag = NULL;
    while ((tag = av_dict_get (m_format_context->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        m_spec.attribute (tag->key, tag->value);
    }
    m_spec.attribute ("fps", m_frame_rate.num / static_cast<float> (m_frame_rate.den));
    m_spec.attribute ("oiio:Movie", true);
    m_nsubimages = m_frames;
    spec = m_spec;
    return true;
}
Example #6
static int module_init(void)
{
	const uint8_t profile_idc = 0x42; /* baseline profile */
	const uint8_t profile_iop = 0x80;
	int err = 0;

	if (re_snprintf(h264_fmtp, sizeof(h264_fmtp),
			"packetization-mode=0;profile-level-id=%02x%02x%02x"
#if 0
			";max-mbps=35000"
			";max-fs=3600"
			";max-smbps=98875"  /* times 4 for full HD */
#endif
			"",
			profile_idc, profile_iop, h264_level_idc) < 0)
		return ENOMEM;

#ifdef USE_X264
	re_printf("x264 build %d\n", X264_BUILD);
#else
	re_printf("using FFmpeg H.264 encoder\n");
#endif

#if LIBAVCODEC_VERSION_INT < ((53<<16)+(10<<8)+0)
	avcodec_init();
#endif

	avcodec_register_all();

#if 0
	av_log_set_level(AV_LOG_WARNING);
#endif

	if (avcodec_find_decoder(CODEC_ID_H264)) {

		/* XXX: add two h264 codecs */
		err |= vidcodec_register(&h264, 0,    "H264",
					 h264_fmtp,
					 alloc,
#ifdef USE_X264
					 enc_x264,
#else
					 enc,
#endif
					 h264_nal_send,
					 dec_h264, h264_fmtp_cmp);
	}

	if (avcodec_find_decoder(CODEC_ID_H263)) {

		err |= vidcodec_register(&h263, "34", "H263",
					 "F=1;CIF=1;CIF4=1",
					 alloc, enc, NULL, dec_h263, NULL);
	}

	if (avcodec_find_decoder(CODEC_ID_MPEG4)) {

		err |= vidcodec_register(&mpg4, 0, "MP4V-ES",
					 "profile-level-id=3",
					 alloc, enc, NULL, dec_mpeg4, NULL);
	}

	return err;
}
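For the baseline settings above (profile_idc 0x42, profile_iop 0x80) and, say, a h264_level_idc of 0x1f (level 3.1), the generated fragment would read packetization-mode=0;profile-level-id=42801f.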
Example #7
int opt_default(const char *opt, const char *arg){
    int type;
    int ret= 0;
    const AVOption *o= NULL;
    int opt_types[]={AV_OPT_FLAG_VIDEO_PARAM, AV_OPT_FLAG_AUDIO_PARAM, 0, AV_OPT_FLAG_SUBTITLE_PARAM, 0};
    AVCodec *p = NULL;
    AVOutputFormat *oformat = NULL;
    AVInputFormat *iformat = NULL;

    while ((p = av_codec_next(p))) {
        AVClass *c = p->priv_class;
        if (c && av_find_opt(&c, opt, NULL, 0, 0))
            break;
    }
    if (p)
        goto out;
    while ((oformat = av_oformat_next(oformat))) {
        const AVClass *c = oformat->priv_class;
        if (c && av_find_opt(&c, opt, NULL, 0, 0))
            break;
    }
    if (oformat)
        goto out;
    while ((iformat = av_iformat_next(iformat))) {
        const AVClass *c = iformat->priv_class;
        if (c && av_find_opt(&c, opt, NULL, 0, 0))
            break;
    }
    if (iformat)
        goto out;

    for(type=0; *avcodec_opts && type<AVMEDIA_TYPE_NB && ret>= 0; type++){
        const AVOption *o2 = av_find_opt(avcodec_opts[0], opt, NULL, opt_types[type], opt_types[type]);
        if(o2)
            ret = av_set_string3(avcodec_opts[type], opt, arg, 1, &o);
    }
    if(!o && avformat_opts)
        ret = av_set_string3(avformat_opts, opt, arg, 1, &o);
    if(!o && sws_opts)
        ret = av_set_string3(sws_opts, opt, arg, 1, &o);
    if(!o){
        if (opt[0] == 'a' && avcodec_opts[AVMEDIA_TYPE_AUDIO])
            ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_AUDIO], opt+1, arg, 1, &o);
        else if(opt[0] == 'v' && avcodec_opts[AVMEDIA_TYPE_VIDEO])
            ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_VIDEO], opt+1, arg, 1, &o);
        else if(opt[0] == 's' && avcodec_opts[AVMEDIA_TYPE_SUBTITLE])
            ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_SUBTITLE], opt+1, arg, 1, &o);
        if (ret >= 0)
            opt += 1;
    }
    if (o && ret < 0) {
        fprintf(stderr, "Invalid value '%s' for option '%s'\n", arg, opt);
        exit(1);
    }
    if (!o) {
        fprintf(stderr, "Unrecognized option '%s'\n", opt);
        exit(1);
    }

 out:
//    av_log(NULL, AV_LOG_ERROR, "%s:%s: %f 0x%0X\n", opt, arg, av_get_double(avcodec_opts, opt, NULL), (int)av_get_int(avcodec_opts, opt, NULL));

    opt_values= av_realloc(opt_values, sizeof(void*)*(opt_name_count+1));
    opt_values[opt_name_count] = av_strdup(arg);
    opt_names= av_realloc(opt_names, sizeof(void*)*(opt_name_count+1));
    opt_names[opt_name_count++] = av_strdup(opt);

    if ((*avcodec_opts && avcodec_opts[0]->debug) || (avformat_opts && avformat_opts->debug))
        av_log_set_level(AV_LOG_DEBUG);
    return 0;
}
Example #8
int main(int argc, char* argv[])
{
  int ret;

  av_register_all();
  av_log_set_level(AV_LOG_DEBUG);

  if(argc < 3)
  {
    printf("usage : %s <input> <output>\n", argv[0]);
    return 0;
  }

  if(open_input(argv[1]) < 0)
  {
    goto main_end;
  }

  if(create_output(argv[2]) < 0)
  {
    goto main_end;
  }

  // Print information about the OUTPUT file.
  av_dump_format(outputFile.fmt_ctx, 0, outputFile.fmt_ctx->filename, 1);

  AVPacket pkt;
  int out_stream_index;

  while(1)
  {
    ret = av_read_frame(inputFile.fmt_ctx, &pkt);
    if(ret == AVERROR_EOF)
    {
      printf("End of frame\n");
      break;
    }

    if(pkt.stream_index != inputFile.v_index && 
      pkt.stream_index != inputFile.a_index)
    {
      av_free_packet(&pkt);
      continue;
    }

    AVStream* in_stream = inputFile.fmt_ctx->streams[pkt.stream_index];
    out_stream_index = (pkt.stream_index == inputFile.v_index) ? 
            outputFile.v_index : outputFile.a_index;
    AVStream* out_stream = outputFile.fmt_ctx->streams[out_stream_index];

    av_packet_rescale_ts(&pkt, in_stream->time_base, out_stream->time_base);

    pkt.stream_index = out_stream_index;

    if(av_interleaved_write_frame(outputFile.fmt_ctx, &pkt) < 0)
    {
      printf("Error occurred when writing packet into file\n");
      break;
    }   
  } // while

  // Flush anything not yet finalized while writing the file (the trailer).
  av_write_trailer(outputFile.fmt_ctx);

main_end:
  release();

  return 0;
}
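The av_packet_rescale_ts() call above is what keeps the timestamps valid across containers; a simplified sketch of what it does internally (the real function also applies rounding flags):

#include <libavcodec/avcodec.h>

static void rescale_ts_sketch(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
{
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts = av_rescale_q(pkt->pts, src_tb, dst_tb);   /* presentation time */
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts = av_rescale_q(pkt->dts, src_tb, dst_tb);   /* decode time */
    if (pkt->duration > 0)
        pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
}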
Example #9
// init driver
static int init(sh_video_t *sh){
    AVCodecContext *avctx;
    vd_ffmpeg_ctx *ctx;
    AVCodec *lavc_codec;
    int lowres_w=0;
    int do_vis_debug= lavc_param_vismv || (lavc_param_debug&(FF_DEBUG_VIS_MB_TYPE|FF_DEBUG_VIS_QP));
    // slice is rather broken with threads, so disable that combination unless
    // explicitly requested
    int use_slices = vd_use_slices > 0 || (vd_use_slices <  0 && lavc_param_threads <= 1);
    AVDictionary *opts = NULL;

    init_avcodec();

    ctx = sh->context = malloc(sizeof(vd_ffmpeg_ctx));
    if (!ctx)
        return 0;
    memset(ctx, 0, sizeof(vd_ffmpeg_ctx));

    lavc_codec = avcodec_find_decoder_by_name(sh->codec->dll);
    if(!lavc_codec){
        mp_msg(MSGT_DECVIDEO, MSGL_ERR, MSGTR_MissingLAVCcodec, sh->codec->dll);
        uninit(sh);
        return 0;
    }

    if (auto_threads) {
#if (defined(__MINGW32__) && HAVE_PTHREADS)
        lavc_param_threads = pthread_num_processors_np();
#elif defined(__linux__)
        /* Code stolen from x264 :) */
        unsigned int bit;
        int np;
        cpu_set_t p_aff;
        memset( &p_aff, 0, sizeof(p_aff) );
        sched_getaffinity( 0, sizeof(p_aff), &p_aff );
        for( np = 0, bit = 0; bit < sizeof(p_aff); bit++ )
            np += (((uint8_t *)&p_aff)[bit / 8] >> (bit % 8)) & 1;

        lavc_param_threads = np;
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) || defined(__APPLE__) || defined (__NetBSD__) || defined(__OpenBSD__)
        /* Code stolen from x264 :) */
        int numberOfCPUs;
        size_t length = sizeof( numberOfCPUs );
#if defined (__NetBSD__) || defined(__OpenBSD__)
        int mib[2] = { CTL_HW, HW_NCPU };
        if( sysctl(mib, 2, &numberOfCPUs, &length, NULL, 0) )
#else
        if( sysctlbyname("hw.ncpu", &numberOfCPUs, &length, NULL, 0) )
#endif
        {
            numberOfCPUs = 1;
        }
        lavc_param_threads = numberOfCPUs;
#endif
        if(lavc_codec->id == CODEC_ID_H264 || lavc_codec->id == CODEC_ID_FFH264 ||
           lavc_codec->id == CODEC_ID_MPEG2TS || lavc_codec->id == CODEC_ID_MPEG2VIDEO_XVMC) {
            if (lavc_param_threads > 16) lavc_param_threads = 16;
            if (lavc_param_threads > 1 && (lavc_codec->id == CODEC_ID_MPEG2VIDEO ||
              lavc_codec->id == CODEC_ID_MPEG2TS || lavc_codec->id == CODEC_ID_MPEG2VIDEO_XVMC))
                vd_use_slices = 0;
            mp_msg(MSGT_DECVIDEO, MSGL_INFO, "Spawning %d decoding thread%s...\n", lavc_param_threads, lavc_param_threads == 1 ? "" : "s");
        } else {
            lavc_param_threads = 1;
        }
    }

    if(use_slices && (lavc_codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND) && !do_vis_debug)
        ctx->do_slices=1;

    if(lavc_codec->capabilities&CODEC_CAP_DR1 && !do_vis_debug && lavc_codec->id != CODEC_ID_INTERPLAY_VIDEO && lavc_codec->id != CODEC_ID_VP8)
        ctx->do_dr1=1;
    ctx->nonref_dr = lavc_codec->id == CODEC_ID_H264;
    ctx->ip_count= ctx->b_count= 0;

    ctx->pic = avcodec_alloc_frame();
    ctx->avctx = avcodec_alloc_context3(lavc_codec);
    avctx = ctx->avctx;
    avctx->opaque = sh;
    avctx->codec_id = lavc_codec->id;

    avctx->get_format = get_format;
    if(ctx->do_dr1){
        avctx->flags|= CODEC_FLAG_EMU_EDGE;
        avctx->get_buffer = get_buffer;
        avctx->release_buffer = release_buffer;
        avctx->reget_buffer = get_buffer;
    }

    avctx->flags|= lavc_param_bitexact;

    avctx->coded_width = sh->disp_w;
    avctx->coded_height= sh->disp_h;
    avctx->workaround_bugs= lavc_param_workaround_bugs;
    switch (lavc_param_error_resilience) {
    case 5:
        avctx->err_recognition |= AV_EF_EXPLODE | AV_EF_COMPLIANT | AV_EF_CAREFUL;
        break;
    case 4:
    case 3:
        avctx->err_recognition |= AV_EF_AGGRESSIVE;
        // Fallthrough
    case 2:
        avctx->err_recognition |= AV_EF_COMPLIANT;
        // Fallthrough
    case 1:
        avctx->err_recognition |= AV_EF_CAREFUL;
    }
    avctx->flags |= lavc_param_gray;
#ifdef CODEC_FLAG2_SHOW_ALL
    if(!lavc_param_wait_keyframe) avctx->flags2 |= CODEC_FLAG2_SHOW_ALL;
#endif
    avctx->flags2|= lavc_param_fast;
    avctx->codec_tag= sh->format;
    avctx->stream_codec_tag= sh->video.fccHandler;
    avctx->idct_algo= lavc_param_idct_algo;
    avctx->error_concealment= lavc_param_error_concealment;
    avctx->debug= lavc_param_debug;
    if (lavc_param_debug)
        av_log_set_level(AV_LOG_DEBUG);
    avctx->debug_mv= lavc_param_vismv;
    avctx->skip_top   = lavc_param_skip_top;
    avctx->skip_bottom= lavc_param_skip_bottom;
    if(lavc_param_lowres_str != NULL)
    {
        sscanf(lavc_param_lowres_str, "%d,%d", &lavc_param_lowres, &lowres_w);
        if(lavc_param_lowres < 1 || lavc_param_lowres > 16 || (lowres_w > 0 && avctx->width < lowres_w))
            lavc_param_lowres = 0;
        avctx->lowres = lavc_param_lowres;
    }
    avctx->skip_loop_filter = str2AVDiscard(lavc_param_skip_loop_filter_str);
    avctx->skip_idct        = str2AVDiscard(lavc_param_skip_idct_str);
    avctx->skip_frame       = str2AVDiscard(lavc_param_skip_frame_str);

    if(lavc_avopt){
        if (parse_avopts(avctx, lavc_avopt) < 0 &&
            (!lavc_codec->priv_class ||
             parse_avopts(avctx->priv_data, lavc_avopt) < 0)) {
            mp_msg(MSGT_DECVIDEO, MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", lavc_avopt);
            uninit(sh);
            return 0;
        }
    }

    skip_idct = avctx->skip_idct;
    skip_frame = avctx->skip_frame;

    mp_dbg(MSGT_DECVIDEO, MSGL_DBG2, "libavcodec.size: %d x %d\n", avctx->width, avctx->height);
    switch (sh->format) {
    case mmioFOURCC('S','V','Q','3'):
    /* SVQ3 extradata can show up as sh->ImageDesc if demux_mov is used, or
       in the phony AVI header if demux_lavf is used. The first case is
       handled here; the second case falls through to the next section. */
        if (sh->ImageDesc) {
            avctx->extradata_size = (*(int *)sh->ImageDesc) - sizeof(int);
            avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            memcpy(avctx->extradata, ((int *)sh->ImageDesc)+1, avctx->extradata_size);
            break;
        }
        /* fallthrough */

    case mmioFOURCC('A','V','R','n'):
    case mmioFOURCC('M','J','P','G'):
    /* AVRn stores huffman table in AVI header */
    /* Pegasus MJPEG stores it also in AVI header, but it uses the common
       MJPG fourcc :( */
        if (!sh->bih || sh->bih->biSize <= sizeof(*sh->bih))
            break;
        av_dict_set(&opts, "extern_huff", "1", 0);
        avctx->extradata_size = sh->bih->biSize-sizeof(*sh->bih);
        avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        memcpy(avctx->extradata, sh->bih+1, avctx->extradata_size);

#if 0
        {
            int x;
            uint8_t *p = avctx->extradata;

            for (x=0; x<avctx->extradata_size; x++)
                mp_msg(MSGT_DECVIDEO, MSGL_INFO, "[%x] ", p[x]);
            mp_msg(MSGT_DECVIDEO, MSGL_INFO, "\n");
        }
#endif
        break;

    case mmioFOURCC('R', 'V', '1', '0'):
    case mmioFOURCC('R', 'V', '1', '3'):
    case mmioFOURCC('R', 'V', '2', '0'):
    case mmioFOURCC('R', 'V', '3', '0'):
    case mmioFOURCC('R', 'V', '4', '0'):
        if(sh->bih->biSize<sizeof(*sh->bih)+8){
            /* only 1 packet per frame & sub_id from fourcc */
            avctx->extradata_size= 8;
            avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            ((uint32_t *)avctx->extradata)[0] = 0;
            ((uint32_t *)avctx->extradata)[1] =
                (sh->format == mmioFOURCC('R', 'V', '1', '3')) ? 0x10003001 : 0x10000000;
        } else {
            /* has extra slice header (demux_rm or rm->avi streamcopy) */
            avctx->extradata_size = sh->bih->biSize-sizeof(*sh->bih);
            avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            memcpy(avctx->extradata, sh->bih+1, avctx->extradata_size);
        }
        avctx->sub_id= AV_RB32(avctx->extradata+4);

//        printf("%X %X %d %d\n", extrahdr[0], extrahdr[1]);
        break;

    default:
        if (!sh->bih || sh->bih->biSize <= sizeof(*sh->bih))
            break;
        avctx->extradata_size = sh->bih->biSize-sizeof(*sh->bih);
        avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        memcpy(avctx->extradata, sh->bih+1, avctx->extradata_size);
        break;
    }

    if(sh->bih)
        avctx->bits_per_coded_sample= sh->bih->biBitCount;

    avctx->thread_count = lavc_param_threads;
    avctx->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
    if(lavc_codec->capabilities & CODEC_CAP_HWACCEL)
        // HACK around badly placed checks in mpeg_mc_decode_init
        set_format_params(avctx, PIX_FMT_XVMC_MPEG2_IDCT);

    /* open it */
    if (avcodec_open2(avctx, lavc_codec, &opts) < 0) {
        mp_msg(MSGT_DECVIDEO, MSGL_ERR, MSGTR_CantOpenCodec);
        uninit(sh);
        return 0;
    }
    av_dict_free(&opts);
    // this is necessary in case get_format was never called and init_vo is
    // too late e.g. for H.264 VDPAU
    set_format_params(avctx, avctx->pix_fmt);
    mp_msg(MSGT_DECVIDEO, MSGL_V, "INFO: libavcodec init OK!\n");
    return 1; //mpcodecs_config_vo(sh, sh->disp_w, sh->disp_h, IMGFMT_YV12);
}
Example #10
int main (int argc, char *argv[]) {
    // setvbuf(stdout, NULL, _IONBF, 0);
    setlinebuf(stdout);

    assert(setenv("TZ", "UTC", 1) == 0);
    tzset();

    // see http://www.purposeful.co.uk/software/gopt/
    void *options = gopt_sort(&argc, (const char **)argv, gopt_start(
        gopt_option('h', 0, gopt_shorts('h', '?'), gopt_longs("help", "HELP"))//,
        // gopt_option('t', GOPT_ARG, gopt_shorts('t'), gopt_longs("thread-num")),
        // gopt_option('s', 0, gopt_shorts('s'), gopt_longs("silent", "quiet")),
    ));

    //TODO make customizable
    assert(setenv("LUA_PATH", "src/?.lua", 0) == 0);

    //http://rtmpdump.mplayerhq.hu/librtmp.3.html
    // const char usage[] = "usage: rtmp_load [--thread-num/-t N(default 1)] \"rtmp://example.com:1935/ app=app_name_or_empty playpath=path timeout=30\"\n";
    const char usage[] = "usage: rtmp_load test.lua\n";
    if (gopt(options, 'h')) {
        fprintf(stdout, usage);
        exit(EXIT_SUCCESS);
    }

    gopt_free(options);

    if (argc != 2) {
        // fprintf(stderr, "provide target rtmp string\n");
        fprintf(stderr, "provide test file\n");
        fprintf(stderr, usage);
        exit(EXIT_FAILURE);
    }

    char *script_path = argv[1];

    lua_State *lua_state = luaL_newstate();
    assert(lua_state != NULL);
    lua_gc(lua_state, LUA_GCSTOP, 0);
    luaL_openlibs(lua_state);
    lua_gc(lua_state, LUA_GCRESTART, -1);
    int r = luaL_loadfile(lua_state, script_path);
    if (r != 0) {
        if (r == LUA_ERRSYNTAX) {
            fprintf(stderr, "error loading %s, syntax error: %s\n", script_path, lua_tostring(lua_state, -1));
        } else if (r == LUA_ERRFILE) {
            fprintf(stderr, "failed to open %s: %s\n", script_path, lua_tostring(lua_state, -1));
        } else {
            fprintf(stderr, "error loading %s, ret = %i\n", script_path, r);
        }
        exit(EXIT_FAILURE);
    }
    lua_call(lua_state, 0, 1);


    // av_log_set_level(AV_LOG_ERROR);
    // av_log_set_level(AV_LOG_INFO); //http://libav.org/doxygen/master/log_8h.html
    av_log_set_level(AV_LOG_VERBOSE); // for parsing lib log
    // av_log_set_level(AV_LOG_DEBUG); // shows binary packet data
    

    //TODO http://libav.org/doxygen/master/librtmp_8c_source.html#l00080

    av_log_set_callback(av_log_my_callback);

    av_register_all();
    avcodec_register_all();
    avformat_network_init();

    if (av_lockmgr_register(handle_av_lock) != 0) {
        fprintf(stderr, "av_lockmgr_register failed\n");
        exit(EXIT_FAILURE);
    }

    lua_call(lua_state, 0, 0);


    lua_close(lua_state);

    av_lockmgr_register(NULL);
    avformat_network_deinit();


    exit(EXIT_SUCCESS);
    return 0;
}
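The av_log_my_callback passed to av_log_set_callback() is not shown; a minimal custom callback compatible with that API could be sketched as:

#include <stdarg.h>
#include <stdio.h>
#include <libavutil/log.h>

static void av_log_my_callback(void *avcl, int level, const char *fmt, va_list vl)
{
    if (level > av_log_get_level())  /* honor the AV_LOG_VERBOSE threshold set above */
        return;
    vfprintf(stderr, fmt, vl);       /* forward the message as-is to stderr */
}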
Example #11
int ReadFrames(VFInfo *st_info, CImgList<uint8_t> *pFrameList, unsigned int low_index, unsigned int hi_index)
{
        //target pixel format
	PixelFormat ffmpeg_pixfmt;
	if (st_info->pixelformat == 0)
	    ffmpeg_pixfmt = PIX_FMT_GRAY8;
	else 
	    ffmpeg_pixfmt = PIX_FMT_RGB24;

	st_info->next_index = low_index;

	if (st_info->pFormatCtx == NULL){
	    st_info->current_index= 0;

        av_log_set_level(AV_LOG_QUIET);
	    av_register_all();
	
	    // Open video file
	    if(avformat_open_input(&st_info->pFormatCtx, st_info->filename, NULL, NULL)!=0)
		return -1 ; // Couldn't open file
	 
	    // Retrieve stream information
	    if(avformat_find_stream_info(st_info->pFormatCtx,NULL)<0)
		return -1; // Couldn't find stream information
	
	    //dump_format(pFormatCtx,0,NULL,0);//debugging function to print infomation about format
	
	    unsigned int i;
	    st_info->videoStream = -1; // in case no video stream is found below
	    // Find the video stream
	    for(i=0; i<st_info->pFormatCtx->nb_streams; i++)
	    {
		if(st_info->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) 
	        {
		    st_info->videoStream=i;
		    break;
		}
	    }
	    if(st_info->videoStream==-1)
		return -1; //no video stream
	
	
	    // Get a pointer to the codec context for the video stream
	    st_info->pCodecCtx = st_info->pFormatCtx->streams[st_info->videoStream]->codec;
	    if (st_info->pCodecCtx == NULL){
		return -1;
	    }

	    // Find the decoder
	    st_info->pCodec = avcodec_find_decoder(st_info->pCodecCtx->codec_id);
	    if(st_info->pCodec==NULL) 
	    {
	  	return -1 ; // Codec not found
	    }
	    // Open codec
	    if(avcodec_open2(st_info->pCodecCtx, st_info->pCodec,NULL)<0)
		return -1; // Could not open codec

	    st_info->height = (st_info->height<=0) ? st_info->pCodecCtx->height : st_info->height;
	    st_info->width  = (st_info->width<= 0) ? st_info->pCodecCtx->width : st_info->width;
	}

        AVFrame *pFrame;

	// Allocate video frame
	pFrame=avcodec_alloc_frame();
	if (pFrame==NULL)
	    return -1;

	// Allocate an AVFrame structure
	AVFrame *pConvertedFrame = avcodec_alloc_frame();
	if(pConvertedFrame==NULL)
	  return -1;
		
	uint8_t *buffer;
	int numBytes;
	// Determine required buffer size and allocate buffer
	numBytes=avpicture_get_size(ffmpeg_pixfmt, st_info->width,st_info->height);
	buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
	if (buffer == NULL)
	    return -1;

	avpicture_fill((AVPicture *)pConvertedFrame,buffer,ffmpeg_pixfmt,st_info->width,st_info->height);
		
	int frameFinished;
	int size = 0;
	

        int channels = ffmpeg_pixfmt == PIX_FMT_GRAY8 ? 1 : 3;

	AVPacket packet;
	int result = 1;
	CImg<uint8_t> next_image;
	SwsContext *c = sws_getContext(st_info->pCodecCtx->width, st_info->pCodecCtx->height, st_info->pCodecCtx->pix_fmt, st_info->width, st_info->height, ffmpeg_pixfmt , SWS_BICUBIC, NULL, NULL, NULL);
	while ((result >= 0) && (size < st_info->nb_retrieval) && (st_info->current_index <= hi_index)){
	    result = av_read_frame(st_info->pFormatCtx, &packet);
	    if (result < 0)
		break;
	    if (packet.stream_index == st_info->videoStream) {
		AVPacket avpkt;
		av_init_packet(&avpkt);
		avpkt.data = packet.data;
		avpkt.size = packet.size;
		//
		// HACK for CorePNG to decode as normal PNG by default
		// same method used by ffmpeg
		avpkt.flags = AV_PKT_FLAG_KEY;

		avcodec_decode_video2(st_info->pCodecCtx, pFrame, &frameFinished, &avpkt);

		if (frameFinished) {
		    if (st_info->current_index == st_info->next_index){
			st_info->next_index += st_info->step;
			sws_scale(c, pFrame->data, pFrame->linesize, 0, st_info->pCodecCtx->height, pConvertedFrame->data, pConvertedFrame->linesize);
			next_image.assign(*pConvertedFrame->data, channels, st_info->width, st_info->height, 1, true);
			next_image.permute_axes("yzcx");
			pFrameList->push_back(next_image);
			size++;
		    }
		    st_info->current_index++;
		}
	    }
	    av_free_packet(&packet); // free every packet, not only video packets
	}


	if (result < 0){
	    avcodec_close(st_info->pCodecCtx);
	    avformat_close_input(&st_info->pFormatCtx);
	    st_info->pFormatCtx = NULL;
	    st_info->pCodecCtx = NULL;
	    st_info->width = -1;
	    st_info->height = -1;
	}

	av_free(buffer);
	buffer = NULL;
	av_free(pConvertedFrame);
	pConvertedFrame = NULL;
	av_free(pFrame);
	pFrame = NULL;
	sws_freeContext(c);
	c = NULL;

	return size; 
}
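The VFInfo structure carrying decoder state between calls is part of pHash and not shown here; from the field accesses above it is presumably close to:

/* presumed shape of pHash's VFInfo; field types are assumptions */
typedef struct {
    AVFormatContext *pFormatCtx;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    char *filename;
    int videoStream;
    int width, height;     /* target size; <= 0 means "use the source size" */
    int pixelformat;       /* 0 = PIX_FMT_GRAY8, otherwise PIX_FMT_RGB24 */
    int nb_retrieval;      /* maximum number of frames to return per call */
    int step;              /* index distance between sampled frames */
    int current_index;
    int next_index;
} VFInfo;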
Example #12
int main(int argc, const char *argv[]) {
  int ret = 0, got_frame, got_output;
  int video_stream_idx = -1;
  int video_dst_bufsize;
  const char *src_filename;
  const char *dst_filename;
  FILE *dst_file                  = NULL;
  AVCodec *codec_enc              = NULL;
  AVFormatContext *fmt_ctx        = NULL;
  AVStream *video_stream          = NULL;
  AVCodecContext *video_dec_ctx   = NULL;
  AVCodecContext *video_enc_ctx   = NULL;
  AVFrame *frame                  = NULL;
  AVPacket pkt_dec, pkt_enc;
  uint8_t *video_dst_data[4]      = {NULL};
  int video_dst_linesize[4];
  
  if (argc != 3) {
    printf("Usage: %s <in_file> <out_file>\n", argv[0]);
    exit(1);
  }
  
  av_register_all();
  av_log_set_level(AV_LOG_DEBUG);
  
  src_filename = argv[1];
  dst_filename = argv[2];
  
  codec_enc = avcodec_find_encoder(AV_CODEC_ID_JPEG2000);
  if (!codec_enc) {
      fprintf(stderr, "Codec not found\n");
      exit(1);
  }
  
  video_enc_ctx = avcodec_alloc_context3(codec_enc);
  if (!video_enc_ctx) {
      fprintf(stderr, "Could not allocate video codec context\n");
      exit(1);
  }
//   j2kenc_init(video_enc_ctx);
  
  /* open input file, and allocate format context */
  if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
    fprintf(stderr, "Could not open source file %s\n", src_filename);
    exit(1);
  }
  
  /* retrieve stream information */
  if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
    fprintf(stderr, "Could not find stream information\n");
    exit(1);
  }
  
  if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO, src_filename) >= 0) {
    video_stream = fmt_ctx->streams[video_stream_idx];
    video_dec_ctx = video_stream->codec;
    
    video_enc_ctx->width = video_dec_ctx->width;
    video_enc_ctx->height = video_dec_ctx->height;
    video_enc_ctx->pix_fmt = video_dec_ctx->pix_fmt;
    
    // make ffmpeg not complain about j2k being experimental
    video_enc_ctx->strict_std_compliance = -2;
    
//     printf("About to open encoder\n");
    if (avcodec_open2(video_enc_ctx, codec_enc, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
      fprintf(stderr, "Could not open destination file %s\n", dst_filename);
      ret = 1;
      goto end;
    }

    /* allocate image where the decoded image will be put */
    ret = av_image_alloc(video_dst_data, video_dst_linesize,
              video_dec_ctx->width, video_dec_ctx->height,
              video_dec_ctx->pix_fmt, 1);
    if (ret < 0) {
      fprintf(stderr, "Could not allocate raw video buffer\n");
      goto end;
    }
    video_dst_bufsize = ret;
  }
  
  /* dump input information to stderr */
  av_dump_format(fmt_ctx, 0, src_filename, 0);
  
  frame = av_frame_alloc();
  if (!frame) {
    fprintf(stderr, "Could not allocate frame\n");
    ret = AVERROR(ENOMEM);
    goto end;
  }
  
  /* initialize packets, set data to NULL, let the demuxer fill pkt_dec;
   * pkt_enc is zeroed too so av_free_packet() at 'end' is safe even if
   * no frame was ever encoded */
  av_init_packet(&pkt_dec);
  pkt_dec.data = NULL;
  pkt_dec.size = 0;
  av_init_packet(&pkt_enc);
  pkt_enc.data = NULL;
  pkt_enc.size = 0;

  if (video_stream)
    printf("Demuxing video from file '%s' into '%s'\n", src_filename, dst_filename);
  
  /* read frames from the file */
  while (av_read_frame(fmt_ctx, &pkt_dec) >= 0) {
//     AVPacket orig_pkt = pkt;
    do {
      ret = decode_packet(&got_frame, 0, &pkt_dec, video_dec_ctx, frame);
      if (ret < 0)
        break;
      pkt_dec.data += ret;
      pkt_dec.size -= ret;
    } while (pkt_dec.size > 0);
//     av_free_packet(&orig_pkt);
  }
  /* flush cached frames */
  pkt_dec.data = NULL;
  pkt_dec.size = 0;
  do {
    decode_packet(&got_frame, 1, &pkt_dec, video_dec_ctx, frame);
    if (got_frame) {
      // DO SOME ENCODING HERE
      av_init_packet(&pkt_enc);
      pkt_enc.data = NULL;
      pkt_enc.size = 0;
      
      ret = avcodec_encode_video2(video_enc_ctx, &pkt_enc, frame, &got_output);
      if (ret < 0) {
        fprintf(stderr, "Error encoding frame\n");
        goto end;
      }

      if (got_output) {
        printf("Write frame (size=%5d)\n", pkt_enc.size);
        fwrite(pkt_enc.data, 1, pkt_enc.size, dst_file);
      }
    }
  } while (got_frame);
  
  printf("Demuxing succeeded.\n");
  
end:
  av_free_packet(&pkt_enc);
  av_free_packet(&pkt_dec);
  if (video_dec_ctx)
    avcodec_close(video_dec_ctx);
  if (video_enc_ctx)
    avcodec_close(video_enc_ctx);
//   if (codec_enc)
//     av_free(codec_enc);
  avformat_close_input(&fmt_ctx);
  if (dst_file)
    fclose(dst_file);
  av_frame_free(&frame);
  av_free(video_dst_data[0]);

  return ret < 0;
}
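open_codec_context() is not shown here; in FFmpeg's demuxing examples of that era it typically follows this sketch:

static int open_codec_context(int *stream_idx, AVFormatContext *fmt_ctx,
                              enum AVMediaType type, const char *filename)
{
    int ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    AVStream *st;
    AVCodec *dec;

    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in '%s'\n",
                av_get_media_type_string(type), filename);
        return ret;
    }
    *stream_idx = ret;
    st = fmt_ctx->streams[*stream_idx];
    dec = avcodec_find_decoder(st->codec->codec_id);
    if (!dec) {
        fprintf(stderr, "Failed to find codec\n");
        return AVERROR(EINVAL);
    }
    return avcodec_open2(st->codec, dec, NULL); /* 0 on success */
}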
Example #13
int main(int argc, char* argv[])
{
    avcodec_register_all();
    av_register_all();

	av_log_set_level(AV_LOG_DEBUG);
    AVFormatContext *pFormatCtx = avformat_alloc_context();
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    int videoStream = -1;
	int ret = 0;
	
    fprintf(stdout, "%s\n", argv[1]);
	
    AVInputFormat *fileFormat;
    fileFormat = av_find_input_format("ass");
	if (!fileFormat)
	{
        av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", "ass");
        return AVERROR(EINVAL);
    }
	
	char *charenc = NULL;
	if (argv[2] != NULL)
	{
		charenc = av_mallocz(strlen(argv[2]) + 1); // +1 for the terminating NUL
		memcpy(charenc, argv[2], strlen(argv[2]));

		printf("charenc :%p  %s\n", charenc, charenc);
	}
    
	AVDictionary *codec_opts = NULL;
	if (charenc)
	{
        av_dict_set(&codec_opts, "sub_charenc", charenc, 0);
    }
	
    if ((ret = avformat_open_input(&pFormatCtx, argv[1], fileFormat, &codec_opts)) != 0)
    {
        fprintf(stderr, "av_open_input_file \n");

        return -1;
    }

	fprintf(stdout, "pFormatCtx %p\n", pFormatCtx);

    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
    {
        fprintf(stderr, "av_find_stream_info \n");

        return -1;
    }

    av_dump_format(pFormatCtx, 0, argv[1], 0);

    int i;
    for (i = 0; i < pFormatCtx->nb_streams; ++i)
    {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE)
        {
            videoStream = i;

            break ;
        }
    }

    if (videoStream == -1)
    {
        fprintf(stderr, "Unsupported videoStream.\n");

        return -1;
    }

    fprintf(stdout, "videoStream: %d\n", videoStream);

    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
	pCodecCtx->debug = 1;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
    {
        fprintf(stderr, "Unsupported codec.\n");

        return -1;
    }

	
	//pCodecCtx->sub_charenc = charenc;//"UTF-16LE";
    
    if (avcodec_open2(pCodecCtx, pCodec, &codec_opts) < 0)
    {
        fprintf(stderr, "Could not open codec.\n");

        return -1;
    }

	printf("pCodecCtx->sub_charenc :%s\n", pCodecCtx->sub_charenc);

    int index;
    
    AVPacket packet;
    av_init_packet(&packet);
    AVSubtitle sub;
    int gotSubtitle;

    for (index = 0; ; index++)
    {
        //memset(&packet, 0, sizeof (packet));
        ret = av_read_frame(pFormatCtx, &packet);
        if (ret < 0)
        {
            fprintf(stderr, "read frame ret:%s\n", ret_str(ret));

            break ;
        }

        fprintf(stdout, "stream_index:%d\n", packet.stream_index);

        if (packet.stream_index == videoStream)
        {
            ret = avcodec_decode_subtitle2(pCodecCtx, &sub, &gotSubtitle, &packet);

            if (index > 30)
            {
                break ;
            }

            fprintf(stdout, "gotSubtitle:%d\n", gotSubtitle);
            dumpSubtitle(&sub);
        }
        av_free_packet(&packet);
    }

end:
    fprintf(stderr, "end return.\n");
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);


    return 0;
}
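dumpSubtitle() is not part of the libraries; a minimal version matching its use above might print each decoded rectangle:

/* a sketch: print the text/ASS payload of every rect in an AVSubtitle */
static void dumpSubtitle(AVSubtitle *sub)
{
    unsigned i;
    for (i = 0; i < sub->num_rects; i++) {
        AVSubtitleRect *r = sub->rects[i];
        if (r->type == SUBTITLE_ASS && r->ass)
            printf("rect %u (ass):  %s\n", i, r->ass);
        else if (r->type == SUBTITLE_TEXT && r->text)
            printf("rect %u (text): %s\n", i, r->text);
    }
}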
Example #14
int main(void){
    SoftFloat one= av_int2sf(1, 0);
    SoftFloat sf1, sf2, sf3;
    double d1, d2, d3;
    int i, j;
    av_log_set_level(AV_LOG_DEBUG);

    d1= 1;
    for(i= 0; i<10; i++){
        d1= 1/(d1+1);
    }
    printf("test1 double=%d\n", (int)(d1 * (1<<24)));

    sf1= one;
    for(i= 0; i<10; i++){
        sf1= av_div_sf(one, av_normalize_sf(av_add_sf(one, sf1)));
    }
    printf("test1 sf    =%d\n", av_sf2int(sf1, 24));


    for(i= 0; i<100; i++){
        START_TIMER
        d1= i;
        d2= i/100.0;
        for(j= 0; j<1000; j++){
            d1= (d1+1)*d2;
        }
        STOP_TIMER("float add mul")
    }
    printf("test2 double=%d\n", (int)(d1 * (1<<24)));

    for(i= 0; i<100; i++){
        START_TIMER
        sf1= av_int2sf(i, 0);
        sf2= av_div_sf(av_int2sf(i, 2), av_int2sf(200, 3));
        for(j= 0; j<1000; j++){
            sf1= av_mul_sf(av_add_sf(sf1, one),sf2);
        }
        STOP_TIMER("softfloat add mul")
    }
    printf("test2 sf    =%d (%d %d)\n", av_sf2int(sf1, 24), sf1.exp, sf1.mant);

    d1 = 0.0177764893;
    d2 = 1374.40625;
    d3 = 0.1249694824;
    d2 += d1;
    d3 += d2;
    printf("test3 double: %.10lf\n", d3);

    sf1 = FLOAT_0_017776489257;
    sf2 = FLOAT_1374_40625;
    sf3 = FLOAT_0_1249694824218;
    sf2 = av_add_sf(sf1, sf2);
    sf3 = av_add_sf(sf3, sf2);
    printf("test3 softfloat: %.10lf (0x%08x %d)\n", (double)av_sf2double(sf3), sf3.mant, sf3.exp);

    sf1 = av_int2sf(0xFFFFFFF0, 0);
    printf("test4 softfloat: %.10lf (0x%08x %d)\n", (double)av_sf2double(sf1), sf1.mant, sf1.exp);
    sf1 = av_int2sf(0x00000010, 0);
    printf("test4 softfloat: %.10lf (0x%08x %d)\n", (double)av_sf2double(sf1), sf1.mant, sf1.exp);

    sf1 = av_int2sf(0x1FFFFFFF, 0);
    printf("test4 softfloat: %.10lf (0x%08x %d)\n", (double)av_sf2double(sf1), sf1.mant, sf1.exp);
    sf1 = av_int2sf(0xE0000001, 0);
    printf("test4 softfloat: %.10lf (0x%08x %d)\n", (double)av_sf2double(sf1), sf1.mant, sf1.exp);


    sf1 = (SoftFloat){ 0x20000000,   MIN_EXP };
    sf1 = av_mul_sf(sf1, sf1);
    printf("test5 softfloat: %.10lf (0x%08x %d)\n", (double)av_sf2double(sf1), sf1.mant, sf1.exp);

    sf1 = (SoftFloat){ 0x20000000,   MIN_EXP };
    sf2 = (SoftFloat){ 0x20000000,   MAX_EXP };
    i = av_cmp_sf(sf1, sf2);
    j = av_cmp_sf(sf2, sf1);
    sf1 = av_div_sf(sf1, sf2);
    printf("test6 softfloat: %.10lf (0x%08x %d) %d %d\n", (double)av_sf2double(sf1), sf1.mant, sf1.exp, i, j);

    for(i= -50; i<50; i++) {
        sf1= av_int2sf(i, 0);
        for(j= -50; j<50; j++) {
            int c;
            sf2= av_int2sf(j, 0);
            c = av_cmp_sf(sf1, sf2);
            if (FFDIFFSIGN(i,j) != c && (FFDIFFSIGN(i,j)^c)<0) {
                printf("av_cmp_sf failed at %d %d as %X\n", i, j, c);
            }
            c = av_gt_sf(sf1, sf2);
            if ((i>j) != c) {
                printf("av_gt_sf failed at %d %d as %X\n", i, j, c);
            }
        }
        sf1 = av_int2sf(1, i);
        for(j = -50; j < 50; j++) {
            int c;
            sf2 = av_int2sf(1, j);
            c = av_cmp_sf(sf2, sf1);
            if (FFDIFFSIGN(i,j) != c && (FFDIFFSIGN(i,j)^c) < 0) {
                printf("av_cmp_sf failed2 at %d %d as %X\n", i, j, c);
            }
            c = av_gt_sf(sf1, sf2);
            if ((i<j) != c) {
                printf("av_gt_sf failed2 at %d %d as %X\n", i, j, c);
            }
        }
    }


    for(i= 0; i<4*36; i++){
        int s, c;
        double errs, errc;

        av_sincos_sf(i*(1ULL<<32)/36/4, &s, &c);
        errs = (double)s/ (1<<30) - sin(i*M_PI/36);
        errc = (double)c/ (1<<30) - cos(i*M_PI/36);
        if (fabs(errs) > 0.00000002 || fabs(errc) >0.001) {
            printf("sincos FAIL %d %f %f %f %f\n", i, (float)s/ (1<<30), (float)c/ (1<<30), sin(i*M_PI/36), cos(i*M_PI/36));
        }

    }
    return 0;

}
Example #15
int fpcalc_main(int argc, char **argv)
{
	int i, j, max_length = 120, num_file_names = 0, raw = 0, raw_fingerprint_size, duration;
	int32_t *raw_fingerprint;
	char *file_name, *fingerprint, **file_names;
	ChromaprintContext *chromaprint_ctx;
	int algo = CHROMAPRINT_ALGORITHM_DEFAULT, num_failed = 0, do_hash = 0;

	file_names = malloc(argc * sizeof(char *));
	for (i = 1; i < argc; i++) {
		char *arg = argv[i];
		if (!strcmp(arg, "-length") && i + 1 < argc) {
			max_length = atoi(argv[++i]);
		}
		else if (!strcmp(arg, "-version") || !strcmp(arg, "-v")) {
			printf("fpcalc version %s\n", chromaprint_get_version());
			return 0;
		}
		else if (!strcmp(arg, "-raw")) {
			raw = 1;
		}
		else if (!strcmp(arg, "-hash")) {
			do_hash = 1;
		}
		else if (!strcmp(arg, "-algo") && i + 1 < argc) {
			const char *v = argv[++i];
			if (!strcmp(v, "test1")) { algo = CHROMAPRINT_ALGORITHM_TEST1; }
			else if (!strcmp(v, "test2")) { algo = CHROMAPRINT_ALGORITHM_TEST2; }
			else if (!strcmp(v, "test3")) { algo = CHROMAPRINT_ALGORITHM_TEST3; }
			else if (!strcmp(v, "test4")) { algo = CHROMAPRINT_ALGORITHM_TEST4; }
			else {
				fprintf(stderr, "WARNING: unknown algorithm, using the default\n");
			}
		}
		else if (!strcmp(arg, "-set") && i + 1 < argc) {
			i += 1;
		}
		else {
			file_names[num_file_names++] = argv[i];
		}
	}

	if (!num_file_names) {
		printf("usage: %s [OPTIONS] FILE...\n\n", argv[0]);
		printf("Options:\n");
		printf("  -version      print version information\n");
		printf("  -length SECS  length of the audio data used for fingerprint calculation (default 120)\n");
		printf("  -raw          output the raw uncompressed fingerprint\n");
		printf("  -algo NAME    version of the fingerprint algorithm\n");
		printf("  -hash         calculate also the fingerprint hash\n");
		return 2;
	}

	av_register_all();
	av_log_set_level(AV_LOG_ERROR);

	chromaprint_ctx = chromaprint_new(algo);

	for (i = 1; i < argc; i++) {
		char *arg = argv[i];
		if (!strcmp(arg, "-set") && i + 1 < argc) {
			char *name = argv[++i];
			char *value = strchr(name, '=');
			if (value) {
				*value++ = '\0';
				chromaprint_set_option(chromaprint_ctx, name, atoi(value));
			}
		}
	}

	for (i = 0; i < num_file_names; i++) {
		file_name = file_names[i];
		if (!decode_audio_file(chromaprint_ctx, file_name, max_length, &duration)) {
			fprintf(stderr, "ERROR: unable to calculate fingerprint for file %s, skipping\n", file_name);
			num_failed++;
			continue;
		}
		if (i > 0) {
			printf("\n");
		}
		printf("FILE=%s\n", file_name);
		printf("DURATION=%d\n", duration);
		if (raw) {
			if (!chromaprint_get_raw_fingerprint(chromaprint_ctx, (void **)&raw_fingerprint, &raw_fingerprint_size)) {
				fprintf(stderr, "ERROR: unable to calculate fingerprint for file %s, skipping\n", file_name);
				num_failed++;
				continue;
			}
			printf("FINGERPRINT=");
			for (j = 0; j < raw_fingerprint_size; j++) {
				printf("%d%s", raw_fingerprint[j], j + 1 < raw_fingerprint_size ? "," : "");
			}
			printf("\n");
			chromaprint_dealloc(raw_fingerprint);
		}
		else {
			if (!chromaprint_get_fingerprint(chromaprint_ctx, &fingerprint)) {
				fprintf(stderr, "ERROR: unable to calculate fingerprint for file %s, skipping\n", file_name);
				num_failed++;
				continue;
			}
			printf("FINGERPRINT=%s\n", fingerprint);
			chromaprint_dealloc(fingerprint);
		}
        if (do_hash) {
            int32_t hash = 0;
            chromaprint_get_fingerprint_hash(chromaprint_ctx, &hash);
            printf("HASH=%d\n", hash);
        }
	}

	chromaprint_free(chromaprint_ctx);
	free(file_names);

	return num_failed ? 1 : 0;
}
Example #16
int main (int argc, char **argv)
{
    int ret = 0;
    const char *src_filename = NULL;
    const char *dst_filename = NULL;
    char* format             = NULL;
    char* codec              = NULL;

    if (argc != 5 && argc != 3) {
        fprintf(stderr, "usage: %s input_file output_file [format codec]\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "frames to a rawvideo/rawaudio file named output_file.\n"
                "Optionally format and codec can be specified.\n\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];
    dst_filename = argv[2];
    if (argc == 5) {
        format = argv[3];
        codec  = argv[4];
    }

    /* log all debug messages */
    av_log_set_level(AV_LOG_DEBUG);

    /* register all formats and codecs */
    av_register_all();

#ifdef __AFL_HAVE_MANUAL_CONTROL
    while (__AFL_LOOP(1000))
#endif
    {
        AVFormatContext *fmt_ctx = NULL;
        AVInputFormat *fmt       = NULL;
        AVCodecContext *dec_ctx  = NULL;
        FILE *dst_file           = NULL;
        AVFrame *frame           = NULL;
        int got_frame            = 0;
        int frame_count          = 0;
        AVPacket pkt             = { 0 };
        AVDictionary *opts       = NULL;
        ret = 0;
        width = 0;
        height = 0;
        pix_fmt = AV_PIX_FMT_NONE;
        video_dst_bufsize = 0;
        memset(video_dst_data, 0, sizeof(video_dst_data));
        memset(video_dst_linesize, 0, sizeof(video_dst_linesize));

        /* set the whitelists for formats and codecs */
        if (av_dict_set(&opts, "codec_whitelist", codec, 0) < 0) {
            fprintf(stderr, "Could not set codec_whitelist.\n");
            ret = 1;
            goto end;
        }
        if (av_dict_set(&opts, "format_whitelist", format, 0) < 0) {
            fprintf(stderr, "Could not set format_whitelist.\n");
            ret = 1;
            goto end;
        }

        if (format) {
            fmt = av_find_input_format(format);
            if (!fmt) {
                fprintf(stderr, "Could not find input format %s\n", format);
                ret = 1;
                goto end;
            }
        }

        /* open input file, and allocate format context */
        if (avformat_open_input(&fmt_ctx, src_filename, fmt, &opts) < 0) {
            fprintf(stderr, "Could not open source file %s\n", src_filename);
            ret = 1;
            goto end;
        }

        /* retrieve stream information */
        if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
            fprintf(stderr, "Could not find stream information\n");
        }

        /* find stream with specified codec */
        if (open_codec_context(&dec_ctx, fmt_ctx, codec) < 0) {
            fprintf(stderr, "Could not open any stream in input file '%s'\n",
                    src_filename);
            ret = 1;
            goto end;
        }

        /* open output file */
        dst_file = fopen(dst_filename, "wb");
        if (!dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", dst_filename);
            ret = 1;
            goto end;
        }

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* allocate image where the decoded image will be put */
            width = dec_ctx->width;
            height = dec_ctx->height;
            pix_fmt = dec_ctx->pix_fmt;
            video_dst_bufsize = av_image_alloc(video_dst_data, video_dst_linesize,
                                 width, height, pix_fmt, 1);
            if (video_dst_bufsize < 0) {
                fprintf(stderr, "Could not allocate raw video buffer\n");
                ret = 1;
                goto end;
            }
        }

        /* dump input information to stderr */
        av_dump_format(fmt_ctx, 0, src_filename, 0);

        /* allocate frame */
        frame = av_frame_alloc();
        if (!frame) {
            fprintf(stderr, "Could not allocate frame\n");
            ret = 1;
            goto end;
        }

        printf("Demuxing from file '%s' into '%s'\n", src_filename, dst_filename);

        /* read frames from the file */
        while (av_read_frame(fmt_ctx, &pkt) >= 0) {
            do {
                int decoded = decode_packet(dec_ctx, dst_file, frame, &got_frame, &frame_count, &pkt);
                if (decoded < 0)
                    break;
                /* increase data pointer and decrease size of remaining data buffer */
                pkt.data += decoded;
                pkt.size -= decoded;
            } while (pkt.size > 0);
            av_free_packet(&pkt);
        }

        printf("Flushing cached frames.\n");
        pkt.data = NULL;
        pkt.size = 0;
        do {
            decode_packet(dec_ctx, dst_file, frame, &got_frame, &frame_count, &pkt);
        } while (got_frame);

        printf("Demuxing done.\n");

end:
        /* free allocated memory */
        av_dict_free(&opts);
        avcodec_close(dec_ctx);
        avformat_close_input(&fmt_ctx);
        if (dst_file)
            fclose(dst_file);
        av_frame_free(&frame);
        av_free(video_dst_data[0]);
    }

    return ret;
}
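decode_packet() is shared boilerplate not shown in this example; based on how it is called above, a sketch of the video-only variant (using the file-scope video_dst_* / width / height / pix_fmt variables the example assigns) looks like:

#include <libavutil/imgutils.h>

static int decode_packet(AVCodecContext *dec_ctx, FILE *dst_file, AVFrame *frame,
                         int *got_frame, int *frame_count, AVPacket *pkt)
{
    int ret = avcodec_decode_video2(dec_ctx, frame, got_frame, pkt);
    if (ret < 0)
        return ret;
    if (*got_frame) {
        /* copy the decoded frame into the packed buffer allocated with
         * av_image_alloc(), then append it to the raw output file */
        av_image_copy(video_dst_data, video_dst_linesize,
                      (const uint8_t **)frame->data, frame->linesize,
                      pix_fmt, width, height);
        fwrite(video_dst_data[0], 1, video_dst_bufsize, dst_file);
        (*frame_count)++;
    }
    return pkt->size; /* the whole packet was consumed */
}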
Example #17
static int
ngx_http_video_thumbextractor_get_thumb(ngx_http_video_thumbextractor_loc_conf_t *cf, ngx_http_video_thumbextractor_file_info_t *info, int64_t second, ngx_uint_t width, ngx_uint_t height, caddr_t *out_buffer, size_t *out_len, ngx_pool_t *temp_pool, ngx_log_t *log)
{
    int              rc, videoStream, frameFinished = 0, frameDecoded = 0;
    unsigned int     i;
    AVFormatContext *pFormatCtx = NULL;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL, *pFrameRGB = NULL;
    uint8_t         *buffer = NULL;
    AVPacket         packet;
    size_t           uncompressed_size;
    float            scale = 0.0, new_scale = 0.0, scale_sws = 0.0, scale_w = 0.0, scale_h = 0.0;
    int              sws_width = 0, sws_height = 0;
    ngx_flag_t       needs_crop = 0;
    MagickWand      *m_wand = NULL;
    MagickBooleanType mrc;
    unsigned char   *bufferAVIO = NULL;
    AVIOContext     *pAVIOCtx = NULL;
    char            *filename = (char *) info->filename->data;

    // Open video file
    if ((info->fd = fopen(filename, "rb")) == NULL) {
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Couldn't open file %s", filename);
        rc = EXIT_FAILURE;
        goto exit;
    }

    // Get file size
    fseek(info->fd, 0, SEEK_END);
    info->size = ftell(info->fd) - info->offset;
    fseek(info->fd, 0, SEEK_SET);

    pFormatCtx = avformat_alloc_context();
    bufferAVIO = (unsigned char *) av_malloc(NGX_HTTP_VIDEO_THUMBEXTRACTOR_BUFFER_SIZE * sizeof(unsigned char));
    if ((pFormatCtx == NULL) || (bufferAVIO == NULL)) {
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Couldn't alloc AVIO buffer");
        rc = NGX_ERROR;
        goto exit;
    }

    pAVIOCtx = avio_alloc_context(bufferAVIO, NGX_HTTP_VIDEO_THUMBEXTRACTOR_BUFFER_SIZE, 0, info, ngx_http_video_thumbextractor_read_data_from_file, NULL, ngx_http_video_thumbextractor_seek_data_from_file);
    if (pAVIOCtx == NULL) {
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Couldn't alloc AVIO context");
        rc = NGX_ERROR;
        goto exit;
    }

    pFormatCtx->pb = pAVIOCtx;

    // Open video file
    if ((rc = avformat_open_input(&pFormatCtx, filename, NULL, NULL)) != 0) {
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Couldn't open file %s, error: %d", filename, rc);
        rc = (rc == AVERROR(NGX_ENOENT)) ? NGX_HTTP_VIDEO_THUMBEXTRACTOR_FILE_NOT_FOUND : NGX_ERROR;
        goto exit;
    }

    // Retrieve stream information
#if LIBAVFORMAT_VERSION_INT <= AV_VERSION_INT(53, 5, 0)
    if (av_find_stream_info(pFormatCtx) < 0) {
#else
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
#endif
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Couldn't find stream information");
        rc = NGX_ERROR;
        goto exit;
    }

    if ((pFormatCtx->duration > 0) && ((((float_t) pFormatCtx->duration / AV_TIME_BASE) - second)) < 0.1) {
        ngx_log_error(NGX_LOG_WARN, log, 0, "video thumb extractor module: seconds greater than duration");
        rc = NGX_HTTP_VIDEO_THUMBEXTRACTOR_SECOND_NOT_FOUND;
        goto exit;
    }

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    }

    if (videoStream == -1) {
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Didn't find a video stream");
        rc = NGX_ERROR;
        goto exit;
    }

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    if ((pCodec = avcodec_find_decoder(pCodecCtx->codec_id)) == NULL) {
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Codec %d not found", pCodecCtx->codec_id);
        rc = NGX_ERROR;
        goto exit;
    }

    // Open codec
#if LIBAVCODEC_VERSION_INT <= AV_VERSION_INT(53, 8, 0)
    if ((rc = avcodec_open(pCodecCtx, pCodec)) < 0) {
#else
    if ((rc = avcodec_open2(pCodecCtx, pCodec, NULL)) < 0) {
#endif
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Could not open codec, error %d", rc);
        rc = NGX_ERROR;
        goto exit;
    }

    if (height == 0) {
        // keep original format
        width = pCodecCtx->width;
        height = pCodecCtx->height;
    } else if (width == 0) {
        // calculate width related with original aspect
        width = height * pCodecCtx->width / pCodecCtx->height;
    }

    if ((width < 16) || (height < 16)) {
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Very small size requested, %ui x %ui", width, height);
        rc = NGX_ERROR;
        goto exit;
    }

    scale     = (float) pCodecCtx->width / pCodecCtx->height;
    new_scale = (float) width / height;

    sws_width = width;
    sws_height = height;

    if (scale != new_scale) {
        scale_w = (float) width / pCodecCtx->width;
        scale_h = (float) height / pCodecCtx->height;
        scale_sws = (scale_w > scale_h) ? scale_w : scale_h;

        sws_width = pCodecCtx->width * scale_sws + 0.5;
        sws_height = pCodecCtx->height * scale_sws + 0.5;

        needs_crop = 1;
    }


    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    // Allocate an AVFrame structure
    pFrameRGB = avcodec_alloc_frame();
    if ((pFrame == NULL) || (pFrameRGB == NULL)) {
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Could not alloc frame memory");
        rc = NGX_ERROR;
        goto exit;
    }

    // Determine required buffer size and allocate buffer
    uncompressed_size = avpicture_get_size(PIX_FMT_RGB24, sws_width, sws_height) * sizeof(uint8_t);
    if ((buffer = (uint8_t *) av_malloc(uncompressed_size)) == NULL) {
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Couldn't alloc frame buffer");
        rc = NGX_ERROR;
        goto exit;
    }

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture
    avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24, sws_width, sws_height);

    if ((rc = av_seek_frame(pFormatCtx, -1, second * AV_TIME_BASE, cf->next_time ? 0 : AVSEEK_FLAG_BACKWARD)) < 0) {
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Seek to an invalid time, error: %d", rc);
        rc = NGX_HTTP_VIDEO_THUMBEXTRACTOR_SECOND_NOT_FOUND;
        goto exit;
    }

    int64_t second_on_stream_time_base = second * pFormatCtx->streams[videoStream]->time_base.den / pFormatCtx->streams[videoStream]->time_base.num;

    // Find the nearest frame
    rc = NGX_HTTP_VIDEO_THUMBEXTRACTOR_SECOND_NOT_FOUND;
    while (!frameFinished && av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            // Did we get a video frame?
            if (frameFinished) {
                frameDecoded = 1;
                if (!cf->only_keyframe && (pFrame->pkt_pts < second_on_stream_time_base)) {
                    frameFinished = 0;
                }
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }
    av_free_packet(&packet);

    if (frameDecoded) {
        // Convert the image from its native format to RGB
        struct SwsContext *img_resample_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                sws_width, sws_height, PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

        sws_scale(img_resample_ctx, (const uint8_t * const *) pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
        sws_freeContext(img_resample_ctx);

        if (needs_crop) {
            MagickWandGenesis();
            mrc = MagickTrue;

            if ((m_wand = NewMagickWand()) == NULL){
                ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Could not allocate MagickWand memory");
                mrc = MagickFalse;
            }

            if (mrc == MagickTrue) {
                mrc = MagickConstituteImage(m_wand, sws_width, sws_height, NGX_HTTP_VIDEO_THUMBEXTRACTOR_RGB, CharPixel, pFrameRGB->data[0]);
            }

            if (mrc == MagickTrue) {
                mrc = MagickSetImageGravity(m_wand, CenterGravity);
            }

            if (mrc == MagickTrue) {
                mrc = MagickCropImage(m_wand, width, height, (sws_width-width)/2, (sws_height-height)/2);
            }

            if (mrc == MagickTrue) {
                mrc = MagickExportImagePixels(m_wand, 0, 0, width, height, NGX_HTTP_VIDEO_THUMBEXTRACTOR_RGB, CharPixel, pFrameRGB->data[0]);
            }

            /* Clean up */
            if (m_wand) {
                m_wand = DestroyMagickWand(m_wand);
            }

            MagickWandTerminus();

            if (mrc != MagickTrue) {
                ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Error cropping image");
                goto exit;
            }
        }

        // Compress to jpeg
        if (ngx_http_video_thumbextractor_jpeg_compress(cf, pFrameRGB->data[0], pCodecCtx->width, pCodecCtx->height, width, height, out_buffer, out_len, uncompressed_size, temp_pool) == 0) {
            rc = NGX_OK;
        }
    }

exit:

    if ((info->fd != NULL) && (fclose(info->fd) != 0)) {
        ngx_log_error(NGX_LOG_ERR, log, 0, "video thumb extractor module: Couldn't close file %s", filename);
        rc = EXIT_FAILURE;
    }

    /* destroy unneeded objects */

    // Free the RGB image
    if (buffer != NULL) av_free(buffer);
    if (pFrameRGB != NULL) av_freep(&pFrameRGB);

    // Free the YUV frame
    if (pFrame != NULL) av_freep(&pFrame);

    // Close the codec
    if (pCodecCtx != NULL) avcodec_close(pCodecCtx);

    // Close the video file
    if (pFormatCtx != NULL) {
#if LIBAVFORMAT_VERSION_INT <= AV_VERSION_INT(53, 5, 0)
        av_close_input_file(pFormatCtx);
#else
        avformat_close_input(&pFormatCtx);
#endif
    }

    // Free the AVIO buffer (av_freep() on the context frees its first member,
    // the internal buffer; newer FFmpeg offers avio_context_free() to release
    // the context itself as well)
    if (pAVIOCtx != NULL) av_freep(pAVIOCtx);

    return rc;
}
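
A minimal sketch of the read/seek callbacks that the avio_alloc_context() call above expects. The module's real ngx_http_video_thumbextractor_read_data_from_file / ..._seek_data_from_file are not part of this excerpt, so the bodies below are illustrative, assuming only the info->fd, info->offset and info->size fields used earlier:

static int demo_read_data_from_file(void *opaque, uint8_t *buf, int buf_size)
{
    ngx_http_video_thumbextractor_file_info_t *info = opaque;
    size_t n = fread(buf, 1, buf_size, info->fd);
    /* libavformat expects AVERROR_EOF (not 0) at end of stream */
    return (n == 0) ? AVERROR_EOF : (int) n;
}

static int64_t demo_seek_data_from_file(void *opaque, int64_t offset, int whence)
{
    ngx_http_video_thumbextractor_file_info_t *info = opaque;
    if (whence == AVSEEK_SIZE)
        return info->size;            /* lets libavformat query the stream size */
    if (whence != SEEK_SET)
        return AVERROR(EINVAL);       /* simplified: only absolute seeks handled */
    /* honor the configured start offset inside the physical file */
    if (fseek(info->fd, info->offset + offset, SEEK_SET) < 0)
        return AVERROR(errno);
    return offset;
}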


static void
ngx_http_video_thumbextractor_init_libraries(void)
{
    // Register all formats and codecs
    av_register_all();
    av_log_set_level(AV_LOG_ERROR);
}


static uint32_t
ngx_http_video_thumbextractor_jpeg_compress(ngx_http_video_thumbextractor_loc_conf_t *cf, uint8_t * buffer, int in_width, int in_height, int out_width, int out_height, caddr_t *out_buffer, size_t *out_len, size_t uncompressed_size, ngx_pool_t *temp_pool)
{
    struct jpeg_compress_struct cinfo;
    struct jpeg_error_mgr jerr;
    JSAMPROW row_pointer[1];
    int row_stride;
    int image_d_width = in_width;
    int image_d_height = in_height;

    if ( !buffer ) return 1;

    cinfo.err = jpeg_std_error(&jerr);
    jpeg_create_compress(&cinfo);
    ngx_http_video_thumbextractor_jpeg_memory_dest(&cinfo, out_buffer, out_len, uncompressed_size, temp_pool);

    cinfo.image_width = out_width;
    cinfo.image_height = out_height;
    cinfo.input_components = 3;
    cinfo.in_color_space = JCS_RGB;

    jpeg_set_defaults(&cinfo);
    /* Important: Header info must be set AFTER jpeg_set_defaults() */
    cinfo.write_JFIF_header = TRUE;
    cinfo.JFIF_major_version = 1;
    cinfo.JFIF_minor_version = 2;
    cinfo.density_unit = 1; /* 0=unknown, 1=dpi, 2=dpcm */
    /* Image DPI is determined by Y_density, so we leave that at
       jpeg_dpi if possible and crunch X_density instead (PAR > 1) */

    if (out_height * image_d_width > out_width * image_d_height) {
        image_d_width = out_height * image_d_width / image_d_height;
        image_d_height = out_height;
    } else {
        image_d_height = out_width * image_d_height / image_d_width;
        image_d_width = out_width;
    }

    cinfo.X_density = cf->jpeg_dpi * out_width / image_d_width;
    cinfo.Y_density = cf->jpeg_dpi * out_height / image_d_height;
    cinfo.write_Adobe_marker = TRUE;

    jpeg_set_quality(&cinfo, cf->jpeg_quality, cf->jpeg_baseline);
    cinfo.optimize_coding = cf->jpeg_optimize;
    cinfo.smoothing_factor = cf->jpeg_smooth;

    if ( cf->jpeg_progressive_mode ) {
        jpeg_simple_progression(&cinfo);
    }

    jpeg_start_compress(&cinfo, TRUE);

    row_stride = out_width * 3;
    while (cinfo.next_scanline < cinfo.image_height) {
        row_pointer[0] = &buffer[cinfo.next_scanline * row_stride];
        (void) jpeg_write_scanlines(&cinfo, row_pointer, 1);
    }

    jpeg_finish_compress(&cinfo);
    jpeg_destroy_compress(&cinfo);

    return 0;
}


typedef struct {
    struct jpeg_destination_mgr  pub; /* public fields */

    unsigned char              **buf;
    size_t                      *size;
    size_t                       uncompressed_size;
    ngx_pool_t                  *pool;
} ngx_http_video_thumbextractor_jpeg_destination_mgr;


static void ngx_http_video_thumbextractor_init_destination (j_compress_ptr cinfo)
{
    ngx_http_video_thumbextractor_jpeg_destination_mgr * dest = (ngx_http_video_thumbextractor_jpeg_destination_mgr *) cinfo->dest;

    *(dest->buf) = ngx_palloc(dest->pool, dest->uncompressed_size);
    *(dest->size) = dest->uncompressed_size;
    dest->pub.next_output_byte = *(dest->buf);
    dest->pub.free_in_buffer = dest->uncompressed_size;
}
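
The excerpt stops at init_destination, but a complete libjpeg destination manager also needs empty_output_buffer and term_destination callbacks. A hedged sketch of what the module's remaining callbacks plausibly do, reusing the pool-based allocation style above (the demo_ prefix marks them as illustrative, not the module's actual code):

static boolean demo_empty_output_buffer(j_compress_ptr cinfo)
{
    ngx_http_video_thumbextractor_jpeg_destination_mgr *dest = (ngx_http_video_thumbextractor_jpeg_destination_mgr *) cinfo->dest;
    unsigned char *new_buf;

    /* libjpeg calls this when the buffer filled up: grow it and keep writing */
    new_buf = ngx_palloc(dest->pool, *(dest->size) * 2);
    if (new_buf == NULL) {
        return FALSE;
    }
    ngx_memcpy(new_buf, *(dest->buf), *(dest->size));
    dest->pub.next_output_byte = new_buf + *(dest->size);
    dest->pub.free_in_buffer = *(dest->size);
    *(dest->buf) = new_buf;
    *(dest->size) *= 2;
    return TRUE;
}

static void demo_term_destination(j_compress_ptr cinfo)
{
    ngx_http_video_thumbextractor_jpeg_destination_mgr *dest = (ngx_http_video_thumbextractor_jpeg_destination_mgr *) cinfo->dest;

    /* shrink the reported size to the number of bytes actually emitted */
    *(dest->size) -= dest->pub.free_in_buffer;
}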
Exemple #18
0
int janus_pp_h264_create(char *destination, char *metadata) {
	if(destination == NULL)
		return -1;
	/* Setup FFmpeg */
	av_register_all();
	/* Adjust logging to match the postprocessor's */
	av_log_set_level(janus_log_level <= LOG_NONE ? AV_LOG_QUIET :
		(janus_log_level == LOG_FATAL ? AV_LOG_FATAL :
			(janus_log_level == LOG_ERR ? AV_LOG_ERROR :
				(janus_log_level == LOG_WARN ? AV_LOG_WARNING :
					(janus_log_level == LOG_INFO ? AV_LOG_INFO :
						(janus_log_level == LOG_VERB ? AV_LOG_VERBOSE : AV_LOG_DEBUG))))));
	/* MP4 output */
	fctx = avformat_alloc_context();
	if(fctx == NULL) {
		JANUS_LOG(LOG_ERR, "Error allocating context\n");
		return -1;
	}
	/* We save the metadata part as a comment (see #1189) */
	if(metadata)
		av_dict_set(&fctx->metadata, "comment", metadata, 0);
	fctx->oformat = av_guess_format("mp4", NULL, NULL);
	if(fctx->oformat == NULL) {
		JANUS_LOG(LOG_ERR, "Error guessing format\n");
		return -1;
	}
	snprintf(fctx->filename, sizeof(fctx->filename), "%s", destination);
#ifdef USE_CODECPAR
	AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if(!codec) {
		/* Error opening video codec */
		JANUS_LOG(LOG_ERR, "Encoder not available\n");
		return -1;
	}
	fctx->video_codec = codec;
	fctx->oformat->video_codec = codec->id;
	vStream = avformat_new_stream(fctx, codec);
	if(vStream == NULL) {
		JANUS_LOG(LOG_ERR, "Error adding stream\n");
		return -1;
	}
	vStream->id = fctx->nb_streams-1;
	vEncoder = avcodec_alloc_context3(codec);
	vEncoder->width = max_width;
	vEncoder->height = max_height;
	vEncoder->time_base = (AVRational){ 1, fps };
	vEncoder->pix_fmt = AV_PIX_FMT_YUV420P;
	vEncoder->flags |= CODEC_FLAG_GLOBAL_HEADER;
	if(avcodec_open2(vEncoder, codec, NULL) < 0) {
		/* Error opening video codec */
		JANUS_LOG(LOG_ERR, "Encoder error\n");
		return -1;
	}
	avcodec_parameters_from_context(vStream->codecpar, vEncoder);
#else
	vStream = avformat_new_stream(fctx, 0);
	if(vStream == NULL) {
		JANUS_LOG(LOG_ERR, "Error adding stream\n");
		return -1;
	}
#if LIBAVCODEC_VER_AT_LEAST(53, 21)
	avcodec_get_context_defaults3(vStream->codec, AVMEDIA_TYPE_VIDEO);
#else
	avcodec_get_context_defaults2(vStream->codec, AVMEDIA_TYPE_VIDEO);
#endif
#if LIBAVCODEC_VER_AT_LEAST(54, 25)
	vStream->codec->codec_id = AV_CODEC_ID_H264;
#else
	vStream->codec->codec_id = CODEC_ID_H264;
#endif
	vStream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
	vStream->codec->time_base = (AVRational){1, fps};
	vStream->time_base = (AVRational){1, 90000};
	vStream->codec->width = max_width;
	vStream->codec->height = max_height;
	vStream->codec->pix_fmt = PIX_FMT_YUV420P;
	//~ if (fctx->flags & AVFMT_GLOBALHEADER)
		vStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
	if(avio_open(&fctx->pb, fctx->filename, AVIO_FLAG_WRITE) < 0) {
		JANUS_LOG(LOG_ERR, "Error opening file for output\n");
		return -1;
	}
	if(avformat_write_header(fctx, NULL) < 0) {
		JANUS_LOG(LOG_ERR, "Error writing header\n");
		return -1;
	}
	return 0;
}
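
Note that the CODEC_FLAG_GLOBAL_HEADER used above was renamed AV_CODEC_FLAG_GLOBAL_HEADER in newer libavcodec, and the old spelling was eventually removed; a small compatibility shim (illustrative) keeps code like this building against both generations:

/* map the old flag name onto the new one when only the new one exists */
#if defined(AV_CODEC_FLAG_GLOBAL_HEADER) && !defined(CODEC_FLAG_GLOBAL_HEADER)
#define CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER
#endif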
Exemple #19
0
int bl_audio_decode(
		char const * const filename,
		struct bl_song * const song) {
	int ret;
	// Contexts and libav variables
	AVPacket avpkt;
	AVFormatContext* context;
	int audio_stream;
	AVCodecContext* codec_context = NULL;
	AVCodec *codec = NULL;
	AVFrame *decoded_frame = NULL;
	struct SwrContext *swr_ctx;

	// Size of the samples
	uint64_t size = 0;

	// Dictionary to fetch tags
	AVDictionaryEntry *tags_dictionary;

	// Planar means each channel is stored in its own plane in the data
	// section, rather than interleaved.
	// See MP3 (planar) vs FLAC (packed) for instance.
	int is_planar;

	// Pointer to beginning of music data
	int8_t *beginning;
	// Received frame holder
	int got_frame;
	// Position in the data buffer
	int index;
	// Initialize AV lib
	av_register_all();
	context = avformat_alloc_context();

	av_log_set_level(AV_LOG_QUIET);

	// Open input file
	if ((ret = avformat_open_input(&context, filename, NULL, NULL)) < 0) {
		fprintf(stderr, "Couldn't open file: %s. Error %d encountered.\n", filename, ret);
		return BL_UNEXPECTED;
	}

	// Search for a valid stream
	if (avformat_find_stream_info(context, NULL) < 0) {
		fprintf(stderr, "Couldn't find stream information\n");
		return BL_UNEXPECTED;
	}

	// Get audio stream
	audio_stream = av_find_best_stream(context, AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
	if (audio_stream < 0) {
		fprintf(stderr, "Couldn't find a suitable audio stream\n");
		return BL_UNEXPECTED;
	}
	// Find associated codec
	codec_context = context->streams[audio_stream]->codec;
	if (!codec_context) {
		fprintf(stderr, "Codec not found!\n");
		return BL_UNEXPECTED;
	}
	if (avcodec_open2(codec_context, codec, NULL) < 0) {
		fprintf(stderr, "Could not open codec\n");
		return BL_UNEXPECTED;
	}

	// Fill song properties
	song->filename = malloc(strlen(filename) + 1);
	strcpy(song->filename, filename);

	song->sample_rate = codec_context->sample_rate;
	song->duration = (uint64_t)(context->duration) / ((uint64_t)AV_TIME_BASE);
	song->bitrate = context->bit_rate;
	song->not_s16 = 0;
	song->nb_bytes_per_sample = av_get_bytes_per_sample(codec_context->sample_fmt);
	song->channels = codec_context->channels;

	// Get number of samples
	size = (
		((uint64_t)(context->duration) * (uint64_t)song->sample_rate) /
		((uint64_t)AV_TIME_BASE)
		) *
		song->channels *
		song->nb_bytes_per_sample;

	// Estimated number of samples
	song->nSamples = (
		(
		((uint64_t)(context->duration) * (uint64_t)song->sample_rate) /
		((uint64_t)AV_TIME_BASE)
		) *
		song->channels
	);

	// Allocate sample_array
	if((song->sample_array = calloc(size, 1)) == NULL) {
		fprintf(stderr, "Could not allocate enough memory\n");
		return BL_UNEXPECTED;
	}

	beginning = song->sample_array;
	index = 0;

	// If the song is in a floating-point format or int32, prepare the conversion to int16
	if(codec_context->sample_fmt != AV_SAMPLE_FMT_S16 &&
		codec_context->sample_fmt != AV_SAMPLE_FMT_S16P) {
		song->not_s16 = 1;
		song->nb_bytes_per_sample = 2;
	
		swr_ctx = swr_alloc();
		av_opt_set_int(swr_ctx, "in_channel_layout", codec_context->channel_layout, 0);
		av_opt_set_int(swr_ctx, "in_sample_rate", codec_context->sample_rate, 0);
		av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", codec_context->sample_fmt, 0);

		av_opt_set_int(swr_ctx, "out_channel_layout", codec_context->channel_layout, 0);
		av_opt_set_int(swr_ctx, "out_sample_rate", codec_context->sample_rate, 0);
		av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
		if((ret = swr_init(swr_ctx)) < 0) {
			fprintf(stderr, "Could not allocate resampler context\n");
			return BL_UNEXPECTED;
		}
	}

	// Zero-initialize tags
	song->artist = NULL;
	song->title = NULL;
	song->album = NULL;
	song->tracknumber = NULL;

	// Initialize tracknumber tag
	tags_dictionary = av_dict_get(context->metadata, "track", NULL, 0);
	if (tags_dictionary!= NULL) {
		song->tracknumber = malloc(strlen(tags_dictionary->value) + 1);
		strcpy(song->tracknumber, tags_dictionary->value);
		song->tracknumber[strcspn(song->tracknumber, "/")] = '\0';
	} 
	else {
		song->tracknumber = malloc(1 * sizeof(char));
		strcpy(song->tracknumber, "");
	}

	// Initialize title tag
	tags_dictionary = av_dict_get(context->metadata, "title", NULL, 0);
	if (tags_dictionary!= NULL) {
		song->title = malloc(strlen(tags_dictionary->value) + 1);
		strcpy(song->title, tags_dictionary->value);
	}
	else {
		song->title = malloc(12 * sizeof(char));
		strcpy(song->title, "<no title>");
	}

	// Initialize artist tag
	tags_dictionary = av_dict_get(context->metadata, "ARTIST", NULL, 0);
	if (tags_dictionary!= NULL) {
		song->artist= malloc(strlen(tags_dictionary->value) + 1);
		strcpy(song->artist, tags_dictionary->value);
	}
	else {
		song->artist= malloc(12 * sizeof(char));
		strcpy(song->artist, "<no artist>");
	}

	// Initialize album tag
	tags_dictionary = av_dict_get(context->metadata, "ALBUM", NULL, 0);
	if (tags_dictionary!= NULL) {
		song->album= malloc(strlen(tags_dictionary->value) + 1);
		strcpy(song->album, tags_dictionary->value);
	}
	else {
		song->album= malloc(11 * sizeof(char));
		strcpy(song->album, "<no album>");
	}

	// Initialize genre tag
	tags_dictionary = av_dict_get(context->metadata, "genre", NULL, 0);
	if (tags_dictionary!= NULL) {
		song->genre= malloc(strlen(tags_dictionary->value) + 1);
		strcpy(song->genre, tags_dictionary->value);
	}
	else {
		song->genre = malloc(11 * sizeof(char));
		strcpy(song->genre, "<no genre>");
	}

	// Planar means channels are not interleaved
	is_planar = av_sample_fmt_is_planar(codec_context->sample_fmt);

	// Read the whole data and copy them into a huge buffer
	av_init_packet(&avpkt);
	while(av_read_frame(context, &avpkt) >= 0) {
		if(avpkt.stream_index == audio_stream) {
			got_frame = 0;

			// If decoded frame has not been allocated yet
			if (!decoded_frame) {
				// Try to allocate it
				decoded_frame = av_frame_alloc();
				if(!decoded_frame) {
					fprintf(stderr, "Could not allocate audio frame\n");
					return BL_UNEXPECTED;
				}
			}
			else {
				// Else, unreference it and reset fields
				av_frame_unref(decoded_frame);
			}

			int length = avcodec_decode_audio4(codec_context,
				decoded_frame,
				&got_frame,
				&avpkt);
			if(length < 0) {
				avpkt.size = 0;
			}

			av_packet_unref(&avpkt);

			// Copy decoded data into a huge array
			if(got_frame) {
				size_t data_size = av_samples_get_buffer_size(
					NULL,
					codec_context->channels,
					decoded_frame->nb_samples,
					codec_context->sample_fmt,
				1);

				if((index * song->nb_bytes_per_sample + data_size) > size) {
					int8_t *ptr;
					ptr = realloc(beginning, size + data_size);
					if(ptr != NULL) {
						beginning = ptr;
						size += data_size;
						song->nSamples += data_size / song->nb_bytes_per_sample;
					}
					else
						break;
				}
				int8_t *p = beginning + (index * song->nb_bytes_per_sample);

				// If the song isn't in a 16-bit format, convert it to
				if(song->not_s16 == 1) {
					uint8_t **out_buffer;
					int buff_size;
					buff_size = av_samples_alloc_array_and_samples(&out_buffer, decoded_frame->linesize,
						song->channels, decoded_frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
					// swr_convert's out_count is in samples per channel, not bytes;
					// with equal in/out sample rates the output holds nb_samples samples
					ret = swr_convert(swr_ctx, out_buffer, decoded_frame->nb_samples,
						(const uint8_t**)decoded_frame->extended_data, decoded_frame->nb_samples);
					if(ret < 0) {
						fprintf(stderr, "Error while converting from floating-point to int\n");
						return BL_UNEXPECTED;
					}
					memcpy((index * song->nb_bytes_per_sample) + beginning,
						out_buffer[0], buff_size);
					av_freep(&out_buffer[0]);
					free(out_buffer);
					index += buff_size / song->nb_bytes_per_sample;
				}
				else if(1 == is_planar) {
					for (int i = 0;
						i < (decoded_frame->nb_samples * song->nb_bytes_per_sample);
						i += song->nb_bytes_per_sample) {
						for (int j = 0; j < codec_context->channels; ++j) {
							for (int k = 0; k < song->nb_bytes_per_sample; ++k) {
								*p = ((int8_t*)(decoded_frame->extended_data[j]))[i + k];
								++p;
							}
						}
					}
					index += data_size / song->nb_bytes_per_sample;
				}
				else if (0 == is_planar) {
					memcpy((index * song->nb_bytes_per_sample) + beginning,
						decoded_frame->extended_data[0],
						data_size);
					index += data_size / song->nb_bytes_per_sample;
				}
			}
		}
		else {
			// Dropping packets that do not belong to the audio stream
			// (such as album cover)
			av_packet_unref(&avpkt);
		}
	}
	song->sample_array = beginning;

	// Prepare an empty (flush) packet to drain the decoder
	avpkt.data = NULL;
	avpkt.size = 0;

	// Use correct number of samples after decoding
	song->nSamples = index; 
	
	// Drain the end of the audio stream, as described in http://ffmpeg.org/pipermail/libav-user/2015-August/008433.html
	do {
		avcodec_decode_audio4(codec_context, decoded_frame, &got_frame, &avpkt);
	} while(got_frame);
	// Free memory
	if(song->not_s16)
		swr_free(&swr_ctx);
	avcodec_close(codec_context);
	av_frame_unref(decoded_frame);
	#if LIBAVUTIL_VERSION_MAJOR > 51
	av_frame_free(&decoded_frame);
	#endif
	av_packet_unref(&avpkt);
	avformat_close_input(&context);

	return BL_OK;
}
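
On FFmpeg 3.1+, avcodec_decode_audio4() is deprecated in favor of the send/receive API; the decode-and-drain pattern above maps onto it roughly as follows. A minimal, illustrative sketch (error handling trimmed; demo_decode_all is not part of the original code):

static int demo_decode_all(AVCodecContext *ctx, const AVPacket *pkt, AVFrame *frame)
{
	/* pkt == NULL enters draining mode, replacing the empty flush packet */
	int ret = avcodec_send_packet(ctx, pkt);
	if (ret < 0)
		return ret;
	while ((ret = avcodec_receive_frame(ctx, frame)) >= 0) {
		/* consume frame->extended_data here, as the copy loop above does */
		av_frame_unref(frame);
	}
	return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}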
Exemple #20
0
void TheoraVideoClip_FFmpeg::load(TheoraDataSource* source)
{
	mVideoStreamIndex = -1;
	mFrameNumber = 0;
	AVDictionary* optionsDict = NULL;
	
	if (!ffmpegInitialised)
	{
#ifdef _FFMPEG_DEBUG
		th_writelog("Initializing ffmpeg");
#endif
		th_writelog("avcodec version: " + str(avcodec_version()));
		av_register_all();
		av_log_set_level(AV_LOG_DEBUG);
		av_log_set_callback(avlog_theoraplayer);
		ffmpegInitialised = 1;
		//show_codecs(0, 0, 0);
	}
	
	mInputBuffer = (unsigned char*) av_malloc(READ_BUFFER_SIZE);
	mAvioContext = avio_alloc_context(mInputBuffer, READ_BUFFER_SIZE, 0, source, &readFunction, NULL, &seekFunction);
	
#ifdef _FFMPEG_DEBUG
	th_writelog(mName + ": avio context created");
#endif

	mFormatContext = avformat_alloc_context();
#ifdef _FFMPEG_DEBUG
	th_writelog(mName + ": avformat context created");
#endif
	mFormatContext->pb = mAvioContext;
	
	int err;
	if ((err = avformat_open_input(&mFormatContext, "", NULL, NULL)) != 0)
	{
		th_writelog(mName + ": avformat input opening failed!");
		th_writelog(mName + ": error_code: " + str(err));
		return;
	}
	
#ifdef _FFMPEG_DEBUG
	th_writelog(mName + ": avformat input opened");
#endif
	
	// Retrieve stream information
	if (avformat_find_stream_info(mFormatContext, NULL) < 0)
		return; // Couldn't find stream information
	
#ifdef _FFMPEG_DEBUG
	th_writelog(mName + ": got stream info");
#endif
	
	// Dump information about file onto standard error
	//	av_dump_format(mFormatContext, 0, "", 0);
	
	// Find the first video stream
	for (int i = 0; i < mFormatContext->nb_streams; ++i)
	{
		if(mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			mVideoStreamIndex = i;
			break;
		}
	}
	if (mVideoStreamIndex == -1)
		return; // Didn't find a video stream

#ifdef _FFMPEG_DEBUG
	th_writelog(mName + ": Found video stream at index " + str(mVideoStreamIndex));
#endif

	// Get a pointer to the codec context for the video stream
	mCodecContext = mFormatContext->streams[mVideoStreamIndex]->codec;
	
	// Find the decoder for the video stream
	mCodec = avcodec_find_decoder(mCodecContext->codec_id);
	if (mCodec == NULL)
	{
		th_writelog("Unsupported codec!");
		return; // Codec not found
	}
	// Open codec
	if(avcodec_open2(mCodecContext, mCodec, &optionsDict) < 0)
		return; // Could not open codec
	
#ifdef _FFMPEG_DEBUG
	th_writelog(mName + ": Codec opened");
#endif

	
	mFrame = avcodec_alloc_frame();
	
#ifdef _FFMPEG_DEBUG
	th_writelog(mName + ": Frame allocated");
#endif
		
	//AVRational rational = mCodecContext->time_base;

	mFPS = 25; //TODOOOOOO!!!
	
	mWidth = mStride = mCodecContext->width;
	mHeight = mCodecContext->height;
	mFrameDuration = 1.0f / mFPS;
	mDuration = mFormatContext->duration / AV_TIME_BASE;
	
	if (mFrameQueue == NULL) // todo - why is this set in the backend class? it should be set in the base class, check other backends as well
	{
		mFrameQueue = new TheoraFrameQueue(this);
		mFrameQueue->setSize(mNumPrecachedFrames);
	}
}
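
The hardcoded mFPS = 25 above could instead be derived from the demuxer; a short sketch under the assumption that the container reports a usable average frame rate (falling back to 25 otherwise):

	AVStream* stream = mFormatContext->streams[mVideoStreamIndex];
	if (stream->avg_frame_rate.num != 0 && stream->avg_frame_rate.den != 0)
		mFPS = (float) av_q2d(stream->avg_frame_rate);
	else
		mFPS = 25;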
int main(int argc, char **argv)
{
    const AVFilter *filter;
    AVFilterContext *filter_ctx;
    AVFilterGraph *graph_ctx;
    const char *filter_name;
    const char *filter_args = NULL;
    int i;
    int ret = 0;

    av_log_set_level(AV_LOG_DEBUG);

    if (argc < 2) {
        fprintf(stderr, "Missing filter name as argument\n");
        return 1;
    }

    filter_name = argv[1];
    if (argc > 2)
        filter_args = argv[2];

    /* allocate graph */
    graph_ctx = avfilter_graph_alloc();
    if (!graph_ctx)
        return 1;

    avfilter_register_all();

    /* get a corresponding filter and open it */
    if (!(filter = avfilter_get_by_name(filter_name))) {
        fprintf(stderr, "Unrecognized filter with name '%s'\n", filter_name);
        return 1;
    }

    /* open filter and add it to the graph */
    if (!(filter_ctx = avfilter_graph_alloc_filter(graph_ctx, filter, filter_name))) {
        fprintf(stderr, "Impossible to open filter with name '%s'\n",
                filter_name);
        return 1;
    }
    if (avfilter_init_str(filter_ctx, filter_args) < 0) {
        fprintf(stderr, "Impossible to init filter '%s' with arguments '%s'\n",
                filter_name, filter_args);
        return 1;
    }

    /* create a link for each of the input pads */
    for (i = 0; i < filter_ctx->nb_inputs; i++) {
        AVFilterLink *link = av_mallocz(sizeof(AVFilterLink));
        if (!link) {
            fprintf(stderr, "Unable to allocate memory for filter input link\n");
            ret = 1;
            goto fail;
        }
        link->type = avfilter_pad_get_type(filter_ctx->input_pads, i);
        filter_ctx->inputs[i] = link;
    }
    for (i = 0; i < filter_ctx->nb_outputs; i++) {
        AVFilterLink *link = av_mallocz(sizeof(AVFilterLink));
        if (!link) {
            fprintf(stderr, "Unable to allocate memory for filter output link\n");
            ret = 1;
            goto fail;
        }
        link->type = avfilter_pad_get_type(filter_ctx->output_pads, i);
        filter_ctx->outputs[i] = link;
    }

    if (filter->query_formats)
        filter->query_formats(filter_ctx);
    else
        ff_default_query_formats(filter_ctx);

    print_formats(filter_ctx);

fail:
    avfilter_free(filter_ctx);
    avfilter_graph_free(&graph_ctx);
    fflush(stdout);
    return ret;
}
Exemple #22
0
int main(int argc, char* argv[])
{
    boost::program_options::options_description desc("");
	desc.add_options()
		("multicast-address", boost::program_options::value<std::string>(),     "")
		("interface",         boost::program_options::value<std::string>(),     "")
		("rtsp-port",         boost::program_options::value<boost::uint16_t>(), "")
		("avg-bit-rate",      boost::program_options::value<int>(),             "")
		("buffer-size",       boost::program_options::value<size_t>(),          "")
	    ("stats-interval",    boost::program_options::value<size_t>(),          "");
		
    
    boost::program_options::variables_map vm;
    boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), vm);
    boost::program_options::notify(vm);

    if (vm.count("interface"))
    {
        std::string interfaceAddress = vm["interface"].as<std::string>();
        NetAddressList addresses(interfaceAddress.c_str());
        if (addresses.numAddresses() == 0)
        {
            std::cout << "Failed to find network address for \"" << interfaceAddress << "\"" << std::endl;
            return -1;
        }
        ReceivingInterfaceAddr = *(unsigned*)(addresses.firstAddress()->data());
    }
    
    char sourceAddressStr[INET_ADDRSTRLEN];
    inet_ntop(AF_INET, &(ReceivingInterfaceAddr), sourceAddressStr, sizeof(sourceAddressStr));
    std::cout << "Using source address " << sourceAddressStr << std::endl;
    
    if (vm.count("rtsp-port"))
    {
        rtspPort = vm["rtsp-port"].as<boost::uint16_t>();
    }
    else
    {
        rtspPort = 8555;
    }
    std::cout << "Using RTSP port " << rtspPort << std::endl;
    
    // Create 'groupsocks' for RTP and RTCP:
    if(vm.count("multicast-address"))
    {
        inet_pton(AF_INET, vm["multicast-address"].as<std::string>().c_str(), &(destinationAddress.s_addr));
    }
    else
    {
        inet_pton(AF_INET, "224.0.67.67", &(destinationAddress.s_addr));
    }

	if (vm.count("avg-bit-rate"))
	{
		avgBitRate = vm["avg-bit-rate"].as<int>();
	}
	else
	{
		avgBitRate = DEFAULT_AVG_BIT_RATE;
	}
    std::string bitRateString = to_human_readable_byte_count(avgBitRate, true, false);
	std::cout << "Using an average encoding bit rate of " << bitRateString << "/s per face" << std::endl;

	if (vm.count("buffer-size"))
	{
		bufferSize = vm["buffer-size"].as<size_t>();
	}
	else
	{
		bufferSize = DEFAULT_BUFFER_SIZE;
	}
	std::string bufferSizeString = to_human_readable_byte_count(bufferSize, false, false);
	std::cout << "Using a buffer size of " << bufferSizeString << std::endl;

	size_t statsInterval;
	if (vm.count("stats-interval"))
	{
		statsInterval = vm["stats-interval"].as<size_t>();
	}
	else
	{
		statsInterval = DEFAULT_STATS_INTERVAL;
	}

    av_log_set_level(AV_LOG_WARNING);
    avcodec_register_all();
    setupRTSP();
    boost::thread networkThread = boost::thread(&networkLoop);

	

    Process unityProcess(CUBEMAPEXTRACTIONPLUGIN_ID, false);
	Process thisProcess(ALLOSERVER_ID, true);

    while (true)
    {
        std::cout << "Waiting for Unity ..." << std::endl;
        unityProcess.waitForBirth();
        std::cout << "Connected to Unity :)" << std::endl;
        startStreaming();
		stats.autoSummary(boost::chrono::seconds(statsInterval),
			              AlloReceiver::statValsMaker,
						  AlloReceiver::postProcessorMaker,
						  AlloReceiver::formatStringMaker);
        unityProcess.join();
        std::cout << "Lost connection to Unity :(" << std::endl;
        stopStreaming();
		stats.stopAutoSummary();
    }

    return 0;
}
Exemple #23
0
void VideoSystem::initialize()
{
	av_log_set_level(AV_LOG_QUIET);
	av_register_all();
}
Exemple #24
0
bool
FFmpegInput::open (const std::string &name, ImageSpec &spec)
{
    // Temporary workaround: refuse to open a file whose name does not
    // indicate that it's a movie file. This avoids the problem that ffmpeg
    // is willing to open tiff and other files better handled by other
    // plugins. The better long-term solution is to replace av_register_all
    // with our own function that registers only the formats that we want
    // this reader to handle. At some point, we will institute that superior
    // approach, but in the mean time, this is a quick solution that 90%
    // does the job.
    bool valid_extension = false;
    for (int i = 0; ffmpeg_input_extensions[i]; ++i)
        if (Strutil::ends_with (name, ffmpeg_input_extensions[i])) {
            valid_extension = true;
            break;
        }
    if (! valid_extension) {
        error ("\"%s\" could not open input", name);
        return false;
    }

    static boost::once_flag init_flag = BOOST_ONCE_INIT;
    boost::call_once (&av_register_all, init_flag);
    const char *file_name = name.c_str();
    av_log_set_level (AV_LOG_FATAL);
    if (avformat_open_input (&m_format_context, file_name, NULL, NULL) != 0) // avformat_open_input allocs format_context
    {
        error ("\"%s\" could not open input", file_name);
        return false;
    }
    if (avformat_find_stream_info (m_format_context, NULL) < 0)
    {
        error ("\"%s\" could not find stream info", file_name);
        return false;
    }
    m_video_stream = -1;
    for (unsigned int i=0; i<m_format_context->nb_streams; i++) {
        if (m_format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (m_video_stream < 0) {
                m_video_stream=i;
            }
            m_video_indexes.push_back (i); // needed for later use
            break;
        }
    }  
    if (m_video_stream == -1) {
        error ("\"%s\" could not find a valid videostream", file_name);
        return false;
    }
    m_codec_context = m_format_context->streams[m_video_stream]->codec; // codec context for videostream
    m_codec = avcodec_find_decoder (m_codec_context->codec_id);
    if (!m_codec) {
        error ("\"%s\" unsupported codec", file_name);
        return false;
    }
    if (avcodec_open2 (m_codec_context, m_codec, NULL) < 0) {
        error ("\"%s\" could not open codec", file_name);
        return false;
    }
    if (!strcmp (m_codec_context->codec->name, "mjpeg") ||
        !strcmp (m_codec_context->codec->name, "dvvideo")) {
        m_offset_time = false;
    }
    AVStream *stream = m_format_context->streams[m_video_stream];
    if (stream->r_frame_rate.num != 0 && stream->r_frame_rate.den != 0) {
        m_frame_rate = stream->r_frame_rate;
    }
    if (static_cast<int64_t> (m_format_context->duration) != AV_NOPTS_VALUE) {
        m_frames = static_cast<uint64_t> ((fps() * static_cast<double>(m_format_context->duration) / 
                                                   static_cast<uint64_t>(AV_TIME_BASE)));
    } else {
        m_frames = 1 << 29;
    }
    AVPacket pkt;
    if (!m_frames) {
        seek (0);
        av_init_packet (&pkt);
        av_read_frame (m_format_context, &pkt);
        uint64_t first_pts = pkt.pts;
        uint64_t max_pts = first_pts;
        seek (1 << 29);
        av_init_packet (&pkt);
        while (stream && av_read_frame (m_format_context, &pkt) >= 0) {
            uint64_t current_pts = static_cast<uint64_t> (av_q2d(stream->time_base) * (pkt.pts - first_pts) * fps());
            if (current_pts > max_pts) {
                max_pts = current_pts;
            }
        }
        m_frames = max_pts;
    }
    m_frame = av_frame_alloc();
    m_rgb_frame = av_frame_alloc();
    m_rgb_buffer.resize(
        avpicture_get_size (PIX_FMT_RGB24,
        m_codec_context->width,
        m_codec_context->height),
        0
    );
    AVPixelFormat pixFormat;
    switch (m_codec_context->pix_fmt) { // deprecation warning for YUV formats
        case AV_PIX_FMT_YUVJ420P:
            pixFormat = AV_PIX_FMT_YUV420P;
            break;
        case AV_PIX_FMT_YUVJ422P:
            pixFormat = AV_PIX_FMT_YUV422P;
            break;
        case AV_PIX_FMT_YUVJ444P:
            pixFormat = AV_PIX_FMT_YUV444P;
            break;
        case AV_PIX_FMT_YUVJ440P:
            pixFormat = AV_PIX_FMT_YUV440P;
            break;
        default:
            pixFormat = m_codec_context->pix_fmt;
            break;
    }
    m_sws_rgb_context = sws_getContext(
        m_codec_context->width,
        m_codec_context->height,
        pixFormat,
        m_codec_context->width,
        m_codec_context->height,
        PIX_FMT_RGB24,
        SWS_AREA,
        NULL,
        NULL,
        NULL
    );
    m_spec = ImageSpec (m_codec_context->width, m_codec_context->height, 3, TypeDesc::UINT8);
    AVDictionaryEntry *tag = NULL;
    while ((tag = av_dict_get (m_format_context->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        m_spec.attribute (tag->key, tag->value);
    }
    m_spec.attribute ("fps", m_frame_rate.num / static_cast<float> (m_frame_rate.den));
    m_spec.attribute ("oiio:Movie", true);
    m_nsubimages = m_frames;
    spec = m_spec;
    return true;
}
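
On newer FFmpeg the avpicture_* helpers and PIX_FMT_* names used above are deprecated; sizing the RGB buffer would instead use the av_image API from libavutil/imgutils.h. An illustrative equivalent of the m_rgb_buffer.resize() call:

    m_rgb_buffer.resize (
        av_image_get_buffer_size (AV_PIX_FMT_RGB24,
                                  m_codec_context->width,
                                  m_codec_context->height,
                                  1 /* alignment */),
        0);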
Exemple #25
0
int main(int argc, char **argv)
{
	AVFormatContext *ic = 0;
	int rc;
	const char *url;
	size_t cnt = 0;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <rtsp url>\n", argv[0]);
		return -1;
	}

	for (int i = 0; i < 32; i++)
		_last_stamp[i] = -1.0;

	url = argv[1];

	av_register_all();
	avformat_network_init();
	av_log_set_level(AV_LOG_FATAL);

	rc = avformat_open_input(&ic, url, 0, 0);
	if (rc < 0) {
		fprintf(stderr, "can't open %s\n", url);
		return -1;
	}

	rc = avformat_find_stream_info(ic, 0);
	if (rc < 0) {
		fprintf(stderr, "can't find %s stream info\n", url);
		return -1;
	}

	av_dump_format(ic, -1, 0, 0);

	_first = now();

	do {
		AVPacket pkg;
#if LIBAVFORMAT_VERSION_MAJOR >= 56
		pkg.buf = 0;
#endif

		rc = av_read_frame(ic, &pkg);
		if (rc < 0) {
			break;
		}
		else {
			int idx = pkg.stream_index;
			AVStream *stream = ic->streams[idx];
			_last_stamp[idx] = pkg.pts * 1.0 * stream->time_base.num /
				stream->time_base.den;

//			fprintf(stderr, "pts=%lld, stamp=%.3f\n", pkg.pts, 
//					pkg.pts * 1.0 * stream->time_base.num / stream->time_base.den);
		}

#if LIBAVFORMAT_VERSION_MAJOR >= 56
		av_free_packet(&pkg);
#endif

		if (cnt++ % 100 == 0) {
			print_stamp_delta();
		}

	} while (1);

	return 0;
}
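
The per-packet timestamp arithmetic above is the expanded form of libavutil's rational helper; an equivalent, more idiomatic version of the same computation:

		_last_stamp[idx] = pkg.pts * av_q2d(stream->time_base);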
Exemple #26
0
int main(int argc, char **argv)
{
    AVFormatContext* pCtx = 0;
    AVCodecContext *pCodecCtx = 0;
    AVCodec *pCodec = 0;
    AVPacket packet;
    AVFrame *pFrame = 0;
    FILE *fpo1 = NULL;
    FILE *fpo2 = NULL;
    int nframe;
    int err;
    int got_picture = -1;
    int picwidth, picheight, linesize;
    unsigned char *pBuf;
    int i;
    int64_t timestamp;
    struct options opt;
    int usefo = 0;
    struct audio_dsp dsp;
    int dusecs;
    float usecs1 = 0;
    float usecs2 = 0;
    struct timeval elapsed1, elapsed2;
    int decoded = 0;

	//taoanran add +++++++++
	int ret = -1;
	int videoStream = -1; //video streamID
	// ----------------------

	int flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
 #if 0
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
        exit(1);
    }
#endif
    av_register_all();

    av_log_set_callback(log_callback);
    av_log_set_level(50);

    if (Myparse_option(&opt, argc, argv) < 0 || (strlen(opt.finput) == 0))
    {
        Myshow_help(argv[0]);
        return 0;
    }

    err = avformat_open_input(&pCtx, opt.finput, 0, 0);
    if (err < 0)
    {
        printf("\n->(avformat_open_input)\tERROR:\t%d\n", err);
        goto fail;
    }
	printf("=========================\n");
    err = avformat_find_stream_info(pCtx, 0);

    if (err < 0)
    {
        printf("\n->(avformat_find_stream_info)\tERROR:\t%d\n", err);
        goto fail;
    }
	av_dump_format(pCtx, 0, opt.finput, 0);

	// check the video stream
	videoStream = find_video_stream(pCtx);
	if (videoStream < 0)
	{
		printf("there is not audio stream !!!!!!! \n");
		return -1;
	}

	pCodecCtx = pCtx->streams[videoStream]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);//find the video decoder
 	if (!pCodec)
    {
        printf("\ncan't find the audio decoder!\n");
        goto fail;
    }

	pFrame = avcodec_alloc_frame();

	//open videDecoder
	ret = avcodec_open2(pCodecCtx, pCodec, 0);

	if (ret < 0)
	{
		printf("avcodec_open2 error \n");
		return -1;
	}

#if 0
	//only for audio
	pFrame->nb_samples = pCodecCtx->frame_size;
	pFrame->format = pCodecCtx->sample_fmt;
	pFrame->channel_layout = pCodecCtx->channel_layout;
#endif
#if 0
	//set the param of SDL
	SDL_AudioSpec wanted_spec, spec; 
	wanted_spec.freq = pCodecCtx->sample_rate;  
	wanted_spec.format = AUDIO_S16SYS;  
	wanted_spec.channels = pCodecCtx->channels;  
	wanted_spec.silence = 0;  
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;  
	wanted_spec.callback = audio_callback;//audio_callback;  
	wanted_spec.userdata = pCodecCtx;;//pCodecCtx;  
	if(SDL_OpenAudio(&wanted_spec, &spec) < 0)  
    {  
        fprintf(stderr, "SDL_OpenAudio: %s/n", SDL_GetError());  
        return -1;  
    } 
#endif			

	 printf(" bit_rate = %d \r\n", pCodecCtx->bit_rate);
     printf(" sample_rate = %d \r\n", pCodecCtx->sample_rate);
     printf(" channels = %d \r\n", pCodecCtx->channels);
     printf(" code_name = %s \r\n", pCodecCtx->codec->name);

	while(av_read_frame(pCtx, &packet) >= 0)
	{
		// found a packet from the video stream
		if (packet.stream_index == videoStream)
		{
			int got;
			int i;
		
			avcodec_decode_video2(pCodecCtx, pFrame,&got_picture,&packet);
			printf("pFrame->width = %d\n", pFrame->width);
			printf("pFrame->height = %d\n", pFrame->height);
			printf("pFrame->linesize[0] = %d\n", pFrame->linesize[0]);
			printf("pFrame->linesize[1] = %d\n", pFrame->linesize[1]);
			printf("pFrame->linesize[2] = %d\n", pFrame->linesize[2]);

			//catch the YUV420P data
			saveYUV420P(pFrame->data[0], pFrame->linesize[0], pCodecCtx->width, pCodecCtx->height);      //Y: 4
			saveYUV420P(pFrame->data[1], pFrame->linesize[1], pCodecCtx->width/2, pCodecCtx->height/2);    //U : 1
			saveYUV420P(pFrame->data[2], pFrame->linesize[2], pCodecCtx->width/2, pCodecCtx->height/2);    //V : 1
		}
	}
	
	return 0;
#if 0	
	if (!opt.nodec)
    {
        
        pCodecCtx = pCtx->streams[opt.streamId]->codec;
 
        if (opt.thread_count <= 16 && opt.thread_count > 0 )
        {
            pCodecCtx->thread_count = opt.thread_count;
            pCodecCtx->thread_type = FF_THREAD_FRAME;
        }
        pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (!pCodec)
        {
            printf("\n->不能找到编解码器!\n");
            goto fail;
        }
        err = avcodec_open2(pCodecCtx, pCodec, 0);
        if (err < 0)
        {
            printf("\n->(avcodec_open)\tERROR:\t%d\n", err);
            goto fail;
        }
        pFrame = avcodec_alloc_frame();
 
        if (opt.bplay)
        {
            dsp.audio_fd = open(OSS_DEVICE, O_WRONLY);
            if (dsp.audio_fd == -1)
            {
                printf("\n-> 无法打开音频设备\n");
                goto fail;
            }
            dsp.channels = pCodecCtx->channels;
            dsp.speed = pCodecCtx->sample_rate;
            dsp.format = map_formats(pCodecCtx->sample_fmt);
            if (set_audio(&dsp) < 0)
            {
                printf("\n-> 不能设置音频设备\n");
                goto fail;
            }
        }
    }
    nframe = 0;
	printf("=========================444444\n");
    while(nframe < opt.frames || opt.frames == -1)
    {
        gettimeofday(&elapsed1, NULL);
        err = av_read_frame(pCtx, &packet);
        if (err < 0)
        {
            printf("\n->(av_read_frame)\tERROR:\t%d\n", err);
            break;
        }
        gettimeofday(&elapsed2, NULL);
        dusecs = (elapsed2.tv_sec - elapsed1.tv_sec)*1000000 + (elapsed2.tv_usec - elapsed1.tv_usec);
        usecs2 += dusecs;
        timestamp = av_rescale_q(packet.dts, pCtx->streams[packet.stream_index]->time_base, (AVRational){1, AV_TIME_BASE});
        printf("\nFrame No ] stream#%d\tsize mB, timestamp:%6lld, dts:%6lld, pts:%6lld, ", nframe++, packet.stream_index, packet.size,
               timestamp, packet.dts, packet.pts);
        if (packet.stream_index == opt.streamId)
        {
#if 0
            for (i = 0; i < 16; i++)
            {
                if (i == 0) printf("\n pktdata: ");
                printf("%2x ", packet.data[i]);
            }
            printf("\n");
#endif
            if (usefo)
            {
                fwrite(packet.data, packet.size, 1, fpo1);
                fflush(fpo1);
            }
            if (pCtx->streams[opt.streamId]->codec->codec_type == AVMEDIA_TYPE_VIDEO && !opt.nodec)
            {
                picheight = pCtx->streams[opt.streamId]->codec->height;
                picwidth = pCtx->streams[opt.streamId]->codec->width;
 
                gettimeofday(&elapsed1, NULL);
                avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
                decoded++;
                gettimeofday(&elapsed2, NULL);
                dusecs = (elapsed2.tv_sec - elapsed1.tv_sec)*1000000 + (elapsed2.tv_usec - elapsed1.tv_usec);
                usecs1 += dusecs;
                if (got_picture)
                {
                    printf("[Video: type %d, ref %d, pts %lld, pkt_pts %lld, pkt_dts %lld]",
                            pFrame->pict_type, pFrame->reference, pFrame->pts, pFrame->pkt_pts, pFrame->pkt_dts);
 
                    if (pCtx->streams[opt.streamId]->codec->pix_fmt == PIX_FMT_YUV420P)
                    {
                        if (usefo)
                        {
                            linesize = pFrame->linesize[0];
                            pBuf = pFrame->data[0];
                            for (i = 0; i < picheight; i++)
                            {
                                fwrite(pBuf, picwidth, 1, fpo2);
                                pBuf += linesize;
                            }
                            linesize = pFrame->linesize[1];
                            pBuf = pFrame->data[1];
                            for (i = 0; i < picheight/2; i++)
                            {
                                fwrite(pBuf, picwidth/2, 1, fpo2);
                                pBuf += linesize;
                            }
                            linesize = pFrame->linesize[2];
                            pBuf = pFrame->data[2];
                            for (i = 0; i < picheight/2; i++)
                            {
                                fwrite(pBuf, picwidth/2, 1, fpo2);
                                pBuf += linesize;
                            }
                            fflush(fpo2);
                        }
 
                        if (opt.bplay)
                        {
                            
                        }
                    }
                }
                av_free_packet(&packet);
            }
            else if (pCtx->streams[opt.streamId]->codec->codec_type == AVMEDIA_TYPE_AUDIO && !opt.nodec)
            {
                int got;
                gettimeofday(&elapsed1, NULL);
                avcodec_decode_audio4(pCodecCtx, pFrame, &got, &packet);
                decoded++;
                gettimeofday(&elapsed2, NULL);
                dusecs = (elapsed2.tv_sec - elapsed1.tv_sec)*1000000 + (elapsed2.tv_usec - elapsed1.tv_usec);
                usecs1 += dusecs;
                                if (got)
                                {
                    printf("[Audio: ]B raw data, decoding time: %d]", pFrame->linesize[0], dusecs);
                    if (usefo)
                    {
                        fwrite(pFrame->data[0], pFrame->linesize[0], 1, fpo2);
                        fflush(fpo2);
                    }
                    if (opt.bplay)
                    {
                        play_pcm(&dsp, pFrame->data[0], pFrame->linesize[0]);
                    }
                                }
            }
        }
    }
    if (!opt.nodec && pCodecCtx)
    {
        avcodec_close(pCodecCtx);
    }
    printf("\n%d 帧解析, average %.2f us per frame\n", nframe, usecs2/nframe);
    printf("%d 帧解码,平均 %.2f 我们每帧\n", decoded, usecs1/decoded);

#endif

fail:
    if (pCtx)
    {
        avformat_close_input(&pCtx);
    }


    return 0;
}
Exemple #27
0
static codec_data_t *ffmpeg_create (const char *stream_type,
                                    const char *compressor,
                                    int type,
                                    int profile,
                                    format_list_t *media_fmt,
                                    video_info_t *vinfo,
                                    const uint8_t *userdata,
                                    uint32_t ud_size,
                                    video_vft_t *vft,
                                    void *ifptr)
{
    ffmpeg_codec_t *ffmpeg;

    ffmpeg = MALLOC_STRUCTURE(ffmpeg_codec_t);
    memset(ffmpeg, 0, sizeof(*ffmpeg));

    ffmpeg->m_vft = vft;
    ffmpeg->m_ifptr = ifptr;
    avcodec_init();
    avcodec_register_all();
    av_log_set_level(AV_LOG_QUIET);

    ffmpeg->m_codecId = ffmpeg_find_codec(stream_type, compressor, type,
                                          profile, media_fmt, userdata, ud_size);

    // must have a codecID - we checked it earlier
    ffmpeg->m_codec = avcodec_find_decoder(ffmpeg->m_codecId);
    ffmpeg->m_c = avcodec_alloc_context();
    ffmpeg->m_picture = avcodec_alloc_frame();
    bool open_codec = true;
    bool run_userdata = false;
    bool free_userdata = false;

    switch (ffmpeg->m_codecId) {
    case CODEC_ID_MJPEG:
        break;
    case CODEC_ID_H264:
        // need to find height and width
        if (media_fmt != NULL && media_fmt->fmt_param != NULL) {
            userdata = h264_sdp_parse_sprop_param_sets(media_fmt->fmt_param,
                       &ud_size,
                       ffmpeg->m_vft->log_msg);
            if (userdata != NULL) free_userdata = true;
            ffmpeg_message(LOG_DEBUG, "ffmpeg", "sprop len %d", ud_size);
        }
        if (ud_size > 0) {
            ffmpeg_message(LOG_DEBUG, "ffmpeg", "userdata len %d", ud_size);
            open_codec = ffmpeg_find_h264_size(ffmpeg, userdata, ud_size);
            ffmpeg_message(LOG_DEBUG, "ffmpeg", "open codec is %d", open_codec);
            run_userdata = true;
        } else {
            open_codec = false;
        }
        break;
    case CODEC_ID_MPEG4: {
        fmtp_parse_t *fmtp = NULL;
        open_codec = false;
        if (media_fmt != NULL) {
            fmtp = parse_fmtp_for_mpeg4(media_fmt->fmt_param,
                                        ffmpeg->m_vft->log_msg);
            if (fmtp->config_binary != NULL) {
                userdata = fmtp->config_binary;
                ud_size = fmtp->config_binary_len;
                fmtp->config_binary = NULL;
                free_userdata = true;
            }
        }

        if (ud_size > 0) {
            uint8_t *vol = MP4AV_Mpeg4FindVol((uint8_t *)userdata, ud_size);
            u_int8_t TimeBits;
            u_int16_t TimeTicks;
            u_int16_t FrameDuration;
            u_int16_t FrameWidth;
            u_int16_t FrameHeight;
            u_int8_t  aspectRatioDefine;
            u_int8_t  aspectRatioWidth;
            u_int8_t  aspectRatioHeight;
            if (vol) {
                if (MP4AV_Mpeg4ParseVol(vol,
                                        ud_size - (vol - userdata),
                                        &TimeBits,
                                        &TimeTicks,
                                        &FrameDuration,
                                        &FrameWidth,
                                        &FrameHeight,
                                        &aspectRatioDefine,
                                        &aspectRatioWidth,
                                        &aspectRatioHeight)) {
                    ffmpeg->m_c->width = FrameWidth;
                    ffmpeg->m_c->height = FrameHeight;
                    open_codec = true;
                    run_userdata = true;
                }
            }
        }
        if (fmtp != NULL) {
            free_fmtp_parse(fmtp);
        }
    }
    break;
    case CODEC_ID_SVQ3:
        ffmpeg->m_c->extradata = (void *)userdata;
        ffmpeg->m_c->extradata_size = ud_size;
        if (vinfo != NULL) {
            ffmpeg->m_c->width = vinfo->width;
            ffmpeg->m_c->height = vinfo->height;
        }
        break;
    default:
        break;
    }
    if (open_codec) {
        if (avcodec_open(ffmpeg->m_c, ffmpeg->m_codec) < 0) {
            ffmpeg_message(LOG_CRIT, "ffmpeg", "failed to open codec");
            return NULL;
        }
        ffmpeg_message(LOG_DEBUG, "ffmpeg", "pixel format is %d",
                       ffmpeg->m_c->pix_fmt);
        ffmpeg->m_codec_opened = true;
        if (run_userdata) {
            uint32_t offset = 0;
            do {
                int got_picture;
                offset += avcodec_decode_video(ffmpeg->m_c,
                                               ffmpeg->m_picture,
                                               &got_picture,
                                               (uint8_t *)userdata + offset,
                                               ud_size - offset);
            } while (offset < ud_size);
        }

    }

    if (free_userdata) {
        CHECK_AND_FREE(userdata);
    }
    ffmpeg->m_did_pause = 1;
    return ((codec_data_t *)ffmpeg);
}
Exemple #28
0
int main(int argc, char **argv)
{
    AVFilter *filter;
    AVFilterContext *filter_ctx;
    const char *filter_name;
    const char *filter_args = NULL;
    int i;

    av_log_set_level(AV_LOG_DEBUG);

    if (!argv[1]) {
        fprintf(stderr, "Missing filter name as argument\n");
        return 1;
    }

    filter_name = argv[1];
    if (argv[2])
        filter_args = argv[2];

    avfilter_register_all();

    /* get a corresponding filter and open it */
    if (!(filter = avfilter_get_by_name(filter_name))) {
        fprintf(stderr, "Unrecognized filter with name '%s'\n", filter_name);
        return 1;
    }

    if (avfilter_open(&filter_ctx, filter, NULL) < 0) {
        fprintf(stderr, "Impossible to open filter with name '%s'\n",
                filter_name);
        return 1;
    }
    if (avfilter_init_filter(filter_ctx, filter_args, NULL) < 0) {
        fprintf(stderr, "Impossible to init filter '%s' with arguments '%s'\n",
                filter_name, filter_args);
        return 1;
    }

    /* create a link for each of the input pads */
    for (i = 0; i < filter_ctx->input_count; i++) {
        AVFilterLink *link = av_mallocz(sizeof(AVFilterLink));
        link->type = filter_ctx->filter->inputs[i].type;
        filter_ctx->inputs[i] = link;
    }
    for (i = 0; i < filter_ctx->output_count; i++) {
        AVFilterLink *link = av_mallocz(sizeof(AVFilterLink));
        link->type = filter_ctx->filter->outputs[i].type;
        filter_ctx->outputs[i] = link;
    }

    if (filter->query_formats)
        filter->query_formats(filter_ctx);
    else
        avfilter_default_query_formats(filter_ctx);

    print_formats(filter_ctx);

    avfilter_free(filter_ctx);
    fflush(stdout);
    return 0;
}
Exemple #29
0
void init_dumper( int width, int height )
{  
  double fps = Machine->drv->frames_per_second / (double)frame_halver;

  avcodec_init();
  avcodec_register_all();
#ifdef AVICAPTURE_DEBUG
  av_log_set_level (99);
#endif

  avc = avcodec_find_encoder( CODEC_ID_MPEG1VIDEO );
  if (avc == NULL)
  {
  	  printf ("cannot find MPEG encoder\n");
     exit (1);
  }

  avctx = avcodec_alloc_context();
    
  /* sample parameters */
  avctx->me_method = ME_LOG;
  avctx->pix_fmt = PIX_FMT_YUV420P;
  avctx->bit_rate = 2500000;
  avctx->width = width;
  avctx->height = height;
  avctx->time_base.num = 1;
  avctx->time_base.den = fps;   /* note: fractional frame rates are truncated to an integer timebase here */
  avctx->gop_size=10;
  avctx->max_b_frames=1;
  avctx->draw_horiz_band = NULL;
  avctx->idct_algo = FF_IDCT_AUTO;

  int ret = avcodec_open( avctx, avc );
  if (ret)
    {
      printf("FAILED TO OPEN ENCODER, ret=%d, errno=%d\n", ret, errno);
      exit( 1 );
    }
  
  int size=height*width;
  
  pic = avcodec_alloc_frame();
  
  output_buffer=(char *)malloc(BUFFSIZE); /* Find where this value comes from */
  
  outpic.data[0]=(unsigned char *)malloc(size*3/2); /* YUV 420 Planar */
  outpic.data[1]=outpic.data[0]+size;
  outpic.data[2]=outpic.data[1]+size/4;
  outpic.data[3]=NULL;
  outpic.linesize[0]=width;
  outpic.linesize[1]=outpic.linesize[2]=width/2;
  outpic.linesize[3]=0;
  
  pic->data[0]=outpic.data[0];  /* Points to data portion of outpic     */
  pic->data[1]=outpic.data[1];  /* Since encode_video takes an AVFrame, */
  pic->data[2]=outpic.data[2];  /* and img_convert takes an AVPicture   */
  pic->data[3]=outpic.data[3];
  
  pic->linesize[0]=outpic.linesize[0]; /* This doesn't change */
  pic->linesize[1]=outpic.linesize[1];
  pic->linesize[2]=outpic.linesize[2];
  pic->linesize[3]=outpic.linesize[3];
  
  inpic.data[0]=(unsigned char *)malloc(size*3); /* RGB24 packed in 1 plane */
  inpic.data[1]=inpic.data[2]=inpic.data[3]=NULL;
  inpic.linesize[0]=width*3;
  inpic.linesize[1]=inpic.linesize[2]=inpic.linesize[3]=0;

  video_outf = fopen("video.outf","wb");
  if (video_outf == NULL)
  {
    printf ("failed to open output video file\n");
    exit (1);
  }
}
Exemple #30
0
static int ffmpeg_init_library() {
  // Register all formats and codecs
  av_register_all();
  av_log_set_level(AV_LOG_ERROR);
  return 0;
}