Ejemplo n.º 1
0
/*
 * Open the global input file (src_filename), discover its streams and set up
 * the global decode state:
 *  - video: opens video_dst_filename for writing and allocates the raw image
 *    buffer (video_dst_data / video_dst_linesize / video_dst_bufsize);
 *  - audio: opens audio_dst_filename for writing.
 *
 * Returns 0 on success, 1 when an output file or the image buffer cannot be
 * created. Failure to open or probe the input itself calls exit(1).
 */
static int init_input(){
    int ret;

    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    /* video: open the decoder, the destination file and the image buffer */
    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        /* NOTE(review): AVStream.codec is deprecated in newer FFmpeg; both
         * globals below alias the same legacy context on purpose. */
        video_dec_ctx = video_stream->codec;
        pCodecCtx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            /* dead store "ret = 1;" removed: the literal was returned anyway */
            return 1;
        }

        /* allocate image where the decoded image will be put */
        width = video_dec_ctx->width;
        height = video_dec_ctx->height;
        pix_fmt = video_dec_ctx->pix_fmt;
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             width, height, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            return 1;
        }
        /* av_image_alloc returns the allocated buffer size on success */
        video_dst_bufsize = ret;
    }

    /* audio: open the decoder and the destination file */
    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        AudCodecCtx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            return 1;
        }
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);
    return 0;
}
Ejemplo n.º 2
0
/*
 * JNI entry point: open the media file named by `src`, prepare a VideoInfo
 * decoder handle for its video stream, store the video width/height into
 * data[0]/data[1], and return the VideoInfo pointer as the handle
 * (0 on any failure).
 */
jint Java_com_panahit_ui_Components_AnimatedFileDrawable_createDecoder(JNIEnv *env, jclass clazz, jstring src, jintArray data) {
    VideoInfo *info = new VideoInfo();

    char const *srcString = env->GetStringUTFChars(src, 0);
    /* BUG FIX: GetStringUTFChars can return NULL (e.g. OOM); the original
     * called strlen()/memcpy() on it before doing the null check. */
    if (srcString == nullptr) {
        delete info;
        return 0;
    }
    size_t len = strlen(srcString);
    info->src = new char[len + 1];
    memcpy(info->src, srcString, len);
    info->src[len] = '\0';
    env->ReleaseStringUTFChars(src, srcString);

    int ret;
    if ((ret = avformat_open_input(&info->fmt_ctx, info->src, NULL, NULL)) < 0) {
        LOGE("can't open source file %s, %s", info->src, av_err2str(ret));
        delete info;
        return 0;
    }

    if ((ret = avformat_find_stream_info(info->fmt_ctx, NULL)) < 0) {
        LOGE("can't find stream information %s, %s", info->src, av_err2str(ret));
        delete info;
        return 0;
    }

    if (open_codec_context(&info->video_stream_idx, info->fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        info->video_stream = info->fmt_ctx->streams[info->video_stream_idx];
        info->video_dec_ctx = info->video_stream->codec;
    }

    /* BUG FIX: the original compared the pointer with "<= 0"; test for null
     * explicitly (video_stream stays null when no video stream was opened). */
    if (info->video_stream == nullptr) {
        LOGE("can't find video stream in the input, aborting %s", info->src);
        delete info;
        return 0;
    }

    info->frame = av_frame_alloc();
    if (info->frame == nullptr) {
        LOGE("can't allocate frame %s", info->src);
        delete info;
        return 0;
    }

    /* empty packet: the demuxer fills data/size on each read */
    av_init_packet(&info->pkt);
    info->pkt.data = NULL;
    info->pkt.size = 0;

    jint *dataArr = env->GetIntArrayElements(data, 0);
    if (dataArr != nullptr) {
        dataArr[0] = info->video_dec_ctx->width;
        dataArr[1] = info->video_dec_ctx->height;
        env->ReleaseIntArrayElements(data, dataArr, 0);
    }

    /* NOTE(review): casting a pointer to jint truncates on 64-bit platforms;
     * the Java side should migrate to a jlong handle. Kept as-is to preserve
     * the existing JNI signature. */
    return (int) info;
}
Ejemplo n.º 3
0
/*
 * Connect to the RTSP camera described by netcam->rtsp->path, forcing
 * RTP-over-TCP transport, read stream info, open the video decoder and start
 * playback. Returns 0 on success, -1 on failure (the rtsp context is freed
 * and reset so a later reconnect attempt starts from scratch).
 */
static int rtsp_connect(netcam_context_ptr netcam)
{
  if (netcam->rtsp == NULL) {
    netcam->rtsp = rtsp_new_context();

    if (netcam->rtsp == NULL) {
      /* BUG FIX: the original logged netcam->rtsp->path here, dereferencing
       * the very NULL pointer whose allocation just failed. */
      MOTION_LOG(ALR, TYPE_NETCAM, NO_ERRNO, "%s: unable to create RTSP context");
      return -1;
    }
  }

  // open the network connection
  AVDictionary *opts = 0;
  av_dict_set(&opts, "rtsp_transport", "tcp", 0);

  int ret = avformat_open_input(&netcam->rtsp->format_context, netcam->rtsp->path, NULL, &opts);
  /* avformat_open_input leaves unconsumed options in the dictionary; free it
   * on success and failure alike to avoid a leak. */
  av_dict_free(&opts);
  if (ret < 0) {
    MOTION_LOG(ALR, TYPE_NETCAM, NO_ERRNO, "%s: unable to open input(%s): %d - %s", netcam->rtsp->path, ret, av_err2str(ret));
    rtsp_free_context(netcam->rtsp);
    netcam->rtsp = NULL;
    return -1;
  }

  // fill out stream information
  ret = avformat_find_stream_info(netcam->rtsp->format_context, NULL);
  if (ret < 0) {
    MOTION_LOG(ALR, TYPE_NETCAM, NO_ERRNO, "%s: unable to find stream info: %d", ret);
    rtsp_free_context(netcam->rtsp);
    netcam->rtsp = NULL;
    return -1;
  }

  ret = open_codec_context(&netcam->rtsp->video_stream_index, netcam->rtsp->format_context, AVMEDIA_TYPE_VIDEO);
  if (ret < 0) {
    MOTION_LOG(ALR, TYPE_NETCAM, NO_ERRNO, "%s: unable to open codec context: %d", ret);
    rtsp_free_context(netcam->rtsp);
    netcam->rtsp = NULL;
    return -1;
  }

  /* NOTE(review): AVStream.codec is deprecated in newer FFmpeg; kept for the
   * library version this code targets. */
  netcam->rtsp->codec_context = netcam->rtsp->format_context->streams[netcam->rtsp->video_stream_index]->codec;

  // start up the feed
  av_read_play(netcam->rtsp->format_context);

  return 0;
}
/*
 * Demux and decode argv[1]: raw video frames are written to argv[2], raw
 * audio to argv[3]. With "-refcount" as the first argument (argc == 5) the
 * refcount global is set and the remaining argv entries shift by one.
 */
int main (int argc, char **argv)
{
    int ret = 0, got_frame;
    if (argc != 4 && argc != 5) {
        fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n\n"
                "If the -refcount option is specified, the program use the\n"
                "reference counting frame system which allows keeping a copy of\n"
                "the data for longer than one decode call.\n"
                "\n", argv[0]);
        exit(1);
    }
    /* NOTE(review): with argc == 5 but a first argument other than
     * "-refcount", argv is not shifted and argv[4] is silently ignored. */
    if (argc == 5 && !strcmp(argv[1], "-refcount")) {
        refcount = 1;
        argv++;
    }
    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];
    /* register all formats and codecs */
    av_register_all();
    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }
    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }
    /* video stream: open decoder, output file and the raw image buffer */
    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;
        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }
        /* allocate image where the decoded image will be put */
        width = video_dec_ctx->width;
        height = video_dec_ctx->height;
        pix_fmt = video_dec_ctx->pix_fmt;
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             width, height, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        /* av_image_alloc returns the allocated buffer size on success */
        video_dst_bufsize = ret;
    }
    /* audio stream: open decoder and output file */
    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            ret = 1;
            goto end;
        }
    }
    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);
    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }
    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
    /* read frames from the file; decode_packet returns the number of bytes it
     * consumed, so a packet is advanced until it is fully decoded */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        /* keep the original packet so the whole buffer can be unreferenced */
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_packet_unref(&orig_pkt);
    }
    /* flush cached frames: feed empty packets until the decoder drains */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);
    printf("Demuxing succeeded.\n");
    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(pix_fmt), width, height,
               video_dst_filename);
    }
    if (audio_stream) {
        enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
        int n_channels = audio_dec_ctx->channels;
        const char *fmt;
        /* planar audio: only the first channel was written out, so report a
         * packed single-channel format for playback */
        if (av_sample_fmt_is_planar(sfmt)) {
            const char *packed = av_get_sample_fmt_name(sfmt);
            printf("Warning: the sample format the decoder produced is planar "
                   "(%s). This example will output the first channel only.\n",
                   packed ? packed : "?");
            sfmt = av_get_packed_sample_fmt(sfmt);
            n_channels = 1;
        }
        if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
            goto end;
        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, n_channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }
end:
    avcodec_close(video_dec_ctx);
    avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_frame_free(&frame);
    av_free(video_dst_data[0]);
    return ret < 0;
}
/*
 * Capture variant of the demuxing example: opens argv[1] through the
 * "avfoundation" input format (macOS capture), decodes the first few packets
 * and writes raw video frames to argv[2].
 */
int main (int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 3 && argc != 4) {
        /* BUG FIX: the previous usage text advertised a [-refcount] flag and
         * an audio output file that this program never accepts or reads. */
        fprintf(stderr, "usage: %s input_file video_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file.\n"
                "\n", argv[0]);
        exit(1);
    }
    // if (argc == 4 && !strcmp(argv[1], "-refcount")) {
    //     refcount = 1;
    //     argv++;
    // }
    src_filename = argv[1];
    video_dst_filename = argv[2];

    /* register all formats, codecs and capture devices */
    avdevice_register_all();
    av_register_all();

    AVInputFormat* input_format = av_find_input_format("avfoundation");
    printf("input_format: %p\n", input_format);

    /* capture options for the device: pixel format, frame rate, frame size */
    AVDictionary* open_options = NULL;
    av_dict_set(&open_options, "pixel_format", "uyvy422", 0);
    av_dict_set(&open_options, "framerate", "30.000030", 0);
    av_dict_set(&open_options, "video_size", "1280x720", 0);

    /* open input, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, input_format, &open_options) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }
    /* avformat_open_input leaves unconsumed options in the dictionary;
     * free it to avoid a leak */
    av_dict_free(&open_options);

    printf("fmt_ctx: %p\n", fmt_ctx);
    /* debug peek at stream 0 before avformat_find_stream_info(); these
     * values may still be unset at this point */
    video_stream = fmt_ctx->streams[0];
    printf("video_stream: %p\n", video_stream);
    video_dec_ctx = video_stream->codec;
    printf("video_dec_ctx: %p\n", video_dec_ctx);
    width = video_dec_ctx->width;
    height = video_dec_ctx->height;
    pix_fmt = video_dec_ctx->pix_fmt;

    printf("width: %d\n", width);
    printf("height: %d\n", height);
    printf("pix_fmt: %d\n", pix_fmt);

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    /* video stream: open decoder, output file and the raw image buffer */
    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        width = video_dec_ctx->width;
        height = video_dec_ctx->height;
        pix_fmt = video_dec_ctx->pix_fmt;
        printf("width: %d\n", width);
        printf("height: %d\n", height);
        printf("pix_fmt: %d\n", pix_fmt);
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             width, height, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        /* av_image_alloc returns the allocated buffer size on success */
        video_dst_bufsize = ret;
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!video_stream) {
        fprintf(stderr, "Could not find video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);

    /* read frames from the input; stops after 6 packets — presumably because
     * a live capture input would otherwise run indefinitely (TODO confirm) */
    int frame_index = 0;
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_free_packet(&orig_pkt);

        frame_index++;
        if (frame_index > 5) {
            break;
        }
    }

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(pix_fmt), width, height,
               video_dst_filename);
    }

end:
    avcodec_close(video_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    av_frame_free(&frame);
    av_free(video_dst_data[0]);

    return ret < 0;
}
Ejemplo n.º 6
0
/*----------------------------------------------------------------------
|    main
+---------------------------------------------------------------------*/
/*
 * Demux the audio stream of the input file (argv[1]) and dump its raw packet
 * payloads verbatim into the output file (argv[2]).
 */
int main(int argc, char** argv)
{
    QApplication a(argc, argv);

    QStringList arguments = a.arguments();
    // BUG FIX: argv[2] is used below as the output file, so two user
    // arguments are required (the original only checked for one, leaving
    // argv[2] possibly NULL).
    if (arguments.size() < 3) {
        LOG_ERROR(LOG_TAG, "Wrong syntax.");
        return -1;
    }

    if (!QFile(arguments.at(1)).exists()) {
        LOG_ERROR(LOG_TAG, "Input file does not exist.");
        return -1;
    }

    // Init codecs.
    av_register_all();

    // Open input file.
    if (avformat_open_input(&fmt_ctx, argv[1], NULL, NULL) < 0) {
        LOG_ERROR(LOG_TAG, "Could not open source file %s.", argv[1]);
        return -1;
    }

    // Retrieve stream information.
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        LOG_ERROR(LOG_TAG, "Could not find stream information.");
        return -1;
    }

    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream  = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

#if 0
        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             video_dec_ctx->width, video_dec_ctx->height,
                             video_dec_ctx->pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
#endif
    }

    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        int nb_planes;
        audio_stream  = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;

#if 0
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ?
                    audio_dec_ctx->channels : 1;
        audio_dst_data = av_mallocz(sizeof(uint8_t *) * nb_planes);
        if (!audio_dst_data) {
            fprintf(stderr, "Could not allocate audio data buffers\n");
            ret = AVERROR(ENOMEM);
            goto end;
        }
#endif
    }

    // Dump input information to stderr.
    av_dump_format(fmt_ctx, 0, argv[1], 0);

    // initialize packet, set data to NULL, let the demuxer fill it.
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    QFile outFile(argv[2]);
    if (!outFile.open(QIODevice::ReadWrite)) {
        LOG_ERROR(LOG_TAG, "Cannot create output file.");
        return -1;
    }
    // Copy every audio packet's payload to the output file.
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == audio_stream_idx) {
            LOG_VERBOSE(LOG_TAG, "Frame read.");
            outFile.write((const char*)pkt.data, pkt.size);
        }
        // BUG FIX: the original never released packets, leaking every one
        // (non-audio packets were skipped via `continue` without cleanup).
        av_free_packet(&pkt);
    }
    LOG_INFORMATION(LOG_TAG, "Data written successfully.");
    outFile.close();

    // Cleanup.
    if (video_dec_ctx)
        avcodec_close(video_dec_ctx);
    if (audio_dec_ctx)
        avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    return 0;
}
Ejemplo n.º 7
0
/*
 * Demux argv[1]: raw audio packets are written verbatim to argv[3]; the video
 * branch only sets up the decoder and output buffer (actual decoding is
 * commented out in the read loop).
 */
int main (int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 4) {
        fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n"
                "\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];

    /* register all formats and codecs */
    av_register_all();

    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    /* video stream: open decoder, output file and the raw image buffer */
    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             video_dec_ctx->width, video_dec_ctx->height,
                             video_dec_ctx->pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }

    /* audio stream: open decoder, output file and per-plane pointer array */
    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        int nb_planes;

        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            /* BUG FIX: the original printed video_dst_filename here */
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            ret = 1;
            goto end;
        }

        /* one plane per channel for planar sample formats, otherwise one */
        nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ? audio_dec_ctx->channels : 1;
        audio_dst_data = (uint8_t**)av_mallocz(sizeof(uint8_t *) * nb_planes);
        if (!audio_dst_data) {
            fprintf(stderr, "Could not allocate audio data buffers\n");
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);

    /* read packets from the file; only raw audio packets are written out */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        //decode_packet(&got_frame, 0);
        if (audio_stream_idx == pkt.stream_index) {
            printf("Frame found!\n");
            fwrite(pkt.data, 1, pkt.size, audio_dst_file);
        }
        /* BUG FIX: the original never released packets, leaking every one */
        av_free_packet(&pkt);
    }

    /* flush cached frames */
#if 0
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);
#endif

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
               video_dst_filename);
    }

    if (audio_stream) {
        const char *fmt;

        /* BUG FIX: precedence error — the original evaluated
         * ret = (get_format_from_sample_fmt(...) < 0), so a failure stored
         * 1 in ret and the program still exited with success status. */
        if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt)) < 0)
            goto end;
        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, audio_dec_ctx->channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }

end:
    if (video_dec_ctx)
        avcodec_close(video_dec_ctx);
    if (audio_dec_ctx)
        avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_free(frame);
    av_free(video_dst_data[0]);
    av_free(audio_dst_data);

    return ret < 0;
}
Ejemplo n.º 8
0
/**
 * Opens the input file/url, allocates an AVFormatContext for it and opens the audio stream with an
 * appropriate decoder.
 *
 * @param env JNIEnv
 * @param format_context AVFormatContext
 * @param openedStream opened audio AVStream
 * @param stream_index on input, index of the desired <em>audio</em> stream;
 *                     on output, index of the selected stream (among <em>all</em> streams)
 * @param url URL to open
 * @return negative value, if something went wrong
 */
int ff_open_file(JNIEnv *env, AVFormatContext **format_context, AVStream **openedStream, AVCodecContext **context, int *stream_index, const char *url) {
    int res = 0;
    /* open the container and allocate *format_context */
    res = ff_open_format_context(env, format_context, url);
    if (res) {
        // exception has already been thrown
        goto bail;
    }

#ifdef DEBUG
    fprintf(stderr, "Desired audio stream index: %i.\n", *stream_index);
#endif

    if (*stream_index < 0) {
        // use best audio stream
        res = open_codec_context(stream_index, *format_context, *context, AVMEDIA_TYPE_AUDIO);
        if (res) {
            throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to open codec context.");
            goto bail;
        }
        *openedStream = (*format_context)->streams[*stream_index];
    } else {
        // find xth audio stream
        // count possible audio streams
        int i;
        int audio_stream_number = 0;
        AVStream* stream = NULL;

        /* walk all streams, counting only audio ones; when the requested
         * audio ordinal is reached, rewrite *stream_index to the absolute
         * stream index and keep `stream` pointing at it */
        AVFormatContext* deref_format_context = *format_context;
        for (i=0; i<deref_format_context->nb_streams; i++) {
            stream = deref_format_context->streams[i];
            if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
                if (audio_stream_number == *stream_index) {
                    *stream_index = i;
                #ifdef DEBUG
                    fprintf(stderr, "Found desired audio stream at index: %i.\n", i);
                #endif
                    break;
                }
                audio_stream_number++;
            }
            /* reset so a loop that ends without the break leaves stream NULL */
            stream = NULL;
        }
        if (stream == NULL) {
            // we didn't find a stream with the given index
            res = -1;
            throwIndexOutOfBoundsExceptionIfError(env, res, *stream_index);
            goto bail;
        }
        res = ff_open_stream(env, stream, context);
        if (res) {
            /* ff_open_stream is expected to have thrown on failure —
             * NOTE(review): confirm against its implementation */
            goto bail;
        }
        *openedStream = stream;
    }

#ifdef DEBUG
    fprintf(stderr, "Opened stream index: %i.\n", *stream_index);
    fprintf(stderr, "Opened stream: %ld.\n", (long) *openedStream);
#endif

bail:

    return res;
}
Ejemplo n.º 9
0
/*
 * Entry point: open the video given as the sole argument, print a CSV header
 * (German column titles) and feed every packet to decode_packet(), which is
 * defined elsewhere and presumably emits one CSV row per frame.
 */
int main(int argc, char **argv)
{
    int err = 0;
    int have_frame;

    if (argc != 2) {
        fprintf(stderr, "Usage: %s <video>\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];

    av_register_all();

    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;
    }

    if (video_stream == NULL) {
        fprintf(stderr, "Could not find video stream in the input, aborting\n");
        err = 1;
        goto end;
    }

    frame = av_frame_alloc();
    if (frame == NULL) {
        fprintf(stderr, "Could not allocate frame\n");
        err = AVERROR(ENOMEM);
        goto end;
    }

    /* empty packet: the demuxer fills in data and size on each read */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* print the csv header */
    printf(";Bildtyp: ;Eingangsbitrate: ;Kodierte Bildgröße: ;Zeitstempel des Bildes: ;Aktuelles Bild: ;Anzeigedauer des Bildes: ;Bisherige Spielzeit: ;Quelldateiname: ;Decoderbildrate: ;Decoder: ;Filmbildrate:\n");

    /* pull packets from the demuxer until EOF */
    for (;;) {
        AVPacket whole_pkt;

        if (av_read_frame(fmt_ctx, &pkt) < 0)
            break;
        /* remember the unadvanced packet so the full buffer can be freed */
        whole_pkt = pkt;
        /* a packet may hold more than one frame; advance by consumed bytes */
        while (1) {
            err = decode_packet(&have_frame, 0);
            if (err < 0)
                break;
            pkt.data += err;
            pkt.size -= err;
            if (pkt.size <= 0)
                break;
        }
        av_free_packet(&whole_pkt);
    }

    /* drain the decoder of any buffered frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&have_frame, 1);
    } while (have_frame);

end:
    avcodec_close(video_dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    return err < 0;
}
Ejemplo n.º 10
0
/**
 * Fuzz-friendly demux/decode driver: reads frames from input_file, decodes
 * them (optionally restricted by a format/codec whitelist), and writes the
 * decoded raw data to output_file.  Built to run under AFL via __AFL_LOOP,
 * so all per-iteration state is reset at the top of the loop body.
 *
 * usage: prog input_file output_file [format codec]
 * Returns 0 on success, non-zero on the first failed iteration.
 */
int main (int argc, char **argv)
{
    int ret = 0;
    const char *src_filename = NULL;
    const char *dst_filename = NULL;
    char* format             = NULL;
    char* codec              = NULL;

    if (argc != 5 && argc != 3) {
        fprintf(stderr, "usage: %s input_file output_file [format codec]\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "frames to a rawvideo/rawaudio file named output_file.\n"
                "Optionally format and codec can be specified.\n\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];
    dst_filename = argv[2];
    if (argc == 5) {
        format = argv[3];
        codec  = argv[4];
    }

    /* log all debug messages */
    av_log_set_level(AV_LOG_DEBUG);

    /* register all formats and codecs */
    av_register_all();

#ifdef __AFL_HAVE_MANUAL_CONTROL
    while (__AFL_LOOP(1000))
#endif
    {
        AVFormatContext *fmt_ctx = NULL;
        AVInputFormat *fmt       = NULL;
        AVCodecContext *dec_ctx  = NULL;
        FILE *dst_file           = NULL;
        AVFrame *frame           = NULL;
        int got_frame            = 0;
        int frame_count          = 0;
        AVPacket pkt             = { 0 };
        AVDictionary *opts       = NULL;
        /* reset the file-scope per-iteration state so successive AFL
         * iterations do not observe stale data from the previous run */
        ret = 0;
        width = 0;
        height = 0;
        pix_fmt = AV_PIX_FMT_NONE;
        video_dst_bufsize = 0;
        memset(video_dst_data, 0, sizeof(video_dst_data));
        memset(video_dst_linesize, 0, sizeof(video_dst_linesize));

        /* set the whitelists for formats and codecs */
        if (av_dict_set(&opts, "codec_whitelist", codec, 0) < 0) {
            fprintf(stderr, "Could not set codec_whitelist.\n");
            ret = 1;
            goto end;
        }
        if (av_dict_set(&opts, "format_whitelist", format, 0) < 0) {
            fprintf(stderr, "Could not set format_whitelist.\n");
            ret = 1;
            goto end;
        }

        if (format) {
            fmt = av_find_input_format(format);
            if (!fmt) {
                fprintf(stderr, "Could not find input format %s\n", format);
                ret = 1;
                goto end;
            }
        }

        /* open input file, and allocate format context */
        if (avformat_open_input(&fmt_ctx, src_filename, fmt, &opts) < 0) {
            fprintf(stderr, "Could not open source file %s\n", src_filename);
            ret = 1;
            goto end;
        }

        /* retrieve stream information; failure is deliberately non-fatal
         * here so fuzzing can still exercise the decode path */
        if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
            fprintf(stderr, "Could not find stream information\n");
        }

        /* find stream with specified codec */
        if (open_codec_context(&dec_ctx, fmt_ctx, codec) < 0) {
            fprintf(stderr, "Could not open any stream in input file '%s'\n",
                    src_filename);
            ret = 1;
            goto end;
        }

        /* open output file */
        dst_file = fopen(dst_filename, "wb");
        if (!dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", dst_filename);
            ret = 1;
            goto end;
        }

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* allocate image where the decoded image will be put */
            width = dec_ctx->width;
            height = dec_ctx->height;
            pix_fmt = dec_ctx->pix_fmt;
            video_dst_bufsize = av_image_alloc(video_dst_data, video_dst_linesize,
                                 width, height, pix_fmt, 1);
            if (video_dst_bufsize < 0) {
                fprintf(stderr, "Could not allocate raw video buffer\n");
                ret = 1;
                goto end;
            }
        }

        /* dump input information to stderr */
        av_dump_format(fmt_ctx, 0, src_filename, 0);

        /* allocate frame */
        frame = av_frame_alloc();
        if (!frame) {
            fprintf(stderr, "Could not allocate frame\n");
            ret = 1;
            goto end;
        }

        printf("Demuxing from file '%s' into '%s'\n", src_filename, dst_filename);

        /* read frames from the file */
        while (av_read_frame(fmt_ctx, &pkt) >= 0) {
            /* bug fix: keep the original packet for freeing.  decode_packet()
             * advances pkt.data, and av_free_packet() must be handed the
             * original data pointer or a non-refcounted packet is freed
             * mid-buffer (same pattern as the other examples in this file) */
            AVPacket orig_pkt = pkt;
            do {
                int decoded = decode_packet(dec_ctx, dst_file, frame, &got_frame, &frame_count, &pkt);
                if (decoded < 0)
                    break;
                /* increase data pointer and decrease size of remaining data buffer */
                pkt.data += decoded;
                pkt.size -= decoded;
            } while (pkt.size > 0);
            av_free_packet(&orig_pkt);
        }

        printf("Flushing cached frames.\n");
        /* a NULL packet drains frames the decoder still holds */
        pkt.data = NULL;
        pkt.size = 0;
        do {
            decode_packet(dec_ctx, dst_file, frame, &got_frame, &frame_count, &pkt);
        } while (got_frame);

        printf("Demuxing done.\n");

end:
        /* free allocated memory */
        av_dict_free(&opts);
        avcodec_close(dec_ctx);
        avformat_close_input(&fmt_ctx);
        if (dst_file)
            fclose(dst_file);
        av_frame_free(&frame);
        av_free(video_dst_data[0]);
    }

    return ret;
}
Ejemplo n.º 11
0
/**
 * Decodes video from <in_file> and re-encodes decoded frames to JPEG 2000,
 * writing the encoded packets to <out_file>.
 *
 * NOTE(review): encoding only happens in the flush loop after demuxing is
 * finished, so frames decoded during the main read loop are never encoded —
 * presumably intentional for this experiment; confirm before reuse.
 *
 * Returns 0 on success, 1 on failure.
 */
int main(int argc, const char *argv[]) {
  int ret = 0, got_frame, got_output;
  int video_stream_idx = -1;
  int video_dst_bufsize;            /* size of the allocated raw image buffer */
  const char *src_filename;
  const char *dst_filename;
  FILE *dst_file                  = NULL;
  AVCodec *codec_enc              = NULL;
  AVFormatContext *fmt_ctx        = NULL;
  AVStream *video_stream          = NULL;
  AVCodecContext *video_dec_ctx   = NULL;
  AVCodecContext *video_enc_ctx   = NULL;
  AVFrame *frame                  = NULL;
  AVPacket pkt_dec, pkt_enc;
  uint8_t *video_dst_data[4]      = {NULL};
  int video_dst_linesize[4];

  /* bug fix: initialize BOTH packets up front.  The cleanup path calls
   * av_free_packet() on them unconditionally, which is undefined on an
   * uninitialized packet — pkt_enc was previously only initialized inside
   * the flush loop, and only when a frame was actually decoded. */
  av_init_packet(&pkt_dec);
  pkt_dec.data = NULL;
  pkt_dec.size = 0;
  av_init_packet(&pkt_enc);
  pkt_enc.data = NULL;
  pkt_enc.size = 0;

  if (argc != 3) {
    printf("Usage: %s <in_file> <out_file>\n", argv[0]);
    exit(1);
  }

  av_register_all();
  av_log_set_level(AV_LOG_DEBUG);

  src_filename = argv[1];
  dst_filename = argv[2];

  /* set up the JPEG 2000 encoder */
  codec_enc = avcodec_find_encoder(AV_CODEC_ID_JPEG2000);
  if (!codec_enc) {
      fprintf(stderr, "Codec not found\n");
      exit(1);
  }

  video_enc_ctx = avcodec_alloc_context3(codec_enc);
  if (!video_enc_ctx) {
      fprintf(stderr, "Could not allocate video codec context\n");
      exit(1);
  }

  /* open input file, and allocate format context */
  if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
    fprintf(stderr, "Could not open source file %s\n", src_filename);
    exit(1);
  }

  /* retrieve stream information */
  if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
    fprintf(stderr, "Could not find stream information\n");
    exit(1);
  }

  if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO, src_filename) >= 0) {
    video_stream = fmt_ctx->streams[video_stream_idx];
    video_dec_ctx = video_stream->codec;

    /* encoder mirrors the decoder's geometry and pixel format */
    video_enc_ctx->width = video_dec_ctx->width;
    video_enc_ctx->height = video_dec_ctx->height;
    video_enc_ctx->pix_fmt = video_dec_ctx->pix_fmt;

    /* make ffmpeg not complain about j2k being experimental */
    video_enc_ctx->strict_std_compliance = -2;

    if (avcodec_open2(video_enc_ctx, codec_enc, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
      fprintf(stderr, "Could not open destination file %s\n", dst_filename);
      ret = 1;
      goto end;
    }

    /* allocate image where the decoded image will be put */
    ret = av_image_alloc(video_dst_data, video_dst_linesize,
              video_dec_ctx->width, video_dec_ctx->height,
              video_dec_ctx->pix_fmt, 1);
    if (ret < 0) {
      fprintf(stderr, "Could not allocate raw video buffer\n");
      goto end;
    }
    video_dst_bufsize = ret;
  }

  /* dump input information to stderr */
  av_dump_format(fmt_ctx, 0, src_filename, 0);

  frame = av_frame_alloc();
  if (!frame) {
    fprintf(stderr, "Could not allocate frame\n");
    ret = AVERROR(ENOMEM);
    goto end;
  }

  if (video_stream)
    printf("Demuxing video from file '%s' into '%s'\n", src_filename, dst_filename);

  /* read frames from the file */
  while (av_read_frame(fmt_ctx, &pkt_dec) >= 0) {
    /* bug fix: packets read here were previously never freed (the
     * orig_pkt/av_free_packet lines were commented out), leaking every
     * demuxed packet.  Keep the original pointers for freeing, since
     * decode_packet() advances pkt_dec.data. */
    AVPacket orig_pkt = pkt_dec;
    do {
      ret = decode_packet(&got_frame, 0, &pkt_dec, video_dec_ctx, frame);
      if (ret < 0)
        break;
      pkt_dec.data += ret;
      pkt_dec.size -= ret;
    } while (pkt_dec.size > 0);
    av_free_packet(&orig_pkt);
  }

  /* flush cached frames, encoding each one as it drains */
  pkt_dec.data = NULL;
  pkt_dec.size = 0;
  do {
    decode_packet(&got_frame, 1, &pkt_dec, video_dec_ctx, frame);
    if (got_frame) {
      av_init_packet(&pkt_enc);
      pkt_enc.data = NULL;   /* let the encoder allocate the output buffer */
      pkt_enc.size = 0;

      ret = avcodec_encode_video2(video_enc_ctx, &pkt_enc, frame, &got_output);
      if (ret < 0) {
        fprintf(stderr, "Error encoding frame\n");
        goto end;
      }

      if (got_output) {
        printf("Write frame (size=%5d)\n", pkt_enc.size);
        fwrite(pkt_enc.data, 1, pkt_enc.size, dst_file);
      }
      /* bug fix: release the encoder-allocated packet every iteration;
       * it was previously leaked for all but the last frame */
      av_free_packet(&pkt_enc);
    }
  } while (got_frame);

  printf("Demuxing succeeded.\n");

end:
  av_free_packet(&pkt_enc);
  av_free_packet(&pkt_dec);
  if (video_dec_ctx)
    avcodec_close(video_dec_ctx);
  if (video_enc_ctx)
    avcodec_close(video_enc_ctx);
  avformat_close_input(&fmt_ctx);
  if (dst_file)
    fclose(dst_file);
  /* bug fix: the frame was previously freed only when dst_file was NULL
   * (fclose/av_frame_free were chained with if/else), leaking it on every
   * successful run */
  av_frame_free(&frame);
  av_free(video_dst_data[0]);

  return ret < 0;
}
Ejemplo n.º 12
0
/**
 * Demuxes a DirectShow (or network/file) input and decodes both streams,
 * writing raw video frames to a .pgm-style file and raw audio to a .pcm
 * file.  Source and destination paths are hard-coded below; argv is unused.
 *
 * Returns 0 on success, 1 on failure.
 */
int main (int argc, char **argv)
{
    AVInputFormat* fmtInput;
    int ret = 0, got_frame;

    /* hard-coded source/destination paths (argv parsing was removed) */
    src_filename = "dummy";
    video_dst_filename = "d:\\pgm\\wildlife.pgm";
    audio_dst_filename = "d:\\pgm\\wildlife.pcm";

    /* register all formats, codecs and capture devices */
    av_register_all();
    avdevice_register_all();
    avformat_network_init();

    /* capture through the DirectShow input device */
    fmtInput = av_find_input_format("dshow");

    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, fmtInput, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             video_dec_ctx->width, video_dec_ctx->height,
                             video_dec_ctx->pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }

    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        int nb_planes;

        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            /* bug fix: this error previously reported video_dst_filename */
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            ret = 1;
            goto end;
        }

        /* planar sample formats need one destination pointer per channel */
        nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ?
            audio_dec_ctx->channels : 1;
        audio_dst_data = av_mallocz(sizeof(uint8_t *) * nb_planes);
        if (!audio_dst_data) {
            fprintf(stderr, "Could not allocate audio data buffers\n");
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        decode_packet(&got_frame, 0);
        av_free_packet(&pkt);
    }

    /* flush cached frames: a NULL packet drains the decoders */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
               video_dst_filename);
    }

    if (audio_stream) {
        const char *fmt;

        if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt)) < 0)
            goto end;
        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, audio_dec_ctx->channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }

end:
    if (video_dec_ctx)
        avcodec_close(video_dec_ctx);
    if (audio_dec_ctx)
        avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_free(frame);
    av_free(video_dst_data[0]);
    av_free(audio_dst_data);

    return ret < 0;
}
Ejemplo n.º 13
0
/**
 * Subtitle demo: either demuxes up to 10 embedded ASS/text subtitle entries
 * from LOCAL_FILE (when EMBEDDING_SUBTITLE is defined) or loads an external
 * subtitle file, then prints the first 20 subtitle segments with timestamps.
 * Returns 0 on success, 1 on any setup failure.
 */
int _tmain(int argc, _TCHAR* argv[])
{
    ISubtitles* subtitle = NULL;
    if (!ISubtitles::create(&subtitle)) {
		printf("failed to create subtitle instance.\n");
        return 1;
    }
    
#ifdef EMBEDDING_SUBTITLE
	av_register_all();

	avformat_network_init();

	AVFormatContext *fmt_ctx = NULL;
	char *url = LOCAL_FILE;
	int subtitle_stream_idx = -1;
	AVStream* subtitle_stream;
	AVCodecContext* subtitle_dec_ctx;
	AVPacket pkt;
	AVSubtitle sub;
	int got_sub;
	int ret;
	int index = 0;            /* number of subtitle entities added so far */

	/* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, url, NULL, NULL) < 0) {
		LOGE("Could not open source file");
        return 1;
    }

	/* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        LOGE("Could not find stream information\n");
        return 1;
    }

	if (open_codec_context(&fmt_ctx, &subtitle_stream_idx, AVMEDIA_TYPE_SUBTITLE) < 0) {
		LOGE("failed to find subttile track");
		return 1;
	}

	subtitle_stream = fmt_ctx->streams[subtitle_stream_idx];
	subtitle_dec_ctx = subtitle_stream->codec;

	/* dump input information to stderr */
	av_dump_format(fmt_ctx, 0, url, 0);

	/* map the FFmpeg subtitle codec id onto the project's own enum;
	 * anything that is not ASS/SSA is treated as plain text */
	SubtitleCodecId codec_id;
	if (subtitle_dec_ctx->codec_id == AV_CODEC_ID_ASS ||
		subtitle_dec_ctx->codec_id == AV_CODEC_ID_SSA)
		codec_id = SUBTITLE_CODEC_ID_ASS;
	else
		codec_id = SUBTITLE_CODEC_ID_TEXT;
	/* register the embedded track; extradata carries the ASS header */
	ret = subtitle->addEmbeddingSubtitle(codec_id, "chs", "chs", 
		(const char *)subtitle_dec_ctx->extradata, subtitle_dec_ctx->extradata_size);
	if (ret < 0) {
		LOGE("failed to addEmbeddingSubtitle");
		return 1;
	}

	/* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* read frames from the file; stop after 10 subtitle entities */
    while (av_read_frame(fmt_ctx, &pkt) >= 0 && index < 10 ) {
		if (pkt.stream_index == subtitle_stream_idx) {
			/* decode_subtitle2 may consume the packet in several steps;
			 * keep the original pointers so av_free_packet() sees them */
			AVPacket orig_pkt = pkt;
			do {
				ret = avcodec_decode_subtitle2(subtitle_dec_ctx, &sub, &got_sub, &pkt);
				if (ret < 0) {
					break;
				}
				if (got_sub) {
					LOGI("got subtitle");

					for (int i=0;i<sub.num_rects;i++) {
						if (sub.rects[i]->ass) {
							int64_t start_time ,stop_time;
							AVRational ra;
							ra.num = 1;
							ra.den = AV_TIME_BASE;
							/* NOTE(review): display times are scaled by 1000
							 * before rescaling from AV_TIME_BASE to the stream
							 * time base — presumably ms -> us; confirm units */
							start_time = av_rescale_q(sub.pts + sub.start_display_time * 1000,
									 ra, subtitle_stream->time_base);
							stop_time = av_rescale_q(sub.pts + sub.end_display_time * 1000,
									 ra, subtitle_stream->time_base);
							subtitle->addEmbeddingSubtitleEntity(0, start_time, stop_time - start_time, 
								sub.rects[i]->ass, strlen(sub.rects[i]->ass)); // my_strlen_utf8_c

							index++;
						}
					}
					avsubtitle_free(&sub);
				}
				/* advance past the bytes the decoder consumed */
				pkt.data += ret;
				pkt.size -= ret;
			} while (pkt.size > 0);
			av_free_packet(&orig_pkt);
		}
		else {
			/* not our stream: discard the packet */
			av_free_packet(&pkt);
		}
    }

#else
	if (!subtitle->loadSubtitle(SUB_FILE_PATH, false)) {
		printf("failed to load subtitle: %s", SUB_FILE_PATH);
		return 1;
	}
#endif

    STSSegment* segment = NULL;
	char subtitleText[1024] = {0};

	/* print the first 20 segments as h:mm:ss.cc --> h:mm:ss.cc text */
	int line = 0;
    while(line < 20 && subtitle->getNextSubtitleSegment(&segment)) {
        int64_t startTime = segment->getStartTime();
        int64_t stopTime = segment->getStopTime();
		segment->getSubtitleText(subtitleText, 1024);
        LOGI("%01d:%02d:%02d.%02d  --> %01d:%02d:%02d.%02d %s",
            int(startTime/1000/3600), int(startTime/1000%3600/60), int(startTime/1000%60), int(startTime%1000)/10,
            int(stopTime/1000/3600), int(stopTime/1000%3600/60), int(stopTime/1000%60), int(stopTime%1000)/10,
			CW2A(CA2W(subtitleText, CP_UTF8)));

		//getchar();
		line++;
    }

    subtitle->close();

	return 0;
}
Ejemplo n.º 14
0
int _tmain(int argc, _TCHAR* argv[])
{
	int ret = 0, got_frame;

	if (argc != 3) {
        fprintf(stderr, "usage: %s input_file fifo_size(in byte)\n"
                "\n", argv[0]);
		fprintf(stderr, "example: %s \"rtmp://172.16.204.106/live/test01 live=1\" 128000\n", argv[0]);
        exit(1);
    }

	av_register_all();
	avfilter_register_all();

	avformat_network_init();

	//av_log_set_callback(ff_log_callback);

	/* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, argv[1], NULL, NULL) < 0) {
		printf("Could not open source %s\n", argv[1]);
        return 1;
    }

	/* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        printf("Could not find stream information\n");
        return 1;
    }

	if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;

		audio_dst_bufsize = 192000 * 2;
        audio_dst_data = (uint8_t*)av_malloc(audio_dst_bufsize);
		if (audio_dst_data == NULL) {
			printf("No enough memory for audio conversion\n");
			return 1;
		}

		// it may changed when decode
		audio_channel_layout = audio_dec_ctx->channel_layout;
		audio_channels = audio_dec_ctx->channels;

		swr_ctx = swr_alloc_set_opts(swr_ctx,
			AV_CH_LAYOUT_STEREO,
			AV_SAMPLE_FMT_S16,
			audio_dec_ctx->sample_rate,
			audio_dec_ctx->channel_layout,
			audio_dec_ctx->sample_fmt,
			audio_dec_ctx->sample_rate,
			0, 0);
		if (!swr_ctx) {
			printf("failed to alloc swr_ctx\n");
			return 1;
		}

		if (swr_init(swr_ctx) < 0 || swr_ctx == NULL) {
			printf("swr_init failed\n");
			goto end;
		}

		printf("swr_init done!\n");

		//if (init_filters(audio_stream, audio_stream_idx) < 0)
		//	printf("failed to init_filters!\n");
    }

	/* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, argv[1], 0);

	frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

	int fifo_size = atoi(argv[2]);
	printf("fifo_size %d\n", fifo_size);
	audio_fifo.create(fifo_size);

	// init sdl audio
	SDL_Init(SDL_INIT_AUDIO);

	SDL_AudioSpec wanted_spec, spec;
	memset(&wanted_spec, 0, sizeof(SDL_AudioSpec));
	memset(&spec, 0, sizeof(SDL_AudioSpec));
	wanted_spec.freq		= audio_dec_ctx->sample_rate;
	wanted_spec.format		= AUDIO_S16SYS;
	wanted_spec.channels	= 2;
	wanted_spec.silence		= 0;
	wanted_spec.samples		= SDL_AUDIO_SAMPLES;
	wanted_spec.callback	= audio_callback;
	wanted_spec.userdata	= &audio_fifo;

	if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
		printf("SDL_OpenAudio: %s\n", SDL_GetError());
		return 1;
	}

	printf("SDL_AudioSpec got: chn %d, fmt 0x%x, freq %d\n", spec.channels, spec.format, spec.freq);

	SDL_PauseAudio(0);

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        decode_packet(&got_frame, 0);
        av_free_packet(&pkt);
    }

end:
    if (audio_dec_ctx)
        avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_free(frame);
    av_free(audio_dst_data);
	if (swr_ctx)
		swr_free(&swr_ctx);
	return 0;
}