Example #1
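Constructor of a QuickTime Animation (qtrle) movie writer: it allocates a MOV output context, adds a video stream with avformat_new_stream(), configures the encoder from the canvas dimensions and pixel format, wraps the caller's pixel buffer in an AVFrame, and writes the container header. It uses the old FFmpeg API names (CODEC_ID_QTRLE, PIX_FMT_*, avcodec_alloc_frame).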
ffCanvas::Impl::Impl(const char* name, PixelFormat fmt, int width, int height, int stride,
                     char* bits, int fps)
: mWidth(width), mHeight(height), mStride(stride), mBuffer(bits), mFrameRate(fps),
  mError(NULL), mOutputCtx(NULL), mFrame(NULL)
{
    avcodec_register_all();
    av_register_all();

    int res = avformat_alloc_output_context2(&mOutputCtx, NULL, "mov", name);
    if (res < 0) {
        mError = "out of memory";
        return;
    }
    
    AVCodec *codec = avcodec_find_encoder(CODEC_ID_QTRLE);
    if (codec == NULL) {
        mError = "codec not found";
        return;
    }
    
    AVStream* stream = avformat_new_stream(mOutputCtx, codec);
    if (stream == NULL) {
        mError = "out of memory";
        return;
    }
    
    AVCodecContext *codecCtx = stream->codec;

    avcodec_get_context_defaults3(codecCtx, codec);
    
    /* put sample parameters */
    codecCtx->bit_rate = height * stride * fps * 8;
    /* resolution must be a multiple of four */
    assert(((width & 3) | (height & 3)) == 0);
    codecCtx->width = width;
    codecCtx->height = height;
    /* frames per second */
    codecCtx->time_base.num = 1;
    codecCtx->time_base.den = fps;
    codecCtx->gop_size = 10; /* emit one intra frame every ten frames */
    codecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    
    switch (fmt) {
        case aggCanvas::Gray8_Blend:
            codecCtx->pix_fmt = PIX_FMT_GRAY8;
            break;
        case aggCanvas::FF24_Blend:
            codecCtx->pix_fmt = PIX_FMT_RGB24;
            break;
        case aggCanvas::FF_Blend:
            codecCtx->pix_fmt = PIX_FMT_ARGB;
            break;
        default:
            mError = "unknown pixel format";
            return;
    }
    
    if (avcodec_open2(codecCtx, codec, NULL) < 0) {
        mError = "could not open codec";
        return;
    }
    
    mFrame = avcodec_alloc_frame();
    if (mFrame == NULL) {
        mError = "out of memory";
        return;
    }
    mFrame->data[0] = (uint8_t*)bits;
    mFrame->data[1] = codecCtx->pix_fmt == PIX_FMT_GRAY8 ? (uint8_t*)dummyPalette : NULL;
    mFrame->data[2] = mFrame->data[3] = NULL;
    mFrame->linesize[0] = stride;
    mFrame->linesize[1] = codecCtx->pix_fmt == PIX_FMT_GRAY8 ? 1024 : 0;
    mFrame->linesize[2] = mFrame->linesize[3] = 0;
    
    if (avio_open(&(mOutputCtx->pb), name, AVIO_FLAG_WRITE) < 0) {
        mError = "failed to write video file header";
        return;
    }
    
    if (avformat_write_header(mOutputCtx, NULL) < 0) {
        avio_close(mOutputCtx->pb);
        mError = "failed to write video file header";
        return;
    }
}
Example #2
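Header function of FFmpeg's SAP demuxer: it listens on a UDP socket for a SAP announcement, extracts the SDP payload, opens a nested "sdp" demuxer on it, and mirrors the resulting streams onto the outer AVFormatContext with avformat_new_stream().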
static int sap_read_header(AVFormatContext *s)
{
    struct SAPState *sap = s->priv_data;
    char host[1024], path[1024], url[1024];
    uint8_t recvbuf[RTP_MAX_PACKET_LENGTH];
    int port;
    int ret, i;
    AVInputFormat* infmt;

    if (!ff_network_init())
        return AVERROR(EIO);

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port,
                 path, sizeof(path), s->filename);
    if (port < 0)
        port = 9875;

    if (!host[0]) {
        /* Listen for announcements on sap.mcast.net if no host was specified */
        av_strlcpy(host, "224.2.127.254", sizeof(host));
    }

    ff_url_join(url, sizeof(url), "udp", NULL, host, port, "?localport=%d",
                port);
    ret = ffurl_open_whitelist(&sap->ann_fd, url, AVIO_FLAG_READ,
                               &s->interrupt_callback, NULL,
                               s->protocol_whitelist, s->protocol_blacklist);
    if (ret)
        goto fail;

    while (1) {
        int addr_type, auth_len;
        int pos;

        ret = ffurl_read(sap->ann_fd, recvbuf, sizeof(recvbuf) - 1);
        if (ret == AVERROR(EAGAIN))
            continue;
        if (ret < 0)
            goto fail;
        recvbuf[ret] = '\0'; /* Null terminate for easier parsing */
        if (ret < 8) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }

        if ((recvbuf[0] & 0xe0) != 0x20) {
            av_log(s, AV_LOG_WARNING, "Unsupported SAP version packet "
                                      "received\n");
            continue;
        }

        if (recvbuf[0] & 0x04) {
            av_log(s, AV_LOG_WARNING, "Received stream deletion "
                                      "announcement\n");
            continue;
        }
        addr_type = recvbuf[0] & 0x10;
        auth_len  = recvbuf[1];
        sap->hash = AV_RB16(&recvbuf[2]);
        pos = 4;
        if (addr_type)
            pos += 16; /* IPv6 */
        else
            pos += 4; /* IPv4 */
        pos += auth_len * 4;
        if (pos + 4 >= ret) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }
#define MIME "application/sdp"
        if (strcmp(&recvbuf[pos], MIME) == 0) {
            pos += strlen(MIME) + 1;
        } else if (strncmp(&recvbuf[pos], "v=0\r\n", 5) == 0) {
            // Direct SDP without a mime type
        } else {
            av_log(s, AV_LOG_WARNING, "Unsupported mime type %s\n",
                                      &recvbuf[pos]);
            continue;
        }

        sap->sdp = av_strdup(&recvbuf[pos]);
        break;
    }

    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sap->sdp);
    ffio_init_context(&sap->sdp_pb, sap->sdp, strlen(sap->sdp), 0, NULL, NULL,
                      NULL, NULL);

    infmt = av_find_input_format("sdp");
    if (!infmt)
        goto fail;
    sap->sdp_ctx = avformat_alloc_context();
    if (!sap->sdp_ctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    sap->sdp_ctx->max_delay = s->max_delay;
    sap->sdp_ctx->pb        = &sap->sdp_pb;
    sap->sdp_ctx->interrupt_callback = s->interrupt_callback;

    if ((ret = ff_copy_whiteblacklists(sap->sdp_ctx, s)) < 0)
        goto fail;

    ret = avformat_open_input(&sap->sdp_ctx, "temp.sdp", infmt, NULL);
    if (ret < 0)
        goto fail;
    if (sap->sdp_ctx->ctx_flags & AVFMTCTX_NOHEADER)
        s->ctx_flags |= AVFMTCTX_NOHEADER;
    for (i = 0; i < sap->sdp_ctx->nb_streams; i++) {
        AVStream *st = avformat_new_stream(s, NULL);
        if (!st) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        st->id = i;
        avcodec_parameters_copy(st->codecpar, sap->sdp_ctx->streams[i]->codecpar);
        st->time_base = sap->sdp_ctx->streams[i]->time_base;
    }

    return 0;

fail:
    sap_read_close(s);
    return ret;
}
Example #3
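Header parser of FFmpeg's RL2 demuxer: it reads the RL2 file header, creates the video stream (plus a PCM audio stream when a sound rate is present), loads the chunk offset/size tables, and builds the sample index.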
/**
 * read rl2 header data and setup the avstreams
 * @param s demuxer context
 * @return 0 on success, AVERROR otherwise
 */
static av_cold int rl2_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    AVStream *st;
    unsigned int frame_count;
    unsigned int audio_frame_counter = 0;
    unsigned int video_frame_counter = 0;
    unsigned int back_size;
    unsigned short sound_rate;
    unsigned short rate;
    unsigned short channels;
    unsigned short def_sound_size;
    unsigned int signature;
    unsigned int pts_den = 11025; /* video only case */
    unsigned int pts_num = 1103;
    unsigned int* chunk_offset = NULL;
    int* chunk_size = NULL;
    int* audio_size = NULL;
    int i;
    int ret = 0;

    avio_skip(pb,4);          /* skip FORM tag */
    back_size = avio_rl32(pb); /**< get size of the background frame */
    signature = avio_rb32(pb);
    avio_skip(pb, 4);         /* data size */
    frame_count = avio_rl32(pb);

    /* disallow back_sizes and frame_counts that may lead to overflows later */
    if(back_size > INT_MAX/2  || frame_count > INT_MAX / sizeof(uint32_t))
        return AVERROR_INVALIDDATA;

    avio_skip(pb, 2);         /* encoding method */
    sound_rate = avio_rl16(pb);
    rate = avio_rl16(pb);
    channels = avio_rl16(pb);
    def_sound_size = avio_rl16(pb);

    /** setup video stream */
    st = avformat_new_stream(s, NULL);
    if(!st)
         return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_RL2;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = 320;
    st->codec->height = 200;

    /** allocate and fill extradata */
    st->codec->extradata_size = EXTRADATA1_SIZE;

    if(signature == RLV3_TAG && back_size > 0)
        st->codec->extradata_size += back_size;

    if(ff_get_extradata(st->codec, pb, st->codec->extradata_size) < 0)
        return AVERROR(ENOMEM);

    /** setup audio stream if present */
    if(sound_rate){
        if (!channels || channels > 42) {
            av_log(s, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
            return AVERROR_INVALIDDATA;
        }

        pts_num = def_sound_size;
        pts_den = rate;

        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = AV_CODEC_ID_PCM_U8;
        st->codec->codec_tag = 1;
        st->codec->channels = channels;
        st->codec->bits_per_coded_sample = 8;
        st->codec->sample_rate = rate;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels *
            st->codec->bits_per_coded_sample / 8;
        avpriv_set_pts_info(st,32,1,rate);
    }

    avpriv_set_pts_info(s->streams[0], 32, pts_num, pts_den);

    chunk_size =   av_malloc(frame_count * sizeof(uint32_t));
    audio_size =   av_malloc(frame_count * sizeof(uint32_t));
    chunk_offset = av_malloc(frame_count * sizeof(uint32_t));

    if(!chunk_size || !audio_size || !chunk_offset){
        av_free(chunk_size);
        av_free(audio_size);
        av_free(chunk_offset);
        return AVERROR(ENOMEM);
    }

    /** read offset and size tables; on truncated input free the tables before returning */
    for(i=0; i < frame_count;i++) {
        if (avio_feof(pb)) {
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
        chunk_size[i] = avio_rl32(pb);
    }
    for(i=0; i < frame_count;i++) {
        if (avio_feof(pb)) {
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
        chunk_offset[i] = avio_rl32(pb);
    }
    for(i=0; i < frame_count;i++) {
        if (avio_feof(pb)) {
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
        audio_size[i] = avio_rl32(pb) & 0xFFFF;
    }

    /** build the sample index */
    for(i=0;i<frame_count;i++){
        if(chunk_size[i] < 0 || audio_size[i] > chunk_size[i]){
            ret = AVERROR_INVALIDDATA;
            break;
        }

        if(sound_rate && audio_size[i]){
            av_add_index_entry(s->streams[1], chunk_offset[i],
                audio_frame_counter,audio_size[i], 0, AVINDEX_KEYFRAME);
            audio_frame_counter += audio_size[i] / channels;
        }
        av_add_index_entry(s->streams[0], chunk_offset[i] + audio_size[i],
            video_frame_counter,chunk_size[i]-audio_size[i],0,AVINDEX_KEYFRAME);
        ++video_frame_counter;
    }


end:
    av_free(chunk_size);
    av_free(audio_size);
    av_free(chunk_offset);

    return ret;
}
Example #4
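Header parser of FFmpeg's ISS demuxer: it reads the whitespace-separated header tokens and creates a single ADPCM IMA ISS audio stream whose sample rate, channel layout, and block size are derived from those tokens.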
static av_cold int iss_read_header(AVFormatContext *s)
{
    IssDemuxContext *iss = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    char token[MAX_TOKEN_SIZE];
    int stereo, rate_divisor;

    get_token(pb, token, sizeof(token)); //"IMA_ADPCM_Sound"
    get_token(pb, token, sizeof(token)); //packet size
    if (sscanf(token, "%d", &iss->packet_size) != 1) {
        av_log(s, AV_LOG_ERROR, "Failed parsing packet size\n");
        return AVERROR_INVALIDDATA;
    }
    get_token(pb, token, sizeof(token)); //File ID
    get_token(pb, token, sizeof(token)); //out size
    get_token(pb, token, sizeof(token)); //stereo
    if (sscanf(token, "%d", &stereo) != 1) {
        av_log(s, AV_LOG_ERROR, "Failed parsing stereo flag\n");
        return AVERROR_INVALIDDATA;
    }
    get_token(pb, token, sizeof(token)); //Unknown1
    get_token(pb, token, sizeof(token)); //RateDivisor
    if (sscanf(token, "%d", &rate_divisor) != 1) {
        av_log(s, AV_LOG_ERROR, "Failed parsing rate_divisor\n");
        return AVERROR_INVALIDDATA;
    }
    get_token(pb, token, sizeof(token)); //Unknown2
    get_token(pb, token, sizeof(token)); //Version ID
    get_token(pb, token, sizeof(token)); //Size

    if (iss->packet_size <= 0) {
        av_log(s, AV_LOG_ERROR, "packet_size %d is invalid\n", iss->packet_size);
        return AVERROR_INVALIDDATA;
    }

    iss->sample_start_pos = avio_tell(pb);

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codecpar->codec_id = AV_CODEC_ID_ADPCM_IMA_ISS;
    if (stereo) {
        st->codecpar->channels       = 2;
        st->codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
    } else {
        st->codecpar->channels       = 1;
        st->codecpar->channel_layout = AV_CH_LAYOUT_MONO;
    }
    st->codecpar->sample_rate = 44100;
    if(rate_divisor > 0)
         st->codecpar->sample_rate /= rate_divisor;
    st->codecpar->bits_per_coded_sample = 4;
    st->codecpar->bit_rate = st->codecpar->channels * st->codecpar->sample_rate
                                      * st->codecpar->bits_per_coded_sample;
    st->codecpar->block_align = iss->packet_size;
    avpriv_set_pts_info(st, 32, 1, st->codecpar->sample_rate);

    return 0;
}
Example #5
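Helper that adds a video stream to a global output context, fills in the encoder parameters (resolution, time base, quality or bit rate, and explicit libx264 settings on old libavcodec versions), opens the codec, and allocates the frame used for encoding.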
// add a video output stream
static int AddVideoStream()
{
#if LIBAVFORMAT_VERSION_MAJOR >= 53
    g_pVStream = avformat_new_stream(g_pContainer, g_pVCodec);
#else
    g_pVStream = av_new_stream(g_pContainer, 0);
#endif
    if (!g_pVStream)
        return FatalError("Could not allocate video stream");

    g_pVideo = g_pVStream->codec;

    avcodec_get_context_defaults3(g_pVideo, g_pVCodec);
    g_pVideo->codec_id = g_pVCodec->id;

    // put parameters
    // resolution must be a multiple of two
    g_pVideo->width  = g_Width  & ~1; // make even (dimensions should be even)
    g_pVideo->height = g_Height & ~1; // make even
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. for fixed-fps content,
       timebase should be 1/framerate and timestamp increments should be
       identically 1. */
    g_pVideo->time_base.den = g_Framerate.num;
    g_pVideo->time_base.num = g_Framerate.den;
    //g_pVideo->gop_size = 12; /* emit one intra frame every twelve frames at most */
    g_pVideo->pix_fmt = PIX_FMT_YUV420P;

    // set quality
    if (g_VQuality > 100)
        g_pVideo->bit_rate = g_VQuality;
    else
    {
        g_pVideo->flags |= CODEC_FLAG_QSCALE;
        g_pVideo->global_quality = g_VQuality*FF_QP2LAMBDA;
    }

    // some formats want stream headers to be separate
    if (g_pFormat->flags & AVFMT_GLOBALHEADER)
        g_pVideo->flags |= CODEC_FLAG_GLOBAL_HEADER;

#if LIBAVCODEC_VERSION_MAJOR < 53
    // for some versions of ffmpeg x264 options must be set explicitly
    if (strcmp(g_pVCodec->name, "libx264") == 0)
    {
        g_pVideo->coder_type = FF_CODER_TYPE_AC;
        g_pVideo->flags |= CODEC_FLAG_LOOP_FILTER;
        g_pVideo->crf = 23;
        g_pVideo->thread_count = 3;
        g_pVideo->me_cmp = FF_CMP_CHROMA;
        g_pVideo->partitions = X264_PART_I8X8 | X264_PART_I4X4 | X264_PART_P8X8 | X264_PART_B8X8;
        g_pVideo->me_method = ME_HEX;
        g_pVideo->me_subpel_quality = 7;
        g_pVideo->me_range = 16;
        g_pVideo->gop_size = 250;
        g_pVideo->keyint_min = 25;
        g_pVideo->scenechange_threshold = 40;
        g_pVideo->i_quant_factor = 0.71;
        g_pVideo->b_frame_strategy = 1;
        g_pVideo->qcompress = 0.6;
        g_pVideo->qmin = 10;
        g_pVideo->qmax = 51;
        g_pVideo->max_qdiff = 4;
        g_pVideo->max_b_frames = 3;
        g_pVideo->refs = 3;
        g_pVideo->directpred = 1;
        g_pVideo->trellis = 1;
        g_pVideo->flags2 = CODEC_FLAG2_BPYRAMID | CODEC_FLAG2_MIXED_REFS | CODEC_FLAG2_WPRED | CODEC_FLAG2_8X8DCT | CODEC_FLAG2_FASTPSKIP;
        g_pVideo->weighted_p_pred = 2;
    }
#endif

    // open the codec
#if LIBAVCODEC_VERSION_MAJOR >= 53
    AVDictionary* pDict = NULL;
    if (strcmp(g_pVCodec->name, "libx264") == 0)
        av_dict_set(&pDict, "preset", "medium", 0);

    if (avcodec_open2(g_pVideo, g_pVCodec, &pDict) < 0)
#else
    if (avcodec_open(g_pVideo, g_pVCodec) < 0)
#endif
        return FatalError("Could not open video codec %s", g_pVCodec->long_name);

    g_pVFrame = avcodec_alloc_frame();
    if (!g_pVFrame)
        return FatalError("Could not allocate frame");

    g_pVFrame->linesize[0] = g_Width;
    g_pVFrame->linesize[1] = g_Width/2;
    g_pVFrame->linesize[2] = g_Width/2;
    g_pVFrame->linesize[3] = 0;
    return 0;
}
Example #6
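Remuxing example that reads an H.264 elementary stream and an AAC file, creates matching streams in an MP4 output context driven through custom I/O, synthesizes PTS/DTS for packets that have none, and interleaves the two inputs by comparing timestamps before writing.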
int muxer_mp4(void* noUse)
{
    AVOutputFormat *ofmt = NULL;
    //Input AVFormatContext and Output AVFormatContext
    AVFormatContext *ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret, i;
    int videoindex_v = -1, videoindex_out = -1;
    int audioindex_a = -1, audioindex_out = -1;
    int frame_index = 0;
    int64_t cur_pts_v = 0, cur_pts_a = 0;

    //const char *in_filename_v = "cuc_ieschool.ts";//Input file URL
    const char *in_filename_v = "../testResource/bigbuckbunny_480x272.h264";
    //const char *in_filename_a = "cuc_ieschool.mp3";
    //const char *in_filename_a = "gowest.m4a";
    //const char *in_filename_a = "gowest.aac";
    const char *in_filename_a = "../testResource/WavinFlag.aac";

    const char *out_filename = "bigbuckbunny.mp4";//Output file URL
    av_register_all();
    //Input
    if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
        printf("Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {
        printf("Failed to retrieve input stream information");
        goto end;
    }

    if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {
        printf("Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {
        printf("Failed to retrieve input stream information");
        goto end;
    }
    printf("===========Input Information==========\n");
    av_dump_format(ifmt_ctx_v, 0, in_filename_v, 0);
    av_dump_format(ifmt_ctx_a, 0, in_filename_a, 0);
    printf("======================================\n");
    //Output
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        printf("Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;

    unsigned char* outbuffer = NULL;
    outbuffer = (unsigned char*)av_malloc(32768);

    // write_flag (third argument) must be 1 so the custom write_buffer callback receives the muxed output
    AVIOContext *avio_out = avio_alloc_context(outbuffer, 32768, 1, NULL, NULL, write_buffer, NULL);
    if (avio_out == NULL)
        goto end;
    ofmt_ctx->pb = avio_out;
    ofmt_ctx->flags = AVFMT_FLAG_CUSTOM_IO;

    for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
        //Create output AVStream according to input AVStream
        if (ifmt_ctx_v->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            AVStream *in_stream = ifmt_ctx_v->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            videoindex_v = i;
            if (!out_stream) {
                printf("Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            videoindex_out = out_stream->index;
            //Copy the settings of AVCodecContext
            if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
                printf("Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
            break;
        }
    }

    for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
        //Create output AVStream according to input AVStream
        if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            AVStream *in_stream = ifmt_ctx_a->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            audioindex_a = i;
            if (!out_stream) {
                printf("Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            audioindex_out = out_stream->index;
            //Copy the settings of AVCodecContext
            if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
                printf("Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

            break;
        }
    }

    printf("==========Output Information==========\n");
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    printf("======================================\n");
    //Open output file
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
            printf("Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    //Write file header
    if (avformat_write_header(ofmt_ctx, NULL) < 0) {
        printf("Error occurred when opening output file\n");
        goto end;
    }


    //FIX
#if USE_H264BSF
    AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif
#if USE_AACBSF
    AVBitStreamFilterContext* aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif

    while (1) {
        AVFormatContext *ifmt_ctx;
        int stream_index = 0;
        AVStream *in_stream, *out_stream;

        //Get an AVPacket
        if (av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[videoindex_v]->time_base, cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0) {
            ifmt_ctx = ifmt_ctx_v;
            stream_index = videoindex_out;

            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];

                    if (pkt.stream_index == videoindex_v) {
                        //FIX: No PTS (Example: Raw H.264)
                        //Simple Write PTS
                        if (pkt.pts == AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1 = in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts = pkt.pts;
                            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }

                        cur_pts_v = pkt.pts;
                        break;
                    }
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            }
            else {
                break;
            }
        }
        else {
            ifmt_ctx = ifmt_ctx_a;
            stream_index = audioindex_out;
            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];

                    if (pkt.stream_index == audioindex_a) {

                        //FIX: No PTS
                        //Simple Write PTS
                        if (pkt.pts == AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1 = in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts = pkt.pts;
                            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }
                        cur_pts_a = pkt.pts;

                        break;
                    }
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            }
            else {
                break;
            }

        }

        //FIX:Bitstream Filter
#if USE_H264BSF
        av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
#if USE_AACBSF
        av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif


        //Convert PTS/DTS
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        pkt.stream_index = stream_index;

        printf("Write 1 Packet. size:%5d\tpts:%lld\n", pkt.size, pkt.pts);
        //Write
        if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
            printf("Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);

    }
    //Write file trailer
    av_write_trailer(ofmt_ctx);

#if USE_H264BSF
    av_bitstream_filter_close(h264bsfc);
#endif
#if USE_AACBSF
    av_bitstream_filter_close(aacbsfc);
#endif

end:
    avformat_close_input(&ifmt_ctx_v);
    avformat_close_input(&ifmt_ctx_a);
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF) {
        printf("Error occurred.\n");
        return -1;
    }
    return 0;
}
Example #7
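Header parser of FFmpeg's APNG demuxer: it validates the PNG signature and the IHDR chunk, creates the video stream, and copies every chunk up to the first fcTL into the stream's extradata.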
static int apng_read_header(AVFormatContext *s)
{
    APNGDemuxContext *ctx = s->priv_data;
    AVIOContext *pb = s->pb;
    uint32_t len, tag;
    AVStream *st;
    int acTL_found = 0;
    int64_t ret = AVERROR_INVALIDDATA;

    /* verify PNGSIG */
    if (avio_rb64(pb) != PNGSIG)
        return ret;

    /* parse IHDR (must be first chunk) */
    len = avio_rb32(pb);
    tag = avio_rl32(pb);
    if (len != 13 || tag != MKTAG('I', 'H', 'D', 'R'))
        return ret;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    /* set the timebase to something large enough (1/100,000 of second)
     * to hopefully cope with all sane frame durations */
    avpriv_set_pts_info(st, 64, 1, 100000);
    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id   = AV_CODEC_ID_APNG;
    st->codecpar->width      = avio_rb32(pb);
    st->codecpar->height     = avio_rb32(pb);
    if ((ret = av_image_check_size(st->codecpar->width, st->codecpar->height, 0, s)) < 0)
        return ret;

    /* extradata will contain every chunk up to the first fcTL (excluded) */
    st->codecpar->extradata = av_malloc(len + 12 + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codecpar->extradata)
        return AVERROR(ENOMEM);
    st->codecpar->extradata_size = len + 12;
    AV_WB32(st->codecpar->extradata,    len);
    AV_WL32(st->codecpar->extradata+4,  tag);
    AV_WB32(st->codecpar->extradata+8,  st->codecpar->width);
    AV_WB32(st->codecpar->extradata+12, st->codecpar->height);
    if ((ret = avio_read(pb, st->codecpar->extradata+16, 9)) < 0)
        goto fail;

    while (!avio_feof(pb)) {
        if (acTL_found && ctx->num_play != 1) {
            int64_t size   = avio_size(pb);
            int64_t offset = avio_tell(pb);
            if (size < 0) {
                ret = size;
                goto fail;
            } else if (offset < 0) {
                ret = offset;
                goto fail;
            } else if ((ret = ffio_ensure_seekback(pb, size - offset)) < 0) {
                av_log(s, AV_LOG_WARNING, "Could not ensure seekback, will not loop\n");
                ctx->num_play = 1;
            }
        }
        if ((ctx->num_play == 1 || !acTL_found) &&
            ((ret = ffio_ensure_seekback(pb, 4 /* len */ + 4 /* tag */)) < 0))
            goto fail;

        len = avio_rb32(pb);
        if (len > 0x7fffffff) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        tag = avio_rl32(pb);
        switch (tag) {
        case MKTAG('a', 'c', 'T', 'L'):
            if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0 ||
                (ret = append_extradata(st->codecpar, pb, len + 12)) < 0)
                goto fail;
            acTL_found = 1;
            ctx->num_frames = AV_RB32(st->codecpar->extradata + ret + 8);
            ctx->num_play   = AV_RB32(st->codecpar->extradata + ret + 12);
            av_log(s, AV_LOG_DEBUG, "num_frames: %"PRIu32", num_play: %"PRIu32"\n",
                                    ctx->num_frames, ctx->num_play);
            break;
        case MKTAG('f', 'c', 'T', 'L'):
            if (!acTL_found) {
               ret = AVERROR_INVALIDDATA;
               goto fail;
            }
            if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0)
                goto fail;
            return 0;
        default:
            if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0 ||
                (ret = append_extradata(st->codecpar, pb, len + 12)) < 0)
                goto fail;
        }
    }

fail:
    if (st->codecpar->extradata_size) {
        av_freep(&st->codecpar->extradata);
        st->codecpar->extradata_size = 0;
    }
    return ret;
}
Example #8
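Header parser of FFmpeg's IFF demuxer: it walks the IFF chunks to decide between 8SVX audio and ILBM/DEEP video, collects palette, bitmap-header, and display-mode information, and fills in the codec parameters and extradata accordingly.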
static int iff_read_header(AVFormatContext *s)
{
    IffDemuxContext *iff = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    uint8_t *buf;
    uint32_t chunk_id, data_size;
    uint32_t screenmode = 0;
    unsigned transparency = 0;
    unsigned masking = 0; // no mask
    uint8_t fmt[16];
    int fmt_size;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->channels = 1;
    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    avio_skip(pb, 8);
    // codec_tag used by ByteRun1 decoder to distinguish progressive (PBM) and interlaced (ILBM) content
    st->codec->codec_tag = avio_rl32(pb);

    while(!url_feof(pb)) {
        uint64_t orig_pos;
        int res;
        const char *metadata_tag = NULL;
        chunk_id = avio_rl32(pb);
        data_size = avio_rb32(pb);
        orig_pos = avio_tell(pb);

        switch(chunk_id) {
        case ID_VHDR:
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

            if (data_size < 14)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 12);
            st->codec->sample_rate = avio_rb16(pb);
            if (data_size >= 16) {
                avio_skip(pb, 1);
                iff->svx8_compression = avio_r8(pb);
            }
            break;

        case ID_ABIT:
        case ID_BODY:
        case ID_DBOD:
            iff->body_pos = avio_tell(pb);
            iff->body_size = data_size;
            break;

        case ID_CHAN:
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            if (avio_rb32(pb) < 6) {
                st->codec->channels       = 1;
                st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            } else {
                st->codec->channels       = 2;
                st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            }
            break;

        case ID_CAMG:
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            screenmode                = avio_rb32(pb);
            break;

        case ID_CMAP:
            st->codec->extradata_size = data_size + IFF_EXTRA_VIDEO_SIZE;
            st->codec->extradata      = av_malloc(data_size + IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
            if (avio_read(pb, st->codec->extradata + IFF_EXTRA_VIDEO_SIZE, data_size) < 0)
                return AVERROR(EIO);
            break;

        case ID_BMHD:
            iff->bitmap_compression = -1;
            st->codec->codec_type            = AVMEDIA_TYPE_VIDEO;
            if (data_size <= 8)
                return AVERROR_INVALIDDATA;
            st->codec->width                 = avio_rb16(pb);
            st->codec->height                = avio_rb16(pb);
            avio_skip(pb, 4); // x, y offset
            st->codec->bits_per_coded_sample = avio_r8(pb);
            if (data_size >= 10)
                masking                      = avio_r8(pb);
            if (data_size >= 11)
                iff->bitmap_compression      = avio_r8(pb);
            if (data_size >= 14) {
                avio_skip(pb, 1); // padding
                transparency                 = avio_rb16(pb);
            }
            if (data_size >= 16) {
                st->sample_aspect_ratio.num  = avio_r8(pb);
                st->sample_aspect_ratio.den  = avio_r8(pb);
            }
            break;

        case ID_DPEL:
            if (data_size < 4 || (data_size & 3))
                return AVERROR_INVALIDDATA;
            if ((fmt_size = avio_read(pb, fmt, sizeof(fmt))) < 0)
                return fmt_size;
            if (fmt_size == sizeof(deep_rgb24) && !memcmp(fmt, deep_rgb24, sizeof(deep_rgb24)))
                st->codec->pix_fmt = AV_PIX_FMT_RGB24;
            else if (fmt_size == sizeof(deep_rgba) && !memcmp(fmt, deep_rgba, sizeof(deep_rgba)))
                st->codec->pix_fmt = AV_PIX_FMT_RGBA;
            else if (fmt_size == sizeof(deep_bgra) && !memcmp(fmt, deep_bgra, sizeof(deep_bgra)))
                st->codec->pix_fmt = AV_PIX_FMT_BGRA;
            else if (fmt_size == sizeof(deep_argb) && !memcmp(fmt, deep_argb, sizeof(deep_argb)))
                st->codec->pix_fmt = AV_PIX_FMT_ARGB;
            else if (fmt_size == sizeof(deep_abgr) && !memcmp(fmt, deep_abgr, sizeof(deep_abgr)))
                st->codec->pix_fmt = AV_PIX_FMT_ABGR;
            else {
                av_log_ask_for_sample(s, "unsupported color format\n");
                return AVERROR_PATCHWELCOME;
            }
            break;

        case ID_DGBL:
            st->codec->codec_type            = AVMEDIA_TYPE_VIDEO;
            if (data_size < 8)
                return AVERROR_INVALIDDATA;
            st->codec->width                 = avio_rb16(pb);
            st->codec->height                = avio_rb16(pb);
            iff->bitmap_compression          = avio_rb16(pb);
            if (iff->bitmap_compression > 1) {
                av_log(s, AV_LOG_ERROR,
                       "compression %i not supported\n", iff->bitmap_compression);
                return AVERROR_PATCHWELCOME;
            }
            st->sample_aspect_ratio.num      = avio_r8(pb);
            st->sample_aspect_ratio.den      = avio_r8(pb);
            st->codec->bits_per_coded_sample = 24;
            break;

        case ID_DLOC:
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            st->codec->width  = avio_rb16(pb);
            st->codec->height = avio_rb16(pb);
            break;

        case ID_ANNO:
        case ID_TEXT:      metadata_tag = "comment";   break;
        case ID_AUTH:      metadata_tag = "artist";    break;
        case ID_COPYRIGHT: metadata_tag = "copyright"; break;
        case ID_NAME:      metadata_tag = "title";     break;
        }

        if (metadata_tag) {
            if ((res = get_metadata(s, metadata_tag, data_size)) < 0) {
                av_log(s, AV_LOG_ERROR, "cannot allocate metadata tag %s!\n", metadata_tag);
                return res;
            }
        }
        avio_skip(pb, data_size - (avio_tell(pb) - orig_pos) + (data_size & 1));
    }

    avio_seek(pb, iff->body_pos, SEEK_SET);

    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate);

        switch (iff->svx8_compression) {
        case COMP_NONE:
            st->codec->codec_id = AV_CODEC_ID_PCM_S8_PLANAR;
            break;
        case COMP_FIB:
            st->codec->codec_id = AV_CODEC_ID_8SVX_FIB;
            break;
        case COMP_EXP:
            st->codec->codec_id = AV_CODEC_ID_8SVX_EXP;
            break;
        default:
            av_log(s, AV_LOG_ERROR,
                   "Unknown SVX8 compression method '%d'\n", iff->svx8_compression);
            return -1;
        }

        st->codec->bits_per_coded_sample = iff->svx8_compression == COMP_NONE ? 8 : 4;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
        break;

    case AVMEDIA_TYPE_VIDEO:
        iff->bpp          = st->codec->bits_per_coded_sample;
        if ((screenmode & 0x800 /* Hold And Modify */) && iff->bpp <= 8) {
            iff->ham      = iff->bpp > 6 ? 6 : 4;
            st->codec->bits_per_coded_sample = 24;
        }
        iff->flags        = (screenmode & 0x80 /* Extra HalfBrite */) && iff->bpp <= 8;
        iff->masking      = masking;
        iff->transparency = transparency;

        if (!st->codec->extradata) {
            st->codec->extradata_size = IFF_EXTRA_VIDEO_SIZE;
            st->codec->extradata      = av_malloc(IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
        }
        buf = st->codec->extradata;
        bytestream_put_be16(&buf, IFF_EXTRA_VIDEO_SIZE);
        bytestream_put_byte(&buf, iff->bitmap_compression);
        bytestream_put_byte(&buf, iff->bpp);
        bytestream_put_byte(&buf, iff->ham);
        bytestream_put_byte(&buf, iff->flags);
        bytestream_put_be16(&buf, iff->transparency);
        bytestream_put_byte(&buf, iff->masking);

        switch (iff->bitmap_compression) {
        case BITMAP_RAW:
            st->codec->codec_id = AV_CODEC_ID_IFF_ILBM;
            break;
        case BITMAP_BYTERUN1:
            st->codec->codec_id = AV_CODEC_ID_IFF_BYTERUN1;
            break;
        default:
            av_log(s, AV_LOG_ERROR,
                   "Unknown bitmap compression method '%d'\n", iff->bitmap_compression);
            return AVERROR_INVALIDDATA;
        }
        break;
    default:
        return -1;
    }

    return 0;
}
Example #9
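Header function of FFmpeg's HLS demuxer: it parses the master and variant playlists, probes and opens a nested demuxer for every variant, and exposes each nested stream on the outer context via avformat_new_stream().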
static int hls_read_header(AVFormatContext *s)
{
    URLContext *u = (s->flags & AVFMT_FLAG_CUSTOM_IO) ? NULL : s->pb->opaque;
    HLSContext *c = s->priv_data;
    int ret = 0, i, j, stream_offset = 0;

    c->interrupt_callback = &s->interrupt_callback;

    // if the URL context is good, read important options we must broker later
    if (u && u->prot->priv_data_class) {
        // get the previous user agent & set back to null if string size is zero
        av_freep(&c->user_agent);
        av_opt_get(u->priv_data, "user-agent", 0, (uint8_t**)&(c->user_agent));
        if (c->user_agent && !strlen(c->user_agent))
            av_freep(&c->user_agent);

        // get the previous cookies & set back to null if string size is zero
        av_freep(&c->cookies);
        av_opt_get(u->priv_data, "cookies", 0, (uint8_t**)&(c->cookies));
        if (c->cookies && !strlen(c->cookies))
            av_freep(&c->cookies);
    }

    if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
        goto fail;

    if (c->n_variants == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }
    /* If the playlist only contained variants, parse each individual
     * variant playlist. */
    if (c->n_variants > 1 || c->variants[0]->n_segments == 0) {
        for (i = 0; i < c->n_variants; i++) {
            struct variant *v = c->variants[i];
            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                goto fail;
        }
    }

    if (c->variants[0]->n_segments == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If this isn't a live stream, calculate the total duration of the
     * stream. */
    if (c->variants[0]->finished) {
        int64_t duration = 0;
        for (i = 0; i < c->variants[0]->n_segments; i++)
            duration += round(c->variants[0]->segments[i]->duration * AV_TIME_BASE);
        s->duration = duration;
    }

    /* Open the demuxer for each variant */
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        AVInputFormat *in_fmt = NULL;
        char bitrate_str[20];
        AVProgram *program = NULL;
        if (v->n_segments == 0)
            continue;

        if (!(v->ctx = avformat_alloc_context())) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        v->index  = i;
        v->needed = 1;
        v->parent = s;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        v->cur_seq_no = v->start_seq_no;
        if (!v->finished && v->n_segments > 3)
            v->cur_seq_no = v->start_seq_no + v->n_segments - 3;

        v->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
        ffio_init_context(&v->pb, v->read_buffer, INITIAL_BUFFER_SIZE, 0, v,
                          read_data, NULL, NULL);
        v->pb.seekable = 0;
        ret = av_probe_input_buffer(&v->pb, &in_fmt, v->segments[0]->url,
                                    NULL, 0, 0);
        if (ret < 0) {
            /* Free the ctx - it isn't initialized properly at this point,
             * so avformat_close_input shouldn't be called. If
             * avformat_open_input fails below, it frees and zeros the
             * context, so it doesn't need any special treatment like this. */
            av_log(s, AV_LOG_ERROR, "Error when loading first segment '%s'\n", v->segments[0]->url);
            avformat_free_context(v->ctx);
            v->ctx = NULL;
            goto fail;
        }
        v->ctx->pb       = &v->pb;
        ret = avformat_open_input(&v->ctx, v->segments[0]->url, in_fmt, NULL);
        if (ret < 0)
            goto fail;

        v->stream_offset = stream_offset;
        v->ctx->ctx_flags &= ~AVFMTCTX_NOHEADER;
        ret = avformat_find_stream_info(v->ctx, NULL);
        if (ret < 0)
            goto fail;
        snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);

        /* Create new AVprogram for variant i */
        program = av_new_program(s, i);
        if (!program)
            goto fail;
        av_dict_set(&program->metadata, "variant_bitrate", bitrate_str, 0);

        /* Create new AVStreams for each stream in this variant */
        for (j = 0; j < v->ctx->nb_streams; j++) {
            AVStream *st = avformat_new_stream(s, NULL);
            AVStream *ist = v->ctx->streams[j];
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            ff_program_add_stream_index(s, i, stream_offset + j);
            st->id = i;
            avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
            avcodec_copy_context(st->codec, v->ctx->streams[j]->codec);
            if (v->bandwidth)
                av_dict_set(&st->metadata, "variant_bitrate", bitrate_str,
                                 0);
        }
        stream_offset += v->ctx->nb_streams;
    }

    c->first_packet = 1;
    c->first_timestamp = AV_NOPTS_VALUE;
    c->seek_timestamp  = AV_NOPTS_VALUE;

    return 0;
fail:
    free_variant_list(c);
    return ret;
}
Example #10
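Dolphin's AVIDump::CreateFile(): it prepares the AVI frame-dump file, creates the video stream (XVID-tagged MPEG-4 or FFV1), opens the encoder, allocates the source and scaled frames, and writes the container header.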
bool AVIDump::CreateFile()
{
  AVCodec* codec = nullptr;

  s_format_context = avformat_alloc_context();
  std::stringstream s_file_index_str;
  s_file_index_str << s_file_index;
  snprintf(s_format_context->filename, sizeof(s_format_context->filename), "%s",
           (File::GetUserPath(D_DUMPFRAMES_IDX) + "framedump" + s_file_index_str.str() + ".avi")
               .c_str());
  File::CreateFullPath(s_format_context->filename);

  // Ask to delete file
  if (File::Exists(s_format_context->filename))
  {
    if (SConfig::GetInstance().m_DumpFramesSilent ||
        AskYesNoT("Delete the existing file '%s'?", s_format_context->filename))
    {
      File::Delete(s_format_context->filename);
    }
    else
    {
      // Stop and cancel dumping the video
      return false;
    }
  }

  if (!(s_format_context->oformat = av_guess_format("avi", nullptr, nullptr)) ||
      !(s_stream = avformat_new_stream(s_format_context, codec)))
  {
    return false;
  }

  s_stream->codec->codec_id =
      g_Config.bUseFFV1 ? AV_CODEC_ID_FFV1 : s_format_context->oformat->video_codec;
  if (!g_Config.bUseFFV1)
    s_stream->codec->codec_tag =
        MKTAG('X', 'V', 'I', 'D');  // Force XVID FourCC for better compatibility
  s_stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
  s_stream->codec->bit_rate = 400000;
  s_stream->codec->width = s_width;
  s_stream->codec->height = s_height;
  s_stream->codec->time_base.num = 1;
  s_stream->codec->time_base.den = VideoInterface::GetTargetRefreshRate();
  s_stream->codec->gop_size = 12;
  s_stream->codec->pix_fmt = g_Config.bUseFFV1 ? AV_PIX_FMT_BGRA : AV_PIX_FMT_YUV420P;

  if (!(codec = avcodec_find_encoder(s_stream->codec->codec_id)) ||
      (avcodec_open2(s_stream->codec, codec, nullptr) < 0))
  {
    return false;
  }

  s_src_frame = av_frame_alloc();
  s_scaled_frame = av_frame_alloc();

  s_size = avpicture_get_size(s_stream->codec->pix_fmt, s_width, s_height);

  s_yuv_buffer = new uint8_t[s_size];
  avpicture_fill((AVPicture*)s_scaled_frame, s_yuv_buffer, s_stream->codec->pix_fmt, s_width,
                 s_height);

  NOTICE_LOG(VIDEO, "Opening file %s for dumping", s_format_context->filename);
  if (avio_open(&s_format_context->pb, s_format_context->filename, AVIO_FLAG_WRITE) < 0)
  {
    WARN_LOG(VIDEO, "Could not open %s", s_format_context->filename);
    return false;
  }

  avformat_write_header(s_format_context, nullptr);

  return true;
}
Example #11
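Stand-alone test program that opens an MP4 file, sets up an RTP output context for its video stream, writes the generated SDP to a file, then reads, decodes, and streams packets while dumping the RTP header fields; the listing is cut off before the end of main().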
int main(int argc, char **argv)
{
    /*
     int i;
     char    b[40];
     char    c[21];
     HANDLE hConsole;
     int k;
     
     #ifdef _DEBUG
     _CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
     #endif
     
     for(i = 0; i < sizeof(b); ++i )
     {
     b[i] = ' ';
     }
     b[255] = '\0';
     
     system("color 5");
     for(i = 0; i < 20; ++i) {
     c[i] = '>';
     c[i+1] = '\0';
     printf("Progress |%s%*s|\r",c,19-i,&"");
     Sleep(100);
     //printf("%s\r", b);
     }
     printf("\n");
     printf("sizeof(structa_t) = %d\n", sizeof(structa_t));
     printf("sizeof(structb_t) = %d\n", sizeof(structb_t));
     printf("sizeof(structc_t) = %d\n", sizeof(structc_t));
     printf("sizeof(structd_t) = %d\n", AV_TIME_BASE);
     
     hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
     
     // you can loop k higher to see more color choices
     for(k = 1; k < 255; k++)
     {
     // pick the colorattribute k you want
     SetConsoleTextAttribute(hConsole, k);
     printf("%d I want to be nice today!",k);
     }*/
    
    AVFormatContext             *in_ctx = NULL, *out_ctx = NULL;
    AVInputFormat               *file_iformat = NULL;
    AVOutputFormat              *out_fmt = NULL;
    AVFrame                     *frame = NULL, *frameRGB = NULL;
    AVStream                    *st = NULL;
    AVCodecContext              *codec_ctx = NULL, *pCodecCtx = NULL;
    AVCodec                     *codec = NULL, *pCodec = NULL;
    AVCodec                     dummy_codec = {0};
    AVPacket                    pkt, p;
    AVBitStreamFilterContext    *bsf = NULL;
    struct SwsContext           *sws_ctx = NULL;
    BOOL                        tom = TRUE;
    char                        b[1024];
    int                         err, i, ret, frameFinished, numBytes;
    const char                  *src_filename = "final.mp4";
    int64_t                     timestamp;
    uint8_t                     buf[128];
    uint8_t                     *buffer = NULL;
    int                         video_stream_idx = -1;
    int                         audio_stream_idx = -1;
    FILE*                       sdp_file;
    
#ifdef _DEBUG
    _CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
#endif
    
    /* register all formats and codecs */
    av_register_all();
    avformat_network_init();
    av_log_set_level(AV_LOG_DEBUG);

    /* open input file, and allocate format context */
    ret = avformat_open_input(&in_ctx, src_filename, NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        PAUSE_EXIT(1);
    }
    in_ctx->flags |= AVFMT_FLAG_GENPTS;

    ret = avformat_find_stream_info(in_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", src_filename);
        avformat_close_input(&in_ctx);
        PAUSE_EXIT(1);
    }
    
    av_dump_format(in_ctx, 0, src_filename, 0);
    
    for (i = 0; i < in_ctx->nb_streams; i++) {
        AVStream        *st_ptr;
        AVCodecContext  *coctx_ptr;
        
        if (in_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
	  if (in_ctx->streams[i]->codec->codec_id == CODEC_ID_MPEG4) {
	      bsf = av_bitstream_filter_init("dump_extra");
	  } else if (in_ctx->streams[i]->codec->codec_id == CODEC_ID_H264) {
	      fprintf(stderr, "Found h264 Stream\n");
	      bsf = av_bitstream_filter_init("h264_mp4toannexb");
	  } else {
	      bsf = NULL;
	  }
	  pCodecCtx=in_ctx->streams[i]->codec;
	  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	  if(pCodec==NULL) {
	      fprintf(stderr, "Unsupported codec!\n");
	      return -1; // Codec not found
	  }
	  // Open codec
	  if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
	      return -1; // Could not open codec
	  
	  out_ctx = avformat_alloc_context();
	  out_fmt = av_guess_format("rtp", NULL, NULL);
	  if (!out_fmt) {
	      fprintf(stderr, "Unable for find the RTP format for output\n");
	      avformat_close_input(&in_ctx);
	      PAUSE_EXIT(1);
	  }
	  out_ctx->oformat = out_fmt;
	  //out_ctx->flags |= AVFMT_FLAG_NONBLOCK;
	  
	  st = avformat_new_stream(out_ctx, 0);
	  if (!st) {
	      fprintf(stderr, "Cannot allocate stream\n");
	      avformat_close_input(&in_ctx);
	      PAUSE_EXIT(1);
	  }
	  
	  dummy_codec.type = in_ctx->streams[i]->codec->codec_type;
	  codec_ctx = st->codec;
	  avcodec_get_context_defaults3(codec_ctx, &dummy_codec);
	  avcodec_open2(codec_ctx, NULL, NULL);
	  codec_ctx->codec_type = in_ctx->streams[i]->codec->codec_type;
	  
	  /* FIXME: global headers stuff... */
	  
	  snprintf(out_ctx->filename, sizeof(out_ctx->filename), "rtp://%s:%d", "127.0.0.1", 55444);
	  
	  /* open the UDP sockets for RTP and RTCP */
	  if (!software_streaming) {
	      printf("Distant Connection\n");
	      ret = avio_open(&out_ctx->pb, out_ctx->filename, AVIO_FLAG_WRITE);
	      if (ret < 0) {
		fprintf(stderr, "Cannot open '%s'\n", out_ctx->filename);
		avformat_close_input(&in_ctx);
		PAUSE_EXIT(1);
	      }
	  } else {
	      ret = avio_open_dyn_buf(&out_ctx->pb);
	      out_ctx->pb->max_packet_size = 1460;
	      printf("MAX packet size = %d\n",out_ctx->pb->max_packet_size);
	  }
	  st_ptr = in_ctx->streams[i];
	  coctx_ptr = st_ptr->codec;
	  
	  codec_ctx->codec_id = coctx_ptr->codec_id;
	  codec_ctx->codec_type = coctx_ptr->codec_type;
	  
	  if(!codec_ctx->codec_tag) {
	      codec_ctx->codec_tag = coctx_ptr->codec_tag;
	  }
	  codec_ctx->bit_rate = coctx_ptr->bit_rate;
	  printf("\n\n\n\nFIRE!!!!! %d %d\n\n\n\n", codec_ctx->profile, codec_ctx->level);
	  if(coctx_ptr->extradata_size) {
	      codec_ctx->extradata = (uint8_t*)av_malloc(coctx_ptr->extradata_size);
	      memcpy(codec_ctx->extradata, coctx_ptr->extradata, coctx_ptr->extradata_size);
	  } else {
	      codec_ctx->extradata = NULL;
	  }
	  
	  codec_ctx->extradata_size = coctx_ptr->extradata_size;
	  /* FIXME: ExtraData ??? */
	  if (codec_ctx->codec_id == CODEC_ID_H264) {
	      printf("BINGO\n");
	      extradata_convert(codec_ctx);
	  }
	  
	  if(out_ctx->oformat->flags & AVFMT_GLOBALHEADER)
	      codec_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
	  
	  if(av_q2d(coctx_ptr->time_base) > av_q2d(st_ptr->time_base) && av_q2d(st_ptr->time_base) < 1.0/1000) {
	      codec_ctx->time_base = coctx_ptr->time_base;
	  } else {
	      codec_ctx->time_base = st_ptr->time_base;
	  }
	  
	  switch(codec_ctx->codec_type) {
	      case AVMEDIA_TYPE_AUDIO:
		codec_ctx->sample_rate = coctx_ptr->sample_rate;
		codec_ctx->time_base.den = 1;
		codec_ctx->time_base.num = coctx_ptr->sample_rate;
		codec_ctx->channels = coctx_ptr->channels;
		codec_ctx->frame_size = coctx_ptr->frame_size;
		codec_ctx->block_align= coctx_ptr->block_align;
		
		break;
	      case AVMEDIA_TYPE_VIDEO:
		//printf("Pixel Format %d\n", coctx_ptr->pix_fmt);
		codec_ctx->pix_fmt = coctx_ptr->pix_fmt;
		codec_ctx->width = coctx_ptr->width;
		codec_ctx->height = coctx_ptr->height;
		codec_ctx->has_b_frames = coctx_ptr->has_b_frames;
		
		break;
	      default:
		fprintf(stderr, "Strange Codec Type %d\n", codec_ctx->codec_type);
		PAUSE_EXIT(1);
	  }
	  
	  
	  ret = avformat_write_header(out_ctx, NULL);
	  if (ret < 0) {
	      fprintf(stderr, "Cannot Initialize output stream %d\n", i);
	      //close_output(rtp_c->out_s[i]);
	      
	      continue;
	  }
	  av_dump_format(out_ctx, i, out_ctx->filename, 1);
        }
    }
    
    frame = avcodec_alloc_frame();
    frameRGB = avcodec_alloc_frame();
    
    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
			  pCodecCtx->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    printf("Allocated %d", numBytes);
    
    sws_ctx = sws_getContext (
     pCodecCtx->width,
     pCodecCtx->height,
     pCodecCtx->pix_fmt,
     pCodecCtx->width,
     pCodecCtx->height,
     PIX_FMT_RGB24,
     SWS_BILINEAR,
     NULL,
     NULL,
     NULL
     );
    
    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)frameRGB, buffer, PIX_FMT_RGB24,
		 pCodecCtx->width, pCodecCtx->height);
    
    av_sdp_create(&out_ctx,1,b,1024);
    printf("SDP : \n%s", b);
    sdp_file = fopen("rtp.sdp","w");
    fprintf(sdp_file, "%s",b);
    fclose(sdp_file);

    i = 0;
    av_init_packet(&pkt);
    av_init_packet(&p);
    printf("Payload Size %d\n", *(uint8_t *)out_ctx->streams[0]->codec->extradata != 1);
    while (av_read_frame(in_ctx, &pkt) >= 0) {
        if (pkt.stream_index == 0) {
	  int res;
	  uint8_t *ptr;
	  uint16_t ptr16;
	  
	  if (avcodec_decode_video2(pCodecCtx, frame, &frameFinished, &pkt) < 0 ) {
	      fprintf(stderr, "Error decoding packet\n");
	  }
	   
	  /* if(frameFinished) {
	  // Convert the image from its native format to RGB
	  sws_scale
	  (
	  sws_ctx,
	  (uint8_t const * const *)frame->data,
	  frame->linesize,
	  0,
	  pCodecCtx->height,
	  frameRGB->data,
	  frameRGB->linesize
	  );
	   
	  // Save the frame to disk
	  if(++i<=5)
	      SaveFrame(frameRGB, pCodecCtx->width, pCodecCtx->height, i);
	  }*/
	  printf("PTS %lld DTS%lld\n",pkt.pts,pkt.dts);
	  printf("Got frame %s %d %s\n",STRING_BOOL(frameFinished), pkt.size, STRING_BOOL(pkt.flags & AV_PKT_FLAG_KEY));
	  //break;
	  /*ret = av_bitstream_filter_filter(bsf,
	   in_ctx->streams[pkt.stream_index]->codec,
	   NULL, &p.data, &p.size,
	   pkt.data, pkt.size, pkt.flags & AV_PKT_FLAG_KEY);
	   if(ret > 0) {
	   av_free_packet(&pkt);
	   p.destruct = av_destruct_packet;
	   } else if (ret < 0) {
	   fprintf(stderr, "%s failed for stream %d, codec %s: ",
	   bsf->filter->name,
	   pkt.stream_index,
	   in_ctx->streams[pkt.stream_index]->codec->codec->name);
	   fprintf(stderr, "%d\n", ret);
	   }
	   pkt = p;*/
	  
	  stream_convert(&pkt);
	  printf("pkt size %d %d\n",pkt.size, pkt.flags);
	  av_usleep(4000000);
	  
	  if (av_write_frame(out_ctx, &pkt) < 0)
	      printf("MITSOS eisai!!!!\n");
	  
	  int written_size = avio_close_dyn_buf(out_ctx->pb,&ptr);
	  printf("Written Size %d\n", written_size);
	  ((uint8_t*)&ptr16)[0] = *(ptr+2);
	  ((uint8_t*)&ptr16)[1] = *(ptr+3);
	  printf("CC adsasd%d\n", ptr16 );
	  printByte(ptr);
	  printByte(ptr+1);
	  //printf("Second Byte %d\n", *(ptr+1));
	  
	  parseStream(ptr, written_size);
	  
	  printf("Version %d\n",(*(ptr) & 0xC0) >> 6);
	  printf("Padding %d\n",(*(ptr) & 0x20) >  0);
	  printf("Ext %d\n",(*(ptr) & 0x10) >  0);
	  printf("CC %d\n",(*(ptr) & 0xF));
	  printf("Marker %d\n",(*(ptr+1) & 0x80) > 0);
	  printf("Type %u\n",(*(ptr+1)));
	  printf("Seq %d\n",(*((uint16_t*)((uint8_t*)ptr+2))));
	  av_free(ptr);  /* the buffer returned by avio_close_dyn_buf() must be freed by the caller */
	  ret = avio_open_dyn_buf(&out_ctx->pb);
	  out_ctx->pb->max_packet_size = 1514;
        }
        
        av_free_packet(&pkt);
    }
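
The hand-rolled RTP header prints in the loop above can be collected into a small helper. The following is only an illustrative, self-contained sketch (not part of the original source); it extracts the same fixed-header fields and reads the sequence number in network byte order:

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: dump the RTP fixed-header fields of a packet buffer.
 * `buf` must point to at least 4 bytes of an RTP packet. */
static void dump_rtp_header(const uint8_t *buf)
{
    printf("Version %d\n", (buf[0] & 0xC0) >> 6);
    printf("Padding %d\n", (buf[0] & 0x20) != 0);
    printf("Ext     %d\n", (buf[0] & 0x10) != 0);
    printf("CC      %d\n",  buf[0] & 0x0F);
    printf("Marker  %d\n", (buf[1] & 0x80) != 0);
    printf("Type    %d\n",  buf[1] & 0x7F);
    printf("Seq     %d\n", (buf[2] << 8) | buf[3]);  /* big-endian on the wire */
}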
Beispiel #12
0
static int rm_read_header(AVFormatContext *s)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;
    AVIOContext *pb = s->pb;
    unsigned int tag;
    int tag_size;
    unsigned int start_time, duration;
    unsigned int data_off = 0, indx_off = 0;
    char buf[128], mime[128];
    int flags = 0;

    tag = avio_rl32(pb);
    if (tag == MKTAG('.', 'r', 'a', 0xfd)) {
        /* very old .ra format */
        return rm_read_header_old(s);
    } else if (tag != MKTAG('.', 'R', 'M', 'F')) {
        return AVERROR(EIO);
    }

    tag_size = avio_rb32(pb);
    avio_skip(pb, tag_size - 8);

    for(;;) {
        if (url_feof(pb))
            return -1;
        tag = avio_rl32(pb);
        tag_size = avio_rb32(pb);
        avio_rb16(pb);
        av_dlog(s, "tag=%c%c%c%c (%08x) size=%d\n",
                (tag      ) & 0xff,
                (tag >>  8) & 0xff,
                (tag >> 16) & 0xff,
                (tag >> 24) & 0xff,
                tag,
                tag_size);
        if (tag_size < 10 && tag != MKTAG('D', 'A', 'T', 'A'))
            return -1;
        switch(tag) {
        case MKTAG('P', 'R', 'O', 'P'):
            /* file header */
            avio_rb32(pb); /* max bit rate */
            avio_rb32(pb); /* avg bit rate */
            avio_rb32(pb); /* max packet size */
            avio_rb32(pb); /* avg packet size */
            avio_rb32(pb); /* nb packets */
            duration = avio_rb32(pb); /* duration */
            s->duration = av_rescale(duration, AV_TIME_BASE, 1000);
            avio_rb32(pb); /* preroll */
            indx_off = avio_rb32(pb); /* index offset */
            data_off = avio_rb32(pb); /* data offset */
            avio_rb16(pb); /* nb streams */
            flags = avio_rb16(pb); /* flags */
            break;
        case MKTAG('C', 'O', 'N', 'T'):
            rm_read_metadata(s, pb, 1);
            break;
        case MKTAG('M', 'D', 'P', 'R'):
            st = avformat_new_stream(s, NULL);
            if (!st)
                return AVERROR(ENOMEM);
            st->id = avio_rb16(pb);
            avio_rb32(pb); /* max bit rate */
            st->codec->bit_rate = avio_rb32(pb); /* bit rate */
            avio_rb32(pb); /* max packet size */
            avio_rb32(pb); /* avg packet size */
            start_time = avio_rb32(pb); /* start time */
            avio_rb32(pb); /* preroll */
            duration = avio_rb32(pb); /* duration */
            st->start_time = start_time;
            st->duration = duration;
            if(duration>0)
                s->duration = AV_NOPTS_VALUE;
            get_str8(pb, buf, sizeof(buf)); /* desc */
            get_str8(pb, mime, sizeof(mime)); /* mimetype */
            st->codec->codec_type = AVMEDIA_TYPE_DATA;
            st->priv_data = ff_rm_alloc_rmstream();
            if (ff_rm_read_mdpr_codecdata(s, s->pb, st, st->priv_data,
                                          avio_rb32(pb), mime) < 0)
                return -1;
            break;
        case MKTAG('D', 'A', 'T', 'A'):
            goto header_end;
        default:
            /* unknown tag: skip it */
            avio_skip(pb, tag_size - 10);
            break;
        }
    }
 header_end:
    rm->nb_packets = avio_rb32(pb); /* number of packets */
    if (!rm->nb_packets && (flags & 4))
        rm->nb_packets = 3600 * 25;
    avio_rb32(pb); /* next data header */

    if (!data_off)
        data_off = avio_tell(pb) - 18;
    if (indx_off && pb->seekable && !(s->flags & AVFMT_FLAG_IGNIDX) &&
        avio_seek(pb, indx_off, SEEK_SET) >= 0) {
        rm_read_index(s);
        avio_seek(pb, data_off + 18, SEEK_SET);
    }

    return 0;
}
Beispiel #13
0
static int read_frame(BVID_DemuxContext *vid, AVIOContext *pb, AVPacket *pkt,
                      uint8_t block_type, AVFormatContext *s)
{
    uint8_t * vidbuf_start = NULL;
    int vidbuf_nbytes = 0;
    int code;
    int bytes_copied = 0;
    int position, duration, npixels;
    unsigned int vidbuf_capacity;
    int ret = 0;
    AVStream *st;

    if (vid->video_index < 0) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        vid->video_index = st->index;
        if (vid->audio_index < 0) {
            av_log_ask_for_sample(s, "No audio packet before first video "
                                  "packet. Using default video time base.\n");
        }
        avpriv_set_pts_info(st, 64, 185, vid->sample_rate);
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = CODEC_ID_BETHSOFTVID;
        st->codec->width      = vid->width;
        st->codec->height     = vid->height;
    }
    st      = s->streams[vid->video_index];
    npixels = st->codec->width * st->codec->height;

    vidbuf_start = av_malloc(vidbuf_capacity = BUFFER_PADDING_SIZE);
    if(!vidbuf_start)
        return AVERROR(ENOMEM);

    // save the file position for the packet, include block type
    position = avio_tell(pb) - 1;

    vidbuf_start[vidbuf_nbytes++] = block_type;

    // get the current packet duration
    duration = vid->bethsoft_global_delay + avio_rl16(pb);

    // set the y offset if it exists (decoder header data should be in data section)
    if(block_type == VIDEO_YOFF_P_FRAME){
        if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], 2) != 2) {
            ret = AVERROR(EIO);
            goto fail;
        }
        vidbuf_nbytes += 2;
    }

    do{
        vidbuf_start = av_fast_realloc(vidbuf_start, &vidbuf_capacity, vidbuf_nbytes + BUFFER_PADDING_SIZE);
        if(!vidbuf_start)
            return AVERROR(ENOMEM);

        code = avio_r8(pb);
        vidbuf_start[vidbuf_nbytes++] = code;

        if(code >= 0x80){ // rle sequence
            if(block_type == VIDEO_I_FRAME)
                vidbuf_start[vidbuf_nbytes++] = avio_r8(pb);
        } else if(code){ // plain sequence
            if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], code) != code) {
                ret = AVERROR(EIO);
                goto fail;
            }
            vidbuf_nbytes += code;
        }
        bytes_copied += code & 0x7F;
        if(bytes_copied == npixels){ // sometimes no stop character is given, need to keep track of bytes copied
            // may contain a 0 byte even if read all pixels
            if(avio_r8(pb))
                avio_seek(pb, -1, SEEK_CUR);
            break;
        }
        if (bytes_copied > npixels) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
    } while(code);

    // copy data into packet
    if ((ret = av_new_packet(pkt, vidbuf_nbytes)) < 0)
        goto fail;
    memcpy(pkt->data, vidbuf_start, vidbuf_nbytes);
    av_free(vidbuf_start);

    pkt->pos = position;
    pkt->stream_index = vid->video_index;
    pkt->duration = duration;
    if (block_type == VIDEO_I_FRAME)
        pkt->flags |= AV_PKT_FLAG_KEY;

    /* if there is a new palette available, add it to packet side data */
    if (vid->palette) {
        uint8_t *pdata = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                                 BVID_PALETTE_SIZE);
        memcpy(pdata, vid->palette, BVID_PALETTE_SIZE);
        av_freep(&vid->palette);
    }

    vid->nframes--;  // used to check if all the frames were read
    return 0;
fail:
    av_free(vidbuf_start);
    return ret;
}
Beispiel #14
0
static int vid_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    BVID_DemuxContext *vid = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char block_type;
    int audio_length;
    int ret_value;

    if(vid->is_finished || url_feof(pb))
        return AVERROR(EIO);

    block_type = avio_r8(pb);
    switch(block_type){
        case PALETTE_BLOCK:
            if (vid->palette) {
                av_log(s, AV_LOG_WARNING, "discarding unused palette\n");
                av_freep(&vid->palette);
            }
            vid->palette = av_malloc(BVID_PALETTE_SIZE);
            if (!vid->palette)
                return AVERROR(ENOMEM);
            if (avio_read(pb, vid->palette, BVID_PALETTE_SIZE) != BVID_PALETTE_SIZE) {
                av_freep(&vid->palette);
                return AVERROR(EIO);
            }
            return vid_read_packet(s, pkt);

        case FIRST_AUDIO_BLOCK:
            avio_rl16(pb);
            // soundblaster DAC used for sample rate, as on specification page (link above)
            vid->sample_rate = 1000000 / (256 - avio_r8(pb));
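            // fall through: handle this packet's audio payload like a normal AUDIO_BLOCK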
        case AUDIO_BLOCK:
            if (vid->audio_index < 0) {
                AVStream *st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);
                vid->audio_index                 = st->index;
                st->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id              = CODEC_ID_PCM_U8;
                st->codec->channels              = 1;
                st->codec->bits_per_coded_sample = 8;
                st->codec->sample_rate           = vid->sample_rate;
                st->codec->bit_rate              = 8 * st->codec->sample_rate;
                st->start_time                   = 0;
                avpriv_set_pts_info(st, 64, 1, vid->sample_rate);
            }
            audio_length = avio_rl16(pb);
            if ((ret_value = av_get_packet(pb, pkt, audio_length)) != audio_length) {
                if (ret_value < 0)
                    return ret_value;
                av_log(s, AV_LOG_ERROR, "incomplete audio block\n");
                return AVERROR(EIO);
            }
            pkt->stream_index = vid->audio_index;
            pkt->duration     = audio_length;
            pkt->flags |= AV_PKT_FLAG_KEY;
            return 0;

        case VIDEO_P_FRAME:
        case VIDEO_YOFF_P_FRAME:
        case VIDEO_I_FRAME:
            return read_frame(vid, pb, pkt, block_type, s);

        case EOF_BLOCK:
            if(vid->nframes != 0)
                av_log(s, AV_LOG_VERBOSE, "reached terminating character but not all frames read.\n");
            vid->is_finished = 1;
            return AVERROR(EIO);
        default:
            av_log(s, AV_LOG_ERROR, "unknown block (character = %c, decimal = %d, hex = %x)!!!\n",
                   block_type, block_type, block_type);
            return AVERROR_INVALIDDATA;
    }
}
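
The FIRST_AUDIO_BLOCK branch above derives the sample rate from a Sound Blaster DAC time constant. A tiny standalone sketch of that arithmetic (the time-constant value is made up for illustration):

#include <stdio.h>

int main(void)
{
    int tc = 211;                              /* hypothetical time constant read from the file */
    int sample_rate = 1000000 / (256 - tc);    /* same formula as in vid_read_packet() */
    printf("time constant %d -> %d Hz\n", tc, sample_rate);  /* 22222 Hz */
    return 0;
}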
Beispiel #15
0
int ff_spdif_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIOContext *pb = s->pb;
    enum IEC61937DataType data_type;
    enum AVCodecID codec_id;
    uint32_t state = 0;
    int pkt_size_bits, offset, ret;

    while (state != (AV_BSWAP16C(SYNCWORD1) << 16 | AV_BSWAP16C(SYNCWORD2))) {
        state = (state << 8) | avio_r8(pb);
        if (url_feof(pb))
            return AVERROR_EOF;
    }

    data_type = avio_rl16(pb);
    pkt_size_bits = avio_rl16(pb);

    if (pkt_size_bits % 16)
        avpriv_request_sample(s, "Packet not ending at a 16-bit boundary");

    ret = av_new_packet(pkt, FFALIGN(pkt_size_bits, 16) >> 3);
    if (ret)
        return ret;

    pkt->pos = avio_tell(pb) - BURST_HEADER_SIZE;

    if (avio_read(pb, pkt->data, pkt->size) < pkt->size) {
        av_free_packet(pkt);
        return AVERROR_EOF;
    }
    ff_spdif_bswap_buf16((uint16_t *)pkt->data, (uint16_t *)pkt->data, pkt->size >> 1);

    ret = spdif_get_offset_and_codec(s, data_type, pkt->data,
                                     &offset, &codec_id);
    if (ret) {
        av_free_packet(pkt);
        return ret;
    }

    /* skip over the padding to the beginning of the next frame */
    avio_skip(pb, offset - pkt->size - BURST_HEADER_SIZE);

    if (!s->nb_streams) {
        /* first packet, create a stream */
        AVStream *st = avformat_new_stream(s, NULL);
        if (!st) {
            av_free_packet(pkt);
            return AVERROR(ENOMEM);
        }
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = codec_id;
    } else if (codec_id != s->streams[0]->codec->codec_id) {
        avpriv_report_missing_feature(s, "Codec change in IEC 61937");
        return AVERROR_PATCHWELCOME;
    }

    if (!s->bit_rate && s->streams[0]->codec->sample_rate)
        /* stream bitrate matches 16-bit stereo PCM bitrate for currently
           supported codecs */
        s->bit_rate = 2 * 16 * s->streams[0]->codec->sample_rate;

    return 0;
}
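
ff_spdif_read_packet() sizes the packet from a bit count rounded up to a 16-bit boundary with FFALIGN(pkt_size_bits, 16) >> 3. A minimal standalone sketch of that size arithmetic (the burst length is made up):

#include <stdio.h>

#define ALIGN16(x) (((x) + 15) & ~15)   /* what FFALIGN(x, 16) expands to */

int main(void)
{
    int pkt_size_bits  = 2005;                        /* hypothetical burst length in bits */
    int pkt_size_bytes = ALIGN16(pkt_size_bits) >> 3; /* round up to 16 bits, then bits -> bytes */
    printf("%d bits -> %d bytes\n", pkt_size_bits, pkt_size_bytes);  /* 2005 bits -> 252 bytes */
    return 0;
}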
Beispiel #16
0
static int init(struct dec_audio *da, const char *decoder)
{
    struct spdifContext *spdif_ctx = talloc_zero(NULL, struct spdifContext);
    da->priv = spdif_ctx;
    spdif_ctx->log = da->log;

    AVFormatContext *lavf_ctx  = avformat_alloc_context();
    if (!lavf_ctx)
        goto fail;

    lavf_ctx->oformat = av_guess_format("spdif", NULL, NULL);
    if (!lavf_ctx->oformat)
        goto fail;

    spdif_ctx->lavf_ctx = lavf_ctx;

    void *buffer = av_mallocz(OUTBUF_SIZE);
    if (!buffer)
        abort();
    lavf_ctx->pb = avio_alloc_context(buffer, OUTBUF_SIZE, 1, spdif_ctx, NULL,
                                      write_packet, NULL);
    if (!lavf_ctx->pb) {
        av_free(buffer);
        goto fail;
    }

    // Request minimal buffering (not available on Libav)
#if LIBAVFORMAT_VERSION_MICRO >= 100
    lavf_ctx->pb->direct = 1;
#endif

    AVStream *stream = avformat_new_stream(lavf_ctx, 0);
    if (!stream)
        goto fail;

    stream->codec->codec_id = mp_codec_to_av_codec_id(decoder);

    AVDictionary *format_opts = NULL;

    int num_channels = 0;
    int sample_format = 0;
    int samplerate = 0;
    switch (stream->codec->codec_id) {
    case AV_CODEC_ID_AAC:
        sample_format                   = AF_FORMAT_S_AAC;
        samplerate                      = 48000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_AC3:
        sample_format                   = AF_FORMAT_S_AC3;
        samplerate                      = 48000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_DTS:
        if (da->opts->dtshd) {
            av_dict_set(&format_opts, "dtshd_rate", "768000", 0); // 4*192000
            sample_format                   = AF_FORMAT_S_DTSHD;
            samplerate                      = 192000;
            num_channels                    = 2*4;
        } else {
            sample_format                   = AF_FORMAT_S_DTS;
            samplerate                      = 48000;
            num_channels                    = 2;
        }
        break;
    case AV_CODEC_ID_EAC3:
        sample_format                   = AF_FORMAT_S_EAC3;
        samplerate                      = 192000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_MP3:
        sample_format                   = AF_FORMAT_S_MP3;
        samplerate                      = 48000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_TRUEHD:
        sample_format                   = AF_FORMAT_S_TRUEHD;
        samplerate                      = 192000;
        num_channels                    = 8;
        break;
    default:
        abort();
    }
    mp_audio_set_num_channels(&spdif_ctx->fmt, num_channels);
    mp_audio_set_format(&spdif_ctx->fmt, sample_format);
    spdif_ctx->fmt.rate = samplerate;

    if (avformat_write_header(lavf_ctx, &format_opts) < 0) {
        MP_FATAL(da, "libavformat spdif initialization failed.\n");
        av_dict_free(&format_opts);
        goto fail;
    }
    av_dict_free(&format_opts);

    spdif_ctx->need_close = true;

    return 1;

fail:
    uninit(da);
    return 0;
}
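
The write_packet callback handed to avio_alloc_context() above is not part of this excerpt. A minimal compilable stub with the expected callback signature could look like the sketch below; a real implementation would append the muxed IEC 61937 data to the decoder's own output buffer rather than discarding it:

#include <stdint.h>

static int write_packet(void *opaque, uint8_t *buf, int buf_size)
{
    /* `opaque` is the spdifContext passed to avio_alloc_context(); a real
     * implementation copies buf[0..buf_size) into its output FIFO here. */
    (void)opaque;
    (void)buf;
    return buf_size;   /* report the whole buffer as consumed */
}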
Beispiel #17
0
static int ape_tag_read_field(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    uint8_t key[1024], *value;
    uint32_t size, flags;
    int i, c;

    size = avio_rl32(pb);  /* field size */
    flags = avio_rl32(pb); /* field flags */
    for (i = 0; i < sizeof(key) - 1; i++) {
        c = avio_r8(pb);
        if (c < 0x20 || c > 0x7E)
            break;
        else
            key[i] = c;
    }
    key[i] = 0;
    if (c != 0) {
        av_log(s, AV_LOG_WARNING, "Invalid APE tag key '%s'.\n", key);
        return -1;
    }
    if (size >= UINT_MAX)
        return -1;
    if (flags & APE_TAG_FLAG_IS_BINARY) {
        uint8_t filename[1024];
        enum AVCodecID id;
        AVStream *st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);

        size -= avio_get_str(pb, size, filename, sizeof(filename));
        if (size <= 0) {
            av_log(s, AV_LOG_WARNING, "Skipping binary tag '%s'.\n", key);
            return 0;
        }

        av_dict_set(&st->metadata, key, filename, 0);

        if ((id = ff_guess_image2_codec(filename)) != AV_CODEC_ID_NONE) {
            AVPacket pkt;
            int ret;

            ret = av_get_packet(s->pb, &pkt, size);
            if (ret < 0) {
                av_log(s, AV_LOG_ERROR, "Error reading cover art.\n");
                return ret;
            }

            st->disposition      |= AV_DISPOSITION_ATTACHED_PIC;
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id   = id;

            st->attached_pic              = pkt;
            st->attached_pic.stream_index = st->index;
            st->attached_pic.flags       |= AV_PKT_FLAG_KEY;
        } else {
            if (ff_alloc_extradata(st->codec, size))
                return AVERROR(ENOMEM);
            if (avio_read(pb, st->codec->extradata, size) != size) {
                av_freep(&st->codec->extradata);
                st->codec->extradata_size = 0;
                return AVERROR(EIO);
            }
            st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT;
        }
    } else {
        value = av_malloc(size+1);
        if (!value)
            return AVERROR(ENOMEM);
        c = avio_read(pb, value, size);
        if (c < 0) {
            av_free(value);
            return c;
        }
        value[c] = 0;
        av_dict_set(&s->metadata, key, value, AV_DICT_DONT_STRDUP_VAL);
    }
    return 0;
}
Beispiel #18
0
static int init(sh_audio_t *sh)
{
    int i, x, in_size, srate, bps, *dtshd_rate;
    unsigned char *start;
    double pts;
    static const struct {
        const char *name;
        enum AVCodecID id;
    } fmt_id_type[] = {
        { "aac" , AV_CODEC_ID_AAC    },
        { "ac3" , AV_CODEC_ID_AC3    },
        { "dca" , AV_CODEC_ID_DTS    },
        { "eac3", AV_CODEC_ID_EAC3   },
        { "mpa" , AV_CODEC_ID_MP3    },
        { "thd" , AV_CODEC_ID_TRUEHD },
        { NULL  , 0 }
    };
    AVFormatContext     *lavf_ctx  = NULL;
    AVStream            *stream    = NULL;
    const AVOption      *opt       = NULL;
    struct spdifContext *spdif_ctx = NULL;

    spdif_ctx = av_mallocz(sizeof(*spdif_ctx));
    if (!spdif_ctx)
        goto fail;
    spdif_ctx->lavf_ctx = avformat_alloc_context();
    if (!spdif_ctx->lavf_ctx)
        goto fail;

    sh->context = spdif_ctx;
    lavf_ctx    = spdif_ctx->lavf_ctx;

    init_avformat();
    lavf_ctx->oformat = av_guess_format(FILENAME_SPDIFENC, NULL, NULL);
    if (!lavf_ctx->oformat)
        goto fail;
    lavf_ctx->priv_data = av_mallocz(lavf_ctx->oformat->priv_data_size);
    if (!lavf_ctx->priv_data)
        goto fail;
    lavf_ctx->pb = avio_alloc_context(spdif_ctx->pb_buffer, OUTBUF_SIZE, 1, spdif_ctx,
                                      read_packet, write_packet, seek);
    if (!lavf_ctx->pb)
        goto fail;
    stream = avformat_new_stream(lavf_ctx, 0);
    if (!stream)
        goto fail;
    lavf_ctx->duration   = AV_NOPTS_VALUE;
    lavf_ctx->start_time = AV_NOPTS_VALUE;
    for (i = 0; fmt_id_type[i].name; i++) {
        if (!strcmp(sh->codec->dll, fmt_id_type[i].name)) {
            lavf_ctx->streams[0]->codec->codec_id = fmt_id_type[i].id;
            break;
        }
    }
    lavf_ctx->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
    if (AVERROR_PATCHWELCOME == lavf_ctx->oformat->write_header(lavf_ctx)) {
        mp_msg(MSGT_DECAUDIO,MSGL_INFO,
               "This codec is not supported by spdifenc.\n");
        goto fail;
    }

    // get sample_rate & bitrate from parser
    x = ds_get_packet_pts(sh->ds, &start, &pts);
    in_size = x;
    if (x <= 0) {
        pts = MP_NOPTS_VALUE;
        x = 0;
    }
    ds_parse(sh->ds, &start, &x, pts, 0);
    srate = 48000;    //fake value
    bps   = 768000/8; //fake value
    if (x && sh->avctx) { // we have parser and large enough buffer
        if (sh->avctx->sample_rate < 44100) {
            mp_msg(MSGT_DECAUDIO,MSGL_INFO,
                   "This stream sample_rate[%d Hz] may be broken. "
                   "Force reset 48000Hz.\n",
                   sh->avctx->sample_rate);
            srate = 48000; //fake value
        } else
            srate = sh->avctx->sample_rate;
        bps = sh->avctx->bit_rate/8;
    }
    sh->ds->buffer_pos -= in_size;

    switch (lavf_ctx->streams[0]->codec->codec_id) {
    case AV_CODEC_ID_AAC:
        spdif_ctx->iec61937_packet_size = 16384;
        sh->sample_format               = AF_FORMAT_IEC61937_LE;
        sh->samplerate                  = srate;
        sh->channels                    = 2;
        sh->i_bps                       = bps;
        break;
    case AV_CODEC_ID_AC3:
        spdif_ctx->iec61937_packet_size = 6144;
        sh->sample_format               = AF_FORMAT_AC3_LE;
        sh->samplerate                  = srate;
        sh->channels                    = 2;
        sh->i_bps                       = bps;
        break;
    case AV_CODEC_ID_DTS: // FORCE USE DTS-HD
        opt = av_opt_find(&lavf_ctx->oformat->priv_class,
                          "dtshd_rate", NULL, 0, 0);
        if (!opt)
            goto fail;
        dtshd_rate                      = (int*)(((uint8_t*)lavf_ctx->priv_data) +
                                          opt->offset);
        *dtshd_rate                     = 192000*4;
        spdif_ctx->iec61937_packet_size = 32768;
        sh->sample_format               = AF_FORMAT_IEC61937_LE;
        sh->samplerate                  = 192000; // DTS core requires 48000
        sh->channels                    = 2*4;
        sh->i_bps                       = bps;
        break;
    case AV_CODEC_ID_EAC3:
        spdif_ctx->iec61937_packet_size = 24576;
        sh->sample_format               = AF_FORMAT_IEC61937_LE;
        sh->samplerate                  = 192000;
        sh->channels                    = 2;
        sh->i_bps                       = bps;
        break;
    case AV_CODEC_ID_MP3:
        spdif_ctx->iec61937_packet_size = 4608;
        sh->sample_format               = AF_FORMAT_MPEG2;
        sh->samplerate                  = srate;
        sh->channels                    = 2;
        sh->i_bps                       = bps;
        break;
    case AV_CODEC_ID_TRUEHD:
        spdif_ctx->iec61937_packet_size = 61440;
        sh->sample_format               = AF_FORMAT_IEC61937_LE;
        sh->samplerate                  = 192000;
        sh->channels                    = 8;
        sh->i_bps                       = bps;
        break;
    default:
        break;
    }

    return 1;

fail:
    uninit(sh);
    return 0;
}
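
Side note on the dtshd_rate handling in init() above: locating the AVOption with av_opt_find() and writing through its offset works, but the same setting can be expressed with the public option API, as the LAV Audio example later in this collection does. This one-line fragment would replace the av_opt_find()/offset code inside init():

    /* equivalent to the av_opt_find()/offset write above */
    av_opt_set_int(lavf_ctx->priv_data, "dtshd_rate", 192000 * 4, 0);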
Beispiel #19
0
static int vfw_read_header(AVFormatContext *s)
{
    struct vfw_ctx *ctx = s->priv_data;
    AVCodecParameters *par;
    AVStream *st;
    int devnum;
    int bisize;
    BITMAPINFO *bi = NULL;
    CAPTUREPARMS cparms;
    DWORD biCompression;
    WORD biBitCount;
    int ret;
    AVRational framerate_q;

    if (!strcmp(s->filename, "list")) {
        for (devnum = 0; devnum <= 9; devnum++) {
            char driver_name[256];
            char driver_ver[256];
            ret = capGetDriverDescription(devnum,
                                          driver_name, sizeof(driver_name),
                                          driver_ver, sizeof(driver_ver));
            if (ret) {
                av_log(s, AV_LOG_INFO, "Driver %d\n", devnum);
                av_log(s, AV_LOG_INFO, " %s\n", driver_name);
                av_log(s, AV_LOG_INFO, " %s\n", driver_ver);
            }
        }
        return AVERROR(EIO);
    }

    ctx->hwnd = capCreateCaptureWindow(NULL, 0, 0, 0, 0, 0, HWND_MESSAGE, 0);
    if(!ctx->hwnd) {
        av_log(s, AV_LOG_ERROR, "Could not create capture window.\n");
        return AVERROR(EIO);
    }

    /* If atoi fails, devnum==0 and the default device is used */
    devnum = atoi(s->filename);

    ret = SendMessage(ctx->hwnd, WM_CAP_DRIVER_CONNECT, devnum, 0);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not connect to device.\n");
        DestroyWindow(ctx->hwnd);
        return AVERROR(ENODEV);
    }

    SendMessage(ctx->hwnd, WM_CAP_SET_OVERLAY, 0, 0);
    SendMessage(ctx->hwnd, WM_CAP_SET_PREVIEW, 0, 0);

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0,
                      (LPARAM) videostream_cb);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not set video stream callback.\n");
        goto fail;
    }

    SetWindowLongPtr(ctx->hwnd, GWLP_USERDATA, (LONG_PTR) s);

    st = avformat_new_stream(s, NULL);
    if(!st) {
        vfw_read_close(s);
        return AVERROR(ENOMEM);
    }

    /* Set video format */
    bisize = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, 0, 0);
    if(!bisize)
        goto fail;
    bi = av_malloc(bisize);
    if(!bi) {
        vfw_read_close(s);
        return AVERROR(ENOMEM);
    }
    ret = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, bisize, (LPARAM) bi);
    if(!ret)
        goto fail;

    dump_bih(s, &bi->bmiHeader);

    ret = av_parse_video_rate(&framerate_q, ctx->framerate);
    if (ret < 0) {
        av_log(s, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", ctx->framerate);
        goto fail;
    }

    if (ctx->video_size) {
        ret = av_parse_video_size(&bi->bmiHeader.biWidth, &bi->bmiHeader.biHeight, ctx->video_size);
        if (ret < 0) {
            av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n");
            goto fail;
        }
    }

    if (0) {
        /* For testing yet unsupported compressions
         * Copy these values from user-supplied verbose information */
        bi->bmiHeader.biWidth       = 320;
        bi->bmiHeader.biHeight      = 240;
        bi->bmiHeader.biPlanes      = 1;
        bi->bmiHeader.biBitCount    = 12;
        bi->bmiHeader.biCompression = MKTAG('I','4','2','0');
        bi->bmiHeader.biSizeImage   = 115200;
        dump_bih(s, &bi->bmiHeader);
    }

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_VIDEOFORMAT, bisize, (LPARAM) bi);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not set Video Format.\n");
        goto fail;
    }

    biCompression = bi->bmiHeader.biCompression;
    biBitCount = bi->bmiHeader.biBitCount;

    /* Set sequence setup */
    ret = SendMessage(ctx->hwnd, WM_CAP_GET_SEQUENCE_SETUP, sizeof(cparms),
                      (LPARAM) &cparms);
    if(!ret)
        goto fail;

    dump_captureparms(s, &cparms);

    cparms.fYield = 1; // Spawn a background thread
    cparms.dwRequestMicroSecPerFrame =
                               (framerate_q.den*1000000) / framerate_q.num;
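    /* e.g. a 25/1 frame rate requests (1 * 1000000) / 25 = 40000 microseconds per frame */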
    cparms.fAbortLeftMouse = 0;
    cparms.fAbortRightMouse = 0;
    cparms.fCaptureAudio = 0;
    cparms.vKeyAbort = 0;

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_SEQUENCE_SETUP, sizeof(cparms),
                      (LPARAM) &cparms);
    if(!ret)
        goto fail;

    st->avg_frame_rate = framerate_q;

    par = st->codecpar;
    par->codec_type = AVMEDIA_TYPE_VIDEO;
    par->width  = bi->bmiHeader.biWidth;
    par->height = bi->bmiHeader.biHeight;
    par->format = vfw_pixfmt(biCompression, biBitCount);
    if (par->format == AV_PIX_FMT_NONE) {
        par->codec_id = vfw_codecid(biCompression);
        if (par->codec_id == AV_CODEC_ID_NONE) {
            avpriv_report_missing_feature(s, "This compression type");
            vfw_read_close(s);
            return AVERROR_PATCHWELCOME;
        }
        par->bits_per_coded_sample = biBitCount;
    } else {
        par->codec_id = AV_CODEC_ID_RAWVIDEO;
        if(biCompression == BI_RGB) {
            par->bits_per_coded_sample = biBitCount;
            par->extradata = av_malloc(9 + AV_INPUT_BUFFER_PADDING_SIZE);
            if (par->extradata) {
                par->extradata_size = 9;
                memcpy(par->extradata, "BottomUp", 9);
            }
        }
    }

    av_freep(&bi);

    avpriv_set_pts_info(st, 32, 1, 1000);

    ctx->mutex = CreateMutex(NULL, 0, NULL);
    if(!ctx->mutex) {
        av_log(s, AV_LOG_ERROR, "Could not create Mutex.\n" );
        goto fail;
    }
    ctx->event = CreateEvent(NULL, 1, 0, NULL);
    if(!ctx->event) {
        av_log(s, AV_LOG_ERROR, "Could not create Event.\n" );
        goto fail;
    }

    ret = SendMessage(ctx->hwnd, WM_CAP_SEQUENCE_NOFILE, 0, 0);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not start capture sequence.\n" );
        goto fail;
    }

    return 0;

fail:
    av_freep(&bi);
    vfw_read_close(s);
    return AVERROR(EIO);
}
Beispiel #20
0
static int ape_read_header(AVFormatContext * s)
{
    AVIOContext *pb = s->pb;
    APEContext *ape = s->priv_data;
    AVStream *st;
    uint32_t tag;
    int i;
    int total_blocks, final_size = 0;
    int64_t pts, file_size;

    /* Skip any leading junk such as id3v2 tags */
    ape->junklength = avio_tell(pb);

    tag = avio_rl32(pb);
    if (tag != MKTAG('M', 'A', 'C', ' '))
        return -1;

    ape->fileversion = avio_rl16(pb);

    if (ape->fileversion < APE_MIN_VERSION || ape->fileversion > APE_MAX_VERSION) {
        av_log(s, AV_LOG_ERROR, "Unsupported file version - %d.%02d\n",
               ape->fileversion / 1000, (ape->fileversion % 1000) / 10);
        return -1;
    }

    if (ape->fileversion >= 3980) {
        ape->padding1             = avio_rl16(pb);
        ape->descriptorlength     = avio_rl32(pb);
        ape->headerlength         = avio_rl32(pb);
        ape->seektablelength      = avio_rl32(pb);
        ape->wavheaderlength      = avio_rl32(pb);
        ape->audiodatalength      = avio_rl32(pb);
        ape->audiodatalength_high = avio_rl32(pb);
        ape->wavtaillength        = avio_rl32(pb);
        avio_read(pb, ape->md5, 16);

        /* Skip any unknown bytes at the end of the descriptor.
           This is for future compatibility */
        if (ape->descriptorlength > 52)
            avio_skip(pb, ape->descriptorlength - 52);

        /* Read header data */
        ape->compressiontype      = avio_rl16(pb);
        ape->formatflags          = avio_rl16(pb);
        ape->blocksperframe       = avio_rl32(pb);
        ape->finalframeblocks     = avio_rl32(pb);
        ape->totalframes          = avio_rl32(pb);
        ape->bps                  = avio_rl16(pb);
        ape->channels             = avio_rl16(pb);
        ape->samplerate           = avio_rl32(pb);
    } else {
        ape->descriptorlength = 0;
        ape->headerlength = 32;

        ape->compressiontype      = avio_rl16(pb);
        ape->formatflags          = avio_rl16(pb);
        ape->channels             = avio_rl16(pb);
        ape->samplerate           = avio_rl32(pb);
        ape->wavheaderlength      = avio_rl32(pb);
        ape->wavtaillength        = avio_rl32(pb);
        ape->totalframes          = avio_rl32(pb);
        ape->finalframeblocks     = avio_rl32(pb);

        if (ape->formatflags & MAC_FORMAT_FLAG_HAS_PEAK_LEVEL) {
            avio_skip(pb, 4); /* Skip the peak level */
            ape->headerlength += 4;
        }

        if (ape->formatflags & MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS) {
            ape->seektablelength = avio_rl32(pb);
            ape->headerlength += 4;
            ape->seektablelength *= sizeof(int32_t);
        } else
            ape->seektablelength = ape->totalframes * sizeof(int32_t);

        if (ape->formatflags & MAC_FORMAT_FLAG_8_BIT)
            ape->bps = 8;
        else if (ape->formatflags & MAC_FORMAT_FLAG_24_BIT)
            ape->bps = 24;
        else
            ape->bps = 16;

        if (ape->fileversion >= 3950)
            ape->blocksperframe = 73728 * 4;
        else if (ape->fileversion >= 3900 || (ape->fileversion >= 3800  && ape->compressiontype >= 4000))
            ape->blocksperframe = 73728;
        else
            ape->blocksperframe = 9216;

        /* Skip any stored wav header */
        if (!(ape->formatflags & MAC_FORMAT_FLAG_CREATE_WAV_HEADER))
            avio_skip(pb, ape->wavheaderlength);
    }

    if(!ape->totalframes) {
        av_log(s, AV_LOG_ERROR, "No frames in the file!\n");
        return AVERROR(EINVAL);
    }
    if(ape->totalframes > UINT_MAX / sizeof(APEFrame)) {
        av_log(s, AV_LOG_ERROR, "Too many frames: %"PRIu32"\n",
               ape->totalframes);
        return -1;
    }
    if (ape->seektablelength && (ape->seektablelength / sizeof(*ape->seektable)) < ape->totalframes) {
        av_log(s, AV_LOG_ERROR,
               "Number of seek entries is less than number of frames: %zu vs. %"PRIu32"\n",
               ape->seektablelength / sizeof(*ape->seektable), ape->totalframes);
        return AVERROR_INVALIDDATA;
    }
    ape->frames       = av_malloc(ape->totalframes * sizeof(APEFrame));
    if(!ape->frames)
        return AVERROR(ENOMEM);
    ape->firstframe   = ape->junklength + ape->descriptorlength + ape->headerlength + ape->seektablelength + ape->wavheaderlength;
    ape->currentframe = 0;


    ape->totalsamples = ape->finalframeblocks;
    if (ape->totalframes > 1)
        ape->totalsamples += ape->blocksperframe * (ape->totalframes - 1);

    if (ape->seektablelength > 0) {
        ape->seektable = av_malloc(ape->seektablelength);
        if (!ape->seektable)
            return AVERROR(ENOMEM);
        for (i = 0; i < ape->seektablelength / sizeof(uint32_t); i++)
            ape->seektable[i] = avio_rl32(pb);
    }

    ape->frames[0].pos     = ape->firstframe;
    ape->frames[0].nblocks = ape->blocksperframe;
    ape->frames[0].skip    = 0;
    for (i = 1; i < ape->totalframes; i++) {
        ape->frames[i].pos      = ape->seektable[i] + ape->junklength;
        ape->frames[i].nblocks  = ape->blocksperframe;
        ape->frames[i - 1].size = ape->frames[i].pos - ape->frames[i - 1].pos;
        ape->frames[i].skip     = (ape->frames[i].pos - ape->frames[0].pos) & 3;
    }
    ape->frames[ape->totalframes - 1].nblocks = ape->finalframeblocks;
    /* calculate final packet size from total file size, if available */
    file_size = avio_size(pb);
    if (file_size > 0) {
        final_size = file_size - ape->frames[ape->totalframes - 1].pos -
                     ape->wavtaillength;
        final_size -= final_size & 3;
    }
    if (file_size <= 0 || final_size <= 0)
        final_size = ape->finalframeblocks * 8;
    ape->frames[ape->totalframes - 1].size = final_size;

    for (i = 0; i < ape->totalframes; i++) {
        if(ape->frames[i].skip) {
            ape->frames[i].pos  -= ape->frames[i].skip;
            ape->frames[i].size += ape->frames[i].skip;
        }
        ape->frames[i].size = (ape->frames[i].size + 3) & ~3;
    }


    ape_dumpinfo(s, ape);

    /* try to read APE tags */
    if (pb->seekable) {
        ff_ape_parse_tag(s);
        avio_seek(pb, 0, SEEK_SET);
    }

    av_log(s, AV_LOG_DEBUG, "Decoding file - v%d.%02d, compression level %"PRIu16"\n",
           ape->fileversion / 1000, (ape->fileversion % 1000) / 10,
           ape->compressiontype);

    /* now we are ready: build format streams */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return -1;

    total_blocks = (ape->totalframes == 0) ? 0 : ((ape->totalframes - 1) * ape->blocksperframe) + ape->finalframeblocks;

    st->codec->codec_type      = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id        = CODEC_ID_APE;
    st->codec->codec_tag       = MKTAG('A', 'P', 'E', ' ');
    st->codec->channels        = ape->channels;
    st->codec->sample_rate     = ape->samplerate;
    st->codec->bits_per_coded_sample = ape->bps;

    st->nb_frames = ape->totalframes;
    st->start_time = 0;
    st->duration  = total_blocks / MAC_SUBFRAME_SIZE;
    avpriv_set_pts_info(st, 64, MAC_SUBFRAME_SIZE, ape->samplerate);

    st->codec->extradata = av_malloc(APE_EXTRADATA_SIZE);
    st->codec->extradata_size = APE_EXTRADATA_SIZE;
    AV_WL16(st->codec->extradata + 0, ape->fileversion);
    AV_WL16(st->codec->extradata + 2, ape->compressiontype);
    AV_WL16(st->codec->extradata + 4, ape->formatflags);

    pts = 0;
    for (i = 0; i < ape->totalframes; i++) {
        ape->frames[i].pts = pts;
        av_add_index_entry(st, ape->frames[i].pos, ape->frames[i].pts, 0, 0, AVINDEX_KEYFRAME);
        pts += ape->blocksperframe / MAC_SUBFRAME_SIZE;
    }

    return 0;
}
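
The per-frame skip/size fixup near the end of ape_read_header() keeps every read aligned to a 4-byte boundary. A standalone sketch of that arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
    int pos = 1003, size = 500;
    int skip = pos & 3;          /* 3 bytes back to the previous 4-byte boundary */
    pos  -= skip;                /* 1000 */
    size += skip;                /* 503 */
    size  = (size + 3) & ~3;     /* 504: rounded up to a multiple of 4 */
    printf("pos=%d size=%d skip=%d\n", pos, size, skip);
    return 0;
}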
Beispiel #21
0
static int modplug_read_header(AVFormatContext *s)
{
    AVStream *st;
    AVIOContext *pb = s->pb;
    ModPlug_Settings settings;
    ModPlugContext *modplug = s->priv_data;
    int sz = avio_size(pb);

    if (sz < 0) {
        av_log(s, AV_LOG_WARNING, "Could not determine file size\n");
        sz = modplug->max_size;
    } else if (modplug->max_size && sz > modplug->max_size) {
        sz = modplug->max_size;
        av_log(s, AV_LOG_WARNING, "Max file size reach%s, allocating %dB "
               "but demuxing is likely to fail due to incomplete buffer\n",
               sz == FF_MODPLUG_DEF_FILE_SIZE ? " (see -max_size)" : "", sz);
    }

    if (modplug->color_eval) {
        int r = av_expr_parse(&modplug->expr, modplug->color_eval, var_names,
                              NULL, NULL, NULL, NULL, 0, s);
        if (r < 0)
            return r;
    }

    modplug->buf = av_malloc(modplug->max_size);
    if (!modplug->buf)
        return AVERROR(ENOMEM);
    sz = avio_read(pb, modplug->buf, sz);

    ModPlug_GetSettings(&settings);
    settings.mChannels       = 2;
    settings.mBits           = 16;
    settings.mFrequency      = 44100;
    settings.mResamplingMode = MODPLUG_RESAMPLE_FIR; // best quality
    settings.mLoopCount      = 0; // prevents looping forever

    if (modplug->noise_reduction) settings.mFlags     |= MODPLUG_ENABLE_NOISE_REDUCTION;
    SET_OPT_IF_REQUESTED(mReverbDepth,   reverb_depth,   MODPLUG_ENABLE_REVERB);
    SET_OPT_IF_REQUESTED(mReverbDelay,   reverb_delay,   MODPLUG_ENABLE_REVERB);
    SET_OPT_IF_REQUESTED(mBassAmount,    bass_amount,    MODPLUG_ENABLE_MEGABASS);
    SET_OPT_IF_REQUESTED(mBassRange,     bass_range,     MODPLUG_ENABLE_MEGABASS);
    SET_OPT_IF_REQUESTED(mSurroundDepth, surround_depth, MODPLUG_ENABLE_SURROUND);
    SET_OPT_IF_REQUESTED(mSurroundDelay, surround_delay, MODPLUG_ENABLE_SURROUND);

    if (modplug->reverb_depth)   settings.mReverbDepth   = modplug->reverb_depth;
    if (modplug->reverb_delay)   settings.mReverbDelay   = modplug->reverb_delay;
    if (modplug->bass_amount)    settings.mBassAmount    = modplug->bass_amount;
    if (modplug->bass_range)     settings.mBassRange     = modplug->bass_range;
    if (modplug->surround_depth) settings.mSurroundDepth = modplug->surround_depth;
    if (modplug->surround_delay) settings.mSurroundDelay = modplug->surround_delay;

    ModPlug_SetSettings(&settings);

    modplug->f = ModPlug_Load(modplug->buf, sz);
    if (!modplug->f)
        return AVERROR_INVALIDDATA;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 1000);
    st->duration = ModPlug_GetLength(modplug->f);
    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id    = CODEC_ID_PCM_S16LE;
    st->codec->channels    = settings.mChannels;
    st->codec->sample_rate = settings.mFrequency;

    // timebase = 1/1000, 2ch 16bits 44.1kHz-> 2*2*44100
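    // e.g. if AUDIO_PKT_SIZE were 512 bytes: 1000 * 512 / (4 * 44100) = about 2.9 ms of audio per packet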
    modplug->ts_per_packet = 1000*AUDIO_PKT_SIZE / (4*44100.);

    if (modplug->video_stream) {
        AVStream *vst = avformat_new_stream(s, NULL);
        if (!vst)
            return AVERROR(ENOMEM);
        avpriv_set_pts_info(vst, 64, 1, 1000);
        vst->duration = st->duration;
        vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        vst->codec->codec_id   = CODEC_ID_XBIN;
        vst->codec->width      = modplug->w << 3;
        vst->codec->height     = modplug->h << 3;
        modplug->linesize = modplug->w * 3;
        modplug->fsize    = modplug->linesize * modplug->h;
    }

    return modplug_load_metadata(s);
}
Beispiel #22
0
HRESULT CLAVAudio::CreateBitstreamContext(AVCodecID codec, WAVEFORMATEX *wfe)
{
  int ret = 0;

  if (m_avBSContext)
    FreeBitstreamContext();
  m_bsParser.Reset();

  // Increase DTS buffer even further, as we do not have any sample caching
  if (codec == AV_CODEC_ID_DTS)
    m_faJitter.SetNumSamples(400);
  else
    m_faJitter.SetNumSamples(100);

  m_pParser = av_parser_init(codec);
  ASSERT(m_pParser);

  m_pAVCtx = avcodec_alloc_context3(avcodec_find_decoder(codec));
  CheckPointer(m_pAVCtx, E_POINTER);

  DbgLog((LOG_TRACE, 20, "Creating Bistreaming Context..."));

  ret = avformat_alloc_output_context2(&m_avBSContext, NULL, "spdif", NULL);
  if (ret < 0 || !m_avBSContext) {
    DbgLog((LOG_ERROR, 10, L"::CreateBitstreamContext() -- alloc of avformat spdif muxer failed (ret: %d)", ret));
    goto fail;
  }

  m_avBSContext->pb = m_avioBitstream;
  m_avBSContext->oformat->flags |= AVFMT_NOFILE;

  // DTS-HD is by default off, unless explicitly asked for
  if (m_settings.DTSHDFraming && m_settings.bBitstream[Bitstream_DTSHD] && !m_bForceDTSCore) {
    m_bDTSHD = TRUE;
    av_opt_set_int(m_avBSContext->priv_data, "dtshd_rate", LAV_BITSTREAM_DTS_HD_RATE, 0);
  } else {
    m_bDTSHD = FALSE;
    av_opt_set_int(m_avBSContext->priv_data, "dtshd_rate", 0, 0);
  }
  av_opt_set_int(m_avBSContext->priv_data, "dtshd_fallback_time", -1, 0);

  AVStream *st = avformat_new_stream(m_avBSContext, 0);
  if (!st) {
    DbgLog((LOG_ERROR, 10, L"::CreateBitstreamContext() -- alloc of output stream failed"));
    goto fail;
  }
  m_pAVCtx->codec_id    = st->codec->codec_id    = codec;
  m_pAVCtx->codec_type  = st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
  m_pAVCtx->channels    = st->codec->channels    = wfe->nChannels;
  m_pAVCtx->sample_rate = st->codec->sample_rate = wfe->nSamplesPerSec;

  ret = avformat_write_header(m_avBSContext, NULL);
  if (ret < 0) {
    DbgLog((LOG_ERROR, 10, L"::CreateBitstreamContext() -- av_write_header returned an error code (%d)", -ret));
    goto fail;
  }

  m_nCodecId = codec;

  return S_OK;
fail:
  FreeBitstreamContext();
  return E_FAIL;
}
Beispiel #23
0
static void AddAudioStream()
{
#if LIBAVFORMAT_VERSION_MAJOR >= 53
    g_pAStream = avformat_new_stream(g_pContainer, g_pACodec);
#else
    g_pAStream = av_new_stream(g_pContainer, 1);
#endif
    if(!g_pAStream)
    {
        Log("Could not allocate audio stream\n");
        return;
    }
    g_pAStream->id = 1;

    g_pAudio = g_pAStream->codec;

    avcodec_get_context_defaults3(g_pAudio, g_pACodec);
    g_pAudio->codec_id = g_pACodec->id;

    // put parameters
    g_pAudio->sample_fmt = AV_SAMPLE_FMT_S16;
    g_pAudio->sample_rate = g_Frequency;
    g_pAudio->channels = g_Channels;

    // set quality
    g_pAudio->bit_rate = 160000;

    // for codecs that support variable bitrate use it, it should be better
    g_pAudio->flags |= CODEC_FLAG_QSCALE;
    g_pAudio->global_quality = 1*FF_QP2LAMBDA;

    // some formats want stream headers to be separate
    if (g_pFormat->flags & AVFMT_GLOBALHEADER)
        g_pAudio->flags |= CODEC_FLAG_GLOBAL_HEADER;

    // open it
#if LIBAVCODEC_VERSION_MAJOR >= 53
    if (avcodec_open2(g_pAudio, g_pACodec, NULL) < 0)
#else
    if (avcodec_open(g_pAudio, g_pACodec) < 0)
#endif
    {
        Log("Could not open audio codec %s\n", g_pACodec->long_name);
        return;
    }

#if LIBAVCODEC_VERSION_MAJOR >= 54
    if (g_pACodec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
#else
    if (g_pAudio->frame_size == 0)
#endif
        g_NumSamples = 4096;
    else
        g_NumSamples = g_pAudio->frame_size;
    g_pSamples = (int16_t*)av_malloc(g_NumSamples*g_Channels*sizeof(int16_t));
    g_pAFrame = avcodec_alloc_frame();
    if (!g_pAFrame)
    {
        Log("Could not allocate frame\n");
        return;
    }
}
Beispiel #24
0
    /* Add an output stream. */
    bool FFMPEGer::add_stream(OutputStream *ost, AVFormatContext *oc,
                              AVCodec **codec,
                              enum AVCodecID codec_id) {
        AVCodecContext *codecContext;
        int i;

        /* find the encoder */
        *codec = avcodec_find_encoder(codec_id);
        if (!(*codec)) {
            ALOGE("Could not find encoder for '%s'",
                  avcodec_get_name(codec_id));
            return false;
        }

        ALOGV("success to find encoder for '%s'",
              avcodec_get_name(codec_id));
        // Create the AVStream for the output bitstream.
        ost->st = avformat_new_stream(oc, *codec);
        if (!ost->st) {
            ALOGE("Could not allocate stream");
            return false;
        }
        ost->st->id = oc->nb_streams - 1;
        codecContext = ost->st->codec;

        switch ((*codec)->type) {
            case AVMEDIA_TYPE_AUDIO:
//			c->sample_fmt  = (*codec)->sample_fmts ?
//				(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_NONE;//TODO:
//                AV_SAMPLE_FMT_S16
//                codecContext->sample_fmt =
//                        (*codec)->sample_fmts[0];//TODO:

                codecContext->sample_fmt=AV_SAMPLE_FMT_S16;//TODO:
                codecContext->bit_rate = 64000;//TODO:
                codecContext->sample_rate = 44100;//TODO:
                if ((*codec)->supported_samplerates) {
                    codecContext->sample_rate = (*codec)->supported_samplerates[0];
                    for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                        if ((*codec)->supported_samplerates[i] == 44100)
                            codecContext->sample_rate = 44100;
                    }
                }
//                codecContext->channels = av_get_channel_layout_nb_channels(codecContext->channel_layout);
                codecContext->channels = 1;

//                codecContext->channel_layout = AV_CH_LAYOUT_STEREO;
//                if ((*codec)->channel_layouts) {
//                    codecContext->channel_layout = (*codec)->channel_layouts[0];
//                    for (i = 0; (*codec)->channel_layouts[i]; i++) {
//                        if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
//                            codecContext->channel_layout = AV_CH_LAYOUT_STEREO;
//                    }
//                }
                codecContext->channel_layout = av_get_default_channel_layout(codecContext->channels);

                codecContext->channels = 1;
//                codecContext->channels = av_get_channel_layout_nb_channels(codecContext->channel_layout);
                ost->st->time_base = (AVRational) {1, codecContext->sample_rate};
                break;
            case AVMEDIA_TYPE_VIDEO:
                codecContext->codec_id = codec_id;
                if (codec_id == AV_CODEC_ID_H264) {
                    codecContext->profile = FF_PROFILE_H264_BASELINE;
                }

                codecContext->bit_rate = 400000;
                /* Resolution must be a multiple of two. */
                codecContext->width = mWidth;
                codecContext->height = mHeight;
                /* timebase: This is the fundamental unit of time (in seconds) in terms
                 * of which frame timestamps are represented. For fixed-fps content,
                 * timebase should be 1/framerate and timestamp increments should be
                 * identical to 1. */
                ost->st->time_base = (AVRational) {1, STREAM_FRAME_RATE};
                codecContext->time_base = ost->st->time_base;
                codecContext->gop_size = 12; /* emit one intra frame every twelve frames at most */
                codecContext->pix_fmt = STREAM_PIX_FMT;//input fmt for the encoder.
                break;
            default:
                break;
        }
        /* Some formats want stream headers to be separate. */
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
            codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

        return true;
    }
Beispiel #25
0
static int dxa_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    DXAContext *c = s->priv_data;
    AVStream *st, *ast;
    uint32_t tag;
    int32_t fps;
    int w, h;
    int num, den;
    int flags;
    int ret;

    tag = avio_rl32(pb);
    if (tag != MKTAG('D', 'E', 'X', 'A'))
        return AVERROR_INVALIDDATA;
    flags = avio_r8(pb);
    c->frames = avio_rb16(pb);
    if(!c->frames){
        av_log(s, AV_LOG_ERROR, "File contains no frames ???\n");
        return AVERROR_INVALIDDATA;
    }

    fps = avio_rb32(pb);
    if(fps > 0){
        den = 1000;
        num = fps;
    }else if (fps < 0){
        den = 100000;
        num = -fps;
    }else{
        den = 10;
        num = 1;
    }
    w = avio_rb16(pb);
    h = avio_rb16(pb);
    c->has_sound = 0;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    // Parse WAV data header
    if(avio_rl32(pb) == MKTAG('W', 'A', 'V', 'E')){
        uint32_t size, fsize;
        c->has_sound = 1;
        size = avio_rb32(pb);
        c->vidpos = avio_tell(pb) + size;
        avio_skip(pb, 16);
        fsize = avio_rl32(pb);

        ast = avformat_new_stream(s, NULL);
        if (!ast)
            return AVERROR(ENOMEM);
        ret = ff_get_wav_header(pb, ast->codec, fsize);
        if (ret < 0)
            return ret;
        if (ast->codec->sample_rate > 0)
            avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
        // find 'data' chunk
        while(avio_tell(pb) < c->vidpos && !url_feof(pb)){
            tag = avio_rl32(pb);
            fsize = avio_rl32(pb);
            if(tag == MKTAG('d', 'a', 't', 'a')) break;
            avio_skip(pb, fsize);
        }
        c->bpc = (fsize + c->frames - 1) / c->frames;
        if(ast->codec->block_align)
            c->bpc = ((c->bpc + ast->codec->block_align - 1) / ast->codec->block_align) * ast->codec->block_align;
        c->bytes_left = fsize;
        c->wavpos = avio_tell(pb);
        avio_seek(pb, c->vidpos, SEEK_SET);
    }

    /* now we are ready: build format streams */
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_DXA;
    st->codec->width      = w;
    st->codec->height     = h;
    av_reduce(&den, &num, den, num, (1UL<<31)-1);
    avpriv_set_pts_info(st, 33, num, den);
    /* flags & 0x80 means that image is interlaced,
     * flags & 0x40 means that image has double height
     * either way set true height
     */
    if(flags & 0xC0){
        st->codec->height >>= 1;
    }
Beispiel #26
0
static int xwma_read_header(AVFormatContext *s)
{
    int64_t size;
    int ret = 0;
    uint32_t dpds_table_size = 0;
    uint32_t *dpds_table = NULL;
    unsigned int tag;
    AVIOContext *pb = s->pb;
    AVStream *st;
    XWMAContext *xwma = s->priv_data;
    int i;

    /* The following code is mostly copied from wav.c, with some
     * minor alterations.
     */

    /* check RIFF header */
    tag = avio_rl32(pb);
    if (tag != MKTAG('R', 'I', 'F', 'F'))
        return -1;
    avio_rl32(pb); /* file size */
    tag = avio_rl32(pb);
    if (tag != MKTAG('X', 'W', 'M', 'A'))
        return -1;

    /* parse fmt header */
    tag = avio_rl32(pb);
    if (tag != MKTAG('f', 'm', 't', ' '))
        return -1;
    size = avio_rl32(pb);
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    ret = ff_get_wav_header(pb, st->codec, size, 0);
    if (ret < 0)
        return ret;
    st->need_parsing = AVSTREAM_PARSE_NONE;

    /* All xWMA files I have seen contained WMAv2 data. If there are files
     * using WMA Pro or some other codec, then we need to figure out the right
     * extradata for that. Thus, ask the user for feedback, but try to go on
     * anyway.
     */
    if (st->codec->codec_id != AV_CODEC_ID_WMAV2) {
        avpriv_request_sample(s, "Unexpected codec (tag 0x04%x; id %d)",
                              st->codec->codec_tag, st->codec->codec_id);
    } else {
        /* In all xWMA files I have seen, there is no extradata. But the WMA
         * codecs require extradata, so we provide our own fake extradata.
         *
         * First, check that there really was no extradata in the header. If
         * there was, then try to use it, after asking the user to provide a
         * sample of this unusual file.
         */
        if (st->codec->extradata_size != 0) {
            /* Surprise, surprise: We *did* get some extradata. No idea
             * if it will work, but just go on and try it, after asking
             * the user for a sample.
             */
            avpriv_request_sample(s, "Unexpected extradata (%d bytes)",
                                  st->codec->extradata_size);
        } else {
            st->codec->extradata_size = 6;
            st->codec->extradata      = av_mallocz(6 + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);

            /* setup extradata with our experimentally obtained value */
            st->codec->extradata[4] = 31;
        }
    }

    if (!st->codec->channels) {
        av_log(s, AV_LOG_WARNING, "Invalid channel count: %d\n",
               st->codec->channels);
        return AVERROR_INVALIDDATA;
    }
    if (!st->codec->bits_per_coded_sample) {
        av_log(s, AV_LOG_WARNING, "Invalid bits_per_coded_sample: %d\n",
               st->codec->bits_per_coded_sample);
        return AVERROR_INVALIDDATA;
    }

    /* set the sample rate */
    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

    /* parse the remaining RIFF chunks */
    for (;;) {
        if (pb->eof_reached) {
            ret = AVERROR_EOF;
            goto fail;
        }
        /* read next chunk tag */
        tag = avio_rl32(pb);
        size = avio_rl32(pb);
        if (tag == MKTAG('d', 'a', 't', 'a')) {
            /* We assume that the data chunk comes last. */
            break;
        } else if (tag == MKTAG('d','p','d','s')) {
            /* Quoting the MSDN xWMA docs on the dpds chunk: "Contains the
             * decoded packet cumulative data size array, each element is the
             * number of bytes accumulated after the corresponding xWMA packet
             * is decoded in order."
             *
             * Each packet has size equal to st->codec->block_align, which in
             * all cases I saw so far was always 2230. Thus, we can use the
             * dpds data to compute a seeking index.
             */

            /* Error out if there is more than one dpds chunk. */
            if (dpds_table) {
                av_log(s, AV_LOG_ERROR, "two dpds chunks present\n");
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }

            /* Compute the number of entries in the dpds chunk. */
            if (size & 3) {  /* Size should be divisible by four */
                av_log(s, AV_LOG_WARNING,
                       "dpds chunk size %"PRId64" not divisible by 4\n", size);
            }
            dpds_table_size = size / 4;
            if (dpds_table_size == 0 || dpds_table_size >= INT_MAX / 4) {
                av_log(s, AV_LOG_ERROR,
                       "dpds chunk size %"PRId64" invalid\n", size);
                return AVERROR_INVALIDDATA;
            }

            /* Allocate some temporary storage to keep the dpds data around.
             * for processing later on.
             */
            dpds_table = av_malloc(dpds_table_size * sizeof(uint32_t));
            if (!dpds_table) {
                return AVERROR(ENOMEM);
            }

            for (i = 0; i < dpds_table_size; ++i) {
                dpds_table[i] = avio_rl32(pb);
                size -= 4;
            }
        }
        avio_skip(pb, size);
    }

    /* Determine overall data length */
    if (size < 0) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }
    if (!size) {
        xwma->data_end = INT64_MAX;
    } else
        xwma->data_end = avio_tell(pb) + size;


    if (dpds_table && dpds_table_size) {
        int64_t cur_pos;
        const uint32_t bytes_per_sample
                = (st->codec->channels * st->codec->bits_per_coded_sample) >> 3;

        /* Estimate the duration from the total number of output bytes. */
        const uint64_t total_decoded_bytes = dpds_table[dpds_table_size - 1];

        if (!bytes_per_sample) {
            av_log(s, AV_LOG_ERROR,
                   "Invalid bits_per_coded_sample %d for %d channels\n",
                   st->codec->bits_per_coded_sample, st->codec->channels);
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        st->duration = total_decoded_bytes / bytes_per_sample;

        /* Use the dpds data to build a seek table.  We can only do this after
         * we know the offset to the data chunk, as we need that to determine
         * the actual offset to each input block.
         * Note: If we allowed ourselves to assume that the data chunk always
         * follows immediately after the dpds block, we could of course guess
         * the data block's start offset already while reading the dpds chunk.
         * I decided against that, just in case other chunks ever are
         * discovered.
         */
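        /* Illustration with made-up numbers: for stereo 16-bit audio
         * (bytes_per_sample = 4), block_align = 2230 and dpds_table[0] = 17840,
         * the first index entry marks the start of packet 1 at
         * cur_pos + 1 * 2230 with timestamp 17840 / 4 = 4460 samples. */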
        cur_pos = avio_tell(pb);
        for (i = 0; i < dpds_table_size; ++i) {
            /* From the number of output bytes that would accumulate in the
             * output buffer after decoding the first (i+1) packets, we compute
             * an offset / timestamp pair.
             */
            av_add_index_entry(st,
                               cur_pos + (i+1) * st->codec->block_align, /* pos */
                               dpds_table[i] / bytes_per_sample,         /* timestamp */
                               st->codec->block_align,                   /* size */
                               0,                                        /* duration */
                               AVINDEX_KEYFRAME);
        }
    } else if (st->codec->bit_rate) {
Beispiel #27
0
int main(int argc, char **argv)
{
    char *in_graph_desc, **out_dev_name;
    int nb_out_dev = 0, nb_streams = 0;
    AVFilterGraph *in_graph = NULL;
    Stream *streams = NULL, *st;
    AVFrame *frame = NULL;
    int i, j, run = 1, ret;

    //av_log_set_level(AV_LOG_DEBUG);

    if (argc < 3) {
        av_log(NULL, AV_LOG_ERROR,
               "Usage: %s filter_graph dev:out [dev2:out2...]\n\n"
               "Examples:\n"
               "%s movie=file.nut:s=v+a xv:- alsa:default\n"
               "%s movie=file.nut:s=v+a uncodedframecrc:pipe:0\n",
               argv[0], argv[0], argv[0]);
        exit(1);
    }
    in_graph_desc = argv[1];
    out_dev_name = argv + 2;
    nb_out_dev = argc - 2;

    av_register_all();
    avdevice_register_all();
    avfilter_register_all();

    /* Create input graph */
    if (!(in_graph = avfilter_graph_alloc())) {
        ret = AVERROR(ENOMEM);
        av_log(NULL, AV_LOG_ERROR, "Unable to alloc graph graph: %s\n",
               av_err2str(ret));
        goto fail;
    }
    ret = avfilter_graph_parse_ptr(in_graph, in_graph_desc, NULL, NULL, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Unable to parse graph: %s\n",
               av_err2str(ret));
        goto fail;
    }
    nb_streams = 0;
    for (i = 0; i < in_graph->nb_filters; i++) {
        AVFilterContext *f = in_graph->filters[i];
        for (j = 0; j < f->nb_inputs; j++) {
            if (!f->inputs[j]) {
                av_log(NULL, AV_LOG_ERROR, "Graph has unconnected inputs\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
        }
        for (j = 0; j < f->nb_outputs; j++)
            if (!f->outputs[j])
                nb_streams++;
    }
    if (!nb_streams) {
        av_log(NULL, AV_LOG_ERROR, "Graph has no output stream\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }
    if (nb_out_dev != 1 && nb_out_dev != nb_streams) {
        av_log(NULL, AV_LOG_ERROR,
               "Graph has %d output streams, %d devices given\n",
               nb_streams, nb_out_dev);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!(streams = av_calloc(nb_streams, sizeof(*streams)))) {
        ret = AVERROR(ENOMEM);
        av_log(NULL, AV_LOG_ERROR, "Could not allocate streams\n");
        goto fail;
    }
    st = streams;
    for (i = 0; i < in_graph->nb_filters; i++) {
        AVFilterContext *f = in_graph->filters[i];
        for (j = 0; j < f->nb_outputs; j++) {
            if (!f->outputs[j]) {
                if ((ret = create_sink(st++, in_graph, f, j)) < 0)
                    goto fail;
            }
        }
    }
    av_assert0(st - streams == nb_streams);
    if ((ret = avfilter_graph_config(in_graph, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Failed to configure graph\n");
        goto fail;
    }

    /* Create output devices */
    for (i = 0; i < nb_out_dev; i++) {
        char *fmt = NULL, *dev = out_dev_name[i];
        st = &streams[i];
        if ((dev = strchr(dev, ':'))) {
            *(dev++) = 0;
            fmt = out_dev_name[i];
        }
        ret = avformat_alloc_output_context2(&st->mux, NULL, fmt, dev);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate output: %s\n",
                   av_err2str(ret));
            goto fail;
        }
        if (!(st->mux->oformat->flags & AVFMT_NOFILE)) {
            ret = avio_open2(&st->mux->pb, st->mux->filename, AVIO_FLAG_WRITE,
                             NULL, NULL);
            if (ret < 0) {
                av_log(st->mux, AV_LOG_ERROR, "Failed to init output: %s\n",
                       av_err2str(ret));
                goto fail;
            }
        }
    }
    for (; i < nb_streams; i++)
        streams[i].mux = streams[0].mux;

    /* Create output device streams */
    for (i = 0; i < nb_streams; i++) {
        st = &streams[i];
        if (!(st->stream = avformat_new_stream(st->mux, NULL))) {
            ret = AVERROR(ENOMEM);
            av_log(NULL, AV_LOG_ERROR, "Failed to create output stream\n");
            goto fail;
        }
        st->stream->codec->codec_type = st->link->type;
        st->stream->time_base = st->stream->codec->time_base =
            st->link->time_base;
        switch (st->link->type) {
        case AVMEDIA_TYPE_VIDEO:
            st->stream->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
            st->stream->avg_frame_rate =
            st->stream->  r_frame_rate = av_buffersink_get_frame_rate(st->sink);
            st->stream->codec->width               = st->link->w;
            st->stream->codec->height              = st->link->h;
            st->stream->codec->sample_aspect_ratio = st->link->sample_aspect_ratio;
            st->stream->codec->pix_fmt             = st->link->format;
            break;
        case AVMEDIA_TYPE_AUDIO:
            st->stream->codec->channel_layout = st->link->channel_layout;
            st->stream->codec->channels = avfilter_link_get_channels(st->link);
            st->stream->codec->sample_rate = st->link->sample_rate;
            st->stream->codec->sample_fmt = st->link->format;
            st->stream->codec->codec_id =
                av_get_pcm_codec(st->stream->codec->sample_fmt, -1);
            break;
        default:
            av_assert0(!"reached");
        }
    }

    /* Init output devices */
    for (i = 0; i < nb_out_dev; i++) {
        st = &streams[i];
        if ((ret = avformat_write_header(st->mux, NULL)) < 0) {
            av_log(st->mux, AV_LOG_ERROR, "Failed to init output: %s\n",
                   av_err2str(ret));
            goto fail;
        }
    }

    /* Check output devices */
    for (i = 0; i < nb_streams; i++) {
        st = &streams[i];
        ret = av_write_uncoded_frame_query(st->mux, st->stream->index);
        if (ret < 0) {
            av_log(st->mux, AV_LOG_ERROR,
                   "Uncoded frames not supported on stream #%d: %s\n",
                   i, av_err2str(ret));
            goto fail;
        }
    }

    while (run) {
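        /* Ask the graph to produce data on its oldest sink, then drain every
         * sink below without issuing further requests and hand each uncoded
         * frame to its output device. */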
        ret = avfilter_graph_request_oldest(in_graph);
        if (ret < 0) {
            if (ret == AVERROR_EOF) {
                run = 0;
            } else {
                av_log(NULL, AV_LOG_ERROR, "Error filtering: %s\n",
                       av_err2str(ret));
                break;
            }
        }
        for (i = 0; i < nb_streams; i++) {
            st = &streams[i];
            while (1) {
                if (!frame && !(frame = av_frame_alloc())) {
                    ret = AVERROR(ENOMEM);
                    av_log(NULL, AV_LOG_ERROR, "Could not allocate frame\n");
                    goto fail;
                }
                ret = av_buffersink_get_frame_flags(st->sink, frame,
                                                    AV_BUFFERSINK_FLAG_NO_REQUEST);
                if (ret < 0) {
                    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
                        av_log(NULL, AV_LOG_WARNING, "Error in sink: %s\n",
                               av_err2str(ret));
                    break;
                }
                if (frame->pts != AV_NOPTS_VALUE)
                    frame->pts = av_rescale_q(frame->pts,
                                              st->link  ->time_base,
                                              st->stream->time_base);
                ret = av_interleaved_write_uncoded_frame(st->mux,
                                                         st->stream->index,
                                                         frame);
                frame = NULL;
                if (ret < 0) {
                    av_log(st->stream->codec, AV_LOG_ERROR,
                           "Error writing frame: %s\n", av_err2str(ret));
                    goto fail;
                }
            }
        }
    }
    ret = 0;

    for (i = 0; i < nb_out_dev; i++) {
        st = &streams[i];
        av_write_trailer(st->mux);
    }

fail:
    av_frame_free(&frame);
    avfilter_graph_free(&in_graph);
    if (streams) {
        for (i = 0; i < nb_out_dev; i++) {
            st = &streams[i];
            if (st->mux) {
                if (st->mux->pb)
                    avio_close(st->mux->pb);
                avformat_free_context(st->mux);
            }
        }
    }
    av_freep(&streams);
    return ret < 0;
}
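
The main() above relies on a Stream struct and a create_sink() helper that are
not part of this excerpt. The following is a minimal sketch of what they
plausibly look like, inferred purely from how main() uses them; the field names
and the helper body are assumptions, not the original source, and the example's
usual headers (libavfilter/avfilter.h, libavfilter/buffersink.h,
libavformat/avformat.h) are taken as given.

typedef struct Stream {
    AVFilterContext *sink;    /* buffersink/abuffersink attached to a free output pad */
    AVFilterLink    *link;    /* link feeding that sink */
    AVFormatContext *mux;     /* output device context */
    AVStream        *stream;  /* stream created inside the output device */
} Stream;

static int create_sink(Stream *st, AVFilterGraph *graph,
                       AVFilterContext *f, int idx)
{
    enum AVMediaType type = avfilter_pad_get_type(f->output_pads, idx);
    const char *sink_name;
    int ret;

    switch (type) {
    case AVMEDIA_TYPE_VIDEO: sink_name = "buffersink";  break;
    case AVMEDIA_TYPE_AUDIO: sink_name = "abuffersink"; break;
    default:
        av_log(NULL, AV_LOG_ERROR, "Unsupported stream type\n");
        return AVERROR(EINVAL);
    }
    ret = avfilter_graph_create_filter(&st->sink,
                                       avfilter_get_by_name(sink_name),
                                       NULL, NULL, NULL, graph);
    if (ret < 0)
        return ret;
    ret = avfilter_link(f, idx, st->sink, 0);
    if (ret < 0)
        return ret;
    st->link = st->sink->inputs[0];
    return 0;
}
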
Beispiel #28
0
/* aiff input */
static int aiff_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    int size, filesize;
    int64_t offset = 0;
    uint32_t tag;
    unsigned version = AIFF_C_VERSION1;
    AVIOContext *pb = s->pb;
    AVStream * st;
    AIFFInputContext *aiff = (AIFFInputContext *)s->priv_data;

    /* check FORM header */
    filesize = get_tag(pb, &tag);
    if (filesize < 0 || tag != MKTAG('F', 'O', 'R', 'M'))
        return AVERROR_INVALIDDATA;

    /* AIFF data type */
    tag = avio_rl32(pb);
    if (tag == MKTAG('A', 'I', 'F', 'F'))       /* Got an AIFF file */
        version = AIFF;
    else if (tag != MKTAG('A', 'I', 'F', 'C'))  /* An AIFF-C file then */
        return AVERROR_INVALIDDATA;

    filesize -= 4;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    while (filesize > 0) {
        /* parse different chunks */
        size = get_tag(pb, &tag);
        if (size < 0)
            return size;

        filesize -= size + 8;

        switch (tag) {
        case MKTAG('C', 'O', 'M', 'M'):     /* Common chunk */
            /* Then for the complete header info */
            st->nb_frames = get_aiff_header(pb, st->codec, size, version);
            if (st->nb_frames < 0)
                return st->nb_frames;
            if (offset > 0) // COMM is after SSND
                goto got_sound;
            break;
        case MKTAG('F', 'V', 'E', 'R'):     /* Version chunk */
            version = avio_rb32(pb);
            break;
        case MKTAG('N', 'A', 'M', 'E'):     /* Sample name chunk */
            get_meta(s, "title"    , size);
            break;
        case MKTAG('A', 'U', 'T', 'H'):     /* Author chunk */
            get_meta(s, "author"   , size);
            break;
        case MKTAG('(', 'c', ')', ' '):     /* Copyright chunk */
            get_meta(s, "copyright", size);
            break;
        case MKTAG('A', 'N', 'N', 'O'):     /* Annotation chunk */
            get_meta(s, "comment"  , size);
            break;
        case MKTAG('S', 'S', 'N', 'D'):     /* Sampled sound chunk */
            aiff->data_end = avio_tell(pb) + size;
            offset = avio_rb32(pb);      /* Offset of sound data */
            avio_rb32(pb);               /* BlockSize... don't care */
            offset += avio_tell(pb);    /* Compute absolute data offset */
            if (st->codec->block_align)    /* Assume COMM already parsed */
                goto got_sound;
            if (!pb->seekable) {
                av_log(s, AV_LOG_ERROR, "file is not seekable\n");
                return -1;
            }
            avio_skip(pb, size - 8);
            break;
        case MKTAG('w', 'a', 'v', 'e'):
            if ((uint64_t)size > (1<<30))
                return -1;
            st->codec->extradata = (uint8_t *)av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
            st->codec->extradata_size = size;
            avio_read(pb, st->codec->extradata, size);
            break;
        case MKTAG('C','H','A','N'):
            if (size < 12)
                return AVERROR_INVALIDDATA;
            ff_mov_read_chan(s, size, st->codec);
            break;
        default: /* Jump */
            if (size & 1)   /* Always even aligned */
                size++;
            avio_skip(pb, size);
        }
    }

got_sound:
    if (!st->codec->block_align) {
        av_log(s, AV_LOG_ERROR, "could not find COMM tag or invalid block_align value\n");
        return -1;
    }

    /* Now positioned, get the sound data start and end */
    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    st->start_time = 0;
    st->duration = st->codec->frame_size ?
        st->nb_frames * st->codec->frame_size : st->nb_frames;

    /* Position the stream at the first block */
    avio_seek(pb, offset, SEEK_SET);

    return 0;
}
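
aiff_read_header() above calls get_tag(), get_meta() and get_aiff_header()
helpers that are not included in this excerpt. Below is a plausible minimal
version of get_tag(), assuming it returns the chunk payload size after reading
a four-byte chunk ID and a big-endian 32-bit size; this is an inferred sketch,
not the original implementation.

static int get_tag(AVIOContext *pb, uint32_t *tag)
{
    int size;

    if (pb->eof_reached)
        return AVERROR(EIO);

    *tag = avio_rl32(pb);   /* chunk ID, compared against MKTAG() values */
    size = avio_rb32(pb);   /* chunk payload size, big-endian as in EA-IFF */

    return size < 0 ? AVERROR_INVALIDDATA : size;
}
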
Beispiel #29
0
static int iff_read_header(AVFormatContext *s)
{
    IffDemuxContext *iff = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    uint8_t *buf;
    uint32_t chunk_id;
    uint64_t data_size;
    uint32_t screenmode = 0, num, den;
    unsigned transparency = 0;
    unsigned masking = 0; // no mask
    uint8_t fmt[16];
    int fmt_size;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->channels = 1;
    st->codec->channel_layout = AV_CH_LAYOUT_MONO;
    iff->is_64bit = avio_rl32(pb) == ID_FRM8;
    avio_skip(pb, iff->is_64bit ? 8 : 4);
    // codec_tag used by ByteRun1 decoder to distinguish progressive (PBM) and interlaced (ILBM) content
    st->codec->codec_tag = avio_rl32(pb);
    if (st->codec->codec_tag == ID_ANIM) {
        avio_skip(pb, 8);
        st->codec->codec_tag = avio_rl32(pb);
    }
    iff->bitmap_compression = -1;
    iff->svx8_compression = -1;
    iff->maud_bits = -1;
    iff->maud_compression = -1;

    while(!avio_feof(pb)) {
        uint64_t orig_pos;
        int res;
        const char *metadata_tag = NULL;
        int version, nb_comments, i;
        chunk_id = avio_rl32(pb);
        data_size = iff->is_64bit ? avio_rb64(pb) : avio_rb32(pb);
        orig_pos = avio_tell(pb);

        switch(chunk_id) {
        case ID_VHDR:
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

            if (data_size < 14)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 12);
            st->codec->sample_rate = avio_rb16(pb);
            if (data_size >= 16) {
                avio_skip(pb, 1);
                iff->svx8_compression = avio_r8(pb);
            }
            break;

        case ID_MHDR:
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

            if (data_size < 32)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 4);
            iff->maud_bits = avio_rb16(pb);
            avio_skip(pb, 2);
            num = avio_rb32(pb);
            den = avio_rb16(pb);
            if (!den)
                return AVERROR_INVALIDDATA;
            avio_skip(pb, 2);
            st->codec->sample_rate = num / den;
            st->codec->channels = avio_rb16(pb);
            iff->maud_compression = avio_rb16(pb);
            if (st->codec->channels == 1)
                st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            else if (st->codec->channels == 2)
                st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            break;

        case ID_ABIT:
        case ID_BODY:
        case ID_DBOD:
        case ID_DSD:
        case ID_MDAT:
            iff->body_pos = avio_tell(pb);
            iff->body_end = iff->body_pos + data_size;
            iff->body_size = data_size;
            break;

        case ID_CHAN:
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            if (avio_rb32(pb) < 6) {
                st->codec->channels       = 1;
                st->codec->channel_layout = AV_CH_LAYOUT_MONO;
            } else {
                st->codec->channels       = 2;
                st->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            }
            break;

        case ID_CAMG:
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            screenmode                = avio_rb32(pb);
            break;

        case ID_CMAP:
            if (data_size < 3 || data_size > 768 || data_size % 3) {
                 av_log(s, AV_LOG_ERROR, "Invalid CMAP chunk size %"PRIu64"\n",
                        data_size);
                 return AVERROR_INVALIDDATA;
            }
            st->codec->extradata_size = data_size + IFF_EXTRA_VIDEO_SIZE;
            st->codec->extradata      = av_malloc(data_size + IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
            if (avio_read(pb, st->codec->extradata + IFF_EXTRA_VIDEO_SIZE, data_size) < 0)
                return AVERROR(EIO);
            break;

        case ID_BMHD:
            st->codec->codec_type            = AVMEDIA_TYPE_VIDEO;
            if (data_size <= 8)
                return AVERROR_INVALIDDATA;
            st->codec->width                 = avio_rb16(pb);
            st->codec->height                = avio_rb16(pb);
            avio_skip(pb, 4); // x, y offset
            st->codec->bits_per_coded_sample = avio_r8(pb);
            if (data_size >= 10)
                masking                      = avio_r8(pb);
            if (data_size >= 11)
                iff->bitmap_compression      = avio_r8(pb);
            if (data_size >= 14) {
                avio_skip(pb, 1); // padding
                transparency                 = avio_rb16(pb);
            }
            if (data_size >= 16) {
                st->sample_aspect_ratio.num  = avio_r8(pb);
                st->sample_aspect_ratio.den  = avio_r8(pb);
            }
            break;

        case ID_DPEL:
            if (data_size < 4 || (data_size & 3))
                return AVERROR_INVALIDDATA;
            if ((fmt_size = avio_read(pb, fmt, sizeof(fmt))) < 0)
                return fmt_size;
            if (fmt_size == sizeof(deep_rgb24) && !memcmp(fmt, deep_rgb24, sizeof(deep_rgb24)))
                st->codec->pix_fmt = AV_PIX_FMT_RGB24;
            else if (fmt_size == sizeof(deep_rgba) && !memcmp(fmt, deep_rgba, sizeof(deep_rgba)))
                st->codec->pix_fmt = AV_PIX_FMT_RGBA;
            else if (fmt_size == sizeof(deep_bgra) && !memcmp(fmt, deep_bgra, sizeof(deep_bgra)))
                st->codec->pix_fmt = AV_PIX_FMT_BGRA;
            else if (fmt_size == sizeof(deep_argb) && !memcmp(fmt, deep_argb, sizeof(deep_argb)))
                st->codec->pix_fmt = AV_PIX_FMT_ARGB;
            else if (fmt_size == sizeof(deep_abgr) && !memcmp(fmt, deep_abgr, sizeof(deep_abgr)))
                st->codec->pix_fmt = AV_PIX_FMT_ABGR;
            else {
                avpriv_request_sample(s, "color format %.16s", fmt);
                return AVERROR_PATCHWELCOME;
            }
            break;

        case ID_DGBL:
            st->codec->codec_type            = AVMEDIA_TYPE_VIDEO;
            if (data_size < 8)
                return AVERROR_INVALIDDATA;
            st->codec->width                 = avio_rb16(pb);
            st->codec->height                = avio_rb16(pb);
            iff->bitmap_compression          = avio_rb16(pb);
            st->sample_aspect_ratio.num      = avio_r8(pb);
            st->sample_aspect_ratio.den      = avio_r8(pb);
            st->codec->bits_per_coded_sample = 24;
            break;

        case ID_DLOC:
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            st->codec->width  = avio_rb16(pb);
            st->codec->height = avio_rb16(pb);
            break;

        case ID_TVDC:
            if (data_size < sizeof(iff->tvdc))
                return AVERROR_INVALIDDATA;
            res = avio_read(pb, iff->tvdc, sizeof(iff->tvdc));
            if (res < 0)
                return res;
            break;

        case ID_ANNO:
        case ID_TEXT:      metadata_tag = "comment";   break;
        case ID_AUTH:      metadata_tag = "artist";    break;
        case ID_COPYRIGHT: metadata_tag = "copyright"; break;
        case ID_NAME:      metadata_tag = "title";     break;

        /* DSD tags */

        case MKTAG('F','V','E','R'):
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            version = avio_rb32(pb);
            av_log(s, AV_LOG_DEBUG, "DSIFF v%d.%d.%d.%d\n",version >> 24, (version >> 16) & 0xFF, (version >> 8) & 0xFF, version & 0xFF);
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            break;

        case MKTAG('D','I','I','N'):
            res = parse_dsd_diin(s, st, orig_pos + data_size);
            if (res < 0)
                return res;
            break;

        case MKTAG('P','R','O','P'):
            if (data_size < 4)
                return AVERROR_INVALIDDATA;
            if (avio_rl32(pb) != MKTAG('S','N','D',' ')) {
                avpriv_request_sample(s, "unknown property type");
                break;
            }
            res = parse_dsd_prop(s, st, orig_pos + data_size);
            if (res < 0)
                return res;
            break;

        case MKTAG('C','O','M','T'):
            if (data_size < 2)
                return AVERROR_INVALIDDATA;
            nb_comments = avio_rb16(pb);
            for (i = 0; i < nb_comments; i++) {
                int year, mon, day, hour, min, type, ref;
                char tmp[24];
                const char *tag;
                int metadata_size;

                year = avio_rb16(pb);
                mon  = avio_r8(pb);
                day  = avio_r8(pb);
                hour = avio_r8(pb);
                min  = avio_r8(pb);
                snprintf(tmp, sizeof(tmp), "%04d-%02d-%02d %02d:%02d", year, mon, day, hour, min);
                av_dict_set(&st->metadata, "comment_time", tmp, 0);

                type = avio_rb16(pb);
                ref  = avio_rb16(pb);
                switch (type) {
                case 1:
                    if (!i)
                        tag = "channel_comment";
                    else {
                        snprintf(tmp, sizeof(tmp), "channel%d_comment", ref);
                        tag = tmp;
                    }
                    break;
                case 2:
                    tag = ref < FF_ARRAY_ELEMS(dsd_source_comment) ? dsd_source_comment[ref] : "source_comment";
                    break;
                case 3:
                    tag = ref < FF_ARRAY_ELEMS(dsd_history_comment) ? dsd_history_comment[ref] : "file_history";
                    break;
                default:
                    tag = "comment";
                }

                metadata_size  = avio_rb32(pb);
                if ((res = get_metadata(s, tag, metadata_size)) < 0) {
                    av_log(s, AV_LOG_ERROR, "cannot allocate metadata tag %s!\n", tag);
                    return res;
                }

                if (metadata_size & 1)
                    avio_skip(pb, 1);
            }
            break;
        }

        if (metadata_tag) {
            if ((res = get_metadata(s, metadata_tag, data_size)) < 0) {
                av_log(s, AV_LOG_ERROR, "cannot allocate metadata tag %s!\n", metadata_tag);
                return res;
            }
        }
        avio_skip(pb, data_size - (avio_tell(pb) - orig_pos) + (data_size & 1));
    }

    avio_seek(pb, iff->body_pos, SEEK_SET);

    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate);

        if (st->codec->codec_tag == ID_16SV)
            st->codec->codec_id = AV_CODEC_ID_PCM_S16BE_PLANAR;
        else if (st->codec->codec_tag == ID_MAUD) {
            if (iff->maud_bits == 8 && !iff->maud_compression) {
                st->codec->codec_id = AV_CODEC_ID_PCM_U8;
            } else if (iff->maud_bits == 16 && !iff->maud_compression) {
                st->codec->codec_id = AV_CODEC_ID_PCM_S16BE;
            } else if (iff->maud_bits ==  8 && iff->maud_compression == 2) {
                st->codec->codec_id = AV_CODEC_ID_PCM_ALAW;
            } else if (iff->maud_bits ==  8 && iff->maud_compression == 3) {
                st->codec->codec_id = AV_CODEC_ID_PCM_MULAW;
            } else {
                avpriv_request_sample(s, "compression %d and bit depth %d", iff->maud_compression, iff->maud_bits);
                return AVERROR_PATCHWELCOME;
            }
        } else if (st->codec->codec_tag != ID_DSD) {
            switch (iff->svx8_compression) {
            case COMP_NONE:
                st->codec->codec_id = AV_CODEC_ID_PCM_S8_PLANAR;
                break;
            case COMP_FIB:
                st->codec->codec_id = AV_CODEC_ID_8SVX_FIB;
                break;
            case COMP_EXP:
                st->codec->codec_id = AV_CODEC_ID_8SVX_EXP;
                break;
            default:
                av_log(s, AV_LOG_ERROR,
                       "Unknown SVX8 compression method '%d'\n", iff->svx8_compression);
                return -1;
            }
        }

        st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id);
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
        break;

    case AVMEDIA_TYPE_VIDEO:
        iff->bpp          = st->codec->bits_per_coded_sample;
        if ((screenmode & 0x800 /* Hold And Modify */) && iff->bpp <= 8) {
            iff->ham      = iff->bpp > 6 ? 6 : 4;
            st->codec->bits_per_coded_sample = 24;
        }
        iff->flags        = (screenmode & 0x80 /* Extra HalfBrite */) && iff->bpp <= 8;
        iff->masking      = masking;
        iff->transparency = transparency;

        if (!st->codec->extradata) {
            st->codec->extradata_size = IFF_EXTRA_VIDEO_SIZE;
            st->codec->extradata      = av_malloc(IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
        }
        av_assert0(st->codec->extradata_size >= IFF_EXTRA_VIDEO_SIZE);
        buf = st->codec->extradata;
        bytestream_put_be16(&buf, IFF_EXTRA_VIDEO_SIZE);
        bytestream_put_byte(&buf, iff->bitmap_compression);
        bytestream_put_byte(&buf, iff->bpp);
        bytestream_put_byte(&buf, iff->ham);
        bytestream_put_byte(&buf, iff->flags);
        bytestream_put_be16(&buf, iff->transparency);
        bytestream_put_byte(&buf, iff->masking);
        bytestream_put_buffer(&buf, iff->tvdc, sizeof(iff->tvdc));
        st->codec->codec_id = AV_CODEC_ID_IFF_ILBM;
        break;
    default:
        return -1;
    }

    return 0;
}
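
The extradata assembled at the end of iff_read_header() above is a small
private header for the IFF/ILBM decoder. The sketch below shows how the fixed
part of that layout could be read back with the bytestream2 API; it simply
mirrors the bytestream_put_* calls in the demuxer, and the struct and function
name are made up for illustration (the TVDC table that follows the fixed
fields is left out because its size is not visible in this excerpt).

#include "libavcodec/bytestream.h"

typedef struct IffVideoExtra {
    unsigned extra_size;    /* value of IFF_EXTRA_VIDEO_SIZE written first */
    int      compression;   /* bitmap compression method */
    int      bpp;           /* number of bitplanes */
    int      ham;           /* Hold-And-Modify bits, 0 if not HAM */
    int      ehb;           /* Extra HalfBrite flag */
    unsigned transparency;  /* transparent color index */
    unsigned masking;       /* masking technique */
} IffVideoExtra;

static void parse_iff_extradata(const uint8_t *buf, int size, IffVideoExtra *e)
{
    GetByteContext gb;

    bytestream2_init(&gb, buf, size);
    e->extra_size   = bytestream2_get_be16(&gb);
    e->compression  = bytestream2_get_byte(&gb);
    e->bpp          = bytestream2_get_byte(&gb);
    e->ham          = bytestream2_get_byte(&gb);
    e->ehb          = bytestream2_get_byte(&gb);
    e->transparency = bytestream2_get_be16(&gb);
    e->masking      = bytestream2_get_byte(&gb);
    /* the TVDC bytes follow here */
}
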
Beispiel #30
0
static int read_header(AVFormatContext *s)
{
    BinkDemuxContext *bink = s->priv_data;
    AVIOContext *pb = s->pb;
    uint32_t fps_num, fps_den;
    AVStream *vst, *ast;
    unsigned int i;
    uint32_t pos, next_pos;
    uint16_t flags;
    int keyframe;

    vst = avformat_new_stream(s, NULL);
    if (!vst)
        return AVERROR(ENOMEM);

    vst->codec->codec_tag = avio_rl32(pb);

    bink->file_size = avio_rl32(pb) + 8;
    vst->duration   = avio_rl32(pb);

    if (vst->duration > 1000000) {
        av_log(s, AV_LOG_ERROR, "invalid header: more than 1000000 frames\n");
        return AVERROR(EIO);
    }

    if (avio_rl32(pb) > bink->file_size) {
        av_log(s, AV_LOG_ERROR,
               "invalid header: largest frame size greater than file size\n");
        return AVERROR(EIO);
    }

    avio_skip(pb, 4);

    vst->codec->width  = avio_rl32(pb);
    vst->codec->height = avio_rl32(pb);

    fps_num = avio_rl32(pb);
    fps_den = avio_rl32(pb);
    if (fps_num == 0 || fps_den == 0) {
        av_log(s, AV_LOG_ERROR, "invalid header: invalid fps (%d/%d)\n", fps_num, fps_den);
        return AVERROR(EIO);
    }
    avpriv_set_pts_info(vst, 64, fps_den, fps_num);

    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id   = CODEC_ID_BINKVIDEO;
    vst->codec->extradata  = av_mallocz(4 + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!vst->codec->extradata)
        return AVERROR(ENOMEM);
    vst->codec->extradata_size = 4;
    avio_read(pb, vst->codec->extradata, 4);

    bink->num_audio_tracks = avio_rl32(pb);

    if (bink->num_audio_tracks > BINK_MAX_AUDIO_TRACKS) {
        av_log(s, AV_LOG_ERROR,
               "invalid header: more than "AV_STRINGIFY(BINK_MAX_AUDIO_TRACKS)" audio tracks (%d)\n",
               bink->num_audio_tracks);
        return AVERROR(EIO);
    }

    if (bink->num_audio_tracks) {
        avio_skip(pb, 4 * bink->num_audio_tracks);

        for (i = 0; i < bink->num_audio_tracks; i++) {
            ast = avformat_new_stream(s, NULL);
            if (!ast)
                return AVERROR(ENOMEM);
            ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
            ast->codec->codec_tag   = 0;
            ast->codec->sample_rate = avio_rl16(pb);
            avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
            flags = avio_rl16(pb);
            ast->codec->codec_id = flags & BINK_AUD_USEDCT ?
                                   CODEC_ID_BINKAUDIO_DCT : CODEC_ID_BINKAUDIO_RDFT;
            ast->codec->channels = flags & BINK_AUD_STEREO ? 2 : 1;
            ast->codec->extradata = av_mallocz(4 + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!ast->codec->extradata)
                return AVERROR(ENOMEM);
            ast->codec->extradata_size = 4;
            AV_WL32(ast->codec->extradata, vst->codec->codec_tag);
        }

        for (i = 0; i < bink->num_audio_tracks; i++)
            s->streams[i + 1]->id = avio_rl32(pb);
    }

    /* frame index table */
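    /* Each entry is a byte offset into the file; bit 0 doubles as a keyframe
     * flag and is masked out before the offset is used. */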
    next_pos = avio_rl32(pb);
    for (i = 0; i < vst->duration; i++) {
        pos = next_pos;
        if (i == vst->duration - 1) {
            next_pos = bink->file_size;
            keyframe = 0;
        } else {
            next_pos = avio_rl32(pb);
            keyframe = pos & 1;
        }
        pos &= ~1;
        next_pos &= ~1;

        if (next_pos <= pos) {
            av_log(s, AV_LOG_ERROR, "invalid frame index table\n");
            return AVERROR(EIO);
        }
        av_add_index_entry(vst, pos, i, next_pos - pos, 0,
                           keyframe ? AVINDEX_KEYFRAME : 0);
    }

    avio_skip(pb, 4);

    bink->current_track = -1;
    return 0;
}
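
A minimal usage sketch showing how the Bink demuxer driven by the read_header()
above is exercised through the public libavformat API of the same era. The
function name and the packet-counting purpose are made up for illustration,
and error handling is kept to the bare minimum.

#include <libavformat/avformat.h>

static int count_bink_video_packets(const char *filename)
{
    AVFormatContext *fmt = NULL;
    AVPacket pkt;
    int count = 0, ret;

    av_register_all();
    if ((ret = avformat_open_input(&fmt, filename, NULL, NULL)) < 0)
        return ret;

    /* read_header() has already set up the video stream and the frame index */
    while (av_read_frame(fmt, &pkt) >= 0) {
        if (fmt->streams[pkt.stream_index]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            count++;
        av_free_packet(&pkt);
    }

    avformat_close_input(&fmt);
    return count;
}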