Example #1
AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
{
    AVFilterContext *ret;

    if (!filter)
        return NULL;

    ret = av_mallocz(sizeof(AVFilterContext));
    if (!ret)
        return NULL;

    ret->av_class = &avfilter_class;
    ret->filter   = filter;
    ret->name     = inst_name ? av_strdup(inst_name) : NULL;
    if (filter->priv_size) {
        ret->priv     = av_mallocz(filter->priv_size);
        if (!ret->priv)
            goto err;
    }

    av_opt_set_defaults(ret);
    if (filter->priv_class) {
        *(const AVClass**)ret->priv = filter->priv_class;
        av_opt_set_defaults(ret->priv);
    }

    ret->internal = av_mallocz(sizeof(*ret->internal));
    if (!ret->internal)
        goto err;
    ret->internal->execute = default_execute;

    ret->nb_inputs = avfilter_pad_count(filter->inputs);
    if (ret->nb_inputs) {
        ret->input_pads   = av_malloc(sizeof(AVFilterPad) * ret->nb_inputs);
        if (!ret->input_pads)
            goto err;
        memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
        ret->inputs       = av_mallocz(sizeof(AVFilterLink*) * ret->nb_inputs);
        if (!ret->inputs)
            goto err;
    }

    ret->nb_outputs = avfilter_pad_count(filter->outputs);
    if (ret->nb_outputs) {
        ret->output_pads  = av_malloc(sizeof(AVFilterPad) * ret->nb_outputs);
        if (!ret->output_pads)
            goto err;
        memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
        ret->outputs      = av_mallocz(sizeof(AVFilterLink*) * ret->nb_outputs);
        if (!ret->outputs)
            goto err;
    }
#if FF_API_FOO_COUNT
    FF_DISABLE_DEPRECATION_WARNINGS
    ret->output_count = ret->nb_outputs;
    ret->input_count  = ret->nb_inputs;
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

    return ret;

err:
    av_freep(&ret->inputs);
    av_freep(&ret->input_pads);
    ret->nb_inputs = 0;
    av_freep(&ret->outputs);
    av_freep(&ret->output_pads);
    ret->nb_outputs = 0;
    av_freep(&ret->priv);
    av_freep(&ret->internal);
    av_free(ret);
    return NULL;
}
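
The error handling above is the classic allocate-then-unwind idiom: every later failure jumps to a single err label that releases whatever was already allocated, with av_freep() tolerating NULL and clearing the pointer it frees. A minimal standalone sketch of the same pattern, with a hypothetical Widget struct standing in for AVFilterContext:

#include <libavutil/mem.h>

typedef struct Widget {     /* hypothetical stand-in for AVFilterContext */
    char *name;
    int  *table;
} Widget;

/* Allocate a zero-initialized Widget; on any failure, unwind through
 * one error label, mirroring ff_filter_alloc() above. */
static Widget *widget_alloc(const char *name, int table_size)
{
    Widget *w = av_mallocz(sizeof(*w));
    if (!w)
        return NULL;

    w->name = av_strdup(name);
    if (!w->name)
        goto err;

    w->table = av_mallocz(table_size * sizeof(*w->table));
    if (!w->table)
        goto err;

    return w;

err:
    av_freep(&w->name);   /* safe on NULL, clears the pointer */
    av_freep(&w->table);
    av_free(w);
    return NULL;
}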
Example #2
void av_dump_format(AVFormatContext *ic, int index,
                    const char *url, int is_output)
{
    int i;
    uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
    if (ic->nb_streams && !printed)
        return;

    av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
           is_output ? "Output" : "Input",
           index,
           is_output ? ic->oformat->name : ic->iformat->name,
           is_output ? "to" : "from", url);
    dump_metadata(NULL, ic->metadata, "  ");

    if (!is_output) {
        av_log(NULL, AV_LOG_INFO, "  Duration: ");
        if (ic->duration != AV_NOPTS_VALUE) {
            int hours, mins, secs, us;
            int64_t duration = ic->duration + 5000;
            secs  = duration / AV_TIME_BASE;
            us    = duration % AV_TIME_BASE;
            mins  = secs / 60;
            secs %= 60;
            hours = mins / 60;
            mins %= 60;
            av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
                   (100 * us) / AV_TIME_BASE);
        } else {
            av_log(NULL, AV_LOG_INFO, "N/A");
        }
        if (ic->start_time != AV_NOPTS_VALUE) {
            int secs, us;
            av_log(NULL, AV_LOG_INFO, ", start: ");
            secs = ic->start_time / AV_TIME_BASE;
            us   = abs(ic->start_time % AV_TIME_BASE);
            av_log(NULL, AV_LOG_INFO, "%d.%06d",
                   secs, (int) av_rescale(us, 1000000, AV_TIME_BASE));
        }
        av_log(NULL, AV_LOG_INFO, ", bitrate: ");
        if (ic->bit_rate)
            av_log(NULL, AV_LOG_INFO, "%d kb/s", ic->bit_rate / 1000);
        else
            av_log(NULL, AV_LOG_INFO, "N/A");
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    for (i = 0; i < ic->nb_chapters; i++) {
        AVChapter *ch = ic->chapters[i];
        av_log(NULL, AV_LOG_INFO, "    Chapter #%d.%d: ", index, i);
        av_log(NULL, AV_LOG_INFO,
               "start %f, ", ch->start * av_q2d(ch->time_base));
        av_log(NULL, AV_LOG_INFO,
               "end %f\n", ch->end * av_q2d(ch->time_base));

        dump_metadata(NULL, ch->metadata, "    ");
    }

    if (ic->nb_programs) {
        int j, k, total = 0;
        for (j = 0; j < ic->nb_programs; j++) {
            AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
                                                  "name", NULL, 0);
            av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
                   name ? name->value : "");
            dump_metadata(NULL, ic->programs[j]->metadata, "    ");
            for (k = 0; k < ic->programs[j]->nb_stream_indexes; k++) {
                dump_stream_format(ic, ic->programs[j]->stream_index[k],
                                   index, is_output);
                printed[ic->programs[j]->stream_index[k]] = 1;
            }
            total += ic->programs[j]->nb_stream_indexes;
        }
        if (total < ic->nb_streams)
            av_log(NULL, AV_LOG_INFO, "  No Program\n");
    }

    for (i = 0; i < ic->nb_streams; i++)
        if (!printed[i])
            dump_stream_format(ic, i, index, is_output);

    av_free(printed);
}
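
The duration formatting is plain integer decomposition of an AV_TIME_BASE (microseconds) value, rounded up by 5000 us to the nearest centisecond. The same arithmetic, isolated into a standalone helper:

#include <stdio.h>
#include <stdint.h>
#include <libavutil/avutil.h>   /* AV_TIME_BASE */

/* Split a microsecond duration into HH:MM:SS.cc, with the same
 * +5000 us rounding to centiseconds as av_dump_format(). */
static void print_duration(int64_t duration_us)
{
    int64_t d = duration_us + 5000;
    int secs  = (int)(d / AV_TIME_BASE);
    int us    = (int)(d % AV_TIME_BASE);
    int mins  = secs / 60;
    int hours;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    printf("%02d:%02d:%02d.%02d\n", hours, mins, secs,
           (100 * us) / AV_TIME_BASE);
}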
Example #3
int configure_filtergraph(FilterGraph *fg)
{
    AVFilterInOut *inputs, *outputs, *cur;
    int ret, i, init = !fg->graph, simple = !fg->graph_desc;
    const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
                                      fg->graph_desc;

    avfilter_graph_free(&fg->graph);
    if (!(fg->graph = avfilter_graph_alloc()))
        return AVERROR(ENOMEM);

    if (simple) {
        OutputStream *ost = fg->outputs[0]->ost;
        char args[512];
        AVDictionaryEntry *e = NULL;

        snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
        fg->graph->scale_sws_opts = av_strdup(args);

        args[0] = '\0';
        while ((e = av_dict_get(fg->outputs[0]->ost->resample_opts, "", e,
                                AV_DICT_IGNORE_SUFFIX))) {
            av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
        }
        if (strlen(args))
            args[strlen(args) - 1] = '\0';
        fg->graph->resample_lavr_opts = av_strdup(args);
    }

    if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
        return ret;

    if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
        av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' does not have "
               "exactly one input and output.\n", graph_desc);
        return AVERROR(EINVAL);
    }

    for (cur = inputs; !simple && init && cur; cur = cur->next)
        init_input_filter(fg, cur);

    for (cur = inputs, i = 0; cur; cur = cur->next, i++)
        if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0)
            return ret;
    avfilter_inout_free(&inputs);

    if (!init || simple) {
        /* we already know the mappings between lavfi outputs and output streams,
         * so we can finish the setup */
        for (cur = outputs, i = 0; cur; cur = cur->next, i++)
            configure_output_filter(fg, fg->outputs[i], cur);
        avfilter_inout_free(&outputs);

        if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
            return ret;
    } else {
        /* wait until output mappings are processed */
        for (cur = outputs; cur;) {
            GROW_ARRAY(fg->outputs, fg->nb_outputs);
            if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
                exit(1);
            fg->outputs[fg->nb_outputs - 1]->graph   = fg;
            fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
            cur = cur->next;
            fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
        }
    }

    return 0;
}
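
GROW_ARRAY() above is a helper macro from ffmpeg.c that reallocates the fg->outputs table one slot at a time; the tool code simply calls exit(1) on failure. A hedged equivalent built on libavutil's av_realloc_array(), with illustrative names, that reports failure instead:

#include <libavutil/mem.h>

/* Append one zero-initialized element to a growing pointer array.
 * Returns the new element, or NULL on allocation failure (the
 * existing array is left intact in that case). */
static void *array_append(void ***array, int *nb, size_t elem_size)
{
    void **tmp = av_realloc_array(*array, *nb + 1, sizeof(**array));
    if (!tmp)
        return NULL;
    *array = tmp;

    tmp[*nb] = av_mallocz(elem_size);
    if (!tmp[*nb])
        return NULL;

    return tmp[(*nb)++];
}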
Example #4
static int libdirac_encode_frame(AVCodecContext *avccontext,
                                 unsigned char *frame,
                                 int buf_size, void *data)
{
    int enc_size = 0;
    dirac_encoder_state_t state;
    DiracEncoderParams     *p_dirac_params      = avccontext->priv_data;
    DiracSchroEncodedFrame *p_frame_output      = NULL;
    DiracSchroEncodedFrame *p_next_output_frame = NULL;
    int go = 1;
    int last_frame_in_sequence = 0;

    if (!data) {
        /* push end of sequence if not already signalled */
        if (!p_dirac_params->eos_signalled) {
            dirac_encoder_end_sequence(p_dirac_params->p_encoder);
            p_dirac_params->eos_signalled = 1;
        }
    } else {

        /* Copy the frame data into the Dirac input buffer.
         * Input line size may differ from what the codec supports,
         * especially when transcoding from one format to another.
         * So use avpicture_layout to copy the frame. */
        avpicture_layout((AVPicture *)data, avccontext->pix_fmt,
                         avccontext->width, avccontext->height,
                         p_dirac_params->p_in_frame_buf,
                         p_dirac_params->frame_size);

        /* load next frame */
        if (dirac_encoder_load(p_dirac_params->p_encoder,
                               p_dirac_params->p_in_frame_buf,
                               p_dirac_params->frame_size) < 0) {
            av_log(avccontext, AV_LOG_ERROR, "Unrecoverable Encoder Error."
                   " dirac_encoder_load failed...\n");
            return -1;
        }
    }

    if (p_dirac_params->eos_pulled)
        go = 0;

    while (go) {
        p_dirac_params->p_encoder->enc_buf.buffer = frame;
        p_dirac_params->p_encoder->enc_buf.size   = buf_size;
        /* process frame */
        state = dirac_encoder_output(p_dirac_params->p_encoder);

        switch (state) {
        case ENC_STATE_AVAIL:
        case ENC_STATE_EOS:
            assert(p_dirac_params->p_encoder->enc_buf.size > 0);

            /* All non-frame data is prepended to actual frame data to
             * be able to set the pts correctly. So we don't write data
             * to the frame output queue until we actually have a frame
             */

            p_dirac_params->enc_buf = av_realloc(p_dirac_params->enc_buf,
                                                 p_dirac_params->enc_buf_size +
                                                 p_dirac_params->p_encoder->enc_buf.size);
            memcpy(p_dirac_params->enc_buf + p_dirac_params->enc_buf_size,
                   p_dirac_params->p_encoder->enc_buf.buffer,
                   p_dirac_params->p_encoder->enc_buf.size);

            p_dirac_params->enc_buf_size += p_dirac_params->p_encoder->enc_buf.size;

            if (state == ENC_STATE_EOS) {
                p_dirac_params->eos_pulled = 1;
                go = 0;
            }

            /* If non-frame data, don't output it until we get an
             * encoded frame back from the encoder. */
            if (p_dirac_params->p_encoder->enc_pparams.pnum == -1)
                break;

            /* create output frame */
            p_frame_output = av_mallocz(sizeof(DiracSchroEncodedFrame));
            /* set output data */
            p_frame_output->size      = p_dirac_params->enc_buf_size;
            p_frame_output->p_encbuf  = p_dirac_params->enc_buf;
            p_frame_output->frame_num = p_dirac_params->p_encoder->enc_pparams.pnum;

            if (p_dirac_params->p_encoder->enc_pparams.ptype == INTRA_PICTURE &&
                    p_dirac_params->p_encoder->enc_pparams.rtype == REFERENCE_PICTURE)
                p_frame_output->key_frame = 1;

            ff_dirac_schro_queue_push_back(&p_dirac_params->enc_frame_queue,
                                           p_frame_output);

            p_dirac_params->enc_buf_size = 0;
            p_dirac_params->enc_buf      = NULL;
            break;

        case ENC_STATE_BUFFER:
            go = 0;
            break;

        case ENC_STATE_INVALID:
            av_log(avccontext, AV_LOG_ERROR,
                   "Unrecoverable Dirac Encoder Error. Quitting...\n");
            return -1;

        default:
            av_log(avccontext, AV_LOG_ERROR, "Unknown Dirac Encoder state\n");
            return -1;
        }
    }

    /* copy 'next' frame in queue */

    if (p_dirac_params->enc_frame_queue.size == 1 && p_dirac_params->eos_pulled)
        last_frame_in_sequence = 1;

    p_next_output_frame = ff_dirac_schro_queue_pop(&p_dirac_params->enc_frame_queue);

    if (!p_next_output_frame)
        return 0;

    memcpy(frame, p_next_output_frame->p_encbuf, p_next_output_frame->size);
    avccontext->coded_frame->key_frame = p_next_output_frame->key_frame;
    /* Use the frame number of the encoded frame as the pts. It is OK to do
     * so since Dirac is a constant framerate codec. It expects input to be
     * of constant framerate. */
    avccontext->coded_frame->pts = p_next_output_frame->frame_num;
    enc_size = p_next_output_frame->size;

    /* Append the end of sequence information to the last frame in the
     * sequence. */
    if (last_frame_in_sequence && p_dirac_params->enc_buf_size > 0) {
        memcpy(frame + enc_size, p_dirac_params->enc_buf,
               p_dirac_params->enc_buf_size);
        enc_size += p_dirac_params->enc_buf_size;
        av_freep(&p_dirac_params->enc_buf);
        p_dirac_params->enc_buf_size = 0;
    }

    /* free frame */
    DiracFreeFrame(p_next_output_frame);

    return enc_size;
}
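
One caveat in the encoder above: p_dirac_params->enc_buf = av_realloc(p_dirac_params->enc_buf, ...) overwrites the only pointer to the old buffer, so a failed av_realloc() would leak it and the following memcpy() would dereference NULL. The usual append idiom keeps the result in a temporary first; a sketch with hypothetical names:

#include <stdint.h>
#include <string.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

/* Append 'add' bytes of 'src' to a growing buffer. Keeping the
 * av_realloc() result in a temporary preserves *buf on failure. */
static int append_bytes(uint8_t **buf, int *size,
                        const uint8_t *src, int add)
{
    uint8_t *tmp = av_realloc(*buf, *size + add);
    if (!tmp)
        return AVERROR(ENOMEM);
    memcpy(tmp + *size, src, add);
    *buf   = tmp;
    *size += add;
    return 0;
}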
Example #5
static int wsvqa_read_packet(AVFormatContext *s,
                             AVPacket *pkt)
{
    WsVqaDemuxContext *wsvqa = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = -1;
    unsigned char preamble[VQA_PREAMBLE_SIZE];
    unsigned int chunk_type;
    unsigned int chunk_size;
    int skip_byte;

    while (avio_read(pb, preamble, VQA_PREAMBLE_SIZE) == VQA_PREAMBLE_SIZE) {
        chunk_type = AV_RB32(&preamble[0]);
        chunk_size = AV_RB32(&preamble[4]);
        skip_byte = chunk_size & 0x01;

        if ((chunk_type == SND0_TAG) || (chunk_type == SND1_TAG) ||
            (chunk_type == SND2_TAG) || (chunk_type == VQFR_TAG)) {

            if (av_new_packet(pkt, chunk_size))
                return AVERROR(EIO);
            ret = avio_read(pb, pkt->data, chunk_size);
            if (ret != chunk_size) {
                av_packet_unref(pkt);
                return AVERROR(EIO);
            }

            switch (chunk_type) {
            case SND0_TAG:
            case SND1_TAG:
            case SND2_TAG:
                if (wsvqa->audio_stream_index == -1) {
                    AVStream *st = avformat_new_stream(s, NULL);
                    if (!st)
                        return AVERROR(ENOMEM);

                    wsvqa->audio_stream_index = st->index;
                    if (!wsvqa->sample_rate)
                        wsvqa->sample_rate = 22050;
                    if (!wsvqa->channels)
                        wsvqa->channels = 1;
                    if (!wsvqa->bps)
                        wsvqa->bps = 8;
                    st->codec->sample_rate = wsvqa->sample_rate;
                    st->codec->bits_per_coded_sample = wsvqa->bps;
                    st->codec->channels = wsvqa->channels;
                    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

                    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);

                    switch (chunk_type) {
                    case SND0_TAG:
                        if (wsvqa->bps == 16)
                            st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
                        else
                            st->codec->codec_id = AV_CODEC_ID_PCM_U8;
                        break;
                    case SND1_TAG:
                        st->codec->codec_id = AV_CODEC_ID_WESTWOOD_SND1;
                        break;
                    case SND2_TAG:
                        st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WS;
                        st->codec->extradata_size = 2;
                        st->codec->extradata = av_mallocz(2 + AV_INPUT_BUFFER_PADDING_SIZE);
                        if (!st->codec->extradata)
                            return AVERROR(ENOMEM);
                        AV_WL16(st->codec->extradata, wsvqa->version);
                        break;
                    }
                }

                pkt->stream_index = wsvqa->audio_stream_index;
                switch (chunk_type) {
                case SND1_TAG:
                    /* unpacked size is stored in header */
                    pkt->duration = AV_RL16(pkt->data) / wsvqa->channels;
                    break;
                case SND2_TAG:
                    /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
                    pkt->duration = (chunk_size * 2) / wsvqa->channels;
                    break;
                }
                break;
            case VQFR_TAG:
                pkt->stream_index = wsvqa->video_stream_index;
                pkt->duration = 1;
                break;
            }

            /* stay on 16-bit alignment */
            if (skip_byte)
                avio_skip(pb, 1);

            return ret;
        } else {
            switch(chunk_type){
            case CMDS_TAG:
                break;
            default:
                av_log(s, AV_LOG_INFO, "Skipping unknown chunk 0x%08X\n", chunk_type);
            }
            avio_skip(pb, chunk_size + skip_byte);
        }
    }

    return ret;
}
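
The skip_byte handling keeps the demuxer on 16-bit chunk alignment: an odd-sized chunk body is followed by one pad byte. Condensed into a helper (name illustrative):

#include <libavformat/avio.h>

/* Skip a chunk body plus its optional pad byte: VQA chunks stay on
 * 16-bit alignment, so an odd size is followed by one extra byte. */
static void skip_chunk(AVIOContext *pb, unsigned int chunk_size)
{
    avio_skip(pb, chunk_size + (chunk_size & 1));
}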
Example #6
static int get_http_header_data(MMSHContext *mmsh)
{
    MMSContext *mms = &mmsh->mms;
    int res, len;
    ChunkType chunk_type;

    for (;;) {
        len = 0;
        res = chunk_type = get_chunk_header(mmsh, &len);
        if (res < 0) {
            return res;
        } else if (chunk_type == CHUNK_TYPE_ASF_HEADER){
            // get the ASF header and store it
            if (!mms->header_parsed) {
                if (mms->asf_header) {
                    if (len != mms->asf_header_size) {
                        av_dlog(NULL, "Header len changed from %d to %d\n",
                                mms->asf_header_size, len);
                        mms->asf_header_size = len;
                        av_freep(&mms->asf_header);
                    }
                }
                /* (re)allocate only if we no longer hold a buffer */
                if (!mms->asf_header) {
                    mms->asf_header = av_mallocz(len);
                    if (!mms->asf_header) {
                        return AVERROR(ENOMEM);
                    }
                    mms->asf_header_size = len;
                }
            }
            if (len > mms->asf_header_size) {
                av_log(NULL, AV_LOG_ERROR,
                       "Asf header packet len = %d exceed the asf header buf size %d\n",
                       len, mms->asf_header_size);
                return AVERROR(EIO);
            }
            res = ffurl_read_complete(mms->mms_hd, mms->asf_header, len);
            if (res != len) {
                av_log(NULL, AV_LOG_ERROR,
                       "Recv asf header data len %d != expected len %d\n", res, len);
                return AVERROR(EIO);
            }
            mms->asf_header_size = len;
            if (!mms->header_parsed) {
                res = ff_mms_asf_header_parser(mms);
                mms->header_parsed = 1;
                return res;
            }
        } else if (chunk_type == CHUNK_TYPE_DATA) {
            // read data packet and do padding
            return read_data_packet(mmsh, len);
        } else {
            if (len) {
                if (len > sizeof(mms->in_buffer)) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Other packet len = %d exceed the in_buffer size %zu\n",
                           len, sizeof(mms->in_buffer));
                    return AVERROR(EIO);
                }
                res = ffurl_read_complete(mms->mms_hd, mms->in_buffer, len);
                if (res != len) {
                    av_log(NULL, AV_LOG_ERROR, "Read other chunk type data failed!\n");
                    return AVERROR(EIO);
                } else {
                    av_dlog(NULL, "Skip chunk type %d \n", chunk_type);
                    continue;
                }
            }
        }
    }
}
Example #7
int ff_rtsp_setup_output_streams(AVFormatContext *s, const char *addr)
{
    RTSPState *rt = s->priv_data;
    RTSPMessageHeader reply1, *reply = &reply1;
    int i;
    char *sdp;
    AVFormatContext sdp_ctx, *ctx_array[1];

    s->start_time_realtime = av_gettime();

    /* Announce the stream */
    sdp = av_mallocz(SDP_MAX_SIZE);
    if (sdp == NULL)
        return AVERROR(ENOMEM);
    /* We create the SDP based on the RTSP AVFormatContext where we
     * aren't allowed to change the filename field. (We create the SDP
     * based on the RTSP context since the contexts for the RTP streams
     * don't exist yet.) In order to specify a custom URL with the actual
     * peer IP instead of the originally specified hostname, we create
     * a temporary copy of the AVFormatContext, where the custom URL is set.
     *
     * FIXME: Create the SDP without copying the AVFormatContext.
     * This either requires setting up the RTP stream AVFormatContexts
     * already here (complicating things immensely) or getting a more
     * flexible SDP creation interface.
     */
    sdp_ctx = *s;
    ff_url_join(sdp_ctx.filename, sizeof(sdp_ctx.filename),
                "rtsp", NULL, addr, -1, NULL);
    ctx_array[0] = &sdp_ctx;
    if (av_sdp_create(ctx_array, 1, sdp, SDP_MAX_SIZE)) {
        av_free(sdp);
        return AVERROR_INVALIDDATA;
    }
    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sdp);
    ff_rtsp_send_cmd_with_content(s, "ANNOUNCE", rt->control_uri,
                                  "Content-Type: application/sdp\r\n",
                                  reply, NULL, sdp, strlen(sdp));
    av_free(sdp);
    if (reply->status_code != RTSP_STATUS_OK)
        return AVERROR_INVALIDDATA;

    /* Set up the RTSPStreams for each AVStream */
    for (i = 0; i < s->nb_streams; i++) {
        RTSPStream *rtsp_st;

        rtsp_st = av_mallocz(sizeof(RTSPStream));
        if (!rtsp_st)
            return AVERROR(ENOMEM);
        dynarray_add(&rt->rtsp_streams, &rt->nb_rtsp_streams, rtsp_st);

        rtsp_st->stream_index = i;

        av_strlcpy(rtsp_st->control_url, rt->control_uri, sizeof(rtsp_st->control_url));
        /* Note, this must match the relative uri set in the sdp content */
        av_strlcatf(rtsp_st->control_url, sizeof(rtsp_st->control_url),
                    "/streamid=%d", i);
    }

    return 0;
}
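
dynarray_add() here is FFmpeg's internal macro around av_dynarray_add(), which grows a pointer table by one entry and, if the reallocation fails, frees the table and resets the count. A hedged usage sketch with a stand-in Item type in place of RTSPStream:

#include <libavutil/error.h>
#include <libavutil/mem.h>

typedef struct Item { int id; } Item;   /* stand-in for RTSPStream */

static int add_item(Item ***items, int *nb_items, int id)
{
    Item *it = av_mallocz(sizeof(*it));
    if (!it)
        return AVERROR(ENOMEM);
    it->id = id;

    /* grows the pointer table by one; on allocation failure the
     * table is freed, *items is set to NULL and *nb_items to 0 */
    av_dynarray_add(items, nb_items, it);
    if (!*items) {
        av_free(it);
        return AVERROR(ENOMEM);
    }
    return 0;
}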
Example #8
int av_vc1_decode_frame(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int *nFrameSize)
{
    int n_slices = 0, i;
    VC1Context *v = avctx->priv_data;
    MpegEncContext *s = &v->s;
    uint8_t *buf2 = NULL;
    const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
    struct {
        uint8_t *buf;
        GetBitContext gb;
        int mby_start;
    } *slices = NULL, *tmp;

    v->second_field = 0;
    *nFrameSize = 0;

    /* for advanced profile we may need to parse and unescape data */
    if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
        int buf_size2 = 0;
        buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!buf2)
            return AVERROR(ENOMEM);

        if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
            const uint8_t *start, *end, *next;
            int size;

            next = buf;
            for (start = buf, end = buf + buf_size; next < end; start = next) {
                next = find_next_marker(start + 4, end);
                size = next - start - 4;
                if (size <= 0) continue;
                switch (AV_RB32(start)) {
                case VC1_CODE_FRAME:
                    buf_start = start;
                    buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
                    break;
                case VC1_CODE_FIELD: {
                    int buf_size3;
                    buf_start_second_field = start;
                    tmp = av_realloc(slices, sizeof(*slices) * (n_slices + 1));
                    if (!tmp)
                        goto err;
                    slices = tmp;
                    slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
                    if (!slices[n_slices].buf)
                        goto err;
                    buf_size3 = vc1_unescape_buffer(start + 4, size,
                                                    slices[n_slices].buf);
                    init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
                                  buf_size3 << 3);
                    /* assuming that the field marker is at the exact middle,
                       hope it's correct */
                    slices[n_slices].mby_start = s->mb_height >> 1;
                    n_slices++;
                    break;
                }
                case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
                    buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
                    init_get_bits(&s->gb, buf2, buf_size2 * 8);
                    ff_vc1_decode_entry_point(avctx, v, &s->gb);
                    break;
                case VC1_CODE_SLICE: {
                    int buf_size3;
                    tmp = av_realloc(slices, sizeof(*slices) * (n_slices + 1));
                    if (!tmp)
                        goto err;
                    slices = tmp;
                    slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
                    if (!slices[n_slices].buf)
                        goto err;
                    buf_size3 = vc1_unescape_buffer(start + 4, size,
                                                    slices[n_slices].buf);
                    init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
                                  buf_size3 << 3);
                    slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
                    n_slices++;
                    break;
                }
                }
            }
        } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
Example #9
// Split packets and add them to the waiting_buffers list. We don't queue them
// immediately, because it can happen that the decoder is temporarily blocked
// (due to us not reading/returning enough output buffers) and won't accept
// new input. (This wouldn't be an issue if MMAL input buffers always were
// complete frames - then the input buffer just would have to be big enough.)
// If is_extradata is set, send it as MMAL_BUFFER_HEADER_FLAG_CONFIG.
static int ffmmal_add_packet(AVCodecContext *avctx, AVPacket *avpkt,
                             int is_extradata)
{
    MMALDecodeContext *ctx = avctx->priv_data;
    AVBufferRef *buf = NULL;
    int size = 0;
    uint8_t *data = (uint8_t *)"";
    uint8_t *start;
    int ret = 0;

    if (avpkt->size) {
        if (avpkt->buf) {
            buf = av_buffer_ref(avpkt->buf);
            size = avpkt->size;
            data = avpkt->data;
        } else {
            buf = av_buffer_alloc(avpkt->size);
            if (buf) {
                memcpy(buf->data, avpkt->data, avpkt->size);
                size = buf->size;
                data = buf->data;
            }
        }
        if (!buf) {
            ret = AVERROR(ENOMEM);
            goto done;
        }
        if (!is_extradata)
            ctx->packets_sent++;
    } else {
        if (!ctx->packets_sent) {
            // Short-cut the flush logic to avoid upsetting MMAL.
            ctx->eos_sent = 1;
            ctx->eos_received = 1;
            goto done;
        }
    }

    start = data;

    do {
        FFBufferEntry *buffer = av_mallocz(sizeof(*buffer));
        if (!buffer) {
            ret = AVERROR(ENOMEM);
            goto done;
        }

        buffer->data = data;
        buffer->length = FFMIN(size, ctx->decoder->input[0]->buffer_size);

        if (is_extradata)
            buffer->flags |= MMAL_BUFFER_HEADER_FLAG_CONFIG;

        if (data == start)
            buffer->flags |= MMAL_BUFFER_HEADER_FLAG_FRAME_START;

        data += buffer->length;
        size -= buffer->length;

        buffer->pts = avpkt->pts == AV_NOPTS_VALUE ? MMAL_TIME_UNKNOWN : avpkt->pts;
        buffer->dts = avpkt->dts == AV_NOPTS_VALUE ? MMAL_TIME_UNKNOWN : avpkt->dts;

        if (!size) {
            buffer->flags |= MMAL_BUFFER_HEADER_FLAG_FRAME_END;
            avpriv_atomic_int_add_and_fetch(&ctx->packets_buffered, 1);
        }

        if (!buffer->length) {
            buffer->flags |= MMAL_BUFFER_HEADER_FLAG_EOS;
            ctx->eos_sent = 1;
        }

        if (buf) {
            buffer->ref = av_buffer_ref(buf);
            if (!buffer->ref) {
                av_free(buffer);
                ret = AVERROR(ENOMEM);
                goto done;
            }
        }

        // Insert at end of the list
        if (!ctx->waiting_buffers)
            ctx->waiting_buffers = buffer;
        if (ctx->waiting_buffers_tail)
            ctx->waiting_buffers_tail->next = buffer;
        ctx->waiting_buffers_tail = buffer;
    } while (size);

done:
    av_buffer_unref(&buf);
    return ret;
}
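
The waiting_buffers bookkeeping at the end of the loop is a head-plus-tail singly linked list append: the head is set only for the first node and the tail always advances, giving O(1) inserts. Isolated into a sketch (Node stands in for FFBufferEntry):

#include <libavutil/error.h>
#include <libavutil/mem.h>

typedef struct Node {
    struct Node *next;
    int payload;
} Node;

/* O(1) tail append: head is set once, tail always advances. */
static int list_append(Node **head, Node **tail, int payload)
{
    Node *n = av_mallocz(sizeof(*n));
    if (!n)
        return AVERROR(ENOMEM);
    n->payload = payload;

    if (!*head)
        *head = n;
    if (*tail)
        (*tail)->next = n;
    *tail = n;
    return 0;
}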
Example #10
/**
 * Decode Smacker audio data
 */
static int smka_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    SmackerAudioContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    GetBitContext gb;
    HuffContext h[4];
    VLC vlc[4];
    int16_t *samples;
    uint8_t *samples8;
    int val;
    int i, res, ret;
    int unp_size;
    int bits, stereo;
    int pred[2] = {0, 0};

    if (buf_size <= 4) {
        av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
        return AVERROR(EINVAL);
    }

    unp_size = AV_RL32(buf);

    init_get_bits(&gb, buf + 4, (buf_size - 4) * 8);

    if(!get_bits1(&gb)){
        av_log(avctx, AV_LOG_INFO, "Sound: no data\n");
        *got_frame_ptr = 0;
        return 1;
    }
    stereo = get_bits1(&gb);
    bits = get_bits1(&gb);
    if (stereo ^ (avctx->channels != 1)) {
        av_log(avctx, AV_LOG_ERROR, "channels mismatch\n");
        return AVERROR(EINVAL);
    }
    if (bits && avctx->sample_fmt == AV_SAMPLE_FMT_U8) {
        av_log(avctx, AV_LOG_ERROR, "sample format mismatch\n");
        return AVERROR(EINVAL);
    }

    /* get output buffer */
    s->frame.nb_samples = unp_size / (avctx->channels * (bits + 1));
    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    samples  = (int16_t *)s->frame.data[0];
    samples8 =            s->frame.data[0];

    memset(vlc, 0, sizeof(VLC) * 4);
    memset(h, 0, sizeof(HuffContext) * 4);
    // Initialize the per-channel Huffman contexts and build their VLC tables
    for(i = 0; i < (1 << (bits + stereo)); i++) {
        h[i].length = 256;
        h[i].maxlength = 0;
        h[i].current = 0;
        h[i].bits = av_mallocz(256 * 4);
        h[i].lengths = av_mallocz(256 * sizeof(int));
        h[i].values = av_mallocz(256 * sizeof(int));
        skip_bits1(&gb);
        smacker_decode_tree(&gb, &h[i], 0, 0);
        skip_bits1(&gb);
        if(h[i].current > 1) {
            res = init_vlc(&vlc[i], SMKTREE_BITS, h[i].length,
                    h[i].lengths, sizeof(int), sizeof(int),
                    h[i].bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
            if(res < 0) {
                av_log(avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
                ret = -1;
                goto error;
            }
        }
    }
    if(bits) { //decode 16-bit data
        for(i = stereo; i >= 0; i--)
            pred[i] = av_bswap16(get_bits(&gb, 16));
        for(i = 0; i <= stereo; i++)
            *samples++ = pred[i];
        for(; i < unp_size / 2; i++) {
            if(get_bits_left(&gb)<0) {
                ret = -1;
                goto error;
            }
            if(i & stereo) {
                if(vlc[2].table)
                    res = get_vlc2(&gb, vlc[2].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                val  = h[2].values[res];
                if(vlc[3].table)
                    res = get_vlc2(&gb, vlc[3].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                val |= h[3].values[res] << 8;
                pred[1] += sign_extend(val, 16);
                *samples++ = av_clip_int16(pred[1]);
            } else {
                if(vlc[0].table)
                    res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                val  = h[0].values[res];
                if(vlc[1].table)
                    res = get_vlc2(&gb, vlc[1].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                val |= h[1].values[res] << 8;
                pred[0] += sign_extend(val, 16);
                *samples++ = av_clip_int16(pred[0]);
            }
        }
    } else { //8-bit data
        for(i = stereo; i >= 0; i--)
            pred[i] = get_bits(&gb, 8);
        for(i = 0; i <= stereo; i++)
            *samples8++ = pred[i];
        for(; i < unp_size; i++) {
            if(get_bits_left(&gb)<0) {
                ret = -1;
                goto error;
            }
            if(i & stereo){
                if(vlc[1].table)
                    res = get_vlc2(&gb, vlc[1].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                pred[1] += sign_extend(h[1].values[res], 8);
                *samples8++ = av_clip_uint8(pred[1]);
            } else {
                if(vlc[0].table)
                    res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                pred[0] += sign_extend(h[0].values[res], 8);
                *samples8++ = av_clip_uint8(pred[0]);
            }
        }
    }

    ret = 0;
error: /* free the VLC tables and Huffman buffers on all paths */
    for(i = 0; i < 4; i++) {
        if(vlc[i].table)
            free_vlc(&vlc[i]);
        av_free(h[i].bits);
        av_free(h[i].lengths);
        av_free(h[i].values);
    }

    if (ret < 0)
        return ret;

    *got_frame_ptr   = 1;
    *(AVFrame *)data = s->frame;

    return buf_size;
}
Example #11
STDMETHODIMP CDecAvcodec::InitDecoder(AVCodecID codec, const CMediaType *pmt)
{
  DestroyDecoder();
  DbgLog((LOG_TRACE, 10, L"Initializing ffmpeg for codec %S", avcodec_get_name(codec)));

  BITMAPINFOHEADER *pBMI = NULL;
  videoFormatTypeHandler((const BYTE *)pmt->Format(), pmt->FormatType(), &pBMI);

  m_pAVCodec = avcodec_find_decoder(codec);
  CheckPointer(m_pAVCodec, VFW_E_UNSUPPORTED_VIDEO);

  m_pAVCtx = avcodec_alloc_context3(m_pAVCodec);
  CheckPointer(m_pAVCtx, E_POINTER);

  if(codec == AV_CODEC_ID_MPEG1VIDEO || codec == AV_CODEC_ID_MPEG2VIDEO || pmt->subtype == FOURCCMap(MKTAG('H','2','6','4')) || pmt->subtype == FOURCCMap(MKTAG('h','2','6','4'))) {
    m_pParser = av_parser_init(codec);
  }

  DWORD dwDecFlags = m_pCallback->GetDecodeFlags();

  LONG biRealWidth = pBMI->biWidth, biRealHeight = pBMI->biHeight;
  if (pmt->formattype == FORMAT_VideoInfo || pmt->formattype == FORMAT_MPEGVideo) {
    VIDEOINFOHEADER *vih = (VIDEOINFOHEADER *)pmt->Format();
    if (vih->rcTarget.right != 0 && vih->rcTarget.bottom != 0) {
      biRealWidth  = vih->rcTarget.right;
      biRealHeight = vih->rcTarget.bottom;
    }
  } else if (pmt->formattype == FORMAT_VideoInfo2 || pmt->formattype == FORMAT_MPEG2Video) {
    VIDEOINFOHEADER2 *vih2 = (VIDEOINFOHEADER2 *)pmt->Format();
    if (vih2->rcTarget.right != 0 && vih2->rcTarget.bottom != 0) {
      biRealWidth  = vih2->rcTarget.right;
      biRealHeight = vih2->rcTarget.bottom;
    }
  }

  m_pAVCtx->codec_id              = codec;
  m_pAVCtx->codec_tag             = pBMI->biCompression;
  m_pAVCtx->coded_width           = pBMI->biWidth;
  m_pAVCtx->coded_height          = abs(pBMI->biHeight);
  m_pAVCtx->bits_per_coded_sample = pBMI->biBitCount;
  m_pAVCtx->error_concealment     = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
  m_pAVCtx->err_recognition       = AV_EF_CAREFUL;
  m_pAVCtx->workaround_bugs       = FF_BUG_AUTODETECT;
  m_pAVCtx->refcounted_frames     = 1;

  if (codec == AV_CODEC_ID_H264)
    m_pAVCtx->flags2             |= CODEC_FLAG2_SHOW_ALL;

  // Setup threading
  int thread_type = getThreadFlags(codec);
  if (thread_type) {
    // Thread Count. 0 = auto detect
    int thread_count = m_pSettings->GetNumThreads();
    if (thread_count == 0) {
      thread_count = av_cpu_count() * 3 / 2;
    }

    m_pAVCtx->thread_count = max(1, min(thread_count, AVCODEC_MAX_THREADS));
    m_pAVCtx->thread_type = thread_type;
  } else {
    m_pAVCtx->thread_count = 1;
  }

  if (dwDecFlags & LAV_VIDEO_DEC_FLAG_NO_MT) {
    m_pAVCtx->thread_count = 1;
  }

  m_pFrame = av_frame_alloc();
  CheckPointer(m_pFrame, E_POINTER);

  m_h264RandomAccess.SetAVCNALSize(0);

  // Process Extradata
  BYTE *extra = NULL;
  size_t extralen = 0;
  getExtraData(*pmt, NULL, &extralen);

  BOOL bH264avc = FALSE;
  if (extralen > 0) {
    DbgLog((LOG_TRACE, 10, L"-> Processing extradata of %d bytes", extralen));
    // Reconstruct AVC1 extradata format
    if (pmt->formattype == FORMAT_MPEG2Video && (m_pAVCtx->codec_tag == MAKEFOURCC('a','v','c','1') || m_pAVCtx->codec_tag == MAKEFOURCC('A','V','C','1') || m_pAVCtx->codec_tag == MAKEFOURCC('C','C','V','1'))) {
      MPEG2VIDEOINFO *mp2vi = (MPEG2VIDEOINFO *)pmt->Format();
      extralen += 7;
      extra = (uint8_t *)av_mallocz(extralen + FF_INPUT_BUFFER_PADDING_SIZE);
      extra[0] = 1;
      extra[1] = (BYTE)mp2vi->dwProfile;
      extra[2] = 0;
      extra[3] = (BYTE)mp2vi->dwLevel;
      extra[4] = (BYTE)(mp2vi->dwFlags ? mp2vi->dwFlags : 4) - 1;

      // Actually copy the metadata into our new buffer
      size_t actual_len;
      getExtraData(*pmt, extra+6, &actual_len);

      // Count the number of SPS/PPS in them and set the length
      // We'll put them all into one block and add a second block with 0 elements afterwards
      // The parsing logic does not care what type they are, it just expects 2 blocks.
      BYTE *p = extra+6, *end = extra+6+actual_len;
      BOOL bSPS = FALSE, bPPS = FALSE;
      int count = 0;
      while (p+1 < end) {
        unsigned len = (((unsigned)p[0] << 8) | p[1]) + 2;
        if (p + len > end) {
          break;
        }
        if ((p[2] & 0x1F) == 7)
          bSPS = TRUE;
        if ((p[2] & 0x1F) == 8)
          bPPS = TRUE;
        count++;
        p += len;
      }
      extra[5] = count;
      extra[extralen-1] = 0;

      bH264avc = TRUE;
      m_h264RandomAccess.SetAVCNALSize(mp2vi->dwFlags);
    } else if (pmt->subtype == MEDIASUBTYPE_LAV_RAWVIDEO) {
      if (extralen < sizeof(m_pAVCtx->pix_fmt)) {
        DbgLog((LOG_TRACE, 10, L"-> LAV RAW Video extradata is missing.."));
      } else {
        extra = (uint8_t *)av_mallocz(extralen + FF_INPUT_BUFFER_PADDING_SIZE);
        getExtraData(*pmt, extra, NULL);
        m_pAVCtx->pix_fmt = *(AVPixelFormat *)extra;
        extralen -= sizeof(AVPixelFormat);
        memmove(extra, extra+sizeof(AVPixelFormat), extralen);
      }
    } else {
      // Just copy extradata for other formats
      extra = (uint8_t *)av_mallocz(extralen + FF_INPUT_BUFFER_PADDING_SIZE);
      getExtraData(*pmt, extra, NULL);
    }
    // Hack to discard invalid MP4 metadata with AnnexB style video
    if (codec == AV_CODEC_ID_H264 && !bH264avc && extra[0] == 1) {
      av_freep(&extra);
      extralen = 0;
    }
    m_pAVCtx->extradata = extra;
    m_pAVCtx->extradata_size = (int)extralen;
  } else {
    if (codec == AV_CODEC_ID_VP6 || codec == AV_CODEC_ID_VP6A || codec == AV_CODEC_ID_VP6F) {
      int cropH = pBMI->biWidth - biRealWidth;
      int cropV = pBMI->biHeight - biRealHeight;
      if (cropH >= 0 && cropH <= 0x0f && cropV >= 0 && cropV <= 0x0f) {
        m_pAVCtx->extradata = (uint8_t *)av_mallocz(1 + FF_INPUT_BUFFER_PADDING_SIZE);
        m_pAVCtx->extradata_size = 1;
        m_pAVCtx->extradata[0] = (cropH << 4) | cropV;
      }
    }
  }

  m_h264RandomAccess.flush(m_pAVCtx->thread_count);
  m_CurrentThread = 0;
  m_rtStartCache = AV_NOPTS_VALUE;

  LAVPinInfo lavPinInfo = {0};
  BOOL bLAVInfoValid = SUCCEEDED(m_pCallback->GetLAVPinInfo(lavPinInfo));

  m_bInputPadded = dwDecFlags & LAV_VIDEO_DEC_FLAG_LAVSPLITTER;

  // Setup codec-specific timing logic
  BOOL bVC1IsPTS = (codec == AV_CODEC_ID_VC1 && !(dwDecFlags & LAV_VIDEO_DEC_FLAG_VC1_DTS));

  // Use ffmpeg's logic to reorder timestamps
  // This is required for H264 content (except AVI), and generally for all codecs that use frame threading
  // VC-1 is also a special case. It's required for splitters that deliver PTS timestamps (see bVC1IsPTS above)
  m_bFFReordering        =  ( codec == AV_CODEC_ID_H264 && !(dwDecFlags & LAV_VIDEO_DEC_FLAG_H264_AVI))
                           || codec == AV_CODEC_ID_VP8
                           || codec == AV_CODEC_ID_VP3
                           || codec == AV_CODEC_ID_THEORA
                           || codec == AV_CODEC_ID_HUFFYUV
                           || codec == AV_CODEC_ID_FFVHUFF
                           || codec == AV_CODEC_ID_MPEG2VIDEO
                           || codec == AV_CODEC_ID_MPEG1VIDEO
                           || codec == AV_CODEC_ID_DIRAC
                           || codec == AV_CODEC_ID_UTVIDEO
                           || codec == AV_CODEC_ID_DNXHD
                           || codec == AV_CODEC_ID_JPEG2000
                           || (codec == AV_CODEC_ID_MPEG4 && pmt->formattype == FORMAT_MPEG2Video)
                           || bVC1IsPTS;

  // Stop time is unreliable, drop it and calculate it
  m_bCalculateStopTime   = (codec == AV_CODEC_ID_H264 || codec == AV_CODEC_ID_DIRAC || (codec == AV_CODEC_ID_MPEG4 && pmt->formattype == FORMAT_MPEG2Video) || bVC1IsPTS);

  // Real Video content has some odd timestamps
  // LAV Splitter gets them right with RV30/RV40, everything else screws them up
  m_bRVDropBFrameTimings = (codec == AV_CODEC_ID_RV10 || codec == AV_CODEC_ID_RV20 || ((codec == AV_CODEC_ID_RV30 || codec == AV_CODEC_ID_RV40) && (!(dwDecFlags & LAV_VIDEO_DEC_FLAG_LAVSPLITTER) || (bLAVInfoValid && (lavPinInfo.flags & LAV_STREAM_FLAG_RV34_MKV)))));

  // Enable B-Frame delay handling
  m_bBFrameDelay = !m_bFFReordering && !m_bRVDropBFrameTimings;

  m_bWaitingForKeyFrame = TRUE;
  m_bResumeAtKeyFrame =    codec == AV_CODEC_ID_MPEG2VIDEO
                        || codec == AV_CODEC_ID_VC1
                        || codec == AV_CODEC_ID_RV30
                        || codec == AV_CODEC_ID_RV40
                        || codec == AV_CODEC_ID_VP3
                        || codec == AV_CODEC_ID_THEORA
                        || codec == AV_CODEC_ID_MPEG4;

  m_bNoBufferConsumption =    codec == AV_CODEC_ID_MJPEGB
                           || codec == AV_CODEC_ID_LOCO
                           || codec == AV_CODEC_ID_JPEG2000;

  m_bHasPalette = m_pAVCtx->bits_per_coded_sample <= 8 && m_pAVCtx->extradata_size && !(dwDecFlags & LAV_VIDEO_DEC_FLAG_LAVSPLITTER)
                  &&  (codec == AV_CODEC_ID_MSVIDEO1
                    || codec == AV_CODEC_ID_MSRLE
                    || codec == AV_CODEC_ID_CINEPAK
                    || codec == AV_CODEC_ID_8BPS
                    || codec == AV_CODEC_ID_QPEG
                    || codec == AV_CODEC_ID_QTRLE
                    || codec == AV_CODEC_ID_TSCC);

  if (FAILED(AdditionaDecoderInit())) {
    return E_FAIL;
  }

  if (bLAVInfoValid) {
    // Setting has_b_frames to a proper value will ensure smoother decoding of H264
    if (lavPinInfo.has_b_frames >= 0) {
      DbgLog((LOG_TRACE, 10, L"-> Setting has_b_frames to %d", lavPinInfo.has_b_frames));
      m_pAVCtx->has_b_frames = lavPinInfo.has_b_frames;
    }
  }

  // Open the decoder
  int ret = avcodec_open2(m_pAVCtx, m_pAVCodec, NULL);
  if (ret >= 0) {
    DbgLog((LOG_TRACE, 10, L"-> ffmpeg codec opened successfully (ret: %d)", ret));
    m_nCodecId = codec;
  } else {
    DbgLog((LOG_TRACE, 10, L"-> ffmpeg codec failed to open (ret: %d)", ret));
    DestroyDecoder();
    return VFW_E_UNSUPPORTED_VIDEO;
  }

  m_iInterlaced = 0;
  for (int i = 0; i < countof(ff_interlace_capable); i++) {
    if (codec == ff_interlace_capable[i]) {
      m_iInterlaced = -1;
      break;
    }
  }

  // Detect chroma and interlaced
  if (m_pAVCtx->extradata && m_pAVCtx->extradata_size) {
    if (codec == AV_CODEC_ID_MPEG2VIDEO) {
      CMPEG2HeaderParser mpeg2Parser(extra, extralen);
      if (mpeg2Parser.hdr.valid) {
        if (mpeg2Parser.hdr.chroma < 2) {
          m_pAVCtx->pix_fmt = AV_PIX_FMT_YUV420P;
        } else if (mpeg2Parser.hdr.chroma == 2) {
          m_pAVCtx->pix_fmt = AV_PIX_FMT_YUV422P;
        }
        m_iInterlaced = mpeg2Parser.hdr.interlaced;
      }
    } else if (codec == AV_CODEC_ID_H264) {
      CH264SequenceParser h264parser;
      if (bH264avc)
        h264parser.ParseNALs(extra+6, extralen-6, 2);
      else
        h264parser.ParseNALs(extra, extralen, 0);
      if (h264parser.sps.valid)
        m_iInterlaced = h264parser.sps.interlaced;
    } else if (codec == AV_CODEC_ID_VC1) {
      CVC1HeaderParser vc1parser(extra, extralen);
      if (vc1parser.hdr.valid)
        m_iInterlaced = (vc1parser.hdr.interlaced ? -1 : 0);
    }
  }

  if (codec == AV_CODEC_ID_DNXHD)
    m_pAVCtx->pix_fmt = AV_PIX_FMT_YUV422P10;
  else if (codec == AV_CODEC_ID_FRAPS)
    m_pAVCtx->pix_fmt = AV_PIX_FMT_BGR24;

  if (bLAVInfoValid && codec != AV_CODEC_ID_FRAPS && m_pAVCtx->pix_fmt != AV_PIX_FMT_DXVA2_VLD)
    m_pAVCtx->pix_fmt = lavPinInfo.pix_fmt;

  DbgLog((LOG_TRACE, 10, L"AVCodec init successful. interlaced: %d", m_iInterlaced));

  return S_OK;
}
Example #12
/**
 * Store a large tree as FFmpeg VLC codes
 */
static int smacker_decode_header_tree(SmackVContext *smk, GetBitContext *gb, int **recodes, int *last, int size)
{
    int res;
    HuffContext huff;
    HuffContext tmp1, tmp2;
    VLC vlc[2];
    int escapes[3];
    DBCtx ctx;
    int err = 0;

    if(size >= UINT_MAX>>4){ // (((size + 3) >> 2) + 3) << 2 must not overflow
        av_log(smk->avctx, AV_LOG_ERROR, "size too large\n");
        return -1;
    }

    tmp1.length = 256;
    tmp1.maxlength = 0;
    tmp1.current = 0;
    tmp1.bits = av_mallocz(256 * 4);
    tmp1.lengths = av_mallocz(256 * sizeof(int));
    tmp1.values = av_mallocz(256 * sizeof(int));

    tmp2.length = 256;
    tmp2.maxlength = 0;
    tmp2.current = 0;
    tmp2.bits = av_mallocz(256 * 4);
    tmp2.lengths = av_mallocz(256 * sizeof(int));
    tmp2.values = av_mallocz(256 * sizeof(int));

    memset(&vlc[0], 0, sizeof(VLC));
    memset(&vlc[1], 0, sizeof(VLC));

    if(get_bits1(gb)) {
        smacker_decode_tree(gb, &tmp1, 0, 0);
        skip_bits1(gb);
        res = init_vlc(&vlc[0], SMKTREE_BITS, tmp1.length,
                    tmp1.lengths, sizeof(int), sizeof(int),
                    tmp1.bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
        if(res < 0) {
            av_log(smk->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
            err = -1;
            goto error;
        }
    } else {
        av_log(smk->avctx, AV_LOG_ERROR, "Skipping low bytes tree\n");
    }
    if(get_bits1(gb)){
        smacker_decode_tree(gb, &tmp2, 0, 0);
        skip_bits1(gb);
        res = init_vlc(&vlc[1], SMKTREE_BITS, tmp2.length,
                    tmp2.lengths, sizeof(int), sizeof(int),
                    tmp2.bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
        if(res < 0) {
            av_log(smk->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
            err = -1;
            goto error;
        }
    } else {
        av_log(smk->avctx, AV_LOG_ERROR, "Skipping high bytes tree\n");
    }

    escapes[0]  = get_bits(gb, 8);
    escapes[0] |= get_bits(gb, 8) << 8;
    escapes[1]  = get_bits(gb, 8);
    escapes[1] |= get_bits(gb, 8) << 8;
    escapes[2]  = get_bits(gb, 8);
    escapes[2] |= get_bits(gb, 8) << 8;

    last[0] = last[1] = last[2] = -1;

    ctx.escapes[0] = escapes[0];
    ctx.escapes[1] = escapes[1];
    ctx.escapes[2] = escapes[2];
    ctx.v1 = &vlc[0];
    ctx.v2 = &vlc[1];
    ctx.recode1 = tmp1.values;
    ctx.recode2 = tmp2.values;
    ctx.last = last;

    huff.length = ((size + 3) >> 2) + 3;
    huff.maxlength = 0;
    huff.current = 0;
    huff.values = av_mallocz(huff.length * sizeof(int));

    if (smacker_decode_bigtree(gb, &huff, &ctx) < 0)
        err = -1;
    skip_bits1(gb);
    if(ctx.last[0] == -1) ctx.last[0] = huff.current++;
    if(ctx.last[1] == -1) ctx.last[1] = huff.current++;
    if(ctx.last[2] == -1) ctx.last[2] = huff.current++;
    if(huff.current > huff.length){
        ctx.last[0] = ctx.last[1] = ctx.last[2] = 1;
        av_log(smk->avctx, AV_LOG_ERROR, "bigtree damaged\n");
        av_freep(&huff.values);
        err = -1;
        goto error;
    }

    *recodes = huff.values;

error: /* free the VLC tables and temporary trees on all paths */
    if(vlc[0].table)
        free_vlc(&vlc[0]);
    if(vlc[1].table)
        free_vlc(&vlc[1]);
    av_free(tmp1.bits);
    av_free(tmp1.lengths);
    av_free(tmp1.values);
    av_free(tmp2.bits);
    av_free(tmp2.lengths);
    av_free(tmp2.values);

    return err;
}
Example #13
static PayloadContext *new_context(void)
{
    return av_mallocz(sizeof(PayloadContext));
}
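
This one-liner relies on av_mallocz() zero-filling the allocation, so every field of the returned context starts out 0/NULL without explicit initialization. A self-contained illustration with a stand-in struct (the real PayloadContext layout is not shown here):

#include <stdint.h>
#include <libavutil/mem.h>

typedef struct PayloadContextSketch {   /* stand-in for the real struct */
    int      seq;
    uint8_t *frag_buf;
} PayloadContextSketch;

static PayloadContextSketch *new_context_sketch(void)
{
    /* av_mallocz() guarantees seq == 0 and frag_buf == NULL, so the
     * caller can test frag_buf without any further initialization */
    return av_mallocz(sizeof(PayloadContextSketch));
}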
Example #14
/** get sizes and offsets of image, tiles; number of components */
static int get_siz(J2kDecoderContext *s)
{
    int i, ret;

    if (bytestream2_get_bytes_left(&s->g) < 36)
        return AVERROR(EINVAL);

    bytestream2_get_be16u(&s->g); // Rsiz (skipped)
    s->width = bytestream2_get_be32u(&s->g); // width
    s->height = bytestream2_get_be32u(&s->g); // height
    s->image_offset_x = bytestream2_get_be32u(&s->g); // X0Siz
    s->image_offset_y = bytestream2_get_be32u(&s->g); // Y0Siz

    s->tile_width = bytestream2_get_be32u(&s->g); // XTSiz
    s->tile_height = bytestream2_get_be32u(&s->g); // YTSiz
    s->tile_offset_x = bytestream2_get_be32u(&s->g); // XT0Siz
    s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz
    s->ncomponents = bytestream2_get_be16u(&s->g); // CSiz

    if(s->ncomponents <= 0 || s->ncomponents > 4) {
        av_log(s->avctx, AV_LOG_ERROR, "unsupported/invalid ncomponents: %d\n", s->ncomponents);
        return AVERROR(EINVAL);
    }
    if(s->tile_width<=0 || s->tile_height<=0)
        return AVERROR(EINVAL);

    if (bytestream2_get_bytes_left(&s->g) < 3 * s->ncomponents)
        return AVERROR(EINVAL);

    for (i = 0; i < s->ncomponents; i++) { // Ssiz_i XRsiz_i, YRsiz_i
        uint8_t x = bytestream2_get_byteu(&s->g);
        s->cbps[i] = (x & 0x7f) + 1;
        s->precision = FFMAX(s->cbps[i], s->precision);
        s->sgnd[i] = !!(x & 0x80);
        s->cdx[i] = bytestream2_get_byteu(&s->g);
        s->cdy[i] = bytestream2_get_byteu(&s->g);
    }

    s->numXtiles = ff_j2k_ceildiv(s->width - s->tile_offset_x, s->tile_width);
    s->numYtiles = ff_j2k_ceildiv(s->height - s->tile_offset_y, s->tile_height);

    if(s->numXtiles * (uint64_t)s->numYtiles > INT_MAX/sizeof(J2kTile))
        return AVERROR(EINVAL);

    s->tile = av_mallocz(s->numXtiles * s->numYtiles * sizeof(J2kTile));
    if (!s->tile)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->numXtiles * s->numYtiles; i++) {
        J2kTile *tile = s->tile + i;

        tile->comp = av_mallocz(s->ncomponents * sizeof(J2kComponent));
        if (!tile->comp)
            return AVERROR(ENOMEM);
    }

    s->avctx->width  = s->width  - s->image_offset_x;
    s->avctx->height = s->height - s->image_offset_y;

    switch(s->ncomponents) {
    case 1:
        if (s->precision > 8) {
            s->avctx->pix_fmt = PIX_FMT_GRAY16;
        } else {
            s->avctx->pix_fmt = PIX_FMT_GRAY8;
        }
        break;
    case 3:
        if (s->precision > 8) {
            s->avctx->pix_fmt = PIX_FMT_RGB48;
        } else {
            s->avctx->pix_fmt = PIX_FMT_RGB24;
        }
        break;
    case 4:
        s->avctx->pix_fmt = PIX_FMT_RGBA;
        break;
    }

    if (s->picture.data[0])
        s->avctx->release_buffer(s->avctx, &s->picture);

    if ((ret = s->avctx->get_buffer(s->avctx, &s->picture)) < 0)
        return ret;

    s->picture.pict_type = AV_PICTURE_TYPE_I;
    s->picture.key_frame = 1;

    return 0;
}
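
The INT_MAX / sizeof(J2kTile) comparison above is the standard guard against the size multiplication overflowing before av_mallocz() ever sees it. A sketch of the same check next to av_calloc(), which performs an equivalent nmemb * size overflow check internally (Tile is a stand-in type):

#include <limits.h>
#include <stdint.h>
#include <libavutil/mem.h>

typedef struct Tile { int dummy[16]; } Tile;   /* stand-in for J2kTile */

static Tile *alloc_tiles(int nx, int ny)
{
    /* manual guard, as in get_siz(): nx * ny * sizeof(Tile) must fit */
    if (nx <= 0 || ny <= 0 ||
        (uint64_t)nx * ny > INT_MAX / sizeof(Tile))
        return NULL;

    /* av_calloc() also rejects nmemb * size products that overflow */
    return av_calloc((size_t)nx * ny, sizeof(Tile));
}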
Example #15
// Allocate object
VALUE audio_stream_alloc(VALUE klass) {
	AudioStreamInternal * internal = (AudioStreamInternal *)av_mallocz(sizeof(AudioStreamInternal));
	if (!internal) rb_raise(rb_eNoMemError, "Failed to allocate internal structure");

	return Data_Wrap_Struct(klass, audio_stream_mark, audio_stream_free, (void *)internal);
}
Example #16
static int flv_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    AVCodecContext *audio_enc = NULL, *video_enc = NULL, *data_enc = NULL;
    int i, metadata_count = 0;
    double framerate = 0.0;
    int64_t metadata_size_pos, data_size, metadata_count_pos;
    AVDictionaryEntry *tag = NULL;

    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        FLVStreamContext *sc;
        switch (enc->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            if (s->streams[i]->avg_frame_rate.den &&
                s->streams[i]->avg_frame_rate.num) {
                framerate = av_q2d(s->streams[i]->avg_frame_rate);
            } else {
                framerate = 1 / av_q2d(s->streams[i]->codec->time_base);
            }
            video_enc = enc;
            if (enc->codec_tag == 0) {
                av_log(s, AV_LOG_ERROR, "Video codec '%s' for stream %d is not compatible with FLV\n",
                       avcodec_get_name(enc->codec_id), i);
                return AVERROR(EINVAL);
            }
            break;
        case AVMEDIA_TYPE_AUDIO:
            audio_enc = enc;
            if (get_audio_flags(s, enc) < 0)
                return AVERROR_INVALIDDATA;
            break;
        case AVMEDIA_TYPE_DATA:
            if (enc->codec_id != AV_CODEC_ID_TEXT) {
                av_log(s, AV_LOG_ERROR, "Data codec '%s' for stream %d is not compatible with FLV\n",
                       avcodec_get_name(enc->codec_id), i);
                return AVERROR_INVALIDDATA;
            }
            data_enc = enc;
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Codec type '%s' for stream %d is not compatible with FLV\n",
                   av_get_media_type_string(enc->codec_type), i);
            return AVERROR(EINVAL);
        }
        avpriv_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */

        sc = av_mallocz(sizeof(FLVStreamContext));
        if (!sc)
            return AVERROR(ENOMEM);
        s->streams[i]->priv_data = sc;
        sc->last_ts = -1;
    }

    flv->delay = AV_NOPTS_VALUE;

    avio_write(pb, "FLV", 3);
    avio_w8(pb, 1);
    avio_w8(pb, FLV_HEADER_FLAG_HASAUDIO * !!audio_enc +
                FLV_HEADER_FLAG_HASVIDEO * !!video_enc);
    avio_wb32(pb, 9);
    avio_wb32(pb, 0);

    for (i = 0; i < s->nb_streams; i++)
        if (s->streams[i]->codec->codec_tag == 5) {
            avio_w8(pb, 8);     // message type
            avio_wb24(pb, 0);   // include flags
            avio_wb24(pb, 0);   // time stamp
            avio_wb32(pb, 0);   // reserved
            avio_wb32(pb, 11);  // size
            flv->reserved = 5;
        }

    /* write meta_tag */
    avio_w8(pb, 18);            // tag type META
    metadata_size_pos = avio_tell(pb);
    avio_wb24(pb, 0);           // size of data part (sum of all parts below)
    avio_wb24(pb, 0);           // timestamp
    avio_wb32(pb, 0);           // reserved

    /* now data of data_size size */

    /* first event name as a string */
    avio_w8(pb, AMF_DATA_TYPE_STRING);
    put_amf_string(pb, "onMetaData"); // 12 bytes

    /* mixed array (hash) with size and string/type/data tuples */
    avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY);
    metadata_count_pos = avio_tell(pb);
    metadata_count = 5 * !!video_enc +
                     5 * !!audio_enc +
                     1 * !!data_enc  +
                     2; // +2 for duration and file size

    avio_wb32(pb, metadata_count);

    put_amf_string(pb, "duration");
    flv->duration_offset= avio_tell(pb);

    // fill in the guessed duration, it'll be corrected later if incorrect
    put_amf_double(pb, s->duration / AV_TIME_BASE);

    if (video_enc) {
        put_amf_string(pb, "width");
        put_amf_double(pb, video_enc->width);

        put_amf_string(pb, "height");
        put_amf_double(pb, video_enc->height);

        put_amf_string(pb, "videodatarate");
        put_amf_double(pb, video_enc->bit_rate / 1024.0);

        put_amf_string(pb, "framerate");
        put_amf_double(pb, framerate);

        put_amf_string(pb, "videocodecid");
        put_amf_double(pb, video_enc->codec_tag);
    }

    if (audio_enc) {
        put_amf_string(pb, "audiodatarate");
        put_amf_double(pb, audio_enc->bit_rate / 1024.0);

        put_amf_string(pb, "audiosamplerate");
        put_amf_double(pb, audio_enc->sample_rate);

        put_amf_string(pb, "audiosamplesize");
        put_amf_double(pb, audio_enc->codec_id == AV_CODEC_ID_PCM_U8 ? 8 : 16);

        put_amf_string(pb, "stereo");
        put_amf_bool(pb, audio_enc->channels == 2);

        put_amf_string(pb, "audiocodecid");
        put_amf_double(pb, audio_enc->codec_tag);
    }

    if (data_enc) {
        put_amf_string(pb, "datastream");
        put_amf_double(pb, 0.0);
    }

    while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        if(   !strcmp(tag->key, "width")
            ||!strcmp(tag->key, "height")
            ||!strcmp(tag->key, "videodatarate")
            ||!strcmp(tag->key, "framerate")
            ||!strcmp(tag->key, "videocodecid")
            ||!strcmp(tag->key, "audiodatarate")
            ||!strcmp(tag->key, "audiosamplerate")
            ||!strcmp(tag->key, "audiosamplesize")
            ||!strcmp(tag->key, "stereo")
            ||!strcmp(tag->key, "audiocodecid")
            ||!strcmp(tag->key, "duration")
            ||!strcmp(tag->key, "onMetaData")
        ){
            av_log(s, AV_LOG_DEBUG, "Ignoring metadata for %s\n", tag->key);
            continue;
        }
        put_amf_string(pb, tag->key);
        avio_w8(pb, AMF_DATA_TYPE_STRING);
        put_amf_string(pb, tag->value);
        metadata_count++;
    }

    put_amf_string(pb, "filesize");
    flv->filesize_offset = avio_tell(pb);
    put_amf_double(pb, 0); // delayed write

    put_amf_string(pb, "");
    avio_w8(pb, AMF_END_OF_OBJECT);

    /* write total size of tag */
    data_size = avio_tell(pb) - metadata_size_pos - 10;

    avio_seek(pb, metadata_count_pos, SEEK_SET);
    avio_wb32(pb, metadata_count);

    avio_seek(pb, metadata_size_pos, SEEK_SET);
    avio_wb24(pb, data_size);
    avio_skip(pb, data_size + 10 - 3);
    avio_wb32(pb, data_size + 11);

    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_id == AV_CODEC_ID_AAC || enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
            int64_t pos;
            avio_w8(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO ?
                    FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO);
            avio_wb24(pb, 0); // size patched later
            avio_wb24(pb, 0); // ts
            avio_w8(pb, 0);   // ts ext
            avio_wb24(pb, 0); // streamid
            pos = avio_tell(pb);
            if (enc->codec_id == AV_CODEC_ID_AAC) {
                avio_w8(pb, get_audio_flags(s, enc));
                avio_w8(pb, 0); // AAC sequence header
                avio_write(pb, enc->extradata, enc->extradata_size);
            } else {
                avio_w8(pb, enc->codec_tag | FLV_FRAME_KEY); // flags
                avio_w8(pb, 0); // AVC sequence header
                avio_wb24(pb, 0); // composition time
                ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size);
            }
            data_size = avio_tell(pb) - pos;
            avio_seek(pb, -data_size - 10, SEEK_CUR);
            avio_wb24(pb, data_size);
            avio_skip(pb, data_size + 10 - 3);
            avio_wb32(pb, data_size + 11); // previous tag size
        }
    }

    return 0;
}
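At header time the muxer can only guess "duration" and write 0 for "filesize", which is why it records duration_offset and filesize_offset. A minimal sketch of how a trailer can patch those placeholders once the real values are known, assuming a seekable AVIOContext and the FLVContext fields saved above:

/* Sketch: overwrite the AMF number placeholders reserved in the header.
 * Assumes pb is seekable and the offsets were recorded as above. */
static void flv_patch_metadata(AVIOContext *pb, FLVContext *flv,
                               double duration_sec, int64_t file_size)
{
    int64_t end = avio_tell(pb);              /* remember the current position */

    avio_seek(pb, flv->duration_offset, SEEK_SET);
    put_amf_double(pb, duration_sec);         /* replaces the guessed duration */

    avio_seek(pb, flv->filesize_offset, SEEK_SET);
    put_amf_double(pb, file_size);            /* replaces the 0 written above */

    avio_seek(pb, end, SEEK_SET);             /* restore the write position */
}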
Exemple #17
av_cold int ff_rate_control_init(MpegEncContext *s)
{
    RateControlContext *rcc = &s->rc_context;
    int i, res;
    static const char * const const_names[] = {
        "PI",
        "E",
        "iTex",
        "pTex",
        "tex",
        "mv",
        "fCode",
        "iCount",
        "mcVar",
        "var",
        "isI",
        "isP",
        "isB",
        "avgQP",
        "qComp",
#if 0
        "lastIQP",
        "lastPQP",
        "lastBQP",
        "nextNonBQP",
#endif
        "avgIITex",
        "avgPITex",
        "avgPPTex",
        "avgBPTex",
        "avgTex",
        NULL
    };
    static double (* const func1[])(void *, double) = {
        (void *)bits2qp,
        (void *)qp2bits,
        NULL
    };
    static const char * const func1_names[] = {
        "bits2qp",
        "qp2bits",
        NULL
    };
    emms_c();

    if (!s->avctx->rc_max_available_vbv_use && s->avctx->rc_buffer_size) {
        if (s->avctx->rc_max_rate) {
            s->avctx->rc_max_available_vbv_use = av_clipf(s->avctx->rc_max_rate/(s->avctx->rc_buffer_size*get_fps(s->avctx)), 1.0/3, 1.0);
        } else
            s->avctx->rc_max_available_vbv_use = 1.0;
    }

    res = av_expr_parse(&rcc->rc_eq_eval,
                        s->rc_eq ? s->rc_eq : "tex^qComp",
                        const_names, func1_names, func1,
                        NULL, NULL, 0, s->avctx);
    if (res < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Error parsing rc_eq \"%s\"\n", s->rc_eq);
        return res;
    }

#if FF_API_RC_STRATEGY
FF_DISABLE_DEPRECATION_WARNINGS
    if (!s->rc_strategy)
        s->rc_strategy = s->avctx->rc_strategy;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    for (i = 0; i < 5; i++) {
        rcc->pred[i].coeff = FF_QP2LAMBDA * 7.0;
        rcc->pred[i].count = 1.0;
        rcc->pred[i].decay = 0.4;

        rcc->i_cplx_sum [i] =
        rcc->p_cplx_sum [i] =
        rcc->mv_bits_sum[i] =
        rcc->qscale_sum [i] =
        rcc->frame_count[i] = 1; // 1 is better because of 1/0 and such

        rcc->last_qscale_for[i] = FF_QP2LAMBDA * 5;
    }
    rcc->buffer_index = s->avctx->rc_initial_buffer_occupancy;
    if (!rcc->buffer_index)
        rcc->buffer_index = s->avctx->rc_buffer_size * 3 / 4;

    if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
        int i;
        char *p;

        /* find number of pics */
        p = s->avctx->stats_in;
        for (i = -1; p; i++)
            p = strchr(p + 1, ';');
        i += s->max_b_frames;
        if (i <= 0 || i >= INT_MAX / sizeof(RateControlEntry))
            return -1;
        rcc->entry       = av_mallocz(i * sizeof(RateControlEntry));
        if (!rcc->entry)
            return AVERROR(ENOMEM);
        rcc->num_entries = i;

        /* init all to skipped P-frames
         * (with B-frames we might have a not encoded frame at the end FIXME) */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];

            rce->pict_type  = rce->new_pict_type = AV_PICTURE_TYPE_P;
            rce->qscale     = rce->new_qscale    = FF_QP2LAMBDA * 2;
            rce->misc_bits  = s->mb_num + 10;
            rce->mb_var_sum = s->mb_num * 100;
        }

        /* read stats */
        p = s->avctx->stats_in;
        for (i = 0; i < rcc->num_entries - s->max_b_frames; i++) {
            RateControlEntry *rce;
            int picture_number;
            int e;
            char *next;

            next = strchr(p, ';');
            if (next) {
                (*next) = 0; // sscanf is unbelievably slow on looong strings // FIXME copy / do not write
                next++;
            }
            e = sscanf(p, " in:%d ", &picture_number);

            av_assert0(picture_number >= 0);
            av_assert0(picture_number < rcc->num_entries);
            rce = &rcc->entry[picture_number];

            e += sscanf(p, " in:%*d out:%*d type:%d q:%f itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%"SCNd64" var:%"SCNd64" icount:%d skipcount:%d hbits:%d",
                        &rce->pict_type, &rce->qscale, &rce->i_tex_bits, &rce->p_tex_bits,
                        &rce->mv_bits, &rce->misc_bits,
                        &rce->f_code, &rce->b_code,
                        &rce->mc_mb_var_sum, &rce->mb_var_sum,
                        &rce->i_count, &rce->skip_count, &rce->header_bits);
            if (e != 14) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "statistics are damaged at line %d, parser out=%d\n",
                       i, e);
                return -1;
            }

            p = next;
        }

        if (init_pass2(s) < 0) {
            ff_rate_control_uninit(s);
            return -1;
        }

#if FF_API_RC_STRATEGY
        av_assert0(MPV_RC_STRATEGY_XVID == FF_RC_STRATEGY_XVID);
#endif

        // FIXME maybe move to end
        if ((s->avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == MPV_RC_STRATEGY_XVID) {
#if CONFIG_LIBXVID
            return ff_xvid_rate_control_init(s);
#else
            av_log(s->avctx, AV_LOG_ERROR,
                   "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
            return -1;
#endif
        }
    }

    if (!(s->avctx->flags & AV_CODEC_FLAG_PASS2)) {
        rcc->short_term_qsum   = 0.001;
        rcc->short_term_qcount = 0.001;

        rcc->pass1_rc_eq_output_sum = 0.001;
        rcc->pass1_wanted_bits      = 0.001;

        if (s->avctx->qblur > 1.0) {
            av_log(s->avctx, AV_LOG_ERROR, "qblur too large\n");
            return -1;
        }
        /* init stuff with the user specified complexity */
        if (s->rc_initial_cplx) {
            for (i = 0; i < 60 * 30; i++) {
                double bits = s->rc_initial_cplx * (i / 10000.0 + 1.0) * s->mb_num;
                RateControlEntry rce;

                if (i % ((s->gop_size + 3) / 4) == 0)
                    rce.pict_type = AV_PICTURE_TYPE_I;
                else if (i % (s->max_b_frames + 1))
                    rce.pict_type = AV_PICTURE_TYPE_B;
                else
                    rce.pict_type = AV_PICTURE_TYPE_P;

                rce.new_pict_type = rce.pict_type;
                rce.mc_mb_var_sum = bits * s->mb_num / 100000;
                rce.mb_var_sum    = s->mb_num;

                rce.qscale    = FF_QP2LAMBDA * 2;
                rce.f_code    = 2;
                rce.b_code    = 1;
                rce.misc_bits = 1;

                if (s->pict_type == AV_PICTURE_TYPE_I) {
                    rce.i_count    = s->mb_num;
                    rce.i_tex_bits = bits;
                    rce.p_tex_bits = 0;
                    rce.mv_bits    = 0;
                } else {
                    rce.i_count    = 0; // FIXME we do know this approx
                    rce.i_tex_bits = 0;
                    rce.p_tex_bits = bits * 0.9;
                    rce.mv_bits    = bits * 0.1;
                }
                rcc->i_cplx_sum[rce.pict_type]  += rce.i_tex_bits * rce.qscale;
                rcc->p_cplx_sum[rce.pict_type]  += rce.p_tex_bits * rce.qscale;
                rcc->mv_bits_sum[rce.pict_type] += rce.mv_bits;
                rcc->frame_count[rce.pict_type]++;

                get_qscale(s, &rce, rcc->pass1_wanted_bits / rcc->pass1_rc_eq_output_sum, i);

                // FIXME misbehaves a little for variable fps
                rcc->pass1_wanted_bits += s->bit_rate / get_fps(s->avctx);
            }
        }
    }

    return 0;
}
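Once av_expr_parse() has built rc_eq_eval, the rate controller evaluates it once per frame with av_expr_eval(), binding const_names index-for-index to an array of values and handing the current RateControlEntry to the bits2qp/qp2bits callbacks. A rough sketch of that call, with the const_values ordering assumed to mirror const_names above:

/* Sketch: evaluate the parsed rate-control expression for one frame.
 * const_values[] must line up index-for-index with const_names[]. */
static double eval_rc_eq(RateControlContext *rcc, const double *const_values,
                         RateControlEntry *rce)
{
    /* rce reaches the func1 callbacks (bits2qp/qp2bits) as opaque data */
    return av_expr_eval(rcc->rc_eq_eval, const_values, rce);
}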
Exemple #18
File: mvi.c Project: 1c0n/xbmc
static int read_header(AVFormatContext *s)
{
    MviDemuxContext *mvi = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *ast, *vst;
    unsigned int version, frames_count, msecs_per_frame, player_version;

    ast = avformat_new_stream(s, NULL);
    if (!ast)
        return AVERROR(ENOMEM);

    vst = avformat_new_stream(s, NULL);
    if (!vst)
        return AVERROR(ENOMEM);

    vst->codec->extradata_size = 2;
    vst->codec->extradata = av_mallocz(2 + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!vst->codec->extradata)
        return AVERROR(ENOMEM);

    version                  = avio_r8(pb);
    vst->codec->extradata[0] = avio_r8(pb);
    vst->codec->extradata[1] = avio_r8(pb);
    frames_count             = avio_rl32(pb);
    msecs_per_frame          = avio_rl32(pb);
    vst->codec->width        = avio_rl16(pb);
    vst->codec->height       = avio_rl16(pb);
    avio_r8(pb);
    ast->codec->sample_rate  = avio_rl16(pb);
    mvi->audio_data_size     = avio_rl32(pb);
    avio_r8(pb);
    player_version           = avio_rl32(pb);
    avio_rl16(pb);
    avio_r8(pb);

    if (frames_count == 0 || mvi->audio_data_size == 0)
        return AVERROR_INVALIDDATA;

    if (version != 7 || player_version > 213) {
        av_log(s, AV_LOG_ERROR, "unhandled version (%d,%d)\n", version, player_version);
        return AVERROR_INVALIDDATA;
    }

    avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
    ast->codec->codec_type      = AVMEDIA_TYPE_AUDIO;
    ast->codec->codec_id        = AV_CODEC_ID_PCM_U8;
    ast->codec->channels        = 1;
    ast->codec->channel_layout  = AV_CH_LAYOUT_MONO;
    ast->codec->bits_per_coded_sample = 8;
    ast->codec->bit_rate        = ast->codec->sample_rate * 8;

    avpriv_set_pts_info(vst, 64, msecs_per_frame, 1000000);
    vst->avg_frame_rate    = av_inv_q(vst->time_base);
    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id   = AV_CODEC_ID_MOTIONPIXELS;

    mvi->get_int = (vst->codec->width * vst->codec->height < (1 << 16)) ? avio_rl16 : avio_rl24;

    mvi->audio_frame_size   = ((uint64_t)mvi->audio_data_size << MVI_FRAC_BITS) / frames_count;
    if (!mvi->audio_frame_size) {
        av_log(s, AV_LOG_ERROR, "audio_frame_size is 0\n");
        return AVERROR_INVALIDDATA;
    }
    mvi->audio_size_counter = (ast->codec->sample_rate * 830 / mvi->audio_frame_size - 1) * mvi->audio_frame_size;
    mvi->audio_size_left    = mvi->audio_data_size;

    return 0;
}
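audio_frame_size is kept in MVI_FRAC_BITS fixed point so the demuxer can spread audio_data_size evenly across frames_count frames without rounding drift. A sketch of the accumulation idea, assuming MVI_FRAC_BITS as defined in mvi.c (the helper and the accumulator are illustrative, not the demuxer's exact fields):

/* Sketch: emit whole audio bytes per frame while carrying the fractional
 * remainder, so the per-frame counts sum exactly to audio_data_size. */
static unsigned next_audio_chunk(uint64_t audio_frame_size, uint64_t *acc)
{
    unsigned bytes;
    *acc += audio_frame_size;                    /* one frame's worth, fixed point */
    bytes = *acc >> MVI_FRAC_BITS;               /* whole bytes for this frame */
    *acc -= (uint64_t)bytes << MVI_FRAC_BITS;    /* keep only the fraction */
    return bytes;
}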
Exemple #19
static int mmsh_open_internal(URLContext *h, const char *uri, int flags, int timestamp, int64_t pos)
{
    int i, port, err;
    char httpname[256], path[256], host[128];
    char *stream_selection = NULL;
    char headers[1024];
    MMSHContext *mmsh = h->priv_data;
    MMSContext *mms;

    mmsh->request_seq = h->is_streamed = 1;
    mms = &mmsh->mms;
    av_strlcpy(mmsh->location, uri, sizeof(mmsh->location));

    av_url_split(NULL, 0, NULL, 0,
        host, sizeof(host), &port, path, sizeof(path), mmsh->location);
    if (port<0)
        port = 80; // default mmsh protocol port
    ff_url_join(httpname, sizeof(httpname), "http", NULL, host, port, "%s", path);

    if (ffurl_alloc(&mms->mms_hd, httpname, AVIO_FLAG_READ,
                    &h->interrupt_callback) < 0) {
        return AVERROR(EIO);
    }

    snprintf(headers, sizeof(headers),
             "Accept: */*\r\n"
             USERAGENT
             "Host: %s:%d\r\n"
             "Pragma: no-cache,rate=1.000000,stream-time=0,"
             "stream-offset=0:0,request-context=%u,max-duration=0\r\n"
             CLIENTGUID
             "Connection: Close\r\n",
             host, port, mmsh->request_seq++);
    av_opt_set(mms->mms_hd->priv_data, "headers", headers, 0);

    err = ffurl_connect(mms->mms_hd, NULL);
    if (err) {
        goto fail;
    }
    err = get_http_header_data(mmsh);
    if (err) {
        av_log(NULL, AV_LOG_ERROR, "Get http header data failed!\n");
        goto fail;
    }

    // close the socket and then reopen it for sending the second play request.
    ffurl_close(mms->mms_hd);
    memset(headers, 0, sizeof(headers));
    if ((err = ffurl_alloc(&mms->mms_hd, httpname, AVIO_FLAG_READ,
                           &h->interrupt_callback)) < 0) {
        goto fail;
    }
    stream_selection = av_mallocz(mms->stream_num * 19 + 1);
    if (!stream_selection) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    for (i = 0; i < mms->stream_num; i++) {
        char tmp[20];
        err = snprintf(tmp, sizeof(tmp), "ffff:%d:0 ", mms->streams[i].id);
        if (err < 0)
            goto fail;
        av_strlcat(stream_selection, tmp, mms->stream_num * 19 + 1);
    }
    // send play request
    err = snprintf(headers, sizeof(headers),
                   "Accept: */*\r\n"
                   USERAGENT
                   "Host: %s:%d\r\n"
                   "Pragma: no-cache,rate=1.000000,request-context=%u\r\n"
                   "Pragma: xPlayStrm=1\r\n"
                   CLIENTGUID
                   "Pragma: stream-switch-count=%d\r\n"
                   "Pragma: stream-switch-entry=%s\r\n"
                   "Pragma: no-cache,rate=1.000000,stream-time=%u"
                   "Connection: Close\r\n",
                   host, port, mmsh->request_seq++, mms->stream_num, stream_selection, timestamp);
    av_freep(&stream_selection);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Build play request failed!\n");
        goto fail;
    }
    av_dlog(NULL, "out_buffer is %s", headers);
    av_opt_set(mms->mms_hd->priv_data, "headers", headers, 0);

    err = ffurl_connect(mms->mms_hd, NULL);
    if (err) {
          goto fail;
    }

    err = get_http_header_data(mmsh);
    if (err) {
        av_log(NULL, AV_LOG_ERROR, "Get http header data failed!\n");
        goto fail;
    }

    av_dlog(NULL, "Connection successfully open\n");
    return 0;
fail:
    av_freep(&stream_selection);
    mmsh_close(h);
    av_dlog(NULL, "Connection failed with error %d\n", err);
    return err;
}
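USERAGENT and CLIENTGUID are preprocessor string macros defined elsewhere in mmsh.c; string-literal concatenation splices them into the snprintf format, so each must expand to a complete \r\n-terminated header line, roughly of this shape (values illustrative; see the real mmsh.c for the authoritative ones):

/* Illustrative shape of the macros concatenated into the headers above. */
#define USERAGENT  "User-Agent: NSPlayer/4.1.0.3856\r\n"
#define CLIENTGUID "Pragma: xClientGUID={c77e7400-738a-11d2-9add-0020af0a3278}\r\n"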
Exemple #20
static int tta_read_header(AVFormatContext *s)
{
    TTAContext *c = s->priv_data;
    AVStream *st;
    int i, channels, bps, samplerate, datalen;
    uint64_t framepos, start_offset;

    if (!av_dict_get(s->metadata, "", NULL, AV_DICT_IGNORE_SUFFIX))
        ff_id3v1_read(s);

    start_offset = avio_tell(s->pb);
    if (avio_rl32(s->pb) != AV_RL32("TTA1"))
        return -1; // not tta file

    avio_skip(s->pb, 2); // FIXME: flags
    channels = avio_rl16(s->pb);
    bps = avio_rl16(s->pb);
    samplerate = avio_rl32(s->pb);
    if(samplerate <= 0 || samplerate > 1000000){
        av_log(s, AV_LOG_ERROR, "nonsense samplerate\n");
        return -1;
    }

    datalen = avio_rl32(s->pb);
    if(datalen < 0){
        av_log(s, AV_LOG_ERROR, "nonsense datalen\n");
        return -1;
    }

    avio_skip(s->pb, 4); // header crc

    c->frame_size      = samplerate * 256 / 245;
    c->last_frame_size = datalen % c->frame_size;
    if (!c->last_frame_size)
        c->last_frame_size = c->frame_size;
    c->totalframes = datalen / c->frame_size + (c->last_frame_size < c->frame_size);
    c->currentframe = 0;

    if(c->totalframes >= UINT_MAX/sizeof(uint32_t) || c->totalframes <= 0){
        av_log(s, AV_LOG_ERROR, "totalframes %d invalid\n", c->totalframes);
        return -1;
    }

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    avpriv_set_pts_info(st, 64, 1, samplerate);
    st->start_time = 0;
    st->duration = datalen;

    framepos = avio_tell(s->pb) + 4*c->totalframes + 4;

    for (i = 0; i < c->totalframes; i++) {
        uint32_t size = avio_rl32(s->pb);
        av_add_index_entry(st, framepos, i * c->frame_size, size, 0,
                           AVINDEX_KEYFRAME);
        framepos += size;
    }
    avio_skip(s->pb, 4); // seektable crc

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_TTA;
    st->codec->channels = channels;
    st->codec->sample_rate = samplerate;
    st->codec->bits_per_coded_sample = bps;

    st->codec->extradata_size = avio_tell(s->pb) - start_offset;
    if(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)st->codec->extradata_size){
        //this check is redundant as avio_read should fail
        av_log(s, AV_LOG_ERROR, "extradata_size too large\n");
        return -1;
    }
    st->codec->extradata = av_mallocz(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codec->extradata) {
        st->codec->extradata_size = 0;
        return AVERROR(ENOMEM);
    }
    avio_seek(s->pb, start_offset, SEEK_SET);
    avio_read(s->pb, st->codec->extradata, st->codec->extradata_size);

    return 0;
}
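The samplerate * 256 / 245 expression is the fixed TTA frame length in samples, about 1.045 seconds of audio; the last frame simply carries the remainder. At 44.1 kHz the division happens to be exact:

/* Worked example of the TTA frame-size formula at 44.1 kHz. */
#include <assert.h>

int main(void)
{
    int samplerate = 44100;
    int frame_size = samplerate * 256 / 245;  /* 11289600 / 245 */
    assert(frame_size == 46080);              /* ~1.045 s per frame */
    return 0;
}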
Exemple #21
static opj_image_t *libopenjpeg_create_image(AVCodecContext *avctx,
                                             opj_cparameters_t *parameters)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    opj_image_cmptparm_t *cmptparm;
    OPJ_COLOR_SPACE color_space;
    opj_image_t *img;
    int i;
    int sub_dx[4];
    int sub_dy[4];
    int numcomps = desc->nb_components;

    sub_dx[0] =
    sub_dx[3] = 1;
    sub_dy[0] =
    sub_dy[3] = 1;
    sub_dx[1] =
    sub_dx[2] = 1 << desc->log2_chroma_w;
    sub_dy[1] =
    sub_dy[2] = 1 << desc->log2_chroma_h;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YA8:
        color_space = CLRSPC_GRAY;
        break;
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_RGBA:
    case AV_PIX_FMT_RGB48:
    case AV_PIX_FMT_RGBA64:
        color_space = CLRSPC_SRGB;
        break;
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV444P16:
        color_space = CLRSPC_SYCC;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "The requested pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        return NULL;
    }

    cmptparm = av_mallocz(numcomps * sizeof(*cmptparm));
    if (!cmptparm) {
        av_log(avctx, AV_LOG_ERROR, "Not enough memory");
        return NULL;
    }

    for (i = 0; i < numcomps; i++) {
        cmptparm[i].prec = desc->comp[i].depth;
        cmptparm[i].bpp  = desc->comp[i].depth;
        cmptparm[i].sgnd = 0;
        cmptparm[i].dx   = sub_dx[i];
        cmptparm[i].dy   = sub_dy[i];
        cmptparm[i].w    = avctx->width / sub_dx[i];
        cmptparm[i].h    = avctx->height / sub_dy[i];
    }

    img = opj_image_create(numcomps, cmptparm, color_space);
    av_freep(&cmptparm);
    return img;
}
Exemple #22
// Initialization thread
void *drawingAndParam(void * arg)
{
	string winParametrage = "Thresholded";
	string winDetected = "Parametrages";
	char key = 0;
	drawing = false;
	onDrawing = true;
	pthread_mutex_init(&mutexVideo, NULL);
#if output_video == ov_remote_ffmpeg
	int errorcode = avformat_open_input(&pFormatCtx, "tcp://192.168.1.1:5555", NULL, NULL);
	if (errorcode < 0) {
		cout << "ERREUR CAMERA DRONE!!!" << errorcode;
		return 0;
	}
	avformat_find_stream_info(pFormatCtx, NULL);
	av_dump_format(pFormatCtx, 0, "tcp://192.168.1.1:5555", 0);
	pCodecCtx = pFormatCtx->streams[0]->codec;
	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		cout << "ERREUR avcodec_find_decoder!!!";
		return 0;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		cout << "ERREUR avcodec_open2!!!";
		return 0;
	}
	//pFrame = av_frame_alloc();
	//pFrameBGR = av_frame_alloc();
	pFrame = avcodec_alloc_frame();
	pFrameBGR = avcodec_alloc_frame();
	bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));
	avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
	pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
	img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
	if (!img) {
		cout << "ERREUR PAS D'IMAGE!!!";
		return 0;
	}

	pthread_t ii;
	pthread_create(&ii, NULL, getimg, NULL);

#else	
	VideoCapture cap(0); // capture video from the webcam

#endif
	HH=179;LS=1;HS=255;LV=1;HV=255;LH=1;
	namedWindow(winDetected, CV_WINDOW_NORMAL);
	Mat frame;
	setMouseCallback(winDetected, MouseCallBack, NULL);
	while(true)
	{	
		if(onDrawing) // until the user starts the selection
		{
			#if output_video != ov_remote_ffmpeg
				bool bSuccess = cap.read(frame); // grab a new frame
				if (!bSuccess) {
					cout << "Impossible de lire le flux video" << endl;
					break;
				}
			#else
				pthread_mutex_lock(&mutexVideo);
				memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
				pthread_mutex_unlock(&mutexVideo);
				frame = cv::cvarrToMat(img, true);
			#endif
			imshow(winDetected, frame);
		}
		if(!onDrawing && !drawing) // live preview of the user's selection rectangle
		{
			Mat tmpFrame=frame.clone();
			rectangle(tmpFrame, rec, CV_RGB(51,156,204),1,8,0);
			imshow(winDetected, tmpFrame);
		}
		if(drawing) // the user has finished selecting
		{
			//cible Ball(1);
			namedWindow(winParametrage, CV_WINDOW_NORMAL);
			setMouseCallback(winDetected, NULL, NULL);	
			rectangle(frame, rec, CV_RGB(51,156,204),2,8,0);
			imshow(winDetected, frame);
			Mat selection = frame(rec);
			Ball.setPicture(selection);
			while(key != 'q')
			{
				// Trackbars for hue selection
				createTrackbar("LowH", winParametrage, &LH, 179); //Hue (0 - 179)
				createTrackbar("HighH", winParametrage, &HH, 179);
				// Trackbars for saturation, compared against white
				createTrackbar("LowS", winParametrage, &LS, 255); //Saturation (0 - 255)
				createTrackbar("HighS", winParametrage, &HS, 255);
				// Trackbars for brightness, compared against black
				createTrackbar("LowV", winParametrage, &LV, 255);//Value (0 - 255)
				createTrackbar("HighV", winParametrage, &HV, 255);
				Mat imgHSV;

				cvtColor(selection, imgHSV, COLOR_BGR2HSV); // convert BGR to HSV

				Mat imgDetection;

				inRange(imgHSV, Scalar(LH, LS, LV), Scalar(HH, HS, HV), imgDetection); // black out everything outside the user-chosen color range

				// Remove noise (morphological opening, then closing)
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				imshow(winParametrage, imgDetection);

				// Compute the "distance" to the target; used as a threshold.
				Moments position;
				position = moments(imgDetection);
				Ball.lastdZone = position.m00;

				key = waitKey(10);
			}
			
			// Extract interest points from the user's selection
			Mat graySelect;
			int minHessian = 800;
			cvtColor(selection, graySelect, COLOR_BGR2GRAY);
			Ptr<SURF> detector = SURF::create(minHessian);
			vector<KeyPoint> KP;
			detector->detect(graySelect, KP);
			Mat KPimg;
			drawKeypoints(graySelect, KP, KPimg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
			Mat desc;
			Ptr<SURF> extractor = SURF::create();
			extractor->compute(graySelect, KP, desc);
			Ball.setimgGray(graySelect);
			Ball.setKP(KP);
			Ball.setDesc(desc);
			break;
		}
		key = waitKey(10);
	}
	// Initialization done: close all windows and move on to tracking
	destroyAllWindows();
#if output_video != ov_remote_ffmpeg
	cap.release();
#endif
	return NULL;
}
Exemple #23
static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
{
    TM2Huff huff;
    int res = 0;

    huff.val_bits = get_bits(&ctx->gb, 5);
    huff.max_bits = get_bits(&ctx->gb, 5);
    huff.min_bits = get_bits(&ctx->gb, 5);
    huff.nodes    = get_bits_long(&ctx->gb, 17);
    huff.num      = 0;

    /* check for correct codes parameters */
    if ((huff.val_bits < 1) || (huff.val_bits > 32) ||
        (huff.max_bits < 0) || (huff.max_bits > 25)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal "
               "length: %i, max code length: %i\n", huff.val_bits, huff.max_bits);
        return AVERROR_INVALIDDATA;
    }
    if ((huff.nodes <= 0) || (huff.nodes > 0x10000)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree "
               "nodes: %i\n", huff.nodes);
        return AVERROR_INVALIDDATA;
    }
    /* one-node tree */
    if (huff.max_bits == 0)
        huff.max_bits = 1;

    /* allocate space for codes - it is exactly ceil(nodes / 2) entries */
    huff.max_num = (huff.nodes + 1) >> 1;
    huff.nums    = av_mallocz(huff.max_num * sizeof(int));
    huff.bits    = av_mallocz(huff.max_num * sizeof(uint32_t));
    huff.lens    = av_mallocz(huff.max_num * sizeof(int));
    if (!huff.nums || !huff.bits || !huff.lens) {
        res = AVERROR(ENOMEM);
        goto end;
    }

    res = tm2_read_tree(ctx, 0, 0, &huff);

    if (huff.num != huff.max_num) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Got less codes than expected: %i of %i\n",
               huff.num, huff.max_num);
        res = AVERROR_INVALIDDATA;
    }

    /* convert codes to vlc_table */
    if (res >= 0) {
        int i;

        res = init_vlc(&code->vlc, huff.max_bits, huff.max_num,
                       huff.lens, sizeof(int), sizeof(int),
                       huff.bits, sizeof(uint32_t), sizeof(uint32_t), 0);
        if (res < 0)
            av_log(ctx->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
        else {
            code->bits = huff.max_bits;
            code->length = huff.max_num;
            code->recode = av_malloc(code->length * sizeof(int));
            for (i = 0; i < code->length; i++)
                code->recode[i] = huff.nums[i];
        }
    }
end:
    /* free allocated memory */
    av_free(huff.nums);
    av_free(huff.bits);
    av_free(huff.lens);

    return res;
}
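The (huff.nodes + 1) >> 1 bound holds because a Huffman tree is a full binary tree: with nodes total nodes it has exactly (nodes + 1) / 2 leaves, and only leaves carry codes. A quick check of the arithmetic:

/* Worked example: a full binary tree with 7 nodes has 4 leaves. */
#include <assert.h>

int main(void)
{
    int nodes  = 7;
    int leaves = (nodes + 1) >> 1;  /* same expression as huff.max_num */
    assert(leaves == 4);
    return 0;
}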
Exemple #24
static int generate_joint_tables(HYuvContext *s)
{
    int ret;
    uint16_t *symbols = av_mallocz(5 << VLC_BITS);
    uint16_t *bits;
    uint8_t *len;
    if (!symbols)
        return AVERROR(ENOMEM);
    bits = symbols + (1 << VLC_BITS);
    len = (uint8_t *)(bits + (1 << VLC_BITS));

    if (s->bitstream_bpp < 24 || s->version > 2) {
        int p, i, y, u;
        for (p = 0; p < 4; p++) {
            int p0 = s->version > 2 ? p : 0;
            for (i = y = 0; y < s->vlc_n; y++) {
                int len0  = s->len[p0][y];
                int limit = VLC_BITS - len0;
                if (limit <= 0 || !len0)
                    continue;
                if ((sign_extend(y, 8) & (s->vlc_n-1)) != y)
                    continue;
                for (u = 0; u < s->vlc_n; u++) {
                    int len1 = s->len[p][u];
                    if (len1 > limit || !len1)
                        continue;
                    if ((sign_extend(u, 8) & (s->vlc_n-1)) != u)
                        continue;
                    av_assert0(i < (1 << VLC_BITS));
                    len[i]     = len0 + len1;
                    bits[i]    = (s->bits[p0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y << 8) + (u & 0xFF);
                    i++;
                }
            }
            ff_free_vlc(&s->vlc[4 + p]);
            if ((ret = ff_init_vlc_sparse(&s->vlc[4 + p], VLC_BITS, i, len, 1, 1,
                                          bits, 2, 2, symbols, 2, 2, 0)) < 0)
                goto out;
        }
    } else {
        uint8_t (*map)[4] = (uint8_t(*)[4]) s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        /* Restrict the range to +/-16 because that's pretty much guaranteed
         * to cover all the combinations that fit in 11 bits total, and it
         *  does not matter if we miss a few rare codes. */
        for (i = 0, g = -16; g < 16; g++) {
            int len0   = s->len[p0][g & 255];
            int limit0 = VLC_BITS - len0;
            if (limit0 < 2 || !len0)
                continue;
            for (b = -16; b < 16; b++) {
                int len1   = s->len[p1][b & 255];
                int limit1 = limit0 - len1;
                if (limit1 < 1 || !len1)
                    continue;
                code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
                for (r = -16; r < 16; r++) {
                    int len2 = s->len[2][r & 255];
                    if (len2 > limit1 || !len2)
                        continue;
                    av_assert0(i < (1 << VLC_BITS));
                    len[i]  = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r & 255];
                    if (s->decorrelate) {
                        map[i][G] = g;
                        map[i][B] = g + b;
                        map[i][R] = g + r;
                    } else {
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        ff_free_vlc(&s->vlc[4]);
        if ((ret = init_vlc(&s->vlc[4], VLC_BITS, i, len, 1, 1,
                            bits, 2, 2, 0)) < 0)
            goto out;
    }
    ret = 0;
out:
    av_freep(&symbols);
    return ret;
}
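The single av_mallocz(5 << VLC_BITS) above packs all three lookup tables into one buffer: (1 << VLC_BITS) slots of a 2-byte symbol, a 2-byte code and a 1-byte length, i.e. 5 bytes per slot, which the two pointer offsets then carve up. The same arithmetic as a sketch:

/* Sketch: size of the joint buffer carved into symbols/bits/len above. */
#include <stddef.h>
#include <stdint.h>

static size_t joint_table_bytes(int vlc_bits)
{
    size_t entries = (size_t)1 << vlc_bits;
    return entries * (sizeof(uint16_t)    /* symbols */
                    + sizeof(uint16_t)    /* bits    */
                    + sizeof(uint8_t));   /* len     */
    /* == 5 << vlc_bits, matching the allocation above */
}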
Exemple #25
static int wsvqa_read_header(AVFormatContext *s)
{
    WsVqaDemuxContext *wsvqa = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    unsigned char *header;
    unsigned char scratch[VQA_PREAMBLE_SIZE];
    unsigned int chunk_tag;
    unsigned int chunk_size;
    int fps;

    /* initialize the video decoder stream */
    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->start_time = 0;
    wsvqa->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_WS_VQA;
    st->codec->codec_tag = 0;  /* no fourcc */

    /* skip to the start of the VQA header */
    avio_seek(pb, 20, SEEK_SET);

    /* the VQA header needs to go to the decoder */
    st->codec->extradata_size = VQA_HEADER_SIZE;
    st->codec->extradata = av_mallocz(VQA_HEADER_SIZE + AV_INPUT_BUFFER_PADDING_SIZE);
    header = (unsigned char *)st->codec->extradata;
    if (avio_read(pb, st->codec->extradata, VQA_HEADER_SIZE) !=
        VQA_HEADER_SIZE) {
        return AVERROR(EIO);
    }
    st->codec->width = AV_RL16(&header[6]);
    st->codec->height = AV_RL16(&header[8]);
    fps = header[12];
    st->nb_frames =
    st->duration  = AV_RL16(&header[4]);
    if (fps < 1 || fps > 30) {
        av_log(s, AV_LOG_ERROR, "invalid fps: %d\n", fps);
        return AVERROR_INVALIDDATA;
    }
    avpriv_set_pts_info(st, 64, 1, fps);

    wsvqa->version      = AV_RL16(&header[ 0]);
    wsvqa->sample_rate  = AV_RL16(&header[24]);
    wsvqa->channels     = header[26];
    wsvqa->bps          = header[27];
    wsvqa->audio_stream_index = -1;

    s->ctx_flags |= AVFMTCTX_NOHEADER;

    /* there are 0 or more chunks before the FINF chunk; iterate until
     * FINF has been skipped and the file will be ready to be demuxed */
    do {
        if (avio_read(pb, scratch, VQA_PREAMBLE_SIZE) != VQA_PREAMBLE_SIZE) {
            return AVERROR(EIO);
        }
        chunk_tag = AV_RB32(&scratch[0]);
        chunk_size = AV_RB32(&scratch[4]);

        /* catch any unknown header tags, for curiosity */
        switch (chunk_tag) {
        case CINF_TAG:
        case CINH_TAG:
        case CIND_TAG:
        case PINF_TAG:
        case PINH_TAG:
        case PIND_TAG:
        case FINF_TAG:
        case CMDS_TAG:
            break;

        default:
            av_log (s, AV_LOG_ERROR, " note: unknown chunk seen (%c%c%c%c)\n",
                scratch[0], scratch[1],
                scratch[2], scratch[3]);
            break;
        }

        avio_skip(pb, chunk_size);
    } while (chunk_tag != FINF_TAG);

    return 0;
}
Exemple #26
STDMETHODIMP CDecWMV9::InitDecoder(AVCodecID codec, const CMediaType *pmt)
{
  HRESULT hr = S_OK;
  DbgLog((LOG_TRACE, 10, L"CDecWMV9::InitDecoder(): Initializing WMV9 DMO decoder"));

  DestroyDecoder(false);

  BITMAPINFOHEADER *pBMI = NULL;
  REFERENCE_TIME rtAvg = 0;
  DWORD dwARX = 0, dwARY = 0;
  videoFormatTypeHandler(*pmt, &pBMI, &rtAvg, &dwARX, &dwARY);
  
  size_t extralen = 0;
  BYTE *extra = NULL;
  getExtraData(*pmt, NULL, &extralen);
  if (extralen > 0) {
    extra = (BYTE *)av_mallocz(extralen + FF_INPUT_BUFFER_PADDING_SIZE);
    getExtraData(*pmt, extra, &extralen);
  }

  if (codec == AV_CODEC_ID_VC1 && extralen) {
    size_t i = 0;
    for (i = 0; i < (extralen - 4); i++) {
      uint32_t code = AV_RB32(extra+i);
      if (IS_MARKER(code))
        break;
    }
    if (i == 0) {
      memmove(extra+1, extra, extralen);
      *extra = 0;
      extralen++;
    } else if (i > 1) {
      DbgLog((LOG_TRACE, 10, L"-> VC-1 Header at position %u (should be 0 or 1)", i));
    }
  }

  /* Create input type */

  GUID subtype = codec == AV_CODEC_ID_VC1 ? MEDIASUBTYPE_WVC1 : MEDIASUBTYPE_WMV3;
  m_nCodecId = codec;

  mtIn.SetType(&MEDIATYPE_Video);
  mtIn.SetSubtype(&subtype);
  mtIn.SetFormatType(&FORMAT_VideoInfo);
  mtIn.SetTemporalCompression(TRUE);
  mtIn.SetSampleSize(0);
  mtIn.SetVariableSize();
  
  VIDEOINFOHEADER *vih = (VIDEOINFOHEADER *)mtIn.AllocFormatBuffer((ULONG)(sizeof(VIDEOINFOHEADER) + extralen));
  memset(vih, 0, sizeof(VIDEOINFOHEADER));
  vih->bmiHeader.biWidth       = pBMI->biWidth;
  vih->bmiHeader.biHeight      = pBMI->biHeight;
  vih->bmiHeader.biPlanes      = 1;
  vih->bmiHeader.biBitCount    = 24;
  vih->bmiHeader.biSizeImage   = pBMI->biWidth * pBMI->biHeight * 3 / 2;
  vih->bmiHeader.biSize        = (DWORD)(sizeof(BITMAPINFOHEADER) + extralen);
  vih->bmiHeader.biCompression = subtype.Data1;
  vih->AvgTimePerFrame = rtAvg;
  SetRect(&vih->rcSource, 0, 0, pBMI->biWidth, pBMI->biHeight);
  vih->rcTarget = vih->rcSource;
  
  if (extralen > 0) {
    memcpy((BYTE *)vih + sizeof(VIDEOINFOHEADER), extra, extralen);
    av_freep(&extra);
    extra = (BYTE *)vih + sizeof(VIDEOINFOHEADER);
  }

  hr = m_pDMO->SetInputType(0, &mtIn, 0);
  if (FAILED(hr)) {
    DbgLog((LOG_TRACE, 10, L"-> Failed to set input type on DMO"));
    return hr;
  }

  /* Create output type */
  int idx = 0;
  while(SUCCEEDED(hr = m_pDMO->GetOutputType(0, idx++, &mtOut))) {
    if (mtOut.subtype == MEDIASUBTYPE_NV12) {
      hr = m_pDMO->SetOutputType(0, &mtOut, 0);
      m_OutPixFmt = LAVPixFmt_NV12;
      break;
    } else if (mtOut.subtype == MEDIASUBTYPE_YV12) {
      hr = m_pDMO->SetOutputType(0, &mtOut, 0);
      m_OutPixFmt = LAVPixFmt_YUV420;
      break;
    }
  }

  if (FAILED(hr)) {
    DbgLog((LOG_TRACE, 10, L"-> Failed to set output type on DMO"));
    return hr;
  }

  videoFormatTypeHandler(mtOut, &pBMI);
  m_pRawBufferSize = pBMI->biSizeImage + FF_INPUT_BUFFER_PADDING_SIZE;

  m_bInterlaced = FALSE;
  memset(&m_StreamAR, 0, sizeof(m_StreamAR));
  if (extralen > 0) {
    m_vc1Header = new CVC1HeaderParser(extra, extralen, codec);
    if (m_vc1Header->hdr.valid) {
      m_bInterlaced = m_vc1Header->hdr.interlaced;
      m_StreamAR = m_vc1Header->hdr.ar;
    }
  }

  m_bManualReorder = (codec == AV_CODEC_ID_VC1) && !(m_pCallback->GetDecodeFlags() & LAV_VIDEO_DEC_FLAG_ONLY_DTS);

  return S_OK;
}
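IS_MARKER, from the decoder's VC-1 headers, tests whether a 32-bit value is an Annex-B style start code, i.e. one whose top three bytes are 0x000001; the loop above therefore locates the first start code in the extradata and, when it begins at offset 0, prepends a zero byte so it lands at offset 1. Roughly (illustrative; the authoritative macro lives in the VC-1 headers):

/* Illustrative: true when x == 0x000001xx, i.e. a VC-1 start code. */
#define IS_MARKER(x) (((x) & ~0xFF) == 0x100)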
Exemple #27
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
{
    InputStream *ist = NULL;
    enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
    int i;

    // TODO: support other filter types
    if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
        av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
               "currently.\n");
        exit(1);
    }

    if (in->name) {
        AVFormatContext *s;
        AVStream       *st = NULL;
        char *p;
        int file_idx = strtol(in->name, &p, 0);

        if (file_idx < 0 || file_idx >= nb_input_files) {
            av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtegraph description %s.\n",
                   file_idx, fg->graph_desc);
            exit(1);
        }
        s = input_files[file_idx]->ctx;

        for (i = 0; i < s->nb_streams; i++) {
            if (s->streams[i]->codec->codec_type != type)
                continue;
            if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
                st = s->streams[i];
                break;
            }
        }
        if (!st) {
            av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
                   "matches no streams.\n", p, fg->graph_desc);
            exit(1);
        }
        ist = input_streams[input_files[file_idx]->ist_index + st->index];
    } else {
        /* find the first unused stream of corresponding type */
        for (i = 0; i < nb_input_streams; i++) {
            ist = input_streams[i];
            if (ist->dec_ctx->codec_type == type && ist->discard)
                break;
        }
        if (i == nb_input_streams) {
            av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
                   "unlabeled input pad %d on filter %s\n", in->pad_idx,
                   in->filter_ctx->name);
            exit(1);
        }
    }
    av_assert0(ist);

    ist->discard         = 0;
    ist->decoding_needed = 1;
    ist->st->discard = AVDISCARD_NONE;

    GROW_ARRAY(fg->inputs, fg->nb_inputs);
    if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
        exit(1);
    fg->inputs[fg->nb_inputs - 1]->ist   = ist;
    fg->inputs[fg->nb_inputs - 1]->graph = fg;

    GROW_ARRAY(ist->filters, ist->nb_filters);
    ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
}
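GROW_ARRAY is a small helper macro from the ffmpeg CLI sources: it reallocates the array for one more element, zeroes the new slot and increments the counter, aborting on allocation failure. Roughly of this shape (illustrative; see the real ffmpeg/cmdutils headers for the authoritative definition):

/* Illustrative shape of the helper macro used above. */
#define GROW_ARRAY(array, nb_elems) \
    array = grow_array(array, sizeof(*array), &nb_elems, nb_elems + 1)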
Exemple #28
void BDRingBuffer::SubmitOverlay(const bd_overlay_s * const overlay)
{
    QMutexLocker lock(&m_overlayLock);

    if (!overlay)
        return;

    if ((overlay->w < 1) || (overlay->w > 1920) || (overlay->x > 1920) ||
        (overlay->h < 1) || (overlay->h > 1080) || (overlay->y > 1080))
    {
        LOG(VB_GENERAL, LOG_ERR, LOC +
            QString("Invalid overlay size: %1x%2+%3+%4")
                .arg(overlay->w).arg(overlay->h)
                .arg(overlay->x).arg(overlay->y));
        return;
    }

    if (!overlay->img)
    {
        m_inMenu = false;
        QRect pos(overlay->x, overlay->y, overlay->w, overlay->h);
        m_overlayImages.append(new BDOverlay(NULL, NULL, pos,
                               overlay->plane, overlay->pts));
        return;
    }

    const BD_PG_RLE_ELEM *rlep = overlay->img;
    static const unsigned palettesize = 256 * 4;
    unsigned width   = (overlay->w + 0x3) & (~0x3);
    unsigned pixels  = ((overlay->w + 0xf) & (~0xf)) *
                       ((overlay->h + 0xf) & (~0xf));
    unsigned actual  = overlay->w * overlay->h;
    uint8_t *data    = (uint8_t*)av_mallocz(pixels);
    uint8_t *palette = (uint8_t*)av_mallocz(palettesize);

    if (!data || !palette)
    {
        av_free(data);
        av_free(palette);
        return;
    }

    int line = 0;
    int this_line = 0;
    for (unsigned i = 0; i < actual; i += rlep->len, rlep++)
    {
        if ((rlep->color == 0 && rlep->len == 0) || this_line >= overlay->w)
        {
            this_line = 0;
            line++;
            i = (line * width) + 1;
        }
        else
        {
            this_line += rlep->len;
            memset(data + i, rlep->color, rlep->len);
        }
    }

    memcpy(palette, overlay->palette, palettesize);

    QRect pos(overlay->x, overlay->y, width, overlay->h);
    m_overlayImages.append(new BDOverlay(data, palette, pos,
                           overlay->plane, overlay->pts));

    if (overlay->plane == 1)
        m_inMenu = true;
}
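The decode loop consumes libbluray's run-length elements: each element holds a palette index and a run length, and a zero length together with color zero marks an end of line, which is why the loop re-bases i onto the next padded row. The element is a small struct of roughly this shape (illustrative; the authoritative definition is in the libbluray headers):

/* Illustrative shape of the RLE element walked above. */
typedef struct {
    uint16_t len;    /* run length in pixels; 0 (with color 0) ends the line */
    uint16_t color;  /* palette index replicated across the run */
} BD_PG_RLE_ELEM;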
Exemple #29
static int dxva2_device_create(AVHWDeviceContext *ctx, const char *device,
                               AVDictionary *opts, int flags)
{
    AVDXVA2DeviceContext *hwctx = ctx->hwctx;
    DXVA2DevicePriv *priv;

    pDirect3DCreate9 *createD3D = NULL;
    pCreateDeviceManager9 *createDeviceManager = NULL;
    D3DPRESENT_PARAMETERS d3dpp = {0};
    D3DDISPLAYMODE        d3ddm;
    unsigned resetToken = 0;
    UINT adapter = D3DADAPTER_DEFAULT;
    HRESULT hr;

    if (device)
        adapter = atoi(device);

    priv = av_mallocz(sizeof(*priv));
    if (!priv)
        return AVERROR(ENOMEM);

    ctx->user_opaque = priv;
    ctx->free        = dxva2_device_free;

    priv->device_handle = INVALID_HANDLE_VALUE;

    priv->d3dlib = LoadLibrary("d3d9.dll");
    if (!priv->d3dlib) {
        av_log(ctx, AV_LOG_ERROR, "Failed to load D3D9 library\n");
        return AVERROR_UNKNOWN;
    }
    priv->dxva2lib = LoadLibrary("dxva2.dll");
    if (!priv->dxva2lib) {
        av_log(ctx, AV_LOG_ERROR, "Failed to load DXVA2 library\n");
        return AVERROR_UNKNOWN;
    }

    createD3D = (pDirect3DCreate9 *)GetProcAddress(priv->d3dlib, "Direct3DCreate9");
    if (!createD3D) {
        av_log(ctx, AV_LOG_ERROR, "Failed to locate Direct3DCreate9\n");
        return AVERROR_UNKNOWN;
    }
    createDeviceManager = (pCreateDeviceManager9 *)GetProcAddress(priv->dxva2lib,
                                                                  "DXVA2CreateDirect3DDeviceManager9");
    if (!createDeviceManager) {
        av_log(ctx, AV_LOG_ERROR, "Failed to locate DXVA2CreateDirect3DDeviceManager9\n");
        return AVERROR_UNKNOWN;
    }

    priv->d3d9 = createD3D(D3D_SDK_VERSION);
    if (!priv->d3d9) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create IDirect3D object\n");
        return AVERROR_UNKNOWN;
    }

    IDirect3D9_GetAdapterDisplayMode(priv->d3d9, adapter, &d3ddm);
    d3dpp.Windowed         = TRUE;
    d3dpp.BackBufferWidth  = 640;
    d3dpp.BackBufferHeight = 480;
    d3dpp.BackBufferCount  = 0;
    d3dpp.BackBufferFormat = d3ddm.Format;
    d3dpp.SwapEffect       = D3DSWAPEFFECT_DISCARD;
    d3dpp.Flags            = D3DPRESENTFLAG_VIDEO;

    hr = IDirect3D9_CreateDevice(priv->d3d9, adapter, D3DDEVTYPE_HAL, GetShellWindow(),
                                 D3DCREATE_SOFTWARE_VERTEXPROCESSING | D3DCREATE_MULTITHREADED | D3DCREATE_FPU_PRESERVE,
                                 &d3dpp, &priv->d3d9device);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create Direct3D device\n");
        return AVERROR_UNKNOWN;
    }

    hr = createDeviceManager(&resetToken, &hwctx->devmgr);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create Direct3D device manager\n");
        return AVERROR_UNKNOWN;
    }

    hr = IDirect3DDeviceManager9_ResetDevice(hwctx->devmgr, priv->d3d9device, resetToken);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Failed to bind Direct3D device to device manager\n");
        return AVERROR_UNKNOWN;
    }

    hr = IDirect3DDeviceManager9_OpenDeviceHandle(hwctx->devmgr, &priv->device_handle);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Failed to open device handle\n");
        return AVERROR_UNKNOWN;
    }

    return 0;
}
Exemple #30
static int CrackURI( const char *path, int *streamType, int *res, int *cam, time_t *from, time_t *to, int *rate, vcrMode *playMode )
{
    int             retVal = AVERROR_INVALIDDATA;
    const char *    delim = "?&";
    char *          pathStr = NULL;
    char *          token = NULL;

    *res = DS_RESOLUTION_HI;
    *streamType = DS_PLAYBACK_MODE_LIVE;
    *cam = 1;

    if( path != NULL ) {
        /* Take a copy of the path string so that strtok can modify it */
        if( (pathStr = (char*)av_mallocz( strlen(path) + 1 )) != NULL ) {
            strcpy( pathStr, path );

            retVal = 0;

            token = strtok( pathStr, delim );

            while( token != NULL ) {
                char *          name = NULL;
                char *          value = NULL;

                /* Now look inside this token string for a name value pair separated by an = */
                if( (value = strstr(token, "=")) != NULL ) {
                    value++;

                    name = token;
                    name[(value-1) - token] = '\0';

                    if( name != NULL && value != NULL ) {
                        /* Which parameter have we got? */
                        if( av_strcasecmp(name, "res" ) == 0 ) {
                            if( strcmp( value, "hi" ) == 0 )
                                *res = DS_RESOLUTION_HI;
                            else if( strcmp( value, "med" ) == 0 )
                                *res = DS_RESOLUTION_MED;
                            else if( strcmp( value, "low" ) == 0 )
                                *res = DS_RESOLUTION_LOW;
                        }
                        else if( av_strcasecmp(name, "stream" ) == 0 ) {
                            if( av_strcasecmp(value, "live" ) == 0 )
                                *streamType = DS_PLAYBACK_MODE_LIVE;
                            else if( av_strcasecmp(value, "play" ) == 0 )
                                *streamType = DS_PLAYBACK_MODE_PLAY;
                        }
                        else if( av_strcasecmp(name, "cam" ) == 0 ) {
                            *cam = atoi( value );
                        }
                        else if( av_strcasecmp(name, "from" ) == 0 ) {
                            *from = (time_t)atoi( value );
                        }
                        else if( av_strcasecmp(name, "to" ) == 0 ) {
                            *to = (time_t)atoi( value );
                        }
                        else if( av_strcasecmp(name, "rate" ) == 0 ) {
                            *rate = atoi( value );
                        }
                        else if( av_strcasecmp(name, "mode" ) == 0 ) {
                            if( av_strcasecmp(value, "play" ) == 0 )
                                *playMode = VM_PLAY;
                            else if( av_strcasecmp(value, "rwd" ) == 0 )
                                *playMode = VM_VIS_REW;
                            else if( av_strcasecmp(value, "fwd" ) == 0 )
                                *playMode = VM_VIS_FF;
                            else if( av_strcasecmp(value, "stop" ) == 0 )
                                *playMode = VM_STOP;
                            else if( av_strcasecmp(value, "shuttle" ) == 0 )
                                *playMode = VM_PLAY_SHUTTLE;
                            else if( av_strcasecmp(value, "finish" ) == 0 )
                                *playMode = VM_FINISH;
                        }
                    }
                }

                token = strtok( NULL, delim );
            }

            av_free( pathStr );
            pathStr = NULL;
        }
        else
            retVal = AVERROR(ENOMEM);
    }

    return retVal;
}