Example #1
File: gifdec.c Project: Bilibili/FFmpeg
static int gif_read_extension(GifState *s)
{
    int ext_code, ext_len, gce_flags, gce_transparent_index;

    /* There must be at least 2 bytes:
     * 1 for extension label and 1 for extension length. */
    if (bytestream2_get_bytes_left(&s->gb) < 2)
        return AVERROR_INVALIDDATA;

    ext_code = bytestream2_get_byteu(&s->gb);
    ext_len  = bytestream2_get_byteu(&s->gb);

    ff_dlog(s->avctx, "ext_code=0x%x len=%d\n", ext_code, ext_len);

    switch(ext_code) {
    case GIF_GCE_EXT_LABEL:
        if (ext_len != 4)
            goto discard_ext;

        /* We need at least 5 bytes more: 4 is for extension body
         * and 1 for next block size. */
        if (bytestream2_get_bytes_left(&s->gb) < 5)
            return AVERROR_INVALIDDATA;

        gce_flags    = bytestream2_get_byteu(&s->gb);
        bytestream2_skipu(&s->gb, 2);    // delay during which the frame is shown
        gce_transparent_index = bytestream2_get_byteu(&s->gb);
        if (gce_flags & 0x01)
            s->transparent_color_index = gce_transparent_index;
        else
            s->transparent_color_index = -1;
        s->gce_disposal = (gce_flags >> 2) & 0x7;

        ff_dlog(s->avctx, "gce_flags=%x tcolor=%d disposal=%d\n",
               gce_flags,
               s->transparent_color_index, s->gce_disposal);

        if (s->gce_disposal > 3) {
            ff_dlog(s->avctx, "invalid value in gce_disposal (%d). Using default value of 0.\n",
                    s->gce_disposal);
            s->gce_disposal = GCE_DISPOSAL_NONE;
        }

        ext_len = bytestream2_get_byteu(&s->gb);
        break;
    }

    /* NOTE: many extension blocks can come after */
 discard_ext:
    while (ext_len) {
        /* There must be at least ext_len bytes and 1 for next block size byte. */
        if (bytestream2_get_bytes_left(&s->gb) < ext_len + 1)
            return AVERROR_INVALIDDATA;

        bytestream2_skipu(&s->gb, ext_len);
        ext_len = bytestream2_get_byteu(&s->gb);

        ff_dlog(s->avctx, "ext_len1=%d\n", ext_len);
    }
    return 0;
}
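For reference, the Graphic Control Extension body parsed above is one flags byte, a 16-bit frame delay, and a transparent-color index: bit 0 of the flags enables transparency and bits 2-4 carry the disposal method. A minimal standalone sketch of the same unpacking, with illustrative names rather than FFmpeg API:

#include <stdint.h>

struct gce {
    int transparent_index;  /* -1 when transparency is off */
    int disposal;           /* 0..3 are valid disposal methods */
};

/* body points at the 4-byte GCE block body read above. */
static struct gce unpack_gce(const uint8_t body[4])
{
    struct gce g;
    uint8_t flags = body[0];
    /* body[1], body[2]: little-endian frame delay, skipped above */
    g.transparent_index = (flags & 0x01) ? body[3] : -1;
    g.disposal = (flags >> 2) & 0x7;
    if (g.disposal > 3)
        g.disposal = 0;     /* same fallback as GCE_DISPOSAL_NONE above */
    return g;
}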
Example #2
static double get_scene_score(AVFilterContext *ctx, AVFrame *crnt, AVFrame *next)
{
    FrameRateContext *s = ctx->priv;
    double ret = 0;

    ff_dlog(ctx, "get_scene_score()\n");

    if (crnt->height == next->height &&
        crnt->width  == next->width) {
        int64_t sad;
        double mafd, diff;

        ff_dlog(ctx, "get_scene_score() process\n");
        if (s->bitdepth == 8)
            sad = scene_sad8(s, crnt->data[0], crnt->linesize[0], next->data[0], next->linesize[0], crnt->width, crnt->height);
        else
            sad = scene_sad16(s, (const uint16_t*)crnt->data[0], crnt->linesize[0] / 2, (const uint16_t*)next->data[0], next->linesize[0] / 2, crnt->width, crnt->height);

        mafd = (double)sad * 100.0 / FFMAX(1, (crnt->height & ~7) * (crnt->width & ~7)) / (1 << s->bitdepth);
        diff = fabs(mafd - s->prev_mafd);
        ret  = av_clipf(FFMIN(mafd, diff), 0, 100.0);
        s->prev_mafd = mafd;
    }
    ff_dlog(ctx, "get_scene_score() result is:%f\n", ret);
    return ret;
}
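The score is a mean absolute frame difference (MAFD), normalized to [0,100] and damped by comparing against the previous frame's MAFD, so steady motion scores low while a hard cut scores high. A self-contained sketch of the same idea for plain 8-bit planes, without the SIMD SAD helpers:

#include <stdint.h>
#include <stdlib.h>
#include <math.h>

/* Sum of absolute differences between two w x h 8-bit planes. */
static int64_t plane_sad8(const uint8_t *a, int a_stride,
                          const uint8_t *b, int b_stride, int w, int h)
{
    int64_t sad = 0;
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++)
            sad += abs(a[x] - b[x]);
        a += a_stride;
        b += b_stride;
    }
    return sad;
}

/* Scene score in [0,100]; *prev_mafd carries state between calls,
 * playing the role of s->prev_mafd above. */
static double scene_score8(const uint8_t *cur, const uint8_t *nxt,
                           int stride, int w, int h, double *prev_mafd)
{
    double mafd = (double)plane_sad8(cur, stride, nxt, stride, w, h)
                  * 100.0 / (double)(w * h) / 256.0;
    double diff = fabs(mafd - *prev_mafd);
    *prev_mafd  = mafd;
    return fmin(fmin(mafd, diff), 100.0);
}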
Example #3
static int blend_frames16(AVFilterContext *ctx, float interpolate,
                          AVFrame *copy_src1, AVFrame *copy_src2)
{
    FrameRateContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    double interpolate_scene_score = 0;

    if ((s->flags & FRAMERATE_FLAG_SCD) && copy_src2) {
        interpolate_scene_score = get_scene_score16(ctx, copy_src1, copy_src2);
        ff_dlog(ctx, "blend_frames16() interpolate scene score:%f\n", interpolate_scene_score);
    }
    // decide if the shot-change detection allows us to blend two frames
    if (interpolate_scene_score < s->scene_score && copy_src2) {
        uint16_t src2_factor = fabsf(interpolate) * (1 << (s->bitdepth - 8));
        uint16_t src1_factor = s->max - src2_factor;
        const int half = s->max / 2;
        const int uv = (s->max + 1) * half;
        const int shift = s->bitdepth;
        int plane, line, pixel;

        // get work-space for output frame
        s->work = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!s->work)
            return AVERROR(ENOMEM);

        av_frame_copy_props(s->work, s->srce[s->crnt]);

        ff_dlog(ctx, "blend_frames16() INTERPOLATE to create work frame\n");
        for (plane = 0; plane < 4 && copy_src1->data[plane] && copy_src2->data[plane]; plane++) {
            int cpy_line_width = s->line_size[plane];
            const uint16_t *cpy_src1_data = (const uint16_t *)copy_src1->data[plane];
            int cpy_src1_line_size = copy_src1->linesize[plane] / 2;
            const uint16_t *cpy_src2_data = (const uint16_t *)copy_src2->data[plane];
            int cpy_src2_line_size = copy_src2->linesize[plane] / 2;
            int cpy_src_h = (plane > 0 && plane < 3) ? (copy_src1->height >> s->vsub) : (copy_src1->height);
            uint16_t *cpy_dst_data = (uint16_t *)s->work->data[plane];
            int cpy_dst_line_size = s->work->linesize[plane] / 2;

            if (plane < 1 || plane > 2) {
                // luma or alpha
                for (line = 0; line < cpy_src_h; line++) {
                    for (pixel = 0; pixel < cpy_line_width; pixel++)
                        cpy_dst_data[pixel] = ((cpy_src1_data[pixel] * src1_factor) + (cpy_src2_data[pixel] * src2_factor) + half) >> shift;
                    cpy_src1_data += cpy_src1_line_size;
                    cpy_src2_data += cpy_src2_line_size;
                    cpy_dst_data += cpy_dst_line_size;
                }
            } else {
                // chroma
                for (line = 0; line < cpy_src_h; line++) {
                    for (pixel = 0; pixel < cpy_line_width; pixel++) {
                        cpy_dst_data[pixel] = (((cpy_src1_data[pixel] - half) * src1_factor) + ((cpy_src2_data[pixel] - half) * src2_factor) + uv) >> shift;
                    }
                    cpy_src1_data += cpy_src1_line_size;
                    cpy_src2_data += cpy_src2_line_size;
                    cpy_dst_data += cpy_dst_line_size;
                }
            }
        }
        return 1; /* work frame created */
    }
    return 0; /* scene change or no second frame: caller should not blend */
}
Example #4
static void af_queue_log_state(AudioFrameQueue *afq)
{
    AudioFrame *f;
    ff_dlog(afq->avctx, "remaining delay   = %d\n", afq->remaining_delay);
    ff_dlog(afq->avctx, "remaining samples = %d\n", afq->remaining_samples);
    ff_dlog(afq->avctx, "frames:\n");
    f = afq->frame_queue;
    while (f) {
        ff_dlog(afq->avctx, "  [ pts=%9"PRId64" duration=%d ]\n",
                f->pts, f->duration);
        f = f->next;
    }
}
Example #5
static inline int get_vlc_symbol(GetBitContext *gb, VlcState *const state,
                                 int bits)
{
    int k, i, v, ret;

    i = state->count;
    k = 0;
    while (i < state->error_sum) { // FIXME: optimize
        k++;
        i += i;
    }

    v = get_sr_golomb(gb, k, 12, bits);
    ff_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
            v, state->bias, state->error_sum, state->drift, state->count, k);

#if 0 // JPEG LS
    if (k == 0 && 2 * state->drift <= -state->count)
        v ^= (-1);
#else
    v ^= ((2 * state->drift + state->count) >> 31);
#endif

    ret = fold(v + state->bias, bits);

    update_vlc_state(state, v);

    return ret;
}
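The while loop at the top selects the Golomb-Rice parameter k as the smallest value with count * 2^k >= error_sum, so a context with larger accumulated error relative to its count gets a larger k. The same selection as a standalone helper:

/* Smallest k with (count << k) >= error_sum; equivalent to the
 * i += i loop above. */
static int rice_parameter(int count, int error_sum)
{
    int k = 0;
    int i = count;
    while (i < error_sum) {
        k++;
        i += i;
    }
    return k;
}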
Example #6
File: msmpeg4dec.c Project: andrewrk/libav
/* This is identical to h263 except that its range is multiplied by 2. */
static int msmpeg4v2_decode_motion(MpegEncContext * s, int pred, int f_code)
{
    int code, val, sign, shift;

    code = get_vlc2(&s->gb, v2_mv_vlc.table, V2_MV_VLC_BITS, 2);
    ff_dlog(s, "MV code %d at %d %d pred: %d\n", code, s->mb_x,s->mb_y, pred);
    if (code < 0)
        return 0xffff;

    if (code == 0)
        return pred;
    sign = get_bits1(&s->gb);
    shift = f_code - 1;
    val = code;
    if (shift) {
        val = (val - 1) << shift;
        val |= get_bits(&s->gb, shift);
        val++;
    }
    if (sign)
        val = -val;

    val += pred;
    if (val <= -64)
        val += 64;
    else if (val >= 64)
        val -= 64;

    return val;
}
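For f_code > 1 each VLC code selects a bucket of 2^(f_code-1) magnitudes refined by shift extra bits, and the predicted sum wraps at +/-64 (double the plain H.263 range, per the comment above). A compact sketch of the reconstruction, with the extra bits passed in rather than read from a bitstream:

/* Rebuild a signed motion delta from (code, sign, extra) and fold the
 * predicted value back into range, mirroring the tail of the function. */
static int rebuild_mv(int code, int sign, int extra, int f_code, int pred)
{
    int shift = f_code - 1;
    int val = code;
    if (shift) {
        val = ((val - 1) << shift) | extra;
        val++;
    }
    if (sign)
        val = -val;
    val += pred;
    if (val <= -64)
        val += 64;
    else if (val >= 64)
        val -= 64;
    return val;
}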
Example #7
/**
 * Decode LSE block with initialization parameters
 */
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
{
    int id;

    skip_bits(&s->gb, 16);  /* length: FIXME: verify field validity */
    id = get_bits(&s->gb, 8);

    switch (id) {
    case 1:
        s->maxval = get_bits(&s->gb, 16);
        s->t1     = get_bits(&s->gb, 16);
        s->t2     = get_bits(&s->gb, 16);
        s->t3     = get_bits(&s->gb, 16);
        s->reset  = get_bits(&s->gb, 16);

//        ff_jpegls_reset_coding_parameters(s, 0);
        //FIXME quant table?
        break;
    case 2:
    case 3:
        av_log(s->avctx, AV_LOG_ERROR, "palette not supported\n");
        return AVERROR(ENOSYS);
    case 4:
        av_log(s->avctx, AV_LOG_ERROR, "oversize image not supported\n");
        return AVERROR(ENOSYS);
    default:
        av_log(s->avctx, AV_LOG_ERROR, "invalid id %d\n", id);
        return AVERROR_INVALIDDATA;
    }
    ff_dlog(s->avctx, "ID=%i, T=%i,%i,%i\n", id, s->t1, s->t2, s->t3);

    return 0;
}
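An LSE (ID 1) marker segment is simply a 16-bit length, an 8-bit ID, and five big-endian 16-bit parameters (MAXVAL, T1, T2, T3, RESET). A hypothetical flat-buffer reader for the same layout, without the GetBitContext machinery; the caller must supply at least 13 bytes:

#include <stdint.h>

/* p points at the segment payload (after the LSE marker); returns 0 and
 * fills out[5] = {maxval, t1, t2, t3, reset}, or -1 for other IDs. */
static int parse_lse_id1(const uint8_t *p, uint16_t out[5])
{
    if (p[2] != 1)                     /* only ID 1 handled here */
        return -1;
    for (int i = 0; i < 5; i++)
        out[i] = (p[3 + 2 * i] << 8) | p[4 + 2 * i];
    return 0;
}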
Example #8
File: vaapi_hevc.c Project: 0day-ci/FFmpeg
/** Initialize and start decoding a frame with VA API. */
static int vaapi_hevc_start_frame(AVCodecContext          *avctx,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t       size)
{
    HEVCContext * const h = avctx->priv_data;
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    vaapi_hevc_frame_data *frame_data = h->ref->hwaccel_picture_private;
    VAPictureParameterBufferHEVC *pic_param;
    VAIQMatrixBufferHEVC *iq_matrix;
    ScalingList const * scaling_list;
    int i, j, pos;

    ff_dlog(avctx, "vaapi_hevc_start_frame()\n");

    vactx->slice_param_size = sizeof(VASliceParameterBufferHEVC);

    /* Fill in VAPictureParameterBufferHEVC. */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferHEVC));
    if (!pic_param)
        return -1;
    fill_picture_parameters(h, pic_param);
    frame_data->pic_param = pic_param;

    /* Fill in VAIQMatrixBufferHEVC. */
    if (h->ps.pps->scaling_list_data_present_flag) {
        scaling_list = &h->ps.pps->scaling_list;
    } else if (h->ps.sps->scaling_list_enable_flag) {
        scaling_list = &h->ps.sps->scaling_list;
    } else {
        return 0;
    }

    iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferHEVC));
    if (!iq_matrix)
        return -1;

    for (i = 0; i < 6; ++i) {
        for (j = 0; j < 16; ++j) {
            pos = 4 * ff_hevc_diag_scan4x4_y[j] + ff_hevc_diag_scan4x4_x[j];
            iq_matrix->ScalingList4x4[i][j] = scaling_list->sl[0][i][pos];
        }
        for (j = 0; j < 64; ++j) {
            pos = 8 * ff_hevc_diag_scan8x8_y[j] + ff_hevc_diag_scan8x8_x[j];
            iq_matrix->ScalingList8x8[i][j] = scaling_list->sl[1][i][pos];
            iq_matrix->ScalingList16x16[i][j] = scaling_list->sl[2][i][pos];
            if (i < 2) {
                iq_matrix->ScalingList32x32[i][j] = scaling_list->sl[3][i * 3][pos];
            }
        }
        iq_matrix->ScalingListDC16x16[i] = scaling_list->sl_dc[0][i];
        if (i < 2) {
            iq_matrix->ScalingListDC32x32[i] = scaling_list->sl_dc[1][i * 3];
        }
    }

    return 0;
}
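The ff_hevc_diag_scan* tables used above linearize each scaling list in up-right diagonal order. For reference, a sketch that generates the 4x4 scan those tables precompute:

#include <stdint.h>

/* Fill sx/sy with the 4x4 up-right diagonal scan: walk anti-diagonals
 * of increasing x + y, each from its left edge up and to the right. */
static void diag_scan4x4(uint8_t sx[16], uint8_t sy[16])
{
    int i = 0;
    for (int d = 0; d <= 6; d++) {
        for (int y = d; y >= 0; y--) {
            int x = d - y;
            if (x < 4 && y < 4) {
                sx[i] = x;
                sy[i] = y;
                i++;
            }
        }
    }
}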
Example #9
static void next_source(AVFilterContext *ctx)
{
    FrameRateContext *s = ctx->priv;
    int i;

    ff_dlog(ctx,  "next_source()\n");

    if (s->srce[s->last] && s->srce[s->last] != s->srce[s->last-1]) {
        ff_dlog(ctx, "next_source() unlink %d\n", s->last);
        av_frame_free(&s->srce[s->last]);
    }
    for (i = s->last; i > s->frst; i--) {
        ff_dlog(ctx, "next_source() copy %d to %d\n", i - 1, i);
        s->srce[i] = s->srce[i - 1];
    }
    ff_dlog(ctx, "next_source() make %d null\n", s->frst);
    s->srce[s->frst] = NULL;
}
Example #10
static double get_scene_score(AVFilterContext *ctx, AVFrame *crnt, AVFrame *next)
{
    FrameRateContext *s = ctx->priv;
    double ret = 0;

    ff_dlog(ctx, "get_scene_score()\n");

    if (crnt &&
        crnt->height == next->height &&
        crnt->width  == next->width) {
        int x, y;
        int64_t sad;
        double mafd, diff;
        uint8_t *p1 = crnt->data[0];
        uint8_t *p2 = next->data[0];
        const int p1_linesize = crnt->linesize[0];
        const int p2_linesize = next->linesize[0];

        ff_dlog(ctx, "get_scene_score() process\n");

        for (sad = y = 0; y < crnt->height; y += 8) {
            for (x = 0; x < p1_linesize; x += 8) {
                sad += s->sad(p1 + y * p1_linesize + x,
                              p1_linesize,
                              p2 + y * p2_linesize + x,
                              p2_linesize);
            }
        }
        emms_c();
        mafd = sad / (crnt->height * crnt->width * 3);
        diff = fabs(mafd - s->prev_mafd);
        ret  = av_clipf(FFMIN(mafd, diff), 0, 100.0);
        s->prev_mafd = mafd;
    }
    ff_dlog(ctx, "get_scene_score() result is:%f\n", ret);
    return ret;
}
Example #11
File: gifdec.c Project: Bilibili/FFmpeg
static int gif_read_header1(GifState *s)
{
    uint8_t sig[6];
    int v, n;
    int background_color_index;

    if (bytestream2_get_bytes_left(&s->gb) < 13)
        return AVERROR_INVALIDDATA;

    /* read gif signature */
    bytestream2_get_bufferu(&s->gb, sig, 6);
    if (memcmp(sig, gif87a_sig, 6) &&
        memcmp(sig, gif89a_sig, 6))
        return AVERROR_INVALIDDATA;

    /* read screen header */
    s->transparent_color_index = -1;
    s->screen_width  = bytestream2_get_le16u(&s->gb);
    s->screen_height = bytestream2_get_le16u(&s->gb);

    v = bytestream2_get_byteu(&s->gb);
    s->color_resolution = ((v & 0x70) >> 4) + 1;
    s->has_global_palette = (v & 0x80);
    s->bits_per_pixel = (v & 0x07) + 1;
    background_color_index = bytestream2_get_byteu(&s->gb);
    n = bytestream2_get_byteu(&s->gb);
    if (n) {
        s->avctx->sample_aspect_ratio.num = n + 15;
        s->avctx->sample_aspect_ratio.den = 64;
    }

    ff_dlog(s->avctx, "screen_w=%d screen_h=%d bpp=%d global_palette=%d\n",
           s->screen_width, s->screen_height, s->bits_per_pixel,
           s->has_global_palette);

    if (s->has_global_palette) {
        s->background_color_index = background_color_index;
        n = 1 << s->bits_per_pixel;
        if (bytestream2_get_bytes_left(&s->gb) < n * 3)
            return AVERROR_INVALIDDATA;

        gif_read_palette(s, s->global_palette, n);
        s->bg_color = s->global_palette[s->background_color_index];
    } else
        s->background_color_index = -1;

    return 0;
}
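The packed byte read into v above is the Logical Screen Descriptor flags field: bit 7 signals a global palette, bits 4-6 encode the color resolution minus one, and bits 0-2 the palette size exponent minus one. A small sketch of the same unpacking:

#include <stdint.h>

struct lsd_flags {
    int has_global_palette;
    int color_resolution;   /* bits per primary, 1..8 */
    int bits_per_pixel;     /* global palette has 1 << bits_per_pixel entries */
};

static struct lsd_flags unpack_lsd_flags(uint8_t v)
{
    struct lsd_flags f;
    f.has_global_palette = v >> 7;
    f.color_resolution   = ((v & 0x70) >> 4) + 1;
    f.bits_per_pixel     = (v & 0x07) + 1;
    return f;
}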
Example #12
File: vaapi.c Project: 0day-ci/FFmpeg
void ff_vaapi_common_end_frame(AVCodecContext *avctx)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);

    ff_dlog(avctx, "ff_vaapi_common_end_frame()\n");

    destroy_buffers(vactx->display, &vactx->pic_param_buf_id, 1);
    destroy_buffers(vactx->display, &vactx->iq_matrix_buf_id, 1);
    destroy_buffers(vactx->display, &vactx->bitplane_buf_id, 1);
    destroy_buffers(vactx->display, vactx->slice_buf_ids, vactx->n_slice_buf_ids);
    av_freep(&vactx->slice_buf_ids);
    av_freep(&vactx->slice_params);
    vactx->n_slice_buf_ids     = 0;
    vactx->slice_buf_ids_alloc = 0;
    vactx->slice_count         = 0;
    vactx->slice_params_alloc  = 0;
}
Example #13
static int vaapi_mpeg4_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
    MpegEncContext * const s = avctx->priv_data;
    VASliceParameterBufferMPEG4 *slice_param;

    ff_dlog(avctx, "vaapi_mpeg4_decode_slice(): buffer %p, size %d\n", buffer, size);

    /* Fill in VASliceParameterBufferMPEG4 */
    slice_param = (VASliceParameterBufferMPEG4 *)ff_vaapi_alloc_slice(avctx->hwaccel_context, buffer, size);
    if (!slice_param)
        return -1;
    slice_param->macroblock_offset      = get_bits_count(&s->gb) % 8;
    slice_param->macroblock_number      = 0;
    slice_param->quant_scale            = s->qscale;

    return 0;
}
Example #14
int ff_vbv_update(MpegEncContext *s, int frame_size)
{
    RateControlContext *rcc = &s->rc_context;
    const double fps        = get_fps(s->avctx);
    const int buffer_size   = s->avctx->rc_buffer_size;
    const double min_rate   = s->avctx->rc_min_rate / fps;
    const double max_rate   = s->avctx->rc_max_rate / fps;

    ff_dlog(s, "%d %f %d %f %f\n",
            buffer_size, rcc->buffer_index, frame_size, min_rate, max_rate);

    if (buffer_size) {
        int left;

        rcc->buffer_index -= frame_size;
        if (rcc->buffer_index < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "rc buffer underflow\n");
            if (frame_size > max_rate && s->qscale == s->avctx->qmax) {
                av_log(s->avctx, AV_LOG_ERROR, "max bitrate possibly too small or try trellis with large lmax or increase qmax\n");
            }
            rcc->buffer_index = 0;
        }

        left = buffer_size - rcc->buffer_index - 1;
        rcc->buffer_index += av_clip(left, min_rate, max_rate);

        if (rcc->buffer_index > buffer_size) {
            int stuffing = ceil((rcc->buffer_index - buffer_size) / 8);

            if (stuffing < 4 && s->codec_id == AV_CODEC_ID_MPEG4)
                stuffing = 4;
            rcc->buffer_index -= 8 * stuffing;

            if (s->avctx->debug & FF_DEBUG_RC)
                av_log(s->avctx, AV_LOG_DEBUG, "stuffing %d bytes\n", stuffing);

            return stuffing;
        }
    }
    return 0;
}
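Each call models one frame leaving the VBV buffer and one frame interval of refill, with the refill clipped between the per-frame minimum and maximum rates; anything above the buffer size must be burned off as stuffing bytes. A freestanding sketch of the same arithmetic, assuming bits and per-frame rates as units:

#include <math.h>

/* Returns stuffing bytes needed; updates *fullness in place. */
static int vbv_update(double *fullness, int buffer_size, int frame_bits,
                      double min_rate, double max_rate)
{
    double left;
    *fullness -= frame_bits;
    if (*fullness < 0)
        *fullness = 0;                      /* underflow: decoder would starve */
    left = buffer_size - *fullness - 1;
    if (left < min_rate) left = min_rate;   /* av_clip equivalent */
    if (left > max_rate) left = max_rate;
    *fullness += left;
    if (*fullness > buffer_size) {          /* overflow: pad the stream */
        int stuffing = (int)ceil((*fullness - buffer_size) / 8);
        *fullness -= 8 * stuffing;
        return stuffing;
    }
    return 0;
}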
Example #15
int ff_vbv_update(MpegEncContext *s, int frame_size)
{
    RateControlContext *rcc = &s->rc_context;
    const double fps        = 1 / av_q2d(s->avctx->time_base);
    const int buffer_size   = s->avctx->rc_buffer_size;
    const double min_rate   = s->avctx->rc_min_rate / fps;
    const double max_rate   = s->avctx->rc_max_rate / fps;

    ff_dlog(s, "%d %f %d %f %f\n",
            buffer_size, rcc->buffer_index, frame_size, min_rate, max_rate);

    if (buffer_size) {
        int left;

        rcc->buffer_index -= frame_size;
        if (rcc->buffer_index < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "rc buffer underflow\n");
            rcc->buffer_index = 0;
        }

        left = buffer_size - rcc->buffer_index - 1;
        rcc->buffer_index += av_clip(left, min_rate, max_rate);

        if (rcc->buffer_index > buffer_size) {
            int stuffing = ceil((rcc->buffer_index - buffer_size) / 8);

            if (stuffing < 4 && s->codec_id == AV_CODEC_ID_MPEG4)
                stuffing = 4;
            rcc->buffer_index -= 8 * stuffing;

            if (s->avctx->debug & FF_DEBUG_RC)
                av_log(s->avctx, AV_LOG_DEBUG, "stuffing %d bytes\n", stuffing);

            return stuffing;
        }
    }
    return 0;
}
Example #16
File: vaapi_hevc.c Project: 0day-ci/FFmpeg
/** End a hardware-decoded frame. */
static int vaapi_hevc_end_frame(AVCodecContext *avctx)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    HEVCContext * const h = avctx->priv_data;
    vaapi_hevc_frame_data *frame_data = h->ref->hwaccel_picture_private;
    int ret;

    ff_dlog(avctx, "vaapi_hevc_end_frame()\n");

    frame_data->last_slice_param->LongSliceFlags.fields.LastSliceOfPic = 1;

    ret = ff_vaapi_commit_slices(vactx);
    if (ret < 0)
        goto finish;

    ret = ff_vaapi_render_picture(vactx, ff_vaapi_get_surface_id(h->ref->frame));
    if (ret < 0)
        goto finish;

finish:
    ff_vaapi_common_end_frame(avctx);
    return ret;
}
Example #17
static int dvbsub_parse(AVCodecParserContext *s,
                        AVCodecContext *avctx,
                        const uint8_t **poutbuf, int *poutbuf_size,
                        const uint8_t *buf, int buf_size)
{
    DVBSubParseContext *pc = s->priv_data;
    uint8_t *p, *p_end;
    int i, len, buf_pos = 0;

    ff_dlog(avctx, "DVB parse packet pts=%"PRIx64", lpts=%"PRIx64", cpts=%"PRIx64":\n",
            s->pts, s->last_pts, s->cur_frame_pts[s->cur_frame_start_index]);

    for (i=0; i < buf_size; i++)
    {
        ff_dlog(avctx, "%02x ", buf[i]);
        if (i % 16 == 15)
            ff_dlog(avctx, "\n");
    }

    if (i % 16 != 0)
        ff_dlog(avctx, "\n");

    *poutbuf = NULL;
    *poutbuf_size = 0;

    s->fetch_timestamp = 1;

    if (s->last_pts != s->pts && s->pts != AV_NOPTS_VALUE) /* Start of a new packet */
    {
        if (pc->packet_index != pc->packet_start)
        {
            ff_dlog(avctx, "Discarding %d bytes\n",
                    pc->packet_index - pc->packet_start);
        }

        pc->packet_start = 0;
        pc->packet_index = 0;

        if (buf_size < 2 || buf[0] != 0x20 || buf[1] != 0x00) {
            ff_dlog(avctx, "Bad packet header\n");
            return -1;
        }

        buf_pos = 2;

        pc->in_packet = 1;
    } else {
        if (pc->packet_start != 0)
        {
            if (pc->packet_index != pc->packet_start)
            {
                memmove(pc->packet_buf, pc->packet_buf + pc->packet_start,
                            pc->packet_index - pc->packet_start);

                pc->packet_index -= pc->packet_start;
                pc->packet_start = 0;
            } else {
                pc->packet_start = 0;
                pc->packet_index = 0;
            }
        }
    }

    if (buf_size - buf_pos + pc->packet_index > PARSE_BUF_SIZE)
        return -1;

/* if not currently in a packet, discard data */
    if (pc->in_packet == 0)
        return buf_size;

    memcpy(pc->packet_buf + pc->packet_index, buf + buf_pos, buf_size - buf_pos);
    pc->packet_index += buf_size - buf_pos;

    p = pc->packet_buf;
    p_end = pc->packet_buf + pc->packet_index;

    while (p < p_end)
    {
        if (*p == 0x0f)
        {
            if (6 <= p_end - p)
            {
                len = AV_RB16(p + 4);

                if (len + 6 <= p_end - p)
                {
                    *poutbuf_size += len + 6;

                    p += len + 6;
                } else
                    break;
            } else
                break;
        } else if (*p == 0xff) {
            if (1 < p_end - p)
            {
                ff_dlog(avctx, "Junk at end of packet\n");
            }
            pc->packet_index = p - pc->packet_buf;
            pc->in_packet = 0;
            break;
        } else {
            av_log(avctx, AV_LOG_ERROR, "Junk in packet\n");

            pc->packet_index = p - pc->packet_buf;
            pc->in_packet = 0;
            break;
        }
    }

    if (*poutbuf_size > 0)
    {
        *poutbuf = pc->packet_buf;
        pc->packet_start = *poutbuf_size;
    }

    if (s->pts == AV_NOPTS_VALUE)
        s->pts = s->last_pts;

    return buf_size;
}
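The segment walk depends on the DVB subtitling layout: each segment is the sync byte 0x0f, a type byte, a 16-bit page id, a 16-bit length, then length payload bytes, and 0xff terminates the PES data. A hedged standalone version of the same scan:

#include <stdint.h>

/* Return total bytes of complete segments at the front of buf,
 * or -1 on junk; same layout assumptions as the loop above. */
static int complete_segments(const uint8_t *buf, int size)
{
    int pos = 0;
    while (pos < size) {
        if (buf[pos] == 0xff)          /* end-of-data marker */
            break;
        if (buf[pos] != 0x0f)          /* lost sync */
            return -1;
        if (size - pos < 6)            /* header incomplete */
            break;
        int len = (buf[pos + 4] << 8) | buf[pos + 5];
        if (size - pos < len + 6)      /* payload incomplete */
            break;
        pos += len + 6;
    }
    return pos;
}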
Example #18
File: parser.c Project: AndreDing/FFmpeg
int ff_combine_frame(ParseContext *pc, int next,
                     const uint8_t **buf, int *buf_size)
{
    if (pc->overread) {
        ff_dlog(NULL, "overread %d, state:%X next:%d index:%d o_index:%d\n",
                pc->overread, pc->state, next, pc->index, pc->overread_index);
        ff_dlog(NULL, "%X %X %X %X\n",
                (*buf)[0], (*buf)[1], (*buf)[2], (*buf)[3]);
    }

    /* Copy overread bytes from last frame into buffer. */
    for (; pc->overread > 0; pc->overread--)
        pc->buffer[pc->index++] = pc->buffer[pc->overread_index++];

    /* flush remaining if EOF */
    if (!*buf_size && next == END_NOT_FOUND)
        next = 0;

    pc->last_index = pc->index;

    /* copy into buffer end return */
    if (next == END_NOT_FOUND) {
        void *new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size,
                                           *buf_size + pc->index +
                                           AV_INPUT_BUFFER_PADDING_SIZE);

        if (!new_buffer) {
            av_log(NULL, AV_LOG_ERROR, "Failed to reallocate parser buffer to %d\n", *buf_size + pc->index + AV_INPUT_BUFFER_PADDING_SIZE);
            pc->index = 0;
            return AVERROR(ENOMEM);
        }
        pc->buffer = new_buffer;
        memcpy(&pc->buffer[pc->index], *buf, *buf_size);
        pc->index += *buf_size;
        return -1;
    }

    *buf_size          =
    pc->overread_index = pc->index + next;

    /* append to buffer */
    if (pc->index) {
        void *new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size,
                                           next + pc->index +
                                           AV_INPUT_BUFFER_PADDING_SIZE);
        if (!new_buffer) {
            av_log(NULL, AV_LOG_ERROR, "Failed to reallocate parser buffer to %d\n", next + pc->index + AV_INPUT_BUFFER_PADDING_SIZE);
            pc->overread_index =
            pc->index = 0;
            return AVERROR(ENOMEM);
        }
        pc->buffer = new_buffer;
        if (next > -AV_INPUT_BUFFER_PADDING_SIZE)
            memcpy(&pc->buffer[pc->index], *buf,
                   next + AV_INPUT_BUFFER_PADDING_SIZE);
        pc->index = 0;
        *buf      = pc->buffer;
    }

    /* store overread bytes */
    for (; next < 0; next++) {
        pc->state   = pc->state   << 8 | pc->buffer[pc->last_index + next];
        pc->state64 = pc->state64 << 8 | pc->buffer[pc->last_index + next];
        pc->overread++;
    }

    if (pc->overread) {
        ff_dlog(NULL, "overread %d, state:%X next:%d index:%d o_index:%d\n",
                pc->overread, pc->state, next, pc->index, pc->overread_index);
        ff_dlog(NULL, "%X %X %X %X\n",
                (*buf)[0], (*buf)[1], (*buf)[2], (*buf)[3]);
    }

    return 0;
}
Example #19
static av_always_inline int RENAME(decode_line)(FFV1Context *s, int w,
                                                 TYPE *sample[2],
                                                 int plane_index, int bits)
{
    PlaneContext *const p = &s->plane[plane_index];
    RangeCoder *const c   = &s->c;
    int x;
    int run_count = 0;
    int run_mode  = 0;
    int run_index = s->run_index;

    if (is_input_end(s))
        return AVERROR_INVALIDDATA;

    if (s->slice_coding_mode == 1) {
        int i;
        for (x = 0; x < w; x++) {
            int v = 0;
            for (i=0; i<bits; i++) {
                uint8_t state = 128;
                v += v + get_rac(c, &state);
            }
            sample[1][x] = v;
        }
        return 0;
    }

    for (x = 0; x < w; x++) {
        int diff, context, sign;

        if (!(x & 1023)) {
            if (is_input_end(s))
                return AVERROR_INVALIDDATA;
        }

        context = RENAME(get_context)(p, sample[1] + x, sample[0] + x, sample[1] + x);
        if (context < 0) {
            context = -context;
            sign    = 1;
        } else
            sign = 0;

        av_assert2(context < p->context_count);

        if (s->ac != AC_GOLOMB_RICE) {
            diff = get_symbol_inline(c, p->state[context], 1);
        } else {
            if (context == 0 && run_mode == 0)
                run_mode = 1;

            if (run_mode) {
                if (run_count == 0 && run_mode == 1) {
                    if (get_bits1(&s->gb)) {
                        run_count = 1 << ff_log2_run[run_index];
                        if (x + run_count <= w)
                            run_index++;
                    } else {
                        if (ff_log2_run[run_index])
                            run_count = get_bits(&s->gb, ff_log2_run[run_index]);
                        else
                            run_count = 0;
                        if (run_index)
                            run_index--;
                        run_mode = 2;
                    }
                }
                while (run_count > 1 && w-x > 1) {
                    sample[1][x] = RENAME(predict)(sample[1] + x, sample[0] + x);
                    x++;
                    run_count--;
                }
                run_count--;
                if (run_count < 0) {
                    run_mode  = 0;
                    run_count = 0;
                    diff      = get_vlc_symbol(&s->gb, &p->vlc_state[context],
                                               bits);
                    if (diff >= 0)
                        diff++;
                } else
                    diff = 0;
            } else
                diff = get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);

            ff_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
                    run_count, run_index, run_mode, x, get_bits_count(&s->gb));
        }

        if (sign)
            diff = -(unsigned)diff;

        sample[1][x] = av_mod_uintp2(RENAME(predict)(sample[1] + x, sample[0] + x) + (SUINT)diff, bits);
    }
    s->run_index = run_index;
    return 0;
}
Example #20
static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
    Mpeg4DecContext *ctx = avctx->priv_data;
    MpegEncContext * const s = &ctx->m;
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    VAPictureParameterBufferMPEG4 *pic_param;
    VAIQMatrixBufferMPEG4 *iq_matrix;
    int i;

    ff_dlog(avctx, "vaapi_mpeg4_start_frame()\n");

    vactx->slice_param_size = sizeof(VASliceParameterBufferMPEG4);

    /* Fill in VAPictureParameterBufferMPEG4 */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferMPEG4));
    if (!pic_param)
        return -1;
    pic_param->vop_width                                = s->width;
    pic_param->vop_height                               = s->height;
    pic_param->forward_reference_picture                = VA_INVALID_ID;
    pic_param->backward_reference_picture               = VA_INVALID_ID;
    pic_param->vol_fields.value                         = 0; /* reset all bits */
    pic_param->vol_fields.bits.short_video_header       = avctx->codec->id == AV_CODEC_ID_H263;
    pic_param->vol_fields.bits.chroma_format            = CHROMA_420;
    pic_param->vol_fields.bits.interlaced               = !s->progressive_sequence;
    pic_param->vol_fields.bits.obmc_disable             = 1;
    pic_param->vol_fields.bits.sprite_enable            = ctx->vol_sprite_usage;
    pic_param->vol_fields.bits.sprite_warping_accuracy  = s->sprite_warping_accuracy;
    pic_param->vol_fields.bits.quant_type               = s->mpeg_quant;
    pic_param->vol_fields.bits.quarter_sample           = s->quarter_sample;
    pic_param->vol_fields.bits.data_partitioned         = s->data_partitioning;
    pic_param->vol_fields.bits.reversible_vlc           = ctx->rvlc;
    pic_param->vol_fields.bits.resync_marker_disable    = !ctx->resync_marker;
    pic_param->no_of_sprite_warping_points              = ctx->num_sprite_warping_points;
    for (i = 0; i < ctx->num_sprite_warping_points && i < 3; i++) {
        pic_param->sprite_trajectory_du[i]              = ctx->sprite_traj[i][0];
        pic_param->sprite_trajectory_dv[i]              = ctx->sprite_traj[i][1];
    }
    pic_param->quant_precision                          = s->quant_precision;
    pic_param->vop_fields.value                         = 0; /* reset all bits */
    pic_param->vop_fields.bits.vop_coding_type          = s->pict_type - AV_PICTURE_TYPE_I;
    pic_param->vop_fields.bits.backward_reference_vop_coding_type = s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.f->pict_type - AV_PICTURE_TYPE_I : 0;
    pic_param->vop_fields.bits.vop_rounding_type        = s->no_rounding;
    pic_param->vop_fields.bits.intra_dc_vlc_thr         = mpeg4_get_intra_dc_vlc_thr(ctx);
    pic_param->vop_fields.bits.top_field_first          = s->top_field_first;
    pic_param->vop_fields.bits.alternate_vertical_scan_flag = s->alternate_scan;
    pic_param->vop_fcode_forward                        = s->f_code;
    pic_param->vop_fcode_backward                       = s->b_code;
    pic_param->vop_time_increment_resolution            = avctx->framerate.num;
    pic_param->num_macroblocks_in_gob                   = s->mb_width * H263_GOB_HEIGHT(s->height);
    pic_param->num_gobs_in_vop                          = (s->mb_width * s->mb_height) / pic_param->num_macroblocks_in_gob;
    pic_param->TRB                                      = s->pb_time;
    pic_param->TRD                                      = s->pp_time;

    if (s->pict_type == AV_PICTURE_TYPE_B)
        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
    if (s->pict_type != AV_PICTURE_TYPE_I)
        pic_param->forward_reference_picture  = ff_vaapi_get_surface_id(s->last_picture.f);

    /* Fill in VAIQMatrixBufferMPEG4 */
    /* Only the first inverse quantisation method uses the weighting matrices */
    if (pic_param->vol_fields.bits.quant_type) {
        iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferMPEG4));
        if (!iq_matrix)
            return -1;
        iq_matrix->load_intra_quant_mat         = 1;
        iq_matrix->load_non_intra_quant_mat     = 1;

        for (i = 0; i < 64; i++) {
            int n = s->idsp.idct_permutation[ff_zigzag_direct[i]];
            iq_matrix->intra_quant_mat[i]       = s->intra_matrix[n];
            iq_matrix->non_intra_quant_mat[i]   = s->inter_matrix[n];
        }
    }
    return 0;
}
Example #21
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
{
    float q;
    int qmin, qmax;
    float br_compensation;
    double diff;
    double short_term_q;
    double fps;
    int picture_number = s->picture_number;
    int64_t wanted_bits;
    RateControlContext *rcc = &s->rc_context;
    AVCodecContext *a       = s->avctx;
    RateControlEntry local_rce, *rce;
    double bits;
    double rate_factor;
    int var;
    const int pict_type = s->pict_type;
    Picture * const pic = &s->current_picture;
    emms_c();

    get_qminmax(&qmin, &qmax, s, pict_type);

    fps = 1 / av_q2d(s->avctx->time_base);
    /* update predictors */
    if (picture_number > 2 && !dry_run) {
        const int last_var = s->last_pict_type == AV_PICTURE_TYPE_I ? rcc->last_mb_var_sum
                                                                    : rcc->last_mc_mb_var_sum;
        update_predictor(&rcc->pred[s->last_pict_type],
                         rcc->last_qscale,
                         sqrt(last_var), s->frame_bits);
    }

    if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
        assert(picture_number >= 0);
        assert(picture_number < rcc->num_entries);
        rce         = &rcc->entry[picture_number];
        wanted_bits = rce->expected_bits;
    } else {
        Picture *dts_pic;
        rce = &local_rce;

        /* FIXME add a dts field to AVFrame and ensure it is set and use it
         * here instead of reordering but the reordering is simpler for now
         * until H.264 B-pyramid must be handled. */
        if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
            dts_pic = s->current_picture_ptr;
        else
            dts_pic = s->last_picture_ptr;

        if (!dts_pic || dts_pic->f->pts == AV_NOPTS_VALUE)
            wanted_bits = (uint64_t)(s->bit_rate * (double)picture_number / fps);
        else
            wanted_bits = (uint64_t)(s->bit_rate * (double)dts_pic->f->pts / fps);
    }

    diff = s->total_bits - wanted_bits;
    br_compensation = (a->bit_rate_tolerance - diff) / a->bit_rate_tolerance;
    if (br_compensation <= 0.0)
        br_compensation = 0.001;

    var = pict_type == AV_PICTURE_TYPE_I ? pic->mb_var_sum : pic->mc_mb_var_sum;

    short_term_q = 0; /* avoid warning */
    if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
        if (pict_type != AV_PICTURE_TYPE_I)
            assert(pict_type == rce->new_pict_type);

        q = rce->new_qscale / br_compensation;
        ff_dlog(s, "%f %f %f last:%d var:%d type:%d//\n", q, rce->new_qscale,
                br_compensation, s->frame_bits, var, pict_type);
    } else {
        rce->pict_type     =
        rce->new_pict_type = pict_type;
        rce->mc_mb_var_sum = pic->mc_mb_var_sum;
        rce->mb_var_sum    = pic->mb_var_sum;
        rce->qscale        = FF_QP2LAMBDA * 2;
        rce->f_code        = s->f_code;
        rce->b_code        = s->b_code;
        rce->misc_bits     = 1;

        bits = predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var));
        if (pict_type == AV_PICTURE_TYPE_I) {
            rce->i_count    = s->mb_num;
            rce->i_tex_bits = bits;
            rce->p_tex_bits = 0;
            rce->mv_bits    = 0;
        } else {
            rce->i_count    = 0;    // FIXME we do know this approx
            rce->i_tex_bits = 0;
            rce->p_tex_bits = bits * 0.9;
            rce->mv_bits    = bits * 0.1;
        }
        rcc->i_cplx_sum[pict_type]  += rce->i_tex_bits * rce->qscale;
        rcc->p_cplx_sum[pict_type]  += rce->p_tex_bits * rce->qscale;
        rcc->mv_bits_sum[pict_type] += rce->mv_bits;
        rcc->frame_count[pict_type]++;

        bits        = rce->i_tex_bits + rce->p_tex_bits;
        rate_factor = rcc->pass1_wanted_bits /
                      rcc->pass1_rc_eq_output_sum * br_compensation;

        q = get_qscale(s, rce, rate_factor, picture_number);
        if (q < 0)
            return -1;

        assert(q > 0.0);
        q = get_diff_limited_q(s, rce, q);
        assert(q > 0.0);

        // FIXME type dependent blur like in 2-pass
        if (pict_type == AV_PICTURE_TYPE_P || s->intra_only) {
            rcc->short_term_qsum   *= a->qblur;
            rcc->short_term_qcount *= a->qblur;

            rcc->short_term_qsum += q;
            rcc->short_term_qcount++;
            q = short_term_q = rcc->short_term_qsum / rcc->short_term_qcount;
        }
        assert(q > 0.0);

        q = modify_qscale(s, rce, q, picture_number);

        rcc->pass1_wanted_bits += s->bit_rate / fps;

        assert(q > 0.0);
    }

    if (s->avctx->debug & FF_DEBUG_RC) {
        av_log(s->avctx, AV_LOG_DEBUG,
               "%c qp:%d<%2.1f<%d %d want:%d total:%d comp:%f st_q:%2.2f "
               "size:%d var:%d/%d br:%d fps:%d\n",
               av_get_picture_type_char(pict_type),
               qmin, q, qmax, picture_number,
               (int)wanted_bits / 1000, (int)s->total_bits / 1000,
               br_compensation, short_term_q, s->frame_bits,
               pic->mb_var_sum, pic->mc_mb_var_sum,
               s->bit_rate / 1000, (int)fps);
    }

    if (q < qmin)
        q = qmin;
    else if (q > qmax)
        q = qmax;

    if (s->adaptive_quant)
        adaptive_quantization(s, q);
    else
        q = (int)(q + 0.5);

    if (!dry_run) {
        rcc->last_qscale        = q;
        rcc->last_mc_mb_var_sum = pic->mc_mb_var_sum;
        rcc->last_mb_var_sum    = pic->mb_var_sum;
    }
    return q;
}
Example #22
static int init_pass2(MpegEncContext *s)
{
    RateControlContext *rcc = &s->rc_context;
    AVCodecContext *a       = s->avctx;
    int i, toobig;
    double fps             = 1 / av_q2d(s->avctx->time_base);
    double complexity[5]   = { 0 }; // approximate bits at quant=1
    uint64_t const_bits[5] = { 0 }; // quantizer independent bits
    uint64_t all_const_bits;
    uint64_t all_available_bits = (uint64_t)(s->bit_rate *
                                             (double)rcc->num_entries / fps);
    double rate_factor          = 0;
    double step;
    const int filter_size = (int)(a->qblur * 4) | 1;
    double expected_bits;
    double *qscale, *blurred_qscale, qscale_sum;

    /* find complexity & const_bits & decide the pict_types */
    for (i = 0; i < rcc->num_entries; i++) {
        RateControlEntry *rce = &rcc->entry[i];

        rce->new_pict_type                = rce->pict_type;
        rcc->i_cplx_sum[rce->pict_type]  += rce->i_tex_bits * rce->qscale;
        rcc->p_cplx_sum[rce->pict_type]  += rce->p_tex_bits * rce->qscale;
        rcc->mv_bits_sum[rce->pict_type] += rce->mv_bits;
        rcc->frame_count[rce->pict_type]++;

        complexity[rce->new_pict_type] += (rce->i_tex_bits + rce->p_tex_bits) *
                                          (double)rce->qscale;
        const_bits[rce->new_pict_type] += rce->mv_bits + rce->misc_bits;
    }

    all_const_bits = const_bits[AV_PICTURE_TYPE_I] +
                     const_bits[AV_PICTURE_TYPE_P] +
                     const_bits[AV_PICTURE_TYPE_B];

    if (all_available_bits < all_const_bits) {
        av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is too low\n");
        return -1;
    }

    qscale         = av_malloc(sizeof(double) * rcc->num_entries);
    blurred_qscale = av_malloc(sizeof(double) * rcc->num_entries);
    if (!qscale || !blurred_qscale) {
        av_free(qscale);
        av_free(blurred_qscale);
        return AVERROR(ENOMEM);
    }
    toobig = 0;

    for (step = 256 * 256; step > 0.0000001; step *= 0.5) {
        expected_bits = 0;
        rate_factor  += step;

        rcc->buffer_index = s->avctx->rc_buffer_size / 2;

        /* find qscale */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];

            qscale[i] = get_qscale(s, &rcc->entry[i], rate_factor, i);
            rcc->last_qscale_for[rce->pict_type] = qscale[i];
        }
        assert(filter_size % 2 == 1);

        /* fixed I/B QP relative to P mode */
        for (i = rcc->num_entries - 1; i >= 0; i--) {
            RateControlEntry *rce = &rcc->entry[i];

            qscale[i] = get_diff_limited_q(s, rce, qscale[i]);
        }

        /* smooth curve */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];
            const int pict_type   = rce->new_pict_type;
            int j;
            double q = 0.0, sum = 0.0;

            for (j = 0; j < filter_size; j++) {
                int index    = i + j - filter_size / 2;
                double d     = index - i;
                double coeff = a->qblur == 0 ? 1.0 : exp(-d * d / (a->qblur * a->qblur));

                if (index < 0 || index >= rcc->num_entries)
                    continue;
                if (pict_type != rcc->entry[index].new_pict_type)
                    continue;
                q   += qscale[index] * coeff;
                sum += coeff;
            }
            blurred_qscale[i] = q / sum;
        }

        /* find expected bits */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];
            double bits;

            rce->new_qscale = modify_qscale(s, rce, blurred_qscale[i], i);

            bits  = qp2bits(rce, rce->new_qscale) + rce->mv_bits + rce->misc_bits;
            bits += 8 * ff_vbv_update(s, bits);

            rce->expected_bits = expected_bits;
            expected_bits     += bits;
        }

        ff_dlog(s->avctx,
                "expected_bits: %f all_available_bits: %d rate_factor: %f\n",
                expected_bits, (int)all_available_bits, rate_factor);
        if (expected_bits > all_available_bits) {
            rate_factor -= step;
            ++toobig;
        }
    }
    av_free(qscale);
    av_free(blurred_qscale);

    /* check bitrate calculations and print info */
    qscale_sum = 0.0;
    for (i = 0; i < rcc->num_entries; i++) {
        ff_dlog(s, "[lavc rc] entry[%d].new_qscale = %.3f  qp = %.3f\n",
                i,
                rcc->entry[i].new_qscale,
                rcc->entry[i].new_qscale / FF_QP2LAMBDA);
        qscale_sum += av_clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA,
                              s->avctx->qmin, s->avctx->qmax);
    }
    assert(toobig <= 40);
    av_log(s->avctx, AV_LOG_DEBUG,
           "[lavc rc] requested bitrate: %d bps  expected bitrate: %d bps\n",
           s->bit_rate,
           (int)(expected_bits / ((double)all_available_bits / s->bit_rate)));
    av_log(s->avctx, AV_LOG_DEBUG,
           "[lavc rc] estimated target average qp: %.3f\n",
           (float)qscale_sum / rcc->num_entries);
    if (toobig == 0) {
        av_log(s->avctx, AV_LOG_INFO,
               "[lavc rc] Using all of requested bitrate is not "
               "necessary for this video with these parameters.\n");
    } else if (toobig == 40) {
        av_log(s->avctx, AV_LOG_ERROR,
               "[lavc rc] Error: bitrate too low for this video "
               "with these parameters.\n");
        return -1;
    } else if (fabs(expected_bits / all_available_bits - 1.0) > 0.01) {
        av_log(s->avctx, AV_LOG_ERROR,
               "[lavc rc] Error: 2pass curve failed to converge\n");
        return -1;
    }

    return 0;
}
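The rate_factor loop is a multiplicative binary search: add a step, re-simulate the whole pass, and back the step off whenever the predicted size overshoots the budget, halving until convergence. The pattern in isolation, assuming a monotonically increasing cost function:

/* Find (approximately) the largest x with cost(x) <= budget. */
static double search_rate_factor(double (*cost)(double), double budget)
{
    double x = 0;
    for (double step = 256.0 * 256.0; step > 1e-7; step *= 0.5) {
        x += step;
        if (cost(x) > budget)
            x -= step;
    }
    return x;
}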
Example #23
static double modify_qscale(MpegEncContext *s, RateControlEntry *rce,
                            double q, int frame_num)
{
    RateControlContext *rcc  = &s->rc_context;
    const double buffer_size = s->avctx->rc_buffer_size;
    const double fps         = 1 / av_q2d(s->avctx->time_base);
    const double min_rate    = s->avctx->rc_min_rate / fps;
    const double max_rate    = s->avctx->rc_max_rate / fps;
    const int pict_type      = rce->new_pict_type;
    int qmin, qmax;

    get_qminmax(&qmin, &qmax, s, pict_type);

    /* modulation */
    if (s->rc_qmod_freq &&
        frame_num % s->rc_qmod_freq == 0 &&
        pict_type == AV_PICTURE_TYPE_P)
        q *= s->rc_qmod_amp;

    /* buffer overflow/underflow protection */
    if (buffer_size) {
        double expected_size = rcc->buffer_index;
        double q_limit;

        if (min_rate) {
            double d = 2 * (buffer_size - expected_size) / buffer_size;
            if (d > 1.0)
                d = 1.0;
            else if (d < 0.0001)
                d = 0.0001;
            q *= pow(d, 1.0 / s->rc_buffer_aggressivity);

            q_limit = bits2qp(rce,
                              FFMAX((min_rate - buffer_size + rcc->buffer_index) *
                                    s->avctx->rc_min_vbv_overflow_use, 1));

            if (q > q_limit) {
                if (s->avctx->debug & FF_DEBUG_RC)
                    av_log(s->avctx, AV_LOG_DEBUG,
                           "limiting QP %f -> %f\n", q, q_limit);
                q = q_limit;
            }
        }

        if (max_rate) {
            double d = 2 * expected_size / buffer_size;
            if (d > 1.0)
                d = 1.0;
            else if (d < 0.0001)
                d = 0.0001;
            q /= pow(d, 1.0 / s->rc_buffer_aggressivity);

            q_limit = bits2qp(rce,
                              FFMAX(rcc->buffer_index *
                                    s->avctx->rc_max_available_vbv_use,
                                    1));
            if (q < q_limit) {
                if (s->avctx->debug & FF_DEBUG_RC)
                    av_log(s->avctx, AV_LOG_DEBUG,
                           "limiting QP %f -> %f\n", q, q_limit);
                q = q_limit;
            }
        }
    }
    ff_dlog(s, "q:%f max:%f min:%f size:%f index:%f agr:%f\n",
            q, max_rate, min_rate, buffer_size, rcc->buffer_index,
            s->rc_buffer_aggressivity);
    if (s->rc_qsquish == 0.0 || qmin == qmax) {
        if (q < qmin)
            q = qmin;
        else if (q > qmax)
            q = qmax;
    } else {
        double min2 = log(qmin);
        double max2 = log(qmax);

        q  = log(q);
        q  = (q - min2) / (max2 - min2) - 0.5;
        q *= -4.0;
        q  = 1.0 / (1.0 + exp(q));
        q  = q * (max2 - min2) + min2;

        q = exp(q);
    }

    return q;
}
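When rc_qsquish is enabled, the final clamp maps q into [qmin, qmax] smoothly: transform to the log domain, center, push through a logistic, and map back, so values near the limits are compressed instead of hard-clipped. The same mapping as a standalone helper:

#include <math.h>

/* Smoothly squash q into [qmin, qmax], mirroring the qsquish branch above. */
static double squish_q(double q, double qmin, double qmax)
{
    double min2 = log(qmin), max2 = log(qmax);
    double t = (log(q) - min2) / (max2 - min2) - 0.5; /* center on 0 */
    t = 1.0 / (1.0 + exp(-4.0 * t));                  /* logistic */
    return exp(t * (max2 - min2) + min2);
}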
Example #24
File: vaapi_hevc.c Project: 0day-ci/FFmpeg
/** Decode the given hevc slice with VA API. */
static int vaapi_hevc_decode_slice(AVCodecContext *avctx,
                                   const uint8_t  *buffer,
                                   uint32_t        size)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    HEVCContext * const h = avctx->priv_data;
    vaapi_hevc_frame_data *frame_data = h->ref->hwaccel_picture_private;
    SliceHeader * const sh = &h->sh;
    VASliceParameterBufferHEVC *slice_param;
    int i, list_idx;
    uint8_t nb_list = sh->slice_type == B_SLICE ? 2 : 1;

    if (sh->slice_type == I_SLICE)
        nb_list = 0;

    ff_dlog(avctx, "vaapi_hevc_decode_slice(): buffer %p, size %d\n", buffer, size);

    /* Fill in VASliceParameterBufferHEVC. */
    slice_param = (VASliceParameterBufferHEVC *)ff_vaapi_alloc_slice(vactx, buffer, size);
    if (!slice_param)
        return -1;

    frame_data->last_slice_param = slice_param;

    /* The base structure changed, so this has to be re-set in order to be valid on every byte order. */
    slice_param->slice_data_flag = VA_SLICE_DATA_FLAG_ALL;

    /* Add 1 to the bits count here to account for the byte_alignment bit, which always is at least one bit and not accounted for otherwise. */
    slice_param->slice_data_byte_offset = (get_bits_count(&h->HEVClc->gb) + 1 + 7) / 8;

    slice_param->slice_segment_address = sh->slice_segment_addr;

    slice_param->LongSliceFlags.value = 0;
    slice_param->LongSliceFlags.fields.dependent_slice_segment_flag = sh->dependent_slice_segment_flag;
    slice_param->LongSliceFlags.fields.slice_type = sh->slice_type;
    slice_param->LongSliceFlags.fields.color_plane_id = sh->colour_plane_id;
    slice_param->LongSliceFlags.fields.mvd_l1_zero_flag = sh->mvd_l1_zero_flag;
    slice_param->LongSliceFlags.fields.cabac_init_flag = sh->cabac_init_flag;
    slice_param->LongSliceFlags.fields.slice_temporal_mvp_enabled_flag = sh->slice_temporal_mvp_enabled_flag;
    slice_param->LongSliceFlags.fields.slice_deblocking_filter_disabled_flag = sh->disable_deblocking_filter_flag;
    slice_param->LongSliceFlags.fields.collocated_from_l0_flag = sh->collocated_list == L0 ? 1 : 0;
    slice_param->LongSliceFlags.fields.slice_loop_filter_across_slices_enabled_flag = sh->slice_loop_filter_across_slices_enabled_flag;

    slice_param->LongSliceFlags.fields.slice_sao_luma_flag = sh->slice_sample_adaptive_offset_flag[0];
    if (h->ps.sps->chroma_format_idc) {
        slice_param->LongSliceFlags.fields.slice_sao_chroma_flag = sh->slice_sample_adaptive_offset_flag[1];
    }

    if (sh->slice_temporal_mvp_enabled_flag) {
        slice_param->collocated_ref_idx = sh->collocated_ref_idx;
    } else {
        slice_param->collocated_ref_idx = 0xFF;
    }

    slice_param->slice_qp_delta = sh->slice_qp_delta;
    slice_param->slice_cb_qp_offset = sh->slice_cb_qp_offset;
    slice_param->slice_cr_qp_offset = sh->slice_cr_qp_offset;
    slice_param->slice_beta_offset_div2 = sh->beta_offset / 2;
    slice_param->slice_tc_offset_div2 = sh->tc_offset / 2;

    if (sh->slice_type == I_SLICE) {
        slice_param->five_minus_max_num_merge_cand = 0;
    } else {
        slice_param->five_minus_max_num_merge_cand = 5 - sh->max_num_merge_cand;
    }

    slice_param->num_ref_idx_l0_active_minus1 = sh->nb_refs[L0] ? sh->nb_refs[L0] - 1 : 0;
    slice_param->num_ref_idx_l1_active_minus1 = sh->nb_refs[L1] ? sh->nb_refs[L1] - 1 : 0;

    memset(slice_param->RefPicList, 0xFF, sizeof(slice_param->RefPicList));

    /* h->ref->refPicList is updated before calling each slice */
    for (list_idx = 0; list_idx < nb_list; ++list_idx) {
        RefPicList *rpl = &h->ref->refPicList[list_idx];

        for (i = 0; i < rpl->nb_refs; ++i) {
            slice_param->RefPicList[list_idx][i] = get_ref_pic_index(h, rpl->ref[i]);
        }
    }

    return fill_pred_weight_table(h, slice_param, sh);
}
Example #25
static int decode_slice(MpegEncContext *s)
{
    const int part_mask = s->partitioned_frame
                          ? (ER_AC_END | ER_AC_ERROR) : 0x7F;
    const int mb_size   = 16 >> s->avctx->lowres;
    int ret;

    s->last_resync_gb   = s->gb;
    s->first_slice_line = 1;
    s->resync_mb_x      = s->mb_x;
    s->resync_mb_y      = s->mb_y;

    ff_set_qscale(s, s->qscale);

    if (s->studio_profile) {
        if ((ret = ff_mpeg4_decode_studio_slice_header(s->avctx->priv_data)) < 0)
            return ret;
    }

    if (s->avctx->hwaccel) {
        const uint8_t *start = s->gb.buffer + get_bits_count(&s->gb) / 8;
        ret = s->avctx->hwaccel->decode_slice(s->avctx, start, s->gb.buffer_end - start);
        // ensure we exit decode loop
        s->mb_y = s->mb_height;
        return ret;
    }

    if (s->partitioned_frame) {
        const int qscale = s->qscale;

        if (CONFIG_MPEG4_DECODER && s->codec_id == AV_CODEC_ID_MPEG4)
            if ((ret = ff_mpeg4_decode_partitions(s->avctx->priv_data)) < 0)
                return ret;

        /* restore variables which were modified */
        s->first_slice_line = 1;
        s->mb_x             = s->resync_mb_x;
        s->mb_y             = s->resync_mb_y;
        ff_set_qscale(s, qscale);
    }

    for (; s->mb_y < s->mb_height; s->mb_y++) {
        /* per-row end of slice checks */
        if (s->msmpeg4_version) {
            if (s->resync_mb_y + s->slice_height == s->mb_y) {
                ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                                s->mb_x - 1, s->mb_y, ER_MB_END);

                return 0;
            }
        }

        if (s->msmpeg4_version == 1) {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128;
        }

        ff_init_block_index(s);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            int ret;

            ff_update_block_index(s);

            if (s->resync_mb_x == s->mb_x && s->resync_mb_y + 1 == s->mb_y)
                s->first_slice_line = 0;

            /* DCT & quantize */

            s->mv_dir  = MV_DIR_FORWARD;
            s->mv_type = MV_TYPE_16X16;
            ff_dlog(s, "%d %06X\n",
                    get_bits_count(&s->gb), show_bits(&s->gb, 24));

            ff_tlog(NULL, "Decoding MB at %dx%d\n", s->mb_x, s->mb_y);
            ret = s->decode_mb(s, s->block);

            if (s->pict_type != AV_PICTURE_TYPE_B)
                ff_h263_update_motion_val(s);

            if (ret < 0) {
                const int xy = s->mb_x + s->mb_y * s->mb_stride;
                if (ret == SLICE_END) {
                    ff_mpv_reconstruct_mb(s, s->block);
                    if (s->loop_filter)
                        ff_h263_loop_filter(s);

                    ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                                    s->mb_x, s->mb_y, ER_MB_END & part_mask);

                    s->padding_bug_score--;

                    if (++s->mb_x >= s->mb_width) {
                        s->mb_x = 0;
                        ff_mpeg_draw_horiz_band(s, s->mb_y * mb_size, mb_size);
                        ff_mpv_report_decode_progress(s);
                        s->mb_y++;
                    }
                    return 0;
                } else if (ret == SLICE_NOEND) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Slice mismatch at MB: %d\n", xy);
                    ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                                    s->mb_x + 1, s->mb_y,
                                    ER_MB_END & part_mask);
                    return AVERROR_INVALIDDATA;
                }
                av_log(s->avctx, AV_LOG_ERROR, "Error at MB: %d\n", xy);
                ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                                s->mb_x, s->mb_y, ER_MB_ERROR & part_mask);

                if (s->avctx->err_recognition & AV_EF_IGNORE_ERR)
                    continue;
                return AVERROR_INVALIDDATA;
            }

            ff_mpv_reconstruct_mb(s, s->block);
            if (s->loop_filter)
                ff_h263_loop_filter(s);
        }

        ff_mpeg_draw_horiz_band(s, s->mb_y * mb_size, mb_size);
        ff_mpv_report_decode_progress(s);

        s->mb_x = 0;
    }

    av_assert1(s->mb_x == 0 && s->mb_y == s->mb_height);

    // Detect incorrect padding with wrong stuffing codes used by NEC N-02B
    if (s->codec_id == AV_CODEC_ID_MPEG4         &&
        (s->workaround_bugs & FF_BUG_AUTODETECT) &&
        get_bits_left(&s->gb) >= 48              &&
        show_bits(&s->gb, 24) == 0x4010          &&
        !s->data_partitioning)
        s->padding_bug_score += 32;

    /* try to detect the padding bug */
    if (s->codec_id == AV_CODEC_ID_MPEG4         &&
        (s->workaround_bugs & FF_BUG_AUTODETECT) &&
        get_bits_left(&s->gb) >= 0               &&
        get_bits_left(&s->gb) < 137              &&
        !s->data_partitioning) {
        const int bits_count = get_bits_count(&s->gb);
        const int bits_left  = s->gb.size_in_bits - bits_count;

        if (bits_left == 0) {
            s->padding_bug_score += 16;
        } else if (bits_left != 1) {
            int v = show_bits(&s->gb, 8);
            v |= 0x7F >> (7 - (bits_count & 7));

            if (v == 0x7F && bits_left <= 8)
                s->padding_bug_score--;
            else if (v == 0x7F && ((get_bits_count(&s->gb) + 8) & 8) &&
                     bits_left <= 16)
                s->padding_bug_score += 4;
            else
                s->padding_bug_score++;
        }
    }
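
The stuffing test above is dense, so here is a minimal standalone sketch of the same check (the helper name and test bytes are mine, not FFmpeg API). MPEG-4 stuffing is a single 0 bit followed by 1 bits up to the next byte boundary; the 0x7F >> (7 - (bits_count & 7)) mask ORs in the bit positions that lie beyond the stuffing, so any valid remainder compares equal to 0x7F, while all-ones padding (the N-02B behavior the first heuristic looks for) does not:

#include <stdio.h>
#include <stdint.h>

/* next8: the next 8 bits of the stream, as show_bits(&s->gb, 8) would
 * return them; bit_offset: bits already consumed in the current byte. */
static int looks_like_stuffing(uint8_t next8, int bit_offset)
{
    int v = next8;
    v |= 0x7F >> (7 - (bit_offset & 7)); /* mask off bits past the stuffing */
    return v == 0x7F;                    /* 0, then all 1s to the boundary */
}

int main(void)
{
    printf("%d\n", looks_like_stuffing(0x7F, 0)); /* 1: 0 + seven 1 bits */
    printf("%d\n", looks_like_stuffing(0x78, 3)); /* 1: 0 + four 1 bits  */
    printf("%d\n", looks_like_stuffing(0xFF, 0)); /* 0: all-ones padding */
    return 0;
}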
Example #26
0
File: pcm-bluray.c Project: 0day-ci/FFmpeg
/**
 * Parse the header of an LPCM frame read from a Blu-ray MPEG-TS stream
 * @param avctx the codec context
 * @param header pointer to the first four bytes of the data packet
 */
static int pcm_bluray_parse_header(AVCodecContext *avctx,
                                   const uint8_t *header)
{
    static const uint8_t bits_per_samples[4] = { 0, 16, 20, 24 };
    static const uint32_t channel_layouts[16] = {
        0, AV_CH_LAYOUT_MONO, 0, AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_SURROUND,
        AV_CH_LAYOUT_2_1, AV_CH_LAYOUT_4POINT0, AV_CH_LAYOUT_2_2,
        AV_CH_LAYOUT_5POINT0, AV_CH_LAYOUT_5POINT1, AV_CH_LAYOUT_7POINT0,
        AV_CH_LAYOUT_7POINT1, 0, 0, 0, 0
    };
    static const uint8_t channels[16] = {
        0, 1, 0, 2, 3, 3, 4, 4, 5, 6, 7, 8, 0, 0, 0, 0
    };
    uint8_t channel_layout = header[2] >> 4;

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        ff_dlog(avctx, "pcm_bluray_parse_header: header = %02x%02x%02x%02x\n",
                header[0], header[1], header[2], header[3]);

    /* get the sample depth and derive the sample format from it */
    avctx->bits_per_coded_sample = bits_per_samples[header[3] >> 6];
    if (!(avctx->bits_per_coded_sample == 16 || avctx->bits_per_coded_sample == 24)) {
        av_log(avctx, AV_LOG_ERROR, "unsupported sample depth (%d)\n", avctx->bits_per_coded_sample);
        return AVERROR_INVALIDDATA;
    }
    avctx->sample_fmt = avctx->bits_per_coded_sample == 16 ? AV_SAMPLE_FMT_S16
                                                           : AV_SAMPLE_FMT_S32;
    if (avctx->sample_fmt == AV_SAMPLE_FMT_S32)
        avctx->bits_per_raw_sample = avctx->bits_per_coded_sample;

    /* get the sample rate. Not all values are used. */
    switch (header[2] & 0x0f) {
    case 1:
        avctx->sample_rate = 48000;
        break;
    case 4:
        avctx->sample_rate = 96000;
        break;
    case 5:
        avctx->sample_rate = 192000;
        break;
    default:
        avctx->sample_rate = 0;
        av_log(avctx, AV_LOG_ERROR, "reserved sample rate (%d)\n",
               header[2] & 0x0f);
        return AVERROR_INVALIDDATA;
    }

    /*
     * get the channel number (and mapping). Not all values are used.
     * It must be noted that the number of channels in the MPEG stream can
     * differ from the actual meaningful number, e.g. mono audio still has two
     * channels, one being empty.
     */
    avctx->channel_layout  = channel_layouts[channel_layout];
    avctx->channels        =        channels[channel_layout];
    if (!avctx->channels) {
        av_log(avctx, AV_LOG_ERROR, "reserved channel configuration (%d)\n",
               channel_layout);
        return AVERROR_INVALIDDATA;
    }

    avctx->bit_rate = FFALIGN(avctx->channels, 2) * avctx->sample_rate *
                      avctx->bits_per_coded_sample;

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        ff_dlog(avctx,
                "pcm_bluray_parse_header: %d channels, %d bits per sample, %d Hz, %"PRId64" bit/s\n",
                avctx->channels, avctx->bits_per_coded_sample,
                avctx->sample_rate, (int64_t)avctx->bit_rate);
    return 0;
}
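
As a worked example of the extraction above, here is a standalone sketch (the header bytes are fabricated for illustration; only the shifts and the depth table mirror the parser): header[2] carries the channel-layout code in its high nibble and the sample-rate code in its low nibble, and the top two bits of header[3] index the bit-depth table.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    static const uint8_t bits_per_samples[4] = { 0, 16, 20, 24 };
    /* fabricated header: layout code 3 (stereo), rate code 1 (48 kHz),
     * depth code 1 (16 bits) */
    const uint8_t header[4] = { 0x00, 0x00, 0x31, 0x40 };

    int layout_code = header[2] >> 4;
    int rate_code   = header[2] & 0x0f;
    int depth       = bits_per_samples[header[3] >> 6];

    /* stereo at 48 kHz, 16 bits: 2 * 48000 * 16 = 1536000 bit/s,
     * matching the FFALIGN(channels, 2) * rate * depth line above */
    printf("layout=%d rate_code=%d depth=%d bit_rate=%d\n",
           layout_code, rate_code, depth, 2 * 48000 * depth);
    return 0;
}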
Example #27
0
File: flicvideo.c Project: AVLeo/libav
static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
                                      void *data, int *got_frame,
                                      const uint8_t *buf, int buf_size)
{
    /* Note: the only difference between the 15Bpp and 16Bpp
     * formats is the pixel format; the packets are processed the same. */
    FlicDecodeContext *s = avctx->priv_data;

    GetByteContext g2;
    int pixel_ptr;
    unsigned char palette_idx1;

    unsigned int frame_size;
    int num_chunks;

    unsigned int chunk_size;
    int chunk_type;

    int i, j, ret;

    int lines;
    int compressed_lines;
    signed short line_packets;
    int y_ptr;
    int byte_run;
    int pixel_skip;
    int pixel_countdown;
    unsigned char *pixels;
    int pixel;
    unsigned int pixel_limit;

    bytestream2_init(&g2, buf, buf_size);

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return ret;
    }

    pixels = s->frame->data[0];
    pixel_limit = s->avctx->height * s->frame->linesize[0];

    frame_size = bytestream2_get_le32(&g2);
    bytestream2_skip(&g2, 2);  /* skip the magic number */
    num_chunks = bytestream2_get_le16(&g2);
    bytestream2_skip(&g2, 8);  /* skip padding */

    frame_size -= 16;

    /* iterate through the chunks */
    while ((frame_size > 0) && (num_chunks > 0)) {
        chunk_size = bytestream2_get_le32(&g2);
        chunk_type = bytestream2_get_le16(&g2);

        switch (chunk_type) {
        case FLI_256_COLOR:
        case FLI_COLOR:
            /* For some reason, it seems that non-palettized flics do
             * include one of these chunks in their first frame.
             * Why, I do not know; it seems rather extraneous. */
            ff_dlog(avctx,
                    "Unexpected Palette chunk %d in non-palettized FLC\n",
                    chunk_type);
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        case FLI_DELTA:
        case FLI_DTA_LC:
            y_ptr = 0;
            compressed_lines = bytestream2_get_le16(&g2);
            while (compressed_lines > 0) {
                line_packets = bytestream2_get_le16(&g2);
                if (line_packets < 0) {
                    line_packets = -line_packets;
                    y_ptr += line_packets * s->frame->linesize[0];
                } else {
                    compressed_lines--;
                    pixel_ptr = y_ptr;
                    CHECK_PIXEL_PTR(0);
                    pixel_countdown = s->avctx->width;
                    for (i = 0; i < line_packets; i++) {
                        /* account for the skip bytes */
                        pixel_skip = bytestream2_get_byte(&g2);
                        pixel_ptr += (pixel_skip*2); /* Pixel is 2 bytes wide */
                        pixel_countdown -= pixel_skip;
                        byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                        if (byte_run < 0) {
                            byte_run = -byte_run;
                            pixel    = bytestream2_get_le16(&g2);
                            CHECK_PIXEL_PTR(2 * byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown -= 2) {
                                *((signed short*)(&pixels[pixel_ptr])) = pixel;
                                pixel_ptr += 2;
                            }
                        } else {
                            CHECK_PIXEL_PTR(2 * byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                *((signed short*)(&pixels[pixel_ptr])) = bytestream2_get_le16(&g2);
                                pixel_ptr += 2;
                            }
                        }
                    }

                    y_ptr += s->frame->linesize[0];
                }
            }
            break;

        case FLI_LC:
            av_log(avctx, AV_LOG_ERROR, "Unexpected FLI_LC chunk in non-palettized FLC\n");
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        case FLI_BLACK:
            /* set the whole frame to 0x0000 which is black in both 15Bpp and 16Bpp modes. */
            memset(pixels, 0x0000,
                   s->frame->linesize[0] * s->avctx->height);
            break;

        case FLI_BRUN:
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                bytestream2_skip(&g2, 1);
                pixel_countdown = (s->avctx->width * 2);

                while (pixel_countdown > 0) {
                    byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                    if (byte_run > 0) {
                        palette_idx1 = bytestream2_get_byte(&g2);
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) (linea%d)\n",
                                       pixel_countdown, lines);
                        }
                    } else {  /* copy bytes if byte_run < 0 */
                        byte_run = -byte_run;
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            palette_idx1 = bytestream2_get_byte(&g2);
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    }
                }

                /* Now FLX is strange, in that it is "byte" as opposed to "pixel" run length compressed.
                 * This does not give us any good opportunity to perform word endian conversion
                 * during decompression. So if it is required (i.e., this is not an LE target), we do
                 * a second pass over the line here, swapping the bytes.
                 */
#if HAVE_BIGENDIAN
                /* byte-swap the freshly decoded line in place */
                pixel_ptr = y_ptr;
                pixel_countdown = s->avctx->width;
                while (pixel_countdown > 0) {
                    *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&pixels[pixel_ptr]);
                    pixel_ptr += 2;
                    pixel_countdown--;
                }
#endif
                y_ptr += s->frame->linesize[0];
            }
            break;

        case FLI_DTA_BRUN:
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                bytestream2_skip(&g2, 1);
                pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */

                while (pixel_countdown > 0) {
                    byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                    if (byte_run > 0) {
                        pixel    = bytestream2_get_le16(&g2);
                        CHECK_PIXEL_PTR(2 * byte_run);
                        for (j = 0; j < byte_run; j++) {
                            *((signed short*)(&pixels[pixel_ptr])) = pixel;
                            pixel_ptr += 2;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n",
                                       pixel_countdown);
                        }
                    } else {  /* copy pixels if byte_run < 0 */
                        byte_run = -byte_run;
                        CHECK_PIXEL_PTR(2 * byte_run);
                        for (j = 0; j < byte_run; j++) {
                            *((signed short*)(&pixels[pixel_ptr])) = bytestream2_get_le16(&g2);
                            pixel_ptr  += 2;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n",
                                       pixel_countdown);
                        }
                    }
                }

                y_ptr += s->frame->linesize[0];
            }
            break;

        case FLI_COPY:
        case FLI_DTA_COPY:
            /* copy the chunk (uncompressed frame) */
            if (chunk_size - 6 > (unsigned int)(s->avctx->width * s->avctx->height)*2) {
                av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \
                       "bigger than image, skipping chunk\n", chunk_size - 6);
                bytestream2_skip(&g2, chunk_size - 6);
            } else {
                for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height;
                     y_ptr += s->frame->linesize[0]) {
                    pixel_countdown = s->avctx->width;
                    pixel_ptr = 0;
                    while (pixel_countdown > 0) {
                        *((signed short*)(&pixels[y_ptr + pixel_ptr])) = bytestream2_get_le16(&g2);
                        pixel_ptr += 2;
                        pixel_countdown--;
                    }
                }
            }
            break;

        case FLI_MINI:
            /* some sort of a thumbnail? disregard this chunk... */
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type);
            break;
        }

        frame_size -= chunk_size;
        num_chunks--;
    }

    /* by the end of the frame, the stream ptr should equal the frame
     * size (minus 1, possibly); if it doesn't, issue a warning */
    if ((bytestream2_get_bytes_left(&g2) != 0) && (bytestream2_get_bytes_left(&g2) != 1))
        av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
               "and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    *got_frame = 1;

    return buf_size;
}
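
The packet loops above all build on the same signed run-length scheme, clearest in FLI_DTA_BRUN: a signed count byte with a positive value replicates one little-endian 16-bit pixel, and a negative value copies that many literal pixels (FLI_DELTA flips the sign convention). A minimal standalone sketch of that core loop, with the helper name mine and the bounds checks simplified:

#include <stdio.h>
#include <stdint.h>

static int rle16_decode(const uint8_t *src, int src_size,
                        uint16_t *dst, int dst_size)
{
    int si = 0, di = 0;
    while (si < src_size && di < dst_size) {
        int run = (int8_t)src[si++];              /* signed count byte */
        if (run > 0) {                            /* replicate one pixel */
            uint16_t pix;
            if (si + 1 >= src_size)
                break;
            pix = src[si] | (src[si + 1] << 8);   /* little-endian 16-bit */
            si += 2;
            while (run-- > 0 && di < dst_size)
                dst[di++] = pix;
        } else {                                  /* copy -run literal pixels */
            run = -run;
            while (run-- > 0 && di < dst_size && si + 1 < src_size) {
                dst[di++] = src[si] | (src[si + 1] << 8);
                si += 2;
            }
        }
    }
    return di;                                    /* pixels produced */
}

int main(void)
{
    /* 3x pixel 0x1234, then one literal pixel 0xBEEF */
    const uint8_t src[] = { 3, 0x34, 0x12, 0xFF, 0xEF, 0xBE };
    uint16_t dst[8];
    int n = rle16_decode(src, sizeof(src), dst, 8);
    printf("%d pixels, first=0x%04X last=0x%04X\n", n, dst[0], dst[n - 1]);
    return 0;
}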
Example #28
0
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, width, height, flags;
    int version;
    const uint8_t *ptr;

    hdr_size = AV_RB16(buf);
    ff_dlog(avctx, "header size %d\n", hdr_size);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
        return AVERROR_INVALIDDATA;
    }

    version = AV_RB16(buf + 2);
    ff_dlog(avctx, "%.4s version %d\n", buf+4, version);
    if (version > 1) {
        av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
        return AVERROR_PATCHWELCOME;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);
    if (width != avctx->width || height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "picture resolution change: %dx%d -> %dx%d\n",
               avctx->width, avctx->height, width, height);
        return AVERROR_PATCHWELCOME;
    }

    ctx->frame_type = (buf[12] >> 2) & 3;
    ctx->alpha_info = buf[17] & 0xf;

    if (ctx->alpha_info > 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info);
        return AVERROR_INVALIDDATA;
    }
    if (avctx->skip_alpha) ctx->alpha_info = 0;

    ff_dlog(avctx, "frame type %d\n", ctx->frame_type);

    if (ctx->frame_type == 0) {
        ctx->scan = ctx->progressive_scan; // permuted
    } else {
        ctx->scan = ctx->interlaced_scan; // permuted
        ctx->frame->interlaced_frame = 1;
        ctx->frame->top_field_first = ctx->frame_type == 1;
    }

    if (ctx->alpha_info) {
        avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10;
    } else {
        avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV422P10;
    }

    avctx->color_primaries = buf[14];
    avctx->color_trc       = buf[15];
    avctx->colorspace      = buf[16];
    avctx->color_range     = AVCOL_RANGE_MPEG;

    ptr   = buf + 20;
    flags = buf[19];
    ff_dlog(avctx, "flags %x\n", flags);

    if (flags & 2) {
        if (buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return AVERROR_INVALIDDATA;
        }
        permute(ctx->qmat_luma, ctx->prodsp.idct_permutation, ptr);
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64);
    }

    if (flags & 1) {
        if (buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return AVERROR_INVALIDDATA;
        }
        permute(ctx->qmat_chroma, ctx->prodsp.idct_permutation, ptr);
    } else {
        memset(ctx->qmat_chroma, 4, 64);
    }

    return hdr_size;
}
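
For reference, the fixed byte offsets the parser above reads can be exercised against a fabricated header. This is a standalone sketch: the buffer contents are invented for illustration, and the annotations are descriptive rather than ProRes spec terms.

#include <stdio.h>
#include <stdint.h>

static int rb16(const uint8_t *p) { return (p[0] << 8) | p[1]; }

int main(void)
{
    const uint8_t hdr[20] = {
        0x00, 0x14,             /* [0..1]   header size = 20 */
        0x00, 0x00,             /* [2..3]   version 0 */
        'a', 'p', 'l', '0',     /* [4..7]   creator fourcc */
        0x05, 0x00,             /* [8..9]   width 1280 */
        0x02, 0xD0,             /* [10..11] height 720 */
        0x83,                   /* [12]     top bits 10 -> 422, frame type 0 */
        0x00, 0x01, 0x01, 0x01, /* [13..16] reserved, primaries, trc, matrix */
        0x00, 0x00,             /* [17..18] alpha info 0, reserved */
        0x00                    /* [19]     flags: no custom quant matrices */
    };

    printf("hdr_size=%d version=%d %dx%d frame_type=%d alpha=%d flags=%d\n",
           rb16(hdr), rb16(hdr + 2), rb16(hdr + 8), rb16(hdr + 10),
           (hdr[12] >> 2) & 3, hdr[17] & 0xf, hdr[19]);
    return 0;
}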
Example #29
0
static av_always_inline void decode_line(FFV1Context *s, int w,
                                         int16_t *sample[2],
                                         int plane_index, int bits)
{
    PlaneContext *const p = &s->plane[plane_index];
    RangeCoder *const c   = &s->c;
    int x;
    int run_count = 0;
    int run_mode  = 0;
    int run_index = s->run_index;

    if (s->slice_coding_mode == 1) {
        int i;
        for (x = 0; x < w; x++) {
            int v = 0;
            for (i=0; i<bits; i++) {
                uint8_t state = 128;
                v += v + get_rac(c, &state);
            }
            sample[1][x] = v;
        }
        return;
    }

    for (x = 0; x < w; x++) {
        int diff, context, sign;

        context = get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
        if (context < 0) {
            context = -context;
            sign    = 1;
        } else
            sign = 0;

        av_assert2(context < p->context_count);

        if (s->ac) {
            diff = get_symbol_inline(c, p->state[context], 1);
        } else {
            if (context == 0 && run_mode == 0)
                run_mode = 1;

            if (run_mode) {
                if (run_count == 0 && run_mode == 1) {
                    if (get_bits1(&s->gb)) {
                        run_count = 1 << ff_log2_run[run_index];
                        if (x + run_count <= w)
                            run_index++;
                    } else {
                        if (ff_log2_run[run_index])
                            run_count = get_bits(&s->gb, ff_log2_run[run_index]);
                        else
                            run_count = 0;
                        if (run_index)
                            run_index--;
                        run_mode = 2;
                    }
                }
                run_count--;
                if (run_count < 0) {
                    run_mode  = 0;
                    run_count = 0;
                    diff      = get_vlc_symbol(&s->gb, &p->vlc_state[context],
                                               bits);
                    if (diff >= 0)
                        diff++;
                } else
                    diff = 0;
            } else
                diff = get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);

            ff_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
                    run_count, run_index, run_mode, x, get_bits_count(&s->gb));
        }

        if (sign)
            diff = -diff;

        sample[1][x] = av_mod_uintp2(predict(sample[1] + x, sample[0] + x) + diff, bits);
    }
    s->run_index = run_index;
}
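
The escalating run-length scheme in the VLC branch above is easier to see in isolation. In this condensed standalone sketch (the stub bitstream and the truncated log2_run table are fabricated; the real ff_log2_run table is longer), a 1 bit codes a full run of 1 << log2_run[index] zero diffs and bumps the index toward longer runs, while a 0 bit codes an explicit shorter count and lowers it:

#include <stdio.h>

static const int log2_run[] = { 0, 0, 1, 1, 2, 2, 3, 3, 4 }; /* truncated */

/* stub bitstream: three full runs, then an explicit remainder of 1 */
static int bits[] = { 1, 1, 1, 0, 1 };
static int pos;
static int get_bit(void) { return bits[pos++]; }
static int get_n(int n) { int v = 0; while (n--) v = (v << 1) | get_bit(); return v; }

int main(void)
{
    int index = 0, total = 0;
    for (;;) {
        if (get_bit()) {                  /* full run: 1 << log2_run[index] */
            total += 1 << log2_run[index];
            index++;
        } else {                          /* explicit shorter count, stop */
            total += log2_run[index] ? get_n(log2_run[index]) : 0;
            if (index)
                index--;
            break;
        }
    }
    printf("run of %d zero diffs\n", total); /* 1 + 1 + 2 + 1 = 5 */
    return 0;
}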
Example #30
0
File: pcm-bluray.c Project: 0day-ci/FFmpeg
static int pcm_bluray_decode_frame(AVCodecContext *avctx, void *data,
                                   int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame     = data;
    const uint8_t *src = avpkt->data;
    int buf_size = avpkt->size;
    GetByteContext gb;
    int num_source_channels, channel, retval;
    int sample_size, samples;
    int16_t *dst16;
    int32_t *dst32;

    if (buf_size < 4) {
        av_log(avctx, AV_LOG_ERROR, "PCM packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    if ((retval = pcm_bluray_parse_header(avctx, src)))
        return retval;
    src += 4;
    buf_size -= 4;

    bytestream2_init(&gb, src, buf_size);

    /* There's always an even number of channels in the source */
    num_source_channels = FFALIGN(avctx->channels, 2);
    sample_size = (num_source_channels *
                   (avctx->sample_fmt == AV_SAMPLE_FMT_S16 ? 16 : 24)) >> 3;
    samples = buf_size / sample_size;

    /* get output buffer */
    frame->nb_samples = samples;
    if ((retval = ff_get_buffer(avctx, frame, 0)) < 0)
        return retval;
    dst16 = (int16_t *)frame->data[0];
    dst32 = (int32_t *)frame->data[0];

    if (samples) {
        switch (avctx->channel_layout) {
            /* cases with same number of source and coded channels */
        case AV_CH_LAYOUT_STEREO:
        case AV_CH_LAYOUT_4POINT0:
        case AV_CH_LAYOUT_2_2:
            samples *= num_source_channels;
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
#if HAVE_BIGENDIAN
                bytestream2_get_buffer(&gb, dst16, buf_size);
#else
                do {
                    *dst16++ = bytestream2_get_be16u(&gb);
                } while (--samples);
#endif
            } else {
                do {
                    *dst32++ = bytestream2_get_be24u(&gb) << 8;
                } while (--samples);
            }
            break;
        /* cases where number of source channels = coded channels + 1 */
        case AV_CH_LAYOUT_MONO:
        case AV_CH_LAYOUT_SURROUND:
        case AV_CH_LAYOUT_2_1:
        case AV_CH_LAYOUT_5POINT0:
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
#if HAVE_BIGENDIAN
                    bytestream2_get_buffer(&gb, dst16, avctx->channels * 2);
                    dst16 += avctx->channels;
#else
                    channel = avctx->channels;
                    do {
                        *dst16++ = bytestream2_get_be16u(&gb);
                    } while (--channel);
#endif
                    bytestream2_skip(&gb, 2);
                } while (--samples);
            } else {
                do {
                    channel = avctx->channels;
                    do {
                        *dst32++ = bytestream2_get_be24u(&gb) << 8;
                    } while (--channel);
                    bytestream2_skip(&gb, 3);
                } while (--samples);
            }
            break;
            /* remapping: L, R, C, LBack, RBack, LF */
        case AV_CH_LAYOUT_5POINT1:
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
                    dst16[0] = bytestream2_get_be16u(&gb);
                    dst16[1] = bytestream2_get_be16u(&gb);
                    dst16[2] = bytestream2_get_be16u(&gb);
                    dst16[4] = bytestream2_get_be16u(&gb);
                    dst16[5] = bytestream2_get_be16u(&gb);
                    dst16[3] = bytestream2_get_be16u(&gb);
                    dst16 += 6;
                } while (--samples);
            } else {
                do {
                    dst32[0] = bytestream2_get_be24u(&gb) << 8;
                    dst32[1] = bytestream2_get_be24u(&gb) << 8;
                    dst32[2] = bytestream2_get_be24u(&gb) << 8;
                    dst32[4] = bytestream2_get_be24u(&gb) << 8;
                    dst32[5] = bytestream2_get_be24u(&gb) << 8;
                    dst32[3] = bytestream2_get_be24u(&gb) << 8;
                    dst32 += 6;
                } while (--samples);
            }
            break;
            /* remapping: L, R, C, LSide, LBack, RBack, RSide, <unused> */
        case AV_CH_LAYOUT_7POINT0:
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
                    dst16[0] = bytestream2_get_be16u(&gb);
                    dst16[1] = bytestream2_get_be16u(&gb);
                    dst16[2] = bytestream2_get_be16u(&gb);
                    dst16[5] = bytestream2_get_be16u(&gb);
                    dst16[3] = bytestream2_get_be16u(&gb);
                    dst16[4] = bytestream2_get_be16u(&gb);
                    dst16[6] = bytestream2_get_be16u(&gb);
                    dst16 += 7;
                    bytestream2_skip(&gb, 2);
                } while (--samples);
            } else {
                do {
                    dst32[0] = bytestream2_get_be24u(&gb) << 8;
                    dst32[1] = bytestream2_get_be24u(&gb) << 8;
                    dst32[2] = bytestream2_get_be24u(&gb) << 8;
                    dst32[5] = bytestream2_get_be24u(&gb) << 8;
                    dst32[3] = bytestream2_get_be24u(&gb) << 8;
                    dst32[4] = bytestream2_get_be24u(&gb) << 8;
                    dst32[6] = bytestream2_get_be24u(&gb) << 8;
                    dst32 += 7;
                    bytestream2_skip(&gb, 3);
                } while (--samples);
            }
            break;
            /* remapping: L, R, C, LSide, LBack, RBack, RSide, LF */
        case AV_CH_LAYOUT_7POINT1:
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
                    dst16[0] = bytestream2_get_be16u(&gb);
                    dst16[1] = bytestream2_get_be16u(&gb);
                    dst16[2] = bytestream2_get_be16u(&gb);
                    dst16[6] = bytestream2_get_be16u(&gb);
                    dst16[4] = bytestream2_get_be16u(&gb);
                    dst16[5] = bytestream2_get_be16u(&gb);
                    dst16[7] = bytestream2_get_be16u(&gb);
                    dst16[3] = bytestream2_get_be16u(&gb);
                    dst16 += 8;
                } while (--samples);
            } else {
                do {
                    dst32[0] = bytestream2_get_be24u(&gb) << 8;
                    dst32[1] = bytestream2_get_be24u(&gb) << 8;
                    dst32[2] = bytestream2_get_be24u(&gb) << 8;
                    dst32[6] = bytestream2_get_be24u(&gb) << 8;
                    dst32[4] = bytestream2_get_be24u(&gb) << 8;
                    dst32[5] = bytestream2_get_be24u(&gb) << 8;
                    dst32[7] = bytestream2_get_be24u(&gb) << 8;
                    dst32[3] = bytestream2_get_be24u(&gb) << 8;
                    dst32 += 8;
                } while (--samples);
            }
            break;
        }
    }

    *got_frame_ptr = 1;

    retval = bytestream2_tell(&gb);
    if (avctx->debug & FF_DEBUG_BITSTREAM)
        ff_dlog(avctx, "pcm_bluray_decode_frame: decoded %d -> %d bytes\n",
                retval, buf_size);
    return retval + 4;
}
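
The index patterns in the dst16[]/dst32[] assignments above are per-frame channel permutations. As a sketch of the 5.1 case (the table is copied from the dst16[0], [1], [2], [4], [5], [3] assignment order; the helper and channel names are mine, not FFmpeg API), each 6-sample source group L, R, C, LBack, RBack, LFE is rearranged into the layout's FL, FR, FC, LFE, SL, SR order:

#include <stdio.h>
#include <stdint.h>

/* destination slot for each source channel */
static const int bluray_to_ffmpeg_51[6] = { 0, 1, 2, 4, 5, 3 };

static void remap_51(const int16_t *src, int16_t *dst, int nb_frames)
{
    for (int f = 0; f < nb_frames; f++) {
        for (int ch = 0; ch < 6; ch++)
            dst[bluray_to_ffmpeg_51[ch]] = *src++;
        dst += 6;
    }
}

int main(void)
{
    const int16_t src[6] = { 10, 20, 30, 40, 50, 60 }; /* L R C Lb Rb LFE */
    int16_t dst[6];
    remap_51(src, dst, 1);
    for (int i = 0; i < 6; i++)
        printf("%d ", dst[i]);                         /* 10 20 30 60 40 50 */
    printf("\n");
    return 0;
}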