int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
                               enum AVSampleFormat sample_fmt, int align)
{
    int line_size;
    int sample_size = av_get_bytes_per_sample(sample_fmt);
    int planar      = av_sample_fmt_is_planar(sample_fmt);

    /* validate parameter ranges */
    if (!sample_size || nb_samples <= 0 || nb_channels <= 0)
        return AVERROR(EINVAL);

    /* check for integer overflow */
    if (nb_channels > INT_MAX / align ||
        (int64_t)nb_channels * nb_samples > (INT_MAX - (align * nb_channels)) / sample_size)
        return AVERROR(EINVAL);

    line_size = planar ? FFALIGN(nb_samples * sample_size,               align) :
                         FFALIGN(nb_samples * sample_size * nb_channels, align);
    if (linesize)
        *linesize = line_size;

    return planar ? line_size * nb_channels : line_size;
}
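
FFALIGN(x, a) rounds x up to the next multiple of the (power-of-two) alignment a; in libavutil it is defined as (((x)+(a)-1)&~((a)-1)). A minimal sketch of calling the function above, assuming only the libavutil headers (the sample count, channel count, and alignment are illustrative):

#include <stdio.h>
#include <libavutil/samplefmt.h>

int main(void)
{
    int linesize;
    /* 1024 samples, 2 channels, planar float, 32-byte aligned planes */
    int size = av_samples_get_buffer_size(&linesize, 2, 1024,
                                          AV_SAMPLE_FMT_FLTP, 32);
    if (size < 0)
        return 1;
    printf("bytes per plane: %d, total buffer: %d\n", linesize, size);
    return 0;
}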
Example #2
static int dxv_init(AVCodecContext *avctx)
{
    DXVContext *ctx = avctx->priv_data;
    int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);

    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n",
               avctx->width, avctx->height);
        return ret;
    }

    /* Codec requires 16x16 alignment. */
    avctx->coded_width  = FFALIGN(avctx->width,  16);
    avctx->coded_height = FFALIGN(avctx->height, 16);

    ff_texturedsp_init(&ctx->texdsp);
    avctx->pix_fmt = AV_PIX_FMT_RGBA;

    ctx->slice_count = av_clip(avctx->thread_count, 1,
                               avctx->coded_height / TEXTURE_BLOCK_H);

    return 0;
}
Example #3
/**
 * Backward synthesis filter: finds the LPC coefficients from past speech data.
 */
static void backward_filter(RA288Context *ractx,
                            float *hist, float *rec, const float *window,
                            float *lpc, const float *tab,
                            int order, int n, int non_rec, int move_size)
{
    float temp[MAX_BACKWARD_FILTER_ORDER+1];

    do_hybrid_window(ractx, order, n, non_rec, temp, hist, rec, window);

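    /* vector_fmul requires a length that is a multiple of 16,
     * hence the FFALIGN on the LPC order */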
    if (!compute_lpc_coefs(temp, order, lpc, 0, 1, 1))
        ractx->fdsp->vector_fmul(lpc, lpc, tab, FFALIGN(order, 16));

    memmove(hist, hist + n, move_size*sizeof(*hist));
}
Example #4
static int ffmal_copy_frame(AVCodecContext *avctx,  AVFrame *frame,
                            MMAL_BUFFER_HEADER_T *buffer)
{
    MMALDecodeContext *ctx = avctx->priv_data;
    int ret = 0;

    if (avctx->pix_fmt == AV_PIX_FMT_MMAL) {
        if (!ctx->pool_out)
            return AVERROR_UNKNOWN; // format change code failed with OOM previously

        if ((ret = ff_decode_frame_props(avctx, frame)) < 0)
            goto done;

        if ((ret = ffmmal_set_ref(frame, ctx->pool_out, buffer)) < 0)
            goto done;
    } else {
        int w = FFALIGN(avctx->width, 32);
        int h = FFALIGN(avctx->height, 16);
        uint8_t *src[4];
        int linesize[4];

        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
            goto done;

        av_image_fill_arrays(src, linesize,
                             buffer->data + buffer->type->video.offset[0],
                             avctx->pix_fmt, w, h, 1);
        av_image_copy(frame->data, frame->linesize, src, linesize,
                      avctx->pix_fmt, avctx->width, avctx->height);
    }

    frame->pkt_pts = buffer->pts == MMAL_TIME_UNKNOWN ? AV_NOPTS_VALUE : buffer->pts;
    frame->pkt_dts = AV_NOPTS_VALUE;

done:
    return ret;
}
Example #5
static int msrle_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MsrleContext *s = avctx->priv_data;
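    /* input rows are padded to 32-bit boundaries, as in BMP/DIB bitmaps */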
    int istride = FFALIGN(avctx->width*avctx->bits_per_coded_sample, 32) / 8;

    s->buf = buf;
    s->size = buf_size;

    s->frame.reference = 1;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame)) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    if (s->avctx->palctrl) {
        /* make the palette available */
        memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
        if (s->avctx->palctrl->palette_changed) {
            s->frame.palette_has_changed = 1;
            s->avctx->palctrl->palette_changed = 0;
        }
    }

    /* FIXME how to correctly detect RLE ??? */
    if (avctx->height * istride == avpkt->size) { /* assume uncompressed */
        int linesize = avctx->width * avctx->bits_per_coded_sample / 8;
        uint8_t *ptr = s->frame.data[0];
        uint8_t *buf = avpkt->data + (avctx->height-1)*istride;
        int i, j;

        for (i = 0; i < avctx->height; i++) {
            if (avctx->bits_per_coded_sample == 4) {
                for (j = 0; j < avctx->width - 1; j += 2) {
                    ptr[j+0] = buf[j>>1] >> 4;
                    ptr[j+1] = buf[j>>1] & 0xF;
                }
                if (avctx->width & 1)
                    ptr[j+0] = buf[j>>1] >> 4;
            } else {
                memcpy(ptr, buf, linesize);
            }
            buf -= istride;
            ptr += s->frame.linesize[0];
        }
Example #6
static int w64_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    int64_t size;
    ByteIOContext *pb  = s->pb;
    WAVContext    *wav = s->priv_data;
    AVStream *st;
    uint8_t guid[16];

    get_buffer(pb, guid, 16);
    if (memcmp(guid, guid_riff, 16))
        return -1;

    if (get_le64(pb) < 16 + 8 + 16 + 8 + 16 + 8) /* riff + wave + fmt + sizes */
        return -1;

    get_buffer(pb, guid, 16);
    if (memcmp(guid, guid_wave, 16)) {
        av_log(s, AV_LOG_ERROR, "could not find wave guid\n");
        return -1;
    }

    size = find_guid(pb, guid_fmt);
    if (size < 0) {
        av_log(s, AV_LOG_ERROR, "could not find fmt guid\n");
        return -1;
    }

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    /* subtract chunk header size - normal wav file doesn't count it */
    ff_get_wav_header(pb, st->codec, size - 24);
    url_fskip(pb, FFALIGN(size, INT64_C(8)) - size);

    st->need_parsing = AVSTREAM_PARSE_FULL;

    av_set_pts_info(st, 64, 1, st->codec->sample_rate);

    size = find_guid(pb, guid_data);
    if (size < 0) {
        av_log(s, AV_LOG_ERROR, "could not find data guid\n");
        return -1;
    }
    wav->data_end = url_ftell(pb) + size - 24;
    wav->w64      = 1;

    return 0;
}
Example #7
// Filter data through filter
static af_data_t* play(struct af_instance_s* af, af_data_t* data)
{
  af_resample_t *s = af->setup;
  int ret;
  int8_t *in = (int8_t*)data->audio;
  int8_t *out;
  int chans   = data->nch;
  int in_len  = data->len;
  int out_len = in_len * af->mul + 10;

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
      return NULL;

  av_fast_malloc(&s->tmp[0], &s->tmp_alloc, FFALIGN(out_len,32));
  if(s->tmp[0] == NULL) return NULL;

  out= (int8_t*)af->data->audio;

  out_len= FFMIN(out_len, af->data->len);

  av_fast_malloc(&s->in[0], &s->in_alloc, FFALIGN(in_len,32));
  if(s->in[0] == NULL) return NULL;

  memcpy(s->in[0], in, in_len);

  ret = swr_convert(s->swrctx, &s->tmp[0], out_len/chans/2, &s->in[0], in_len/chans/2);
  if (ret < 0) return NULL;
  out_len= ret*chans*2;

  memcpy(out, s->tmp[0], out_len);

  data->audio = af->data->audio;
  data->len   = out_len;
  data->rate  = af->data->rate;
  return data;
}
Example #8
static int codec_reinit(AVCodecContext *avctx, int width, int height,
                        int quality)
{
    NuvContext *c = avctx->priv_data;
    int ret;

    width  = FFALIGN(width,  2);
    height = FFALIGN(height, 2);
    if (quality >= 0)
        get_quant_quality(c, quality);
    if (width != c->width || height != c->height) {
        // also reserve space for a possible additional header
        int buf_size = height * width * 3 / 2
                     + FFMAX(AV_LZO_OUTPUT_PADDING, FF_INPUT_BUFFER_PADDING_SIZE)
                     + RTJPEG_HEADER_SIZE;
        if (buf_size > INT_MAX/8)
            return -1;
        if ((ret = av_image_check_size(height, width, 0, avctx)) < 0)
            return ret;
        avctx->width  = c->width  = width;
        avctx->height = c->height = height;
        av_fast_malloc(&c->decomp_buf, &c->decomp_size,
                       buf_size);
        if (!c->decomp_buf) {
            av_log(avctx, AV_LOG_ERROR,
                   "Can't allocate decompression buffer.\n");
            return AVERROR(ENOMEM);
        }
        ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);
        av_frame_unref(c->pic);
        return 1;
    } else if (quality != c->quality)
        ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);

    return 0;
}
Example #9
static void put_meta(AVFormatContext *s, const char *key, uint32_t id)
{
    AVDictionaryEntry *tag;
    AVIOContext *pb = s->pb;

    if ((tag = av_dict_get(s->metadata, key, NULL, 0))) {
        int size = strlen(tag->value);

        avio_wl32(pb, id);
        avio_wb32(pb, FFALIGN(size, 2));
        avio_write(pb, tag->value, size);
        if (size & 1)
            avio_w8(pb, 0);
    }
}
Example #10
File: vo_xv.c Project: benf/mpv
static void allocate_xvimage(struct vo *vo, int foo)
{
    struct xvctx *ctx = vo->priv;
    struct vo_x11_state *x11 = vo->x11;
    // align it for faster OSD rendering (draw_bmp.c swscale usage)
    int aligned_w = FFALIGN(ctx->image_width, 32);
#ifdef HAVE_SHM
    if (x11->display_is_local && XShmQueryExtension(x11->display)) {
        ctx->Shmem_Flag = 1;
        x11->ShmCompletionEvent = XShmGetEventBase(x11->display)
                                + ShmCompletion;
    } else {
        ctx->Shmem_Flag = 0;
        MP_INFO(vo, "Shared memory not supported\nReverting to normal Xv.\n");
    }
    if (ctx->Shmem_Flag) {
        ctx->xvimage[foo] =
            (XvImage *) XvShmCreateImage(x11->display, ctx->xv_port,
                                         ctx->xv_format, NULL,
                                         aligned_w, ctx->image_height,
                                         &ctx->Shminfo[foo]);

        ctx->Shminfo[foo].shmid = shmget(IPC_PRIVATE,
                                         ctx->xvimage[foo]->data_size,
                                         IPC_CREAT | 0777);
        ctx->Shminfo[foo].shmaddr = (char *) shmat(ctx->Shminfo[foo].shmid, 0,
                                                   0);
        ctx->Shminfo[foo].readOnly = False;

        ctx->xvimage[foo]->data = ctx->Shminfo[foo].shmaddr;
        XShmAttach(x11->display, &ctx->Shminfo[foo]);
        XSync(x11->display, False);
        shmctl(ctx->Shminfo[foo].shmid, IPC_RMID, 0);
    } else
#endif
    {
        ctx->xvimage[foo] =
            (XvImage *) XvCreateImage(x11->display, ctx->xv_port,
                                      ctx->xv_format, NULL, aligned_w,
                                      ctx->image_height);
        ctx->xvimage[foo]->data = av_malloc(ctx->xvimage[foo]->data_size);
        XSync(x11->display, False);
    }
    struct mp_image img = get_xv_buffer(vo, foo);
    img.w = aligned_w;
    mp_image_clear(&img, 0, 0, img.w, img.h);
    return;
}
Example #11
static int msrle_decode_frame(AVCodecContext *avctx,
                              void *data, int *got_frame,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MsrleContext *s = avctx->priv_data;
    int istride = FFALIGN(avctx->width*avctx->bits_per_coded_sample, 32) / 8;
    int ret;

    s->buf = buf;
    s->size = buf_size;

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;

    if (avctx->bits_per_coded_sample > 1 && avctx->bits_per_coded_sample <= 8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);

        if (pal) {
            s->frame->palette_has_changed = 1;
            memcpy(s->pal, pal, AVPALETTE_SIZE);
        }
        /* make the palette available */
        memcpy(s->frame->data[1], s->pal, AVPALETTE_SIZE);
    }

    /* FIXME how to correctly detect RLE ??? */
    if (avctx->height * istride == avpkt->size) { /* assume uncompressed */
        int linesize = (avctx->width * avctx->bits_per_coded_sample + 7) / 8;
        uint8_t *ptr = s->frame->data[0];
        uint8_t *buf = avpkt->data + (avctx->height-1)*istride;
        int i, j;

        for (i = 0; i < avctx->height; i++) {
            if (avctx->bits_per_coded_sample == 4) {
                for (j = 0; j < avctx->width - 1; j += 2) {
                    ptr[j+0] = buf[j>>1] >> 4;
                    ptr[j+1] = buf[j>>1] & 0xF;
                }
                if (avctx->width & 1)
                    ptr[j+0] = buf[j>>1] >> 4;
            } else {
                memcpy(ptr, buf, linesize);
            }
            buf -= istride;
            ptr += s->frame->linesize[0];
        }
Example #12
int ff_image_copy_plane_uc_from_x86(uint8_t       *dst, ptrdiff_t dst_linesize,
                                    const uint8_t *src, ptrdiff_t src_linesize,
                                    ptrdiff_t bytewidth, int height)
{
    int cpu_flags = av_get_cpu_flags();
    ptrdiff_t bw_aligned = FFALIGN(bytewidth, 64);

    if (EXTERNAL_SSE4(cpu_flags) &&
        bw_aligned <= dst_linesize && bw_aligned <= src_linesize)
        ff_image_copy_plane_uc_from_sse4(dst, dst_linesize, src, src_linesize,
                                         bw_aligned, height);
    else
        return AVERROR(ENOSYS);

    return 0;
}
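
A caller would typically treat the AVERROR(ENOSYS) return as "this CPU path is unavailable" and fall back to the generic copy; a minimal sketch of such a wrapper, assuming av_image_copy_plane() from libavutil/imgutils.h as the fallback (the wrapper name is hypothetical):

#include <libavutil/imgutils.h>

static void copy_plane_uc_or_generic(uint8_t *dst, ptrdiff_t dst_linesize,
                                     const uint8_t *src, ptrdiff_t src_linesize,
                                     ptrdiff_t bytewidth, int height)
{
    /* try the uncached SSE4 path first, fall back to a plain copy */
    if (ff_image_copy_plane_uc_from_x86(dst, dst_linesize,
                                        src, src_linesize,
                                        bytewidth, height) < 0)
        av_image_copy_plane(dst, dst_linesize, src, src_linesize,
                            bytewidth, height);
}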
Example #13
/** Find chunk with w64 GUID by skipping over other chunks
 * @return the size of the found chunk
 */
static int64_t find_guid(ByteIOContext *pb, const uint8_t guid1[16])
{
    uint8_t guid[16];
    int64_t size;

    while (!url_feof(pb)) {
        get_buffer(pb, guid, 16);
        size = get_le64(pb);
        if (size <= 24)
            return -1;
        if (!memcmp(guid, guid1, 16))
            return size;
        url_fskip(pb, FFALIGN(size, INT64_C(8)) - 24);
    }
    return -1;
}
Example #14
/** Find chunk with w64 GUID by skipping over other chunks
 * @return the size of the found chunk
 */
static int64_t find_guid(AVIOContext *pb, const uint8_t guid1[16])
{
    uint8_t guid[16];
    int64_t size;

    while (!url_feof(pb)) {
        avio_read(pb, guid, 16);
        size = avio_rl64(pb);
        if (size <= 24)
            return -1;
        if (!memcmp(guid, guid1, 16))
            return size;
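        /* size counts the whole chunk, including this 24-byte header
         * (16-byte GUID + 8-byte size); chunks are 8-byte aligned */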
        avio_skip(pb, FFALIGN(size, INT64_C(8)) - 24);
    }
    return -1;
}
Example #15
static int get_video_buffer(AVFrame *frame, int align)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int ret, i;

    if (!desc)
        return AVERROR(EINVAL);

    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
        return ret;

    if (!frame->linesize[0]) {
        ret = av_image_fill_linesizes(frame->linesize, frame->format,
                                      frame->width);
        if (ret < 0)
            return ret;

        for (i = 0; i < 4 && frame->linesize[i]; i++)
            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
    }

    for (i = 0; i < 4 && frame->linesize[i]; i++) {
        int h = frame->height;
        if (i == 1 || i == 2)
            h = -((-h) >> desc->log2_chroma_h);

        frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h);
        if (!frame->buf[i])
            goto fail;

        frame->data[i] = frame->buf[i]->data;
    }
    if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
        av_buffer_unref(&frame->buf[1]);
        frame->buf[1] = av_buffer_alloc(1024);
        if (!frame->buf[1])
            goto fail;
        frame->data[1] = frame->buf[1]->data;
    }

    frame->extended_data = frame->data;

    return 0;
fail:
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}
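
get_video_buffer() is the internal worker behind the public av_frame_get_buffer(); a minimal sketch of allocating a frame through that public entry point (the dimensions and the 32-byte alignment are illustrative):

#include <libavutil/frame.h>

static AVFrame *alloc_video_frame(void)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;

    frame->format = AV_PIX_FMT_YUV420P;
    frame->width  = 1280;
    frame->height = 720;

    /* fills data/linesize just as get_video_buffer() does above */
    if (av_frame_get_buffer(frame, 32) < 0)
        av_frame_free(&frame); /* leaves frame == NULL on failure */
    return frame;
}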
Example #16
void ff_mediacodec_sw_buffer_copy_yuv420_semi_planar(AVCodecContext *avctx,
                                                     MediaCodecDecContext *s,
                                                     uint8_t *data,
                                                     size_t size,
                                                     FFAMediaCodecBufferInfo *info,
                                                     AVFrame *frame)
{
    int i;
    uint8_t *src = NULL;

    for (i = 0; i < 2; i++) {
        int height;

        src = data + info->offset;
        if (i == 0) {
            height = avctx->height;

            src += s->crop_top * s->stride;
            src += s->crop_left;
        } else if (i == 1) {
            height = avctx->height / 2;

            src += s->slice_height * s->stride;
            src += s->crop_top * s->stride;
            src += s->crop_left;
        }

        if (frame->linesize[i] == s->stride) {
            memcpy(frame->data[i], src, height * s->stride);
        } else {
            int j, width;
            uint8_t *dst = frame->data[i];

            if (i == 0) {
                width = avctx->width;
            } else if (i == 1) {
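                /* the chroma plane holds interleaved U/V bytes, so one
                 * row covers the luma width rounded up to be even */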
                width = FFMIN(frame->linesize[i], FFALIGN(avctx->width, 2));
            }

            for (j = 0; j < height; j++) {
                memcpy(dst, src, width);
                src += s->stride;
                dst += frame->linesize[i];
            }
        }
    }
}
Example #17
static int cuda_frames_init(AVHWFramesContext *ctx)
{
    CUDAFramesContext *priv = ctx->internal->priv;
    int aligned_width = FFALIGN(ctx->width, CUDA_FRAME_ALIGNMENT);
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
        if (ctx->sw_format == supported_formats[i])
            break;
    }
    if (i == FF_ARRAY_ELEMS(supported_formats)) {
        av_log(ctx, AV_LOG_ERROR, "Pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(ctx->sw_format));
        return AVERROR(ENOSYS);
    }

    av_pix_fmt_get_chroma_sub_sample(ctx->sw_format, &priv->shift_width, &priv->shift_height);

    if (!ctx->pool) {
        int size;

        switch (ctx->sw_format) {
        case AV_PIX_FMT_NV12:
        case AV_PIX_FMT_YUV420P:
            size = aligned_width * ctx->height * 3 / 2;
            break;
        case AV_PIX_FMT_YUV444P:
        case AV_PIX_FMT_P010:
        case AV_PIX_FMT_P016:
            size = aligned_width * ctx->height * 3;
            break;
        case AV_PIX_FMT_YUV444P16:
            size = aligned_width * ctx->height * 6;
            break;
        default:
            av_log(ctx, AV_LOG_ERROR, "BUG: Pixel format missing from size calculation.");
            return AVERROR_BUG;
        }

        ctx->internal->pool_internal = av_buffer_pool_init2(size, ctx, cuda_pool_alloc, NULL);
        if (!ctx->internal->pool_internal)
            return AVERROR(ENOMEM);
    }

    return 0;
}
Example #18
static void draw_alpha(int x0,int y0, int w,int h, unsigned char* src, unsigned char *srca, int stride){
    uint32_t bespitch = FFALIGN(mga_vid_config.src_width, 32);
    x0+=mga_vid_config.src_width*(vo_panscan_x>>1)/(vo_dwidth+vo_panscan_x);
    switch(mga_vid_config.format){
    case MGA_VID_FORMAT_YV12:
    case MGA_VID_FORMAT_IYUV:
    case MGA_VID_FORMAT_I420:
        vo_draw_alpha_yv12(w,h,src,srca,stride,vid_data+bespitch*y0+x0,bespitch);
        break;
    case MGA_VID_FORMAT_YUY2:
        vo_draw_alpha_yuy2(w,h,src,srca,stride,vid_data+2*(bespitch*y0+x0),2*bespitch);
        break;
    case MGA_VID_FORMAT_UYVY:
        vo_draw_alpha_yuy2(w,h,src,srca,stride,vid_data+2*(bespitch*y0+x0)+1,2*bespitch);
        break;
    }
}
Example #19
static int y41p_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    AVFrame *pic = data;
    uint8_t *src = avpkt->data;
    uint8_t *y, *u, *v;
    int i, j, ret;

    if (avpkt->size < 3LL * avctx->height * FFALIGN(avctx->width, 8) / 2) {
        av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
        return AVERROR(EINVAL);
    }

    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
        return ret;

    pic->key_frame = 1;
    pic->pict_type = AV_PICTURE_TYPE_I;

    for (i = avctx->height - 1; i >= 0 ; i--) {
        y = &pic->data[0][i * pic->linesize[0]];
        u = &pic->data[1][i * pic->linesize[1]];
        v = &pic->data[2][i * pic->linesize[2]];
        for (j = 0; j < avctx->width; j += 8) {
            *(u++) = *src++;
            *(y++) = *src++;
            *(v++) = *src++;
            *(y++) = *src++;

            *(u++) = *src++;
            *(y++) = *src++;
            *(v++) = *src++;
            *(y++) = *src++;

            *(y++) = *src++;
            *(y++) = *src++;
            *(y++) = *src++;
            *(y++) = *src++;
        }
    }

    *got_frame = 1;

    return avpkt->size;
}
Example #20
/**
 * Read samples from the input FIFOs, mix, and write to the output link.
 */
static int output_frame(AVFilterLink *outlink, int nb_samples)
{
    AVFilterContext *ctx = outlink->src;
    MixContext      *s = ctx->priv;
    AVFrame *out_buf, *in_buf;
    int i;

    calculate_scales(s, nb_samples);

    out_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!out_buf)
        return AVERROR(ENOMEM);

    in_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!in_buf) {
        av_frame_free(&out_buf);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] == INPUT_ON) {
            int planes, plane_size, p;

            av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data,
                               nb_samples);

            planes     = s->planar ? s->nb_channels : 1;
            plane_size = nb_samples * (s->planar ? 1 : s->nb_channels);
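            /* vector_fmac_scalar requires a vector length that is a
             * multiple of 16, hence the FFALIGN */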
            plane_size = FFALIGN(plane_size, 16);

            for (p = 0; p < planes; p++) {
                s->fdsp->vector_fmac_scalar((float *)out_buf->extended_data[p],
                                           (float *) in_buf->extended_data[p],
                                           s->input_scale[i], plane_size);
            }
        }
    }
    av_frame_free(&in_buf);

    out_buf->pts = s->next_pts;
    if (s->next_pts != AV_NOPTS_VALUE)
        s->next_pts += nb_samples;

    return ff_filter_frame(outlink, out_buf);
}
Example #21
static void set_bpp(struct fb_var_screeninfo *p, int bpp, int rgb)
{
    p->bits_per_pixel = FFALIGN(bpp, 2);
    p->red.msb_right  = p->green.msb_right = p->blue.msb_right = p->transp.msb_right = 0;
    p->transp.offset  = p->transp.length = 0;
    p->blue.offset    = 0;
    switch (bpp) {
    case 32:
        p->transp.offset = 24;
        p->transp.length = 8;
        // fallthrough
    case 24:
        p->red.offset   = 16;
        p->red.length   = 8;
        p->green.offset = 8;
        p->green.length = 8;
        p->blue.length  = 8;
        break;
    case 16:
        p->red.offset   = 11;
        p->green.length = 6;
        p->red.length   = 5;
        p->green.offset = 5;
        p->blue.length  = 5;
        break;
    case 15:
        p->red.offset   = 10;
        p->green.length = 5;
        p->red.length   = 5;
        p->green.offset = 5;
        p->blue.length  = 5;
        break;
    case 12:
        p->red.offset   = 8;
        p->green.length = 4;
        p->red.length   = 4;
        p->green.offset = 4;
        p->blue.length  = 4;
        break;
    }
    if (rgb) {
        p->blue.offset = p->red.offset;
        p->red.offset = 0;
    }
}
Example #22
static uint32_t
draw_image(mp_image_t *mpi){
    uint32_t bespitch = FFALIGN(mga_vid_config.src_width, 32);

    // if -dr or -slices then do nothing:
    if(mpi->flags&(MP_IMGFLAG_DIRECT|MP_IMGFLAG_DRAW_CALLBACK)) return VO_TRUE;

    if(mpi->flags&MP_IMGFLAG_PLANAR){
	// copy planar:
        draw_slice(mpi->planes,mpi->stride,mpi->w,mpi->h,mpi->x,mpi->y);
    } else {
	// copy packed:
	mem2agpcpy_pic(vid_data, mpi->planes[0],	// dst,src
		    mpi->w*(mpi->bpp/8), mpi->h,	// w,h
		    bespitch*2, mpi->stride[0]);	// dstride,sstride
    }
    return VO_TRUE;
}
Example #23
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pic, int *got_packet)
{
    int i, j, ret;
    int aligned_width = FFALIGN(avctx->width,
                                avctx->codec_id == AV_CODEC_ID_R10K ? 1 : 64);
    int pad = (aligned_width - avctx->width) * 4;
    uint8_t *src_line;
    uint8_t *dst;

    if ((ret = ff_alloc_packet2(avctx, pkt, 4 * aligned_width * avctx->height)) < 0)
        return ret;

    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    src_line = pic->data[0];
    dst = pkt->data;

    for (i = 0; i < avctx->height; i++) {
        uint16_t *src = (uint16_t *)src_line;
        for (j = 0; j < avctx->width; j++) {
            uint32_t pixel;
            uint16_t r = *src++ >> 6;
            uint16_t g = *src++ >> 6;
            uint16_t b = *src++ >> 6;
            if (avctx->codec_id == AV_CODEC_ID_R210)
                pixel = (r << 20) | (g << 10) | b;
            else
                pixel = (r << 22) | (g << 12) | (b << 2);
            if (avctx->codec_id == AV_CODEC_ID_AVRP)
                bytestream_put_le32(&dst, pixel);
            else
                bytestream_put_be32(&dst, pixel);
        }
        memset(dst, 0, pad);
        dst += pad;
        src_line += pic->linesize[0];
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #24
static int sox_write_header(AVFormatContext *s)
{
    SoXContext *sox = s->priv_data;
    AVIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;
    AVDictionaryEntry *comment;
    size_t comment_len = 0, comment_size;

    comment = av_dict_get(s->metadata, "comment", NULL, 0);
    if (comment)
        comment_len = strlen(comment->value);
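    /* the comment field is zero-padded to a multiple of 8 bytes */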
    comment_size = FFALIGN(comment_len, 8);

    sox->header_size = SOX_FIXED_HDR + comment_size;

    if (enc->codec_id == AV_CODEC_ID_PCM_S32LE) {
        ffio_wfourcc(pb, ".SoX");
        avio_wl32(pb, sox->header_size);
        avio_wl64(pb, 0); /* number of samples */
        avio_wl64(pb, av_double2int(enc->sample_rate));
        avio_wl32(pb, enc->channels);
        avio_wl32(pb, comment_size);
    } else if (enc->codec_id == AV_CODEC_ID_PCM_S32BE) {
        ffio_wfourcc(pb, "XoS.");
        avio_wb32(pb, sox->header_size);
        avio_wb64(pb, 0); /* number of samples */
        avio_wb64(pb, av_double2int(enc->sample_rate));
        avio_wb32(pb, enc->channels);
        avio_wb32(pb, comment_size);
    } else {
        av_log(s, AV_LOG_ERROR, "invalid codec; use pcm_s32le or pcm_s32be\n");
        return AVERROR(EINVAL);
    }

    if (comment_len)
        avio_write(pb, comment->value, comment_len);

    ffio_fill(pb, 0, comment_size - comment_len);

    avio_flush(pb);

    return 0;
}
Example #25
static av_cold int decode_init(AVCodecContext *avctx)
{
    CamStudioContext *c = avctx->priv_data;
    int stride;
    switch (avctx->bits_per_coded_sample) {
    case 16:
        avctx->pix_fmt = PIX_FMT_RGB555;
        break;
    case 24:
        avctx->pix_fmt = PIX_FMT_BGR24;
        break;
    case 32:
        avctx->pix_fmt = PIX_FMT_RGB32;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "CamStudio codec error: invalid depth %i bpp\n",
               avctx->bits_per_coded_sample);
        return 1;
    }
    c->bpp = avctx->bits_per_coded_sample;
    c->pic.data[0] = NULL;
    c->linelen = avctx->width * avctx->bits_per_coded_sample / 8;
    c->height = avctx->height;
    stride = c->linelen;
    if (avctx->bits_per_coded_sample == 24)
        stride = FFALIGN(stride, 4);
    c->decomp_size = c->height * stride;
    c->decomp_buf = av_malloc(c->decomp_size + AV_LZO_OUTPUT_PADDING);
    if (!c->decomp_buf) {
        av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
        return 1;
    }
    return 0;
}
Example #26
static av_cold int ffmmal_init_decoder(AVCodecContext *avctx)
{
    MMALDecodeContext *ctx = avctx->priv_data;
    MMAL_STATUS_T status;
    MMAL_ES_FORMAT_T *format_in;
    MMAL_COMPONENT_T *decoder;
    char tmp[32];
    int ret = 0;

    bcm_host_init();

    if (mmal_vc_init()) {
        av_log(avctx, AV_LOG_ERROR, "Cannot initialize MMAL VC driver!\n");
        return AVERROR(ENOSYS);
    }

    if ((ret = ff_get_format(avctx, avctx->codec->pix_fmts)) < 0)
        return ret;

    avctx->pix_fmt = ret;

    if ((status = mmal_component_create(MMAL_COMPONENT_DEFAULT_VIDEO_DECODER, &ctx->decoder)))
        goto fail;

    decoder = ctx->decoder;

    format_in = decoder->input[0]->format;
    format_in->type = MMAL_ES_TYPE_VIDEO;
    switch (avctx->codec_id) {
    case AV_CODEC_ID_MPEG2VIDEO:
        format_in->encoding = MMAL_ENCODING_MP2V;
        break;
    case AV_CODEC_ID_MPEG4:
        format_in->encoding = MMAL_ENCODING_MP4V;
        break;
    case AV_CODEC_ID_VC1:
        format_in->encoding = MMAL_ENCODING_WVC1;
        break;
    case AV_CODEC_ID_H264:
    default:
        format_in->encoding = MMAL_ENCODING_H264;
        break;
    }
    format_in->es->video.width = FFALIGN(avctx->width, 32);
    format_in->es->video.height = FFALIGN(avctx->height, 16);
    format_in->es->video.crop.width = avctx->width;
    format_in->es->video.crop.height = avctx->height;
    format_in->es->video.frame_rate.num = 24000;
    format_in->es->video.frame_rate.den = 1001;
    format_in->es->video.par.num = avctx->sample_aspect_ratio.num;
    format_in->es->video.par.den = avctx->sample_aspect_ratio.den;
    format_in->flags = MMAL_ES_FORMAT_FLAG_FRAMED;

    av_get_codec_tag_string(tmp, sizeof(tmp), format_in->encoding);
    av_log(avctx, AV_LOG_DEBUG, "Using MMAL %s encoding.\n", tmp);

#if HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS
    if (mmal_port_parameter_set_uint32(decoder->input[0], MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS,
                                       -1 - ctx->extra_decoder_buffers)) {
        av_log(avctx, AV_LOG_WARNING, "Could not set input buffering limit.\n");
    }
#endif

    if ((status = mmal_port_format_commit(decoder->input[0])))
        goto fail;

    decoder->input[0]->buffer_num =
        FFMAX(decoder->input[0]->buffer_num_min, 20);
    decoder->input[0]->buffer_size =
        FFMAX(decoder->input[0]->buffer_size_min, 512 * 1024);
    ctx->pool_in = mmal_pool_create(decoder->input[0]->buffer_num, 0);
    if (!ctx->pool_in) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if ((ret = ffmal_update_format(avctx)) < 0)
        goto fail;

    ctx->queue_decoded_frames = mmal_queue_create();
    if (!ctx->queue_decoded_frames)
        goto fail;

    decoder->input[0]->userdata = (void*)avctx;
    decoder->output[0]->userdata = (void*)avctx;
    decoder->control->userdata = (void*)avctx;

    if ((status = mmal_port_enable(decoder->control, control_port_cb)))
        goto fail;
    if ((status = mmal_port_enable(decoder->input[0], input_callback)))
        goto fail;
    if ((status = mmal_port_enable(decoder->output[0], output_callback)))
        goto fail;

    if ((status = mmal_component_enable(decoder)))
        goto fail;

    return 0;

fail:
    ffmmal_close_decoder(avctx);
    return ret < 0 ? ret : AVERROR_UNKNOWN;
}
Example #27
HRESULT CDecDXVA2::CreateDXVA2Decoder(int nSurfaces, IDirect3DSurface9 **ppSurfaces)
{
  DbgLog((LOG_TRACE, 10, L"-> CDecDXVA2::CreateDXVA2Decoder"));
  HRESULT hr = S_OK;
  LPDIRECT3DSURFACE9 pSurfaces[DXVA2_MAX_SURFACES];

  if (!m_pDXVADecoderService)
    return E_FAIL;

  DestroyDecoder(false, true);

  GUID input = GUID_NULL;
  D3DFORMAT output;
  FindVideoServiceConversion(m_pAVCtx->codec_id, &input, &output);

  if (!nSurfaces) {
    m_dwSurfaceWidth = FFALIGN(m_pAVCtx->coded_width, 16);
    m_dwSurfaceHeight = FFALIGN(m_pAVCtx->coded_height, 16);

    m_NumSurfaces = GetBufferCount();
    hr = m_pDXVADecoderService->CreateSurface(m_dwSurfaceWidth, m_dwSurfaceHeight, m_NumSurfaces - 1, output, D3DPOOL_DEFAULT, 0, DXVA2_VideoDecoderRenderTarget, pSurfaces, NULL);
    if (FAILED(hr)) {
      DbgLog((LOG_TRACE, 10, L"-> Creation of surfaces failed with hr: %X", hr));
      m_NumSurfaces = 0;
      return E_FAIL;
    }
    ppSurfaces = pSurfaces;
  } else {
    m_NumSurfaces = nSurfaces;
    for (int i = 0; i < m_NumSurfaces; i++) {
      ppSurfaces[i]->AddRef();
    }
  }

  for (int i = 0; i < m_NumSurfaces; i++) {
    m_pSurfaces[i].index = i;
    m_pSurfaces[i].d3d = ppSurfaces[i];
    m_pSurfaces[i].age = 0;
    m_pSurfaces[i].used = false;
  }

  DbgLog((LOG_TRACE, 10, L"-> Successfully created %d surfaces (%dx%d)", m_NumSurfaces, m_dwSurfaceWidth, m_dwSurfaceHeight));

  DXVA2_VideoDesc desc;
  ZeroMemory(&desc, sizeof(desc));
  desc.SampleWidth = m_pAVCtx->coded_width;
  desc.SampleHeight = m_pAVCtx->coded_height;
  desc.Format = output;

  hr = FindDecoderConfiguration(input, &desc, &m_DXVAVideoDecoderConfig);
  if (FAILED(hr)) {
    DbgLog((LOG_TRACE, 10, L"-> FindDecoderConfiguration failed with hr: %X", hr));
    return hr;
  }

  IDirectXVideoDecoder *decoder = NULL;
  hr = m_pDXVADecoderService->CreateVideoDecoder(input, &desc, &m_DXVAVideoDecoderConfig, ppSurfaces, m_NumSurfaces, &decoder);
  if (FAILED(hr)) {
    DbgLog((LOG_TRACE, 10, L"-> CreateVideoDecoder failed with hr: %X", hr));
    return E_FAIL;
  }
  m_pDecoder = decoder;

  /* fill hwaccel_context */
  dxva_context *ctx = (dxva_context *)m_pAVCtx->hwaccel_context;
  ctx->cfg           = &m_DXVAVideoDecoderConfig;
  ctx->decoder       = m_pDecoder;
  ctx->surface       = m_pRawSurface;
  ctx->surface_count = m_NumSurfaces;

  if (m_dwVendorId == VEND_ID_INTEL && input == DXVADDI_Intel_ModeH264_E)
    ctx->workaround = FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO;
  else if (m_dwVendorId == VEND_ID_ATI && IsAMDUVD(m_dwDeviceId))
    ctx->workaround = FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG;
  else
    ctx->workaround = 0;

  memset(m_pRawSurface, 0, sizeof(m_pRawSurface));
  for (int i = 0; i < m_NumSurfaces; i++) {
    m_pRawSurface[i] = m_pSurfaces[i].d3d;
  }

  return S_OK;
}
Example #28
int CDecDXVA2::get_dxva2_buffer(struct AVCodecContext *c, AVFrame *pic, int flags)
{
  CDecDXVA2 *pDec = (CDecDXVA2 *)c->opaque;
  IMediaSample *pSample = NULL;

  HRESULT hr = S_OK;

  if (pic->format != AV_PIX_FMT_DXVA2_VLD || (c->codec_id == AV_CODEC_ID_H264 && !H264_CHECK_PROFILE(c->profile))) {
    DbgLog((LOG_ERROR, 10, L"DXVA2 buffer request, but not dxva2 pixfmt or unsupported profile"));
    pDec->m_bFailHWDecode = TRUE;
    return -1;
  }

  if (!pDec->m_pDecoder || FFALIGN(c->coded_width, 16) != pDec->m_dwSurfaceWidth || FFALIGN(c->coded_height, 16) != pDec->m_dwSurfaceHeight) {
    DbgLog((LOG_TRACE, 10, L"No DXVA2 Decoder or image dimensions changed -> Re-Allocating resources"));
    if (!pDec->m_pDecoder && pDec->m_bNative && !pDec->m_pDXVA2Allocator) {
      ASSERT(0);
      hr = E_FAIL;
    } else if (pDec->m_bNative) {
      avcodec_flush_buffers(c);

      pDec->m_dwSurfaceWidth = FFALIGN(c->coded_width, 16);
      pDec->m_dwSurfaceHeight = FFALIGN(c->coded_height, 16);

      // Re-Commit the allocator (creates surfaces and new decoder)
      hr = pDec->m_pDXVA2Allocator->Decommit();
      if (pDec->m_pDXVA2Allocator->DecommitInProgress()) {
        DbgLog((LOG_TRACE, 10, L"WARNING! DXVA2 Allocator is still busy, trying to flush downstream"));
        pDec->m_pCallback->ReleaseAllDXVAResources();
        pDec->m_pCallback->GetOutputPin()->GetConnected()->BeginFlush();
        pDec->m_pCallback->GetOutputPin()->GetConnected()->EndFlush();
        if (pDec->m_pDXVA2Allocator->DecommitInProgress()) {
          DbgLog((LOG_TRACE, 10, L"WARNING! Flush had no effect, decommit of the allocator still not complete"));
        } else {
          DbgLog((LOG_TRACE, 10, L"Flush was successfull, decommit completed!"));
        }
      }
      hr = pDec->m_pDXVA2Allocator->Commit();
    } else if (!pDec->m_bNative) {
      hr = pDec->CreateDXVA2Decoder();
    }
    if (FAILED(hr)) {
      pDec->m_bFailHWDecode = TRUE;
      return -1;
    }
  }

  if (FAILED(pDec->m_pD3DDevMngr->TestDevice(pDec->m_hDevice))) {
    DbgLog((LOG_ERROR, 10, L"Device Lost"));
  }

  int i;
  if (pDec->m_bNative) {
    if (!pDec->m_pDXVA2Allocator)
      return -1;

    hr = pDec->m_pDXVA2Allocator->GetBuffer(&pSample, NULL, NULL, 0);
    if (FAILED(hr)) {
      DbgLog((LOG_ERROR, 10, L"DXVA2Allocator returned error, hr: 0x%x", hr));
      return -1;
    }

    ILAVDXVA2Sample *pLavDXVA2 = NULL;
    hr = pSample->QueryInterface(&pLavDXVA2);
    if (FAILED(hr)) {
      DbgLog((LOG_ERROR, 10, L"Sample is no LAV DXVA2 sample?????"));
      SafeRelease(&pSample);
      return -1;
    }
    i = pLavDXVA2->GetDXSurfaceId();
    SafeRelease(&pLavDXVA2);
  } else {
    int old, old_unused;
    for (i = 0, old = 0, old_unused = -1; i < pDec->m_NumSurfaces; i++) {
      d3d_surface_t *surface = &pDec->m_pSurfaces[i];
      if (!surface->used && (old_unused == -1 || surface->age < pDec->m_pSurfaces[old_unused].age))
        old_unused = i;
      if (surface->age < pDec->m_pSurfaces[old].age)
        old = i;
    }
    if (old_unused == -1) {
      DbgLog((LOG_TRACE, 10, L"No free surface, using oldest"));
      i = old;
    } else {
      i = old_unused;
    }
  }

  LPDIRECT3DSURFACE9 pSurface = pDec->m_pSurfaces[i].d3d;
  if (!pSurface) {
    DbgLog((LOG_ERROR, 10, L"There is a sample, but no D3D Surace? WTF?"));
    SafeRelease(&pSample);
    return -1;
  }

  pDec->m_pSurfaces[i].age  = pDec->m_CurrentSurfaceAge++;
  pDec->m_pSurfaces[i].used = true;

  memset(pic->data, 0, sizeof(pic->data));
  memset(pic->linesize, 0, sizeof(pic->linesize));
  memset(pic->buf, 0, sizeof(pic->buf));

  pic->data[0] = pic->data[3] = (uint8_t *)pSurface;
  pic->data[4] = (uint8_t *)pSample;

  SurfaceWrapper *surfaceWrapper = new SurfaceWrapper();
  surfaceWrapper->pDec = pDec;
  surfaceWrapper->surface = pSurface;
  surfaceWrapper->sample = pSample;
  pic->buf[0] = av_buffer_create(NULL, 0, free_dxva2_buffer, surfaceWrapper, 0);

  return 0;
}
Example #29
static int dds_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    DDSContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    AVFrame *frame = data;
    int mipmap;
    int ret;

    ff_texturedsp_init(&ctx->texdsp);
    bytestream2_init(gbc, avpkt->data, avpkt->size);

    if (bytestream2_get_bytes_left(gbc) < 128) {
        av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n",
               bytestream2_get_bytes_left(gbc));
        return AVERROR_INVALIDDATA;
    }

    if (bytestream2_get_le32(gbc) != MKTAG('D', 'D', 'S', ' ') ||
            bytestream2_get_le32(gbc) != 124) { // header size
        av_log(avctx, AV_LOG_ERROR, "Invalid DDS header.\n");
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(gbc, 4); // flags

    avctx->height = bytestream2_get_le32(gbc);
    avctx->width  = bytestream2_get_le32(gbc);
    ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n",
               avctx->width, avctx->height);
        return ret;
    }

    /* Since the codec is based on 4x4 blocks, the size is aligned to 4. */
    avctx->coded_width  = FFALIGN(avctx->width,  TEXTURE_BLOCK_W);
    avctx->coded_height = FFALIGN(avctx->height, TEXTURE_BLOCK_H);

    bytestream2_skip(gbc, 4); // pitch
    bytestream2_skip(gbc, 4); // depth
    mipmap = bytestream2_get_le32(gbc);
    if (mipmap != 0)
        av_log(avctx, AV_LOG_VERBOSE, "Found %d mipmaps (ignored).\n", mipmap);

    /* Extract pixel format information, considering additional elements
     * in reserved1 and reserved2. */
    ret = parse_pixel_format(avctx);
    if (ret < 0)
        return ret;

    ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0)
        return ret;

    if (ctx->compressed) {
        int size = (avctx->coded_height / TEXTURE_BLOCK_H) *
                   (avctx->coded_width / TEXTURE_BLOCK_W) * ctx->tex_ratio;
        ctx->slice_count = av_clip(avctx->thread_count, 1,
                                   avctx->coded_height / TEXTURE_BLOCK_H);

        if (bytestream2_get_bytes_left(gbc) < size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Compressed Buffer is too small (%d < %d).\n",
                   bytestream2_get_bytes_left(gbc), size);
            return AVERROR_INVALIDDATA;
        }

        /* Use the decompress function on the texture, one block per thread. */
        ctx->tex_data = gbc->buffer;
        avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count);
    } else {
        int linesize = av_image_get_linesize(avctx->pix_fmt, frame->width, 0);

        if (ctx->paletted) {
            int i;
            /* Use the first 1024 bytes as palette, then copy the rest. */
            bytestream2_get_buffer(gbc, frame->data[1], 256 * 4);
            for (i = 0; i < 256; i++)
                AV_WN32(frame->data[1] + i*4,
                        (frame->data[1][2+i*4]<<0)+
                        (frame->data[1][1+i*4]<<8)+
                        (frame->data[1][0+i*4]<<16)+
                        (frame->data[1][3+i*4]<<24)
                       );

            frame->palette_has_changed = 1;
        }

        if (bytestream2_get_bytes_left(gbc) < frame->height * linesize) {
            av_log(avctx, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n",
                   bytestream2_get_bytes_left(gbc), frame->height * linesize);
            return AVERROR_INVALIDDATA;
        }

        av_image_copy_plane(frame->data[0], frame->linesize[0],
                            gbc->buffer, linesize,
                            linesize, frame->height);
    }

    /* Run any post processing here if needed. */
    if (avctx->pix_fmt == AV_PIX_FMT_BGRA ||
            avctx->pix_fmt == AV_PIX_FMT_RGBA ||
            avctx->pix_fmt == AV_PIX_FMT_RGB0 ||
            avctx->pix_fmt == AV_PIX_FMT_BGR0 ||
            avctx->pix_fmt == AV_PIX_FMT_YA8)
        run_postproc(avctx, frame);

    /* Frame is ready to be output. */
    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
Example #30
static int pcm_bluray_decode_frame(AVCodecContext *avctx, void *data,
                                   int *got_frame_ptr, AVPacket *avpkt)
{
    const uint8_t *src = avpkt->data;
    int buf_size = avpkt->size;
    PCMBRDecode *s = avctx->priv_data;
    GetByteContext gb;
    int num_source_channels, channel, retval;
    int sample_size, samples;
    int16_t *dst16;
    int32_t *dst32;

    if (buf_size < 4) {
        av_log(avctx, AV_LOG_ERROR, "PCM packet too small\n");
        return -1;
    }

    if (pcm_bluray_parse_header(avctx, src))
        return -1;
    src += 4;
    buf_size -= 4;

    bytestream2_init(&gb, src, buf_size);

    /* There's always an even number of channels in the source */
    num_source_channels = FFALIGN(avctx->channels, 2);
    sample_size = (num_source_channels * (avctx->sample_fmt == AV_SAMPLE_FMT_S16 ? 16 : 24)) >> 3;
    samples = buf_size / sample_size;

    /* get output buffer */
    s->frame.nb_samples = samples;
    if ((retval = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return retval;
    }
    dst16 = (int16_t *)s->frame.data[0];
    dst32 = (int32_t *)s->frame.data[0];

    if (samples) {
        switch (avctx->channel_layout) {
            /* cases with same number of source and coded channels */
        case AV_CH_LAYOUT_STEREO:
        case AV_CH_LAYOUT_4POINT0:
        case AV_CH_LAYOUT_2_2:
            samples *= num_source_channels;
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
#if HAVE_BIGENDIAN
                bytestream2_get_buffer(&gb, dst16, buf_size);
#else
                do {
                    *dst16++ = bytestream2_get_be16u(&gb);
                } while (--samples);
#endif
            } else {
                do {
                    *dst32++ = bytestream2_get_be24u(&gb) << 8;
                } while (--samples);
            }
            break;
        /* cases where number of source channels = coded channels + 1 */
        case AV_CH_LAYOUT_MONO:
        case AV_CH_LAYOUT_SURROUND:
        case AV_CH_LAYOUT_2_1:
        case AV_CH_LAYOUT_5POINT0:
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
#if HAVE_BIGENDIAN
                    bytestream2_get_buffer(&gb, dst16, avctx->channels * 2);
                    dst16 += avctx->channels;
#else
                    channel = avctx->channels;
                    do {
                        *dst16++ = bytestream2_get_be16u(&gb);
                    } while (--channel);
#endif
                    bytestream2_skip(&gb, 2);
                } while (--samples);
            } else {
                do {
                    channel = avctx->channels;
                    do {
                        *dst32++ = bytestream2_get_be24u(&gb) << 8;
                    } while (--channel);
                    bytestream2_skip(&gb, 3);
                } while (--samples);
            }
            break;
            /* remapping: L, R, C, LBack, RBack, LF */
        case AV_CH_LAYOUT_5POINT1:
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
                    dst16[0] = bytestream2_get_be16u(&gb);
                    dst16[1] = bytestream2_get_be16u(&gb);
                    dst16[2] = bytestream2_get_be16u(&gb);
                    dst16[4] = bytestream2_get_be16u(&gb);
                    dst16[5] = bytestream2_get_be16u(&gb);
                    dst16[3] = bytestream2_get_be16u(&gb);
                    dst16 += 6;
                } while (--samples);
            } else {
                do {
                    dst32[0] = bytestream2_get_be24u(&gb) << 8;
                    dst32[1] = bytestream2_get_be24u(&gb) << 8;
                    dst32[2] = bytestream2_get_be24u(&gb) << 8;
                    dst32[4] = bytestream2_get_be24u(&gb) << 8;
                    dst32[5] = bytestream2_get_be24u(&gb) << 8;
                    dst32[3] = bytestream2_get_be24u(&gb) << 8;
                    dst32 += 6;
                } while (--samples);
            }
            break;
            /* remapping: L, R, C, LSide, LBack, RBack, RSide, <unused> */
        case AV_CH_LAYOUT_7POINT0:
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
                    dst16[0] = bytestream2_get_be16u(&gb);
                    dst16[1] = bytestream2_get_be16u(&gb);
                    dst16[2] = bytestream2_get_be16u(&gb);
                    dst16[5] = bytestream2_get_be16u(&gb);
                    dst16[3] = bytestream2_get_be16u(&gb);
                    dst16[4] = bytestream2_get_be16u(&gb);
                    dst16[6] = bytestream2_get_be16u(&gb);
                    dst16 += 7;
                    bytestream2_skip(&gb, 2);
                } while (--samples);
            } else {
                do {
                    dst32[0] = bytestream2_get_be24u(&gb) << 8;
                    dst32[1] = bytestream2_get_be24u(&gb) << 8;
                    dst32[2] = bytestream2_get_be24u(&gb) << 8;
                    dst32[5] = bytestream2_get_be24u(&gb) << 8;
                    dst32[3] = bytestream2_get_be24u(&gb) << 8;
                    dst32[4] = bytestream2_get_be24u(&gb) << 8;
                    dst32[6] = bytestream2_get_be24u(&gb) << 8;
                    dst32 += 7;
                    bytestream2_skip(&gb, 3);
                } while (--samples);
            }
            break;
            /* remapping: L, R, C, LSide, LBack, RBack, RSide, LF */
        case AV_CH_LAYOUT_7POINT1:
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
                    dst16[0] = bytestream2_get_be16u(&gb);
                    dst16[1] = bytestream2_get_be16u(&gb);
                    dst16[2] = bytestream2_get_be16u(&gb);
                    dst16[6] = bytestream2_get_be16u(&gb);
                    dst16[4] = bytestream2_get_be16u(&gb);
                    dst16[5] = bytestream2_get_be16u(&gb);
                    dst16[7] = bytestream2_get_be16u(&gb);
                    dst16[3] = bytestream2_get_be16u(&gb);
                    dst16 += 8;
                } while (--samples);
            } else {
                do {
                    dst32[0] = bytestream2_get_be24u(&gb) << 8;
                    dst32[1] = bytestream2_get_be24u(&gb) << 8;
                    dst32[2] = bytestream2_get_be24u(&gb) << 8;
                    dst32[6] = bytestream2_get_be24u(&gb) << 8;
                    dst32[4] = bytestream2_get_be24u(&gb) << 8;
                    dst32[5] = bytestream2_get_be24u(&gb) << 8;
                    dst32[7] = bytestream2_get_be24u(&gb) << 8;
                    dst32[3] = bytestream2_get_be24u(&gb) << 8;
                    dst32 += 8;
                } while (--samples);
            }
            break;
        }
    }

    *got_frame_ptr   = 1;
    *(AVFrame *)data = s->frame;

    retval = bytestream2_tell(&gb);
    if (avctx->debug & FF_DEBUG_BITSTREAM)
        av_dlog(avctx, "pcm_bluray_decode_frame: decoded %d -> %d bytes\n",
                retval, buf_size);
    return retval + 4;
}