Example No. 1
static const AVFilterPad avfilter_vf_boxblur_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .draw_slice   = draw_slice,
        .min_perms    = AV_PERM_READ
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_boxblur_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_boxblur = {
    .name          = "boxblur",
    .description   = NULL_IF_CONFIG_SMALL("Blur the input."),
    .priv_size     = sizeof(BoxBlurContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs    = avfilter_vf_boxblur_inputs,
    .outputs   = avfilter_vf_boxblur_outputs,
};
Example No. 2
    avctx->coded_frame->key_frame = 1;
    if(!avctx->codec_tag)
        avctx->codec_tag = avcodec_pix_fmt_to_codec_tag(avctx->pix_fmt);
    return 0;
}

static int raw_encode(AVCodecContext *avctx,
                            unsigned char *frame, int buf_size, void *data)
{
    int ret = avpicture_layout((AVPicture *)data, avctx->pix_fmt, avctx->width,
                                               avctx->height, frame, buf_size);

    if(avctx->codec_tag == AV_RL32("yuv2") && ret > 0 &&
       avctx->pix_fmt   == PIX_FMT_YUYV422) {
        int x;
        for(x = 1; x < avctx->height*avctx->width*2; x += 2)
            frame[x] ^= 0x80;
    }
    return ret;
}

AVCodec rawvideo_encoder = {
    "rawvideo",
    CODEC_TYPE_VIDEO,
    CODEC_ID_RAWVIDEO,
    sizeof(AVFrame),
    raw_init_encoder,
    raw_encode,
    .long_name = NULL_IF_CONFIG_SMALL("raw video"),
};
Example No. 3
    ctx->slice_count = av_clip(avctx->thread_count, 1,
                               avctx->coded_height / TEXTURE_BLOCK_H);

    return 0;
}

static av_cold int hap_close(AVCodecContext *avctx)
{
    HapContext *ctx = avctx->priv_data;

    ff_hap_free_context(ctx);

    return 0;
}

AVCodec ff_hap_decoder = {
    .name           = "hap",
    .long_name      = NULL_IF_CONFIG_SMALL("Vidvox Hap decoder"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HAP,
    .init           = hap_init,
    .decode         = hap_decode,
    .close          = hap_close,
    .priv_data_size = sizeof(HapContext),
    .capabilities   = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS |
                      AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};
Example No. 4
                break;
        }
    }

    *data_size=sizeof(AVFrame);
    *(AVFrame*)data= *avctx->coded_frame;

    return avpkt->size;
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    AVFrame *pic = avctx->coded_frame;
    if (pic->data[0])
        avctx->release_buffer(avctx, pic);
    av_freep(&avctx->coded_frame);

    return 0;
}

AVCodec ff_v210x_decoder = {
    .name           = "v210x",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_V210X,
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
};
Example No. 5
            avio_w8(pb, (ico->images[i].bits >= 8) ? 0 : 1 << ico->images[i].bits); // palette color count (0 when bit depth >= 8)
        } else {
            avio_w8(pb, 0);
        }

        avio_w8(pb, 0); // reserved
        avio_wl16(pb, 1); // color planes
        avio_wl16(pb, ico->images[i].bits);
        avio_wl32(pb, ico->images[i].size);
        avio_wl32(pb, ico->images[i].offset);
    }

    av_freep(&ico->images);

    return 0;
}

AVOutputFormat ff_ico_muxer = {
    .name           = "ico",
    .long_name      = NULL_IF_CONFIG_SMALL("Microsoft Windows ICO"),
    .priv_data_size = sizeof(IcoMuxContext),
    .mime_type      = "image/vnd.microsoft.icon",
    .extensions     = "ico",
    .audio_codec    = CODEC_ID_NONE,
    .video_codec    = CODEC_ID_BMP,
    .write_header   = ico_write_header,
    .write_packet   = ico_write_packet,
    .write_trailer  = ico_write_trailer,
    .flags          = AVFMT_NOTIMESTAMPS,
};
Example No. 6
        ret = av_new_packet(pkt, 8 * codec->channels);
        if (ret < 0)
            return ret;
        for (i = 0; i < 8 / c->interleave_size; i++) {
            for (ch = 0; ch < codec->channels; ch++) {
                pkt->data[ch * 8 + i*c->interleave_size+0] = avio_r8(s->pb);
                pkt->data[ch * 8 + i*c->interleave_size+1] = avio_r8(s->pb);
            }
        }
        ret = 0;
    } else if (codec->codec_id == AV_CODEC_ID_SDX2_DPCM) {
        ret = av_get_packet(s->pb, pkt, codec->block_align * 1024);

    } else {
        ret = av_get_packet(s->pb, pkt, codec->block_align ? codec->block_align : 1024 * codec->channels);
    }

    pkt->stream_index = 0;
    return ret;
}

AVInputFormat ff_genh_demuxer = {
    .name           = "genh",
    .long_name      = NULL_IF_CONFIG_SMALL("GENeric Header"),
    .priv_data_size = sizeof(GENHDemuxContext),
    .read_probe     = genh_probe,
    .read_header    = genh_read_header,
    .read_packet    = genh_read_packet,
    .extensions     = "genh",
};
Example No. 7
    },{
        .name         = "reference",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input_ref,
    },
    { NULL }
};

static const AVFilterPad psnr_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_psnr = {
    .name          = "psnr",
    .description   = NULL_IF_CONFIG_SMALL("Calculate the PSNR between two video streams."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(PSNRContext),
    .priv_class    = &psnr_class,
    .inputs        = psnr_inputs,
    .outputs       = psnr_outputs,
};
Example No. 8
    return size;
}

static int mpjpeg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;
    int size = parse_multipart_header(s);

    if (size < 0)
        return size;

    ret = av_get_packet(s->pb, pkt, size);
    if (ret < 0)
        return ret;

    // trailing empty line
    avio_skip(s->pb, 2);

    return 0;
}

AVInputFormat ff_mpjpeg_demuxer = {
    .name              = "mpjpeg",
    .long_name         = NULL_IF_CONFIG_SMALL("MIME multipart JPEG"),
    .mime_type         = "multipart/x-mixed-replace",
    .extensions        = "mjpg",
    .read_probe        = mpjpeg_read_probe,
    .read_header       = mpjpeg_read_header,
    .read_packet       = mpjpeg_read_packet,
};
Example No. 9
    c->height = 0;
    c->codec_frameheader = avctx->codec_tag == MKTAG('R', 'J', 'P', 'G');
    if (avctx->extradata_size)
        get_quant(avctx, c, avctx->extradata, avctx->extradata_size);
    dsputil_init(&c->dsp, avctx);
    if (!codec_reinit(avctx, avctx->width, avctx->height, -1))
        return 1;
    return 0;
}

static av_cold int decode_end(AVCodecContext *avctx) {
    NuvContext *c = avctx->priv_data;
    av_freep(&c->decomp_buf);
    if (c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);
    return 0;
}

AVCodec ff_nuv_decoder = {
    .name           = "nuv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_NUV,
    .priv_data_size = sizeof(NuvContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("NuppelVideo/RTJPEG"),
};
Example No. 10
        case AVMEDIA_TYPE_AUDIO:
            audio_frame_cksum(&bp, *frame);
            break;
    }

    av_bprint_chars(&bp, '\n', 1);
    if (av_bprint_is_complete(&bp))
        avio_write(s->pb, bp.str, bp.len);
    else
        ret = AVERROR(ENOMEM);
    av_bprint_finalize(&bp, NULL);
    return ret;
}

static int write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    return AVERROR(ENOSYS);
}

AVOutputFormat ff_uncodedframecrc_muxer = {
    .name              = "uncodedframecrc",
    .long_name         = NULL_IF_CONFIG_SMALL("uncoded framecrc testing"),
    .audio_codec       = AV_CODEC_ID_PCM_S16LE,
    .video_codec       = AV_CODEC_ID_RAWVIDEO,
    .write_header      = ff_framehash_write_header,
    .write_packet      = write_packet,
    .write_uncoded_frame = write_frame,
    .flags             = AVFMT_VARIABLE_FPS | AVFMT_TS_NONSTRICT |
                         AVFMT_TS_NEGATIVE,
};
Example No. 11
            if ((fps=s->streams[n]->avg_frame_rate.num) > 255) {
                av_log(s, AV_LOG_ERROR, "Frame rate may not exceed 255fps\n");
                return AVERROR(EINVAL);
            }

            if (fps != 30) {
                av_log(s, AV_LOG_WARNING, "For vintage compatibility fps must be 30\n");
            }

            header[6] = fps;
            break;
        }
    }

    avio_write(s->pb, header, 8);
    avio_flush(s->pb);

    return 0;
}

AVOutputFormat ff_roq_muxer = {
    .name         = "roq",
    .long_name    = NULL_IF_CONFIG_SMALL("raw id RoQ"),
    .extensions   = "roq",
    .audio_codec  = AV_CODEC_ID_ROQ_DPCM,
    .video_codec  = AV_CODEC_ID_ROQ,
    .write_header = roq_write_header,
    .write_packet = ff_raw_write_packet,
};
Example No. 12
static av_cold int libopenjpeg_encode_close(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;

    opj_destroy_compress(ctx->compress);
    opj_image_destroy(ctx->image);
    av_freep(&avctx->coded_frame);
    return 0 ;
}


AVCodec ff_libopenjpeg_encoder = {
    .name           = "libopenjpeg",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_JPEG2000,
    .priv_data_size = sizeof(LibOpenJPEGContext),
    .init           = libopenjpeg_encode_init,
    .encode         = libopenjpeg_encode_frame,
    .close          = libopenjpeg_encode_close,
    .decode         = NULL,
    .capabilities   = 0,
    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24,PIX_FMT_RGBA,PIX_FMT_GRAY8,
                                           PIX_FMT_YUV420P,PIX_FMT_YUV422P,
                                           PIX_FMT_YUV440P,PIX_FMT_YUV444P,
                                           PIX_FMT_YUV420P9,PIX_FMT_YUV422P9,PIX_FMT_YUV444P9,
                                           PIX_FMT_YUV420P10,PIX_FMT_YUV422P10,PIX_FMT_YUV444P10,
                                           PIX_FMT_YUV420P16,PIX_FMT_YUV422P16,PIX_FMT_YUV444P16},
    .long_name = NULL_IF_CONFIG_SMALL("OpenJPEG based JPEG 2000 encoder"),
} ;
Example No. 13
    avio_write(pb, data ? data : pkt->data, size);

    avio_wb32(pb,size+flags_size+11); // previous tag size
    flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration);

    avio_flush(pb);

    av_free(data);

    return pb->error;
}

AVOutputFormat ff_flv_muxer = {
    .name           = "flv",
    .long_name      = NULL_IF_CONFIG_SMALL("FLV format"),
    .mime_type      = "video/x-flv",
    .extensions     = "flv",
    .priv_data_size = sizeof(FLVContext),
#if CONFIG_LIBMP3LAME
    .audio_codec    = CODEC_ID_MP3,
#else // CONFIG_LIBMP3LAME
    .audio_codec    = CODEC_ID_ADPCM_SWF,
#endif // CONFIG_LIBMP3LAME
    .video_codec    = CODEC_ID_FLV1,
    .write_header   = flv_write_header,
    .write_packet   = flv_write_packet,
    .write_trailer  = flv_write_trailer,
    .codec_tag= (const AVCodecTag* const []){flv_video_codec_ids, flv_audio_codec_ids, 0},
    .flags= AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS,
};
Example No. 14
File: vmnc.c Project: komh/kmp
/*
 *
 * Uninit VMnc decoder
 *
 */
static av_cold int decode_end(AVCodecContext *avctx)
{
    VmncContext * const c = avctx->priv_data;

    if (c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);

    av_free(c->curbits);
    av_free(c->curmask);
    av_free(c->screendta);
    return 0;
}

AVCodec ff_vmnc_decoder = {
    .name           = "vmnc",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_VMNC,
    .priv_data_size = sizeof(VmncContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("VMware Screen Codec / VMware Video"),
};
Example No. 15
        avio_wl64(pb, ctx->frame_cnt * ctx->sum_delta_pts / (ctx->frame_cnt - 1));
        avio_seek(pb, end, SEEK_SET);
    }

    return 0;
}

static int ivf_check_bitstream(struct AVFormatContext *s, const AVPacket *pkt)
{
    int ret = 1;
    AVStream *st = s->streams[pkt->stream_index];

    if (st->codec->codec_id == AV_CODEC_ID_VP9)
        ret = ff_stream_add_bitstream_filter(st, "vp9_superframe", NULL);

    return ret;
}

AVOutputFormat ff_ivf_muxer = {
    .priv_data_size = sizeof(IVFEncContext),
    .name         = "ivf",
    .long_name    = NULL_IF_CONFIG_SMALL("On2 IVF"),
    .extensions   = "ivf",
    .audio_codec  = AV_CODEC_ID_NONE,
    .video_codec  = AV_CODEC_ID_VP8,
    .write_header = ivf_write_header,
    .write_packet = ivf_write_packet,
    .write_trailer = ivf_write_trailer,
    .check_bitstream = ivf_check_bitstream,
};
Example No. 16
            /* skip useless 10B sub-header (yes, it's not accounted for in the chunk header) */
            avio_skip(pb, 10);

            pkt->stream_index = flic->audio_stream_index;
            pkt->pos = avio_tell(pb);
            ret = avio_read(pb, pkt->data, size);

            if (ret != size) {
                av_free_packet(pkt);
                ret = AVERROR(EIO);
            }

            packet_read = 1;
        } else {
            /* not interested in this chunk */
            avio_skip(pb, size - 6);
        }
    }

    return ret;
}

AVInputFormat ff_flic_demuxer = {
    /*.name           = */	"flic",
    /*.long_name      = */	NULL_IF_CONFIG_SMALL("FLI/FLC/FLX animation format"),
    /*.priv_data_size = */	sizeof(FlicDemuxContext),
    /*.read_probe     = */	flic_probe,
    /*.read_header    = */	flic_read_header,
    /*.read_packet    = */	flic_read_packet,
};
Example No. 17
}

static const AVFilterPad avfilter_vsrc_buffer_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .poll_frame    = poll_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter ff_vsrc_buffer = {
    .name      = "buffer",
    .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size = sizeof(BufferSourceContext),
    .priv_class = &buffer_class,
    .query_formats = query_formats,

    .init      = init_video,
    .uninit    = uninit,

    .inputs    = NULL,
    .outputs   = avfilter_vsrc_buffer_outputs,
};

static const AVFilterPad avfilter_asrc_abuffer_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
Example No. 18
    .class_name = "AC3 decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_ac3_decoder = {
    .name           = "ac3",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_AC3,
    .priv_data_size = sizeof (AC3DecodeContext),
    .init           = ac3_decode_init,
    .close          = ac3_decode_end,
    .decode         = ac3_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
    .sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                      AV_SAMPLE_FMT_NONE },
    .priv_class     = &ac3_decoder_class,
};

#if CONFIG_EAC3_DECODER
static const AVClass eac3_decoder_class = {
    .class_name = "E-AC3 decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_eac3_decoder = {
    .name           = "eac3",
Example No. 19
File: sunrast.c Project: raff/libav
            buf += alen;
        }
    }

    *picture   = s->picture;
    *data_size = sizeof(AVFrame);

    return buf - bufstart;
}

static av_cold int sunrast_end(AVCodecContext *avctx) {
    SUNRASTContext *s = avctx->priv_data;

    if(s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    return 0;
}

AVCodec ff_sunrast_decoder = {
    .name           = "sunrast",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_SUNRAST,
    .priv_data_size = sizeof(SUNRASTContext),
    .init           = sunrast_init,
    .close          = sunrast_end,
    .decode         = sunrast_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("Sun Rasterfile image"),
};
Example No. 20
            return read_frame(vid, pb, pkt, block_type, s);

        case EOF_BLOCK:
            if(vid->nframes != 0)
                av_log(s, AV_LOG_VERBOSE, "reached terminating character but not all frames read.\n");
            vid->is_finished = 1;
            return AVERROR(EIO);
        default:
            av_log(s, AV_LOG_ERROR, "unknown block (character = %c, decimal = %d, hex = %x)!!!\n",
                   block_type, block_type, block_type);
            return AVERROR_INVALIDDATA;
    }
}

static int vid_read_close(AVFormatContext *s)
{
    BVID_DemuxContext *vid = s->priv_data;
    av_freep(&vid->palette);
    return 0;
}

AVInputFormat ff_bethsoftvid_demuxer = {
    .name           = "bethsoftvid",
    .long_name      = NULL_IF_CONFIG_SMALL("Bethesda Softworks VID"),
    .priv_data_size = sizeof(BVID_DemuxContext),
    .read_probe     = vid_probe,
    .read_header    = vid_read_header,
    .read_packet    = vid_read_packet,
    .read_close     = vid_read_close,
};
Example No. 21
    snprintf(buf, sizeof(buf), "CRC=0x%08"PRIx32"\n", crc->crcval);
    avio_write(s->pb, buf, strlen(buf));

    return 0;
}

AVOutputFormat ff_crc_muxer = {
#ifdef IDE_COMPILE
    "crc",
    "CRC testing",
    0, 0, AV_CODEC_ID_PCM_S16LE,
    AV_CODEC_ID_RAWVIDEO,
    0, AVFMT_NOTIMESTAMPS,
    0, 0, 0, sizeof(CRCState),
    crc_write_header,
    crc_write_packet,
    crc_write_trailer,
#else
	.name              = "crc",
    .long_name         = NULL_IF_CONFIG_SMALL("CRC testing"),
    .priv_data_size    = sizeof(CRCState),
    .audio_codec       = AV_CODEC_ID_PCM_S16LE,
    .video_codec       = AV_CODEC_ID_RAWVIDEO,
    .write_header      = crc_write_header,
    .write_packet      = crc_write_packet,
    .write_trailer     = crc_write_trailer,
    .flags             = AVFMT_NOTIMESTAMPS,
#endif
};
Example No. 22
        }
        voc->param_written = 1;
    } else {
        avio_w8(pb, VOC_TYPE_VOICE_DATA_CONT);
        avio_wl24(pb, pkt->size);
    }

    avio_write(pb, pkt->data, pkt->size);
    return 0;
}

static int voc_write_trailer(AVFormatContext *s)
{
    avio_w8(s->pb, 0);
    return 0;
}

AVOutputFormat ff_voc_muxer = {
    .name              = "voc",
    .long_name         = NULL_IF_CONFIG_SMALL("Creative Voice file format"),
    .mime_type         = "audio/x-voc",
    .extensions        = "voc",
    .priv_data_size    = sizeof(VocEncContext),
    .audio_codec       = CODEC_ID_PCM_U8,
    .video_codec       = CODEC_ID_NONE,
    .write_header      = voc_write_header,
    .write_packet      = voc_write_packet,
    .write_trailer     = voc_write_trailer,
    .codec_tag=(const AVCodecTag* const []){ff_voc_codec_tags, 0},
};
Example No. 23
        put_bits(&s->pb, 6, 0); /* mb_x */
        put_bits(&s->pb, 6, 0); /* mb_y */
        put_bits(&s->pb, 12, s->mb_width * s->mb_height);
    }

    put_bits(&s->pb, 3, 0);     /* ignored */
}

FF_MPV_GENERIC_CLASS(rv10)



const enum AVPixelFormat rv10_pix_fmts[] ={ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
AVCodec ff_rv10_encoder = {
	/*.name           =*/ "rv10",
	/*.long_name      =*/  NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
	/*.type           =*/ AVMEDIA_TYPE_VIDEO,
	/*.id             =*/ AV_CODEC_ID_RV10,
	/* capabilities = */ 0,
	/* supported_framerates = */ 0,
	/* pix_fmts = */ rv10_pix_fmts,
	/* supported_samplerates = */ 0,
	/* sample_fmts = */ 0,
	/* channel_layouts = */0,
	/* max_lowres = */ 0,
	/* priv_class = */ &rv10_class,
	/* profiles = */ 0,
	/*.priv_data_size =*/ sizeof(MpegEncContext),
	/* next = */ 0,
	/* init_thread_copy = */ 0,
	/* update_thread_context = */ 0,
Example No. 24
	return 0;
}

static void libdirac_flush(AVCodecContext *avccontext)
{
	/* Got a seek request. We will need free memory held in the private
	 * context and free the current Dirac decoder handle and then open
	 * a new decoder handle. */
	libdirac_decode_close(avccontext);
	libdirac_decode_init(avccontext);
	return;
}



AVCodec ff_libdirac_decoder =
{
	"libdirac",
	AVMEDIA_TYPE_VIDEO,
	CODEC_ID_DIRAC,
	sizeof(FfmpegDiracDecoderParams),
	libdirac_decode_init,
	NULL,
	libdirac_decode_close,
	libdirac_decode_frame,
	CODEC_CAP_DELAY,
	.flush = libdirac_flush,
	.long_name = NULL_IF_CONFIG_SMALL("libdirac Dirac 2.2"),
};
Example No. 25
    return 0;
}

static const AVFilterPad avfilter_af_aselect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

AVFilter ff_af_aselect = {
    .name        = "aselect",
    .description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
    .init        = aselect_init,
    .uninit      = uninit,
    .priv_size   = sizeof(SelectContext),
    .inputs      = avfilter_af_aselect_inputs,
    .priv_class  = &aselect_class,
    .flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#endif /* CONFIG_ASELECT_FILTER */

#if CONFIG_SELECT_FILTER

DEFINE_OPTIONS(select, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(select);

static av_cold int select_init(AVFilterContext *ctx)
Example No. 26
                if (lookup)
                    diff += ff_adpcm_step_table[step_index] >> (lookup_size - 1);
                if (highbit)
                    diff  = -diff;

                output  = av_clip_int16(output + diff);
            }

            *dest = output;
            dest += channels;

            step_index += step_index_tables[lookup_size - 2][lookup];
        }
    }

    *got_frame_ptr   = 1;

    return pkt->size;
}

AVCodec ff_vima_decoder = {
    .name           = "vima",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_VIMA,
    .priv_data_size = sizeof(VimaContext),
    .init           = decode_init,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("LucasArts VIMA audio"),
};
Example No. 27
#define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
static const AVOption aacenc_options[] = {
    {"stereo_mode", "Stereo coding method", offsetof(AACEncContext, options.stereo_mode), AV_OPT_TYPE_INT, {.dbl = 0}, -1, 1, AACENC_FLAGS, "stereo_mode"},
        {"auto",     "Selected by the Encoder", 0, AV_OPT_TYPE_CONST, {.dbl = -1 }, INT_MIN, INT_MAX, AACENC_FLAGS, "stereo_mode"},
        {"ms_off",   "Disable Mid/Side coding", 0, AV_OPT_TYPE_CONST, {.dbl =  0 }, INT_MIN, INT_MAX, AACENC_FLAGS, "stereo_mode"},
        {"ms_force", "Force Mid/Side for the whole frame if possible", 0, AV_OPT_TYPE_CONST, {.dbl =  1 }, INT_MIN, INT_MAX, AACENC_FLAGS, "stereo_mode"},
    {NULL}
};

static const AVClass aacenc_class = {
    "AAC encoder",
    av_default_item_name,
    aacenc_options,
    LIBAVUTIL_VERSION_INT,
};

AVCodec ff_aac_encoder = {
    .name           = "aac",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = CODEC_ID_AAC,
    .priv_data_size = sizeof(AACEncContext),
    .init           = aac_encode_init,
    .encode         = aac_encode_frame,
    .close          = aac_encode_end,
    .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
    .priv_class = &aacenc_class,
};
Example No. 28
        return AVERROR(ENOMEM);

    ret = ff_mss12_decode_init(&c->ctx, 0, &c->sc, NULL);

    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    return ret;
}

static av_cold int mss1_decode_end(AVCodecContext *avctx)
{
    MSS1Context * const ctx = avctx->priv_data;

    av_frame_free(&ctx->pic);
    ff_mss12_decode_end(&ctx->ctx);

    return 0;
}

AVCodec ff_mss1_decoder = {
    .name           = "mss1",
    .long_name      = NULL_IF_CONFIG_SMALL("MS Screen 1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSS1,
    .priv_data_size = sizeof(MSS1Context),
    .init           = mss1_decode_init,
    .close          = mss1_decode_end,
    .decode         = mss1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
};
Example No. 29
static av_cold int oggvorbis_encode_close(AVCodecContext *avccontext) {
    OggVorbisContext *context = avccontext->priv_data ;
/*  ogg_packet op ; */

    vorbis_analysis_wrote(&context->vd, 0) ; /* notify vorbisenc this is EOF */

    vorbis_block_clear(&context->vb);
    vorbis_dsp_clear(&context->vd);
    vorbis_info_clear(&context->vi);

    av_freep(&avccontext->coded_frame);
    av_freep(&avccontext->extradata);

    return 0 ;
}


AVCodec libvorbis_encoder = {
    "libvorbis",
    CODEC_TYPE_AUDIO,
    CODEC_ID_VORBIS,
    sizeof(OggVorbisContext),
    oggvorbis_encode_init,
    oggvorbis_encode_frame,
    oggvorbis_encode_close,
    .capabilities= CODEC_CAP_DELAY,
    .sample_fmts = (enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("libvorbis Vorbis"),
} ;
Example No. 30
    return 0;
}

static int read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    BinkDemuxContext *bink = s->priv_data;
    AVStream *vst = s->streams[0];

    if (!s->pb->seekable)
        return -1;

    /* seek to the first frame */
    avio_seek(s->pb, vst->index_entries[0].pos, SEEK_SET);
    bink->video_pts = 0;
    memset(bink->audio_pts, 0, sizeof(bink->audio_pts));
    bink->current_track = -1;
    return 0;
}

AVInputFormat ff_bink_demuxer = {
    "bink",
    NULL_IF_CONFIG_SMALL("Bink"),
    sizeof(BinkDemuxContext),
    probe,
    read_header,
    read_packet,
    NULL,
    read_seek,
};