/**
 * Initialize the YOP decoder.
 *
 * Validates the frame dimensions (both must be even) and reads the
 * palette layout — colour count and the two per-field first-colour
 * indices — from the first three extradata bytes.
 *
 * @return 0 on success, a negative value on invalid input.
 */
static av_cold int yop_decode_init(AVCodecContext *avctx)
{
    YopDecContext *s = avctx->priv_data;
    s->avctx = avctx;

    if (avctx->width & 1 || avctx->height & 1 ||
        avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
        av_log(avctx, AV_LOG_ERROR, "YOP has invalid dimensions\n");
        return -1;
    }

    /* fix: the palette parameters below live in extradata[0..2]; reading
     * them without this check dereferences missing (possibly NULL) data */
    if (avctx->extradata_size < 3) {
        av_log(avctx, AV_LOG_ERROR, "YOP: extradata missing or too small\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->pix_fmt = PIX_FMT_PAL8;

    s->num_pal_colors = avctx->extradata[0];
    s->first_color[0] = avctx->extradata[1];
    s->first_color[1] = avctx->extradata[2];

    if (s->num_pal_colors + s->first_color[0] > 256 ||
        s->num_pal_colors + s->first_color[1] > 256) {
        av_log(avctx, AV_LOG_ERROR,
               "YOP: palette parameters invalid, header probably corrupt\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
/**
 * Initialize the CamStudio decoder.
 *
 * Validates dimensions, maps the coded bit depth to an output pixel
 * format and allocates the LZO decompression buffer.
 *
 * @return 0 on success, non-zero on error.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    CamStudioContext *c = avctx->priv_data;

    /* fix: arguments are (width, height); they were passed swapped */
    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
        return 1;
    }
    switch (avctx->bits_per_sample) {
    case 16: avctx->pix_fmt = PIX_FMT_RGB555; break;
    case 24: avctx->pix_fmt = PIX_FMT_BGR24; break;
    case 32: avctx->pix_fmt = PIX_FMT_RGB32; break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "CamStudio codec error: invalid depth %i bpp\n",
               avctx->bits_per_sample);
        return 1;
    }
    c->bpp = avctx->bits_per_sample;
    c->pic.data[0] = NULL;
    c->linelen = avctx->width * avctx->bits_per_sample / 8;
    c->height = avctx->height;
    c->decomp_size = c->height * c->linelen;
    /* LZO may overread slightly; pad the buffer accordingly */
    c->decomp_buf = av_malloc(c->decomp_size + LZO_OUTPUT_PADDING);
    if (!c->decomp_buf) {
        av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
        return 1;
    }
    return 0;
}
/**
 * Decode one GIF image from the bytestream.
 *
 * Parses the GIF header, (re)sizes the codec context to the screen
 * dimensions found there, (re)allocates the output frame and decodes
 * the next image into it.
 *
 * @param data      output AVFrame
 * @param data_size set to sizeof(AVPicture) when a frame was produced
 * @return number of bytes consumed, or a negative value on error
 */
static int gif_decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size)
{
    GifState *s = avctx->priv_data;
    AVFrame *picture = data;
    int ret;

    s->bytestream = buf;
    s->bytestream_end = buf + buf_size;
    if (gif_read_header1(s) < 0)
        return -1;

    /* GIF is always palettized */
    avctx->pix_fmt = PIX_FMT_PAL8;
    if (avcodec_check_dimensions(avctx, s->screen_width, s->screen_height))
        return -1;
    avcodec_set_dimensions(avctx, s->screen_width, s->screen_height);

    /* release the previous frame before requesting a new one */
    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);
    if (avctx->get_buffer(avctx, &s->picture) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    /* data[1] of a PAL8 frame holds the 32-bit palette */
    s->image_palette = (uint32_t *)s->picture.data[1];
    ret = gif_parse_next_image(s);
    if (ret < 0)
        return ret;

    *picture = s->picture;
    *data_size = sizeof(AVPicture);
    return s->bytestream - buf;
}
/**
 * Initialize the Flash Screen Video encoder.
 *
 * The FlashSV bitstream stores image dimensions in 12-bit fields, so
 * each dimension is limited to 4095 pixels.
 *
 * @return 0 on success, -1 on invalid dimensions or allocation failure.
 */
static av_cold int flashsv_encode_init(AVCodecContext *avctx)
{
    FlashSVContext *s = avctx->priv_data;

    s->avctx = avctx;

    if ((avctx->width > 4095) || (avctx->height > 4095)) {
        /* fix: the message used to claim a 4096x4096 limit while the check
         * (correctly, for 12-bit fields) rejects anything above 4095 */
        av_log(avctx, AV_LOG_ERROR, "Input dimensions too large, input must be max 4095x4095 !\n");
        return -1;
    }

    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
        return -1;
    }

    // Needed if zlib unused or init aborted before deflateInit
    memset(&(s->zstream), 0, sizeof(z_stream));

    s->last_key_frame = 0;

    s->image_width  = avctx->width;
    s->image_height = avctx->height;

    /* one worst-case 256x256 block at 3 bytes/pixel, plus a full-frame
     * encode buffer */
    s->tmpblock  = av_mallocz(3*256*256);
    s->encbuffer = av_mallocz(s->image_width*s->image_height*3);

    if (!s->tmpblock || !s->encbuffer) {
        av_log(avctx, AV_LOG_ERROR, "Memory allocation failed.\n");
        return -1;
    }

    return 0;
}
/**
 * Init VMnc decoder.
 *
 * Validates dimensions and maps the coded bit depth to an output pixel
 * format.
 *
 * @return 0 on success, non-zero on error.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    VmncContext * const c = avctx->priv_data;

    c->avctx = avctx;

    c->pic.data[0] = NULL;
    c->width = avctx->width;
    c->height = avctx->height;

    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
        return 1;
    }
    c->bpp = avctx->bits_per_sample;
    c->bpp2 = c->bpp/8; // bytes per pixel

    switch (c->bpp) {
    case 8:
        avctx->pix_fmt = PIX_FMT_PAL8;
        break;
    case 16:
        avctx->pix_fmt = PIX_FMT_RGB555;
        break;
    case 32:
        avctx->pix_fmt = PIX_FMT_RGB32;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported bitdepth %i\n", c->bpp);
        /* fix: the original fell through and returned 0 (success) here,
         * leaving pix_fmt unset */
        return -1;
    }

    return 0;
}
/**
 * Initialize the Smacker video decoder: validate the frame dimensions,
 * select paletted output and build the Huffman trees shipped in the
 * container extradata.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    SmackVContext * const ctx = avctx->priv_data;

    ctx->avctx       = avctx;
    ctx->pic.data[0] = NULL;

    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0)
        return 1;

    avctx->pix_fmt = PIX_FMT_PAL8;

    /* decode huffman trees from extradata */
    if (avctx->extradata_size < 16) {
        av_log(avctx, AV_LOG_ERROR, "Extradata missing!\n");
        return -1;
    }
    decode_header_trees(ctx);

    return 0;
}
/**
 * Init tscc decoder.
 *
 * Validates dimensions, maps the coded bit depth to an output pixel
 * format, allocates the worst-case RLE decompression buffer and sets
 * up the zlib inflate state.
 *
 * @return 0 on success, non-zero on failure.
 */
static int decode_init(AVCodecContext *avctx)
{
    CamtasiaContext * const c = (CamtasiaContext *)avctx->priv_data;
    int zret; // Zlib return code

    c->avctx = avctx;
    avctx->has_b_frames = 0;

    c->pic.data[0] = NULL;
    c->height = avctx->height;

    /* fix: arguments are (width, height); they were passed swapped */
    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
        return 1;
    }

#ifdef CONFIG_ZLIB
    // Needed if zlib unused or init aborted before inflateInit
    memset(&(c->zstream), 0, sizeof(z_stream));
#else
    av_log(avctx, AV_LOG_ERROR, "Zlib support not compiled.\n");
    return 1;
#endif

    switch (avctx->bits_per_sample) {
    case  8: avctx->pix_fmt = PIX_FMT_PAL8;   break;
    case 16: avctx->pix_fmt = PIX_FMT_RGB555; break;
    case 24: avctx->pix_fmt = PIX_FMT_BGR24;  break;
    case 32: avctx->pix_fmt = PIX_FMT_RGBA32; break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Camtasia error: unknown depth %i bpp\n", avctx->bits_per_sample);
        return -1;
    }

    c->bpp = avctx->bits_per_sample;
    /* RLE in the 'best' case: worst-case expansion of the packed stream */
    c->decomp_size = (avctx->width * c->bpp + (avctx->width + 254) / 255 + 2) * avctx->height + 2;

    /* Allocate decompression buffer */
    if (c->decomp_size) {
        if ((c->decomp_buf = av_malloc(c->decomp_size)) == NULL) {
            av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
            return 1;
        }
    }

#ifdef CONFIG_ZLIB
    c->zstream.zalloc = Z_NULL;
    c->zstream.zfree = Z_NULL;
    c->zstream.opaque = Z_NULL;
    zret = inflateInit(&(c->zstream));
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return 1;
    }
#endif

    return 0;
}
/**
 * Initialize the QuickDraw decoder: verify the picture dimensions and
 * select 8-bit paletted output.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0)
        return 1;

    avctx->pix_fmt = PIX_FMT_PAL8;
    return 0;
}
/**
 * Init 8BPS decoder.
 *
 * Validates dimensions and derives the output pixel format plus the
 * plane count and plane order from the coded bit depth.
 *
 * @return 0 on success, non-zero on error.
 */
static int decode_init(AVCodecContext *avctx)
{
    EightBpsContext * const c = (EightBpsContext *)avctx->priv_data;

    c->avctx = avctx;
    avctx->has_b_frames = 0;

    c->pic.data[0] = NULL;

    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
        return 1;
    }

    switch (avctx->bits_per_sample) {
    case 8:
        avctx->pix_fmt = PIX_FMT_PAL8;
        c->planes = 1;
        c->planemap[0] = 0; // 1st plane is palette indexes
        if (avctx->palctrl == NULL) {
            av_log(avctx, AV_LOG_ERROR, "Error: PAL8 format but no palette from demuxer.\n");
            return -1;
        }
        break;
    case 24:
        /* let the caller negotiate the concrete RGB24 layout */
        avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
        c->planes = 3;
        c->planemap[0] = 2; // 1st plane is red
        c->planemap[1] = 1; // 2nd plane is green
        c->planemap[2] = 0; // 3rd plane is blue
        break;
    case 32:
        avctx->pix_fmt = PIX_FMT_RGBA32;
        c->planes = 4;
        /* byte position of each channel in RGBA32 depends on host endianness */
#ifdef WORDS_BIGENDIAN
        c->planemap[0] = 1; // 1st plane is red
        c->planemap[1] = 2; // 2nd plane is green
        c->planemap[2] = 3; // 3rd plane is blue
        c->planemap[3] = 0; // 4th plane is alpha???
#else
        c->planemap[0] = 2; // 1st plane is red
        c->planemap[1] = 1; // 2nd plane is green
        c->planemap[2] = 0; // 3rd plane is blue
        c->planemap[3] = 3; // 4th plane is alpha???
#endif
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Error: Unsupported color depth: %u.\n", avctx->bits_per_sample);
        return -1;
    }

    return 0;
}
/**
 * Init tscc decoder.
 *
 * Validates dimensions, selects the output pixel format from the coded
 * bit depth, allocates the worst-case RLE decompression buffer and
 * initializes the zlib inflate state.
 *
 * @return 0 on success, non-zero on failure.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    CamtasiaContext * const c = avctx->priv_data;
    int zret; // Zlib return code

    c->avctx = avctx;

    c->height = avctx->height;

    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
        return 1;
    }

    // Needed if zlib unused or init aborted before inflateInit
    memset(&(c->zstream), 0, sizeof(z_stream));
    switch(avctx->bits_per_coded_sample){
    case  8: avctx->pix_fmt = PIX_FMT_PAL8; break;
    case 16: avctx->pix_fmt = PIX_FMT_RGB555; break;
    case 24:
             avctx->pix_fmt = PIX_FMT_RGB24; /* ffdshow custom code */
             break;
    case 32: avctx->pix_fmt = PIX_FMT_RGB32; break;
    default: av_log(avctx, AV_LOG_ERROR, "Camtasia error: unknown depth %i bpp\n", avctx->bits_per_coded_sample);
             return -1;
    }
    c->bpp = avctx->bits_per_coded_sample;
    c->decomp_size = (avctx->width * c->bpp + (avctx->width + 254) / 255 + 2) * avctx->height + 2;//RLE in the 'best' case

    /* Allocate decompression buffer */
    if (c->decomp_size) {
        if ((c->decomp_buf = av_malloc(c->decomp_size)) == NULL) {
            av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
            return 1;
        }
    }

    c->zstream.zalloc = Z_NULL;
    c->zstream.zfree = Z_NULL;
    c->zstream.opaque = Z_NULL;
    zret = inflateInit(&(c->zstream));
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return 1;
    }

    return 0;
}
/**
 * Initialize the RoQ encoder.
 *
 * Validates that the dimensions are multiples of 16 (the RoQ block
 * size), sets up the double-buffered frame pointers and allocates the
 * per-block motion vector arrays.
 *
 * @return 0 on success, -1 on invalid dimensions or allocation failure.
 */
static int roq_encode_init(AVCodecContext *avctx)
{
    RoqContext *enc = avctx->priv_data;

    av_init_random(1, &enc->randctx);

    enc->framesSinceKeyframe = 0;
    if ((avctx->width & 0xf) || (avctx->height & 0xf)) {
        av_log(avctx, AV_LOG_ERROR, "Dimensions must be divisible by 16\n");
        return -1;
    }

    if (((avctx->width)&(avctx->width-1))||((avctx->height)&(avctx->height-1)))
        av_log(avctx, AV_LOG_ERROR, "Warning: dimensions not power of two\n");

    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n",
               avctx->width, avctx->height);
        return -1;
    }

    enc->width = avctx->width;
    enc->height = avctx->height;

    enc->framesSinceKeyframe = 0;
    enc->first_frame = 1;

    enc->last_frame    = &enc->frames[0];
    enc->current_frame = &enc->frames[1];

    /* one motion vector per 4x4 and per 8x8 block */
    enc->this_motion4 = av_mallocz((enc->width*enc->height/16)*sizeof(motion_vect));
    enc->last_motion4 = av_malloc ((enc->width*enc->height/16)*sizeof(motion_vect));
    enc->this_motion8 = av_mallocz((enc->width*enc->height/64)*sizeof(motion_vect));
    enc->last_motion8 = av_malloc ((enc->width*enc->height/64)*sizeof(motion_vect));

    /* fix: these allocations were previously never checked */
    if (!enc->this_motion4 || !enc->last_motion4 ||
        !enc->this_motion8 || !enc->last_motion8) {
        av_freep(&enc->this_motion4);
        av_freep(&enc->last_motion4);
        av_freep(&enc->this_motion8);
        av_freep(&enc->last_motion8);
        av_log(avctx, AV_LOG_ERROR, "Can't allocate motion buffers.\n");
        return -1;
    }

    return 0;
}
/**
 * Initialize the QuickTime Animation (RLE) encoder.
 *
 * Derives the per-pixel byte size from the input pixel format,
 * allocates the per-row RLE work tables, the previous-frame picture
 * and computes the worst-case output buffer size.
 *
 * @return 0 on success, -1 on error.
 */
static av_cold int qtrle_encode_init(AVCodecContext *avctx)
{
    QtrleEncContext *s = avctx->priv_data;

    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
        return -1;
    }
    s->avctx = avctx;

    switch (avctx->pix_fmt) {
    case PIX_FMT_RGB555BE:
        s->pixel_size = 2;
        break;
    case PIX_FMT_RGB24:
        s->pixel_size = 3;
        break;
    case PIX_FMT_ARGB:
        s->pixel_size = 4;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported colorspace.\n");
        /* fix: previously fell through with pixel_size left at 0 */
        return -1;
    }
    avctx->bits_per_coded_sample = s->pixel_size*8;

    s->rlecode_table = av_mallocz(s->avctx->width);
    s->skip_table    = av_mallocz(s->avctx->width);
    s->length_table  = av_mallocz((s->avctx->width + 1)*sizeof(int));
    if (!s->skip_table || !s->length_table || !s->rlecode_table) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating memory.\n");
        return -1;
    }
    if (avpicture_alloc(&s->previous_frame, avctx->pix_fmt, avctx->width, avctx->height) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating picture\n");
        return -1;
    }

    /* worst-case encoded frame size */
    s->max_buf_size = s->avctx->width*s->avctx->height*s->pixel_size /* image base material */
                      + 15                                           /* header + footer */
                      + s->avctx->height*2                           /* skip code+rle end */
                      + s->avctx->width/MAX_RLE_BULK + 1             /* rle codes */;
    avctx->coded_frame = &s->frame;
    return 0;
}
/**
 * Set up the MM video decoder: paletted output, a dimension sanity
 * check and a reusable reference frame.
 */
static av_cold int mm_decode_init(AVCodecContext *avctx)
{
    MmContext * const ctx = avctx->priv_data;

    ctx->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_PAL8;

    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height))
        return -1;

    /* the frame is kept as a reference for inter-coded frames */
    ctx->frame.reference = 1;
    if (avctx->get_buffer(avctx, &ctx->frame)) {
        av_log(avctx, AV_LOG_ERROR, "mmvideo: get_buffer() failed\n");
        return -1;
    }

    return 0;
}
/**
 * Handle the first Schroedinger access unit.
 *
 * Propagates the sequence parameters (dimensions, chroma format, frame
 * rate) from the Schroedinger decoder to the AVCodecContext and
 * allocates the output picture once.  On invalid dimensions the context
 * dimensions are zeroed; on an unsupported chroma format the function
 * returns without allocating a picture.
 */
static void libschroedinger_handle_first_access_unit(AVCodecContext *avccontext)
{
    FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;
    SchroDecoder *decoder = p_schro_params->decoder;

    p_schro_params->format = schro_decoder_get_video_format (decoder);

    /* Tell FFmpeg about sequence details. */
    if(avcodec_check_dimensions(avccontext, p_schro_params->format->width,
                                p_schro_params->format->height) < 0) {
        av_log(avccontext, AV_LOG_ERROR, "invalid dimensions (%dx%d)\n",
               p_schro_params->format->width, p_schro_params->format->height);
        avccontext->height = avccontext->width = 0;
        return;
    }
    avccontext->height = p_schro_params->format->height;
    avccontext->width = p_schro_params->format->width;
    avccontext->pix_fmt = GetFfmpegChromaFormat(p_schro_params->format->chroma_format);

    if (ff_get_schro_frame_format( p_schro_params->format->chroma_format,
                                   &p_schro_params->frame_format) == -1) {
        av_log (avccontext, AV_LOG_ERROR,
                "This codec currently only supports planar YUV 4:2:0, 4:2:2 "
                "and 4:4:4 formats.\n");
        return;
    }

    /* time_base is the inverse of the frame rate, hence the num/den swap */
    avccontext->time_base.den = p_schro_params->format->frame_rate_numerator;
    avccontext->time_base.num = p_schro_params->format->frame_rate_denominator;

    /* allocate the output picture only on the first access unit */
    if (p_schro_params->dec_pic.data[0] == NULL) {
        avpicture_alloc(&p_schro_params->dec_pic, avccontext->pix_fmt,
                        avccontext->width, avccontext->height);
    }
}
/**
 * (Re)configure the NuppelVideo decoder for new frame dimensions
 * and/or a new quality level.
 *
 * @return 1 on success, 0 on failure (note: inverted w.r.t. the usual
 *         0-on-success convention; callers rely on this).
 */
static int codec_reinit(AVCodecContext *avctx, int width, int height, int quality)
{
    NuvContext *c = avctx->priv_data;

    /* dimensions are rounded up to even values */
    width  = (width  + 1) & ~1;
    height = (height + 1) & ~1;

    if (quality >= 0)
        get_quant_quality(c, quality);

    if (width != c->width || height != c->height) {
        void *tmp;

        /* fix: arguments are (width, height); they were passed swapped */
        if (avcodec_check_dimensions(avctx, width, height) < 0)
            return 0;
        avctx->width = c->width = width;
        avctx->height = c->height = height;
        c->decomp_size = c->height * c->width * 3 / 2;
        /* fix: keep the old buffer on realloc failure instead of
         * overwriting (and leaking) it with NULL */
        tmp = av_realloc(c->decomp_buf, c->decomp_size + AV_LZO_OUTPUT_PADDING);
        if (!tmp) {
            av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
            return 0;
        }
        c->decomp_buf = tmp;
        rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
    } else if (quality != c->quality)
        rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);

    return 1;
}
/**
 * Initialize the NuppelVideo decoder.
 *
 * Rounds the dimensions up to even values, allocates the LZO
 * decompression buffer and sets up the RTJpeg decoder, optionally
 * using quantization tables from extradata.
 *
 * @return 0 on success, non-zero on error.
 */
static int decode_init(AVCodecContext *avctx)
{
    NuvContext *c = avctx->priv_data;

    /* dimensions are rounded up to even values */
    avctx->width  = (avctx->width  + 1) & ~1;
    avctx->height = (avctx->height + 1) & ~1;

    /* fix: arguments are (width, height); they were passed swapped */
    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
        return 1;
    }
    avctx->pix_fmt = PIX_FMT_YUV420P;
    c->pic.data[0] = NULL;
    c->width = avctx->width;
    c->height = avctx->height;
    c->decomp_size = c->height * c->width * 3 / 2;
    /* LZO may overread slightly; pad the buffer accordingly */
    c->decomp_buf = av_malloc(c->decomp_size + LZO_OUTPUT_PADDING);
    if (!c->decomp_buf) {
        av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
        return 1;
    }
    dsputil_init(&c->dsp, avctx);
    /* quantization tables may be overridden via extradata */
    if (avctx->extradata_size)
        get_quant(avctx, c, avctx->extradata, avctx->extradata_size);
    rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
    return 0;
}
/**
 * Parse a PNM/PAM header and configure the codec context.
 *
 * Supports P4 (PBM), P5 (PGM / PGMYUV), P6 (PPM) and P7 (PAM).
 * Sets avctx->width/height/pix_fmt and s->maxval.
 *
 * @return 0 on success, -1 on a malformed or unsupported header.
 */
int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s)
{
    char buf1[32], tuple_type[32];
    int h, w, depth, maxval;

    pnm_get(s, buf1, sizeof(buf1));
    if (!strcmp(buf1, "P4")) {
        avctx->pix_fmt = PIX_FMT_MONOWHITE;
    } else if (!strcmp(buf1, "P5")) {
        if (avctx->codec_id == CODEC_ID_PGMYUV)
            avctx->pix_fmt = PIX_FMT_YUV420P;
        else
            avctx->pix_fmt = PIX_FMT_GRAY8;
    } else if (!strcmp(buf1, "P6")) {
        avctx->pix_fmt = PIX_FMT_RGB24;
    } else if (!strcmp(buf1, "P7")) {
        /* PAM: free-form tag/value header terminated by ENDHDR */
        w = -1;
        h = -1;
        maxval = -1;
        depth = -1;
        tuple_type[0] = '\0';
        for (;;) {
            pnm_get(s, buf1, sizeof(buf1));
            if (!strcmp(buf1, "WIDTH")) {
                pnm_get(s, buf1, sizeof(buf1));
                w = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "HEIGHT")) {
                pnm_get(s, buf1, sizeof(buf1));
                h = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "DEPTH")) {
                pnm_get(s, buf1, sizeof(buf1));
                depth = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "MAXVAL")) {
                pnm_get(s, buf1, sizeof(buf1));
                maxval = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "TUPLETYPE")) {
                pnm_get(s, tuple_type, sizeof(tuple_type));
            } else if (!strcmp(buf1, "ENDHDR")) {
                break;
            } else {
                return -1;
            }
        }
        /* check that all tags are present */
        if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 ||
            tuple_type[0] == '\0' || avcodec_check_dimensions(avctx, w, h))
            return -1;

        avctx->width  = w;
        avctx->height = h;
        if (depth == 1) {
            if (maxval == 1)
                avctx->pix_fmt = PIX_FMT_MONOWHITE;
            else
                avctx->pix_fmt = PIX_FMT_GRAY8;
        } else if (depth == 3) {
            if (maxval < 256) {
                avctx->pix_fmt = PIX_FMT_RGB24;
            } else {
                av_log(avctx, AV_LOG_ERROR, "16-bit components are only supported for grayscale\n");
                avctx->pix_fmt = PIX_FMT_NONE;
                return -1;
            }
        } else if (depth == 4) {
            avctx->pix_fmt = PIX_FMT_RGB32;
        } else {
            return -1;
        }
        return 0;
    } else {
        return -1;
    }

    /* PNM (P4/P5/P6): width and height follow the magic */
    pnm_get(s, buf1, sizeof(buf1));
    avctx->width = atoi(buf1);
    if (avctx->width <= 0)
        return -1;
    pnm_get(s, buf1, sizeof(buf1));
    avctx->height = atoi(buf1);
    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height))
        return -1;

    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        pnm_get(s, buf1, sizeof(buf1));
        s->maxval = atoi(buf1);
        if (s->maxval >= 256) {
            if (avctx->pix_fmt == PIX_FMT_GRAY8) {
                avctx->pix_fmt = PIX_FMT_GRAY16BE;
                if (s->maxval != 65535)
                    avctx->pix_fmt = PIX_FMT_GRAY16;
            } else if (avctx->pix_fmt == PIX_FMT_RGB24) {
                /* fix: this branch was a separate `if`, so 16-bit
                 * grayscale (just promoted above) always fell into the
                 * error branch below and was rejected */
                if (s->maxval > 255)
                    avctx->pix_fmt = PIX_FMT_RGB48BE;
            } else {
                av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format\n");
                avctx->pix_fmt = PIX_FMT_NONE;
                return -1;
            }
        }
    }

    /* PGMYUV stores Y plus subsampled U/V; the stored height covers all
     * three planes (3/2 of the luma height) */
    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        if ((avctx->width & 1) != 0)
            return -1;
        h = (avctx->height * 2);
        if ((h % 3) != 0)
            return -1;
        h /= 3;
        avctx->height = h;
    }
    return 0;
}
/**
 * Decode a frame with the libdirac decoder.
 *
 * Feeds the packet to libdirac and runs its parser state machine:
 * on STATE_SEQUENCE the sequence parameters are copied into the codec
 * context and an output buffer is allocated; on STATE_PICTURE_AVAIL
 * the decoded picture is returned to the caller.
 *
 * @param data      output AVPicture
 * @param data_size set to sizeof(AVPicture) when a picture is returned
 * @return number of bytes consumed, or a negative value on error
 */
static int libdirac_decode_frame(AVCodecContext *avccontext, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;

    FfmpegDiracDecoderParams *p_dirac_params = avccontext->priv_data;
    AVPicture *picture = data;
    AVPicture pic;
    int pict_size;
    unsigned char *buffer[3];

    *data_size = 0;

    if (buf_size > 0) {
        /* set data to decode into buffer */
        dirac_buffer(p_dirac_params->p_decoder, buf, buf + buf_size);
        /* parse-unit header flags indicating a non-intra, non-reference
         * picture imply the stream contains B-frames */
        if ((buf[4] & 0x08) == 0x08 && (buf[4] & 0x03))
            avccontext->has_b_frames = 1;
    }
    while (1) {
        /* parse data and process result */
        DecoderState state = dirac_parse(p_dirac_params->p_decoder);
        switch (state) {
        case STATE_BUFFER:
            /* decoder needs more input */
            return buf_size;

        case STATE_SEQUENCE:
        {
            /* tell FFmpeg about sequence details */
            dirac_sourceparams_t *src_params = &p_dirac_params->p_decoder->src_params;

            if (avcodec_check_dimensions(avccontext, src_params->width, src_params->height) < 0) {
                av_log(avccontext, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n",
                       src_params->width, src_params->height);
                avccontext->height = avccontext->width = 0;
                return -1;
            }

            avccontext->height = src_params->height;
            avccontext->width = src_params->width;

            avccontext->pix_fmt = GetFfmpegChromaFormat(src_params->chroma);
            if (avccontext->pix_fmt == PIX_FMT_NONE) {
                av_log(avccontext, AV_LOG_ERROR,
                       "Dirac chroma format %d not supported currently\n",
                       src_params->chroma);
                return -1;
            }

            /* time_base is the inverse of the frame rate */
            avccontext->time_base.den = src_params->frame_rate.numerator;
            avccontext->time_base.num = src_params->frame_rate.denominator;

            /* calculate output dimensions */
            avpicture_fill(&pic, NULL, avccontext->pix_fmt, avccontext->width, avccontext->height);

            pict_size = avpicture_get_size(avccontext->pix_fmt, avccontext->width, avccontext->height);

            /* allocate output buffer */
            if (!p_dirac_params->p_out_frame_buf)
                p_dirac_params->p_out_frame_buf = av_malloc(pict_size);
            buffer[0] = p_dirac_params->p_out_frame_buf;
            buffer[1] = p_dirac_params->p_out_frame_buf +
                        pic.linesize[0] * avccontext->height;
            buffer[2] = buffer[1] + pic.linesize[1] * src_params->chroma_height;

            /* tell Dirac about output destination */
            dirac_set_buf(p_dirac_params->p_decoder, buffer, NULL);
            break;
        }
        case STATE_SEQUENCE_END:
            break;

        case STATE_PICTURE_AVAIL:
            /* fill picture with current buffer data from Dirac */
            avpicture_fill(picture, p_dirac_params->p_out_frame_buf,
                           avccontext->pix_fmt, avccontext->width, avccontext->height);
            *data_size = sizeof(AVPicture);
            return buf_size;

        case STATE_INVALID:
            return -1;

        default:
            break;
        }
    }

    return buf_size;
}
/**
 * Decode an RV20 picture header.
 *
 * Parses the picture type, qscale, the sequence/timestamp counter and
 * an optional mid-stream resolution change (encoded as an index into a
 * dimension table stored in extradata).
 *
 * @return the number of macroblocks remaining in the picture on
 *         success, FRAME_SKIPPED to drop a mis-ordered B-frame, or -1
 *         on error.
 */
static int rv20_decode_picture_header(MpegEncContext *s)
{
    int seq, mb_pos, i;

    if (s->avctx->sub_id == 0x30202002 || s->avctx->sub_id == 0x30203002) {
        if (get_bits(&s->gb, 3)) {
            av_log(s->avctx, AV_LOG_ERROR, "unknown triplet set\n");
            return -1;
        }
    }

    i = get_bits(&s->gb, 2);
    switch (i) {
    case 0: s->pict_type = FF_I_TYPE; break;
    case 1: s->pict_type = FF_I_TYPE; break; //hmm ...
    case 2: s->pict_type = FF_P_TYPE; break;
    case 3: s->pict_type = FF_B_TYPE; break;
    default:
        av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n");
        return -1;
    }

    /* a B-frame needs a prior reference picture to predict from */
    if (s->last_picture_ptr == NULL && s->pict_type == FF_B_TYPE) {
        av_log(s->avctx, AV_LOG_ERROR, "early B pix\n");
        return -1;
    }

    if (get_bits1(&s->gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "unknown bit set\n");
        return -1;
    }

    s->qscale = get_bits(&s->gb, 5);
    if (s->qscale == 0) {
        av_log(s->avctx, AV_LOG_ERROR, "error, qscale:0\n");
        return -1;
    }

    if (s->avctx->sub_id == 0x30203002) {
        if (get_bits1(&s->gb)) {
            av_log(s->avctx, AV_LOG_ERROR, "unknown bit2 set\n");
            return -1;
        }
    }

    if (s->avctx->has_b_frames) {
        int f, new_w, new_h;
        /* number of alternate resolutions encoded in extradata byte 1 */
        int v = s->avctx->extradata_size >= 4 ? 7 & ((uint8_t*)s->avctx->extradata)[1] : 0;

        if (get_bits1(&s->gb)) {
            av_log(s->avctx, AV_LOG_ERROR, "unknown bit3 set\n");
//            return -1;
        }
        seq = get_bits(&s->gb, 13) << 2;

        f = get_bits(&s->gb, av_log2(v) + 1);
        if (f) {
            /* index into the dimension table stored in extradata */
            new_w = 4 * ((uint8_t*)s->avctx->extradata)[6 + 2*f];
            new_h = 4 * ((uint8_t*)s->avctx->extradata)[7 + 2*f];
        } else {
            new_w = s->width; //FIXME wrong we of course must save the original in the context
            new_h = s->height;
        }
        if (new_w != s->width || new_h != s->height) {
            av_log(s->avctx, AV_LOG_DEBUG, "attempting to change resolution to %dx%d\n", new_w, new_h);
            /* fix: arguments are (width, height); they were passed swapped */
            if (avcodec_check_dimensions(s->avctx, new_w, new_h) < 0)
                return -1;
            MPV_common_end(s);
            s->width  = s->avctx->width  = new_w;
            s->height = s->avctx->height = new_h;
            if (MPV_common_init(s) < 0)
                return -1;
        }

        if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
            av_log(s->avctx, AV_LOG_DEBUG, "F %d/%d\n", f, v);
        }
    } else {
        seq = get_bits(&s->gb, 8) * 128;
    }

//     if(s->avctx->sub_id <= 0x20201002){ //0x20201002 definitely needs this
    mb_pos = ff_h263_decode_mba(s);
/*    }else{
        mb_pos= get_bits(&s->gb, av_log2(s->mb_num-1)+1);
        s->mb_x= mb_pos % s->mb_width;
        s->mb_y= mb_pos / s->mb_width;
    }*/

    /* unfold the 15-bit sequence counter into the running timestamp */
    seq |= s->time & ~0x7FFF;
    if (seq - s->time >  0x4000) seq -= 0x8000;
    if (seq - s->time < -0x4000) seq += 0x8000;
    if (seq != s->time) {
        if (s->pict_type != FF_B_TYPE) {
            s->time = seq;
            s->pp_time = s->time - s->last_non_b_time;
            s->last_non_b_time = s->time;
        } else {
            s->time = seq;
            s->pb_time = s->pp_time - (s->last_non_b_time - s->time);
            if (s->pp_time <= s->pb_time ||
                s->pp_time <= s->pp_time - s->pb_time ||
                s->pp_time <= 0) {
                av_log(s->avctx, AV_LOG_DEBUG, "messed up order, possible from seeking? skipping current b frame\n");
                return FRAME_SKIPPED;
            }
            ff_mpeg4_init_direct_mv(s);
        }
    }

    s->no_rounding = get_bits1(&s->gb);

    s->f_code = 1;
    s->unrestricted_mv = 1;
    s->h263_aic = s->pict_type == FF_I_TYPE;
//    s->alt_inter_vlc=1;
//    s->obmc=1;
//    s->umvplus=1;
    s->modified_quant = 1;
    s->loop_filter = 1;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
        av_log(s->avctx, AV_LOG_INFO, "num:%5d x:%2d y:%2d type:%d qscale:%2d rnd:%d\n",
               seq, s->mb_x, s->mb_y, s->pict_type, s->qscale, s->no_rounding);
    }

    assert(s->pict_type != FF_B_TYPE || !s->low_delay);

    return s->mb_width*s->mb_height - mb_pos;
}
/**
 * Decode the picture header of a Sorenson/FLV H.263 frame.
 *
 * Parses start code, bitstream version, timestamp, one of the seven
 * coded dimension formats, picture type and qscale, then skips the PEI
 * extension bytes.
 *
 * @return 0 on success, -1 on a malformed header.
 */
int ff_flv_decode_picture_header(MpegEncContext *s)
{
    int format, width, height;

    /* picture header */
    if (get_bits_long(&s->gb, 17) != 1) {
        av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
        return -1;
    }
    format = get_bits(&s->gb, 5);
    if (format != 0 && format != 1) {
        av_log(s->avctx, AV_LOG_ERROR, "Bad picture format\n");
        return -1;
    }
    s->h263_flv = format+1;
    s->picture_number = get_bits(&s->gb, 8); /* picture timestamp */
    format = get_bits(&s->gb, 3);
    /* formats 0/1 carry explicit dimensions; 2-6 are fixed sizes */
    switch (format) {
    case 0:
        width = get_bits(&s->gb, 8);
        height = get_bits(&s->gb, 8);
        break;
    case 1:
        width = get_bits(&s->gb, 16);
        height = get_bits(&s->gb, 16);
        break;
    case 2:
        width = 352;
        height = 288;
        break;
    case 3:
        width = 176;
        height = 144;
        break;
    case 4:
        width = 128;
        height = 96;
        break;
    case 5:
        width = 320;
        height = 240;
        break;
    case 6:
        width = 160;
        height = 120;
        break;
    default:
        width = height = 0;
        break;
    }
    if (avcodec_check_dimensions(s->avctx, width, height))
        return -1;
    s->width = width;
    s->height = height;

    s->pict_type = FF_I_TYPE + get_bits(&s->gb, 2);
    /* frame types above P are disposable (not used as references) */
    s->dropable = s->pict_type > FF_P_TYPE;
    if (s->dropable)
        s->pict_type = FF_P_TYPE;

    skip_bits1(&s->gb); /* deblocking flag */
    s->chroma_qscale = s->qscale = get_bits(&s->gb, 5);

    s->h263_plus = 0;

    s->unrestricted_mv = 1;
    s->h263_long_vectors = 0;

    /* PEI */
    while (get_bits1(&s->gb) != 0) {
        skip_bits(&s->gb, 8);
    }
    s->f_code = 1;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
        av_log(s->avctx, AV_LOG_DEBUG, "%c esc_type:%d, qp:%d num:%d\n",
               s->dropable ? 'D' : av_get_pict_type_char(s->pict_type),
               s->h263_flv-1, s->qscale, s->picture_number);
    }

    s->y_dc_scale_table = s->c_dc_scale_table = ff_mpeg1_dc_scale_table;

    return 0;
}
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; const uint8_t *buf_end = buf + avpkt->size; KgvContext * const c = avctx->priv_data; int offsets[7]; uint16_t *out, *prev; int outcnt = 0, maxcnt; int w, h, i; if (avpkt->size < 2) return -1; w = (buf[0] + 1) * 8; h = (buf[1] + 1) * 8; buf += 2; if (avcodec_check_dimensions(avctx, w, h)) return -1; if (w != avctx->width || h != avctx->height) avcodec_set_dimensions(avctx, w, h); maxcnt = w * h; out = av_realloc(c->cur, w * h * 2); if (!out) return -1; c->cur = out; prev = av_realloc(c->prev, w * h * 2); if (!prev) return -1; c->prev = prev; for (i = 0; i < 7; i++) offsets[i] = -1; while (outcnt < maxcnt && buf_end - 2 > buf) { int code = AV_RL16(buf); buf += 2; if (!(code & 0x8000)) { out[outcnt++] = code; // rgb555 pixel coded directly } else { int count; uint16_t *inp; if ((code & 0x6000) == 0x6000) { // copy from previous frame int oidx = (code >> 10) & 7; int start; count = (code & 0x3FF) + 3; if (offsets[oidx] < 0) { if (buf_end - 3 < buf) break; offsets[oidx] = AV_RL24(buf); buf += 3; } start = (outcnt + offsets[oidx]) % maxcnt; if (maxcnt - start < count) break; inp = prev + start; } else { // copy from earlier in this frame int offset = (code & 0x1FFF) + 1; if (!(code & 0x6000)) { count = 2; } else if ((code & 0x6000) == 0x2000) { count = 3; } else { if (buf_end - 1 < buf) break; count = 4 + *buf++; } if (outcnt < offset) break; inp = out + outcnt - offset; } if (maxcnt - outcnt < count) break; for (i = 0; i < count; i++) out[outcnt++] = inp[i]; }
/**
 * Decode one XSUB subtitle packet.
 *
 * Parses the "[hh:mm:ss:ms-hh:mm:ss:ms]" timecode header, the rect
 * geometry and 4-colour palette, then RLE-decodes the interlaced
 * bitmap (even lines first, then odd lines).
 *
 * @param data      output AVSubtitle
 * @param data_size set to 1 when a subtitle was produced
 * @return number of bytes consumed, or -1 on error
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size)
{
    AVSubtitle *sub = data;
    const uint8_t *buf_end = buf + buf_size;
    uint8_t *bitmap;
    int w, h, x, y, rlelen, i;
    GetBitContext gb;

    // check that at least header fits
    if (buf_size < 27 + 7 * 2 + 4 * 3) {
        av_log(avctx, AV_LOG_ERROR, "coded frame too small\n");
        return -1;
    }

    // read start and end time
    if (buf[0] != '[' || buf[13] != '-' || buf[26] != ']') {
        av_log(avctx, AV_LOG_ERROR, "invalid time code\n");
        return -1;
    }
    sub->start_display_time = parse_timecode(buf + 1);
    sub->end_display_time = parse_timecode(buf + 14);
    buf += 27;

    // read header
    w = bytestream_get_le16(&buf);
    h = bytestream_get_le16(&buf);
    if (avcodec_check_dimensions(avctx, w, h) < 0)
        return -1;
    x = bytestream_get_le16(&buf);
    y = bytestream_get_le16(&buf);
    // skip bottom right position, it gives no new information
    bytestream_get_le16(&buf);
    bytestream_get_le16(&buf);
    rlelen = bytestream_get_le16(&buf);

    // allocate sub and set values
    if (!sub->rects) {
        sub->rects = av_mallocz(sizeof(AVSubtitleRect));
        /* fix: allocation was previously dereferenced unchecked */
        if (!sub->rects)
            return -1;
        sub->num_rects = 1;
    }
    av_freep(&sub->rects[0].bitmap);
    sub->rects[0].x = x; sub->rects[0].y = y;
    sub->rects[0].w = w; sub->rects[0].h = h;
    sub->rects[0].linesize = w;
    sub->rects[0].bitmap = av_malloc(w * h);
    sub->rects[0].nb_colors = 4;
    sub->rects[0].rgba_palette = av_malloc(sub->rects[0].nb_colors * 4);
    /* fix: bitmap/palette allocations were previously unchecked */
    if (!sub->rects[0].bitmap || !sub->rects[0].rgba_palette) {
        av_log(avctx, AV_LOG_ERROR, "cannot allocate subtitle buffers\n");
        return -1;
    }

    // read palette
    for (i = 0; i < sub->rects[0].nb_colors; i++)
        sub->rects[0].rgba_palette[i] = bytestream_get_be24(&buf);
    // make all except background (first entry) non-transparent
    for (i = 1; i < sub->rects[0].nb_colors; i++)
        sub->rects[0].rgba_palette[i] |= 0xff000000;

    // process RLE-compressed data
    rlelen = FFMIN(rlelen, buf_end - buf);
    init_get_bits(&gb, buf, rlelen * 8);
    bitmap = sub->rects[0].bitmap;
    for (y = 0; y < h; y++) {
        // interlaced: do odd lines
        if (y == (h + 1) / 2)
            bitmap = sub->rects[0].bitmap + w;
        for (x = 0; x < w; ) {
            int log2 = ff_log2_tab[show_bits(&gb, 8)];
            int run = get_bits(&gb, 14 - 4 * (log2 >> 1));
            int color = get_bits(&gb, 2);
            run = FFMIN(run, w - x);
            // run length 0 means till end of row
            if (!run)
                run = w - x;
            memset(bitmap, color, run);
            bitmap += run;
            x += run;
        }
        // interlaced, skip every second line
        bitmap += w;
        align_get_bits(&gb);
    }
    *data_size = 1;
    return buf_size;
}
/**
 * Decode one Pictor/PC Paint frame.
 *
 * Parses the 0x1234 header, an optional extended palette block (CGA,
 * EGA or direct RGB variants), then either RLE-decodes the plane data
 * or bails out on uncompressed images.
 *
 * @param data      output AVFrame
 * @param data_size set to sizeof(AVFrame) when a frame was produced
 * @return number of bytes consumed, or a negative value on error
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    PicContext *s = avctx->priv_data;
    int buf_size = avpkt->size;
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end = avpkt->data + buf_size;
    uint32_t *palette;
    int bits_per_plane, bpp, etype, esize, npal;
    int i, x, y, plane;

    if (buf_size < 11)
        return AVERROR_INVALIDDATA;

    if (bytestream_get_le16(&buf) != 0x1234)
        return AVERROR_INVALIDDATA;
    s->width  = bytestream_get_le16(&buf);
    s->height = bytestream_get_le16(&buf);
    buf += 4;
    bits_per_plane = *buf & 0xF;
    s->nb_planes   = (*buf++ >> 4) + 1;
    bpp = s->nb_planes ? bits_per_plane*s->nb_planes : bits_per_plane;
    if (bits_per_plane > 8 || bpp < 1 || bpp > 32) {
        /* fix: the log context must be avctx, not the private context */
        av_log_ask_for_sample(avctx, "unsupported bit depth\n");
        return AVERROR_INVALIDDATA;
    }

    if (*buf == 0xFF) {
        buf += 2;
        etype = bytestream_get_le16(&buf);
        esize = bytestream_get_le16(&buf);
        if (buf_end - buf < esize)
            return AVERROR_INVALIDDATA;
    } else {
        etype = -1;
        esize = 0;
    }

    avctx->pix_fmt = PIX_FMT_PAL8;

    /* fix: was `&&`, so a change in only one dimension skipped the
     * reallocation and left the frame size out of sync */
    if (s->width != avctx->width || s->height != avctx->height) {
        if (avcodec_check_dimensions(avctx, s->width, s->height) < 0)
            return -1;
        avcodec_set_dimensions(avctx, s->width, s->height);
        if (s->frame.data[0])
            avctx->release_buffer(avctx, &s->frame);
    }

    if (avctx->get_buffer(avctx, &s->frame) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    memset(s->frame.data[0], 0, s->height * s->frame.linesize[0]);
    s->frame.pict_type           = FF_I_TYPE;
    s->frame.palette_has_changed = 1;

    palette = (uint32_t*)s->frame.data[1];
    if (etype == 1 && esize > 1 && *buf < 6) {
        /* one of the predefined CGA mode 4/5 palettes */
        int idx = *buf;
        npal = 4;
        for (i = 0; i < npal; i++)
            palette[i] = ff_cga_palette[ cga_mode45_index[idx][i] ];
    } else if (etype == 2) {
        npal = FFMIN(esize, 16);
        for (i = 0; i < npal; i++)
            palette[i] = ff_cga_palette[ FFMIN(buf[i], 16)];
    } else if (etype == 3) {
        npal = FFMIN(esize, 16);
        for (i = 0; i < npal; i++)
            palette[i] = ff_ega_palette[ FFMIN(buf[i], 63)];
    } else if (etype == 4 || etype == 5) {
        /* direct RGB triplets, 6-bit components */
        npal = FFMIN(esize / 3, 256);
        for (i = 0; i < npal; i++)
            palette[i] = AV_RB24(buf + i*3) << 2;
    } else {
        /* no palette block: fall back to defaults for the bit depth */
        if (bpp == 1) {
            npal = 2;
            palette[0] = 0x000000;
            palette[1] = 0xFFFFFF;
        } else if (bpp == 2) {
            npal = 4;
            for (i = 0; i < npal; i++)
                palette[i] = ff_cga_palette[ cga_mode45_index[0][i] ];
        } else {
            npal = 16;
            memcpy(palette, ff_cga_palette, npal * 4);
        }
    }
    // fill remaining palette entries
    memset(palette + npal, 0, AVPALETTE_SIZE - npal * 4);
    buf += esize;

    x = 0;
    y = s->height - 1;
    plane = 0;
    if (bytestream_get_le16(&buf)) {
        /* RLE-compressed: a sequence of marker-escaped runs per block */
        while (buf_end - buf >= 6) {
            const uint8_t *buf_pend = buf + FFMIN(AV_RL16(buf), buf_end - buf);
            //ignore uncompressed block size reported at buf[2]
            int marker = buf[4];
            buf += 5;

            while (plane < s->nb_planes && buf_pend - buf >= 1) {
                int run = 1;
                int val = *buf++;
                if (val == marker) {
                    run = *buf++;
                    if (run == 0)
                        run = bytestream_get_le16(&buf);
                    val = *buf++;
                }
                if (buf > buf_end)
                    break;

                if (bits_per_plane == 8) {
                    picmemset_8bpp(s, val, run, &x, &y);
                    if (y < 0)
                        break;
                } else {
                    picmemset(s, val, run, &x, &y, &plane, bits_per_plane);
                }
            }
        }
    } else {
        /* fix: the log context must be avctx, not the private context */
        av_log_ask_for_sample(avctx, "uncompressed image\n");
        return buf_size;
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;
    return buf_size;
}
/**
 * Decode a JPEG2000 frame via libopenjpeg.
 *
 * Detects raw J2K codestream vs JP2 container by signature, decodes through
 * the OpenJPEG API, then copies/interleaves component planes into the AVFrame
 * with per-component right-shifts to reduce >8-bit precision to 8 bits.
 *
 * NOTE(review): this function appears TRUNCATED in this chunk — it contains
 * `goto done;` but the `done:` label and the tail (output assignment,
 * opj_image_destroy / opj_destroy_decompress cleanup, return) are not
 * visible here. Code below is kept byte-identical.
 */
static int libopenjpeg_decode_frame(AVCodecContext *avctx, void *data,
                                    int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    LibOpenJPEGContext *ctx = avctx->priv_data;
    AVFrame *picture = &ctx->image, *output = data;
    opj_dinfo_t *dec;
    opj_cio_t *stream;
    opj_image_t *image;
    int width, height, has_alpha = 0, ret = -1;
    int x, y, index;
    uint8_t *img_ptr;
    int adjust[4];

    *data_size = 0;

    // Check if input is a raw jpeg2k codestream or in jp2 wrapping
    if((AV_RB32(buf) == 12) &&
       (AV_RB32(buf + 4) == JP2_SIG_TYPE) &&
       (AV_RB32(buf + 8) == JP2_SIG_VALUE)) {
        dec = opj_create_decompress(CODEC_JP2);
    } else {
        dec = opj_create_decompress(CODEC_J2K);
    }

    if(!dec) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing decoder.\n");
        return -1;
    }
    // no event manager: suppress libopenjpeg's own logging callbacks
    opj_set_event_mgr((opj_common_ptr)dec, NULL, NULL);

    // Tie decoder with decoding parameters
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);
    if(!stream) {
        av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
        opj_destroy_decompress(dec);
        return -1;
    }

    // Decode the codestream
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);
    if(!image) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
        opj_destroy_decompress(dec);
        return -1;
    }

    // dimensions are taken from the first component
    width  = image->comps[0].w;
    height = image->comps[0].h;
    if(avcodec_check_dimensions(avctx, width, height) < 0) {
        av_log(avctx, AV_LOG_ERROR, "%dx%d dimension invalid.\n", width, height);
        goto done;
    }
    avcodec_set_dimensions(avctx, width, height);

    switch(image->numcomps) {
        case 1:
            avctx->pix_fmt = PIX_FMT_GRAY8;
            break;
        case 3:
            // components must match in size/precision to interleave as RGB
            if(check_image_attributes(image)) {
                avctx->pix_fmt = PIX_FMT_RGB24;
            } else {
                avctx->pix_fmt = PIX_FMT_GRAY8;
                av_log(avctx, AV_LOG_ERROR, "Only first component will be used.\n");
            }
            break;
        case 4:
            has_alpha = 1;
            avctx->pix_fmt = PIX_FMT_RGB32;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "%d components unsupported.\n", image->numcomps);
            goto done;
    }

    if(picture->data[0])
        avctx->release_buffer(avctx, picture);

    if(avctx->get_buffer(avctx, picture) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Couldn't allocate image buffer.\n");
        return -1;
    }

    // per-component shift to bring >8-bit precision down to 8 bits
    for(x = 0; x < image->numcomps; x++) {
        adjust[x] = FFMAX(image->comps[x].prec - 8, 0);
    }

    // interleave planar component data into the packed output frame
    for(y = 0; y < height; y++) {
        index = y*width;
        img_ptr = picture->data[0] + y*picture->linesize[0];
        for(x = 0; x < width; x++, index++) {
            *img_ptr++ = image->comps[0].data[index] >> adjust[0];
            if(image->numcomps > 2 && check_image_attributes(image)) {
                *img_ptr++ = image->comps[1].data[index] >> adjust[1];
                *img_ptr++ = image->comps[2].data[index] >> adjust[2];
                if(has_alpha)
                    *img_ptr++ = image->comps[3].data[index] >> adjust[3];
            }
        }
    }
    /* NOTE(review): function body continues (done: label, cleanup, return)
     * beyond the end of this chunk. */
/**
 * Open and configure a Video4Linux (V4L1) grab device.
 *
 * Probes capabilities (VIDIOCGCAP), optionally autodetects the capture
 * window, selects a palette/depth matching the requested pixel format,
 * sets the TV standard, unmutes audio (saving prior state in
 * s->audio_saved), then starts capture either via mmap'ed frame buffers
 * (VIDIOCGMBUF/VIDIOCMCAPTURE) or read()-based access (VIDIOCSWIN/
 * VIDIOCCAPTURE). Fills in the new AVStream's codec parameters.
 *
 * @return 0 on success, negative AVERROR on failure.
 */
static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    int video_fd;
    int desired_palette, desired_depth;
    struct video_tuner tuner;
    struct video_audio audio;
    struct video_picture pict;
    int j;
    int vformat_num = FF_ARRAY_ELEMS(video_formats);

    if (ap->time_base.den <= 0) {
        av_log(s1, AV_LOG_ERROR, "Wrong time base (%d)\n", ap->time_base.den);
        return -1;
    }
    s->time_base = ap->time_base;

    s->video_win.width  = ap->width;
    s->video_win.height = ap->height;

    st = av_new_stream(s1, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    video_fd = open(s1->filename, O_RDWR);
    if (video_fd < 0) {
        av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
        goto fail;
    }

    if (ioctl(video_fd, VIDIOCGCAP, &s->video_cap) < 0) {
        av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
        goto fail;
    }

    if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
        av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
        goto fail;
    }

    /* no values set, autodetect them */
    if (s->video_win.width <= 0 || s->video_win.height <= 0) {
        if (ioctl(video_fd, VIDIOCGWIN, &s->video_win, sizeof(s->video_win)) < 0) {
            av_log(s1, AV_LOG_ERROR, "VIDIOCGWIN: %s\n", strerror(errno));
            goto fail;
        }
    }

    if (avcodec_check_dimensions(s1, s->video_win.width, s->video_win.height) < 0)
        return -1;

    /* map the requested pixel format to a V4L palette/depth pair */
    desired_palette = -1;
    desired_depth   = -1;
    for (j = 0; j < vformat_num; j++) {
        if (ap->pix_fmt == video_formats[j].pix_fmt) {
            desired_palette = video_formats[j].palette;
            desired_depth   = video_formats[j].depth;
            break;
        }
    }

    /* set tv standard */
    if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
        if (!strcasecmp(ap->standard, "pal"))
            tuner.mode = VIDEO_MODE_PAL;
        else if (!strcasecmp(ap->standard, "secam"))
            tuner.mode = VIDEO_MODE_SECAM;
        else
            tuner.mode = VIDEO_MODE_NTSC;
        ioctl(video_fd, VIDIOCSTUNER, &tuner);
    }

    /* unmute audio; previous state is saved so it can be restored on close */
    audio.audio = 0;
    ioctl(video_fd, VIDIOCGAUDIO, &audio);
    memcpy(&s->audio_saved, &audio, sizeof(audio));
    audio.flags &= ~VIDEO_AUDIO_MUTE;
    ioctl(video_fd, VIDIOCSAUDIO, &audio);

    ioctl(video_fd, VIDIOCGPICT, &pict);
#if 0
    printf("v4l: colour=%d hue=%d brightness=%d constrast=%d whiteness=%d\n",
           pict.colour, pict.hue, pict.brightness, pict.contrast, pict.whiteness);
#endif
    /* try to choose a suitable video format; fall back to scanning the
     * whole table if the desired palette is unknown or rejected */
    pict.palette = desired_palette;
    pict.depth   = desired_depth;
    if (desired_palette == -1 || ioctl(video_fd, VIDIOCSPICT, &pict) < 0) {
        for (j = 0; j < vformat_num; j++) {
            pict.palette = video_formats[j].palette;
            pict.depth   = video_formats[j].depth;
            if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
                break;
        }
        if (j >= vformat_num)
            goto fail1;
    }

    if (ioctl(video_fd, VIDIOCGMBUF, &s->gb_buffers) < 0) {
        /* try to use read based access */
        int val;

        s->video_win.x         = 0;
        s->video_win.y         = 0;
        s->video_win.chromakey = -1;
        s->video_win.flags     = 0;

        /* FIX: VIDIOCSWIN takes the ADDRESS of the video_window struct;
         * passing the struct by value handed the kernel the struct's first
         * word as a bogus user pointer, so the ioctl could never work. */
        if (ioctl(video_fd, VIDIOCSWIN, &s->video_win) < 0) {
            av_log(s1, AV_LOG_ERROR, "VIDIOCSWIN: %s\n", strerror(errno));
            goto fail;
        }

        s->frame_format = pict.palette;

        val = 1;
        if (ioctl(video_fd, VIDIOCCAPTURE, &val) < 0) {
            av_log(s1, AV_LOG_ERROR, "VIDIOCCAPTURE: %s\n", strerror(errno));
            goto fail;
        }

        s->time_frame = av_gettime() * s->time_base.den / s->time_base.num;
        s->use_mmap = 0;
    } else {
        s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ|PROT_WRITE,
                            MAP_SHARED, video_fd, 0);
        if ((unsigned char*)-1 == s->video_buf) {
            /* some drivers only accept private mappings */
            s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ|PROT_WRITE,
                                MAP_PRIVATE, video_fd, 0);
            if ((unsigned char*)-1 == s->video_buf) {
                av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
                goto fail;
            }
        }
        s->gb_frame = 0;
        s->time_frame = av_gettime() * s->time_base.den / s->time_base.num;

        /* start to grab the first frame */
        s->gb_buf.frame  = s->gb_frame % s->gb_buffers.frames;
        s->gb_buf.height = s->video_win.height;
        s->gb_buf.width  = s->video_win.width;
        s->gb_buf.format = pict.palette;

        if (ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
            if (errno != EAGAIN) {
            fail1:
                av_log(s1, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
            } else {
                av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not receive any video signal\n");
            }
            goto fail;
        }
        /* queue the remaining capture buffers */
        for (j = 1; j < s->gb_buffers.frames; j++) {
            s->gb_buf.frame = j;
            ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        }
        s->frame_format = s->gb_buf.format;
        s->use_mmap = 1;
    }

    /* derive frame size and pixel format from the negotiated palette */
    for (j = 0; j < vformat_num; j++) {
        if (s->frame_format == video_formats[j].palette) {
            s->frame_size = s->video_win.width * s->video_win.height *
                            video_formats[j].depth / 8;
            st->codec->pix_fmt = video_formats[j].pix_fmt;
            break;
        }
    }

    if (j >= vformat_num)
        goto fail;

    s->fd = video_fd;

    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_RAWVIDEO;
    st->codec->width      = s->video_win.width;
    st->codec->height     = s->video_win.height;
    st->codec->time_base  = s->time_base;
    st->codec->bit_rate   = s->frame_size * 1/av_q2d(st->codec->time_base) * 8;

    return 0;
 fail:
    if (video_fd >= 0)
        close(video_fd);
    return AVERROR(EIO);
}
/**
 * Decode one DPX image.
 *
 * Reads endianness from the SDPX/XPDS magic, pulls width/height/descriptor/
 * bit depth from fixed header offsets (0x304/0x320/0x323), then unpacks the
 * pixel payload found at the header-declared data offset into the frame.
 *
 * @return number of bytes consumed (buf_size) on success, -1 on failure.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf  = avpkt->data;
    int buf_size        = avpkt->size;
    DPXContext *const s = avctx->priv_data;
    AVFrame *picture    = data;
    AVFrame *const p    = &s->picture;
    uint8_t *ptr;

    int magic_num, offset, endian;
    int x, y;
    int w, h, stride, bits_per_color, descriptor, elements,
        target_packet_size, source_packet_size;

    unsigned int rgbBuffer;

    magic_num = AV_RB32(buf);
    buf += 4;

    /* Check if the files "magic number" is "SDPX" which means it uses
     * big-endian or XPDS which is for little-endian files */
    if (magic_num == AV_RL32("SDPX")) {
        endian = 0;
    } else if (magic_num == AV_RB32("SDPX")) {
        endian = 1;
    } else {
        av_log(avctx, AV_LOG_ERROR, "DPX marker not found\n");
        return -1;
    }

    /* offset to the start of the image data, read in file endianness */
    offset = read32(&buf, endian);
    // Need to end in 0x304 offset from start of file
    buf = avpkt->data + 0x304;
    w = read32(&buf, endian);
    h = read32(&buf, endian);

    // Need to end in 0x320 to read the descriptor
    buf += 20;
    descriptor = buf[0];

    // Need to end in 0x323 to read the bits per color
    buf += 3;
    avctx->bits_per_raw_sample = bits_per_color = buf[0];

    switch (descriptor) {
        case 51: // RGBA
            elements = 4;
            break;
        case 50: // RGB
            elements = 3;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported descriptor %d\n", descriptor);
            return -1;
    }

    /* choose output format and per-pixel source/target sizes */
    switch (bits_per_color) {
        case 8:
            if (elements == 4) {
                avctx->pix_fmt = PIX_FMT_RGBA;
            } else {
                avctx->pix_fmt = PIX_FMT_RGB24;
            }
            source_packet_size = elements;
            target_packet_size = elements;
            break;
        case 10:
            /* 10-bit is unpacked via read32() below;
             * source_packet_size is not used on that path */
            avctx->pix_fmt = PIX_FMT_RGB48;
            target_packet_size = 6;
            source_packet_size = elements * 2;
            break;
        case 12:
        case 16:
            if (endian) {
                avctx->pix_fmt = PIX_FMT_RGB48BE;
            } else {
                avctx->pix_fmt = PIX_FMT_RGB48LE;
            }
            target_packet_size = 6;
            source_packet_size = elements * 2;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported color depth : %d\n", bits_per_color);
            return -1;
    }

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);
    if (avcodec_check_dimensions(avctx, w, h))
        return -1;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    // Move pointer to offset from start of file
    buf = avpkt->data + offset;

    ptr    = p->data[0];
    stride = p->linesize[0];

    switch (bits_per_color) {
        case 10:
            /* NOTE: x iterates rows and y iterates columns here —
             * confusingly named but consistent with the bounds used */
            for (x = 0; x < avctx->height; x++) {
                uint16_t *dst = (uint16_t*)ptr;
                for (y = 0; y < avctx->width; y++) {
                    rgbBuffer = read32(&buf, endian);
                    // Read out the 10-bit colors and convert to 16-bit
                    *dst++ = make_16bit(rgbBuffer >> 16);
                    *dst++ = make_16bit(rgbBuffer >> 6);
                    *dst++ = make_16bit(rgbBuffer << 4);
                }
                ptr += stride;
            }
            break;
        case 8:
        case 12: // Treat 12-bit as 16-bit
        case 16:
            if (source_packet_size == target_packet_size) {
                /* packet sizes match: copy whole rows at once */
                for (x = 0; x < avctx->height; x++) {
                    memcpy(ptr, buf, target_packet_size*avctx->width);
                    ptr += stride;
                    buf += source_packet_size*avctx->width;
                }
            } else {
                /* sizes differ (e.g. source has alpha): copy pixel by pixel,
                 * dropping the trailing source bytes */
                for (x = 0; x < avctx->height; x++) {
                    uint8_t *dst = ptr;
                    for (y = 0; y < avctx->width; y++) {
                        memcpy(dst, buf, target_packet_size);
                        dst += target_packet_size;
                        buf += source_packet_size;
                    }
                    ptr += stride;
                }
            }
            break;
    }

    *picture   = s->picture;
    *data_size = sizeof(AVPicture);

    return buf_size;
}
/**
 * Decode one PCX image.
 *
 * Validates the 128-byte header, selects an output format from the
 * (nplanes, bits_per_pixel) combination, RLE-decodes each scanline, and —
 * for palettized variants — loads either the trailing 256-color VGA palette
 * or the 16-color palette embedded in the header.
 *
 * @return number of bytes consumed on success, -1 on failure.
 */
static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                            const uint8_t *buf, int buf_size)
{
    PCXContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p = &s->picture;
    int xmin, ymin, xmax, ymax;
    unsigned int w, h, bits_per_pixel, bytes_per_line, nplanes, stride, y, x,
                 bytes_per_scanline;
    uint8_t *ptr;
    uint8_t const *bufstart = buf;

    /* header: 0x0a magic, version != 1 and <= 5, RLE encoding byte == 1 */
    if (buf[0] != 0x0a || buf[1] > 5 || buf[1] == 1 || buf[2] != 1) {
        av_log(avctx, AV_LOG_ERROR, "this is not PCX encoded data\n");
        return -1;
    }

    xmin = AV_RL16(buf+ 4);
    ymin = AV_RL16(buf+ 6);
    xmax = AV_RL16(buf+ 8);
    ymax = AV_RL16(buf+10);

    if (xmax < xmin || ymax < ymin) {
        av_log(avctx, AV_LOG_ERROR, "invalid image dimensions\n");
        return -1;
    }

    w = xmax - xmin + 1;
    h = ymax - ymin + 1;

    bits_per_pixel     = buf[3];
    bytes_per_line     = AV_RL16(buf+66);
    nplanes            = buf[65];
    bytes_per_scanline = nplanes * bytes_per_line;

    /* a scanline must hold at least w pixels worth of data */
    if (bytes_per_scanline < w * bits_per_pixel * nplanes / 8) {
        av_log(avctx, AV_LOG_ERROR, "PCX data is corrupted\n");
        return -1;
    }

    /* dispatch on the (nplanes, bpp) pair packed into one value */
    switch ((nplanes<<8) + bits_per_pixel) {
        case 0x0308:   /* 3 planes x 8 bits: true color */
            avctx->pix_fmt = PIX_FMT_RGB24;
            break;
        case 0x0108:   /* single plane, or 1-bit multi-plane: palettized */
        case 0x0104:
        case 0x0102:
        case 0x0101:
        case 0x0401:
        case 0x0301:
        case 0x0201:
            avctx->pix_fmt = PIX_FMT_PAL8;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "invalid PCX file\n");
            return -1;
    }

    buf += 128;   /* skip fixed-size header */

    if (p->data[0])
        avctx->release_buffer(avctx, p);
    if (avcodec_check_dimensions(avctx, w, h))
        return -1;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    p->pict_type = FF_I_TYPE;

    ptr    = p->data[0];
    stride = p->linesize[0];

    if (nplanes == 3 && bits_per_pixel == 8) {
        /* RGB24: planes hold R, G, B rows back to back in each scanline */
        uint8_t scanline[bytes_per_scanline];

        for (y=0; y<h; y++) {
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline);

            for (x=0; x<w; x++) {
                ptr[3*x  ] = scanline[x                    ];
                ptr[3*x+1] = scanline[x+ bytes_per_line    ];
                ptr[3*x+2] = scanline[x+(bytes_per_line<<1)];
            }

            ptr += stride;
        }
    } else if (nplanes == 1 && bits_per_pixel == 8) {
        /* 256-color: pixel data, then 0x0C marker + 768-byte palette at EOF */
        uint8_t scanline[bytes_per_scanline];
        const uint8_t *palstart = bufstart + buf_size - 769;

        for (y=0; y<h; y++, ptr+=stride) {
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline);
            memcpy(ptr, scanline, w);
        }

        if (buf != palstart) {
            av_log(avctx, AV_LOG_WARNING, "image data possibly corrupted\n");
            buf = palstart;
        }
        if (*buf++ != 12) {
            av_log(avctx, AV_LOG_ERROR, "expected palette after image data\n");
            return -1;
        }
    } else if (nplanes == 1) {   /* all packed formats, max. 16 colors */
        uint8_t scanline[bytes_per_scanline];
        GetBitContext s;   /* shadows the outer context pointer in this scope */

        for (y=0; y<h; y++) {
            init_get_bits(&s, scanline, bytes_per_scanline<<3);
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline);

            for (x=0; x<w; x++)
                ptr[x] = get_bits(&s, bits_per_pixel);
            ptr += stride;
        }
    } else {    /* planar, 4, 8 or 16 colors */
        uint8_t scanline[bytes_per_scanline];
        int i;

        for (y=0; y<h; y++) {
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline);

            /* rebuild each pixel from one bit per plane, MSB first */
            for (x=0; x<w; x++) {
                int m = 0x80 >> (x&7), v = 0;
                for (i=nplanes - 1; i>=0; i--) {
                    v <<= 1;
                    v += !!(scanline[i*bytes_per_line + (x>>3)] & m);
                }
                ptr[x] = v;
            }
            ptr += stride;
        }
    }

    if (nplanes == 1 && bits_per_pixel == 8) {
        /* trailing 256-entry VGA palette (buf points past the 0x0C marker) */
        pcx_palette(&buf, (uint32_t *) p->data[1], 256);
    } else if (bits_per_pixel < 8) {
        /* 16-entry palette embedded in the header at offset 16 */
        const uint8_t *palette = bufstart+16;
        pcx_palette(&palette, (uint32_t *) p->data[1], 16);
    }

    *picture = s->picture;
    *data_size = sizeof(AVFrame);

    return buf - bufstart;
}
/**
 * Decode one Sun Rasterfile image.
 *
 * Parses the 32-byte big-endian header, optionally loads an RGB colormap
 * for 8-bit images, then copies the pixel data either RLE-decoded
 * (RT_BYTE_ENCODED) or raw, honoring the 16-bit scanline alignment.
 *
 * @return number of bytes consumed on success, -1 on failure.
 */
static int sunrast_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                                const uint8_t *buf, int buf_size)
{
    SUNRASTContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p = &s->picture;
    unsigned int w, h, depth, type, maptype, maplength, stride, x, y, len, alen;
    uint8_t *ptr;
    const uint8_t *bufstart = buf;

    /* magic 0x59a66a95 identifies a Sun raster file */
    if (AV_RB32(buf) != 0x59a66a95) {
        av_log(avctx, AV_LOG_ERROR, "this is not sunras encoded data\n");
        return -1;
    }

    w         = AV_RB32(buf+4);
    h         = AV_RB32(buf+8);
    depth     = AV_RB32(buf+12);
    type      = AV_RB32(buf+20);   /* buf+16 (data length) is ignored */
    maptype   = AV_RB32(buf+24);
    maplength = AV_RB32(buf+28);

    /* only raw and RT_BYTE_ENCODED are handled; RGB/IFF types are not */
    if (type > RT_BYTE_ENCODED && type <= RT_FORMAT_IFF) {
        av_log(avctx, AV_LOG_ERROR, "unsupported (compression) type\n");
        return -1;
    }
    if (type > RT_FORMAT_IFF) {
        av_log(avctx, AV_LOG_ERROR, "invalid (compression) type\n");
        return -1;
    }
    if (maptype & ~1) {
        av_log(avctx, AV_LOG_ERROR, "invalid colormap type\n");
        return -1;
    }

    buf += 32;   /* past the fixed header */

    switch (depth) {
        case 1:
            avctx->pix_fmt = PIX_FMT_MONOWHITE;
            break;
        case 8:
            avctx->pix_fmt = PIX_FMT_PAL8;
            break;
        case 24:
            avctx->pix_fmt = PIX_FMT_BGR24;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "invalid depth\n");
            return -1;
    }

    if (p->data[0])
        avctx->release_buffer(avctx, p);
    if (avcodec_check_dimensions(avctx, w, h))
        return -1;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    p->pict_type = FF_I_TYPE;

    if (depth != 8 && maplength) {
        av_log(avctx, AV_LOG_WARNING, "useless colormap found or file is corrupted, trying to recover\n");
    } else if (depth == 8) {
        /* colormap is stored planar: all R, then all G, then all B */
        unsigned int len = maplength / 3;   /* intentionally shadows outer len */

        if (!maplength) {
            av_log(avctx, AV_LOG_ERROR, "colormap expected\n");
            return -1;
        }
        if (maplength % 3 || maplength > 768) {
            av_log(avctx, AV_LOG_WARNING, "invalid colormap length\n");
            return -1;
        }

        ptr = p->data[1];
        for (x=0; x<len; x++, ptr+=4)
            *(uint32_t *)ptr = (buf[x]<<16) + (buf[len+x]<<8) + buf[len+len+x];
    }

    buf += maplength;

    ptr    = p->data[0];
    stride = p->linesize[0];

    /* scanlines are aligned on 16 bit boundaries */
    len  = (depth * w + 7) >> 3;   /* payload bytes per row */
    alen = len + (len&1);          /* padded (stored) bytes per row */

    if (type == RT_BYTE_ENCODED) {
        int value, run;
        uint8_t *end = ptr + h*stride;

        /* 0x80 introduces a run: 0x80, count-1 [, value]; a count byte of 0
         * means a literal 0x80 */
        x = 0;
        while (ptr != end) {
            run = 1;
            if ((value = *buf++) == 0x80) {
                run = *buf++ + 1;
                if (run != 1)
                    value = *buf++;
            }
            while (run--) {
                if (x < len)
                    ptr[x] = value;   /* padding bytes beyond len are dropped */
                if (++x >= alen) {
                    x = 0;
                    ptr += stride;
                    if (ptr == end)
                        break;
                }
            }
        }
    } else {
        /* uncompressed: row-by-row copy, skipping the alignment byte */
        for (y=0; y<h; y++) {
            memcpy(ptr, buf, len);
            ptr += stride;
            buf += alen;
        }
    }

    *picture = s->picture;
    *data_size = sizeof(AVFrame);

    return buf - bufstart;
}
/**
 * Parse a PNM/PAM header and configure the codec context.
 *
 * Handles P4 (PBM), P5 (PGM, or PGMYUV when the codec id says so),
 * P6 (PPM) and P7 (PAM). For PAM the tag list (WIDTH/HEIGHT/DEPTH/
 * MAXVAL/TUPLTYPE/ENDHDR) is parsed and the pixel format derived from
 * DEPTH and MAXVAL; for the other variants width/height (and maxval,
 * except for P4) follow the magic.
 *
 * @return 0 on success, -1 on malformed or unsupported headers.
 */
static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){
    char buf1[32], tuple_type[32];
    int h, w, depth, maxval;

    pnm_get(s, buf1, sizeof(buf1));
    if (!strcmp(buf1, "P4")) {
        avctx->pix_fmt = PIX_FMT_MONOWHITE;
    } else if (!strcmp(buf1, "P5")) {
        if (avctx->codec_id == CODEC_ID_PGMYUV)
            avctx->pix_fmt = PIX_FMT_YUV420P;
        else
            avctx->pix_fmt = PIX_FMT_GRAY8;
    } else if (!strcmp(buf1, "P6")) {
        avctx->pix_fmt = PIX_FMT_RGB24;
    } else if (!strcmp(buf1, "P7")) {
        /* PAM: free-form tag list terminated by ENDHDR */
        w      = -1;
        h      = -1;
        maxval = -1;
        depth  = -1;
        tuple_type[0] = '\0';
        for (;;) {
            pnm_get(s, buf1, sizeof(buf1));
            if (!strcmp(buf1, "WIDTH")) {
                pnm_get(s, buf1, sizeof(buf1));
                w = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "HEIGHT")) {
                pnm_get(s, buf1, sizeof(buf1));
                h = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "DEPTH")) {
                pnm_get(s, buf1, sizeof(buf1));
                depth = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "MAXVAL")) {
                pnm_get(s, buf1, sizeof(buf1));
                maxval = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "TUPLTYPE")) {
                /* FIX: the PAM header token is "TUPLTYPE" (no 'E' after L),
                 * per the netpbm PAM format; matching "TUPLETYPE" meant every
                 * valid PAM file fell through to the error branch below. */
                pnm_get(s, tuple_type, sizeof(tuple_type));
            } else if (!strcmp(buf1, "ENDHDR")) {
                break;
            } else {
                return -1;   /* unknown tag */
            }
        }
        /* check that all tags are present */
        if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 ||
            tuple_type[0] == '\0' || avcodec_check_dimensions(avctx, w, h))
            return -1;

        avctx->width  = w;
        avctx->height = h;
        if (depth == 1) {
            if (maxval == 1)
                avctx->pix_fmt = PIX_FMT_MONOWHITE;
            else
                avctx->pix_fmt = PIX_FMT_GRAY8;
        } else if (depth == 3) {
            avctx->pix_fmt = PIX_FMT_RGB24;
        } else if (depth == 4) {
            avctx->pix_fmt = PIX_FMT_RGB32;
        } else {
            return -1;
        }
        return 0;
    } else {
        return -1;   /* unknown magic */
    }

    /* non-PAM variants: width, height [, maxval] follow the magic */
    pnm_get(s, buf1, sizeof(buf1));
    avctx->width = atoi(buf1);
    if (avctx->width <= 0)
        return -1;
    pnm_get(s, buf1, sizeof(buf1));
    avctx->height = atoi(buf1);
    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height))
        return -1;
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        /* maxval of 65535 upgrades grayscale to 16-bit big-endian */
        pnm_get(s, buf1, sizeof(buf1));
        if (atoi(buf1) == 65535 && avctx->pix_fmt == PIX_FMT_GRAY8)
            avctx->pix_fmt = PIX_FMT_GRAY16BE;
    }
    /* more check if YUV420 */
    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        /* PGMYUV stores height as 3/2 of the luma height; undo that */
        if ((avctx->width & 1) != 0)
            return -1;
        h = (avctx->height * 2);
        if ((h % 3) != 0)
            return -1;
        h /= 3;
        avctx->height = h;
    }
    return 0;
}
/**
 * Decode one SGI (.sgi/.rgb) image.
 *
 * Reads the 512-byte header (magic, RLE flag, bytes per channel,
 * dimensionality, width/height/depth), picks the matching pixel format,
 * then hands the payload to the RLE or uncompressed reader.
 *
 * @return buf_size on success, -1 on any error.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        const uint8_t *in_buf, int buf_size)
{
    SgiState *s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame *p = &s->picture;
    const uint8_t *in_end = in_buf + buf_size;
    unsigned int dimension, bytes_per_channel, rle;
    int ret = 0;
    uint8_t *out_buf, *out_end;

    if (buf_size < SGI_HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "buf_size too small (%d)\n", buf_size);
        return -1;
    }

    /* Test for SGI magic. */
    if (bytestream_get_be16(&in_buf) != SGI_MAGIC) {
        av_log(avctx, AV_LOG_ERROR, "bad magic number\n");
        return -1;
    }

    /* header fields, big-endian, in on-disk order */
    rle               = bytestream_get_byte(&in_buf);
    bytes_per_channel = bytestream_get_byte(&in_buf);
    dimension         = bytestream_get_be16(&in_buf);
    s->width          = bytestream_get_be16(&in_buf);
    s->height         = bytestream_get_be16(&in_buf);
    s->depth          = bytestream_get_be16(&in_buf);

    if (bytes_per_channel != 1) {
        av_log(avctx, AV_LOG_ERROR, "wrong channel number\n");
        return -1;
    }

    /* Check for supported image dimensions. */
    if (dimension != 2 && dimension != 3) {
        av_log(avctx, AV_LOG_ERROR, "wrong dimension number\n");
        return -1;
    }

    switch (s->depth) {
    case SGI_GRAYSCALE:
        avctx->pix_fmt = PIX_FMT_GRAY8;
        break;
    case SGI_RGB:
        avctx->pix_fmt = PIX_FMT_RGB24;
        break;
    case SGI_RGBA:
        avctx->pix_fmt = PIX_FMT_RGBA;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "wrong picture format\n");
        return -1;
    }

    if (avcodec_check_dimensions(avctx, s->width, s->height))
        return -1;
    avcodec_set_dimensions(avctx, s->width, s->height);

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference = 0;
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed.\n");
        return -1;
    }

    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;
    out_buf = p->data[0];
    out_end = out_buf + p->linesize[0] * s->height;
    s->linesize = p->linesize[0];

    /* Skip the remainder of the fixed-size header (12 bytes already read). */
    in_buf += SGI_HEADER_SIZE - 12;

    ret = rle ? read_rle_sgi(out_end, in_buf, in_end, s)
              : read_uncompressed_sgi(out_buf, out_end, in_buf, in_end, s);
    if (ret != 0)
        return -1;

    *picture = s->picture;
    *data_size = sizeof(AVPicture);
    return buf_size;
}