/* Initialize the generic H.263-family decoder (H.263, MPEG-4, MSMPEG4 v1-3,
 * WMV1/2, VC-1/WMV3 variants, MSS2, FLV1).  Configures the shared
 * MpegEncContext, selects the sub codec from the codec id and, for codecs
 * whose dimensions are known up front, allocates the MPV buffers right away.
 * Returns 0 on success, a negative AVERROR code on failure. */
av_cold int ff_h263_decode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int ret;

    s->avctx      = avctx;
    s->out_format = FMT_H263;
    /* container-provided dimensions; H.263/H.263+/MPEG-4 may revise them
     * once the bitstream header is parsed (see the allocation note below) */
    s->width  = avctx->coded_width;
    s->height = avctx->coded_height;
    s->workaround_bugs = avctx->workaround_bugs;

    // set defaults
    ff_MPV_decode_defaults(s);
    s->quant_precision = 5;
    s->decode_mb = ff_h263_decode_mb;
    s->low_delay = 1;
    if (avctx->codec->id == AV_CODEC_ID_MSS2)
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    else
        /* NOTE(review): invokes the get_format callback directly; newer
         * revisions route this through ff_get_format() — confirm against
         * the current tree before touching this. */
        avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
    s->unrestricted_mv = 1;

    /* select sub codec */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
        s->unrestricted_mv = 0;
        avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
        break;
    case AV_CODEC_ID_MPEG4:
        break;
    case AV_CODEC_ID_MSMPEG4V1:
        s->h263_pred = 1;
        s->msmpeg4_version = 1;
        break;
    case AV_CODEC_ID_MSMPEG4V2:
        s->h263_pred = 1;
        s->msmpeg4_version = 2;
        break;
    case AV_CODEC_ID_MSMPEG4V3:
        s->h263_pred = 1;
        s->msmpeg4_version = 3;
        break;
    case AV_CODEC_ID_WMV1:
        s->h263_pred = 1;
        s->msmpeg4_version = 4;
        break;
    case AV_CODEC_ID_WMV2:
        s->h263_pred = 1;
        s->msmpeg4_version = 5;
        break;
    case AV_CODEC_ID_VC1:
    case AV_CODEC_ID_WMV3:
    case AV_CODEC_ID_VC1IMAGE:
    case AV_CODEC_ID_WMV3IMAGE:
    case AV_CODEC_ID_MSS2:
        s->h263_pred = 1;
        s->msmpeg4_version = 6;
        avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
        break;
    case AV_CODEC_ID_H263I:
        break;
    case AV_CODEC_ID_FLV1:
        s->h263_flv = 1;
        break;
    default:
        return AVERROR(EINVAL);
    }
    s->codec_id = avctx->codec->id;
    avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);

    /* for h263, we allocate the images after having read the header */
    if (avctx->codec->id != AV_CODEC_ID_H263 &&
        avctx->codec->id != AV_CODEC_ID_H263P &&
        avctx->codec->id != AV_CODEC_ID_MPEG4)
        if ((ret = ff_MPV_common_init(s)) < 0)
            return ret;

    ff_h263_decode_init_vlc(s);

    return 0;
}
/* RV20 picture header parser (older revision).  Reads the frame type,
 * qscale, sequence number and the RPR (reduced picture resolution) code,
 * reinitializing the decoder when the stream switches resolution.
 * NOTE(review): this chunk is truncated — the function continues past the
 * final else-if below; do not assume the visible code is complete. */
static int rv20_decode_picture_header(RVDecContext *rv)
{
    MpegEncContext *s = &rv->m;
    int seq, mb_pos, i;
    int rpr_bits;

    /* 2-bit picture type field */
    i = get_bits(&s->gb, 2);
    switch(i){
    case 0: s->pict_type= AV_PICTURE_TYPE_I; break;
    case 1: s->pict_type= AV_PICTURE_TYPE_I; break; //hmm ...
    case 2: s->pict_type= AV_PICTURE_TYPE_P; break;
    case 3: s->pict_type= AV_PICTURE_TYPE_B; break;
    default: av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n"); return -1;
    }

    /* a B frame needs a reference; reject it if nothing was decoded yet */
    if(s->last_picture_ptr==NULL && s->pict_type==AV_PICTURE_TYPE_B){
        av_log(s->avctx, AV_LOG_ERROR, "early B pix\n");
        return -1;
    }

    if (get_bits1(&s->gb)){
        av_log(s->avctx, AV_LOG_ERROR, "reserved bit set\n");
        return -1;
    }

    s->qscale = get_bits(&s->gb, 5);
    if(s->qscale==0){
        av_log(s->avctx, AV_LOG_ERROR, "error, qscale:0\n");
        return -1;
    }

    if(RV_GET_MINOR_VER(rv->sub_id) >= 2)
        s->loop_filter = get_bits1(&s->gb);

    /* sequence number; the field width depends on the minor version */
    if(RV_GET_MINOR_VER(rv->sub_id) <= 1)
        seq = get_bits(&s->gb, 8) << 7;
    else
        seq = get_bits(&s->gb, 13) << 2;

    /* RPR: low 3 bits of extradata[1] select how many bits encode the
     * resolution index (NOTE(review): extradata_size is not re-checked
     * here — presumably validated at init; confirm against the caller) */
    rpr_bits = s->avctx->extradata[1] & 7;
    if(rpr_bits){
        int f, new_w, new_h;
        rpr_bits = FFMIN((rpr_bits >> 1) + 1, 3);

        f = get_bits(&s->gb, rpr_bits);

        if(f){
            /* dimension table in extradata, stored as width/4, height/4 */
            new_w= 4*((uint8_t*)s->avctx->extradata)[6+2*f];
            new_h= 4*((uint8_t*)s->avctx->extradata)[7+2*f];
        }else{
            new_w= s->orig_width ;
            new_h= s->orig_height;
        }
        if(new_w != s->width || new_h != s->height){
            av_log(s->avctx, AV_LOG_DEBUG, "attempting to change resolution to %dx%d\n", new_w, new_h);
            if (av_image_check_size(new_w, new_h, 0, s->avctx) < 0)
                return -1;
            /* tear down and rebuild the MPV context with the new size */
            ff_MPV_common_end(s);
            avcodec_set_dimensions(s->avctx, new_w, new_h);
            s->width  = new_w;
            s->height = new_h;
            if (ff_MPV_common_init(s) < 0)
                return -1;
        }

        if(s->avctx->debug & FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_DEBUG, "F %d/%d\n", f, rpr_bits);
        }
    } else if (av_image_check_size(s->width, s->height, 0, s->avctx) < 0)
    /* NOTE(review): chunk ends here mid-statement — the error-return body of
     * this else-if and the rest of the function lie outside this chunk. */
/* Decode one H.261 frame from avpkt into *data (an AVFrame).
 * Sets *got_frame when a picture is output.  Returns the number of
 * consumed bytes on success, a negative value on error. */
static int h261_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    H261Context *h     = avctx->priv_data;
    MpegEncContext *s  = &h->s;
    int ret;
    AVFrame *pict = data;

    av_dlog(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
    av_dlog(avctx, "bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);

    s->flags  = avctx->flags;
    s->flags2 = avctx->flags2;

    h->gob_start_code_skipped = 0;

retry:
    init_get_bits(&s->gb, buf, buf_size * 8);

    if (!s->context_initialized)
        // we need the IDCT permutation for reading a custom matrix
        if (ff_MPV_common_init(s) < 0)
            return -1;

    /* We need to set current_picture_ptr before reading the header,
     * otherwise we cannot store anything in there. */
    if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
        int i = ff_find_unused_picture(s, 0);
        if (i < 0)
            return i;
        s->current_picture_ptr = &s->picture[i];
    }

    ret = h261_decode_picture_header(h);

    /* skip if the header was thrashed */
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
        return -1;
    }

    /* the header announced new dimensions: tear the context down so it is
     * rebuilt (via the retry path below) at the new size */
    if (s->width != avctx->coded_width || s->height != avctx->coded_height) {
        // FIXME move this demuxing hack to libavformat
        ParseContext pc = s->parse_context;
        s->parse_context.buffer = 0;
        ff_MPV_common_end(s);
        s->parse_context = pc;
    }

    if (!s->context_initialized) {
        avcodec_set_dimensions(avctx, s->width, s->height);
        goto retry;
    }

    // for skipping the frame
    s->current_picture.f.pict_type = s->pict_type;
    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    /* honour the user's skip_frame policy before doing any real work */
    if ((avctx->skip_frame >= AVDISCARD_NONREF &&
         s->pict_type == AV_PICTURE_TYPE_B) ||
        (avctx->skip_frame >= AVDISCARD_NONKEY &&
         s->pict_type != AV_PICTURE_TYPE_I) ||
        avctx->skip_frame >= AVDISCARD_ALL)
        return get_consumed_bytes(s, buf_size);

    if (ff_MPV_frame_start(s, avctx) < 0)
        return -1;

    ff_mpeg_er_frame_start(s);

    /* decode each macroblock */
    s->mb_x = 0;
    s->mb_y = 0;

    /* GOB count: 12 when mb_height == 18 (CIF-sized), otherwise 5 —
     * presumably the QCIF numbering; confirm against the H.261 spec */
    while (h->gob_number < (s->mb_height == 18 ? 12 : 5)) {
        if (ff_h261_resync(h) < 0)
            break;
        h261_decode_gob(h);
    }
    ff_MPV_frame_end(s);

    av_assert0(s->current_picture.f.pict_type ==
               s->current_picture_ptr->f.pict_type);
    av_assert0(s->current_picture.f.pict_type == s->pict_type);

    if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
        return ret;
    ff_print_debug_info(s, s->current_picture_ptr, pict);

    *got_frame = 1;

    return get_consumed_bytes(s, buf_size);
}
/* Initialize the generic H.263-family decoder (newer revision using
 * ff_get_format and the ehc_mode quirk detection).  Sets up the shared
 * MpegEncContext, picks the sub codec from the codec id, and allocates the
 * MPV buffers now except for codecs whose real dimensions only become
 * known after the bitstream header has been parsed.
 * Returns 0 on success, a negative AVERROR code on failure. */
av_cold int ff_h263_decode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int ret;

    s->avctx           = avctx;
    s->out_format      = FMT_H263;
    s->width           = avctx->coded_width;
    s->height          = avctx->coded_height;
    s->workaround_bugs = avctx->workaround_bugs;

    // set defaults
    ff_MPV_decode_defaults(s);

    s->quant_precision = 5;
    s->low_delay       = 1;
    s->decode_mb       = ff_h263_decode_mb;

    avctx->pix_fmt = avctx->codec->id == AV_CODEC_ID_MSS2
                     ? AV_PIX_FMT_YUV420P
                     : ff_get_format(avctx, avctx->codec->pix_fmts);

    s->unrestricted_mv = 1;

    /* select sub codec */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
        s->unrestricted_mv            = 0;
        avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
        break;
    case AV_CODEC_ID_MPEG4:
    case AV_CODEC_ID_H263I:
        /* nothing codec-specific to configure here */
        break;
    case AV_CODEC_ID_MSMPEG4V1:
        s->msmpeg4_version = 1;
        s->h263_pred       = 1;
        break;
    case AV_CODEC_ID_MSMPEG4V2:
        s->msmpeg4_version = 2;
        s->h263_pred       = 1;
        break;
    case AV_CODEC_ID_MSMPEG4V3:
        s->msmpeg4_version = 3;
        s->h263_pred       = 1;
        break;
    case AV_CODEC_ID_WMV1:
        s->msmpeg4_version = 4;
        s->h263_pred       = 1;
        break;
    case AV_CODEC_ID_WMV2:
        s->msmpeg4_version = 5;
        s->h263_pred       = 1;
        break;
    case AV_CODEC_ID_VC1:
    case AV_CODEC_ID_WMV3:
    case AV_CODEC_ID_VC1IMAGE:
    case AV_CODEC_ID_WMV3IMAGE:
    case AV_CODEC_ID_MSS2:
        s->msmpeg4_version            = 6;
        s->h263_pred                  = 1;
        avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
        break;
    case AV_CODEC_ID_FLV1:
        s->h263_flv = 1;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported codec %d\n",
               avctx->codec->id);
        return AVERROR(ENOSYS);
    }

    s->codec_id = avctx->codec->id;

    /* streams tagged "l263" with this exact extradata layout need the
     * ehc decoding quirk */
    if (avctx->stream_codec_tag == AV_RL32("l263") &&
        avctx->extradata_size == 56 && avctx->extradata[0] == 1)
        s->ehc_mode = 1;

    /* for h263, we allocate the images after having read the header */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_MPEG4:
        break;
    default:
        if ((ret = ff_MPV_common_init(s)) < 0)
            return ret;
    }

    ff_h263dsp_init(&s->h263dsp);
    ff_qpeldsp_init(&s->qdsp);
    ff_h263_decode_init_vlc();

    return 0;
}
/* RV20 picture header parser (newer revision: additionally tries to keep
 * the sample aspect ratio consistent across typical 2:1 / 1:2 resolution
 * switches).
 * NOTE(review): this chunk is truncated — the function continues past the
 * final else-if below; do not assume the visible code is complete. */
static int rv20_decode_picture_header(RVDecContext *rv)
{
    MpegEncContext *s = &rv->m;
    int seq, mb_pos, i;
    int rpr_bits;

#if 0
    /* disabled debug helper: dump the next 64 bits of the bitstream */
    GetBitContext gb= s->gb;
    for(i=0; i<64; i++){
        av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&gb));
        if(i%4==3) av_log(s->avctx, AV_LOG_DEBUG, " ");
    }
    av_log(s->avctx, AV_LOG_DEBUG, "\n");
#endif
#if 0
    /* disabled debug helper: dump dimensions and raw extradata */
    av_log(s->avctx, AV_LOG_DEBUG, "%3dx%03d/%02Xx%02X ",
           s->width, s->height, s->width/4, s->height/4);
    for(i=0; i<s->avctx->extradata_size; i++){
        av_log(s->avctx, AV_LOG_DEBUG, "%02X ",
               ((uint8_t*)s->avctx->extradata)[i]);
        if(i%4==3) av_log(s->avctx, AV_LOG_DEBUG, " ");
    }
    av_log(s->avctx, AV_LOG_DEBUG, "\n");
#endif

    /* 2-bit picture type field */
    i= get_bits(&s->gb, 2);
    switch(i){
    case 0: s->pict_type= AV_PICTURE_TYPE_I; break;
    case 1: s->pict_type= AV_PICTURE_TYPE_I; break; //hmm ...
    case 2: s->pict_type= AV_PICTURE_TYPE_P; break;
    case 3: s->pict_type= AV_PICTURE_TYPE_B; break;
    default: av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n"); return -1;
    }

    /* a B frame needs a reference; reject it if nothing was decoded yet */
    if(s->last_picture_ptr==NULL && s->pict_type==AV_PICTURE_TYPE_B){
        av_log(s->avctx, AV_LOG_ERROR, "early B pix\n");
        return -1;
    }

    if (get_bits1(&s->gb)){
        av_log(s->avctx, AV_LOG_ERROR, "reserved bit set\n");
        return -1;
    }

    s->qscale = get_bits(&s->gb, 5);
    if(s->qscale==0){
        av_log(s->avctx, AV_LOG_ERROR, "error, qscale:0\n");
        return -1;
    }

    if(RV_GET_MINOR_VER(rv->sub_id) >= 2)
        s->loop_filter = get_bits1(&s->gb);

    /* sequence number; the field width depends on the minor version */
    if(RV_GET_MINOR_VER(rv->sub_id) <= 1)
        seq = get_bits(&s->gb, 8) << 7;
    else
        seq = get_bits(&s->gb, 13) << 2;

    /* RPR: low 3 bits of extradata[1] select how many bits encode the
     * resolution index (NOTE(review): extradata_size is not re-checked
     * here — presumably validated at init; confirm against the caller) */
    rpr_bits = s->avctx->extradata[1] & 7;
    if(rpr_bits){
        int f, new_w, new_h;
        rpr_bits = FFMIN((rpr_bits >> 1) + 1, 3);

        f = get_bits(&s->gb, rpr_bits);

        if(f){
            /* dimension table in extradata, stored as width/4, height/4 */
            new_w= 4*((uint8_t*)s->avctx->extradata)[6+2*f];
            new_h= 4*((uint8_t*)s->avctx->extradata)[7+2*f];
        }else{
            new_w= s->orig_width ;
            new_h= s->orig_height;
        }
        if(new_w != s->width || new_h != s->height){
            AVRational old_aspect = s->avctx->sample_aspect_ratio;
            av_log(s->avctx, AV_LOG_DEBUG,
                   "attempting to change resolution to %dx%d\n", new_w, new_h);
            if (av_image_check_size(new_w, new_h, 0, s->avctx) < 0)
                return -1;
            ff_MPV_common_end(s);

            // attempt to keep aspect during typical resolution switches
            if (!old_aspect.num)
                old_aspect = (AVRational){1, 1};
            if (2 * new_w * s->height == new_h * s->width)
                s->avctx->sample_aspect_ratio = av_mul_q(old_aspect, (AVRational){2, 1});
            if (new_w * s->height == 2 * new_h * s->width)
                s->avctx->sample_aspect_ratio = av_mul_q(old_aspect, (AVRational){1, 2});

            avcodec_set_dimensions(s->avctx, new_w, new_h);
            s->width  = new_w;
            s->height = new_h;
            if (ff_MPV_common_init(s) < 0)
                return -1;
        }

        if(s->avctx->debug & FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_DEBUG, "F %d/%d\n", f, rpr_bits);
        }
    } else if (av_image_check_size(s->width, s->height, 0, s->avctx) < 0)
    /* NOTE(review): chunk ends here mid-statement — the error-return body of
     * this else-if and the rest of the function lie outside this chunk. */
/** * \brief initialize mjpeg encoder * * This routine is to set up the parameters and initialize the mjpeg encoder. * It does all the initializations needed of lower level routines. * The formats accepted by this encoder is YUV422P and YUV420 * * \param w width in pixels of the image to encode, must be a multiple of 16 * \param h height in pixels of the image to encode, must be a multiple of 8 * \param y_rsize size of each plane row Y component * \param y_rsize size of each plane row U component * \param v_rsize size of each plane row V component * \param cu "cheap upsample". Set to 0 for YUV422 format, 1 for YUV420 format * when set to 1, the encoder will assume that there is only half th * number of rows of chroma information, and every chroma row is * duplicated. * \param q quality parameter for the mjpeg encode. Between 1 and 20 where 1 * is best quality and 20 is the worst quality. * \param b monochrome flag. When set to 1, the mjpeg output is monochrome. * In that case, the colour information is omitted, and actually the * colour planes are not touched. * * \returns an appropriately set up jpeg_enc_t structure * * The actual plane buffer addreses are passed by jpeg_enc_frame(). * * The encoder doesn't know anything about interlacing, the halve height * needs to be passed and the double rowstride. 
Which field gets encoded * is decided by what buffers are passed to mjpeg_encode_frame() */ static jpeg_enc_t *jpeg_enc_init(int w, int h, int y_rsize, int u_rsize, int v_rsize, int cu, int q, int b) { jpeg_enc_t *j; int i = 0; VERBOSE("JPEG encoder init: %dx%d %d %d %d cu=%d q=%d bw=%d\n", w, h, y_rsize, u_rsize, v_rsize, cu, q, b); j = av_mallocz(sizeof(jpeg_enc_t)); if (j == NULL) return NULL; j->s = av_mallocz(sizeof(MpegEncContext)); if (j->s == NULL) { av_free(j); return NULL; } /* info on how to access the pixels */ j->y_rs = y_rsize; j->u_rs = u_rsize; j->v_rs = v_rsize; j->s->width = w; // image width and height j->s->height = h; j->s->qscale = q; // Encoding quality j->s->out_format = FMT_MJPEG; j->s->intra_only = 1; // Generate only intra pictures for jpeg j->s->encoding = 1; // Set mode to encode j->s->pict_type = AV_PICTURE_TYPE_I; j->s->y_dc_scale = 8; j->s->c_dc_scale = 8; /* * This sets up the MCU (Minimal Code Unit) number * of appearances of the various component * for the SOF0 table in the generated MJPEG. * The values are not used for anything else. * The current setup is simply YUV422, with two horizontal Y components * for every UV component. 
*/ //FIXME j->s->mjpeg_write_tables = 1; // setup to write tables j->s->mjpeg_vsample[0] = 1; // 1 appearance of Y vertically j->s->mjpeg_vsample[1] = 1; // 1 appearance of U vertically j->s->mjpeg_vsample[2] = 1; // 1 appearance of V vertically j->s->mjpeg_hsample[0] = 2; // 2 appearances of Y horizontally j->s->mjpeg_hsample[1] = 1; // 1 appearance of U horizontally j->s->mjpeg_hsample[2] = 1; // 1 appearance of V horizontally j->cheap_upsample = cu; j->bw = b; init_avcodec(); // Build mjpeg huffman code tables, setting up j->s->mjpeg_ctx if (ff_mjpeg_encode_init(j->s) < 0) { av_free(j->s); av_free(j); return NULL; } /* alloc bogus avctx to keep MPV_common_init from segfaulting */ j->s->avctx = avcodec_alloc_context(); if (j->s->avctx == NULL) { av_free(j->s); av_free(j); return NULL; } // Set some a minimum amount of default values that are needed // Indicates that we should generated normal MJPEG j->s->avctx->codec_id = AV_CODEC_ID_MJPEG; // Which DCT method to use. AUTO will select the fastest one j->s->avctx->dct_algo = FF_DCT_AUTO; j->s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x // indicate we 'decode' to jpeg 4:2:2 j->s->avctx->pix_fmt = PIX_FMT_YUVJ422P; j->s->avctx->thread_count = 1; /* make MPV_common_init allocate important buffers, like s->block * Also initializes dsputil */ if (ff_MPV_common_init(j->s) < 0) { av_free(j->s); av_free(j); return NULL; } /* correct the value for sc->mb_height. 
MPV_common_init put other * values there */ j->s->mb_height = j->s->height/8; j->s->mb_intra = 1; // Init q matrix j->s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0]; for (i = 1; i < 64; i++) j->s->intra_matrix[i] = av_clip_uint8( (ff_mpeg1_default_intra_matrix[i]*j->s->qscale) >> 3); // precompute matrix convert_matrix(j->s, j->s->q_intra_matrix, j->s->q_intra_matrix16, j->s->intra_matrix, j->s->intra_quant_bias, 8, 8); /* Pick up the selection of the optimal get_pixels() routine * to use, which was done in MPV_common_init() */ get_pixels = j->s->dsp.get_pixels; return j; }