static int rv10_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MpegEncContext *s = avctx->priv_data;
    int i;
    AVFrame *pict = data;
    int slice_count;
    const uint8_t *slices_hdr = NULL;

#ifdef DEBUG
    av_log(avctx, AV_LOG_DEBUG, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
#endif

    /* no supplementary picture */
    if (buf_size == 0) {
        return 0;
    }

    if(!avctx->slice_count){
        slice_count = (*buf++) + 1;
        slices_hdr = buf + 4;
        buf += 8 * slice_count;
    }else
        slice_count = avctx->slice_count;

    for(i=0; i<slice_count; i++){
        int offset= get_slice_offset(avctx, slices_hdr, i);
        int size;

        if(i+1 == slice_count)
            size= buf_size - offset;
        else
            size= get_slice_offset(avctx, slices_hdr, i+1) - offset;

        rv10_decode_packet(avctx, buf+offset, size);
    }

    if(s->current_picture_ptr != NULL && s->mb_y>=s->mb_height){
        ff_er_frame_end(s);
        MPV_frame_end(s);

        if (s->pict_type == FF_B_TYPE || s->low_delay) {
            *pict= *(AVFrame*)s->current_picture_ptr;
        } else if (s->last_picture_ptr != NULL) {
            *pict= *(AVFrame*)s->last_picture_ptr;
        }

        if(s->last_picture_ptr || s->low_delay){
            *data_size = sizeof(AVFrame);
            ff_print_debug_info(s, pict);
        }
        s->current_picture_ptr= NULL; // so we can detect if frame_end wasn't called (find some nicer solution...)
    }

    return buf_size;
}
static int rv10_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             uint8_t *buf, int buf_size)
{
    MpegEncContext *s = avctx->priv_data;
    int i;
    AVFrame *pict = data;

#ifdef DEBUG
    printf("*****frame %d size=%d\n", avctx->frame_number, buf_size);
#endif

    /* no supplementary picture */
    if (buf_size == 0) {
        *data_size = 0;
        return 0;
    }

    if(avctx->slice_count){
        for(i=0; i<avctx->slice_count; i++){
            int offset= avctx->slice_offset[i];
            int size;

            if(i+1 == avctx->slice_count)
                size= buf_size - offset;
            else
                size= avctx->slice_offset[i+1] - offset;

            if( rv10_decode_packet(avctx, buf+offset, size) < 0 )
                return -1;
        }
    }else{
        if( rv10_decode_packet(avctx, buf, buf_size) < 0 )
            return -1;
    }

    if(s->mb_y>=s->mb_height){
        MPV_frame_end(s);

        *pict= *(AVFrame*)&s->current_picture;
        *data_size = sizeof(AVFrame);
    }else{
        *data_size = 0;
    }

    return buf_size;
}
static int h261_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    H261Context *h= avctx->priv_data;
    MpegEncContext *s = &h->s;
    int ret;
    AVFrame *pict = data;

    dprintf(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
    dprintf(avctx, "bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
    s->flags= avctx->flags;
    s->flags2= avctx->flags2;

    h->gob_start_code_skipped=0;

retry:

    init_get_bits(&s->gb, buf, buf_size*8);

    if(!s->context_initialized){
        if (MPV_common_init(s) < 0) //we need the IDCT permutation for reading a custom matrix
            return -1;
    }

    //we need to set current_picture_ptr before reading the header, otherwise we cannot store anything in there
    if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
        int i= ff_find_unused_picture(s, 0);
        s->current_picture_ptr= &s->picture[i];
    }

    ret = h261_decode_picture_header(h);

    /* skip if the header was trashed */
    if (ret < 0){
        av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
        return -1;
    }

    if (s->width != avctx->coded_width || s->height != avctx->coded_height){
        ParseContext pc= s->parse_context; //FIXME move this demuxing hack to libavformat
        s->parse_context.buffer=0;
        MPV_common_end(s);
        s->parse_context= pc;
    }

    if (!s->context_initialized) {
        avcodec_set_dimensions(avctx, s->width, s->height);

        goto retry;
    }

    // for hurry_up==5
    s->current_picture.pict_type= s->pict_type;
    s->current_picture.key_frame= s->pict_type == FF_I_TYPE;

    /* skip everything if we are in a hurry>=5 */
    if(avctx->hurry_up>=5)
        return get_consumed_bytes(s, buf_size);
    if(   (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
       || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
       ||  avctx->skip_frame >= AVDISCARD_ALL)
        return get_consumed_bytes(s, buf_size);

    if(MPV_frame_start(s, avctx) < 0)
        return -1;

    ff_er_frame_start(s);

    /* decode each macroblock */
    s->mb_x=0;
    s->mb_y=0;

    while(h->gob_number < (s->mb_height==18 ? 12 : 5)){
        if(ff_h261_resync(h)<0)
            break;
        h261_decode_gob(h);
    }
    MPV_frame_end(s);

    assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
    assert(s->current_picture.pict_type == s->pict_type);
    *pict= *(AVFrame*)s->current_picture_ptr;
    ff_print_debug_info(s, pict);

    *data_size = sizeof(AVFrame);

    return get_consumed_bytes(s, buf_size);
}
static int rv10_decode_packet(AVCodecContext *avctx,
                              const uint8_t *buf, int buf_size)
{
    MpegEncContext *s = avctx->priv_data;
    int mb_count, mb_pos, left, start_mb_x;

    init_get_bits(&s->gb, buf, buf_size*8);
    if(s->codec_id ==CODEC_ID_RV10)
        mb_count = rv10_decode_picture_header(s);
    else
        mb_count = rv20_decode_picture_header(s);
    if (mb_count < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "HEADER ERROR\n");
        return -1;
    }

    if (s->mb_x >= s->mb_width ||
        s->mb_y >= s->mb_height) {
        av_log(s->avctx, AV_LOG_ERROR, "POS ERROR %d %d\n", s->mb_x, s->mb_y);
        return -1;
    }
    mb_pos = s->mb_y * s->mb_width + s->mb_x;
    left = s->mb_width * s->mb_height - mb_pos;
    if (mb_count > left) {
        av_log(s->avctx, AV_LOG_ERROR, "COUNT ERROR\n");
        return -1;
    }
//if(s->pict_type == FF_P_TYPE) return 0;

    if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
        if(s->current_picture_ptr){ //FIXME write parser so we always have complete frames?
            ff_er_frame_end(s);
            MPV_frame_end(s);
            s->mb_x= s->mb_y = s->resync_mb_x = s->resync_mb_y= 0;
        }
        if(MPV_frame_start(s, avctx) < 0)
            return -1;
        ff_er_frame_start(s);
    }

#ifdef DEBUG
    av_log(avctx, AV_LOG_DEBUG, "qscale=%d\n", s->qscale);
#endif

    /* default quantization values */
    if(s->codec_id== CODEC_ID_RV10){
        if(s->mb_y==0) s->first_slice_line=1;
    }else{
        s->first_slice_line=1;
        s->resync_mb_x= s->mb_x;
    }
    start_mb_x= s->mb_x;
    s->resync_mb_y= s->mb_y;
    if(s->h263_aic){
        s->y_dc_scale_table=
        s->c_dc_scale_table= ff_aic_dc_scale_table;
    }else{
        s->y_dc_scale_table=
        s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
    }

    if(s->modified_quant)
        s->chroma_qscale_table= ff_h263_chroma_qscale_table;

    ff_set_qscale(s, s->qscale);

    s->rv10_first_dc_coded[0] = 0;
    s->rv10_first_dc_coded[1] = 0;
    s->rv10_first_dc_coded[2] = 0;
//printf("%d %X %X\n", s->pict_type, s->current_picture.motion_val[0], s->current_picture.motion_val[1]);
    s->block_wrap[0]=
    s->block_wrap[1]=
    s->block_wrap[2]=
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[4]=
    s->block_wrap[5]= s->mb_stride;
    ff_init_block_index(s);

    /* decode each macroblock */
    for(s->mb_num_left= mb_count; s->mb_num_left>0; s->mb_num_left--) {
        int ret;
        ff_update_block_index(s);
#ifdef DEBUG
        av_log(avctx, AV_LOG_DEBUG, "**mb x=%d y=%d\n", s->mb_x, s->mb_y);
#endif

        s->mv_dir = MV_DIR_FORWARD;
        s->mv_type = MV_TYPE_16X16;
        ret=ff_h263_decode_mb(s, s->block);

        if (ret == SLICE_ERROR || s->gb.size_in_bits < get_bits_count(&s->gb)) {
            av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
        if(s->pict_type != FF_B_TYPE)
            ff_h263_update_motion_val(s);
        MPV_decode_mb(s, s->block);
        if(s->loop_filter)
            ff_h263_loop_filter(s);

        if (++s->mb_x == s->mb_width) {
            s->mb_x = 0;
            s->mb_y++;
            ff_init_block_index(s);
        }
        if(s->mb_x == s->resync_mb_x)
            s->first_slice_line=0;
        if(ret == SLICE_END) break;
    }

    ff_er_add_slice(s, start_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);

    return buf_size;
}
static int rv10_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             UINT8 *buf, int buf_size)
{
    MpegEncContext *s = avctx->priv_data;
    int i, mb_count, mb_pos, left;
    DCTELEM block[6][64];
    AVPicture *pict = data;

#ifdef DEBUG
    printf("*****frame %d size=%d\n", avctx->frame_number, buf_size);
#endif

    /* no supplementary picture */
    if (buf_size == 0) {
        *data_size = 0;
        return 0;
    }

    init_get_bits(&s->gb, buf, buf_size);

    mb_count = rv10_decode_picture_header(s);
    if (mb_count < 0) {
#ifdef DEBUG
        printf("HEADER ERROR\n");
#endif
        return -1;
    }

    if (s->mb_x >= s->mb_width ||
        s->mb_y >= s->mb_height) {
#ifdef DEBUG
        printf("POS ERROR %d %d\n", s->mb_x, s->mb_y);
#endif
        return -1;
    }
    mb_pos = s->mb_y * s->mb_width + s->mb_x;
    left = s->mb_width * s->mb_height - mb_pos;
    if (mb_count > left) {
#ifdef DEBUG
        printf("COUNT ERROR\n");
#endif
        return -1;
    }

    if (s->mb_x == 0 && s->mb_y == 0) {
        MPV_frame_start(s);
    }

#ifdef DEBUG
    printf("qscale=%d\n", s->qscale);
#endif

    /* default quantization values */
    s->y_dc_scale = 8;
    s->c_dc_scale = 8;
    s->rv10_first_dc_coded[0] = 0;
    s->rv10_first_dc_coded[1] = 0;
    s->rv10_first_dc_coded[2] = 0;

    /* decode each macroblock */
    for(i=0;i<mb_count;i++) {
#ifdef DEBUG
        printf("**mb x=%d y=%d\n", s->mb_x, s->mb_y);
#endif
        memset(block, 0, sizeof(block));
        s->mv_dir = MV_DIR_FORWARD;
        s->mv_type = MV_TYPE_16X16;
        if (h263_decode_mb(s, block) < 0) {
#ifdef DEBUG
            printf("ERROR\n");
#endif
            return -1;
        }
        MPV_decode_mb(s, block);

        if (++s->mb_x == s->mb_width) {
            s->mb_x = 0;
            s->mb_y++;
        }
    }

    if (s->mb_x == 0 &&
        s->mb_y == s->mb_height) {
        MPV_frame_end(s);

        pict->data[0] = s->current_picture[0];
        pict->data[1] = s->current_picture[1];
        pict->data[2] = s->current_picture[2];
        pict->linesize[0] = s->linesize;
        pict->linesize[1] = s->linesize / 2;
        pict->linesize[2] = s->linesize / 2;

        avctx->quality = s->qscale;
        *data_size = sizeof(AVPicture);
    } else {
        *data_size = 0;
    }

    return buf_size;
}
int ff_h263_decode_frame(AVCodecContext *avctx,
                         void *data, int *data_size,
                         const uint8_t *buf, int buf_size)
{
    MpegEncContext *s = avctx->priv_data;
    int ret;
    AVFrame *pict = data;

#ifdef PRINT_FRAME_TIME
    uint64_t time= rdtsc();
#endif
#ifdef DEBUG
    av_log(avctx, AV_LOG_DEBUG, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
    if(buf_size>0)
        av_log(avctx, AV_LOG_DEBUG, "bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
#endif
    s->flags= avctx->flags;
    s->flags2= avctx->flags2;

    /* no supplementary picture */
    if (buf_size == 0) {
        /* special case for last picture */
        if (s->low_delay==0 && s->next_picture_ptr) {
            *pict= *(AVFrame*)s->next_picture_ptr;
            s->next_picture_ptr= NULL;

            *data_size = sizeof(AVFrame);
        }

        return 0;
    }

retry:

    init_get_bits(&s->gb, buf, buf_size*8);
    s->bitstream_buffer_size=0;

    if (!s->context_initialized) {
        if (MPV_common_init(s) < 0) //we need the IDCT permutation for reading a custom matrix
            return -1;
    }

    /* We need to set current_picture_ptr before reading the header,
     * otherwise we cannot store anything in there. */
    if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
        int i= ff_find_unused_picture(s, 0);
        s->current_picture_ptr= &s->picture[i];
    }

    ret = flv_h263_decode_picture_header(s);

    if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_size);

    /* skip if the header was trashed */
    if (ret < 0){
        av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
        return -1;
    }

    avctx->has_b_frames= !s->low_delay;

    /* After the H.263 & MPEG-4 header decode we have the height, width
     * and other parameters, so we can init the picture. */
    /* FIXME: the way the H.263 decoder is evolving, it should have an H263EncContext */
    if (   s->width  != avctx->coded_width
        || s->height != avctx->coded_height) {
        /* H.263 could change picture size any time */
        ParseContext pc= s->parse_context; //FIXME move this demuxing hack to libavformat
        s->parse_context.buffer=0;
        MPV_common_end(s);
        s->parse_context= pc;
    }
    if (!s->context_initialized) {
        avcodec_set_dimensions(avctx, s->width, s->height);

        goto retry;
    }

    if((s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P))
        s->gob_index = ff_h263_get_gob_height(s);

    // for hurry_up==5
    s->current_picture.pict_type= s->pict_type;
    s->current_picture.key_frame= s->pict_type == FF_I_TYPE;

    /* skip B-frames if we don't have reference frames */
    if(s->last_picture_ptr==NULL && (s->pict_type==FF_B_TYPE || s->dropable))
        return get_consumed_bytes(s, buf_size);
    /* skip B-frames if we are in a hurry */
    if(avctx->hurry_up && s->pict_type==FF_B_TYPE)
        return get_consumed_bytes(s, buf_size);
    if(   (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
       || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
       ||  avctx->skip_frame >= AVDISCARD_ALL)
        return get_consumed_bytes(s, buf_size);
    /* skip everything if we are in a hurry>=5 */
    if(avctx->hurry_up>=5)
        return get_consumed_bytes(s, buf_size);

    if(s->next_p_frame_damaged){
        if(s->pict_type==FF_B_TYPE)
            return get_consumed_bytes(s, buf_size);
        else
            s->next_p_frame_damaged=0;
    }

    if((s->avctx->flags2 & CODEC_FLAG2_FAST) && s->pict_type==FF_B_TYPE){
        s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
        s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
    }else if((!s->no_rounding) || s->pict_type==FF_B_TYPE){
        s->me.qpel_put= s->dsp.put_qpel_pixels_tab;
        s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
    }else{
        s->me.qpel_put= s->dsp.put_no_rnd_qpel_pixels_tab;
        s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
    }

    if(MPV_frame_start(s, avctx) < 0)
        return -1;

    /* decode each macroblock */
    s->mb_x=0;
    s->mb_y=0;

    decode_slice(s);
    while(s->mb_y<s->mb_height){
        if(ff_h263_resync(s)<0)
            break;

        decode_slice(s);
    }

intrax8_decoded:
    MPV_frame_end(s);

    assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
    assert(s->current_picture.pict_type == s->pict_type);
    if (s->pict_type == FF_B_TYPE || s->low_delay) {
        *pict= *(AVFrame*)s->current_picture_ptr;
    } else if (s->last_picture_ptr != NULL) {
        *pict= *(AVFrame*)s->last_picture_ptr;
    }

    if(s->last_picture_ptr || s->low_delay){
        *data_size = sizeof(AVFrame);
        ff_print_debug_info(s, pict);
    }

    /* Return the picture timestamp as the frame number;
     * we subtract 1 because it is added in utils.c. */
    avctx->frame_number = s->picture_number - 1;

#ifdef PRINT_FRAME_TIME
    av_log(avctx, AV_LOG_DEBUG, "%"PRId64"\n", rdtsc()-time);
#endif

    return get_consumed_bytes(s, buf_size);
}