static av_cold int utvideo_encode_init(AVCodecContext *avctx)
{
    UtVideoContext *utv = (UtVideoContext *)avctx->priv_data;
    UtVideoExtra *info;
    uint32_t flags, in_format;
    int ret;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
        in_format = UTVF_YV12;
        avctx->bits_per_coded_sample = 12;
        if (avctx->colorspace == AVCOL_SPC_BT709)
            avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
        else
            avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
        break;
    case AV_PIX_FMT_YUYV422:
        in_format = UTVF_YUYV;
        avctx->bits_per_coded_sample = 16;
        if (avctx->colorspace == AVCOL_SPC_BT709)
            avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
        else
            avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
        break;
    case AV_PIX_FMT_BGR24:
        in_format = UTVF_NFCC_BGR_BU;
        avctx->bits_per_coded_sample = 24;
        avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
        break;
    case AV_PIX_FMT_RGB32:
        in_format = UTVF_NFCC_BGRA_BU;
        avctx->bits_per_coded_sample = 32;
        avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
        break;
    default:
        return AVERROR(EINVAL);
    }

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->prediction_method)
        utv->pred = avctx->prediction_method;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    /* Check before we alloc anything */
    if (utv->pred != 0 && utv->pred != 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid prediction method.\n");
        return AVERROR(EINVAL);
    }

    flags = ((utv->pred + 1) << 8) | (avctx->thread_count - 1);

    avctx->priv_data = utv;

    /* Alloc extradata buffer */
    info = (UtVideoExtra *)av_malloc(sizeof(*info));
    if (!info) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata buffer.\n");
        return AVERROR(ENOMEM);
    }

    /*
     * We use this buffer to hold the data that Ut Video returns,
     * since we cannot decode planes separately with it.
     */
    ret = av_image_get_buffer_size(avctx->pix_fmt, avctx->width, avctx->height, 1);
    if (ret < 0) {
        av_free(info);
        return ret;
    }
    utv->buf_size = ret;

    utv->buffer = (uint8_t *)av_malloc(utv->buf_size);
    if (utv->buffer == NULL) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate output buffer.\n");
        av_free(info);
        return AVERROR(ENOMEM);
    }

    /*
     * Create a Ut Video instance. Since the function wants
     * an "interface name" string, pass it the name of the lib.
     */
    utv->codec = CCodec::CreateInstance(UNFCC(avctx->codec_tag), "libavcodec");

    /* Initialize encoder */
    utv->codec->EncodeBegin(in_format, avctx->width, avctx->height,
                            CBGROSSWIDTH_WINDOWS);

    /* Get extradata from encoder */
    avctx->extradata_size = utv->codec->EncodeGetExtraDataSize();
    utv->codec->EncodeGetExtraData(info, avctx->extradata_size, in_format,
                                   avctx->width, avctx->height,
                                   CBGROSSWIDTH_WINDOWS);
    avctx->extradata = (uint8_t *)info;

    /* Set flags */
    utv->codec->SetState(&flags, sizeof(flags));

    return 0;
}
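/*
 * Added note (a hedged sketch, not part of the original source): the
 * "flags" word passed to SetState() above appears to pack the prediction
 * mode into the second byte and the worker thread count (minus one) into
 * the low byte. Illustration only; no claim is made about the remaining
 * bits of the Ut Video state word.
 */
static uint32_t utv_pack_flags_example(int pred, int thread_count)
{
    /* pred is 0 (left) or 2 (median) in the wrapper above;
     * Ut Video stores pred + 1 */
    return (uint32_t)((pred + 1) << 8) | (uint32_t)(thread_count - 1);
}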
static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    struct video_data *s = s1->priv_data;
    AVStream *st;
    int res;
    uint32_t desired_format, capabilities;
    enum CodecID codec_id;

    st = av_new_stream(s1, 0);
    if (!st) {
        return AVERROR(ENOMEM);
    }
    av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    s->width  = ap->width;
    s->height = ap->height;

    capabilities = 0;
    s->fd = device_open(s1, &capabilities);
    if (s->fd < 0) {
        return AVERROR(EIO);
    }
    av_log(s1, AV_LOG_VERBOSE, "[%d]Capabilities: %x\n", s->fd, capabilities);

    if (!s->width && !s->height) {
        struct v4l2_format fmt;

        av_log(s1, AV_LOG_VERBOSE, "Querying the device for the current frame size\n");
        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        if (ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
            av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", strerror(errno));
            return AVERROR(errno);
        }
        s->width  = fmt.fmt.pix.width;
        s->height = fmt.fmt.pix.height;
        av_log(s1, AV_LOG_VERBOSE, "Setting frame size to %dx%d\n",
               s->width, s->height);
    }

    desired_format = device_try_init(s1, ap, &s->width, &s->height, &codec_id);
    if (desired_format == 0) {
        av_log(s1, AV_LOG_ERROR, "Cannot find a proper format for "
               "codec_id %d, pix_fmt %d.\n", s1->video_codec_id, ap->pix_fmt);
        close(s->fd);
        return AVERROR(EIO);
    }
    if (av_image_check_size(s->width, s->height, 0, s1) < 0)
        return AVERROR(EINVAL);
    s->frame_format = desired_format;

    if (v4l2_set_parameters(s1, ap) < 0)
        return AVERROR(EIO);

    st->codec->pix_fmt = fmt_v4l2ff(desired_format, codec_id);
    s->frame_size = avpicture_get_size(st->codec->pix_fmt, s->width, s->height);

    if (capabilities & V4L2_CAP_STREAMING) {
        s->io_method = io_mmap;
        res = mmap_init(s1);
        if (res == 0) {
            res = mmap_start(s1);
        }
    } else {
        s->io_method = io_read;
        res = read_init(s1);
    }
    if (res < 0) {
        close(s->fd);
        return AVERROR(EIO);
    }
    s->top_field_first = first_field(s->fd);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = codec_id;
    st->codec->width      = s->width;
    st->codec->height     = s->height;
    st->codec->time_base.den = ap->time_base.den;
    st->codec->time_base.num = ap->time_base.num;
    st->codec->bit_rate = s->frame_size * 1 / av_q2d(st->codec->time_base) * 8;

    return 0;
}
static int rv20_decode_picture_header(MpegEncContext *s)
{
    int seq, mb_pos, i;

#if 0
    GetBitContext gb = s->gb;
    for (i = 0; i < 64; i++) {
        av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&gb));
        if (i % 4 == 3)
            av_log(s->avctx, AV_LOG_DEBUG, " ");
    }
    av_log(s->avctx, AV_LOG_DEBUG, "\n");
#endif
#if 0
    av_log(s->avctx, AV_LOG_DEBUG, "%3dx%03d/%02Xx%02X ",
           s->width, s->height, s->width / 4, s->height / 4);
    for (i = 0; i < s->avctx->extradata_size; i++) {
        av_log(s->avctx, AV_LOG_DEBUG, "%02X ",
               ((uint8_t *)s->avctx->extradata)[i]);
        if (i % 4 == 3)
            av_log(s->avctx, AV_LOG_DEBUG, " ");
    }
    av_log(s->avctx, AV_LOG_DEBUG, "\n");
#endif

    if (s->avctx->sub_id == 0x30202002 || s->avctx->sub_id == 0x30203002) {
        if (get_bits(&s->gb, 3)) {
            av_log(s->avctx, AV_LOG_ERROR, "unknown triplet set\n");
            return -1;
        }
    }

    i = get_bits(&s->gb, 2);
    switch (i) {
    case 0: s->pict_type = FF_I_TYPE; break;
    case 1: s->pict_type = FF_I_TYPE; break; // hmm ...
    case 2: s->pict_type = FF_P_TYPE; break;
    case 3: s->pict_type = FF_B_TYPE; break;
    default:
        av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n");
        return -1;
    }

    if (s->last_picture_ptr == NULL && s->pict_type == FF_B_TYPE) {
        av_log(s->avctx, AV_LOG_ERROR, "early B pix\n");
        return -1;
    }

    if (get_bits1(&s->gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "unknown bit set\n");
        return -1;
    }

    s->qscale = get_bits(&s->gb, 5);
    if (s->qscale == 0) {
        av_log(s->avctx, AV_LOG_ERROR, "error, qscale:0\n");
        return -1;
    }

    if (s->avctx->sub_id == 0x30203002) {
        if (get_bits1(&s->gb)) {
            av_log(s->avctx, AV_LOG_ERROR, "unknown bit2 set\n");
            return -1;
        }
    }

    if (s->avctx->has_b_frames) {
        int f, new_w, new_h;
        int v = s->avctx->extradata_size >= 4 ?
                7 & ((uint8_t *)s->avctx->extradata)[1] : 0;

        if (get_bits1(&s->gb)) {
            av_log(s->avctx, AV_LOG_ERROR, "unknown bit3 set\n");
        }
        seq = get_bits(&s->gb, 13) << 2;

        f = get_bits(&s->gb, av_log2(v) + 1);

        if (f) {
            new_w = 4 * ((uint8_t *)s->avctx->extradata)[6 + 2 * f];
            new_h = 4 * ((uint8_t *)s->avctx->extradata)[7 + 2 * f];
        } else {
            new_w = s->orig_width;
            new_h = s->orig_height;
        }
        if (new_w != s->width || new_h != s->height) {
            av_log(s->avctx, AV_LOG_DEBUG,
                   "attempting to change resolution to %dx%d\n", new_w, new_h);
            if (avcodec_check_dimensions(s->avctx, new_w, new_h) < 0)
                return -1;
            MPV_common_end(s);
            avcodec_set_dimensions(s->avctx, new_w, new_h);
            s->width  = new_w;
            s->height = new_h;
            if (MPV_common_init(s) < 0)
                return -1;
        }

        if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
            av_log(s->avctx, AV_LOG_DEBUG, "F %d/%d\n", f, v);
        }
    } else {
        seq = get_bits(&s->gb, 8) * 128;
    }

//    if (s->avctx->sub_id <= 0x20201002) { // 0x20201002 definitely needs this
    mb_pos = ff_h263_decode_mba(s);
/*    } else {
        mb_pos = get_bits(&s->gb, av_log2(s->mb_num - 1) + 1);
        s->mb_x = mb_pos % s->mb_width;
        s->mb_y = mb_pos / s->mb_width;
    }*/
//av_log(s->avctx, AV_LOG_DEBUG, "%d\n", seq);
    seq |= s->time & ~0x7FFF;
    if (seq - s->time >  0x4000) seq -= 0x8000;
    if (seq - s->time < -0x4000) seq += 0x8000;
    if (seq != s->time) {
        if (s->pict_type != FF_B_TYPE) {
            s->time            = seq;
            s->pp_time         = s->time - s->last_non_b_time;
            s->last_non_b_time = s->time;
        } else {
            s->time    = seq;
            s->pb_time = s->pp_time - (s->last_non_b_time - s->time);
            if (s->pp_time <= s->pb_time ||
                s->pp_time <= s->pp_time - s->pb_time ||
                s->pp_time <= 0) {
                av_log(s->avctx, AV_LOG_DEBUG,
                       "messed up order, possible from seeking? skipping current b frame\n");
                return FRAME_SKIPPED;
            }
            ff_mpeg4_init_direct_mv(s);
        }
    }
//    printf("%d %d %d %d %d\n", seq, (int)s->time, (int)s->last_non_b_time, s->pp_time, s->pb_time);
/*    for (i = 0; i < 32; i++) {
        av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
    }
    av_log(s->avctx, AV_LOG_DEBUG, "\n");*/
    s->no_rounding = get_bits1(&s->gb);

    s->f_code          = 1;
    s->unrestricted_mv = 1;
    s->h263_aic        = s->pict_type == FF_I_TYPE;
//    s->alt_inter_vlc = 1;
//    s->obmc = 1;
//    s->umvplus = 1;
    s->modified_quant = 1;
    if (!s->avctx->lowres)
        s->loop_filter = 1;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
        av_log(s->avctx, AV_LOG_INFO,
               "num:%5d x:%2d y:%2d type:%d qscale:%2d rnd:%d\n",
               seq, s->mb_x, s->mb_y, s->pict_type, s->qscale, s->no_rounding);
    }

    assert(s->pict_type != FF_B_TYPE || !s->low_delay);

    return s->mb_width * s->mb_height - mb_pos;
}
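/*
 * Added note (hedged commentary, not from the original source): "seq"
 * above is a 13-bit field scaled by 4, i.e. effectively a 15-bit
 * timestamp. The code splices it into the running clock by keeping the
 * high bits of s->time and then correcting for wraparound:
 *
 *     t = (now & ~0x7FFF) | seq15;         // assume the same 32k window
 *     if (t - now >  0x4000) t -= 0x8000;  // seq15 was from the previous window
 *     if (t - now < -0x4000) t += 0x8000;  // seq15 was from the next window
 */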
static int ffmpeg_decode_video_frame(struct anim *anim)
{
    int rval = 0;

    av_log(anim->pFormatCtx, AV_LOG_DEBUG, " DECODE VIDEO FRAME\n");

    if (anim->next_packet.stream_index == anim->videoStream) {
        av_free_packet(&anim->next_packet);
        anim->next_packet.stream_index = -1;
    }

    while ((rval = av_read_frame(anim->pFormatCtx, &anim->next_packet)) >= 0) {
        av_log(anim->pFormatCtx, AV_LOG_DEBUG,
               "%sREAD: strID=%d (VID: %d) dts=%lld pts=%lld %s\n",
               (anim->next_packet.stream_index == anim->videoStream) ? "->" : " ",
               anim->next_packet.stream_index,
               anim->videoStream,
               (anim->next_packet.dts == AV_NOPTS_VALUE) ? -1 :
                   (long long int)anim->next_packet.dts,
               (anim->next_packet.pts == AV_NOPTS_VALUE) ? -1 :
                   (long long int)anim->next_packet.pts,
               (anim->next_packet.flags & AV_PKT_FLAG_KEY) ? " KEY" : "");
        if (anim->next_packet.stream_index == anim->videoStream) {
            anim->pFrameComplete = 0;

            avcodec_decode_video2(anim->pCodecCtx, anim->pFrame,
                                  &anim->pFrameComplete, &anim->next_packet);

            if (anim->pFrameComplete) {
                anim->next_pts = av_get_pts_from_frame(anim->pFormatCtx,
                                                       anim->pFrame);

                av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                       " FRAME DONE: next_pts=%lld pkt_pts=%lld, guessed_pts=%lld\n",
                       (anim->pFrame->pts == AV_NOPTS_VALUE) ? -1 :
                           (long long int)anim->pFrame->pts,
                       (anim->pFrame->pkt_pts == AV_NOPTS_VALUE) ? -1 :
                           (long long int)anim->pFrame->pkt_pts,
                       (long long int)anim->next_pts);
                break;
            }
        }
        av_free_packet(&anim->next_packet);
        anim->next_packet.stream_index = -1;
    }

    if (rval == AVERROR_EOF) {
        /* this sets size and data fields to zero,
         * which is necessary to decode the remaining data
         * in the decoder engine after EOF. It also prevents a memory
         * leak, since av_read_frame spills out a full size packet even
         * on EOF... (and: it's safe to call on NULL packets) */
        av_free_packet(&anim->next_packet);
        anim->next_packet.size = 0;
        anim->next_packet.data = 0;

        anim->pFrameComplete = 0;

        avcodec_decode_video2(anim->pCodecCtx, anim->pFrame,
                              &anim->pFrameComplete, &anim->next_packet);

        if (anim->pFrameComplete) {
            anim->next_pts = av_get_pts_from_frame(anim->pFormatCtx, anim->pFrame);

            av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                   " FRAME DONE (after EOF): next_pts=%lld pkt_pts=%lld, guessed_pts=%lld\n",
                   (anim->pFrame->pts == AV_NOPTS_VALUE) ? -1 :
                       (long long int)anim->pFrame->pts,
                   (anim->pFrame->pkt_pts == AV_NOPTS_VALUE) ? -1 :
                       (long long int)anim->pFrame->pkt_pts,
                   (long long int)anim->next_pts);
            rval = 0;
        }
    }

    if (rval < 0) {
        anim->next_packet.stream_index = -1;

        av_log(anim->pFormatCtx, AV_LOG_ERROR,
               " DECODE READ FAILED: av_read_frame() returned error: %d\n",
               rval);
    }

    return (rval >= 0);
}
static int mmap_init(AVFormatContext *ctx)
{
    struct video_data *s = ctx->priv_data;
    struct v4l2_requestbuffers req;
    int i, res;

    memset(&req, 0, sizeof(struct v4l2_requestbuffers));
    req.count  = desired_video_buffers;
    req.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    res = ioctl(s->fd, VIDIOC_REQBUFS, &req);
    if (res < 0) {
        if (errno == EINVAL) {
            av_log(ctx, AV_LOG_ERROR, "Device does not support mmap\n");
        } else {
            av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS)\n");
        }
        return AVERROR(errno);
    }

    if (req.count < 2) {
        av_log(ctx, AV_LOG_ERROR, "Insufficient buffer memory\n");
        return AVERROR(ENOMEM);
    }
    s->buffers = req.count;
    s->buf_start = av_malloc(sizeof(void *) * s->buffers);
    if (s->buf_start == NULL) {
        av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer pointers\n");
        return AVERROR(ENOMEM);
    }
    s->buf_len = av_malloc(sizeof(unsigned int) * s->buffers);
    if (s->buf_len == NULL) {
        av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer sizes\n");
        av_free(s->buf_start);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < req.count; i++) {
        struct v4l2_buffer buf;

        memset(&buf, 0, sizeof(struct v4l2_buffer));
        buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index  = i;
        res = ioctl(s->fd, VIDIOC_QUERYBUF, &buf);
        if (res < 0) {
            av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF)\n");
            return AVERROR(errno);
        }

        s->buf_len[i] = buf.length;
        if (s->frame_size > 0 && s->buf_len[i] < s->frame_size) {
            av_log(ctx, AV_LOG_ERROR, "Buffer len [%d] = %d != %d\n",
                   i, s->buf_len[i], s->frame_size);
            return -1;
        }
        s->buf_start[i] = mmap(NULL, buf.length,
                               PROT_READ | PROT_WRITE, MAP_SHARED,
                               s->fd, buf.m.offset);
        if (s->buf_start[i] == MAP_FAILED) {
            av_log(ctx, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
            return AVERROR(errno);
        }
    }

    return 0;
}
/* Decodes delta frames */
static void qpeg_decode_inter(QpegContext *qctx, uint8_t *dst,
                              int stride, int width, int height,
                              int delta, const uint8_t *ctable,
                              uint8_t *refdata)
{
    int i, j;
    int code;
    int filled = 0;
    int orig_height;

    /* copy prev frame */
    for (i = 0; i < height; i++)
        memcpy(refdata + (i * width), dst + (i * stride), width);

    orig_height = height;
    height--;
    dst = dst + height * stride;

    while ((bytestream2_get_bytes_left(&qctx->buffer) > 0) && (height >= 0)) {
        code = bytestream2_get_byte(&qctx->buffer);

        if (delta) {
            /* motion compensation */
            while ((code & 0xF0) == 0xF0) {
                if (delta == 1) {
                    int me_idx;
                    int me_w, me_h, me_x, me_y;
                    uint8_t *me_plane;
                    int corr, val;

                    /* get block size by index */
                    me_idx = code & 0xF;
                    me_w   = qpeg_table_w[me_idx];
                    me_h   = qpeg_table_h[me_idx];

                    /* extract motion vector */
                    corr = bytestream2_get_byte(&qctx->buffer);

                    val = corr >> 4;
                    if (val > 7)
                        val -= 16;
                    me_x = val;

                    val = corr & 0xF;
                    if (val > 7)
                        val -= 16;
                    me_y = val;

                    /* check motion vector */
                    if ((me_x + filled < 0) || (me_x + me_w + filled > width) ||
                        (height - me_y - me_h < 0) || (height - me_y > orig_height) ||
                        (filled + me_w > width) || (height - me_h < 0))
                        av_log(NULL, AV_LOG_ERROR,
                               "Bogus motion vector (%i,%i), block size %ix%i at %i,%i\n",
                               me_x, me_y, me_w, me_h, filled, height);
                    else {
                        /* do motion compensation */
                        me_plane = refdata + (filled + me_x) + (height - me_y) * width;
                        for (j = 0; j < me_h; j++) {
                            for (i = 0; i < me_w; i++)
                                dst[filled + i - (j * stride)] = me_plane[i - (j * width)];
                        }
                    }
                }
                code = bytestream2_get_byte(&qctx->buffer);
            }
        }

        if (code == 0xE0) /* end-of-picture code */
            break;
        if (code > 0xE0) { /* run code: 0xE1..0xFF */
            int p;

            code &= 0x1F;
            p = bytestream2_get_byte(&qctx->buffer);
            for (i = 0; i <= code; i++) {
                dst[filled++] = p;
                if (filled >= width) {
                    filled = 0;
                    dst -= stride;
                    height--;
                    if (height < 0)
                        break;
                }
            }
        } else if (code >= 0xC0) { /* copy code: 0xC0..0xDF */
            code &= 0x1F;

            for (i = 0; i <= code; i++) {
                dst[filled++] = bytestream2_get_byte(&qctx->buffer);
                if (filled >= width) {
                    filled = 0;
                    dst -= stride;
                    height--;
                    if (height < 0)
                        break;
                }
            }
        } else if (code >= 0x80) { /* skip code: 0x80..0xBF */
            int skip;

            code &= 0x3F;
            /* codes 0x80 and 0x81 are actually escape codes,
               skip value minus constant is in the next byte */
            if (!code)
                skip = bytestream2_get_byte(&qctx->buffer) + 64;
            else if (code == 1)
                skip = bytestream2_get_byte(&qctx->buffer) + 320;
            else
                skip = code;
            filled += skip;
            while (filled >= width) {
                filled -= width;
                dst -= stride;
                height--;
                if (height < 0)
                    break;
            }
        } else {
            /* zero code treated as one-pixel skip */
            if (code) {
                dst[filled++] = ctable[code & 0x7F];
            } else
                filled++;
            if (filled >= width) {
                filled = 0;
                dst -= stride;
                height--;
            }
        }
    }
}
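/*
 * Added summary (hedged commentary derived from the decoder above, not
 * from the original source) of the QPEG opcode ranges as this code reads
 * them:
 *
 *   0xE0        end of picture
 *   0xE1..0xFF  run:  emit (code & 0x1F) + 1 copies of the next byte
 *   0xC0..0xDF  copy: emit (code & 0x1F) + 1 literal bytes
 *   0x82..0xBF  skip: advance (code & 0x3F) pixels
 *   0x80, 0x81  escaped skips: next byte + 64, next byte + 320
 *   0x01..0x7F  single pixel looked up through ctable[]
 *   0x00        one-pixel skip
 *
 * When delta == 1, any byte with the top nibble 0xF first acts as a
 * motion-compensation prefix before one of the codes above is read.
 */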
static int fourxm_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int header_size;
    FourxmDemuxContext *fourxm = s->priv_data;
    unsigned char *header;
    int i, ret;
    AVStream *st;

    fourxm->track_count = 0;
    fourxm->tracks      = NULL;
    fourxm->fps         = 1.0;

    /* skip the first 3 32-bit numbers */
    avio_skip(pb, 12);

    /* check for LIST-HEAD */
    GET_LIST_HEADER();
    header_size = size - 4;
    if (fourcc_tag != HEAD_TAG || header_size < 0)
        return AVERROR_INVALIDDATA;

    /* allocate space for the header and load the whole thing */
    header = av_malloc(header_size);
    if (!header)
        return AVERROR(ENOMEM);
    if (avio_read(pb, header, header_size) != header_size) {
        av_free(header);
        return AVERROR(EIO);
    }

    /* take the lazy approach and search for any and all vtrk and strk chunks */
    for (i = 0; i < header_size - 8; i++) {
        fourcc_tag = AV_RL32(&header[i]);
        size       = AV_RL32(&header[i + 4]);

        if (fourcc_tag == std__TAG) {
            fourxm->fps = av_int2flt(AV_RL32(&header[i + 12]));
        } else if (fourcc_tag == vtrk_TAG) {
            /* check that there is enough data */
            if (size != vtrk_SIZE) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            fourxm->width  = AV_RL32(&header[i + 36]);
            fourxm->height = AV_RL32(&header[i + 40]);

            /* allocate a new AVStream */
            st = avformat_new_stream(s, NULL);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            av_set_pts_info(st, 60, 1, fourxm->fps);

            fourxm->video_stream_index = st->index;

            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codec->codec_id   = CODEC_ID_4XM;
            st->codec->extradata_size = 4;
            st->codec->extradata = av_malloc(4);
            AV_WL32(st->codec->extradata, AV_RL32(&header[i + 16]));
            st->codec->width  = fourxm->width;
            st->codec->height = fourxm->height;

            i += 8 + size;
        } else if (fourcc_tag == strk_TAG) {
            int current_track;

            /* check that there is enough data */
            if (size != strk_SIZE) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            current_track = AV_RL32(&header[i + 8]);
            if ((unsigned)current_track >= UINT_MAX / sizeof(AudioTrack) - 1) {
                av_log(s, AV_LOG_ERROR, "current_track too large\n");
                ret = -1;
                goto fail;
            }
            if (current_track + 1 > fourxm->track_count) {
                fourxm->tracks = av_realloc_f(fourxm->tracks,
                                              sizeof(AudioTrack),
                                              current_track + 1);
                if (!fourxm->tracks) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                memset(&fourxm->tracks[fourxm->track_count], 0,
                       sizeof(AudioTrack) * (current_track + 1 - fourxm->track_count));
                fourxm->track_count = current_track + 1;
            }
            fourxm->tracks[current_track].adpcm       = AV_RL32(&header[i + 12]);
            fourxm->tracks[current_track].channels    = AV_RL32(&header[i + 36]);
            fourxm->tracks[current_track].sample_rate = AV_RL32(&header[i + 40]);
            fourxm->tracks[current_track].bits        = AV_RL32(&header[i + 44]);
            fourxm->tracks[current_track].audio_pts   = 0;
            if (fourxm->tracks[current_track].channels    <= 0 ||
                fourxm->tracks[current_track].sample_rate <= 0 ||
                fourxm->tracks[current_track].bits        <  0) {
                av_log(s, AV_LOG_ERROR, "audio header invalid\n");
                ret = -1;
                goto fail;
            }
            i += 8 + size;

            /* allocate a new AVStream */
            st = avformat_new_stream(s, NULL);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            st->id = current_track;
            av_set_pts_info(st, 60, 1, fourxm->tracks[current_track].sample_rate);

            fourxm->tracks[current_track].stream_index = st->index;

            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            st->codec->codec_tag  = 0;
            st->codec->channels              = fourxm->tracks[current_track].channels;
            st->codec->sample_rate           = fourxm->tracks[current_track].sample_rate;
            st->codec->bits_per_coded_sample = fourxm->tracks[current_track].bits;
            st->codec->bit_rate              = st->codec->channels *
                                               st->codec->sample_rate *
                                               st->codec->bits_per_coded_sample;
            st->codec->block_align = st->codec->channels *
                                     st->codec->bits_per_coded_sample;
            if (fourxm->tracks[current_track].adpcm) {
                st->codec->codec_id = CODEC_ID_ADPCM_4XM;
            } else if (st->codec->bits_per_coded_sample == 8) {
                st->codec->codec_id = CODEC_ID_PCM_U8;
            } else
                st->codec->codec_id = CODEC_ID_PCM_S16LE;
        }
    }

    /* skip over the LIST-MOVI chunk (which is where the stream should be) */
    GET_LIST_HEADER();
    if (fourcc_tag != MOVI_TAG) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    av_free(header);

    /* initialize context members */
    fourxm->video_pts = -1; /* first frame will push to 0 */

    return 0;

fail:
    av_freep(&fourxm->tracks);
    av_free(header);
    return ret;
}
int ff_flv_decode_picture_header(MpegEncContext *s)
{
    int format, width, height;

    /* picture header */
    if (get_bits_long(&s->gb, 17) != 1) {
        av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
        return AVERROR_INVALIDDATA;
    }
    format = get_bits(&s->gb, 5);
    if (format != 0 && format != 1) {
        av_log(s->avctx, AV_LOG_ERROR, "Bad picture format\n");
        return AVERROR_INVALIDDATA;
    }
    s->h263_flv       = format + 1;
    s->picture_number = get_bits(&s->gb, 8); /* picture timestamp */
    format            = get_bits(&s->gb, 3);
    switch (format) {
    case 0:
        width  = get_bits(&s->gb, 8);
        height = get_bits(&s->gb, 8);
        break;
    case 1:
        width  = get_bits(&s->gb, 16);
        height = get_bits(&s->gb, 16);
        break;
    case 2:
        width  = 352;
        height = 288;
        break;
    case 3:
        width  = 176;
        height = 144;
        break;
    case 4:
        width  = 128;
        height = 96;
        break;
    case 5:
        width  = 320;
        height = 240;
        break;
    case 6:
        width  = 160;
        height = 120;
        break;
    default:
        width = height = 0;
        break;
    }
    if (av_image_check_size(width, height, 0, s->avctx))
        return AVERROR(EINVAL);
    s->width  = width;
    s->height = height;

    s->pict_type = AV_PICTURE_TYPE_I + get_bits(&s->gb, 2);
    s->droppable = s->pict_type > AV_PICTURE_TYPE_P;
    if (s->droppable)
        s->pict_type = AV_PICTURE_TYPE_P;

    skip_bits1(&s->gb); /* deblocking flag */
    s->chroma_qscale = s->qscale = get_bits(&s->gb, 5);

    s->h263_plus = 0;

    s->unrestricted_mv   = 1;
    s->h263_long_vectors = 0;

    /* PEI */
    if (skip_1stop_8data_bits(&s->gb) < 0)
        return AVERROR_INVALIDDATA;

    s->f_code = 1;

    if (s->ehc_mode)
        s->avctx->sample_aspect_ratio = (AVRational){ 1, 2 };

    if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
        av_log(s->avctx, AV_LOG_DEBUG, "%c esc_type:%d, qp:%d num:%d\n",
               s->droppable ? 'D' : av_get_picture_type_char(s->pict_type),
               s->h263_flv - 1, s->qscale, s->picture_number);
    }

    s->y_dc_scale_table = s->c_dc_scale_table = ff_mpeg1_dc_scale_table;

    return 0;
}
static int ogg_read_page(AVFormatContext *s, int *str)
{
    AVIOContext *bc = s->pb;
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os;
    int i = 0;
    int flags, nsegs;
    uint64_t gp;
    uint32_t serial;
    uint32_t seq;
    uint32_t crc;
    int size, idx;
    uint8_t sync[4];
    int sp = 0;

    if (avio_read(bc, sync, 4) < 4)
        return -1;

    do {
        int c;

        if (sync[sp & 3]       == 'O' &&
            sync[(sp + 1) & 3] == 'g' &&
            sync[(sp + 2) & 3] == 'g' &&
            sync[(sp + 3) & 3] == 'S')
            break;

        c = avio_r8(bc);
        if (url_feof(bc))
            return -1;
        sync[sp++ & 3] = c;
    } while (i++ < MAX_PAGE_SIZE);

    if (i >= MAX_PAGE_SIZE) {
        av_log(s, AV_LOG_INFO, "ogg, can't find sync word\n");
        return -1;
    }

    if (avio_r8(bc) != 0) /* version */
        return -1;

    flags  = avio_r8(bc);
    gp     = avio_rl64(bc);
    serial = avio_rl32(bc);
    seq    = avio_rl32(bc);
    crc    = avio_rl32(bc);
    nsegs  = avio_r8(bc);

    idx = ogg_find_stream(ogg, serial);
    if (idx < 0) {
        if (ogg->headers) {
            int n;

            for (n = 0; n < ogg->nstreams; n++) {
                av_freep(&ogg->streams[n].buf);
                av_freep(&ogg->streams[n].private);
            }
            ogg->curidx   = -1;
            ogg->nstreams = 0;
        }
        idx = ogg_new_stream(s, serial);
        if (idx < 0)
            return -1;
    }
static void ape_dumpinfo(AVFormatContext *s, APEContext *ape_ctx)
{
#if ENABLE_DEBUG
    int i;

    av_log(s, AV_LOG_DEBUG, "Descriptor Block:\n\n");
    av_log(s, AV_LOG_DEBUG, "magic = \"%c%c%c%c\"\n",
           ape_ctx->magic[0], ape_ctx->magic[1],
           ape_ctx->magic[2], ape_ctx->magic[3]);
    av_log(s, AV_LOG_DEBUG, "fileversion = %d\n", ape_ctx->fileversion);
    av_log(s, AV_LOG_DEBUG, "descriptorlength = %d\n", ape_ctx->descriptorlength);
    av_log(s, AV_LOG_DEBUG, "headerlength = %d\n", ape_ctx->headerlength);
    av_log(s, AV_LOG_DEBUG, "seektablelength = %d\n", ape_ctx->seektablelength);
    av_log(s, AV_LOG_DEBUG, "wavheaderlength = %d\n", ape_ctx->wavheaderlength);
    av_log(s, AV_LOG_DEBUG, "audiodatalength = %d\n", ape_ctx->audiodatalength);
    av_log(s, AV_LOG_DEBUG, "audiodatalength_high = %d\n", ape_ctx->audiodatalength_high);
    av_log(s, AV_LOG_DEBUG, "wavtaillength = %d\n", ape_ctx->wavtaillength);
    av_log(s, AV_LOG_DEBUG, "md5 = ");
    for (i = 0; i < 16; i++)
        av_log(s, AV_LOG_DEBUG, "%02x", ape_ctx->md5[i]);
    av_log(s, AV_LOG_DEBUG, "\n");

    av_log(s, AV_LOG_DEBUG, "\nHeader Block:\n\n");
    av_log(s, AV_LOG_DEBUG, "compressiontype = %d\n", ape_ctx->compressiontype);
    av_log(s, AV_LOG_DEBUG, "formatflags = %d\n", ape_ctx->formatflags);
    av_log(s, AV_LOG_DEBUG, "blocksperframe = %d\n", ape_ctx->blocksperframe);
    av_log(s, AV_LOG_DEBUG, "finalframeblocks = %d\n", ape_ctx->finalframeblocks);
    av_log(s, AV_LOG_DEBUG, "totalframes = %d\n", ape_ctx->totalframes);
    av_log(s, AV_LOG_DEBUG, "bps = %d\n", ape_ctx->bps);
    av_log(s, AV_LOG_DEBUG, "channels = %d\n", ape_ctx->channels);
    av_log(s, AV_LOG_DEBUG, "samplerate = %d\n", ape_ctx->samplerate);

    av_log(s, AV_LOG_DEBUG, "\nSeektable\n\n");
    if ((ape_ctx->seektablelength / sizeof(uint32_t)) != ape_ctx->totalframes) {
        av_log(s, AV_LOG_DEBUG, "No seektable\n");
    } else {
        for (i = 0; i < ape_ctx->seektablelength / sizeof(uint32_t); i++) {
            if (i < ape_ctx->totalframes - 1) {
                av_log(s, AV_LOG_DEBUG, "%8d %d (%d bytes)\n",
                       i, ape_ctx->seektable[i],
                       ape_ctx->seektable[i + 1] - ape_ctx->seektable[i]);
            } else {
                av_log(s, AV_LOG_DEBUG, "%8d %d\n", i, ape_ctx->seektable[i]);
            }
        }
    }

    av_log(s, AV_LOG_DEBUG, "\nFrames\n\n");
    for (i = 0; i < ape_ctx->totalframes; i++)
        av_log(s, AV_LOG_DEBUG, "%8d %8lld %8d (%d samples)\n",
               i, ape_ctx->frames[i].pos, ape_ctx->frames[i].size,
               ape_ctx->frames[i].nblocks);

    av_log(s, AV_LOG_DEBUG, "\nCalculated information:\n\n");
    av_log(s, AV_LOG_DEBUG, "junklength = %d\n", ape_ctx->junklength);
    av_log(s, AV_LOG_DEBUG, "firstframe = %d\n", ape_ctx->firstframe);
    av_log(s, AV_LOG_DEBUG, "totalsamples = %d\n", ape_ctx->totalsamples);
#endif
}
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    AVSubtitle *sub = data;
    const uint8_t *buf_end = buf + buf_size;
    uint8_t *bitmap;
    int w, h, x, y, rlelen, i;
    int64_t packet_time = 0;
    GetBitContext gb;
#ifdef _MSC_VER
    AVRational rational = { 1, 1000 };
#endif

    memset(sub, 0, sizeof(*sub));

    // check that at least header fits
    if (buf_size < 27 + 7 * 2 + 4 * 3) {
        av_log(avctx, AV_LOG_ERROR, "coded frame too small\n");
        return -1;
    }

    // read start and end time
    if (buf[0] != '[' || buf[13] != '-' || buf[26] != ']') {
        av_log(avctx, AV_LOG_ERROR, "invalid time code\n");
        return -1;
    }
    if (avpkt->pts != AV_NOPTS_VALUE)
#ifdef _MSC_VER
        packet_time = av_rescale_q(avpkt->pts, AV_TIME_BASE_Q, rational);
#else
        packet_time = av_rescale_q(avpkt->pts, AV_TIME_BASE_Q, (AVRational){ 1, 1000 });
#endif
    sub->start_display_time = parse_timecode(buf +  1, packet_time);
    sub->end_display_time   = parse_timecode(buf + 14, packet_time);
    buf += 27;

    // read header
    w = bytestream_get_le16(&buf);
    h = bytestream_get_le16(&buf);
    if (av_image_check_size(w, h, 0, avctx) < 0)
        return -1;
    x = bytestream_get_le16(&buf);
    y = bytestream_get_le16(&buf);
    // skip bottom right position, it gives no new information
    bytestream_get_le16(&buf);
    bytestream_get_le16(&buf);
    rlelen = bytestream_get_le16(&buf);

    // allocate sub and set values
    sub->rects     = av_mallocz(sizeof(*sub->rects));
    sub->rects[0]  = av_mallocz(sizeof(*sub->rects[0]));
    sub->num_rects = 1;
    sub->rects[0]->x = x;
    sub->rects[0]->y = y;
    sub->rects[0]->w = w;
    sub->rects[0]->h = h;
    sub->rects[0]->type = SUBTITLE_BITMAP;
    sub->rects[0]->pict.linesize[0] = w;
    sub->rects[0]->pict.data[0] = av_malloc(w * h);
    sub->rects[0]->nb_colors = 4;
    sub->rects[0]->pict.data[1] = av_mallocz(AVPALETTE_SIZE);

    // read palette
    for (i = 0; i < sub->rects[0]->nb_colors; i++)
        ((uint32_t *)sub->rects[0]->pict.data[1])[i] = bytestream_get_be24(&buf);
    // make all except background (first entry) non-transparent
    for (i = 1; i < sub->rects[0]->nb_colors; i++)
        ((uint32_t *)sub->rects[0]->pict.data[1])[i] |= 0xff000000;

    // process RLE-compressed data
    rlelen = FFMIN(rlelen, buf_end - buf);
    init_get_bits(&gb, buf, rlelen * 8);
    bitmap = sub->rects[0]->pict.data[0];
    for (y = 0; y < h; y++) {
        // interlaced: do odd lines
        if (y == (h + 1) / 2)
            bitmap = sub->rects[0]->pict.data[0] + w;
        for (x = 0; x < w; ) {
            int log2  = ff_log2_tab[show_bits(&gb, 8)];
            int run   = get_bits(&gb, 14 - 4 * (log2 >> 1));
            int color = get_bits(&gb, 2);
            run = FFMIN(run, w - x);
            // run length 0 means till end of row
            if (!run)
                run = w - x;
            memset(bitmap, color, run);
            bitmap += run;
            x += run;
        }
        // interlaced, skip every second line
        bitmap += w;
        align_get_bits(&gb);
    }
    *data_size = 1;
    return buf_size;
}
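/*
 * Added note (hedged commentary on the run-length code above, not from
 * the original source): the decoder peeks 8 bits and uses ff_log2_tab to
 * find the highest set bit, so the number of leading zero bits selects
 * the width of the run field that is then consumed:
 *
 *   leading zeros 0-1  ->  2-bit run
 *   leading zeros 2-3  ->  6-bit run
 *   leading zeros 4-5  -> 10-bit run
 *   leading zeros 6-7  -> 14-bit run
 *
 * Each run is followed by a 2-bit palette index, and a run value of 0
 * fills to the end of the row. Even and odd lines are stored as two
 * fields, hence the jump to data[0] + w at y == (h + 1) / 2.
 */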
static int ape_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVIOContext *pb = s->pb;
    APEContext *ape = s->priv_data;
    AVStream *st;
    uint32_t tag;
    int i;
    int total_blocks;
    int64_t pts;

    /* TODO: Skip any leading junk such as id3v2 tags */
    ape->junklength = 0;

    tag = avio_rl32(pb);
    if (tag != MKTAG('M', 'A', 'C', ' '))
        return -1;

    ape->fileversion = avio_rl16(pb);

    if (ape->fileversion < APE_MIN_VERSION || ape->fileversion > APE_MAX_VERSION) {
        av_log(s, AV_LOG_ERROR, "Unsupported file version - %d.%02d\n",
               ape->fileversion / 1000, (ape->fileversion % 1000) / 10);
        return -1;
    }

    if (ape->fileversion >= 3980) {
        ape->padding1             = avio_rl16(pb);
        ape->descriptorlength     = avio_rl32(pb);
        ape->headerlength         = avio_rl32(pb);
        ape->seektablelength      = avio_rl32(pb);
        ape->wavheaderlength      = avio_rl32(pb);
        ape->audiodatalength      = avio_rl32(pb);
        ape->audiodatalength_high = avio_rl32(pb);
        ape->wavtaillength        = avio_rl32(pb);
        avio_read(pb, ape->md5, 16);

        /* Skip any unknown bytes at the end of the descriptor.
           This is for future compatibility */
        if (ape->descriptorlength > 52)
            avio_seek(pb, ape->descriptorlength - 52, SEEK_CUR);

        /* Read header data */
        ape->compressiontype  = avio_rl16(pb);
        ape->formatflags      = avio_rl16(pb);
        ape->blocksperframe   = avio_rl32(pb);
        ape->finalframeblocks = avio_rl32(pb);
        ape->totalframes      = avio_rl32(pb);
        ape->bps              = avio_rl16(pb);
        ape->channels         = avio_rl16(pb);
        ape->samplerate       = avio_rl32(pb);
    } else {
        ape->descriptorlength = 0;
        ape->headerlength     = 32;

        ape->compressiontype  = avio_rl16(pb);
        ape->formatflags      = avio_rl16(pb);
        ape->channels         = avio_rl16(pb);
        ape->samplerate       = avio_rl32(pb);
        ape->wavheaderlength  = avio_rl32(pb);
        ape->wavtaillength    = avio_rl32(pb);
        ape->totalframes      = avio_rl32(pb);
        ape->finalframeblocks = avio_rl32(pb);

        if (ape->formatflags & MAC_FORMAT_FLAG_HAS_PEAK_LEVEL) {
            avio_seek(pb, 4, SEEK_CUR); /* Skip the peak level */
            ape->headerlength += 4;
        }

        if (ape->formatflags & MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS) {
            ape->seektablelength = avio_rl32(pb);
            ape->headerlength += 4;
            ape->seektablelength *= sizeof(int32_t);
        } else
            ape->seektablelength = ape->totalframes * sizeof(int32_t);

        if (ape->formatflags & MAC_FORMAT_FLAG_8_BIT)
            ape->bps = 8;
        else if (ape->formatflags & MAC_FORMAT_FLAG_24_BIT)
            ape->bps = 24;
        else
            ape->bps = 16;

        if (ape->fileversion >= 3950)
            ape->blocksperframe = 73728 * 4;
        else if (ape->fileversion >= 3900 ||
                 (ape->fileversion >= 3800 && ape->compressiontype >= 4000))
            ape->blocksperframe = 73728;
        else
            ape->blocksperframe = 9216;

        /* Skip any stored wav header */
        if (!(ape->formatflags & MAC_FORMAT_FLAG_CREATE_WAV_HEADER))
            avio_seek(pb, ape->wavheaderlength, SEEK_CUR);
    }

    if (!ape->totalframes) {
        av_log(s, AV_LOG_ERROR, "No frames in the file!\n");
        return AVERROR(EINVAL);
    }
    if (ape->totalframes > UINT_MAX / sizeof(APEFrame)) {
        av_log(s, AV_LOG_ERROR, "Too many frames: %d\n", ape->totalframes);
        return -1;
    }
    ape->frames = av_malloc(ape->totalframes * sizeof(APEFrame));
    if (!ape->frames)
        return AVERROR(ENOMEM);
    ape->firstframe = ape->junklength + ape->descriptorlength +
                      ape->headerlength + ape->seektablelength +
                      ape->wavheaderlength;
    ape->currentframe = 0;

    ape->totalsamples = ape->finalframeblocks;
    if (ape->totalframes > 1)
        ape->totalsamples += ape->blocksperframe * (ape->totalframes - 1);

    if (ape->seektablelength > 0) {
        ape->seektable = av_malloc(ape->seektablelength);
        for (i = 0; i < ape->seektablelength / sizeof(uint32_t); i++)
            ape->seektable[i] = avio_rl32(pb);
    }

    ape->frames[0].pos     = ape->firstframe;
    ape->frames[0].nblocks = ape->blocksperframe;
    ape->frames[0].skip    = 0;
    for (i = 1; i < ape->totalframes; i++) {
        ape->frames[i].pos      = ape->seektable[i]; // ape->frames[i - 1].pos + ape->blocksperframe;
        ape->frames[i].nblocks  = ape->blocksperframe;
        ape->frames[i - 1].size = ape->frames[i].pos - ape->frames[i - 1].pos;
        ape->frames[i].skip     = (ape->frames[i].pos - ape->frames[0].pos) & 3;
    }
    ape->frames[ape->totalframes - 1].size    = ape->finalframeblocks * 4;
    ape->frames[ape->totalframes - 1].nblocks = ape->finalframeblocks;

    for (i = 0; i < ape->totalframes; i++) {
        if (ape->frames[i].skip) {
            ape->frames[i].pos  -= ape->frames[i].skip;
            ape->frames[i].size += ape->frames[i].skip;
        }
        ape->frames[i].size = (ape->frames[i].size + 3) & ~3;
    }

    ape_dumpinfo(s, ape);

    /* try to read APE tags */
    if (!url_is_streamed(pb)) {
        ff_ape_parse_tag(s);
        avio_seek(pb, 0, SEEK_SET);
    }

    av_log(s, AV_LOG_DEBUG, "Decoding file - v%d.%02d, compression level %d\n",
           ape->fileversion / 1000, (ape->fileversion % 1000) / 10,
           ape->compressiontype);

    /* now we are ready: build format streams */
    st = av_new_stream(s, 0);
    if (!st)
        return -1;

    total_blocks = (ape->totalframes == 0) ? 0 :
                   ((ape->totalframes - 1) * ape->blocksperframe) +
                   ape->finalframeblocks;

    st->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id              = CODEC_ID_APE;
    st->codec->codec_tag             = MKTAG('A', 'P', 'E', ' ');
    st->codec->channels              = ape->channels;
    st->codec->sample_rate           = ape->samplerate;
    st->codec->bits_per_coded_sample = ape->bps;
    st->codec->frame_size            = MAC_SUBFRAME_SIZE;

    st->nb_frames  = ape->totalframes;
    st->start_time = 0;
    st->duration   = total_blocks / MAC_SUBFRAME_SIZE;
    av_set_pts_info(st, 64, MAC_SUBFRAME_SIZE, ape->samplerate);

    st->codec->extradata      = av_malloc(APE_EXTRADATA_SIZE);
    st->codec->extradata_size = APE_EXTRADATA_SIZE;
    AV_WL16(st->codec->extradata + 0, ape->fileversion);
    AV_WL16(st->codec->extradata + 2, ape->compressiontype);
    AV_WL16(st->codec->extradata + 4, ape->formatflags);

    pts = 0;
    for (i = 0; i < ape->totalframes; i++) {
        ape->frames[i].pts = pts;
        av_add_index_entry(st, ape->frames[i].pos, ape->frames[i].pts, 0, 0,
                           AVINDEX_KEYFRAME);
        pts += ape->blocksperframe / MAC_SUBFRAME_SIZE;
    }

    return 0;
}
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    V210DecContext *s = avctx->priv_data;

    int h, w, ret, stride, aligned_input;
    AVFrame *pic = avctx->coded_frame;
    const uint8_t *psrc = avpkt->data;
    uint16_t *y, *u, *v;

    if (s->custom_stride)
        stride = s->custom_stride;
    else {
        int aligned_width = ((avctx->width + 47) / 48) * 48;
        stride = aligned_width * 8 / 3;
    }

    if (avpkt->size < stride * avctx->height) {
        if ((((avctx->width + 23) / 24) * 24 * 8) / 3 * avctx->height == avpkt->size) {
            stride = avpkt->size / avctx->height;
            if (!s->stride_warning_shown)
                av_log(avctx, AV_LOG_WARNING,
                       "Broken v210 with too small padding (64 byte) detected\n");
            s->stride_warning_shown = 1;
        } else {
            av_log(avctx, AV_LOG_ERROR, "packet too small\n");
            return AVERROR_INVALIDDATA;
        }
    }

    aligned_input = !((uintptr_t)psrc & 0xf) && !(stride & 0xf);
    if (aligned_input != s->aligned_input) {
        s->aligned_input = aligned_input;
        if (HAVE_MMX)
            v210_x86_init(s);
    }

    if (pic->data[0])
        avctx->release_buffer(avctx, pic);

    pic->reference = 0;
    if ((ret = ff_get_buffer(avctx, pic)) < 0)
        return ret;

    y = (uint16_t *)pic->data[0];
    u = (uint16_t *)pic->data[1];
    v = (uint16_t *)pic->data[2];
    pic->pict_type = AV_PICTURE_TYPE_I;
    pic->key_frame = 1;

    for (h = 0; h < avctx->height; h++) {
        const uint32_t *src = (const uint32_t *)psrc;
        uint32_t val;

        w = (avctx->width / 6) * 6;
        s->unpack_frame(src, y, u, v, w);

        y += w;
        u += w >> 1;
        v += w >> 1;
        src += (w << 1) / 3;

        if (w < avctx->width - 1) {
            READ_PIXELS(u, y, v);

            val = av_le2ne32(*src++);
            *y++ = val & 0x3FF;
            if (w < avctx->width - 3) {
                *u++ = (val >> 10) & 0x3FF;
                *y++ = (val >> 20) & 0x3FF;

                val = av_le2ne32(*src++);
                *v++ = val & 0x3FF;
                *y++ = (val >> 10) & 0x3FF;
            }
        }
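/*
 * Added note (hedged commentary, not from the original source): v210
 * packs three 10-bit components into each little-endian 32-bit word, low
 * bits first, with the top two bits unused. Six pixels occupy four
 * words:
 *
 *   word 0: Cb0 | Y0 << 10 | Cr0 << 20
 *   word 1: Y1  | Cb1 << 10 | Y2  << 20
 *   word 2: Cr1 | Y3 << 10 | Cb2 << 20
 *   word 3: Y4  | Cr2 << 10 | Y5  << 20
 *
 * which is why the stride computation above rounds the width up to a
 * multiple of 48 pixels and allows 8/3 bytes per pixel, and why the tail
 * handling extracts components with & 0x3FF at shifts of 0, 10 and 20.
 */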
static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
                        int buf_size, void *data)
{
    SgiContext *s = avctx->priv_data;
    AVFrame *const p = &s->picture;
    uint8_t *offsettab, *lengthtab, *in_buf, *encode_buf;
    int x, y, z, length, tablesize;
    unsigned int width, height, depth, dimension;
    unsigned char *orig_buf = buf, *end_buf = buf + buf_size;

    *p = *(AVFrame *)data;
    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;

    width  = avctx->width;
    height = avctx->height;

    switch (avctx->pix_fmt) {
    case PIX_FMT_GRAY8:
        dimension = SGI_SINGLE_CHAN;
        depth     = SGI_GRAYSCALE;
        break;
    case PIX_FMT_RGB24:
        dimension = SGI_MULTI_CHAN;
        depth     = SGI_RGB;
        break;
    case PIX_FMT_RGBA:
        dimension = SGI_MULTI_CHAN;
        depth     = SGI_RGBA;
        break;
    default:
        return AVERROR_INVALIDDATA;
    }

    tablesize = depth * height * 4;
    length = tablesize * 2 + SGI_HEADER_SIZE;

    if (buf_size < length) {
        av_log(avctx, AV_LOG_ERROR, "buf_size too small(need %d, got %d)\n",
               length, buf_size);
        return -1;
    }

    /* Encode header. */
    bytestream_put_be16(&buf, SGI_MAGIC);
    bytestream_put_byte(&buf, avctx->coder_type != FF_CODER_TYPE_RAW); /* RLE 1 - VERBATIM 0 */
    bytestream_put_byte(&buf, 1); /* bytes_per_channel */
    bytestream_put_be16(&buf, dimension);
    bytestream_put_be16(&buf, width);
    bytestream_put_be16(&buf, height);
    bytestream_put_be16(&buf, depth);

    /* The rest are constant in this implementation. */
    bytestream_put_be32(&buf, 0L);   /* pixmin */
    bytestream_put_be32(&buf, 255L); /* pixmax */
    bytestream_put_be32(&buf, 0L);   /* dummy */

    /* name */
    memset(buf, 0, SGI_HEADER_SIZE);
    buf += 80;

    /* colormap */
    bytestream_put_be32(&buf, 0L);

    /* The rest of the 512 byte header is unused. */
    buf += 404;
    offsettab = buf;

    if (avctx->coder_type != FF_CODER_TYPE_RAW) {
        /* Skip RLE offset table. */
        buf += tablesize;
        lengthtab = buf;

        /* Skip RLE length table. */
        buf += tablesize;

        /* Make an intermediate consecutive buffer. */
        if (!(encode_buf = av_malloc(width)))
            return -1;

        for (z = 0; z < depth; z++) {
            in_buf = p->data[0] + p->linesize[0] * (height - 1) + z;

            for (y = 0; y < height; y++) {
                bytestream_put_be32(&offsettab, buf - orig_buf);

                for (x = 0; x < width; x++)
                    encode_buf[x] = in_buf[depth * x];

                if ((length = ff_rle_encode(buf, end_buf - buf - 1, encode_buf,
                                            1, width, 0, 0, 0x80, 0)) < 1) {
                    av_free(encode_buf);
                    return -1;
                }

                buf += length;
                bytestream_put_byte(&buf, 0);
                bytestream_put_be32(&lengthtab, length + 1);
                in_buf -= p->linesize[0];
            }
        }

        av_free(encode_buf);
    } else {
        for (z = 0; z < depth; z++) {
            in_buf = p->data[0] + p->linesize[0] * (height - 1) + z;

            for (y = 0; y < height; y++) {
                for (x = 0; x < width * depth; x += depth)
                    bytestream_put_byte(&buf, in_buf[x]);

                in_buf -= p->linesize[0];
            }
        }
    }

    /* total length */
    return buf - orig_buf;
}
int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
{
    int i, av_uninit(j);
    int pps_count;
    int current_ref_assigned = 0, err = 0;
    Picture *av_uninit(pic);

    if ((h->avctx->debug & FF_DEBUG_MMCO) && mmco_count == 0)
        av_log(h->avctx, AV_LOG_DEBUG, "no mmco here\n");

    for (i = 0; i < mmco_count; i++) {
        int av_uninit(structure), av_uninit(frame_num);
        if (h->avctx->debug & FF_DEBUG_MMCO)
            av_log(h->avctx, AV_LOG_DEBUG, "mmco:%d %d %d\n", h->mmco[i].opcode,
                   h->mmco[i].short_pic_num, h->mmco[i].long_arg);

        if (mmco[i].opcode == MMCO_SHORT2UNUSED ||
            mmco[i].opcode == MMCO_SHORT2LONG) {
            frame_num = pic_num_extract(h, mmco[i].short_pic_num, &structure);
            pic       = find_short(h, frame_num, &j);
            if (!pic) {
                if (mmco[i].opcode != MMCO_SHORT2LONG ||
                    !h->long_ref[mmco[i].long_arg] ||
                    h->long_ref[mmco[i].long_arg]->frame_num != frame_num) {
                    av_log(h->avctx,
                           h->short_ref_count ? AV_LOG_ERROR : AV_LOG_DEBUG,
                           "mmco: unref short failure\n");
                    err = AVERROR_INVALIDDATA;
                }
                continue;
            }
        }

        switch (mmco[i].opcode) {
        case MMCO_SHORT2UNUSED:
            if (h->avctx->debug & FF_DEBUG_MMCO)
                av_log(h->avctx, AV_LOG_DEBUG, "mmco: unref short %d count %d\n",
                       h->mmco[i].short_pic_num, h->short_ref_count);
            remove_short(h, frame_num, structure ^ PICT_FRAME);
            break;
        case MMCO_SHORT2LONG:
            if (h->long_ref[mmco[i].long_arg] != pic)
                remove_long(h, mmco[i].long_arg, 0);

            remove_short_at_index(h, j);
            h->long_ref[mmco[i].long_arg] = pic;
            if (h->long_ref[mmco[i].long_arg]) {
                h->long_ref[mmco[i].long_arg]->long_ref = 1;
                h->long_ref_count++;
            }
            break;
        case MMCO_LONG2UNUSED:
            j   = pic_num_extract(h, mmco[i].long_arg, &structure);
            pic = h->long_ref[j];
            if (pic) {
                remove_long(h, j, structure ^ PICT_FRAME);
            } else if (h->avctx->debug & FF_DEBUG_MMCO)
                av_log(h->avctx, AV_LOG_DEBUG, "mmco: unref long failure\n");
            break;
        case MMCO_LONG:
            // Comment below left from previous code as it is an interesting note.
            /* First field in pair is in short term list or
             * at a different long term index.
             * This is not allowed; see 7.4.3.3, notes 2 and 3.
             * Report the problem and keep the pair where it is,
             * and mark this field valid. */
            if (h->short_ref[0] == h->cur_pic_ptr) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "mmco: cannot assign current picture to short and long at the same time\n");
                remove_short_at_index(h, 0);
            }

            if (h->long_ref[mmco[i].long_arg] != h->cur_pic_ptr) {
                if (h->cur_pic_ptr->long_ref) {
                    for (j = 0; j < 16; j++) {
                        if (h->long_ref[j] == h->cur_pic_ptr) {
                            remove_long(h, j, 0);
                            av_log(h->avctx, AV_LOG_ERROR,
                                   "mmco: cannot assign current picture to 2 long term references\n");
                        }
                    }
                }
                av_assert0(!h->cur_pic_ptr->long_ref);
                remove_long(h, mmco[i].long_arg, 0);

                h->long_ref[mmco[i].long_arg]           = h->cur_pic_ptr;
                h->long_ref[mmco[i].long_arg]->long_ref = 1;
                h->long_ref_count++;
            }

            h->cur_pic_ptr->reference |= h->picture_structure;
            current_ref_assigned = 1;
            break;
        case MMCO_SET_MAX_LONG:
            assert(mmco[i].long_arg <= 16);
            // just remove the long term which index is greater than new max
            for (j = mmco[i].long_arg; j < 16; j++) {
                remove_long(h, j, 0);
            }
            break;
        case MMCO_RESET:
            while (h->short_ref_count) {
                remove_short(h, h->short_ref[0]->frame_num, 0);
            }
            for (j = 0; j < 16; j++) {
                remove_long(h, j, 0);
            }
            h->frame_num = h->cur_pic_ptr->frame_num = 0;
            h->mmco_reset = 1;
            h->cur_pic_ptr->mmco_reset = 1;
            for (j = 0; j < MAX_DELAYED_PIC_COUNT; j++)
                h->last_pocs[j] = INT_MIN;
            break;
        default: assert(0);
        }
    }

    if (!current_ref_assigned) {
        /* Second field of complementary field pair; the first field of
         * which is already referenced. If short referenced, it
         * should be first entry in short_ref. If not, it must exist
         * in long_ref; trying to put it on the short list here is an
         * error in the encoded bit stream (ref: 7.4.3.3, NOTE 2 and 3). */
        if (h->short_ref_count && h->short_ref[0] == h->cur_pic_ptr) {
            /* Just mark the second field valid */
            h->cur_pic_ptr->reference = PICT_FRAME;
        } else if (h->cur_pic_ptr->long_ref) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "illegal short term reference assignment for second field "
                   "in complementary field pair (first field is long term)\n");
            err = AVERROR_INVALIDDATA;
        } else {
            pic = remove_short(h, h->cur_pic_ptr->frame_num, 0);
            if (pic) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "illegal short term buffer state detected\n");
                err = AVERROR_INVALIDDATA;
            }

            if (h->short_ref_count)
                memmove(&h->short_ref[1], &h->short_ref[0],
                        h->short_ref_count * sizeof(Picture *));

            h->short_ref[0] = h->cur_pic_ptr;
            h->short_ref_count++;
            h->cur_pic_ptr->reference |= h->picture_structure;
            /* MythTV changes - begin */
            // do not add more reference frames than allowed after seeing frame num gap
            if (!mmco_count && h->short_ref_count > h->sps.ref_frame_count) {
                pic = h->short_ref[h->short_ref_count - 1];
                remove_short(h, pic->frame_num, 0);
            }
            /* MythTV changes - end */
        }
    }

    if (h->long_ref_count + h->short_ref_count >
        FFMAX(h->sps.ref_frame_count, 1)) {
        /* We have too many reference frames, probably due to corrupted
         * stream. Need to discard one frame. Prevents overrun of the
         * short_ref and long_ref buffers. */
        av_log(h->avctx, AV_LOG_ERROR,
               "number of reference frames (%d+%d) exceeds max (%d; probably "
               "corrupt input), discarding one\n",
               h->long_ref_count, h->short_ref_count, h->sps.ref_frame_count);
        err = AVERROR_INVALIDDATA;

        if (h->long_ref_count && !h->short_ref_count) {
            for (i = 0; i < 16; ++i)
                if (h->long_ref[i])
                    break;
            assert(i < 16);
            remove_long(h, i, 0);
        } else {
            pic = h->short_ref[h->short_ref_count - 1];
            remove_short(h, pic->frame_num, 0);
        }
    }

    for (i = 0; i < h->short_ref_count; i++) {
        pic = h->short_ref[i];
        if (pic->invalid_gap) {
            int d = (h->cur_pic_ptr->frame_num - pic->frame_num) &
                    ((1 << h->sps.log2_max_frame_num) - 1);
            if (d > h->sps.ref_frame_count)
                remove_short(h, pic->frame_num, 0);
        }
    }

    print_short_term(h);
    print_long_term(h);

    pps_count = 0;
    for (i = 0; i < FF_ARRAY_ELEMS(h->pps_buffers); i++)
        pps_count += !!h->pps_buffers[i];

    if (err >= 0 &&
        h->long_ref_count == 0 &&
        (h->short_ref_count <= 2 ||
         h->pps.ref_count[0] <= 1 && h->pps.ref_count[1] <= 1 && pps_count == 1) &&
        h->pps.ref_count[0] <= 2 + (h->picture_structure != PICT_FRAME) &&
        h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I) {
        h->cur_pic_ptr->recovered |= 1;
        if (!h->avctx->has_b_frames)
            h->frame_recovered |= FRAME_RECOVERED_SEI;
    }

    return (h->avctx->err_recognition & AV_EF_EXPLODE) ? err : 0;
}
static int audio_open(AVFormatContext *s1, int is_output, const char *audio_device)
{
    AudioData *s = s1->priv_data;
    int audio_fd;
    int tmp, err;
    char *flip = getenv("AUDIO_FLIP_LEFT");

    if (is_output)
        audio_fd = avpriv_open(audio_device, O_WRONLY);
    else
        audio_fd = avpriv_open(audio_device, O_RDONLY);
    if (audio_fd < 0) {
        av_log(s1, AV_LOG_ERROR, "%s: %s\n", audio_device, strerror(errno));
        return AVERROR(EIO);
    }

    if (flip && *flip == '1') {
        s->flip_left = 1;
    }

    /* non blocking mode */
    if (!is_output) {
        if (fcntl(audio_fd, F_SETFL, O_NONBLOCK) < 0) {
            av_log(s1, AV_LOG_WARNING, "%s: Could not enable non block mode (%s)\n",
                   audio_device, strerror(errno));
        }
    }

    s->frame_size = AUDIO_BLOCK_SIZE;

    /* select format : favour native format */
    err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp);

#if HAVE_BIGENDIAN
    if (tmp & AFMT_S16_BE) {
        tmp = AFMT_S16_BE;
    } else if (tmp & AFMT_S16_LE) {
        tmp = AFMT_S16_LE;
    } else {
        tmp = 0;
    }
#else
    if (tmp & AFMT_S16_LE) {
        tmp = AFMT_S16_LE;
    } else if (tmp & AFMT_S16_BE) {
        tmp = AFMT_S16_BE;
    } else {
        tmp = 0;
    }
#endif

    switch (tmp) {
    case AFMT_S16_LE:
        s->codec_id = AV_CODEC_ID_PCM_S16LE;
        break;
    case AFMT_S16_BE:
        s->codec_id = AV_CODEC_ID_PCM_S16BE;
        break;
    default:
        av_log(s1, AV_LOG_ERROR, "Soundcard does not support 16 bit sample format\n");
        close(audio_fd);
        return AVERROR(EIO);
    }
    err = ioctl(audio_fd, SNDCTL_DSP_SETFMT, &tmp);
    if (err < 0) {
        av_log(s1, AV_LOG_ERROR, "SNDCTL_DSP_SETFMT: %s\n", strerror(errno));
        goto fail;
    }

    tmp = (s->channels == 2);
    err = ioctl(audio_fd, SNDCTL_DSP_STEREO, &tmp);
    if (err < 0) {
        av_log(s1, AV_LOG_ERROR, "SNDCTL_DSP_STEREO: %s\n", strerror(errno));
        goto fail;
    }

    tmp = s->sample_rate;
    err = ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp);
    if (err < 0) {
        av_log(s1, AV_LOG_ERROR, "SNDCTL_DSP_SPEED: %s\n", strerror(errno));
        goto fail;
    }
    s->sample_rate = tmp; /* store real sample rate */
    s->fd = audio_fd;

    return 0;

fail:
    close(audio_fd);
    return AVERROR(EIO);
}
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb,
                                   int first_slice)
{
    int i, ret;
    MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = mmco_temp;
    int mmco_index = 0;

    if (h->nal_unit_type == NAL_IDR_SLICE) { // FIXME fields
        skip_bits1(gb); // broken_link
        if (get_bits1(gb)) {
            mmco[0].opcode   = MMCO_LONG;
            mmco[0].long_arg = 0;
            mmco_index       = 1;
        }
    } else {
        if (get_bits1(gb)) { // adaptive_ref_pic_marking_mode_flag
            for (i = 0; i < MAX_MMCO_COUNT; i++) {
                MMCOOpcode opcode = get_ue_golomb_31(gb);

                mmco[i].opcode = opcode;
                if (opcode == MMCO_SHORT2UNUSED || opcode == MMCO_SHORT2LONG) {
                    mmco[i].short_pic_num =
                        (h->curr_pic_num - get_ue_golomb(gb) - 1) &
                            (h->max_pic_num - 1);
#if 0
                    if (mmco[i].short_pic_num >= h->short_ref_count ||
                        h->short_ref[mmco[i].short_pic_num] == NULL) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "illegal short ref in memory management control "
                               "operation %d\n", mmco);
                        return -1;
                    }
#endif
                }
                if (opcode == MMCO_SHORT2LONG || opcode == MMCO_LONG2UNUSED ||
                    opcode == MMCO_LONG || opcode == MMCO_SET_MAX_LONG) {
                    unsigned int long_arg = get_ue_golomb_31(gb);
                    if (long_arg >= 32 ||
                        (long_arg >= 16 &&
                         !(opcode == MMCO_SET_MAX_LONG && long_arg == 16) &&
                         !(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE(h)))) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "illegal long ref in memory management control "
                               "operation %d\n", opcode);
                        return -1;
                    }
                    mmco[i].long_arg = long_arg;
                }

                if (opcode > (unsigned) MMCO_LONG) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "illegal memory management control operation %d\n",
                           opcode);
                    return -1;
                }
                if (opcode == MMCO_END)
                    break;
            }
            mmco_index = i;
        } else {
            if (first_slice) {
                ret = ff_generate_sliding_window_mmcos(h, first_slice);
                if (ret < 0 && h->avctx->err_recognition & AV_EF_EXPLODE)
                    return ret;
            }
            mmco_index = -1;
        }
    }

    if (first_slice && mmco_index != -1) {
        memcpy(h->mmco, mmco_temp, sizeof(h->mmco));
        h->mmco_index = mmco_index;
    } else if (!first_slice && mmco_index >= 0 &&
               (mmco_index != h->mmco_index ||
                check_opcodes(h->mmco, mmco_temp, mmco_index))) {
        av_log(h->avctx, AV_LOG_ERROR,
               "Inconsistent MMCO state between slices [%d, %d]\n",
               mmco_index, h->mmco_index);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    int field;
    AVFrame *pic = avctx->coded_frame;
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end = buf + avpkt->size;

    if (pic->data[0])
        avctx->release_buffer(avctx, pic);

    if (avpkt->size < avctx->width * 2 * avctx->height + 4 + 2 * 8) {
        av_log(avctx, AV_LOG_ERROR, "Packet is too small.\n");
        return -1;
    }
    if (bytestream_get_le32(&buf) != AV_RL32("FRW1")) {
        av_log(avctx, AV_LOG_ERROR, "incorrect marker\n");
        return -1;
    }

    pic->reference = 0;
    if (avctx->get_buffer(avctx, pic) < 0)
        return -1;

    pic->pict_type        = AV_PICTURE_TYPE_I;
    pic->key_frame        = 1;
    pic->interlaced_frame = 1;
    pic->top_field_first  = 1;

    for (field = 0; field < 2; field++) {
        int i;
        int field_h = (avctx->height + !field) >> 1;
        int field_size, min_field_size = avctx->width * 2 * field_h;
        uint8_t *dst = pic->data[0];

        if (buf_end - buf < 8)
            return -1;
        buf += 4; // flags? 0x80 == bottom field maybe?
        field_size = bytestream_get_le32(&buf);
        if (field_size < min_field_size) {
            av_log(avctx, AV_LOG_ERROR, "Field size %i is too small (required %i)\n",
                   field_size, min_field_size);
            return -1;
        }
        if (buf_end - buf < field_size) {
            av_log(avctx, AV_LOG_ERROR, "Packet is too small, need %i, have %i\n",
                   field_size, (int)(buf_end - buf));
            return -1;
        }
        if (field)
            dst += pic->linesize[0];
        for (i = 0; i < field_h; i++) {
            memcpy(dst, buf, avctx->width * 2);
            buf += avctx->width * 2;
            dst += pic->linesize[0] << 1;
        }
        buf += field_size - min_field_size;
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame *)data = *pic;

    return avpkt->size;
}
static int mjpegb_decode_frame(AVCodecContext *avctx,
                               void *data, int *got_frame,
                               AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MJpegDecodeContext *s = avctx->priv_data;
    const uint8_t *buf_end, *buf_ptr;
    GetBitContext hgb; /* for the header */
    uint32_t dqt_offs, dht_offs, sof_offs, sos_offs, second_field_offs;
    uint32_t field_size, sod_offs;
    int ret;

    buf_ptr = buf;
    buf_end = buf + buf_size;

read_header:
    /* reset on every SOI */
    s->restart_interval = 0;
    s->restart_count    = 0;
    s->mjpb_skiptosod   = 0;

    if (buf_end - buf_ptr >= 1 << 28)
        return AVERROR_INVALIDDATA;

    init_get_bits(&hgb, buf_ptr, /*buf_size*/ (buf_end - buf_ptr) * 8);

    skip_bits(&hgb, 32); /* reserved zeros */

    if (get_bits_long(&hgb, 32) != MKBETAG('m', 'j', 'p', 'g')) {
        av_log(avctx, AV_LOG_WARNING, "not mjpeg-b (bad fourcc)\n");
        return AVERROR_INVALIDDATA;
    }

    field_size = get_bits_long(&hgb, 32); /* field size */
    av_log(avctx, AV_LOG_DEBUG, "field size: 0x%"PRIx32"\n", field_size);
    skip_bits(&hgb, 32); /* padded field size */
    second_field_offs = read_offs(avctx, &hgb, buf_end - buf_ptr,
                                  "second_field_offs is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "second field offs: 0x%"PRIx32"\n",
           second_field_offs);

    dqt_offs = read_offs(avctx, &hgb, buf_end - buf_ptr,
                         "dqt is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "dqt offs: 0x%"PRIx32"\n", dqt_offs);

    if (dqt_offs) {
        init_get_bits(&s->gb, buf_ptr + dqt_offs,
                      (buf_end - (buf_ptr + dqt_offs)) * 8);
        s->start_code = DQT;
        if (ff_mjpeg_decode_dqt(s) < 0 &&
            (avctx->err_recognition & AV_EF_EXPLODE))
            return AVERROR_INVALIDDATA;
    }

    dht_offs = read_offs(avctx, &hgb, buf_end - buf_ptr,
                         "dht is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "dht offs: 0x%"PRIx32"\n", dht_offs);

    if (dht_offs) {
        init_get_bits(&s->gb, buf_ptr + dht_offs,
                      (buf_end - (buf_ptr + dht_offs)) * 8);
        s->start_code = DHT;
        ff_mjpeg_decode_dht(s);
    }

    sof_offs = read_offs(avctx, &hgb, buf_end - buf_ptr,
                         "sof is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "sof offs: 0x%"PRIx32"\n", sof_offs);

    if (sof_offs) {
        init_get_bits(&s->gb, buf_ptr + sof_offs,
                      (buf_end - (buf_ptr + sof_offs)) * 8);
        s->start_code = SOF0;
        if (ff_mjpeg_decode_sof(s) < 0)
            return -1;
    }

    sos_offs = read_offs(avctx, &hgb, buf_end - buf_ptr,
                         "sos is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "sos offs: 0x%"PRIx32"\n", sos_offs);
    sod_offs = read_offs(avctx, &hgb, buf_end - buf_ptr,
                         "sof is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "sod offs: 0x%"PRIx32"\n", sod_offs);

    if (sos_offs) {
        init_get_bits(&s->gb, buf_ptr + sos_offs,
                      8 * FFMIN(field_size, buf_end - buf_ptr - sos_offs));
        s->mjpb_skiptosod = (sod_offs - sos_offs - show_bits(&s->gb, 16));
        s->start_code = SOS;
        if (ff_mjpeg_decode_sos(s, NULL, NULL) < 0 &&
            (avctx->err_recognition & AV_EF_EXPLODE))
            return AVERROR_INVALIDDATA;
    }

    if (s->interlaced) {
        s->bottom_field ^= 1;
        /* if not bottom field, do not output image yet */
        if (s->bottom_field != s->interlace_polarity && second_field_offs) {
            buf_ptr = buf + second_field_offs;
            second_field_offs = 0;
            goto read_header;
        }
    }

    // XXX FIXME factorize, this looks very similar to the EOI code
    if ((ret = av_frame_ref(data, s->picture_ptr)) < 0)
        return ret;
    *got_frame = 1;

    if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
        av_log(avctx, AV_LOG_DEBUG, "QP: %d\n",
               FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]));
    }

    return buf_size;
}
static int flic_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    FlicDemuxContext *flic = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char header[FLIC_HEADER_SIZE];
    AVStream *st, *ast;
    int speed;
    int magic_number;
    unsigned char preamble[FLIC_PREAMBLE_SIZE];

    flic->frame_number = 0;

    /* load the whole header and pull out the width and height */
    if (avio_read(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
        return AVERROR(EIO);

    magic_number = AV_RL16(&header[4]);
    speed = AV_RL32(&header[0x10]);
    if (speed == 0)
        speed = FLIC_DEFAULT_SPEED;

    /* initialize the decoder streams */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    flic->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_FLIC;
    st->codec->codec_tag  = 0; /* no fourcc */
    st->codec->width      = AV_RL16(&header[0x08]);
    st->codec->height     = AV_RL16(&header[0x0A]);

    if (!st->codec->width || !st->codec->height) {
        /* Ugly hack needed for the following sample: */
        /* http://samples.mplayerhq.hu/fli-flc/fli-bugs/specular.flc */
        av_log(s, AV_LOG_WARNING,
               "File with no specified width/height. Trying 640x480.\n");
        st->codec->width  = 640;
        st->codec->height = 480;
    }

    /* send over the whole 128-byte FLIC header */
    st->codec->extradata_size = FLIC_HEADER_SIZE;
    st->codec->extradata = av_malloc(FLIC_HEADER_SIZE);
    memcpy(st->codec->extradata, header, FLIC_HEADER_SIZE);

    /* peek at the preamble to detect TFTD videos - they seem to always
       start with an audio chunk */
    if (avio_read(pb, preamble, FLIC_PREAMBLE_SIZE) != FLIC_PREAMBLE_SIZE) {
        av_log(s, AV_LOG_ERROR, "Failed to peek at preamble\n");
        return AVERROR(EIO);
    }

    url_fseek(pb, -FLIC_PREAMBLE_SIZE, SEEK_CUR);

    /* Time to figure out the framerate:
     * If the first preamble's magic number is 0xAAAA then this file is from
     * X-COM: Terror from the Deep. If on the other hand there is a FLIC chunk
     * magic number at offset 0x10 assume this file is from Magic Carpet instead.
     * If neither of the above is true then this is a normal FLIC file.
     */
    if (AV_RL16(&preamble[4]) == FLIC_TFTD_CHUNK_AUDIO) {
        /* TFTD videos have an extra 22050 Hz 8-bit mono audio stream */
        ast = av_new_stream(s, 1);
        if (!ast)
            return AVERROR(ENOMEM);

        flic->audio_stream_index = ast->index;

        /* all audio frames are the same size, so use the size of the first
           chunk for block_align */
        ast->codec->block_align = AV_RL32(&preamble[0]);
        ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id    = CODEC_ID_PCM_U8;
        ast->codec->codec_tag   = 0;
        ast->codec->sample_rate = FLIC_TFTD_SAMPLE_RATE;
        ast->codec->channels    = 1;
        ast->codec->sample_fmt  = AV_SAMPLE_FMT_U8;
        /* was st->codec->sample_rate, which is never set on the video
           stream; use the audio stream's own sample rate */
        ast->codec->bit_rate    = ast->codec->sample_rate * 8;
        ast->codec->bits_per_coded_sample = 8;
        ast->codec->channel_layout = AV_CH_LAYOUT_MONO;
        ast->codec->extradata_size = 0;

        /* Since the header information is incorrect we have to figure out the
         * framerate using block_align and the fact that the audio is 22050 Hz.
         * We usually have two cases: 2205 -> 10 fps and 1470 -> 15 fps */
        av_set_pts_info(st, 64, ast->codec->block_align, FLIC_TFTD_SAMPLE_RATE);
        av_set_pts_info(ast, 64, 1, FLIC_TFTD_SAMPLE_RATE);
    } else if (AV_RL16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
        av_set_pts_info(st, 64, FLIC_MC_SPEED, 70);

        /* rewind the stream since the first chunk is at offset 12 */
        url_fseek(pb, 12, SEEK_SET);

        /* send over abbreviated FLIC header chunk */
        av_free(st->codec->extradata);
        st->codec->extradata_size = 12;
        st->codec->extradata = av_malloc(12);
        memcpy(st->codec->extradata, header, 12);
    } else if (magic_number == FLIC_FILE_MAGIC_1) {
        av_set_pts_info(st, 64, speed, 70);
    } else if ((magic_number == FLIC_FILE_MAGIC_2) ||
               (magic_number == FLIC_FILE_MAGIC_3)) {
        av_set_pts_info(st, 64, speed, 1000);
    } else {
        av_log(s, AV_LOG_INFO, "Invalid or unsupported magic chunk in file\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
static void ffmpeg_postprocess(struct anim *anim)
{
    AVFrame *input = anim->pFrame;
    ImBuf *ibuf = anim->last_frame;
    int filter_y = 0;

    if (!anim->pFrameComplete) {
        return;
    }

    /* This means the data wasn't read properly,
     * this check stops crashing */
    if (input->data[0] == 0 && input->data[1] == 0 &&
        input->data[2] == 0 && input->data[3] == 0)
    {
        fprintf(stderr, "ffmpeg_fetchibuf: "
                "data not read properly...\n");
        return;
    }

    av_log(anim->pFormatCtx, AV_LOG_DEBUG,
           "  POSTPROC: anim->pFrame planes: %p %p %p %p\n",
           input->data[0], input->data[1], input->data[2], input->data[3]);

    if (anim->ib_flags & IB_animdeinterlace) {
        if (avpicture_deinterlace(
                (AVPicture *) anim->pFrameDeinterlaced,
                (const AVPicture *) anim->pFrame,
                anim->pCodecCtx->pix_fmt,
                anim->pCodecCtx->width,
                anim->pCodecCtx->height) < 0)
        {
            filter_y = true;
        }
        else {
            input = anim->pFrameDeinterlaced;
        }
    }

    if (!need_aligned_ffmpeg_buffer(anim)) {
        avpicture_fill((AVPicture *) anim->pFrameRGB,
                       (unsigned char *) ibuf->rect,
                       AV_PIX_FMT_RGBA, anim->x, anim->y);
    }

    if (ENDIAN_ORDER == B_ENDIAN) {
        int *dstStride = anim->pFrameRGB->linesize;
        uint8_t **dst = anim->pFrameRGB->data;
        int dstStride2[4] = { dstStride[0], 0, 0, 0 };
        uint8_t *dst2[4] = { dst[0], 0, 0, 0 };
        int x, y, h, w;
        unsigned char *bottom;
        unsigned char *top;

        sws_scale(anim->img_convert_ctx,
                  (const uint8_t *const *)input->data,
                  input->linesize,
                  0,
                  anim->y,
                  dst2,
                  dstStride2);

        /* flip the image vertically in place (sws output is top-down,
         * ImBuf is bottom-up) */
        bottom = (unsigned char *) ibuf->rect;
        top = bottom + ibuf->x * (ibuf->y - 1) * 4;

        h = (ibuf->y + 1) / 2;
        w = ibuf->x;

        for (y = 0; y < h; y++) {
            unsigned char tmp[4];
            unsigned int *tmp_l = (unsigned int *) tmp;

            for (x = 0; x < w; x++) {
                tmp[0] = bottom[0];
                tmp[1] = bottom[1];
                tmp[2] = bottom[2];
                tmp[3] = bottom[3];

                bottom[0] = top[0];
                bottom[1] = top[1];
                bottom[2] = top[2];
                bottom[3] = top[3];

                *(unsigned int *) top = *tmp_l;

                bottom += 4;
                top += 4;
            }
            top -= 8 * w;
        }
    }
    else {
        int *dstStride = anim->pFrameRGB->linesize;
        uint8_t **dst = anim->pFrameRGB->data;
        /* a negative stride with the pointer at the last row makes
         * sws_scale write the output bottom-up, flipping it into
         * Blender's orientation without a second pass */
        int dstStride2[4] = { -dstStride[0], 0, 0, 0 };
        uint8_t *dst2[4] = { dst[0] + (anim->y - 1) * dstStride[0], 0, 0, 0 };

        sws_scale(anim->img_convert_ctx,
                  (const uint8_t *const *)input->data,
                  input->linesize,
                  0,
                  anim->y,
                  dst2,
                  dstStride2);
    }

    if (need_aligned_ffmpeg_buffer(anim)) {
        uint8_t *src = anim->pFrameRGB->data[0];
        uint8_t *dst = (uint8_t *) ibuf->rect;
        for (int y = 0; y < anim->y; y++) {
            memcpy(dst, src, anim->x * 4);
            dst += anim->x * 4;
            src += anim->pFrameRGB->linesize[0];
        }
    }

    if (filter_y) {
        IMB_filtery(ibuf);
    }
}
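/*
 * A minimal sketch (assuming a valid SwsContext `sws`, a decoded top-down
 * source frame `src`, and a w*h RGBA buffer `out`; not part of the code
 * above) of the negative-stride trick used in ffmpeg_postprocess: pointing
 * the destination at its last row and negating the stride makes sws_scale()
 * emit the image vertically flipped, so no separate flip pass is needed.
 */
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>

static void scale_flipped_sketch(struct SwsContext *sws, const AVFrame *src,
                                 uint8_t *out, int w, int h)
{
    int flip_stride[4]   = { -(w * 4), 0, 0, 0 };              /* walk upwards */
    uint8_t *flip_dst[4] = { out + (h - 1) * (w * 4), 0, 0, 0 }; /* last row */

    sws_scale(sws, (const uint8_t *const *)src->data, src->linesize,
              0, h, flip_dst, flip_stride);
}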
static int rtp_parse_packet_internal(RTPDemuxContext *s, AVPacket *pkt,
                                     const uint8_t *buf, int len)
{
    unsigned int ssrc, h;
    int payload_type, seq, ret, flags = 0;
    int ext;
    AVStream *st;
    uint32_t timestamp;
    int rv = 0;

    ext          = buf[0] & 0x10;
    payload_type = buf[1] & 0x7f;
    if (buf[1] & 0x80)
        flags |= RTP_FLAG_MARKER;
    seq       = AV_RB16(buf + 2);
    timestamp = AV_RB32(buf + 4);
    ssrc      = AV_RB32(buf + 8);
    /* store the ssrc in the RTPDemuxContext */
    s->ssrc = ssrc;

    /* NOTE: we can handle only one payload type */
    if (s->payload_type != payload_type)
        return -1;

    st = s->st;
    // only do something with this if all the rtp checks pass...
    if (!rtp_valid_packet_in_sequence(&s->statistics, seq)) {
        av_log(st ? st->codec : NULL, AV_LOG_ERROR,
               "RTP: PT=%02x: bad cseq %04x expected=%04x\n",
               payload_type, seq, ((s->seq + 1) & 0xffff));
        return -1;
    }

    if (buf[0] & 0x20) {
        int padding = buf[len - 1];
        if (len >= 12 + padding)
            len -= padding;
    }

    s->seq = seq;
    len   -= 12;
    buf   += 12;

    /* RFC 3550 Section 5.3.1 RTP Header Extension handling */
    if (ext) {
        if (len < 4)
            return -1;
        /* calculate the header extension length (stored as number
         * of 32-bit words) */
        ext = (AV_RB16(buf + 2) + 1) << 2;

        if (len < ext)
            return -1;
        // skip past RTP header extension
        len -= ext;
        buf += ext;
    }

    if (!st) {
        /* specific MPEG2TS demux support */
        ret = ff_mpegts_parse_packet(s->ts, pkt, buf, len);
        /* The only error that can be returned from ff_mpegts_parse_packet
         * is "no more data to return from the provided buffer", so return
         * AVERROR(EAGAIN) for all errors */
        if (ret < 0)
            return AVERROR(EAGAIN);
        if (ret < len) {
            s->read_buf_size = len - ret;
            memcpy(s->buf, buf + ret, s->read_buf_size);
            s->read_buf_index = 0;
            return 1;
        }
        return 0;
    } else if (s->parse_packet) {
        rv = s->parse_packet(s->ic, s->dynamic_protocol_context,
                             s->st, pkt, &timestamp, buf, len, flags);
    } else {
        /* At this point, the RTP header has been stripped;
         * this is ASSUMING that there is only 1 CSRC, which isn't wise. */
        switch (st->codec->codec_id) {
        case CODEC_ID_MP2:
        case CODEC_ID_MP3:
            /* better than nothing: skip MPEG audio RTP header */
            if (len <= 4)
                return -1;
            h    = AV_RB32(buf);
            len -= 4;
            buf += 4;
            av_new_packet(pkt, len);
            memcpy(pkt->data, buf, len);
            break;
        case CODEC_ID_MPEG1VIDEO:
        case CODEC_ID_MPEG2VIDEO:
            /* better than nothing: skip MPEG video RTP header */
            if (len <= 4)
                return -1;
            h    = AV_RB32(buf);
            buf += 4;
            len -= 4;
            if (h & (1 << 26)) {
                /* MPEG-2 */
                if (len <= 4)
                    return -1;
                buf += 4;
                len -= 4;
            }
            av_new_packet(pkt, len);
            memcpy(pkt->data, buf, len);
            break;
        default:
            av_new_packet(pkt, len);
            memcpy(pkt->data, buf, len);
            break;
        }

        pkt->stream_index = st->index;
    }

    // now perform timestamp things....
    finalize_packet(s, pkt, timestamp);

    return rv;
}
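/*
 * A minimal sketch (hypothetical struct and helper names, not libavformat
 * API) of the 12-byte RTP fixed header per RFC 3550 that the parser above
 * reads field by field. Returns 0 on success, -1 if the buffer is too
 * short or the version bits are not 2.
 */
#include <stdint.h>

struct rtp_header_sketch {
    int      version;      /* bits 0-1 of byte 0, must be 2 */
    int      padding;      /* bit 2 of byte 0 */
    int      extension;    /* bit 3 of byte 0 */
    int      csrc_count;   /* bits 4-7 of byte 0 */
    int      marker;       /* bit 0 of byte 1 */
    int      payload_type; /* bits 1-7 of byte 1 */
    uint16_t seq;          /* bytes 2-3, big-endian */
    uint32_t timestamp;    /* bytes 4-7, big-endian */
    uint32_t ssrc;         /* bytes 8-11, big-endian */
};

static int parse_rtp_header_sketch(const uint8_t *buf, int len,
                                   struct rtp_header_sketch *h)
{
    if (len < 12 || (buf[0] >> 6) != 2)
        return -1;
    h->version      = buf[0] >> 6;
    h->padding      = (buf[0] >> 5) & 1;
    h->extension    = (buf[0] >> 4) & 1;
    h->csrc_count   = buf[0] & 0x0f;
    h->marker       = buf[1] >> 7;
    h->payload_type = buf[1] & 0x7f;
    h->seq          = (uint16_t)(buf[2] << 8 | buf[3]);
    h->timestamp    = (uint32_t)buf[4] << 24 | buf[5] << 16 | buf[6] << 8 | buf[7];
    h->ssrc         = (uint32_t)buf[8] << 24 | buf[9] << 16 | buf[10] << 8 | buf[11];
    return 0;
}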
static ImBuf *ffmpeg_fetchibuf(struct anim *anim, int position,
                               IMB_Timecode_Type tc)
{
    int64_t pts_to_search = 0;
    double frame_rate;
    double pts_time_base;
    long long st_time;
    struct anim_index *tc_index = 0;
    AVStream *v_st;
    int new_frame_index = 0; /* To quiet gcc barking... */
    int old_frame_index = 0; /* To quiet gcc barking... */

    if (anim == NULL)
        return (0);

    av_log(anim->pFormatCtx, AV_LOG_DEBUG, "FETCH: pos=%d\n", position);

    if (tc != IMB_TC_NONE) {
        tc_index = IMB_anim_open_index(anim, tc);
    }

    v_st = anim->pFormatCtx->streams[anim->videoStream];

    frame_rate = av_q2d(av_get_r_frame_rate_compat(anim->pFormatCtx, v_st));

    st_time = anim->pFormatCtx->start_time;
    pts_time_base = av_q2d(v_st->time_base);

    if (tc_index) {
        new_frame_index = IMB_indexer_get_frame_index(tc_index, position);
        old_frame_index = IMB_indexer_get_frame_index(tc_index, anim->curposition);
        pts_to_search = IMB_indexer_get_pts(tc_index, new_frame_index);
    }
    else {
        pts_to_search = (long long)
            floor(((double) position) / pts_time_base / frame_rate + 0.5);

        if (st_time != AV_NOPTS_VALUE) {
            pts_to_search += st_time / pts_time_base / AV_TIME_BASE;
        }
    }

    av_log(anim->pFormatCtx, AV_LOG_DEBUG,
           "FETCH: looking for PTS=%lld "
           "(pts_timebase=%g, frame_rate=%g, st_time=%lld)\n",
           (long long int)pts_to_search, pts_time_base, frame_rate, st_time);

    if (anim->last_frame &&
        anim->last_pts <= pts_to_search && anim->next_pts > pts_to_search)
    {
        av_log(anim->pFormatCtx, AV_LOG_DEBUG,
               "FETCH: frame repeat: last: %lld next: %lld\n",
               (long long int)anim->last_pts,
               (long long int)anim->next_pts);
        IMB_refImBuf(anim->last_frame);
        anim->curposition = position;
        return anim->last_frame;
    }

    if (position > anim->curposition + 1 &&
        anim->preseek &&
        !tc_index &&
        position - (anim->curposition + 1) < anim->preseek)
    {
        av_log(anim->pFormatCtx, AV_LOG_DEBUG,
               "FETCH: within preseek interval (no index)\n");

        ffmpeg_decode_video_frame_scan(anim, pts_to_search);
    }
    else if (tc_index &&
             IMB_indexer_can_scan(tc_index, old_frame_index, new_frame_index))
    {
        av_log(anim->pFormatCtx, AV_LOG_DEBUG,
               "FETCH: within preseek interval (index tells us)\n");

        ffmpeg_decode_video_frame_scan(anim, pts_to_search);
    }
    else if (position != anim->curposition + 1) {
        long long pos;
        int ret;

        if (tc_index) {
            unsigned long long dts;

            pos = IMB_indexer_get_seek_pos(tc_index, new_frame_index);
            dts = IMB_indexer_get_seek_pos_dts(tc_index, new_frame_index);

            av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                   "TC INDEX seek pos = %lld\n", pos);
            av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                   "TC INDEX seek dts = %llu\n", dts);

            if (ffmpeg_seek_by_byte(anim->pFormatCtx)) {
                av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                       "... using BYTE pos\n");

                ret = av_seek_frame(anim->pFormatCtx, -1,
                                    pos, AVSEEK_FLAG_BYTE);
                av_update_cur_dts(anim->pFormatCtx, v_st, dts);
            }
            else {
                av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                       "... using DTS pos\n");
                ret = av_seek_frame(anim->pFormatCtx, anim->videoStream,
                                    dts, AVSEEK_FLAG_BACKWARD);
            }
        }
        else {
            pos = (long long) (position - anim->preseek) *
                  AV_TIME_BASE / frame_rate;

            av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                   "NO INDEX seek pos = %lld, st_time = %lld\n",
                   pos, (st_time != AV_NOPTS_VALUE) ? st_time : 0);

            if (pos < 0) {
                pos = 0;
            }

            if (st_time != AV_NOPTS_VALUE) {
                pos += st_time;
            }

            av_log(anim->pFormatCtx, AV_LOG_DEBUG,
                   "NO INDEX final seek pos = %lld\n", pos);

            ret = av_seek_frame(anim->pFormatCtx, -1,
                                pos, AVSEEK_FLAG_BACKWARD);
        }

        if (ret < 0) {
            av_log(anim->pFormatCtx, AV_LOG_ERROR,
                   "FETCH: "
                   "error while seeking to DTS = %lld "
                   "(frameno = %d, PTS = %lld): errcode = %d\n",
                   pos, position, (long long int)pts_to_search, ret);
        }

        avcodec_flush_buffers(anim->pCodecCtx);

        anim->next_pts = -1;

        if (anim->next_packet.stream_index == anim->videoStream) {
            av_free_packet(&anim->next_packet);
            anim->next_packet.stream_index = -1;
        }

        /* memset(anim->pFrame, ...) ?? */

        if (ret >= 0) {
            ffmpeg_decode_video_frame_scan(anim, pts_to_search);
        }
    }
    else if (position == 0 && anim->curposition == -1) {
        /* first frame without seeking special case... */
        ffmpeg_decode_video_frame(anim);
    }
    else {
        av_log(anim->pFormatCtx, AV_LOG_DEBUG,
               "FETCH: no seek necessary, just continue...\n");
    }

    IMB_freeImBuf(anim->last_frame);
    anim->last_frame = IMB_allocImBuf(anim->x, anim->y, 32, IB_rect);
    anim->last_frame->rect_colorspace =
        colormanage_colorspace_get_named(anim->colorspace);

    ffmpeg_postprocess(anim);

    anim->last_pts = anim->next_pts;

    ffmpeg_decode_video_frame(anim);

    anim->curposition = position;

    IMB_refImBuf(anim->last_frame);

    return anim->last_frame;
}
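/*
 * A minimal sketch (hypothetical helper, not Blender API) of the
 * frame-number-to-PTS arithmetic used above when no timecode index is
 * available: position / frame_rate gives seconds, dividing by the stream
 * time base converts seconds into stream ticks, and the container start
 * time (in AV_TIME_BASE = 1000000 units) is rebased into the same ticks.
 * E.g. frame 100 at 25 fps with a 1/90000 time base gives
 * 100 * 90000 / 25 = 360000 ticks.
 */
#include <math.h>
#include <stdint.h>

static int64_t frame_to_pts_sketch(int position, double frame_rate,
                                   double pts_time_base, /* av_q2d(time_base) */
                                   int64_t st_time)      /* AV_TIME_BASE units */
{
    int64_t pts = (int64_t)floor((double)position /
                                 pts_time_base / frame_rate + 0.5);
    if (st_time > 0) /* stands in for the AV_NOPTS_VALUE check above */
        pts += (int64_t)(st_time / pts_time_base / 1000000.0);
    return pts;
}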
static int rtp_parse_one_packet(RTPDemuxContext *s, AVPacket *pkt,
                                uint8_t **bufptr, int len)
{
    uint8_t *buf = bufptr ? *bufptr : NULL;
    int ret, flags = 0;
    uint32_t timestamp;
    int rv = 0;

    if (!buf) {
        /* If parsing of the previous packet actually returned 0 or an error,
         * there's nothing more to be parsed from that packet, but we may have
         * indicated that we can return the next enqueued packet. */
        if (s->prev_ret <= 0)
            return rtp_parse_queued_packet(s, pkt);
        /* return the next packets, if any */
        if (s->st && s->parse_packet) {
            /* timestamp should be overwritten by parse_packet, if not,
             * the packet is left with pts == AV_NOPTS_VALUE */
            timestamp = RTP_NOTS_VALUE;
            rv = s->parse_packet(s->ic, s->dynamic_protocol_context,
                                 s->st, pkt, &timestamp, NULL, 0, flags);
            finalize_packet(s, pkt, timestamp);
            return rv;
        } else {
            // TODO: Move to a dynamic packet handler (like above)
            if (s->read_buf_index >= s->read_buf_size)
                return AVERROR(EAGAIN);
            ret = ff_mpegts_parse_packet(s->ts, pkt,
                                         s->buf + s->read_buf_index,
                                         s->read_buf_size - s->read_buf_index);
            if (ret < 0)
                return AVERROR(EAGAIN);
            s->read_buf_index += ret;
            if (s->read_buf_index < s->read_buf_size)
                return 1;
            else
                return 0;
        }
    }

    if (len < 12)
        return -1;

    if ((buf[0] & 0xc0) != (RTP_VERSION << 6))
        return -1;
    if (buf[1] >= RTCP_SR && buf[1] <= RTCP_APP) {
        return rtcp_parse_packet(s, buf, len);
    }

    if ((s->seq == 0 && !s->queue) || s->queue_size <= 1) {
        /* First packet, or no reordering */
        return rtp_parse_packet_internal(s, pkt, buf, len);
    } else {
        uint16_t seq = AV_RB16(buf + 2);
        int16_t diff = seq - s->seq;
        if (diff < 0) {
            /* Packet older than the previously emitted one, drop */
            av_log(s->st ? s->st->codec : NULL, AV_LOG_WARNING,
                   "RTP: dropping old packet received too late\n");
            return -1;
        } else if (diff <= 1) {
            /* Correct packet */
            rv = rtp_parse_packet_internal(s, pkt, buf, len);
            return rv;
        } else {
            /* Still missing some packet, enqueue this one. */
            enqueue_packet(s, buf, len);
            *bufptr = NULL;
            /* Return the first enqueued packet if the queue is full,
             * even if we're missing something */
            if (s->queue_len >= s->queue_size)
                return rtp_parse_queued_packet(s, pkt);
            return -1;
        }
    }
}
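/*
 * A minimal sketch of the wraparound-safe sequence comparison used above:
 * subtracting two 16-bit sequence numbers and truncating the result to a
 * signed 16-bit value classifies the new packet as late (negative), in
 * order (0 or 1), or ahead of a gap (> 1), even across the 0xffff -> 0
 * wrap. The helper name is illustrative only.
 */
#include <stdint.h>

static int16_t seq_diff_sketch(uint16_t new_seq, uint16_t last_seq)
{
    return (int16_t)(new_seq - last_seq);
}

/* e.g. seq_diff_sketch(0x0001, 0xffff) ==  2  (two ahead across the wrap),
 *      seq_diff_sketch(0xfffe, 0x0001) == -3  (a late, out-of-order packet) */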
static int v4l2_set_parameters(AVFormatContext *s1, AVFormatParameters *ap)
{
    struct video_data *s = s1->priv_data;
    struct v4l2_input input;
    struct v4l2_standard standard;
    struct v4l2_streamparm streamparm = { 0 };
    struct v4l2_fract *tpf = &streamparm.parm.capture.timeperframe;
    int i;

    streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

    if (ap->channel >= 0) {
        /* set tv video input */
        memset(&input, 0, sizeof(input));
        input.index = ap->channel;
        if (ioctl(s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
            av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl enum input failed:\n");
            return AVERROR(EIO);
        }

        av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set input_id: %d, input: %s\n",
               ap->channel, input.name);
        if (ioctl(s->fd, VIDIOC_S_INPUT, &input.index) < 0) {
            av_log(s1, AV_LOG_ERROR,
                   "The V4L2 driver ioctl set input(%d) failed\n",
                   ap->channel);
            return AVERROR(EIO);
        }
    }

    if (ap->standard) {
        av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set standard: %s\n",
               ap->standard);
        /* set tv standard */
        memset(&standard, 0, sizeof(standard));
        for (i = 0; ; i++) {
            standard.index = i;
            if (ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
                av_log(s1, AV_LOG_ERROR,
                       "The V4L2 driver ioctl set standard(%s) failed\n",
                       ap->standard);
                return AVERROR(EIO);
            }

            if (!strcasecmp(standard.name, ap->standard)) {
                break;
            }
        }

        av_log(s1, AV_LOG_DEBUG,
               "The V4L2 driver set standard: %s, id: %"PRIu64"\n",
               ap->standard, (uint64_t)standard.id);
        if (ioctl(s->fd, VIDIOC_S_STD, &standard.id) < 0) {
            av_log(s1, AV_LOG_ERROR,
                   "The V4L2 driver ioctl set standard(%s) failed\n",
                   ap->standard);
            return AVERROR(EIO);
        }
    }

    if (ap->time_base.num && ap->time_base.den) {
        av_log(s1, AV_LOG_DEBUG, "Setting time per frame to %d/%d\n",
               ap->time_base.num, ap->time_base.den);
        tpf->numerator   = ap->time_base.num;
        tpf->denominator = ap->time_base.den;
        if (ioctl(s->fd, VIDIOC_S_PARM, &streamparm) != 0) {
            av_log(s1, AV_LOG_ERROR,
                   "ioctl set time per frame(%d/%d) failed\n",
                   ap->time_base.num, ap->time_base.den);
            return AVERROR(EIO);
        }

        if (ap->time_base.den != tpf->denominator ||
            ap->time_base.num != tpf->numerator) {
            av_log(s1, AV_LOG_INFO,
                   "The driver changed the time per frame from %d/%d to %d/%d\n",
                   ap->time_base.num, ap->time_base.den,
                   tpf->numerator, tpf->denominator);
        }
    } else {
        /* if timebase value is not set in ap, read the timebase value
         * from the driver */
        if (ioctl(s->fd, VIDIOC_G_PARM, &streamparm) != 0) {
            av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_PARM): %s\n",
                   strerror(errno));
            return AVERROR(errno);
        }
    }
    ap->time_base.num = tpf->numerator;
    ap->time_base.den = tpf->denominator;

    return 0;
}
static int tta_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    TTAContext *c = s->priv_data;
    AVStream *st;
    int i, channels, bps, samplerate, datalen, framelen;
    uint64_t framepos;

    if (get_le32(s->pb) != ff_get_fourcc("TTA1"))
        return -1; // not tta file

    url_fskip(s->pb, 2); // FIXME: flags
    channels = get_le16(s->pb);
    bps = get_le16(s->pb);
    samplerate = get_le32(s->pb);
    if (samplerate <= 0 || samplerate > 1000000) {
        av_log(s, AV_LOG_ERROR, "nonsense samplerate\n");
        return -1;
    }

    datalen = get_le32(s->pb);
    if (datalen < 0) {
        av_log(s, AV_LOG_ERROR, "nonsense datalen\n");
        return -1;
    }

    url_fskip(s->pb, 4); // header crc

    framelen = samplerate * 256 / 245;
    c->totalframes = datalen / framelen + ((datalen % framelen) ? 1 : 0);
    c->currentframe = 0;

    if (c->totalframes >= UINT_MAX / sizeof(uint32_t)) {
        av_log(s, AV_LOG_ERROR, "totalframes too large\n");
        return -1;
    }

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    av_set_pts_info(st, 64, 1, samplerate);
    st->start_time = 0;
    st->duration = datalen;

    framepos = url_ftell(s->pb) + 4 * c->totalframes + 4;

    for (i = 0; i < c->totalframes; i++) {
        uint32_t size = get_le32(s->pb);
        av_add_index_entry(st, framepos, i * framelen, size, 0,
                           AVINDEX_KEYFRAME);
        framepos += size;
    }
    url_fskip(s->pb, 4); // seektable crc

    st->codec->codec_type = CODEC_TYPE_AUDIO;
    st->codec->codec_id = CODEC_ID_TTA;
    st->codec->channels = channels;
    st->codec->sample_rate = samplerate;
    st->codec->bits_per_sample = bps;

    st->codec->extradata_size = url_ftell(s->pb);
    if (st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE <=
        (unsigned)st->codec->extradata_size) {
        //this check is redundant as get_buffer should fail
        av_log(s, AV_LOG_ERROR, "extradata_size too large\n");
        return -1;
    }
    st->codec->extradata = av_mallocz(st->codec->extradata_size +
                                      FF_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codec->extradata)
        return AVERROR(ENOMEM);
    url_fseek(s->pb, 0, SEEK_SET);
    get_buffer(s->pb, st->codec->extradata, st->codec->extradata_size);

    return 0;
}
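/*
 * A small worked sketch of the frame-length arithmetic above: a TTA frame
 * holds samplerate * 256 / 245 samples (roughly 1.045 s of audio), and the
 * frame count is the data length in samples divided by that, rounded up.
 * The function name is illustrative only.
 */
static unsigned tta_total_frames_sketch(int samplerate, int datalen)
{
    int framelen = samplerate * 256 / 245;
    /* e.g. 44100 Hz -> framelen = 46080 samples; 10 s of audio
     * (datalen = 441000) -> 9 full frames plus a remainder -> 10 frames */
    return datalen / framelen + ((datalen % framelen) ? 1 : 0);
}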
static int msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic,
                             const uint8_t *data, int data_size)
{
    int stream_ptr = 0;
    unsigned char rle_code;
    unsigned char extra_byte, odd_pixel;
    unsigned char stream_byte;
    int pixel_ptr = 0;
    int row_dec = pic->linesize[0];
    int row_ptr = (avctx->height - 1) * row_dec;
    int frame_size = row_dec * avctx->height;
    int i;

    while (row_ptr >= 0) {
        FETCH_NEXT_STREAM_BYTE();
        rle_code = stream_byte;
        if (rle_code == 0) {
            /* fetch the next byte to see how to handle escape code */
            FETCH_NEXT_STREAM_BYTE();
            if (stream_byte == 0) {
                /* line is done, goto the next one */
                row_ptr -= row_dec;
                pixel_ptr = 0;
            } else if (stream_byte == 1) {
                /* decode is done */
                return 0;
            } else if (stream_byte == 2) {
                /* reposition frame decode coordinates */
                FETCH_NEXT_STREAM_BYTE();
                pixel_ptr += stream_byte;
                FETCH_NEXT_STREAM_BYTE();
                row_ptr -= stream_byte * row_dec;
            } else {
                // copy pixels from encoded stream
                odd_pixel =  stream_byte & 1;
                rle_code = (stream_byte + 1) / 2;
                extra_byte = rle_code & 0x01;
                if ((row_ptr + pixel_ptr + stream_byte > frame_size) ||
                    (row_ptr < 0)) {
                    av_log(avctx, AV_LOG_ERROR,
                           " MS RLE: frame ptr just went out of bounds (1)\n");
                    return -1;
                }

                for (i = 0; i < rle_code; i++) {
                    if (pixel_ptr >= avctx->width)
                        break;
                    FETCH_NEXT_STREAM_BYTE();
                    pic->data[0][row_ptr + pixel_ptr] = stream_byte >> 4;
                    pixel_ptr++;
                    if (i + 1 == rle_code && odd_pixel)
                        break;
                    if (pixel_ptr >= avctx->width)
                        break;
                    pic->data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F;
                    pixel_ptr++;
                }

                // if the RLE code is odd, skip a byte in the stream
                if (extra_byte)
                    stream_ptr++;
            }
        } else {
            // decode a run of data
            if ((row_ptr + pixel_ptr + stream_byte > frame_size) ||
                (row_ptr < 0)) {
                av_log(avctx, AV_LOG_ERROR,
                       " MS RLE: frame ptr just went out of bounds (2)\n");
                return -1;
            }
            FETCH_NEXT_STREAM_BYTE();
            for (i = 0; i < rle_code; i++) {
                if (pixel_ptr >= avctx->width)
                    break;
                /* alternate between the high and low nibble of the run byte */
                if ((i & 1) == 0)
                    pic->data[0][row_ptr + pixel_ptr] = stream_byte >> 4;
                else
                    pic->data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F;
                pixel_ptr++;
            }
        }
    }

    /* one last sanity check on the way out */
    if (stream_ptr < data_size)
        av_log(avctx, AV_LOG_ERROR,
               " MS RLE: ended frame decode with bytes left over (%d < %d)\n",
               stream_ptr, data_size);

    return 0;
}
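/*
 * A hand-worked sketch of the 4-bit RLE run case above: the pair
 * {0x05, 0x27} means "emit 5 pixels, alternating the high and low nibbles
 * of 0x27", i.e. pixels 2, 7, 2, 7, 2. Illustrative helper only, not part
 * of the decoder.
 */
#include <stdint.h>

static int rle4_expand_run_sketch(uint8_t count, uint8_t packed,
                                  uint8_t *out, int max)
{
    int i;
    for (i = 0; i < count && i < max; i++)
        out[i] = (i & 1) ? (packed & 0x0F) : (packed >> 4);
    return i; /* number of pixels written */
}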
int ff_h264_decode_ref_pic_list_reordering(H264Context *h)
{
    int list, index, pic_structure, i;

    print_short_term(h);
    print_long_term(h);

    for (list = 0; list < h->list_count; list++) {
        for (i = 0; i < h->ref_count[list]; i++)
            COPY_PICTURE(&h->ref_list[list][i], &h->default_ref_list[list][i]);

        if (get_bits1(&h->gb)) {    // ref_pic_list_modification_flag_l0/l1
            int pred = h->curr_pic_num;

            for (index = 0; ; index++) {
                unsigned int reordering_of_pic_nums_idc = get_ue_golomb_31(&h->gb);
                unsigned int pic_id;
                int i;
                Picture *ref = NULL;

                if (reordering_of_pic_nums_idc == 3)
                    break;

                if (index >= h->ref_count[list]) {
                    av_log(h->avctx, AV_LOG_ERROR, "reference count overflow\n");
                    return -1;
                }

                switch (reordering_of_pic_nums_idc) {
                case 0:
                case 1: {
                    const unsigned int abs_diff_pic_num = get_ue_golomb(&h->gb) + 1;
                    int frame_num;

                    if (abs_diff_pic_num > h->max_pic_num) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "abs_diff_pic_num overflow\n");
                        return AVERROR_INVALIDDATA;
                    }

                    if (reordering_of_pic_nums_idc == 0)
                        pred -= abs_diff_pic_num;
                    else
                        pred += abs_diff_pic_num;
                    pred &= h->max_pic_num - 1;

                    frame_num = pic_num_extract(h, pred, &pic_structure);

                    for (i = h->short_ref_count - 1; i >= 0; i--) {
                        ref = h->short_ref[i];
                        assert(ref->reference);
                        assert(!ref->long_ref);
                        if (ref->frame_num == frame_num &&
                            (ref->reference & pic_structure))
                            break;
                    }
                    if (i >= 0)
                        ref->pic_id = pred;
                    break;
                }
                case 2: {
                    int long_idx;
                    pic_id = get_ue_golomb(&h->gb); // long_term_pic_idx

                    long_idx = pic_num_extract(h, pic_id, &pic_structure);

                    if (long_idx > 31) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "long_term_pic_idx overflow\n");
                        return AVERROR_INVALIDDATA;
                    }
                    ref = h->long_ref[long_idx];
                    assert(!(ref && !ref->reference));
                    if (ref && (ref->reference & pic_structure)) {
                        ref->pic_id = pic_id;
                        assert(ref->long_ref);
                        i = 0;
                    } else {
                        i = -1;
                    }
                    break;
                }
                default:
                    av_log(h->avctx, AV_LOG_ERROR,
                           "illegal reordering_of_pic_nums_idc\n");
                    return AVERROR_INVALIDDATA;
                }

                if (i < 0) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "reference picture missing during reorder\n");
                    memset(&h->ref_list[list][index], 0, sizeof(Picture)); // FIXME
                } else {
                    for (i = index; i + 1 < h->ref_count[list]; i++) {
                        if (ref->long_ref == h->ref_list[list][i].long_ref &&
                            ref->pic_id   == h->ref_list[list][i].pic_id)
                            break;
                    }
                    for (; i > index; i--) {
                        COPY_PICTURE(&h->ref_list[list][i],
                                     &h->ref_list[list][i - 1]);
                    }
                    COPY_PICTURE(&h->ref_list[list][index], ref);
                    if (FIELD_PICTURE(h)) {
                        pic_as_field(&h->ref_list[list][index], pic_structure);
                    }
                }
            }
        }
    }

    for (list = 0; list < h->list_count; list++) {
        for (index = 0; index < h->ref_count[list]; index++) {
            if (!h->ref_list[list][index].f.buf[0] ||
                (!FIELD_PICTURE(h) &&
                 (h->ref_list[list][index].reference & 3) != 3)) {
                int i;
                av_log(h->avctx, AV_LOG_ERROR,
                       "Missing reference picture, default is %d\n",
                       h->default_ref_list[list][0].poc);
                for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++)
                    h->last_pocs[i] = INT_MIN;
                if (h->default_ref_list[list][0].f.buf[0] &&
                    !(!FIELD_PICTURE(h) &&
                      (h->default_ref_list[list][0].reference & 3) != 3))
                    COPY_PICTURE(&h->ref_list[list][index],
                                 &h->default_ref_list[list][0]);
                else
                    return -1;
            }
            av_assert0(av_buffer_get_ref_count(h->ref_list[list][index].f.buf[0]) > 0);
        }
    }

    return 0;
}
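/*
 * A minimal sketch of the modular picture-number prediction in cases 0/1
 * above: abs_diff_pic_num is subtracted or added and the result wrapped
 * into [0, max_pic_num) with a mask, which works because max_pic_num is a
 * power of two. Hypothetical helper, not the codec's API.
 */
static int predict_pic_num_sketch(int pred, unsigned abs_diff,
                                  int subtract, unsigned max_pic_num)
{
    if (subtract)   /* reordering_of_pic_nums_idc == 0 */
        pred -= abs_diff;
    else            /* reordering_of_pic_nums_idc == 1 */
        pred += abs_diff;
    return pred & (max_pic_num - 1);
    /* e.g. max_pic_num = 16, pred = 1, abs_diff = 3, subtract:
     * (1 - 3) & 15 = 14, i.e. the number wraps below zero */
}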
static av_cold int rv10_decode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    static int done = 0;

    if (avctx->extradata_size < 8) {
        av_log(avctx, AV_LOG_ERROR, "Extradata is too small.\n");
        return -1;
    }

    MPV_decode_defaults(s);

    s->avctx = avctx;
    s->out_format = FMT_H263;
    s->codec_id = avctx->codec_id;

    s->orig_width  = s->width  = avctx->coded_width;
    s->orig_height = s->height = avctx->coded_height;

    s->h263_long_vectors = ((uint8_t*)avctx->extradata)[3] & 1;
    avctx->sub_id = AV_RB32((uint8_t*)avctx->extradata + 4);

    if (avctx->sub_id == 0x10000000) {
        s->rv10_version = 0;
        s->low_delay = 1;
    } else if (avctx->sub_id == 0x10001000) {
        s->rv10_version = 3;
        s->low_delay = 1;
    } else if (avctx->sub_id == 0x10002000) {
        s->rv10_version = 3;
        s->low_delay = 1;
        s->obmc = 1;
    } else if (avctx->sub_id == 0x10003000) {
        s->rv10_version = 3;
        s->low_delay = 1;
    } else if (avctx->sub_id == 0x10003001) {
        s->rv10_version = 3;
        s->low_delay = 1;
    } else if (avctx->sub_id == 0x20001000 ||
               (avctx->sub_id >= 0x20100000 && avctx->sub_id < 0x201a0000)) {
        s->low_delay = 1;
    } else if (avctx->sub_id == 0x30202002 ||
               avctx->sub_id == 0x30203002 ||
               (avctx->sub_id >= 0x20200002 && avctx->sub_id < 0x20300000)) {
        s->low_delay = 0;
        s->avctx->has_b_frames = 1;
    } else
        av_log(s->avctx, AV_LOG_ERROR, "unknown header %X\n", avctx->sub_id);

    if (avctx->debug & FF_DEBUG_PICT_INFO) {
        av_log(avctx, AV_LOG_DEBUG, "ver:%X ver0:%X\n", avctx->sub_id,
               avctx->extradata_size >= 4 ? ((uint32_t*)avctx->extradata)[0] : -1);
    }

    avctx->pix_fmt = PIX_FMT_YUV420P;

    if (MPV_common_init(s) < 0)
        return -1;

    h263_decode_init_vlc(s);

    /* init rv vlc */
    if (!done) {
        INIT_VLC_STATIC(&rv_dc_lum, DC_VLC_BITS, 256,
                        rv_lum_bits, 1, 1,
                        rv_lum_code, 2, 2, 16384);
        INIT_VLC_STATIC(&rv_dc_chrom, DC_VLC_BITS, 256,
                        rv_chrom_bits, 1, 1,
                        rv_chrom_code, 2, 2, 16388);
        done = 1;
    }

    return 0;
}
static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                const AVFrame *pic, int *got_packet)
{
    UtVideoContext *utv = (UtVideoContext *)avctx->priv_data;
    int w = avctx->width, h = avctx->height;
    int ret, rgb_size, i;
    bool keyframe;
    uint8_t *y, *u, *v;
    uint8_t *dst;

    /* Alloc buffer */
    if ((ret = ff_alloc_packet2(avctx, pkt, utv->buf_size, 0)) < 0)
        return ret;

    dst = pkt->data;

    /* Move input data into a Ut Video friendly buffer, if needed */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
        /* YV12 stores the V plane before the U plane, hence the swapped
         * pic->data[2] / pic->data[1] copies below */
        y = utv->buffer;
        u = y + w * h;
        v = u + w * h / 4;
        for (i = 0; i < h; i++) {
            memcpy(y, pic->data[0] + i * pic->linesize[0], w);
            y += w;
        }
        for (i = 0; i < h / 2; i++) {
            memcpy(u, pic->data[2] + i * pic->linesize[2], w >> 1);
            memcpy(v, pic->data[1] + i * pic->linesize[1], w >> 1);
            u += w >> 1;
            v += w >> 1;
        }
        break;
    case AV_PIX_FMT_YUYV422:
        for (i = 0; i < h; i++)
            memcpy(utv->buffer + i * (w << 1),
                   pic->data[0] + i * pic->linesize[0], w << 1);
        break;
    case AV_PIX_FMT_BGR24:
    case AV_PIX_FMT_RGB32:
        /* Ut Video takes bottom-up BGR */
        rgb_size = avctx->pix_fmt == AV_PIX_FMT_BGR24 ? 3 : 4;
        for (i = 0; i < h; i++)
            memcpy(utv->buffer + (h - i - 1) * w * rgb_size,
                   pic->data[0] + i * pic->linesize[0],
                   w * rgb_size);
        break;
    default:
        return AVERROR(EINVAL);
    }

    /* Encode frame */
    pkt->size = utv->codec->EncodeFrame(dst, &keyframe, utv->buffer);

    if (!pkt->size) {
        av_log(avctx, AV_LOG_ERROR, "EncodeFrame failed!\n");
        return AVERROR_INVALIDDATA;
    }

    /*
     * Ut Video is intra-only and every frame is a keyframe,
     * and the API always returns true. In case something
     * drastic changes in the future, such as inter support,
     * assert that this is true.
     */
    av_assert2(keyframe == true);

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}