/* for setdar filter, convert from frame aspect ratio to pixel aspect ratio */
static int setdar_config_props(AVFilterLink *inlink)
{
    AspectContext *s = inlink->dst->priv;
    AVRational dar;
    int ret;

#if FF_API_OLD_FILTER_OPTS
    /* Legacy num/den options take precedence: only parse the ratio option
     * when they are not both set. */
    if (!(s->aspect_num > 0 && s->aspect_den > 0)) {
#endif
    if ((ret = get_aspect_ratio(inlink, &s->dar)))
        return ret;
#if FF_API_OLD_FILTER_OPTS
    }
#endif

    if (s->dar.num && s->dar.den) {
        /* sar = dar * h/w; the max of 100 keeps num/den small (legacy
         * behaviour — less precise than reducing with INT_MAX) */
        av_reduce(&s->sar.num, &s->sar.den,
                   s->dar.num * inlink->h,
                   s->dar.den * inlink->w, 100);
        inlink->sample_aspect_ratio = s->sar;
        dar = s->dar;
    } else {
        /* DAR unspecified: assume square pixels, derive DAR from the frame */
        inlink->sample_aspect_ratio = (AVRational){ 1, 1 };
        dar = (AVRational){ inlink->w, inlink->h };
    }

    av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d -> dar:%d/%d sar:%d/%d\n",
           inlink->w, inlink->h, dar.num, dar.den,
           inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den);

    return 0;
}
// Deliver a matched pair of MVC views (base + dependent) downstream as one
// stereo LAVFrame. Both buffers must describe the same output picture.
HRESULT CDecMSDKMVC::DeliverOutput(MVCBuffer * pBaseView, MVCBuffer * pExtraView)
{
  mfxStatus sts = MFX_ERR_NONE;

  // The base view carries ViewId 0, the dependent view a non-zero id, and
  // both must come from the same decode-order position.
  ASSERT(pBaseView->surface.Info.FrameId.ViewId == 0 && pExtraView->surface.Info.FrameId.ViewId > 0);
  ASSERT(pBaseView->surface.Data.FrameOrder == pExtraView->surface.Data.FrameOrder);

  // Sync base view
  do {
    sts = MFXVideoCORE_SyncOperation(m_mfxSession, pBaseView->sync, 1000);
  } while (sts == MFX_WRN_IN_EXECUTION);
  pBaseView->sync = nullptr;

  // Sync extra view
  do {
    sts = MFXVideoCORE_SyncOperation(m_mfxSession, pExtraView->sync, 1000);
  } while (sts == MFX_WRN_IN_EXECUTION);
  pExtraView->sync = nullptr;

  LAVFrame *pFrame = nullptr;
  // NOTE(review): the AllocateFrame() result is not checked — pFrame would be
  // dereferenced as nullptr if it fails; confirm AllocateFrame cannot fail.
  AllocateFrame(&pFrame);

  pFrame->width  = pBaseView->surface.Info.CropW;
  pFrame->height = pBaseView->surface.Info.CropH;

  // NV12 planes of the base view go into data[0/1], the dependent view into
  // the stereo[] plane pointers; data[2/3] smuggle the buffer pointers to the
  // destructor so the surfaces can be released later.
  pFrame->data[0]   = pBaseView->surface.Data.Y;
  pFrame->data[1]   = pBaseView->surface.Data.UV;
  pFrame->stereo[0] = pExtraView->surface.Data.Y;
  pFrame->stereo[1] = pExtraView->surface.Data.UV;
  pFrame->data[2]   = (uint8_t *)pBaseView;
  pFrame->data[3]   = (uint8_t *)pExtraView;
  pFrame->stride[0] = pBaseView->surface.Data.PitchLow;
  pFrame->stride[1] = pBaseView->surface.Data.PitchLow;

  pFrame->format = LAVPixFmt_NV12;
  pFrame->bpp    = 8;
  pFrame->flags |= LAV_FRAME_FLAG_MVC;

  // Only trust timestamps that came from the original bitstream.
  if (!(pBaseView->surface.Data.DataFlag & MFX_FRAMEDATA_ORIGINAL_TIMESTAMP))
    pBaseView->surface.Data.TimeStamp = MFX_TIMESTAMP_UNKNOWN;

  if (pBaseView->surface.Data.TimeStamp != MFX_TIMESTAMP_UNKNOWN) {
    pFrame->rtStart = pBaseView->surface.Data.TimeStamp;
    pFrame->rtStart -= TIMESTAMP_OFFSET;
  }
  else {
    pFrame->rtStart = AV_NOPTS_VALUE;
  }

  // Display aspect ratio = pixel aspect * frame dimensions, reduced.
  int64_t num = (int64_t)pBaseView->surface.Info.AspectRatioW * pFrame->width;
  int64_t den = (int64_t)pBaseView->surface.Info.AspectRatioH * pFrame->height;
  av_reduce(&pFrame->aspect_ratio.num, &pFrame->aspect_ratio.den, num, den, INT_MAX);

  pFrame->destruct  = msdk_buffer_destruct;
  pFrame->priv_data = this;

  GetOffsetSideData(pFrame, pBaseView->surface.Data.TimeStamp);

  return Deliver(pFrame);
}
/* Allocate and initialize a polyphase resampling context converting
 * in_rate -> out_rate samples. Returns NULL on allocation/parameter failure. */
AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_size, int phase_shift, int linear, double cutoff){
    AVResampleContext *c= av_mallocz(sizeof(AVResampleContext));
    double factor= FFMIN(out_rate * cutoff / in_rate, 1.0);
    int phase_count= 1<<phase_shift;

    if (!c)
        return NULL;

    c->phase_shift= phase_shift;
    c->phase_mask= phase_count-1;
    c->linear= linear;

    /* when downsampling (factor < 1) the anti-alias filter must be stretched,
     * so its length grows by 1/factor */
    c->filter_length= FFMAX((int)ceil(filter_size/factor), 1);
    c->filter_bank= av_mallocz(c->filter_length*(phase_count+1)*sizeof(FELEM));
    if (!c->filter_bank)
        goto error;
    if (build_filter(c->filter_bank, factor, c->filter_length, phase_count, 1<<FILTER_SHIFT, WINDOW_TYPE))
        goto error;

    /* replicate phase 0 (shifted by one tap) past the last phase so linear
     * interpolation between phase p and p+1 never reads out of bounds */
    memcpy(&c->filter_bank[c->filter_length*phase_count+1], c->filter_bank, (c->filter_length-1)*sizeof(FELEM));
    c->filter_bank[c->filter_length*phase_count]= c->filter_bank[c->filter_length - 1];

    /* reduce out_rate / (in_rate * phase_count) into the src/dst increments */
    if(!av_reduce(&c->src_incr, &c->dst_incr, out_rate, in_rate * (int64_t)phase_count, INT32_MAX/2))
        goto error;
    c->ideal_dst_incr= c->dst_incr;

    /* center the filter on the first output sample */
    c->index= -phase_count*((c->filter_length-1)/2);

    return c;
error:
    av_free(c->filter_bank);
    av_free(c);
    return NULL;
}
/**
 * Append a new video stream to the output container, copying the relevant
 * encoding parameters from the given codec description.
 * @return reference to the wrapped output stream (owned by this OutputFile)
 * @throws std::runtime_error if the stream cannot be created
 */
IOutputStream& OutputFile::addVideoStream( const VideoCodec& videoDesc )
{
	assert( _formatContext != NULL );

	if( ( _stream = avformat_new_stream( _formatContext, videoDesc.getAVCodec() ) ) == NULL )
	{
		throw std::runtime_error( "unable to add new video stream" );
	}

	// mirror the source codec context parameters onto the new stream
	_stream->codec->width  = videoDesc.getAVCodecContext()->width;
	_stream->codec->height = videoDesc.getAVCodecContext()->height;
	_stream->codec->bit_rate = videoDesc.getAVCodecContext()->bit_rate;
	_stream->codec->ticks_per_frame = videoDesc.getAVCodecContext()->ticks_per_frame;
	_stream->codec->pix_fmt = videoDesc.getAVCodecContext()->pix_fmt;
	_stream->codec->profile = videoDesc.getAVCodecContext()->profile;
	_stream->codec->level = videoDesc.getAVCodecContext()->level;

	// need to set the time_base on the AVCodecContext and the AVStream...
	// (time_base is scaled by ticks_per_frame, then reduced)
	av_reduce(
		&_stream->codec->time_base.num,
		&_stream->codec->time_base.den,
		videoDesc.getAVCodecContext()->time_base.num * videoDesc.getAVCodecContext()->ticks_per_frame,
		videoDesc.getAVCodecContext()->time_base.den,
		INT_MAX );

	_stream->time_base = _stream->codec->time_base;

	// NOTE(review): raw new is stored in _outputStreams — presumably the
	// container owns and frees these wrappers; confirm in the destructor.
	AvOutputStream* avOutputStream = new AvOutputStream( *this, _formatContext->nb_streams - 1 );

	_outputStreams.push_back( avOutputStream );

	return *_outputStreams.back();
}
/* setdar: derive the output pixel aspect ratio from the requested display
 * aspect ratio, logging the old and new aspect values. */
static int setdar_config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    AspectContext *s     = ctx->priv;
    AVRational prev_sar  = inlink->sample_aspect_ratio;
    AVRational prev_dar, new_dar;
    int err;

    err = get_aspect_ratio(inlink, &s->dar);
    if (err)
        return err;

    /* remember the aspect values before the change, for logging */
    compute_dar(&prev_dar, prev_sar, inlink->w, inlink->h);

    if (!s->dar.num || !s->dar.den) {
        /* no DAR requested: square pixels, DAR follows the frame size */
        outlink->sample_aspect_ratio = (AVRational){ 1, 1 };
        new_dar = (AVRational){ inlink->w, inlink->h };
    } else {
        /* sar = dar * h/w, reduced with full precision */
        av_reduce(&s->sar.num, &s->sar.den,
                  s->dar.num * inlink->h,
                  s->dar.den * inlink->w, INT_MAX);
        outlink->sample_aspect_ratio = s->sar;
        new_dar = s->dar;
    }

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d dar:%d/%d sar:%d/%d -> dar:%d/%d sar:%d/%d\n",
           inlink->w, inlink->h,
           prev_dar.num, prev_dar.den, prev_sar.num, prev_sar.den,
           new_dar.num, new_dar.den,
           outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den);

    return 0;
}
// Create the 'pasp' atom for video tracks. No guesswork required. // References http://www.uwasa.fi/~f76998/video/conversion/ void set_track_clean_aperture_ext(ImageDescriptionHandle imgDesc, Fixed displayW, Fixed displayH, Fixed pixelW, Fixed pixelH) { if (displayW == pixelW && displayH == pixelH) return; AVRational dar, invPixelSize, sar; dar = (AVRational) { displayW, displayH }; invPixelSize = (AVRational) { pixelH, pixelW }; sar = av_mul_q(dar, invPixelSize); av_reduce(&sar.num, &sar.den, sar.num, sar.den, fixed1); if (sar.num == sar.den) return; PixelAspectRatioImageDescriptionExtension **pasp = (PixelAspectRatioImageDescriptionExtension**)NewHandle(sizeof(PixelAspectRatioImageDescriptionExtension)); **pasp = (PixelAspectRatioImageDescriptionExtension) { EndianU32_NtoB(sar.num), EndianU32_NtoB(sar.den) }; AddImageDescriptionExtension(imgDesc, (Handle)pasp, kPixelAspectRatioImageDescriptionExtension); DisposeHandle((Handle)pasp); }
/**
 * Add two rationals and return the reduced sum b + c.
 */
AVRational av_add_q(AVRational b, AVRational c)
{
    /* cross-multiply in 64 bits to avoid intermediate overflow */
    int64_t sum_num = b.num * (int64_t) c.den + c.num * (int64_t) b.den;
    int64_t sum_den = b.den * (int64_t) c.den;

    av_reduce(&b.num, &b.den, sum_num, sum_den, INT_MAX);
    return b;
}
/* (Re)initialize a software-resampler context for in_rate -> out_rate.
 * If the existing context c already matches the requested filter parameters
 * it is reused (only the rate ratio is refreshed); otherwise a fresh context
 * is allocated and returned. Returns NULL on error. */
ResampleContext *swri_resample_init(ResampleContext *c, int out_rate, int in_rate, int filter_size, int phase_shift, int linear, double cutoff, enum AVSampleFormat format){
    double factor= FFMIN(out_rate * cutoff / in_rate, 1.0);
    int phase_count= 1<<phase_shift;

    /* rebuild the filter bank only when a parameter affecting it changed */
    if (!c || c->phase_shift != phase_shift || c->linear!=linear || c->factor != factor
           || c->filter_length != FFMAX((int)ceil(filter_size/factor), 1) || c->format != format) {
        /* NOTE(review): when an incompatible non-NULL c is passed, a new
         * context is allocated and the old one is not freed here —
         * presumably the caller releases it; confirm at call sites. */
        c = av_mallocz(sizeof(*c));
        if (!c)
            return NULL;

        c->format= format;

        c->felem_size= av_get_bytes_per_sample(c->format);

        /* fixed-point formats scale coefficients; float/double use them raw */
        switch(c->format){
        case AV_SAMPLE_FMT_S16P:
            c->filter_shift = 15;
            break;
        case AV_SAMPLE_FMT_S32P:
            c->filter_shift = 30;
            break;
        case AV_SAMPLE_FMT_FLTP:
        case AV_SAMPLE_FMT_DBLP:
            c->filter_shift = 0;
            break;
        default:
            av_log(NULL, AV_LOG_ERROR, "Unsupported sample format\n");
            return NULL;
        }

        c->phase_shift   = phase_shift;
        c->phase_mask    = phase_count - 1;
        c->linear        = linear;
        c->factor        = factor;
        /* downsampling stretches the filter by 1/factor */
        c->filter_length = FFMAX((int)ceil(filter_size/factor), 1);
        c->filter_alloc  = FFALIGN(c->filter_length, 8);
        c->filter_bank   = av_mallocz(c->filter_alloc*(phase_count+1)*c->felem_size);
        if (!c->filter_bank)
            goto error;
        if (build_filter(c, (void*)c->filter_bank, factor, c->filter_length, c->filter_alloc, phase_count, 1<<c->filter_shift, WINDOW_TYPE))
            goto error;

        /* duplicate phase 0 past the last phase so linear interpolation
         * between adjacent phases never reads beyond the bank */
        memcpy(c->filter_bank + (c->filter_alloc*phase_count+1)*c->felem_size, c->filter_bank, (c->filter_alloc-1)*c->felem_size);
        memcpy(c->filter_bank + (c->filter_alloc*phase_count  )*c->felem_size, c->filter_bank + (c->filter_alloc - 1)*c->felem_size, c->felem_size);
    }

    c->compensation_distance= 0;
    /* reduce out_rate / (in_rate * phase_count) into the src/dst increments */
    if(!av_reduce(&c->src_incr, &c->dst_incr, out_rate, in_rate * (int64_t)phase_count, INT32_MAX/2))
        goto error;
    c->ideal_dst_incr= c->dst_incr;

    /* center the filter on the first output sample */
    c->index= -phase_count*((c->filter_length-1)/2);
    c->frac= 0;

    return c;
error:
    av_free(c->filter_bank);
    av_free(c);
    return NULL;
}
/**
 * Convert a double precision floating point number to an AVRational.
 * @param max the maximum allowed numerator and denominator
 */
AVRational av_d2q(double d, int max){
    AVRational a;
    /* choose den as a power of two scaled so that d*den stays below 2^62,
     * keeping the product representable in an int64_t */
    int exponent= FFMAX( (int)(log(ABS(d) + 1e-20)/log(2)), 0);
    int64_t den= 1LL << (61 - exponent);
    /* +0.5 rounds to nearest; av_reduce then clamps num/den to max */
    av_reduce(&a.num, &a.den, (int64_t)(d * den + 0.5), den, max);

    return a;
}
/**
 * Convert a double precision floating point number to an AVRational.
 * @param max the maximum allowed numerator and denominator
 */
AVRational av_d2q(double d, int max){
    AVRational a;
#define LOG2  0.69314718055994530941723212145817656807550013436025
    /* choose den as a power of two scaled so that d*den stays below 2^62 */
    int exponent= FFMAX( (int)(log(fabs(d) + 1e-20)/LOG2), 0);
    /* FIX: the shift must be done in 64 bits — "1 << (61 - exponent)" shifts
     * an int by up to 61 bits, which is undefined behaviour and truncates
     * den to 32 bits for small exponents. */
    int64_t den= 1LL << (61 - exponent);
    /* +0.5 rounds to nearest; av_reduce then clamps num/den to max */
    av_reduce(&a.num, &a.den, (int64_t)(d * den + 0.5), den, max);

    return a;
}
/* Build the YUV4MPEG2 stream header line into buf; returns its length. */
static int yuv4_generate_header(AVFormatContext *s, char* buf)
{
    AVStream *st;
    int width, height;
    int raten, rated, aspectn, aspectd, n;
    char inter;
    const char *colorspace = "";

    st     = s->streams[0];
    width  = st->codec->width;
    height = st->codec->height;

    // TODO: should be avg_frame_rate
    /* frame rate = 1 / time_base, reduced to fit in 31 bits */
    av_reduce(&raten, &rated, st->time_base.den,
              st->time_base.num, (1UL << 31) - 1);

    aspectn = st->sample_aspect_ratio.num;
    aspectd = st->sample_aspect_ratio.den;

    if (aspectn == 0 && aspectd == 1)
        aspectd = 0;  // 0:0 means unknown

    /* interlacing flag for the 'I' header field */
    switch (st->codec->field_order) {
    case AV_FIELD_TT: inter = 't'; break; /* top field first */
    case AV_FIELD_BB: inter = 'b'; break; /* bottom field first */
    default:          inter = 'p'; break; /* progressive */
    }

    /* map pixel format (and chroma siting for 4:2:0) to the 'C' field */
    switch (st->codec->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
        colorspace = " Cmono";
        break;
    case AV_PIX_FMT_YUV411P:
        colorspace = " C411 XYSCSS=411";
        break;
    case AV_PIX_FMT_YUV420P:
        switch (st->codec->chroma_sample_location) {
        case AVCHROMA_LOC_TOPLEFT: colorspace = " C420paldv XYSCSS=420PALDV"; break;
        case AVCHROMA_LOC_LEFT:    colorspace = " C420mpeg2 XYSCSS=420MPEG2"; break;
        default:                   colorspace = " C420jpeg XYSCSS=420JPEG";   break;
        }
        break;
    case AV_PIX_FMT_YUV422P:
        colorspace = " C422 XYSCSS=422";
        break;
    case AV_PIX_FMT_YUV444P:
        colorspace = " C444 XYSCSS=444";
        break;
    }

    /* construct stream header, if this is the first frame */
    n = snprintf(buf, Y4M_LINE_MAX, "%s W%d H%d F%d:%d I%c A%d:%d%s\n",
                 Y4M_MAGIC, width, height, raten, rated, inter,
                 aspectn, aspectd, colorspace);

    return n;
}
/* Record one resolution tag (X or Y, numerator+denominator) and, once all
 * four values are known, derive the sample aspect ratio from their ratio. */
static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
{
    /* XRES occupies res[0..1], YRES res[2..3] */
    int base = (tag == TIFF_YRES) ? 2 : 0;

    s->res[base]     = num;
    s->res[base + 1] = den;

    if (s->res[0] && s->res[1] && s->res[2] && s->res[3])
        /* SAR = (yres/xres) expressed as (yres_num*xres_den)/(xres_num*yres_den) */
        av_reduce(&s->avctx->sample_aspect_ratio.num,
                  &s->avctx->sample_aspect_ratio.den,
                  s->res[2] * (uint64_t)s->res[1],
                  s->res[0] * (uint64_t)s->res[3],
                  INT32_MAX);
}
/* Build the YUV4MPEG2 stream header line into buf; returns its length.
 * (Older variant using codec->time_base and coded_frame interlacing.) */
static int yuv4_generate_header(AVFormatContext *s, char* buf)
{
    AVStream *st;
    int width, height;
    int raten, rated, aspectn, aspectd, n;
    char inter;
    const char *colorspace = "";

    st     = s->streams[0];
    width  = st->codec->width;
    height = st->codec->height;

    /* frame rate = 1 / codec time_base, reduced to fit in 31 bits */
    av_reduce(&raten, &rated, st->codec->time_base.den, st->codec->time_base.num, (1UL<<31)-1);

    aspectn = st->sample_aspect_ratio.num;
    aspectd = st->sample_aspect_ratio.den;

    if ( aspectn == 0 && aspectd == 1 ) aspectd = 0;  // 0:0 means unknown

    inter = 'p'; /* progressive is the default */
    if (st->codec->coded_frame && st->codec->coded_frame->interlaced_frame) {
        /* interlaced: 't' = top field first, 'b' = bottom field first */
        inter = st->codec->coded_frame->top_field_first ? 't' : 'b';
    }

    /* map pixel format (and chroma siting for 4:2:0) to the 'C' field */
    switch(st->codec->pix_fmt) {
    case PIX_FMT_GRAY8:
        colorspace = " Cmono";
        break;
    case PIX_FMT_YUV411P:
        colorspace = " C411 XYSCSS=411";
        break;
    case PIX_FMT_YUV420P:
        colorspace = (st->codec->chroma_sample_location == AVCHROMA_LOC_TOPLEFT)?" C420paldv XYSCSS=420PALDV":
                     (st->codec->chroma_sample_location == AVCHROMA_LOC_LEFT)   ?" C420mpeg2 XYSCSS=420MPEG2":
                     " C420jpeg XYSCSS=420JPEG";
        break;
    case PIX_FMT_YUV422P:
        colorspace = " C422 XYSCSS=422";
        break;
    case PIX_FMT_YUV444P:
        colorspace = " C444 XYSCSS=444";
        break;
    }

    /* construct stream header, if this is the first frame */
    n = snprintf(buf, Y4M_LINE_MAX, "%s W%d H%d F%d:%d I%c A%d:%d%s\n",
                 Y4M_MAGIC, width, height, raten, rated, inter,
                 aspectn, aspectd, colorspace);

    return n;
}
// Store the stream time base as the reduced fraction pts_num/pts_den.
// NOTE(review): av_reduce here is called with plain members, not addresses —
// this appears to be a project-local av_reduce taking int references, unlike
// FFmpeg's pointer-based API; confirm against this project's declaration.
void BIKPlayer::av_set_pts_info(AVRational &time_base, unsigned int pts_num, unsigned int pts_den)
{
	//pts_wrap_bits, if needed, is always 64
	if(av_reduce(time_base.num, time_base.den, pts_num, pts_den, INT_MAX)) {
		//bla bla, something didn't work
		// NOTE(review): this branch is intentionally empty — the reduce
		// result is ignored; verify whether failure should be reported.
	}

	// degenerate fraction: zero both fields so callers can detect "unknown"
	if(!time_base.num || !time_base.den)
		time_base.num = time_base.den = 0;
}
/**
 * Convert a double to the closest AVRational with num/den bounded by max.
 * NaN maps to 0/0; values beyond int range map to ±1/0 (infinity).
 */
AVRational av_d2q(double d, int max)
{
    AVRational a;
#define LOG2  0.69314718055994530941723212145817656807550013436025
    int exponent;
    int64_t den;
    if (isnan(d))
        return (AVRational) { 0,0 };
    /* too large for an int numerator: represent as signed infinity */
    if (fabs(d) > INT_MAX + 3LL)
        return (AVRational) { d < 0 ? -1 : 1, 0 };
    /* choose den as a power of two scaled so d*den stays below 2^62 */
    exponent = FFMAX( (int)(log(fabs(d) + 1e-20)/LOG2), 0);
    den = 1LL << (61 - exponent);
    // (int64_t)rint() and llrint() do not work with gcc on ia64 and sparc64
    av_reduce(&a.num, &a.den, floor(d * den + 0.5), den, max);
    /* if clamping to max collapsed the value to 0, retry with INT_MAX so a
     * nonzero d still yields a usable approximation */
    if ((!a.num || !a.den) && d && max>0 && max<INT_MAX)
        av_reduce(&a.num, &a.den, floor(d * den + 0.5), den, INT_MAX);

    return a;
}
/* for aspect filter, convert from frame aspect ratio to pixel aspect ratio */
static int frameaspect_config_props(AVFilterLink *inlink)
{
    AspectContext *s = inlink->dst->priv;

    /* sar = far * h/w; reducing with max 100 keeps num/den small */
    av_reduce(&s->aspect.num, &s->aspect.den,
              s->aspect.num * inlink->h,
              s->aspect.den * inlink->w,
              100);

    return 0;
}
/* Compute the display aspect ratio of a stream from its coded dimensions
 * and sample aspect ratio, reduced with a bound of 1024*1024. */
static inline void calc_aspect_ratio(rational *ratio, struct stream *stream)
{
    int reduced_num, reduced_den;

    av_reduce(&reduced_num, &reduced_den,
              stream->pCodecCtx->width  * stream->pCodecCtx->sample_aspect_ratio.num,
              stream->pCodecCtx->height * stream->pCodecCtx->sample_aspect_ratio.den,
              1024*1024);

    ratio->num = reduced_num;
    ratio->den = reduced_den;
}
/**
 * Convert a double to the closest AVRational with num/den bounded by max.
 * NaN maps to 0/0; values beyond int range map to ±1/0 (infinity).
 */
AVRational av_d2q(double d, int max)
{
    AVRational a;
    int exponent;
    int64_t den;
    if (isnan(d))
        return (AVRational) { 0,0 };
    /* too large for an int numerator: represent as signed infinity */
    if (fabs(d) > INT_MAX + 3LL)
        return (AVRational) { d < 0 ? -1 : 1, 0 };
    /* binary exponent of d (via frexp), clamped to >= 0, used to scale den
     * as a power of two so d*den stays below 2^62 */
    frexp(d, &exponent);
    exponent = FFMAX(exponent-1, 0);
    den = 1LL << (61 - exponent);
    // (int64_t)rint() and llrint() do not work with gcc on ia64 and sparc64,
    // see Ticket2713 for affected gcc/glibc versions
    av_reduce(&a.num, &a.den, floor(d * den + 0.5), den, max);
    /* if clamping to max collapsed the value to 0, retry with INT_MAX so a
     * nonzero d still yields a usable approximation */
    if ((!a.num || !a.den) && d && max>0 && max<INT_MAX)
        av_reduce(&a.num, &a.den, floor(d * den + 0.5), den, INT_MAX);

    return a;
}
/**
 * Parse video variable
 * @return < 0 if unknown
 */
static int parse_video_var(AVFormatContext *avctx, AVStream *st,
                           const char *name, int size)
{
    AVIOContext *pb = avctx->pb;
    if (!strcmp(name, "__DIR_COUNT")) {
        st->nb_frames = st->duration = var_read_int(pb, size);
    } else if (!strcmp(name, "COMPRESSION")) {
        /* codec id is encoded as a short string */
        char *str = var_read_string(pb, size);
        if (!str)
            return AVERROR_INVALIDDATA;
        if (!strcmp(str, "1")) {
            st->codecpar->codec_id = AV_CODEC_ID_MVC1;
        } else if (!strcmp(str, "2")) {
            st->codecpar->format = AV_PIX_FMT_ABGR;
            st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
        } else if (!strcmp(str, "3")) {
            st->codecpar->codec_id = AV_CODEC_ID_SGIRLE;
        } else if (!strcmp(str, "10")) {
            st->codecpar->codec_id = AV_CODEC_ID_MJPEG;
        } else if (!strcmp(str, "MVC2")) {
            st->codecpar->codec_id = AV_CODEC_ID_MVC2;
        } else {
            avpriv_request_sample(avctx, "Video compression %s", str);
        }
        av_free(str);
    } else if (!strcmp(name, "FPS")) {
        AVRational fps = var_read_float(pb, size);
        avpriv_set_pts_info(st, 64, fps.den, fps.num);
        st->avg_frame_rate = fps;
    } else if (!strcmp(name, "HEIGHT")) {
        st->codecpar->height = var_read_int(pb, size);
    } else if (!strcmp(name, "PIXEL_ASPECT")) {
        st->sample_aspect_ratio = var_read_float(pb, size);
        /* normalize the parsed ratio to lowest terms */
        av_reduce(&st->sample_aspect_ratio.num, &st->sample_aspect_ratio.den,
                  st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
                  INT_MAX);
    } else if (!strcmp(name, "WIDTH")) {
        st->codecpar->width = var_read_int(pb, size);
    } else if (!strcmp(name, "ORIENTATION")) {
        if (var_read_int(pb, size) == 1101) {
            /* NOTE(review): extradata from av_strdup lacks the usual
             * AV_INPUT_BUFFER_PADDING_SIZE padding — verify consumers. */
            st->codecpar->extradata      = av_strdup("BottomUp");
            st->codecpar->extradata_size = 9; /* strlen("BottomUp") + 1 */
        }
    } else if (!strcmp(name, "Q_SPATIAL") || !strcmp(name, "Q_TEMPORAL")) {
        var_read_metadata(avctx, name, size);
    } else if (!strcmp(name, "INTERLACING") || !strcmp(name, "PACKING")) {
        avio_skip(pb, size); // ignore
    } else
        return AVERROR_INVALIDDATA;
    return 0;
}
/* Configure the wrapped libavfilter graph for a new input format and
 * propagate the (possibly changed) size/display size to the next filter. */
static int config(struct vf_instance *vf, int w, int h, int dw, int dh,
                  unsigned flags, unsigned fmt)
{
    int ret;
    AVFilterLink *out;
    AVRational iar, dar;

    /* image (storage) aspect and display aspect of the incoming video */
    av_reduce(&iar.num, &iar.den, w, h, INT_MAX);
    av_reduce(&dar.num, &dar.den, dw, dh, INT_MAX);
    vf->priv->in_pixfmt = imgfmt2pixfmt(fmt);
    vf->priv->in_imgfmt = fmt;
    vf->priv->in_w = w;
    vf->priv->in_h = h;
    /* sample (pixel) aspect = DAR / IAR */
    vf->priv->in_sar = av_div_q(dar, iar);
    ret = avfilter_graph_config(vf->priv->graph, NULL);
    if (ret < 0)
        /* NOTE(review): failure is signalled by returning 0 — presumably the
         * vf config convention (0 == error); confirm against the vf API. */
        return 0;

    out = vf->priv->out->inputs[0];
    vf->priv->out_w = out->w;
    vf->priv->out_h = out->h;
    vf->priv->out_pixfmt = out->format;
    vf->priv->out_imgfmt = pixfmt2imgfmt(out->format);
    vf->priv->out_sar = out->sample_aspect_ratio;
    if (vf->priv->out_sar.num != vf->priv->in_sar.num ||
        vf->priv->out_sar.den != vf->priv->in_sar.den ||
        out->w != w || out->h != h)
    {
        /* the graph changed size or SAR: recompute the display size from the
         * new DAR, keeping the larger dimension equal to the stored size */
        av_reduce(&iar.num, &iar.den, out->w, out->h, INT_MAX);
        dar = av_mul_q(iar, out->sample_aspect_ratio);
        if (av_cmp_q(dar, iar) >= 0) {
            /* wider than stored: keep height, stretch width */
            dh = out->h;
            dw = av_rescale(dh, dar.num, dar.den);
        } else {
            /* taller than stored: keep width, stretch height */
            dw = out->w;
            dh = av_rescale(dw, dar.den, dar.num);
        }
    }
    return vf_next_config(vf, out->w, out->h, dw, dh, flags, fmt);
}
/* A very large percent of movies have NTSC timebases (30/1.001) with
   misrounded fractions, so let's recover them. */
static void rescue_ntsc_timebase(AVRational *base)
{
    double tb, nearest_ntsc;
    const double small_interval = 1./120.;

    av_reduce(&base->num, &base->den, base->num, base->den, INT_MAX);

    if (base->num == 1)
        return; // FIXME: is this good enough?

    tb           = av_q2d(*base);
    /* snap the rate to the closest x/1.001 value */
    nearest_ntsc = floor(tb * 1001. + .5) / 1001.;

    if (fabs(tb - nearest_ntsc) < small_interval) {
        base->num = 1001;
        base->den = (1001. / tb) + .5;
    }
}
AVRational av_d2q(double d, int max){ AVRational a; #define LOG2 0.69314718055994530941723212145817656807550013436025 int exponent; int64_t den; if (isnan(d)) return (AVRational){0,0}; if (isinf(d)) return (AVRational){ d<0 ? -1:1, 0 }; exponent = FFMAX( (int)(log(fabs(d) + 1e-20)/LOG2), 0); den = 1LL << (61 - exponent); av_reduce(&a.num, &a.den, (int64_t)(d * den + 0.5), den, max); return a; }
/* for setdar filter, convert from frame aspect ratio to pixel aspect ratio */
static int setdar_config_props(AVFilterLink *inlink)
{
    AspectContext *s = inlink->dst->priv;
    AVRational frame_ar = s->aspect;   /* requested DAR, kept for logging */

    /* sar = dar * h/w; reducing with max 100 keeps num/den small */
    av_reduce(&s->aspect.num, &s->aspect.den,
              s->aspect.num * inlink->h,
              s->aspect.den * inlink->w,
              100);

    av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d -> dar:%d/%d sar:%d/%d\n",
           inlink->w, inlink->h,
           frame_ar.num, frame_ar.den,
           s->aspect.num, s->aspect.den);

    inlink->sample_aspect_ratio = s->aspect;

    return 0;
}
/* Rebuild the polyphase filter bank with c->phase_count_compensation phases
 * (used for rate compensation), rescaling the increment bookkeeping so the
 * stream position is preserved. Returns 0 on success or a negative AVERROR. */
static int rebuild_filter_bank_with_compensation(ResampleContext *c)
{
    uint8_t *new_filter_bank;
    int new_src_incr, new_dst_incr;
    int phase_count = c->phase_count_compensation;
    int ret;

    if (phase_count == c->phase_count)
        return 0;

    /* only valid when no fractional/compensation state is pending */
    av_assert0(!c->frac && !c->dst_incr_mod && !c->compensation_distance);

    new_filter_bank = av_calloc(c->filter_alloc, (phase_count + 1) * c->felem_size);
    if (!new_filter_bank)
        return AVERROR(ENOMEM);

    ret = build_filter(c, new_filter_bank, c->factor, c->filter_length, c->filter_alloc,
                       phase_count, 1 << c->filter_shift, c->filter_type, c->kaiser_beta);
    if (ret < 0) {
        av_freep(&new_filter_bank);
        return ret;
    }
    /* duplicate phase 0 past the last phase so linear interpolation between
     * adjacent phases never reads beyond the bank */
    memcpy(new_filter_bank + (c->filter_alloc*phase_count+1)*c->felem_size, new_filter_bank, (c->filter_alloc-1)*c->felem_size);
    memcpy(new_filter_bank + (c->filter_alloc*phase_count  )*c->felem_size, new_filter_bank + (c->filter_alloc - 1)*c->felem_size, c->felem_size);

    /* rescale the increments for the new phase count
     * (integer division: assumes phase_count is a multiple of c->phase_count
     *  — TODO confirm this invariant at the call site) */
    if (!av_reduce(&new_src_incr, &new_dst_incr, c->src_incr,
                   c->dst_incr * (int64_t)(phase_count/c->phase_count), INT32_MAX/2))
    {
        av_freep(&new_filter_bank);
        return AVERROR(EINVAL);
    }

    c->src_incr = new_src_incr;
    c->dst_incr = new_dst_incr;
    /* scale both increments up to keep precision in the div/mod split below */
    while (c->dst_incr < (1<<20) && c->src_incr < (1<<20)) {
        c->dst_incr *= 2;
        c->src_incr *= 2;
    }
    c->ideal_dst_incr = c->dst_incr;
    c->dst_incr_div   = c->dst_incr / c->src_incr;
    c->dst_incr_mod   = c->dst_incr % c->src_incr;
    /* rescale the current filter index to the new phase granularity */
    c->index         *= phase_count / c->phase_count;
    c->phase_count    = phase_count;

    av_freep(&c->filter_bank);
    c->filter_bank = new_filter_bank;
    return 0;
}
/* Filter one frame: scale it on the GPU, then adjust the output frame's
 * sample aspect ratio so the display aspect ratio is preserved. */
static int cudascale_filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext              *ctx = link->dst;
    CUDAScaleContext               *s = ctx->priv;
    AVFilterLink             *outlink = ctx->outputs[0];
    AVHWFramesContext     *frames_ctx = (AVHWFramesContext*)s->frames_ctx->data;
    AVCUDADeviceContext *device_hwctx = frames_ctx->device_ctx->hwctx;

    AVFrame *out = NULL;
    CUresult err;
    CUcontext dummy;
    int ret = 0;

    out = av_frame_alloc();
    if (!out) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* make the device's CUDA context current for the scaling call */
    err = cuCtxPushCurrent(device_hwctx->cuda_ctx);
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    ret = cudascale_scale(ctx, out, in);

    cuCtxPopCurrent(&dummy);
    if (ret < 0)
        goto fail;

    /* rescale SAR by (out_h * in_w) / (out_w * in_h) so DAR stays the same */
    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
              INT_MAX);

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
fail:
    /* on error both frames are released; on success `in` was already freed */
    av_frame_free(&in);
    av_frame_free(&out);
    return ret;
}
/** Parsing for fps, which can be a fraction. Unfortunately, * the spec for the header leaves out a lot of details, * so this is mostly guessing. */ static AVRational read_fps(const char* line, int* error) { int64_t num, den = 1; AVRational result; num = read_int(line, &line, error); if (*line == '.') line++; for (; *line>='0' && *line<='9'; line++) { // Truncate any numerator too large to fit into an int64_t if (num > (INT64_MAX - 9) / 10 || den > INT64_MAX / 10) break; num = 10 * num + *line - '0'; den *= 10; } if (!num) *error = -1; av_reduce(&result.num, &result.den, num, den, 0x7FFFFFFF); return result; }
// check_window_only: assume params and dst/src rc are unchanged static void update_overlay(struct gl_hwdec *hw, bool check_window_only) { struct priv *p = hw->priv; GL *gl = hw->gl; MMAL_PORT_T *input = p->renderer->input[0]; struct mp_rect src = p->src; struct mp_rect dst = p->dst; if (!p->w || !p->h) return; int defs[4] = {0, 0, 0, 0}; int *z = gl->MPGetNativeDisplay ? gl->MPGetNativeDisplay("MPV_RPI_WINDOW") : NULL; if (!z) z = defs; // As documented in the libmpv openglcb headers. int display = z[0]; int layer = z[1]; int x = z[2]; int y = z[3]; if (check_window_only && memcmp(z, p->cur_window, sizeof(p->cur_window)) == 0) return; memcpy(p->cur_window, z, sizeof(p->cur_window)); int rotate[] = {MMAL_DISPLAY_ROT0, MMAL_DISPLAY_ROT90, MMAL_DISPLAY_ROT180, MMAL_DISPLAY_ROT270}; int src_w = src.x1 - src.x0, src_h = src.y1 - src.y0, dst_w = dst.x1 - dst.x0, dst_h = dst.y1 - dst.y0; int p_x, p_y; av_reduce(&p_x, &p_y, dst_w * src_h, src_w * dst_h, 16000); MMAL_DISPLAYREGION_T dr = { .hdr = { .id = MMAL_PARAMETER_DISPLAYREGION, .size = sizeof(MMAL_DISPLAYREGION_T), }, .src_rect = { .x = src.x0, .y = src.y0, .width = src_w, .height = src_h }, .dest_rect = { .x = dst.x0 + x, .y = dst.y0 + y,
/* Write a VIDEOINFOHEADER2 structure (and, for MPEG-2 video, the wrapping
 * MPEG2VIDEOINFO) for the given stream, little-endian. */
static void put_videoinfoheader2(AVIOContext *pb, AVStream *st)
{
    /* display aspect ratio = SAR * frame dimensions, reduced to 32 bits */
    AVRational dar = av_mul_q(st->sample_aspect_ratio, (AVRational){st->codec->width, st->codec->height});
    unsigned int num, den;
    av_reduce(&num, &den, dar.num, dar.den, 0xFFFFFFFF);

    /* VIDEOINFOHEADER2 */
    /* rcSource: left, top, right, bottom */
    avio_wl32(pb, 0);
    avio_wl32(pb, 0);
    avio_wl32(pb, st->codec->width);
    avio_wl32(pb, st->codec->height);
    /* rcTarget: all zero */
    avio_wl32(pb, 0);
    avio_wl32(pb, 0);
    avio_wl32(pb, 0);
    avio_wl32(pb, 0);
    avio_wl32(pb, st->codec->bit_rate);  /* dwBitRate */
    avio_wl32(pb, 0);                    /* dwBitErrorRate */
    /* AvgTimePerFrame in 100ns units, 0 if the frame rate is unknown */
    avio_wl64(pb, st->avg_frame_rate.num && st->avg_frame_rate.den ? INT64_C(10000000) / av_q2d(st->avg_frame_rate) : 0);
    avio_wl32(pb, 0);                    /* dwInterlaceFlags */
    avio_wl32(pb, 0);                    /* dwCopyProtectFlags */
    avio_wl32(pb, num);                  /* dwPictAspectRatioX */
    avio_wl32(pb, den);                  /* dwPictAspectRatioY */
    avio_wl32(pb, 0);
    avio_wl32(pb, 0);

    ff_put_bmp_header(pb, st->codec, ff_codec_bmp_tags, 0, 1);

    if (st->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        /* sequence header must be padded to a multiple of 4 bytes */
        int padding = (st->codec->extradata_size & 3) ? 4 - (st->codec->extradata_size & 3) : 0;
        /* MPEG2VIDEOINFO */
        avio_wl32(pb, 0);                                       /* dwStartTimeCode */
        avio_wl32(pb, st->codec->extradata_size + padding);     /* cbSequenceHeader */
        avio_wl32(pb, -1);                                      /* dwProfile: unknown */
        avio_wl32(pb, -1);                                      /* dwLevel: unknown */
        avio_wl32(pb, 0);                                       /* dwFlags */
        avio_write(pb, st->codec->extradata, st->codec->extradata_size);
        ffio_fill(pb, 0, padding);
    }
}
/* Parse a ratio string of the form "num:den" or any numeric expression,
 * storing the reduced result in *q. Returns 0 on success, <0 on error. */
int av_parse_ratio(AVRational *q, const char *str, int max,
                   int log_offset, void *log_ctx)
{
    char trailing;
    double value;
    int err;

    /* "num:den" form: parse both halves and just normalize in place */
    if (sscanf(str, "%d:%d%c", &q->num, &q->den, &trailing) == 2) {
        av_reduce(&q->num, &q->den, q->num, q->den, max);
        return 0;
    }

    /* otherwise evaluate the string as a numeric expression */
    err = av_expr_parse_and_eval(&value, str, NULL, NULL, NULL, NULL, NULL, NULL,
                                 NULL, log_offset, log_ctx);
    if (err < 0)
        return err;
    *q = av_d2q(value, max);

    return 0;
}
// attempt to correct framerate to a common fraction if close to one void CorrectRationalFramerate(int *Num, int *Den) { // Make sure fps is a normalized rational number av_reduce(Den, Num, *Den, *Num, INT_MAX); const double fps = static_cast<double>(*Num) / *Den; const int fpsList[] = { 24, 25, 30, 48, 50, 60, 100, 120 }; for (size_t i = 0; i < sizeof(fpsList) / sizeof(fpsList[0]); i++) { const double delta = (fpsList[i] - static_cast<double>(fpsList[i]) / 1.001) / 2.0; if (fabs(fps - fpsList[i]) < delta) { *Num = fpsList[i]; *Den = 1; break; } else if ((fpsList[i] % 25) && (fabs(fps - static_cast<double>(fpsList[i]) / 1.001) < delta)) { *Num = fpsList[i] * 1000; *Den = 1001; break; } } }