// Get the total output size scaled to aspect ratio void EDL::calculate_conformed_dimensions(int single_channel, float &w, float &h) { w = session->output_w; h = session->output_h; if((float)session->output_w / session->output_h > get_aspect_ratio()) { h = (float)h * (session->output_w / get_aspect_ratio() / session->output_h); } else { w = (float)w * (h * get_aspect_ratio() / session->output_w); } }
/* For the setdar filter: convert the requested display aspect ratio (DAR)
 * into the sample (pixel) aspect ratio implied by the link dimensions and
 * store it on the inlink.  Returns 0 on success or a negative error code
 * from get_aspect_ratio(). */
static int setdar_config_props(AVFilterLink *inlink)
{
    AspectContext *s = inlink->dst->priv;
    AVRational dar;
    int ret;

#if FF_API_OLD_FILTER_OPTS
    /* Only evaluate the new-style aspect expression when the deprecated
     * numerator/denominator options were not supplied. */
    if (!(s->aspect_num > 0 && s->aspect_den > 0)) {
#endif
    if ((ret = get_aspect_ratio(inlink, &s->dar)))
        return ret;
#if FF_API_OLD_FILTER_OPTS
    }
#endif

    if (s->dar.num && s->dar.den) {
        /* SAR = DAR * h / w.  Reduce with INT_MAX as the bound — the previous
         * bound of 100 truncated the fraction and lost precision; this also
         * matches the outlink setdar_config_props variant in this file. */
        av_reduce(&s->sar.num, &s->sar.den,
                   s->dar.num * inlink->h,
                   s->dar.den * inlink->w, INT_MAX);
        inlink->sample_aspect_ratio = s->sar;
        dar = s->dar;
    } else {
        /* No DAR requested: square pixels, DAR follows from the frame size. */
        inlink->sample_aspect_ratio = (AVRational){ 1, 1 };
        dar = (AVRational){ inlink->w, inlink->h };
    }

    av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d -> dar:%d/%d sar:%d/%d\n",
           inlink->w, inlink->h, dar.num, dar.den,
           inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den);

    return 0;
}
/* For the setdar filter (outlink variant): convert the requested display
 * aspect ratio (DAR) into the sample aspect ratio for the outlink, then log
 * the old and new DAR/SAR pairs.  Returns 0 on success or a negative error
 * code from get_aspect_ratio(). */
static int setdar_config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    AspectContext *s = ctx->priv;
    AVRational dar;
    AVRational old_dar;
    AVRational old_sar = inlink->sample_aspect_ratio;
    int ret;

    if ((ret = get_aspect_ratio(inlink, &s->dar)))
        return ret;

    if (s->dar.num && s->dar.den) {
        /* SAR = DAR * h / w, reduced with full integer precision. */
        av_reduce(&s->sar.num, &s->sar.den,
                   s->dar.num * inlink->h,
                   s->dar.den * inlink->w, INT_MAX);
        outlink->sample_aspect_ratio = s->sar;
        dar = s->dar;
    } else {
        /* No DAR requested: square pixels, DAR follows from the frame size. */
        outlink->sample_aspect_ratio = (AVRational){ 1, 1 };
        dar = (AVRational){ inlink->w, inlink->h };
    }

    /* Recover the previous DAR purely for the log line below. */
    compute_dar(&old_dar, old_sar, inlink->w, inlink->h);
    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d dar:%d/%d sar:%d/%d -> dar:%d/%d sar:%d/%d\n",
           inlink->w, inlink->h, old_dar.num, old_dar.den, old_sar.num, old_sar.den,
           dar.num, dar.den,
           outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den);

    return 0;
}
void perspective_camera::set_vertical_fov(const float vertical_fov) { m_vertical_fov = vertical_fov; // calculate horizontal fov float fov_radians = m_vertical_fov * degrees_to_radians; m_horizontal_fov = 2.0 * std::atanf(get_aspect_ratio() * std::tanf(fov_radians * 0.5)); m_horizontal_fov *= radians_to_degrees; //m_vertical_fov = 2 * std::atan((1 / get_aspect_ratio()) * std::tan((fov_radians / 2.0)); calculate_view(); }
/* Build an OpenGL-style perspective projection matrix in-place.
 * fov is the vertical field of view in degrees; near and far are the
 * clip-plane distances.  All entries not written below are zeroed. */
void mat4_perspective(mat4 matrix, float fov, float near, float far)
{
    /* Focal length: cotangent of half the vertical field of view. */
    float focal = (float) (1.0 / tan(fov * deg_to_rad / 2.0));
    /* Signed depth extent of the frustum (negative for near < far). */
    float depth = near - far;

    mat4_zero(matrix);

    matrix[0]  = focal / get_aspect_ratio();
    matrix[5]  = focal;
    matrix[10] = (far + near) / depth;
    matrix[11] = -1.0f;
    matrix[14] = (2.0f * far * near) / depth;
}
// Recompute the cached image-plane parameters (pixel pitch, subpixel pitch,
// and the top-left origin pixel) from the current vertical FOV, display
// height, sample rate, and aspect ratio.
void perspective_camera::calculate_view() {
	// calculate subpixel pitch
	// Pixel pitch: extent of one pixel on the image plane at unit distance,
	// derived from half the vertical FOV over half the display height.
	m_pixel_pitch = std::tanf((m_vertical_fov * degrees_to_radians) * 0.5) / (get_display_height() * 0.5);
	m_subpixel_pitch = (1.0 / m_sample_rate) * m_pixel_pitch;

	// first calculate the top and left edge
	// y is the top edge; x mirrors it scaled by the aspect ratio (negated
	// so the origin sits at the LEFT edge).
	m_origin_pixel.y = std::tanf((m_vertical_fov * degrees_to_radians) * 0.5);
	m_origin_pixel.x = -(m_origin_pixel.y * get_aspect_ratio());
	// advance the top left corner half the pixel pitch to the right and to the bottom
	// origin pixel is situated at the top left. we can derive the other pixels by adding and multiplying by pixel pitch
	//m_origin_pixel.x += (m_pixel_pitch * 0.5);
	//m_origin_pixel.y -= (m_pixel_pitch * 0.5);
}
/* For the setsar filter: apply the requested sample (pixel) aspect ratio
 * directly to the inlink.  (The original comment said "setdar"; this is the
 * setsar variant — the ratio is stored as-is, with no DAR conversion.) */
static int setsar_config_props(AVFilterLink *inlink)
{
    AspectContext *s = inlink->dst->priv;
    int ret;

#if FF_API_OLD_FILTER_OPTS
    /* Only evaluate the new-style aspect expression when the deprecated
     * numerator/denominator options were not supplied. */
    if (!(s->aspect_num > 0 && s->aspect_den > 0)) {
#endif
    if ((ret = get_aspect_ratio(inlink, &s->sar)))
        return ret;
#if FF_API_OLD_FILTER_OPTS
    }
#endif

    inlink->sample_aspect_ratio = s->sar;

    return 0;
}
// libretro entry point: report the core's geometry (with optional overscan
// cropping) and A/V timing (native NES frame rate, 44.1 kHz audio).
void retro_get_system_av_info(struct retro_system_av_info *info)
{
#ifdef PSP
   // PSP build exposes a single overscan toggle covering both axes;
   // hiding overscan crops 16 pixels total from each dimension.
   unsigned width = Nes_Emu::image_width - (use_overscan ? 0 : 16);
   unsigned height = Nes_Emu::image_height - (use_overscan ? 0 : 16);
#else
   // Other builds split the toggle into horizontal and vertical options.
   unsigned width = Nes_Emu::image_width - (use_overscan_h ? 0 : 16);
   unsigned height = Nes_Emu::image_height - (use_overscan_v ? 0 : 16);
#endif
   const retro_system_timing timing = { Nes_Emu::frame_rate, 44100.0 };
   info->timing = timing;

   // Base geometry reflects the cropped size; max geometry is always the
   // full emulator framebuffer so the frontend can size buffers once.
   info->geometry.base_width = width;
   info->geometry.base_height = height;
   info->geometry.max_width = Nes_Emu::image_width;
   info->geometry.max_height = Nes_Emu::image_height;
   info->geometry.aspect_ratio = get_aspect_ratio(width, height);
}
/* For the setsar filter (outlink variant): apply the requested sample
 * (pixel) aspect ratio to the outlink and log the SAR/DAR change.
 * Returns 0 on success or a negative error from get_aspect_ratio(). */
static int setsar_config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    AspectContext *s = ctx->priv;
    AVRational old_sar = inlink->sample_aspect_ratio;
    AVRational old_dar, dar;
    int ret;

    if ((ret = get_aspect_ratio(inlink, &s->sar)))
        return ret;

    outlink->sample_aspect_ratio = s->sar;

    /* Derive the display aspect ratios before and after, for logging only. */
    compute_dar(&old_dar, old_sar, inlink->w, inlink->h);
    compute_dar(&dar, s->sar, inlink->w, inlink->h);
    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d sar:%d/%d dar:%d/%d -> sar:%d/%d dar:%d/%d\n",
           inlink->w, inlink->h, old_sar.num, old_sar.den, old_dar.num, old_dar.den,
           outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
           dar.num, dar.den);

    return 0;
}
/** Determine the sample (pixel) aspect ratio for a video stream as a double.
 *
 * DV video gets special handling: if a packet is supplied, the ratio is read
 * from the DV bitstream itself (dv_is_pal / dv_is_wide); otherwise FFmpeg's
 * stored ratios for DV are coerced to 720-wide-friendly values (see the
 * comment below).  For all other codecs the codec-level ratio is preferred,
 * falling back to the stream-level ratio when available.  Defaults to 1.0.
 */
static double get_aspect_ratio( AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt )
{
	double aspect_ratio = 1.0;

	if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
	{
		if ( pkt )
		{
			/* Read the aspect directly from the DV frame header. */
			if ( dv_is_pal( pkt ) )
			{
				aspect_ratio = dv_is_wide( pkt )
					? 64.0/45.0 // 16:9 PAL
					: 16.0/15.0; // 4:3 PAL
			}
			else
			{
				aspect_ratio = dv_is_wide( pkt )
					? 32.0/27.0 // 16:9 NTSC
					: 8.0/9.0; // 4:3 NTSC
			}
		}
		else
		{
			AVRational ar =
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
				stream->sample_aspect_ratio;
#else
				codec_context->sample_aspect_ratio;
#endif
			// Override FFmpeg's notion of DV aspect ratios, which are
			// based upon a width of 704. Since we do not have a normaliser
			// that crops (nor is cropping 720 wide ITU-R 601 video always desirable)
			// we just coerce the values to facilitate a passive behaviour through
			// the rescale normaliser when using equivalent producers and consumers.
			// = display_aspect / (width * height)
			if ( ar.num == 10 && ar.den == 11 )
				aspect_ratio = 8.0/9.0; // 4:3 NTSC
			else if ( ar.num == 59 && ar.den == 54 )
				aspect_ratio = 16.0/15.0; // 4:3 PAL
			else if ( ar.num == 40 && ar.den == 33 )
				aspect_ratio = 32.0/27.0; // 16:9 NTSC
			else if ( ar.num == 118 && ar.den == 81 )
				aspect_ratio = 64.0/45.0; // 16:9 PAL
		}
	}
	else
	{
		/* Non-DV: prefer the codec-level SAR, fall back to the stream-level
		   SAR (only available in newer libavformat versions). */
		AVRational codec_sar = codec_context->sample_aspect_ratio;
		AVRational stream_sar =
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
			stream->sample_aspect_ratio;
#else
			{ 0, 1 };
#endif
		if ( codec_sar.num > 0 )
			aspect_ratio = av_q2d( codec_sar );
		else if ( stream_sar.num > 0 )
			aspect_ratio = av_q2d( stream_sar );
	}
	return aspect_ratio;
}

/** Open the file. 
*/

/* Open `file` with libavformat and populate the producer's properties.
 *
 * Accepts either a plain path/URL or an MRL of the form
 * "format:resource?name:value&name:value..." for avformat input devices
 * (frame_rate, frame_rate_base, sample_rate, channels, width, height,
 * standard, av).  On success this sets length/out from the container
 * duration, records the default audio/video stream indexes, probes
 * seekability, stores width/height/aspect_ratio for the video stream,
 * copies container metadata, and attaches one or two AVFormatContexts
 * ("video_context"/"audio_context") to the properties.  Returns 0 on
 * success, non-zero on failure.  The avformat lock is held throughout. */
static int producer_open( mlt_producer this, mlt_profile profile, char *file )
{
	// Return an error code (0 == no error)
	int error = 0;

	// Context for avformat
	AVFormatContext *context = NULL;

	// Get the properties
	mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );

	// We will treat everything with the producer fps
	double fps = mlt_profile_fps( profile );

	// Lock the mutex now
	avformat_lock( );

	// If "MRL", then create AVInputFormat
	AVInputFormat *format = NULL;
	AVFormatParameters *params = NULL;
	char *standard = NULL;
	char *mrl = strchr( file, ':' );

	// AV option (0 = both, 1 = video, 2 = audio)
	int av = 0;

	// Only if there is not a protocol specification that avformat can handle
	if ( mrl && !url_exist( file ) )
	{
		// 'file' becomes format abbreviation
		mrl[0] = 0;

		// Lookup the format
		format = av_find_input_format( file );

		// Eat the format designator
		file = ++mrl;

		if ( format )
		{
			// Allocate params
			params = calloc( sizeof( AVFormatParameters ), 1 );

			// These are required by video4linux (defaults)
			params->width = 640;
			params->height = 480;
			params->time_base = (AVRational){1,25};
			// params->device = file;
			params->channels = 2;
			params->sample_rate = 48000;
		}

		// XXX: this does not work anymore since avdevice
		// TODO: make producer_avddevice?
		// Parse out params: each "name:value" pair, separated by '&'
		mrl = strchr( file, '?' );
		while ( mrl )
		{
			mrl[0] = 0;
			char *name = strdup( ++mrl );
			char *value = strchr( name, ':' );
			if ( value )
			{
				value[0] = 0;
				value++;
				char *t = strchr( value, '&' );
				if ( t )
					t[0] = 0;
				if ( !strcmp( name, "frame_rate" ) )
					params->time_base.den = atoi( value );
				else if ( !strcmp( name, "frame_rate_base" ) )
					params->time_base.num = atoi( value );
				else if ( !strcmp( name, "sample_rate" ) )
					params->sample_rate = atoi( value );
				else if ( !strcmp( name, "channels" ) )
					params->channels = atoi( value );
				else if ( !strcmp( name, "width" ) )
					params->width = atoi( value );
				else if ( !strcmp( name, "height" ) )
					params->height = atoi( value );
				else if ( !strcmp( name, "standard" ) )
				{
					// standard must outlive the params struct until the open call
					standard = strdup( value );
					params->standard = standard;
				}
				else if ( !strcmp( name, "av" ) )
					av = atoi( value );
			}
			free( name );
			mrl = strchr( mrl, '&' );
		}
	}

	// Now attempt to open the file
	error = av_open_input_file( &context, file, format, 0, params ) < 0;

	// Cleanup AVFormatParameters
	free( standard );
	free( params );

	// If successful, then try to get additional info
	if ( error == 0 )
	{
		// Get the stream info
		error = av_find_stream_info( context ) < 0;

		// Continue if no error
		if ( error == 0 )
		{
			// We will default to the first audio and video streams found
			int audio_index = -1;
			int video_index = -1;
			int av_bypass = 0;

			// Now set properties where we can (use default unknowns if required)
			if ( context->duration != AV_NOPTS_VALUE )
			{
				// This isn't going to be accurate for all formats
				mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps + 0.5 );
				mlt_properties_set_position( properties, "out", frames - 1 );
				mlt_properties_set_position( properties, "length", frames );
			}

			// Find default audio and video streams
			find_default_streams( properties, context, &audio_index, &video_index );

			if ( context->start_time != AV_NOPTS_VALUE )
				mlt_properties_set_double( properties, "_start_time", context->start_time );

			// Check if we're seekable (something funny about mpeg here :-/)
			// NOTE(review): "http://" is 7 chars but only 6 are compared — verify intent.
			if ( strcmp( file, "pipe:" ) &&
				 strncmp( file, "http://", 6 ) &&
				 strncmp( file, "udp:", 4 ) &&
				 strncmp( file, "tcp:", 4 ) &&
				 strncmp( file, "rtsp:", 5 ) &&
				 strncmp( file, "rtp:", 4 ) )
			{
				// Probe seekability, then keep the probed context only as a
				// dummy and reopen a fresh one for actual use.
				mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 );
				mlt_properties_set_data( properties, "dummy_context", context, 0, producer_file_close, NULL );
				av_open_input_file( &context, file, NULL, 0, NULL );
				av_find_stream_info( context );
			}
			else
				av_bypass = 1;

			// Store selected audio and video indexes on properties
			mlt_properties_set_int( properties, "_audio_index", audio_index );
			mlt_properties_set_int( properties, "_video_index", video_index );
			mlt_properties_set_int( properties, "_last_position", -1 );

			// Fetch the width, height and aspect ratio
			if ( video_index != -1 )
			{
				AVCodecContext *codec_context = context->streams[ video_index ]->codec;
				mlt_properties_set_int( properties, "width", codec_context->width );
				mlt_properties_set_int( properties, "height", codec_context->height );

				if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
				{
					// Fetch the first frame of DV so we can read it directly
					AVPacket pkt;
					int ret = 0;
					while ( ret >= 0 )
					{
						ret = av_read_frame( context, &pkt );
						if ( ret >= 0 && pkt.stream_index == video_index && pkt.size > 0 )
						{
							mlt_properties_set_double( properties, "aspect_ratio",
								get_aspect_ratio( context->streams[ video_index ], codec_context, &pkt ) );
							break;
						}
					}
				}
				else
				{
					mlt_properties_set_double( properties, "aspect_ratio",
						get_aspect_ratio( context->streams[ video_index ], codec_context, NULL ) );
				}
			}

			// Read Metadata from the container into meta.attr.* properties
			if (context->title != NULL)
				mlt_properties_set(properties, "meta.attr.title.markup", context->title );
			if (context->author != NULL)
				mlt_properties_set(properties, "meta.attr.author.markup", context->author );
			if (context->copyright != NULL)
				mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright );
			if (context->comment != NULL)
				mlt_properties_set(properties, "meta.attr.comment.markup", context->comment );
			if (context->album != NULL)
				mlt_properties_set(properties, "meta.attr.album.markup", context->album );
			if (context->year != 0)
				mlt_properties_set_int(properties, "meta.attr.year.markup", context->year );
			if (context->track != 0)
				mlt_properties_set_int(properties, "meta.attr.track.markup", context->track );

			// We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later)
			if ( av == 0 && audio_index != -1 && video_index != -1 )
			{
				// We'll use the open one as our video_context
				mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );

				// And open again for our audio context
				av_open_input_file( &context, file, NULL, 0, NULL );
				av_find_stream_info( context );

				// Audio context
				mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
			}
			else if ( av != 2 && video_index != -1 )
			{
				// We only have a video context
				mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
			}
			else if ( audio_index != -1 )
			{
				// We only have an audio context
				mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
			}
			else
			{
				// Something has gone wrong
				error = -1;
			}

			mlt_properties_set_int( properties, "av_bypass", av_bypass );
		}
	}

	// Unlock the mutex now
	avformat_unlock( );

	return error;
}

/** Convert a frame position to a time code. 
*/ static double producer_time_of_frame( mlt_producer this, mlt_position position ) { return ( double )position / mlt_producer_get_fps( this ); } static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height ) { #ifdef SWSCALE if ( format == mlt_image_yuv420p ) { struct SwsContext *context = sws_getContext( width, height, pix_fmt, width, height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL); AVPicture output; output.data[0] = buffer; output.data[1] = buffer + width * height; output.data[2] = buffer + ( 3 * width * height ) / 2; output.linesize[0] = width; output.linesize[1] = width >> 1; output.linesize[2] = width >> 1; sws_scale( context, frame->data, frame->linesize, 0, height, output.data, output.linesize); sws_freeContext( context ); } else if ( format == mlt_image_rgb24 ) { struct SwsContext *context = sws_getContext( width, height, pix_fmt, width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL); AVPicture output; avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height ); sws_scale( context, frame->data, frame->linesize, 0, height, output.data, output.linesize); sws_freeContext( context ); } else { struct SwsContext *context = sws_getContext( width, height, pix_fmt, width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL); AVPicture output; avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height ); sws_scale( context, frame->data, frame->linesize, 0, height, output.data, output.linesize); sws_freeContext( context ); } #else if ( format == mlt_image_yuv420p ) { AVPicture pict; pict.data[0] = buffer; pict.data[1] = buffer + width * height; pict.data[2] = buffer + ( 3 * width * height ) / 2; pict.linesize[0] = width; pict.linesize[1] = width >> 1; pict.linesize[2] = width >> 1; img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height ); }
/* Write the Composition Playlist (CPL) XML file described by `cpl`.
 *
 * Builds the document with libxml2's xmlTextWriter: root attributes and
 * metadata, an optional SMPTE ContentVersion block, the rating list, then
 * one <Reel>/<AssetList> per reel with picture/sound/subtitle assets.
 * Optionally embeds/applies an XML-DSIG signature when built with XMLSEC
 * and signing is enabled.  After writing, records the file size and digest
 * back into `cpl`.  Returns DCP_SUCCESS, or DCP_FATAL on writer errors. */
int write_cpl(opendcp_t *opendcp, cpl_t *cpl) {
    int a,r, rc;
    struct stat st;
    xmlIndentTreeOutput = 1;
    xmlDocPtr doc;
    xmlTextWriterPtr xml;

    /* create XML document */
    xml = xmlNewTextWriterDoc(&doc,0);

    /* cpl start */
    rc = xmlTextWriterStartDocument(xml, NULL, XML_ENCODING, NULL);
    if (rc < 0) {
        dcp_log(LOG_ERROR,"xmlTextWriterStartDocument failed");
        return DCP_FATAL;
    }

    /* root element; namespace depends on interop vs SMPTE profile */
    xmlTextWriterStartElement(xml, BAD_CAST "CompositionPlaylist");
    xmlTextWriterWriteAttribute(xml, BAD_CAST "xmlns", BAD_CAST NS_CPL[opendcp->ns]);
    if (opendcp->xml_signature.sign) {
        xmlTextWriterWriteAttribute(xml, BAD_CAST "xmlns:dsig", BAD_CAST DS_DSIG);
    }

    /* cpl attributes */
    xmlTextWriterWriteFormatElement(xml, BAD_CAST "Id","%s%s","urn:uuid:",cpl->uuid);
    xmlTextWriterWriteFormatElement(xml, BAD_CAST "AnnotationText","%s",cpl->annotation);
    xmlTextWriterWriteFormatElement(xml, BAD_CAST "IssueDate","%s",cpl->timestamp);
    xmlTextWriterWriteFormatElement(xml, BAD_CAST "Creator","%s",cpl->creator);
    xmlTextWriterWriteFormatElement(xml, BAD_CAST "ContentTitleText","%s",cpl->title);
    xmlTextWriterWriteFormatElement(xml, BAD_CAST "ContentKind","%s",cpl->kind);

    /* content version (SMPTE profile only) */
    if (opendcp->ns == XML_NS_SMPTE) {
        xmlTextWriterStartElement(xml, BAD_CAST "ContentVersion");
        xmlTextWriterWriteFormatElement(xml, BAD_CAST "Id","%s%s_%s","urn:uri:",cpl->uuid,cpl->timestamp);
        xmlTextWriterWriteFormatElement(xml, BAD_CAST "LabelText","%s_%s",cpl->uuid,cpl->timestamp);
        xmlTextWriterEndElement(xml);
    }

    /* rating (list is emitted even when empty) */
    xmlTextWriterStartElement(xml, BAD_CAST "RatingList");
    if (strcmp(cpl->rating,"")) {
        xmlTextWriterWriteFormatElement(xml, BAD_CAST "Agency","%s",RATING_AGENCY[1]);
        xmlTextWriterWriteFormatElement(xml, BAD_CAST "Label","%s",cpl->rating);
    }
    xmlTextWriterEndElement(xml);

    /* reel(s) Start */
    xmlTextWriterStartElement(xml, BAD_CAST "ReelList");
    for (r=0;r<cpl->reel_count;r++) {
        reel_t reel = cpl->reel[r];
        xmlTextWriterStartElement(xml, BAD_CAST "Reel");
        xmlTextWriterWriteFormatElement(xml, BAD_CAST "Id","%s%s","urn:uuid:",reel.uuid);
        xmlTextWriterStartElement(xml, BAD_CAST "AssetList");

        /* Asset(s) Start */
        for (a=0;a<cpl->reel[r].asset_count;a++) {
            asset_t asset = cpl->reel[r].asset[a];

            /* open the element matching the asset's essence class */
            if (asset.essence_class == ACT_PICTURE) {
                if (asset.stereoscopic) {
                    xmlTextWriterStartElement(xml, BAD_CAST "msp-cpl:MainStereoscopicPicture");
                    xmlTextWriterWriteAttribute(xml, BAD_CAST "xmlns:msp-cpl", BAD_CAST NS_CPL_3D[opendcp->ns]);
                } else {
                    xmlTextWriterStartElement(xml, BAD_CAST "MainPicture");
                }
            }
            if (asset.essence_class == ACT_SOUND) {
                xmlTextWriterStartElement(xml, BAD_CAST "MainSound");
            }
            if (asset.essence_class == ACT_TIMED_TEXT) {
                xmlTextWriterStartElement(xml, BAD_CAST "MainSubtitle");
            }

            /* fields common to all asset types */
            xmlTextWriterWriteFormatElement(xml, BAD_CAST "Id","%s%s","urn:uuid:",asset.uuid);
            xmlTextWriterWriteFormatElement(xml, BAD_CAST "AnnotationText","%s",asset.annotation);
            xmlTextWriterWriteFormatElement(xml, BAD_CAST "EditRate","%s",asset.edit_rate);
            xmlTextWriterWriteFormatElement(xml, BAD_CAST "IntrinsicDuration","%d",asset.intrinsic_duration);
            xmlTextWriterWriteFormatElement(xml, BAD_CAST "EntryPoint","%d",asset.entry_point);
            xmlTextWriterWriteFormatElement(xml, BAD_CAST "Duration","%d",asset.duration);

            /* picture-only fields; interop profile converts the aspect ratio */
            if (asset.essence_class == ACT_PICTURE) {
                xmlTextWriterWriteFormatElement(xml, BAD_CAST "FrameRate","%s",asset.frame_rate);
                if (opendcp->ns == XML_NS_SMPTE) {
                    xmlTextWriterWriteFormatElement(xml, BAD_CAST "ScreenAspectRatio","%s",asset.aspect_ratio);
                } else {
                    xmlTextWriterWriteFormatElement(xml, BAD_CAST "ScreenAspectRatio","%s",get_aspect_ratio(asset.aspect_ratio));
                }
            }
            if ( opendcp->xml.digest_flag ) {
                xmlTextWriterWriteFormatElement(xml, BAD_CAST "Hash","%s",asset.digest);
            }
            xmlTextWriterEndElement(xml); /* end asset */
        }
        xmlTextWriterEndElement(xml); /* end assetlist */
        xmlTextWriterEndElement(xml); /* end reel */
    }
    xmlTextWriterEndElement(xml); /* end reel list */

#ifdef XMLSEC
    if (opendcp->xml_signature.sign) {
        write_dsig_template(opendcp, xml);
    }
#endif

    xmlTextWriterEndElement(xml); /* end compositionplaylist */

    rc = xmlTextWriterEndDocument(xml);
    if (rc < 0) {
        dcp_log(LOG_ERROR,"xmlTextWriterEndDocument failed %s",cpl->filename);
        return DCP_FATAL;
    }
    xmlFreeTextWriter(xml);
    xmlSaveFormatFile(cpl->filename, doc, 1);
    xmlFreeDoc(doc);

#ifdef XMLSEC
    /* sign the XML file */
    if (opendcp->xml_signature.sign) {
        xml_sign(opendcp, cpl->filename);
    }
#endif

    /* store CPL file size */
    dcp_log(LOG_INFO,"Writing CPL file info");
    stat(cpl->filename, &st);
    sprintf(cpl->size,"%"PRIu64,st.st_size);
    calculate_digest(cpl->filename,cpl->digest);

    return DCP_SUCCESS;
}