char* tdav_codec_mp4ves_sdp_att_get(const tmedia_codec_t* _self, const char* att_name) { tdav_codec_mp4ves_t *self = (tdav_codec_mp4ves_t *)_self; if(tsk_striequals(att_name, "fmtp")){ char* fmtp = tsk_null; switch(_self->bl){//FIXME: deprecated case tmedia_bl_low: default: self->profile = Simple_Profile_Level_1; break; case tmedia_bl_medium: self->profile = Simple_Profile_Level_2; break; case tmedia_bl_hight: case tmedia_bl_unrestricted: self->profile = Simple_Profile_Level_3; break; } tsk_sprintf(&fmtp, "profile-level-id=%d", self->profile); return fmtp; } else if(tsk_striequals(att_name, "imageattr")){ return tmedia_get_video_imageattr(TMEDIA_CODEC_VIDEO(self)->pref_size, TMEDIA_CODEC_VIDEO(self)->in.width, TMEDIA_CODEC_VIDEO(self)->in.height, TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height); } return tsk_null; }
/**
 * Opens the MP4V-ES (MPEG-4 Part 2) FFmpeg decoder: finds the codec,
 * allocates the context, the YUV420 picture and the accumulator buffer,
 * then opens the decoder.
 * @param self The MP4V-ES codec.
 * @retval Zero on success, negative error code otherwise.
 */
int tdav_codec_mp4ves_open_decoder(tdav_codec_mp4ves_t* self)
{
	int ret, size;

	if (!self->decoder.codec && !(self->decoder.codec = avcodec_find_decoder(CODEC_ID_MPEG4))) {
		TSK_DEBUG_ERROR("Failed to find MP4V-ES decoder");
		return -1;
	}
	if (self->decoder.context) {
		TSK_DEBUG_ERROR("Decoder already opened");
		return -1;
	}

	self->decoder.context = avcodec_alloc_context();
	avcodec_get_context_defaults(self->decoder.context);

	self->decoder.context->pix_fmt = PIX_FMT_YUV420P;
	self->decoder.context->width = TMEDIA_CODEC_VIDEO(self)->out.width;
	self->decoder.context->height = TMEDIA_CODEC_VIDEO(self)->out.height;

	// Picture (YUV 420)
	if (!(self->decoder.picture = avcodec_alloc_frame())) {
		TSK_DEBUG_ERROR("Failed to create decoder picture");
		return -2;
	}
	avcodec_get_frame_defaults(self->decoder.picture);

	size = avpicture_get_size(PIX_FMT_YUV420P, self->decoder.context->width, self->decoder.context->height);
	// BUGFIX: the accumulator was previously allocated twice with identical
	// tsk_calloc() calls, leaking the first buffer; allocate exactly once.
	if (!(self->decoder.accumulator = tsk_calloc((size + FF_INPUT_BUFFER_PADDING_SIZE), sizeof(uint8_t)))) {
		TSK_DEBUG_ERROR("Failed to allocate decoder buffer");
		return -2;
	}

	// Open decoder
	if ((ret = avcodec_open(self->decoder.context, self->decoder.codec)) < 0) {
		TSK_DEBUG_ERROR("Failed to open MP4V-ES decoder");
		return ret;
	}
	self->decoder.last_seq = 0;
	return ret;
}
// NOTE(review): collapsed source line. Initializes a codec's common fields
// (type/name/desc/format) and fills in unset defaults: bandwidth caps (video
// gets the configured limits, everything else INT_MAX = undefined), in/out
// rates from the plugin, audio ptime/channels/timestamp multipliers, video
// flip flags (compile-time FLIP_*_PICT), fps, chroma (YUV420p default) and
// in/out width+height taken together from the preferred video size, falling
// back to the plugin size when the preferred size cannot be resolved.
// NOTE(review): the `if(0)` branch is dead code kept as @deprecated history.
/**@ingroup tmedia_codec_group * Initialize a Codec * @param self The codec to initialize. Could be any type of codec (e.g. @ref tmedia_codec_audio_t or @ref tmedia_codec_video_t). * @param type * @param name the name of the codec. e.g. "G.711u" or "G.711a" etc used in the sdp. * @param desc full description. * @param format the format. e.g. "0" for G.711.u or "8" for G.711a or "*" for MSRP. * @retval Zero if succeed and non-zero error code otherwise. */ int tmedia_codec_init(tmedia_codec_t* self, tmedia_type_t type, const char* name, const char* desc, const char* format) { if(!self || tsk_strnullORempty(name)){ TSK_DEBUG_ERROR("Invalid parameter"); return -1; } self->type = type; tsk_strupdate(&self->name, name); tsk_strupdate(&self->desc,desc); tsk_strupdate(&self->format, format); if(!self->bandwidth_max_upload) self->bandwidth_max_upload = (type == tmedia_video ? tmedia_defaults_get_bandwidth_video_upload_max() : INT_MAX); // INT_MAX or <=0 means undefined if(!self->bandwidth_max_download) self->bandwidth_max_download = (type == tmedia_video ? tmedia_defaults_get_bandwidth_video_download_max() : INT_MAX); // INT_MAX or <=0 means undefined if(!self->in.rate) self->in.rate = self->plugin->rate; if(!self->out.rate) self->out.rate = self->plugin->rate; if(type & tmedia_audio){ tmedia_codec_audio_t* audio = TMEDIA_CODEC_AUDIO(self); if(!audio->in.ptime) audio->in.ptime = (self->plugin->audio.ptime ? self->plugin->audio.ptime : tmedia_defaults_get_audio_ptime()); if(!audio->out.ptime) audio->out.ptime = (self->plugin->audio.ptime ? 
self->plugin->audio.ptime : tmedia_defaults_get_audio_ptime()); if(!audio->in.channels) audio->in.channels = self->plugin->audio.channels; if(!audio->out.channels) audio->out.channels = self->plugin->audio.channels; if(!audio->in.timestamp_multiplier) audio->in.timestamp_multiplier = tmedia_codec_audio_get_timestamp_multiplier(self->id, self->in.rate); if(!audio->out.timestamp_multiplier) audio->out.timestamp_multiplier = tmedia_codec_audio_get_timestamp_multiplier(self->id, self->out.rate); } // Video flipping: For backward compatibility we have to initialize the default values // according to the CFLAGS: 'FLIP_ENCODED_PICT' and 'FLIP_DECODED_PICT'. At any time you // can update thse values (e.g. when the device switch from landscape to portrait) using video_session->set(); else if(type & tmedia_video){ tmedia_codec_video_t* video = TMEDIA_CODEC_VIDEO(self); #if FLIP_ENCODED_PICT video->out.flip = tsk_true; #endif #if FLIP_DECODED_PICT video->in.flip = tsk_true; #endif if(!video->in.fps) video->in.fps = self->plugin->video.fps ? self->plugin->video.fps : tmedia_defaults_get_video_fps(); if(!video->out.fps) video->out.fps = self->plugin->video.fps ? self->plugin->video.fps : tmedia_defaults_get_video_fps(); if(video->in.chroma == tmedia_chroma_none) video->in.chroma = tmedia_chroma_yuv420p; if(video->out.chroma == tmedia_chroma_none) video->out.chroma = tmedia_chroma_yuv420p; if(0){ // @deprecated if(!video->in.width) video->in.width = video->out.width = self->plugin->video.width; if(!video->in.height) video->in.height = video->out.height = self->plugin->video.height; } else{ int ret; unsigned width, height; video->pref_size = tmedia_defaults_get_pref_video_size(); if((ret = tmedia_video_get_size(video->pref_size, &width, &height)) != 0){ width = self->plugin->video.width; height = self->plugin->video.height; } if(!video->in.width) video->in.width = video->out.width = width; if(!video->in.height) video->in.height = video->out.height = height; } } return 0; }
/**
 * Hands an encoded MP4V-ES chunk to the registered output callback so the
 * upper layer can packetize/send it over the network.
 * @param mp4v The MP4V-ES codec.
 * @param data Pointer to the encoded chunk.
 * @param size Size (in bytes) of the chunk.
 * @param marker Whether this is the last chunk of the frame (RTP marker).
 */
static void tdav_codec_mp4ves_rtp_callback(tdav_codec_mp4ves_t *mp4v, const void *data, tsk_size_t size, tsk_bool_t marker)
{
	tmedia_codec_video_t* video = TMEDIA_CODEC_VIDEO(mp4v);

	// No callback registered -> nothing to send
	if (!video->out.callback) {
		return;
	}

	video->out.result.buffer.ptr = data;
	video->out.result.buffer.size = size;
	// Duration of one frame expressed in RTP clock-rate units
	video->out.result.duration = (uint32_t)((1. / (double)video->out.fps) * TMEDIA_CODEC(mp4v)->plugin->rate);
	video->out.result.last_chunck = marker;
	video->out.callback(&video->out.result);
}
/**
 * Opens the H.264 FFmpeg decoder. A no-op (returns 0) when built in
 * passthrough mode without FFmpeg.
 * @param self The H.264 codec.
 * @retval Zero on success, negative error code otherwise.
 */
int tdav_codec_h264_open_decoder(tdav_codec_h264_t* self)
{
#if HAVE_FFMPEG
	int ret;

	if (self->decoder.context) {
		TSK_DEBUG_ERROR("Decoder already opened");
		return -1;
	}

	self->decoder.context = avcodec_alloc_context();
	avcodec_get_context_defaults(self->decoder.context);

	self->decoder.context->pix_fmt = PIX_FMT_YUV420P;
	self->decoder.context->flags2 |= CODEC_FLAG2_FAST; // favor speed over strict conformance
	self->decoder.context->width = TMEDIA_CODEC_VIDEO(self)->in.width;
	self->decoder.context->height = TMEDIA_CODEC_VIDEO(self)->in.height;

	// Picture (YUV 420)
	if (!(self->decoder.picture = avcodec_alloc_frame())) {
		TSK_DEBUG_ERROR("Failed to create decoder picture");
		return -2;
	}
	avcodec_get_frame_defaults(self->decoder.picture);

	// Open decoder
	if ((ret = avcodec_open(self->decoder.context, self->decoder.codec)) < 0) {
		TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
		return ret;
	}
	self->decoder.last_seq = 0;
	return ret;
#elif HAVE_H264_PASSTHROUGH
	return 0;
#endif
	TSK_DEBUG_ERROR("Unexpected code called");
	return -1;
}
// NOTE(review): collapsed source line. This looks like an older revision of
// tmedia_codec_init (no bandwidth caps, no rate/audio defaults; fps comes
// straight from the plugin) — presumably this chunk concatenates several
// file versions; confirm only one definition is actually compiled.
// Behavior: stores type/name/desc/format, then for video codecs fills unset
// defaults: compile-time flip flags, fps, chroma (YUV420p) and in/out sizes
// from the preferred video size with the plugin size as fallback.
/**@ingroup tmedia_codec_group * Initialize a Codec * @param self The codec to initialize. Could be any type of codec (e.g. @ref tmedia_codec_audio_t or @ref tmedia_codec_video_t). * @param type * @param name the name of the codec. e.g. "G.711u" or "G.711a" etc used in the sdp. * @param desc full description. * @param format the format. e.g. "0" for G.711.u or "8" for G.711a or "*" for MSRP. * @retval Zero if succeed and non-zero error code otherwise. */ int tmedia_codec_init(tmedia_codec_t* self, tmedia_type_t type, const char* name, const char* desc, const char* format) { if(!self || tsk_strnullORempty(name)){ TSK_DEBUG_ERROR("Invalid parameter"); return -1; } self->type = type; tsk_strupdate(&self->name, name); tsk_strupdate(&self->desc,desc); tsk_strupdate(&self->format, format); // Video flipping: For backward compatibility we have to initialize the default values // according to the CFLAGS: 'FLIP_ENCODED_PICT' and 'FLIP_DECODED_PICT'. At any time you // can update thse values (e.g. 
when the device switch from landscape to portrait) using video_session->set(); if(type & tmedia_video){ tmedia_codec_video_t* video = TMEDIA_CODEC_VIDEO(self); #if FLIP_ENCODED_PICT video->out.flip = tsk_true; #endif #if FLIP_DECODED_PICT video->in.flip = tsk_true; #endif if(!video->in.fps) video->in.fps = video->out.fps = self->plugin->video.fps; if(video->in.chroma == tmedia_chroma_none) video->in.chroma = tmedia_chroma_yuv420p; if(video->out.chroma == tmedia_chroma_none) video->out.chroma = tmedia_chroma_yuv420p; if(0){ // @deprecated if(!video->in.width) video->in.width = video->out.width = self->plugin->video.width; if(!video->in.height) video->in.height = video->out.height = self->plugin->video.height; } else{ int ret; unsigned width, height; video->pref_size = tmedia_defaults_get_pref_video_size(); if((ret = tmedia_video_get_size(video->pref_size, &width, &height)) != 0){ width = self->plugin->video.width; height = self->plugin->video.height; } if(!video->in.width) video->in.width = video->out.width = width; if(!video->in.height) video->in.height = video->out.height = height; } } return 0; }
/**@ingroup tmedia_codec_group * Creates a new codec using an already registered plugin. * @param format The format of the codec to create (e.g. "0" for PCMU or "8" for PCMA or "*" for MSRP) * @sa @ref tmedia_codec_plugin_register() */ tmedia_codec_t* tmedia_codec_create(const char* format) { tmedia_codec_t* codec = tsk_null; const tmedia_codec_plugin_def_t* plugin; tsk_size_t i = 0; while((i < TMED_CODEC_MAX_PLUGINS) && (plugin = __tmedia_codec_plugins[i++])) { if(plugin->objdef && tsk_striequals(plugin->format, format)) { if((codec = tsk_object_new(plugin->objdef))) { /* initialize the newly created codec */ codec->id = plugin->codec_id; codec->dyn = plugin->dyn; codec->plugin = plugin; codec->bl = tmedia_bl_medium; switch(plugin->type) { case tmedia_audio: { /* Audio codec */ tmedia_codec_audio_t* audio = TMEDIA_CODEC_AUDIO(codec); tmedia_codec_audio_init(TMEDIA_CODEC(audio), plugin->name, plugin->desc, plugin->format); break; } case tmedia_video: { /* Video codec */ tmedia_codec_video_t* video = TMEDIA_CODEC_VIDEO(codec); tmedia_codec_video_init(TMEDIA_CODEC(video), plugin->name, plugin->desc, plugin->format); break; } case tmedia_msrp: { /* Msrp codec */ tmedia_codec_msrp_init(codec, plugin->name, plugin->desc); break; } default: { /* Any other codec */ tmedia_codec_init(codec, plugin->type, plugin->name, plugin->desc, plugin->format); break; } } break; } } } return codec; }
// NOTE(review): collapsed source line. Initializes the H.264 codec: runs the
// common init, derives the H.264 level from the negotiated out width/height,
// seeds encoder.max_bw_kpbs from the codec's upload bandwidth cap, sets the
// local packetization mode, profile, level and max-mbps/max-br, then (when
// HAVE_FFMPEG) looks up the FFmpeg H.264 encoder and decoder. Passthrough
// builds flag encoder/decoder passthrough instead. All allocations are
// deferred to open() — see the trailing comment in the code.
int tdav_codec_h264_init(tdav_codec_h264_t* self, profile_idc_t profile) { int ret = 0; level_idc_t level; if(!self){ TSK_DEBUG_ERROR("Invalid parameter"); return -1; } if((ret = tdav_codec_h264_common_init(TDAV_CODEC_H264_COMMON(self)))){ TSK_DEBUG_ERROR("tdav_codec_h264_common_init() faile with error code=%d", ret); return ret; } if((ret = tdav_codec_h264_common_level_from_size(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, &level))){ TSK_DEBUG_ERROR("Failed to find level for size=[%u, %u]", TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height); return ret; } (self)->encoder.max_bw_kpbs = TMEDIA_CODEC(self)->bandwidth_max_upload; TDAV_CODEC_H264_COMMON(self)->pack_mode_local = H264_PACKETIZATION_MODE; TDAV_CODEC_H264_COMMON(self)->profile = profile; TDAV_CODEC_H264_COMMON(self)->level = level; TMEDIA_CODEC_VIDEO(self)->in.max_mbps = TMEDIA_CODEC_VIDEO(self)->out.max_mbps = H264_MAX_MBPS*1000; TMEDIA_CODEC_VIDEO(self)->in.max_br = TMEDIA_CODEC_VIDEO(self)->out.max_br = H264_MAX_BR*1000; #if HAVE_FFMPEG if(!(self->encoder.codec = avcodec_find_encoder(CODEC_ID_H264))){ TSK_DEBUG_ERROR("Failed to find H.264 encoder"); ret = -2; } if(!(self->decoder.codec = avcodec_find_decoder(CODEC_ID_H264))){ TSK_DEBUG_ERROR("Failed to find H.264 decoder"); ret = -3; } #endif #if HAVE_H264_PASSTHROUGH TMEDIA_CODEC(self)->passthrough = tsk_true; self->decoder.passthrough = tsk_true; self->encoder.passthrough = tsk_true; #endif self->encoder.quality = 1; /* allocations MUST be done by open() */ return ret; }
// NOTE(review): collapsed source line. Opens the MP4V-ES FFmpeg encoder:
// finds the codec, applies context defaults, sets fps/time-base and a
// rotation-aware width/height (swapped for 90/270), RD macroblock decision +
// noise reduction, fixed-QP quality (CODEC_FLAG_QSCALE + FF_QP2LAMBDA),
// bitrate clamped between the size/fps-derived estimate and
// encoder.max_bw_kpbs, RTP payload size, profile/level unpacked from
// self->profile (high nibble = profile, low nibble = level), GOP size in
// seconds, no B-frames, AC prediction; then allocates the YUV420 picture and
// output buffer and opens the codec.
/* ============ Internal functions ================= */ int tdav_codec_mp4ves_open_encoder(tdav_codec_mp4ves_t* self) { int ret, size; int32_t max_bw_kpbs; if(!self->encoder.codec && !(self->encoder.codec = avcodec_find_encoder(CODEC_ID_MPEG4))){ TSK_DEBUG_ERROR("Failed to find mp4v encoder"); return -1; } if(self->encoder.context){ TSK_DEBUG_ERROR("Encoder already opened"); return -1; } self->encoder.context = avcodec_alloc_context(); avcodec_get_context_defaults(self->encoder.context); self->encoder.context->pix_fmt = PIX_FMT_YUV420P; self->encoder.context->time_base.num = 1; self->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(self)->in.fps; self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.height : TMEDIA_CODEC_VIDEO(self)->out.width; self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.width : TMEDIA_CODEC_VIDEO(self)->out.height; self->encoder.context->mb_decision = FF_MB_DECISION_RD; self->encoder.context->noise_reduction = 250; self->encoder.context->flags |= CODEC_FLAG_QSCALE; self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality; max_bw_kpbs = TSK_CLAMP( 0, tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps), self->encoder.max_bw_kpbs ); self->encoder.context->bit_rate = (max_bw_kpbs * 1024);// bps self->encoder.context->rtp_payload_size = MP4V_RTP_PAYLOAD_SIZE; self->encoder.context->opaque = tsk_null; self->encoder.context->profile = self->profile>>4; self->encoder.context->level = self->profile & 0x0F; self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->in.fps * MP4V_GOP_SIZE_IN_SECONDS); self->encoder.context->max_b_frames = 0; self->encoder.context->b_frame_strategy = 1; self->encoder.context->flags |= CODEC_FLAG_AC_PRED; // Picture (YUV 420) if(!(self->encoder.picture = 
avcodec_alloc_frame())){ TSK_DEBUG_ERROR("Failed to create MP4V-ES encoder picture"); return -2; } avcodec_get_frame_defaults(self->encoder.picture); size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height); if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){ TSK_DEBUG_ERROR("Failed to allocate MP4V-ES encoder buffer"); return -2; } // Open encoder if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){ TSK_DEBUG_ERROR("Failed to open MP4V-ES encoder"); return ret; } TSK_DEBUG_INFO("[MP4V-ES] bitrate=%d bps", self->encoder.context->bit_rate); return ret; }
// NOTE(review): collapsed source line. SDP attribute matcher for MP4V-ES:
// for "fmtp" it parses the remote profile-level-id and adopts it verbatim
// (see inline FIXME: the local bandwidth level is not checked), then forces
// both in and out sizes from the profile (176x144 for Simple Profile Level 1,
// 352x288 otherwise); for "imageattr" it negotiates in/out sizes via
// tmedia_parse_video_imageattr. Returns tsk_false only on invalid args or an
// unparsable imageattr; unknown attributes are accepted (tsk_true).
tsk_bool_t tdav_codec_mp4ves_sdp_att_match(const tmedia_codec_t* _self, const char* att_name, const char* att_value) { tdav_codec_mp4ves_t *self = (tdav_codec_mp4ves_t *)_self; if(!self){ TSK_DEBUG_ERROR("Invalid parameter"); return tsk_false; } if(tsk_striequals(att_name, "fmtp")){ tsk_params_L_t* params ; /* e.g. profile-level-id=1; xx=yy */ if((params = tsk_params_fromstring(att_value, ";", tsk_true))){ int val_int; if((val_int = tsk_params_get_param_value_as_int(params, "profile-level-id")) != -1){ TSK_DEBUG_INFO("Proposed profile-level-id=%d", val_int); self->profile = val_int; // FIXME: Take the remote profile-level-id even if the bandwidth level doesn't match } TSK_OBJECT_SAFE_FREE(params); } switch (self->profile ) { case Simple_Profile_Level_1: TMEDIA_CODEC_VIDEO(self)->out.width = TMEDIA_CODEC_VIDEO(self)->in.width = 176; TMEDIA_CODEC_VIDEO(self)->in.height = TMEDIA_CODEC_VIDEO(self)->out.height = 144; break; case Simple_Profile_Level_2: case Simple_Profile_Level_3: default: TMEDIA_CODEC_VIDEO(self)->out.width = TMEDIA_CODEC_VIDEO(self)->in.width = 352; TMEDIA_CODEC_VIDEO(self)->in.height = TMEDIA_CODEC_VIDEO(self)->out.height = 288; break; } } else if(tsk_striequals(att_name, "imageattr")){ unsigned in_width, in_height, out_width, out_height; if(tmedia_parse_video_imageattr(att_value, TMEDIA_CODEC_VIDEO(self)->pref_size, &in_width, &in_height, &out_width, &out_height) != 0){ return tsk_false; } TMEDIA_CODEC_VIDEO(self)->in.width = in_width; TMEDIA_CODEC_VIDEO(self)->in.height = in_height; TMEDIA_CODEC_VIDEO(self)->out.width = out_width; TMEDIA_CODEC_VIDEO(self)->out.height = out_height; } return tsk_true; }
// NOTE(review): collapsed source line; first of two visible variants of this
// function (presumably different file revisions — confirm which is built).
// Opens the H.264 FFmpeg/x264 encoder: version-gated context allocation,
// x86 DSP mask on old libavcodec, rotation-aware size (swapped for 90/270),
// bitrate clamped between the size/fps estimate and encoder.max_bw_kpbs with
// rc min/max rates, low-latency tuning (no lookahead/scenechange/subpel/
// trellis, ME_EPZS, zero B-frames, LOW_DELAY + GLOBAL_HEADER flags), profile
// and level from the common struct, then x264 private options (slice-max-size
// = RTP payload, profile, preset veryfast, rc_lookahead=0, tune
// animation+zerolatency) before allocating the picture/buffer and opening
// the codec. Passthrough builds return 0 without touching FFmpeg.
int tdav_codec_h264_open_encoder(tdav_codec_h264_t* self) { #if HAVE_FFMPEG int ret; tsk_size_t size; int32_t max_bw_kpbs; if(self->encoder.context){ TSK_DEBUG_ERROR("Encoder already opened"); return -1; } #if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0)) if((self->encoder.context = avcodec_alloc_context3(self->encoder.codec))){ avcodec_get_context_defaults3(self->encoder.context, self->encoder.codec); } #else if((self->encoder.context = avcodec_alloc_context())){ avcodec_get_context_defaults(self->encoder.context); } #endif if(!self->encoder.context){ TSK_DEBUG_ERROR("Failed to allocate context"); return -1; } #if TDAV_UNDER_X86 && LIBAVCODEC_VERSION_MAJOR <= 53 self->encoder.context->dsp_mask = (FF_MM_MMX | FF_MM_MMXEXT | FF_MM_SSE); #endif self->encoder.context->pix_fmt = PIX_FMT_YUV420P; self->encoder.context->time_base.num = 1; self->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(self)->out.fps; self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.height : TMEDIA_CODEC_VIDEO(self)->out.width; self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? 
TMEDIA_CODEC_VIDEO(self)->out.width : TMEDIA_CODEC_VIDEO(self)->out.height; max_bw_kpbs = TSK_CLAMP( 0, tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps), self->encoder.max_bw_kpbs ); self->encoder.context->bit_rate = (max_bw_kpbs * 1024);// bps self->encoder.context->rc_min_rate = (self->encoder.context->bit_rate >> 3); self->encoder.context->rc_max_rate = self->encoder.context->bit_rate; #if LIBAVCODEC_VERSION_MAJOR <= 53 self->encoder.context->rc_lookahead = 0; #endif self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality; self->encoder.context->scenechange_threshold = 0; self->encoder.context->me_subpel_quality = 0; #if LIBAVCODEC_VERSION_MAJOR <= 53 self->encoder.context->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_B8X8; #endif self->encoder.context->me_method = ME_EPZS; self->encoder.context->trellis = 0; self->encoder.context->me_range = 16; self->encoder.context->qmin = 10; self->encoder.context->qmax = 51; #if LIBAVCODEC_VERSION_MAJOR <= 53 self->encoder.context->mb_qmin = self->encoder.context->qmin; self->encoder.context->mb_qmax = self->encoder.context->qmax; #endif self->encoder.context->qcompress = 0.6f; self->encoder.context->mb_decision = FF_MB_DECISION_SIMPLE; #if LIBAVCODEC_VERSION_MAJOR <= 53 self->encoder.context->flags2 |= CODEC_FLAG2_FASTPSKIP; #else self->encoder.context->flags2 |= CODEC_FLAG2_FAST; #endif self->encoder.context->flags |= CODEC_FLAG_LOOP_FILTER; self->encoder.context->flags |= CODEC_FLAG_GLOBAL_HEADER; self->encoder.context->flags |= CODEC_FLAG_LOW_DELAY; self->encoder.context->max_b_frames = 0; self->encoder.context->b_frame_strategy = 1; self->encoder.context->chromaoffset = 0; switch(TDAV_CODEC_H264_COMMON(self)->profile){ case profile_idc_baseline: default: self->encoder.context->profile = FF_PROFILE_H264_BASELINE; self->encoder.context->level = 
TDAV_CODEC_H264_COMMON(self)->level; break; case profile_idc_main: self->encoder.context->profile = FF_PROFILE_H264_MAIN; self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level; break; } self->encoder.context->rtp_payload_size = H264_RTP_PAYLOAD_SIZE; self->encoder.context->opaque = tsk_null; self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->out.fps * TDAV_H264_GOP_SIZE_IN_SECONDS); #if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0)) if((ret = av_opt_set_int(self->encoder.context->priv_data, "slice-max-size", H264_RTP_PAYLOAD_SIZE, 0))){ TSK_DEBUG_ERROR("Failed to set x264 slice-max-size to %d", H264_RTP_PAYLOAD_SIZE); } if((ret = av_opt_set(self->encoder.context->priv_data, "profile", (self->encoder.context->profile == FF_PROFILE_H264_BASELINE ? "baseline" : "main"), 0))){ TSK_DEBUG_ERROR("Failed to set x264 profile"); } if((ret = av_opt_set(self->encoder.context->priv_data, "preset", "veryfast", 0))){ TSK_DEBUG_ERROR("Failed to set x264 preset to veryfast"); } if((ret = av_opt_set_int(self->encoder.context->priv_data, "rc-lookahead", 0, 0)) && (ret = av_opt_set_int(self->encoder.context->priv_data, "rc_lookahead", 0, 0))){ TSK_DEBUG_ERROR("Failed to set x264 rc_lookahead=0"); } if((ret = av_opt_set(self->encoder.context->priv_data, "tune", "animation+zerolatency", 0))){ TSK_DEBUG_ERROR("Failed to set x264 tune to zerolatency"); } #endif // Picture (YUV 420) if(!(self->encoder.picture = avcodec_alloc_frame())){ TSK_DEBUG_ERROR("Failed to create encoder picture"); return -2; } avcodec_get_frame_defaults(self->encoder.picture); size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height); if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){ TSK_DEBUG_ERROR("Failed to allocate encoder buffer"); return -2; } // Open encoder if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){ TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc); return ret; 
} TSK_DEBUG_INFO("[H.264] bitrate=%d bps", self->encoder.context->bit_rate); return ret; #elif HAVE_H264_PASSTHROUGH return 0; #endif TSK_DEBUG_ERROR("Not expected code called"); return -1; }
// NOTE(review): collapsed source line; second visible variant of
// tdav_codec_h264_open_encoder (presumably a newer revision — confirm which
// one is built). Differences vs the other variant: encoder.max_bw_kpbs is
// (re)seeded here from the codec's upload cap, ME_UMH motion estimation,
// CODEC_FLAG_GLOBAL_HEADER disabled (interop workaround, see the METROPOLIS
// / Issue 378 comment in the code), B-frames zeroed only for baseline, and
// encoder.frame_count is reset after a successful open. Otherwise the flow
// matches: version-gated context allocation, rotation-aware size, clamped
// bitrate with rc min/max, x264 private options, picture/buffer allocation,
// avcodec_open.
int tdav_codec_h264_open_encoder(tdav_codec_h264_t* self) { #if HAVE_FFMPEG int ret; tsk_size_t size; if(self->encoder.context){ TSK_DEBUG_ERROR("Encoder already opened"); return -1; } #if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0)) if((self->encoder.context = avcodec_alloc_context3(self->encoder.codec))){ avcodec_get_context_defaults3(self->encoder.context, self->encoder.codec); } #else if((self->encoder.context = avcodec_alloc_context())){ avcodec_get_context_defaults(self->encoder.context); } #endif if(!self->encoder.context){ TSK_DEBUG_ERROR("Failed to allocate context"); return -1; } #if TDAV_UNDER_X86 && LIBAVCODEC_VERSION_MAJOR <= 53 self->encoder.context->dsp_mask = (FF_MM_MMX | FF_MM_MMXEXT | FF_MM_SSE); #endif self->encoder.context->pix_fmt = PIX_FMT_YUV420P; self->encoder.context->time_base.num = 1; self->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(self)->out.fps; self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.height : TMEDIA_CODEC_VIDEO(self)->out.width; self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? 
TMEDIA_CODEC_VIDEO(self)->out.width : TMEDIA_CODEC_VIDEO(self)->out.height; self->encoder.max_bw_kpbs = TSK_CLAMP( 0, tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps), TMEDIA_CODEC(self)->bandwidth_max_upload ); self->encoder.context->bit_rate = (self->encoder.max_bw_kpbs * 1024);// bps self->encoder.context->rc_min_rate = (self->encoder.context->bit_rate >> 3); self->encoder.context->rc_max_rate = self->encoder.context->bit_rate; #if LIBAVCODEC_VERSION_MAJOR <= 53 self->encoder.context->rc_lookahead = 0; #endif self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality; #if LIBAVCODEC_VERSION_MAJOR <= 53 self->encoder.context->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_B8X8; #endif self->encoder.context->me_method = ME_UMH; self->encoder.context->me_range = 16; self->encoder.context->qmin = 10; self->encoder.context->qmax = 51; #if LIBAVCODEC_VERSION_MAJOR <= 53 self->encoder.context->mb_qmin = self->encoder.context->qmin; self->encoder.context->mb_qmax = self->encoder.context->qmax; #endif /* METROPOLIS = G2J.COM TelePresence client. 
Check Issue 378: No video when calling "TANDBERG/4129 (X8.1.1)" */ #if !METROPOLIS && 0 self->encoder.context->flags |= CODEC_FLAG_GLOBAL_HEADER; #endif self->encoder.context->flags |= CODEC_FLAG_LOW_DELAY; if (self->encoder.context->profile == FF_PROFILE_H264_BASELINE) { self->encoder.context->max_b_frames = 0; } switch(TDAV_CODEC_H264_COMMON(self)->profile){ case profile_idc_baseline: default: self->encoder.context->profile = FF_PROFILE_H264_BASELINE; self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level; break; case profile_idc_main: self->encoder.context->profile = FF_PROFILE_H264_MAIN; self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level; break; } /* Comment from libavcodec/libx264.c: * Allow x264 to be instructed through AVCodecContext about the maximum * size of the RTP payload. For example, this enables the production of * payload suitable for the H.264 RTP packetization-mode 0 i.e. single * NAL unit per RTP packet. */ self->encoder.context->rtp_payload_size = H264_RTP_PAYLOAD_SIZE; self->encoder.context->opaque = tsk_null; self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->out.fps * TDAV_H264_GOP_SIZE_IN_SECONDS); #if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0)) if((ret = av_opt_set_int(self->encoder.context->priv_data, "slice-max-size", H264_RTP_PAYLOAD_SIZE, 0))){ TSK_DEBUG_ERROR("Failed to set x264 slice-max-size to %d", H264_RTP_PAYLOAD_SIZE); } if((ret = av_opt_set(self->encoder.context->priv_data, "profile", (self->encoder.context->profile == FF_PROFILE_H264_BASELINE ? 
"baseline" : "main"), 0))){ TSK_DEBUG_ERROR("Failed to set x264 profile"); } if((ret = av_opt_set(self->encoder.context->priv_data, "preset", "veryfast", 0))){ TSK_DEBUG_ERROR("Failed to set x264 preset to veryfast"); } if((ret = av_opt_set_int(self->encoder.context->priv_data, "rc-lookahead", 0, 0)) && (ret = av_opt_set_int(self->encoder.context->priv_data, "rc_lookahead", 0, 0))){ TSK_DEBUG_ERROR("Failed to set x264 rc_lookahead=0"); } if((ret = av_opt_set(self->encoder.context->priv_data, "tune", "animation+zerolatency", 0))){ TSK_DEBUG_ERROR("Failed to set x264 tune to zerolatency"); } #endif // Picture (YUV 420) if(!(self->encoder.picture = avcodec_alloc_frame())){ TSK_DEBUG_ERROR("Failed to create encoder picture"); return -2; } avcodec_get_frame_defaults(self->encoder.picture); size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height); if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){ TSK_DEBUG_ERROR("Failed to allocate encoder buffer"); return -2; } // Open encoder if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){ TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc); return ret; } self->encoder.frame_count = 0; TSK_DEBUG_INFO("[H.264] bitrate=%d bps", self->encoder.context->bit_rate); return ret; #elif HAVE_H264_PASSTHROUGH self->encoder.frame_count = 0; return 0; #endif TSK_DEBUG_ERROR("Not expected code called"); return -1; }
// NOTE(review): collapsed source line. H.264 RTP depacketizer + decoder:
// tracks sequence numbers (logs loss), drops the access unit when the NAL
// forbidden bit (F) is set, strips the RTP payload header via
// tdav_codec_h264_get_pay, and accumulates NAL units into a growable buffer
// (Annex-B start code prepended when append_scp is set; hard cap
// (3840*2160*3)>>3 bytes — the ">>3 instead of >>1" is intentional per the
// inline comment). SPS/PPS are buffered until the IDR they belong to. On the
// RTP marker the access unit is either copied out verbatim (passthrough) or
// decoded with avcodec_decode_video2; decode errors and IDRs are signalled
// through in.callback, and a decoded frame is laid out as YUV420 into
// *out_data (realloc'd as needed). Returns the output size, 0 on error/no
// frame. Order of accumulator bookkeeping is load-bearing — left verbatim.
static tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr) { tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self; const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr; const uint8_t* pay_ptr = tsk_null; tsk_size_t pay_size = 0; int ret; tsk_bool_t sps_or_pps, append_scp, end_of_unit; tsk_size_t retsize = 0, size_to_copy = 0; static const tsk_size_t xmax_size = (3840 * 2160 * 3) >> 3; // >>3 instead of >>1 (not an error) static tsk_size_t start_code_prefix_size = sizeof(H264_START_CODE_PREFIX); #if HAVE_FFMPEG int got_picture_ptr = 0; #endif if(!h264 || !in_data || !in_size || !out_data #if HAVE_FFMPEG || !h264->decoder.context #endif ) { TSK_DEBUG_ERROR("Invalid parameter"); return 0; } //TSK_DEBUG_INFO("SeqNo=%hu", rtp_hdr->seq_num); /* Packet lost? */ if((h264->decoder.last_seq + 1) != rtp_hdr->seq_num && h264->decoder.last_seq){ TSK_DEBUG_INFO("[H.264] Packet loss, seq_num=%d", (h264->decoder.last_seq + 1)); } h264->decoder.last_seq = rtp_hdr->seq_num; /* 5.3. NAL Unit Octet Usage +---------------+ |0|1|2|3|4|5|6|7| +-+-+-+-+-+-+-+-+ |F|NRI| Type | +---------------+ */ if(*((uint8_t*)in_data) & 0x80){ TSK_DEBUG_WARN("F=1"); /* reset accumulator */ h264->decoder.accumulator_pos = 0; return 0; } /* get payload */ if((ret = tdav_codec_h264_get_pay(in_data, in_size, (const void**)&pay_ptr, &pay_size, &append_scp, &end_of_unit)) || !pay_ptr || !pay_size){ TSK_DEBUG_ERROR("Depayloader failed to get H.264 content"); return 0; } //append_scp = tsk_true; size_to_copy = pay_size + (append_scp ? start_code_prefix_size : 0); // whether it's SPS or PPS (append_scp is false for subsequent FUA chuncks) sps_or_pps = append_scp && pay_ptr && ((pay_ptr[0] & 0x1F) == 7 || (pay_ptr[0] & 0x1F) == 8); // start-accumulator if(!h264->decoder.accumulator){ if(size_to_copy > xmax_size){ TSK_DEBUG_ERROR("%u too big to contain valid encoded data. 
xmax_size=%u", size_to_copy, xmax_size); return 0; } if(!(h264->decoder.accumulator = tsk_calloc(size_to_copy, sizeof(uint8_t)))){ TSK_DEBUG_ERROR("Failed to allocated new buffer"); return 0; } h264->decoder.accumulator_size = size_to_copy; } if((h264->decoder.accumulator_pos + size_to_copy) >= xmax_size){ TSK_DEBUG_ERROR("BufferOverflow"); h264->decoder.accumulator_pos = 0; return 0; } if((h264->decoder.accumulator_pos + size_to_copy) > h264->decoder.accumulator_size){ if(!(h264->decoder.accumulator = tsk_realloc(h264->decoder.accumulator, (h264->decoder.accumulator_pos + size_to_copy)))){ TSK_DEBUG_ERROR("Failed to reallocated new buffer"); h264->decoder.accumulator_pos = 0; h264->decoder.accumulator_size = 0; return 0; } h264->decoder.accumulator_size = (h264->decoder.accumulator_pos + size_to_copy); } if(append_scp){ memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], H264_START_CODE_PREFIX, start_code_prefix_size); h264->decoder.accumulator_pos += start_code_prefix_size; } memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], pay_ptr, pay_size); h264->decoder.accumulator_pos += pay_size; // end-accumulator if(sps_or_pps){ // http://libav-users.943685.n4.nabble.com/Decode-H264-streams-how-to-fill-AVCodecContext-from-SPS-PPS-td2484472.html // SPS and PPS should be bundled with IDR TSK_DEBUG_INFO("Receiving SPS or PPS ...to be tied to an IDR"); } else if(rtp_hdr->marker){ if(h264->decoder.passthrough){ if(*out_max_size < h264->decoder.accumulator_pos){ if((*out_data = tsk_realloc(*out_data, h264->decoder.accumulator_pos))){ *out_max_size = h264->decoder.accumulator_pos; } else{ *out_max_size = 0; return 0; } } memcpy(*out_data, h264->decoder.accumulator, h264->decoder.accumulator_pos); retsize = h264->decoder.accumulator_pos; } else { // !h264->decoder.passthrough #if HAVE_FFMPEG AVPacket packet; /* decode the picture */ av_init_packet(&packet); packet.dts = packet.pts = AV_NOPTS_VALUE; packet.size = 
(int)h264->decoder.accumulator_pos; packet.data = h264->decoder.accumulator; ret = avcodec_decode_video2(h264->decoder.context, h264->decoder.picture, &got_picture_ptr, &packet); if(ret <0){ TSK_DEBUG_INFO("Failed to decode the buffer with error code =%d, size=%u, append=%s", ret, h264->decoder.accumulator_pos, append_scp ? "yes" : "no"); if(TMEDIA_CODEC_VIDEO(self)->in.callback){ TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_error; TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr; TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result); } } else if(got_picture_ptr){ tsk_size_t xsize; /* IDR ? */ if(((pay_ptr[0] & 0x1F) == 0x05) && TMEDIA_CODEC_VIDEO(self)->in.callback){ TSK_DEBUG_INFO("Decoded H.264 IDR"); TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_idr; TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr; TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result); } /* fill out */ xsize = avpicture_get_size(h264->decoder.context->pix_fmt, h264->decoder.context->width, h264->decoder.context->height); if(*out_max_size<xsize){ if((*out_data = tsk_realloc(*out_data, (xsize + FF_INPUT_BUFFER_PADDING_SIZE)))){ *out_max_size = xsize; } else{ *out_max_size = 0; return 0; } } retsize = xsize; TMEDIA_CODEC_VIDEO(h264)->in.width = h264->decoder.context->width; TMEDIA_CODEC_VIDEO(h264)->in.height = h264->decoder.context->height; avpicture_layout((AVPicture *)h264->decoder.picture, h264->decoder.context->pix_fmt, (int)h264->decoder.context->width, (int)h264->decoder.context->height, *out_data, (int)retsize); } #endif /* HAVE_FFMPEG */ } // else(h264->decoder.passthrough) h264->decoder.accumulator_pos = 0; } // else if(rtp_hdr->marker) return retsize; }
/**
 * Applies a runtime parameter to the H.264 codec instance.
 * Recognized int32 keys:
 *  - "action": codec action (force IDR, bandwidth up/down via QP adjustment)
 *  - "bw_kbps": new max upload bandwidth, clamped to the user-defined maximum
 *  - "bypass-encoding"/"bypass-decoding": passthrough switches
 *  - "rotation": new rotation angle; reopens the encoder if already opened
 * @param self the codec (actually a tdav_codec_h264_t*)
 * @param param key/value pair to apply
 * @retval 0 on success, -1 for an unknown key/value-type, any other negative
 *         value propagated from close/open when re-applying rotation.
 */
static int tdav_codec_h264_set(tmedia_codec_t* self, const tmedia_param_t* param)
{
	tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
	if (param->value_type == tmedia_pvt_int32) {
		if (tsk_striequals(param->key, "action")) {
			tmedia_codec_action_t action = (tmedia_codec_action_t)TSK_TO_INT32((uint8_t*)param->value);
			switch (action) {
				case tmedia_codec_action_encode_idr:
				{
					h264->encoder.force_idr = tsk_true;
					break;
				}
				case tmedia_codec_action_bw_down:
				{
					// Higher QP (1..31, MPEG-style scale) => lower quality/bitrate
					h264->encoder.quality = TSK_CLAMP(1, (h264->encoder.quality + 1), 31);
#if HAVE_FFMPEG
					if (h264->encoder.context) {
						h264->encoder.context->global_quality = FF_QP2LAMBDA * h264->encoder.quality;
					}
#endif
					break;
				}
				case tmedia_codec_action_bw_up:
				{
					// Lower QP => higher quality/bitrate
					h264->encoder.quality = TSK_CLAMP(1, (h264->encoder.quality - 1), 31);
#if HAVE_FFMPEG
					if (h264->encoder.context) {
						h264->encoder.context->global_quality = FF_QP2LAMBDA * h264->encoder.quality;
					}
#endif
					break;
				}
				default:
				{
					// FIX: explicit default — other actions are intentionally ignored
					// (also keeps -Wswitch quiet when new enum values are added)
					break;
				}
			}
			return 0;
		}
		else if (tsk_striequals(param->key, "bw_kbps")) {
			int32_t max_bw_userdefine = self->bandwidth_max_upload;
			int32_t max_bw_new = *((int32_t*)param->value);
			if (max_bw_userdefine > 0) {
				// do not use more than what the user defined in its configuration
				h264->encoder.max_bw_kpbs = TSK_MIN(max_bw_new, max_bw_userdefine);
			}
			else {
				h264->encoder.max_bw_kpbs = max_bw_new;
			}
			return 0;
		}
		else if (tsk_striequals(param->key, "bypass-encoding")) {
			h264->encoder.passthrough = *((int32_t*)param->value) ? tsk_true : tsk_false;
			TSK_DEBUG_INFO("[H.264] bypass-encoding = %d", h264->encoder.passthrough);
			return 0;
		}
		else if (tsk_striequals(param->key, "bypass-decoding")) {
			h264->decoder.passthrough = *((int32_t*)param->value) ? tsk_true : tsk_false;
			TSK_DEBUG_INFO("[H.264] bypass-decoding = %d", h264->decoder.passthrough);
			return 0;
		}
		else if (tsk_striequals(param->key, "rotation")) {
			int32_t rotation = *((int32_t*)param->value);
			if (h264->encoder.rotation != rotation) {
				h264->encoder.rotation = rotation;
				if (self->opened) {
					// Reopen the encoder so the (possibly swapped) width/height
					// are re-applied; the rotation value itself is preserved.
					int ret;
					if ((ret = tdav_codec_h264_close_encoder(h264, kResetRotationFalse))) {
						return ret;
					}
					if ((ret = tdav_codec_h264_open_encoder(h264))) {
						return ret;
					}
					// (removed long-dead "#if 0" avcodec_close/avcodec_open block)
				}
			}
			return 0;
		}
	}
	return -1;
}
/**
 * Packetizes one encoded H.264 NAL unit for RTP transport and delivers the
 * packet(s) through the codec's out.callback.
 * A leading Annex-B start-code prefix (3 or 4 bytes) is stripped first.
 * Small NAL units (< H264_RTP_PAYLOAD_SIZE) are sent as a Single NAL Unit
 * packet; larger ones are fragmented as FU-A (RFC 6184 section 5.8).
 * @param self common H.264 codec state (owns the rtp scratch buffer)
 * @param data pointer to the NAL unit (possibly with start-code prefix)
 * @param size number of bytes at @a data
 * @param marker RTP marker flag to set on the last (or only) packet
 */
void tdav_codec_h264_rtp_callback(struct tdav_codec_h264_common_s *self, const void *data, tsk_size_t size, tsk_bool_t marker)
{
	uint8_t* pdata = (uint8_t*)data;
	//TSK_DEBUG_INFO("%x %x %x %x -- %u", pdata[0], pdata[1], pdata[2], pdata[3], size);
	// Strip the Annex-B start-code prefix: 00 00 01 (3 bytes) or 00 00 00 01 (4 bytes).
	// H264_START_CODE_PREFIX[3] is the terminating 0x01 byte; [2] is a 0x00.
	if(size>4 && pdata[0] == H264_START_CODE_PREFIX[0] && pdata[1] == H264_START_CODE_PREFIX[1]){
		if(pdata[2] == H264_START_CODE_PREFIX[3]){
			// 3-byte prefix
			pdata += 3, size -= 3;
		}
		else if(pdata[2] == H264_START_CODE_PREFIX[2] && pdata[3] == H264_START_CODE_PREFIX[3]){
			// 4-byte prefix
			pdata += 4, size -= 4;
		}
	}
	//TSK_DEBUG_INFO("==> SCP %2x %2x %2x %2x", pdata[0], pdata[1], pdata[2], pdata[3]);

	if(size < H264_RTP_PAYLOAD_SIZE){
		/* Fits in a Single NAL Unit packet: send the NAL as-is */
		if(TMEDIA_CODEC_VIDEO(self)->out.callback){
			TMEDIA_CODEC_VIDEO(self)->out.result.buffer.ptr = pdata;
			TMEDIA_CODEC_VIDEO(self)->out.result.buffer.size = size;
			// NOTE(review): integer division — assumes fps divides 30 and a 90kHz
			// clock (3003 * 30 = 90090); fps > 30 yields duration 0. Confirm intent.
			TMEDIA_CODEC_VIDEO(self)->out.result.duration = (3003* (30/TMEDIA_CODEC_VIDEO(self)->out.fps));
			TMEDIA_CODEC_VIDEO(self)->out.result.last_chunck = marker;
			TMEDIA_CODEC_VIDEO(self)->out.callback(&TMEDIA_CODEC_VIDEO(self)->out.result);
		}
	}
	else if(size > H264_NAL_UNIT_TYPE_HEADER_SIZE){
		/* Too big for one packet: fragment as FU-A */
		uint8_t fua_hdr[H264_FUA_HEADER_SIZE]; /* "FU indicator" and "FU header" - 2 bytes */
		// FU indicator: F=0, NRI copied from the NAL header, type = FU-A (28)
		fua_hdr[0] = pdata[0] & 0x60/* F=0 */, fua_hdr[0] |= fu_a;
		// FU header: S=1 (start) for the first fragment; original NAL type in low 5 bits
		fua_hdr[1] = 0x80/* S=1,E=0,R=0 */, fua_hdr[1] |= pdata[0] & 0x1f; /* type */
		// discard the 1-byte NAL unit header (it is reconstructed from the FU headers)
		pdata += H264_NAL_UNIT_TYPE_HEADER_SIZE;
		size -= H264_NAL_UNIT_TYPE_HEADER_SIZE;

		while(size){
			tsk_size_t packet_size = TSK_MIN(H264_RTP_PAYLOAD_SIZE, size);

			// Grow the reusable rtp scratch buffer if this fragment won't fit
			if(self->rtp.size < (packet_size + H264_FUA_HEADER_SIZE)){
				if(!(self->rtp.ptr = tsk_realloc(self->rtp.ptr, (packet_size + H264_FUA_HEADER_SIZE)))){
					TSK_DEBUG_ERROR("Failed to allocate new buffer");
					return;
				}
				self->rtp.size = (packet_size + H264_FUA_HEADER_SIZE);
			}
			// set E (end) bit on the last fragment
			if((size - packet_size) == 0){
				// Last packet
				fua_hdr[1] |= 0x40;
			}
			// copy FU-A headers into the packet
			memcpy(self->rtp.ptr, fua_hdr, H264_FUA_HEADER_SIZE);
			// clear the S (start) bit: only the first fragment carries it
			fua_hdr[1] &= 0x7F;
			// copy the fragment payload after the FU-A headers
			memcpy((self->rtp.ptr + H264_FUA_HEADER_SIZE), pdata, packet_size);
			pdata += packet_size;
			size -= packet_size;
			// deliver this fragment; last_chunck mirrors the E bit
			if(TMEDIA_CODEC_VIDEO(self)->out.callback){
				TMEDIA_CODEC_VIDEO(self)->out.result.buffer.ptr = self->rtp.ptr;
				TMEDIA_CODEC_VIDEO(self)->out.result.buffer.size = (packet_size + H264_FUA_HEADER_SIZE);
				// NOTE(review): same fps integer-division assumption as above
				TMEDIA_CODEC_VIDEO(self)->out.result.duration = (3003* (30/TMEDIA_CODEC_VIDEO(self)->out.fps));
				TMEDIA_CODEC_VIDEO(self)->out.result.last_chunck = (size == 0);
				TMEDIA_CODEC_VIDEO(self)->out.callback(&TMEDIA_CODEC_VIDEO(self)->out.result);
			}
		}
	}
}
/**
 * Prepares the V4L2 video producer with the negotiated codec capabilities.
 * Copies fps/width/height from the codec, derives the grab timeout, prepares
 * the device, then maps the device's actual pixel format/size back onto the
 * producer (chroma conversion/scaling is left to the "converter").
 * @param p_self the producer (actually a tdav_producer_video_v4l2_t*)
 * @param pc_codec negotiated codec carrying the requested output caps
 * @retval 0 on success, negative error code otherwise
 */
static int _tdav_producer_video_v4l2_prepare(tmedia_producer_t* p_self, const tmedia_codec_t* pc_codec)
{
	tdav_producer_video_v4l2_t* p_v4l2 = (tdav_producer_video_v4l2_t*)p_self;
	int ret = 0;
	if (!p_v4l2 || !pc_codec) {
		V4L2_DEBUG_ERROR("Invalid parameter");
		return -1;
	}
	tsk_safeobj_lock(p_v4l2);

	if (!p_v4l2->p_timer_mgr && !(p_v4l2->p_timer_mgr = tsk_timer_manager_create())) {
		V4L2_DEBUG_ERROR("Failed to create timer manager");
		ret = -2;
		goto bail;
	}

	TMEDIA_PRODUCER(p_v4l2)->video.fps = TMEDIA_CODEC_VIDEO(pc_codec)->out.fps;
	TMEDIA_PRODUCER(p_v4l2)->video.width = TMEDIA_CODEC_VIDEO(pc_codec)->out.width;
	TMEDIA_PRODUCER(p_v4l2)->video.height = TMEDIA_CODEC_VIDEO(pc_codec)->out.height;
	// FIX: reject an invalid negotiated fps instead of dividing by zero below
	if (TMEDIA_PRODUCER(p_v4l2)->video.fps <= 0) {
		V4L2_DEBUG_ERROR("Invalid negotiated fps: %d", TMEDIA_PRODUCER(p_v4l2)->video.fps);
		ret = -2;
		goto bail;
	}
	p_v4l2->u_timout_grab = (1000/TMEDIA_PRODUCER(p_v4l2)->video.fps); // ms between grabs

	// prepare()
	if ((ret = _v4l2_prepare(p_v4l2))) {
		goto bail;
	}

	// update() - up to the "converter" to perform chroma conversion and scaling
	TMEDIA_PRODUCER(p_v4l2)->video.width = p_v4l2->fmt.fmt.pix.width;
	TMEDIA_PRODUCER(p_v4l2)->video.height = p_v4l2->fmt.fmt.pix.height;
#if V4L2_FAKE_UYVY
	TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_uyvy422;
#else
	// Map the V4L2 pixel format actually accepted by the driver to a tmedia chroma
	switch (p_v4l2->fmt.fmt.pix.pixelformat) {
		case V4L2_PIX_FMT_YUV420:
			TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_yuv420p;
			break;
		case V4L2_PIX_FMT_NV12:
			TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_nv12;
			break;
		case V4L2_PIX_FMT_NV21:
			TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_nv21;
			break;
		case V4L2_PIX_FMT_YUYV:
			TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_yuyv422;
			break;
		case V4L2_PIX_FMT_UYVY:
			TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_uyvy422; // SINCITY
			break;
		case V4L2_PIX_FMT_RGB24:
			TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_rgb24;
			break;
		case V4L2_PIX_FMT_RGB32:
			TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_rgb32;
			break;
		case V4L2_PIX_FMT_MJPEG:
			TMEDIA_PRODUCER(p_v4l2)->video.chroma = tmedia_chroma_mjpeg;
			break;
		default:
			V4L2_DEBUG_ERROR("Failed to match negotiated format: %d", p_v4l2->fmt.fmt.pix.pixelformat);
			ret = -1;
			goto bail;
	}
#endif /* V4L2_FAKE_UYVY */

	V4L2_DEBUG_INFO("Negotiated caps: fps=%d, width=%d, height=%d, chroma=%d",
		TMEDIA_PRODUCER(p_v4l2)->video.fps,
		TMEDIA_PRODUCER(p_v4l2)->video.width,
		TMEDIA_PRODUCER(p_v4l2)->video.height,
		TMEDIA_PRODUCER(p_v4l2)->video.chroma);

	p_v4l2->b_prepared = (ret == 0) ? tsk_true : tsk_false;
bail:
	tsk_safeobj_unlock(p_v4l2);
	return ret;
}
/**
 * Decodes one RTP payload of an MP4V-ES (MPEG-4 Part 2) stream.
 * Payloads are accumulated until the RTP marker bit signals the end of the
 * access unit, then the whole accumulator is fed to the ffmpeg decoder and
 * the decoded picture is laid out linearly into *out_data.
 * @param _self the codec (actually a tdav_codec_mp4ves_t*)
 * @param in_data RTP payload bytes
 * @param in_size number of bytes at @a in_data
 * @param out_data [in/out] destination buffer, reallocated here when too small
 * @param out_max_size [in/out] capacity of *out_data
 * @param proto_hdr the RTP header (trtp_rtp_header_t) of this payload
 * @retval size in bytes of the decoded picture copied into *out_data,
 *         0 when no complete picture is available (or on any error).
 */
tsk_size_t tdav_codec_mp4ves_decode(tmedia_codec_t* _self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
{
	tdav_codec_mp4ves_t* self = (tdav_codec_mp4ves_t*)_self;
	const trtp_rtp_header_t* rtp_hdr = proto_hdr;

	tsk_size_t xsize, retsize = 0;
	int got_picture_ptr;
	int ret;

	if(!self || !in_data || !in_size || !out_data || !self->decoder.context){
		TSK_DEBUG_ERROR("Invalid parameter");
		return 0;
	}

	// expected size of one decoded picture given the decoder's current pix_fmt/dimensions
	xsize = avpicture_get_size(self->decoder.context->pix_fmt, self->decoder.context->width, self->decoder.context->height);

	/* Packet lost?
	 * NOTE(review): plain (last_seq != seq-1) comparison — 16-bit sequence-number
	 * wrap-around is reported as a loss; confirm this is acceptable. */
	if(self->decoder.last_seq != (rtp_hdr->seq_num - 1) && self->decoder.last_seq){
		if(self->decoder.last_seq == rtp_hdr->seq_num){
			// Could happen on some stupid emulators
			TSK_DEBUG_INFO("Packet duplicated, seq_num=%d", rtp_hdr->seq_num);
			return 0;
		}
		TSK_DEBUG_INFO("Packet lost, seq_num=%d", rtp_hdr->seq_num);
	}
	self->decoder.last_seq = rtp_hdr->seq_num;

	// append this payload to the accumulator (capacity bounded by xsize)
	if((self->decoder.accumulator_pos + in_size) <= xsize){
		memcpy(&((uint8_t*)self->decoder.accumulator)[self->decoder.accumulator_pos], in_data, in_size);
		self->decoder.accumulator_pos += in_size;
	}
	else{
		// would overflow: drop the whole partially-accumulated access unit
		TSK_DEBUG_WARN("Buffer overflow");
		self->decoder.accumulator_pos = 0;
		return 0;
	}

	// marker bit set => the access unit is complete, decode it now
	if(rtp_hdr->marker){
		AVPacket packet;
		/* grow the destination buffer if needed */
		if(*out_max_size <xsize){
			if(!(*out_data = tsk_realloc(*out_data, xsize))){
				TSK_DEBUG_ERROR("Failed to allocate new buffer");
				self->decoder.accumulator_pos = 0;
				*out_max_size = 0;
				return 0;
			}
			*out_max_size = xsize;
		}

		av_init_packet(&packet);
		packet.size = (int)self->decoder.accumulator_pos;
		packet.data = self->decoder.accumulator;
		ret = avcodec_decode_video2(self->decoder.context, self->decoder.picture, &got_picture_ptr, &packet);

		if(ret < 0){
			// decode failed: report the error upstream via the in.callback
			TSK_DEBUG_WARN("Failed to decode the buffer with error code = %d", ret);
			if(TMEDIA_CODEC_VIDEO(self)->in.callback){
				TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_error;
				TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
				TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
			}
		}
		else if(got_picture_ptr){
			retsize = xsize;
			// propagate the actual decoded dimensions
			TMEDIA_CODEC_VIDEO(self)->in.width = self->decoder.context->width;
			TMEDIA_CODEC_VIDEO(self)->in.height = self->decoder.context->height;
			/* copy picture into a linear buffer */
			avpicture_layout((AVPicture *)self->decoder.picture, self->decoder.context->pix_fmt, (int)self->decoder.context->width, (int)self->decoder.context->height, *out_data, (int)retsize);
		}
		/* in all cases: reset accumulator */
		self->decoder.accumulator_pos = 0;
	}

	return retsize;
}
/**
 * Encodes one raw YUV420 frame to H.264 and hands the resulting NAL unit(s)
 * to the RTP packetizer (tdav_codec_h264_rtp_encap). In passthrough mode the
 * input is assumed to be already-encoded and is packetized directly.
 * Output is delivered via the packetizer's callback, never through out_data,
 * so this function always returns 0.
 * @param self the codec (actually a tdav_codec_h264_t*)
 * @param in_data one YUV420P frame matching the encoder's width/height
 * @param in_size number of bytes at @a in_data
 * @param out_data unused (output goes through the RTP callback)
 * @param out_max_size unused
 * @retval always 0
 */
static tsk_size_t tdav_codec_h264_encode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size)
{
	int ret = 0;

#if HAVE_FFMPEG
	int size;
	tsk_bool_t send_idr, send_hdr;
#endif

	tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;

	if(!self || !in_data || !in_size){
		TSK_DEBUG_ERROR("Invalid parameter");
		return 0;
	}

	if(!self->opened){
		TSK_DEBUG_ERROR("Codec not opened");
		return 0;
	}

	if(h264->encoder.passthrough) {
		// input is already H.264: packetize as-is
		tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), (const uint8_t*)in_data, in_size);
	}
	else { // !h264->encoder.passthrough
#if HAVE_FFMPEG
		// wrap the caller's yuv420 buffer in the encoder picture (no copy)
		size = avpicture_fill((AVPicture *)h264->encoder.picture, (uint8_t*)in_data, PIX_FMT_YUV420P, h264->encoder.context->width, h264->encoder.context->height);
		// NOTE(review): int vs tsk_size_t comparison — in_size is converted to int's
		// rank rules here; fine for sane frame sizes, but confirm for huge frames.
		if(size != in_size){
			/* guard */
			TSK_DEBUG_ERROR("Invalid size");
			return 0;
		}

		// send IDR for:
		//	- the first frame
		//  - remote peer requested an IDR
		//	- every second within the first 4 seconds
		send_idr = (
			h264->encoder.frame_count++ == 0
			|| h264 ->encoder.force_idr
			|| ( (h264->encoder.frame_count < (int)TMEDIA_CODEC_VIDEO(h264)->out.fps * 4) && ((h264->encoder.frame_count % TMEDIA_CODEC_VIDEO(h264)->out.fps)==0) )
		);

		// send SPS and PPS headers for:
		//  - IDR frames (not required but it's the easiest way to deal with pkt loss)
		//  - every 5 seconds after the first 4 seconds
		send_hdr = (
			send_idr
			|| ( (h264->encoder.frame_count % (TMEDIA_CODEC_VIDEO(h264)->out.fps * 5))==0 )
		);
		if(send_hdr){
			// extradata holds the SPS/PPS produced at encoder open time
			tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), h264->encoder.context->extradata, (tsk_size_t)h264->encoder.context->extradata_size);
		}

		// Encode data
#if LIBAVCODEC_VERSION_MAJOR <= 53
		h264->encoder.picture->pict_type = send_idr ? FF_I_TYPE : 0;
#else
		h264->encoder.picture->pict_type = send_idr ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_NONE;
#endif
		h264->encoder.picture->pts = AV_NOPTS_VALUE;
		h264->encoder.picture->quality = h264->encoder.context->global_quality;
		// h264->encoder.picture->pts = h264->encoder.frame_count; MUST NOT
		ret = avcodec_encode_video(h264->encoder.context, h264->encoder.buffer, size, h264->encoder.picture);
		if(ret > 0){
			// ret is the number of encoded bytes in encoder.buffer
			tdav_codec_h264_rtp_encap(TDAV_CODEC_H264_COMMON(h264), h264->encoder.buffer, (tsk_size_t)ret);
		}
		// the forced-IDR request has been honored (or the frame failed); clear it
		h264 ->encoder.force_idr = tsk_false;
#endif
	}// else(!h264->encoder.passthrough)

	return 0;
}