//Query the NAL-unit length-prefix size (in bytes) used by the AVCC stream
//described by this sample buffer's format description.
//Returns 0 on success and stores the size into aiSize; returns -1 on failure.
int CVideoEncodeVt::GetLengthCodeSize(CMSampleBufferRef sampleBuffer, int* aiSize)
{
    if(NULL == sampleBuffer)
    {
        return -1;
    }

    //The format description carries the H.264 parameter sets and the length-code size
    CMVideoFormatDescriptionRef fmtDesc = CMSampleBufferGetFormatDescription(sampleBuffer);
    if(NULL == fmtDesc)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CMSampleBufferGetFormatDescription failed!");
        return -1;
    }

    //Only the NALUnitHeaderLength output parameter is requested here;
    //the parameter-set pointer/size/count outputs are all passed as NULL.
    int iLengthCode = 0;
    const int iRet = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(fmtDesc, 0, NULL, NULL, NULL, &iLengthCode);
    if(0 != iRet)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed!");
        assert(false);
        return -1;
    }

    *aiSize = iLengthCode;
    return 0;
}
/* Renegotiate the source pad caps when the negotiated geometry/framerate
 * differs from what was last advertised downstream.
 *
 * For H.264 the avcC atom from the sample buffer's format description is
 * attached as codec_data.  Fix over the original: the extension-atoms
 * dictionary and the avcC entry are now NULL-checked before use — a sample
 * buffer without an avcC extension previously crashed in CFDataGetLength();
 * caps are still pushed (without codec_data) in that case.
 *
 * Returns TRUE when the caps event was accepted (or nothing changed). */
static gboolean
gst_vtenc_negotiate_downstream (GstVTEnc * self, CMSampleBufferRef sbuf)
{
  gboolean result;
  GstCaps *caps;
  GstStructure *s;

  /* Fast path: nothing changed since the last caps push. */
  if (self->caps_width == self->negotiated_width &&
      self->caps_height == self->negotiated_height &&
      self->caps_fps_n == self->negotiated_fps_n &&
      self->caps_fps_d == self->negotiated_fps_d) {
    return TRUE;
  }

  caps = gst_pad_get_pad_template_caps (self->srcpad);
  caps = gst_caps_make_writable (caps);
  s = gst_caps_get_structure (caps, 0);
  gst_structure_set (s,
      "width", G_TYPE_INT, self->negotiated_width,
      "height", G_TYPE_INT, self->negotiated_height,
      "framerate", GST_TYPE_FRACTION,
      self->negotiated_fps_n, self->negotiated_fps_d, NULL);

  if (self->details->format_id == kVTFormatH264) {
    CMFormatDescriptionRef fmt;
    CFDictionaryRef atoms;
    CFStringRef avccKey;
    CFDataRef avcc;
    gpointer codec_data;
    gsize codec_data_size;
    GstBuffer *codec_data_buf;

    fmt = CMSampleBufferGetFormatDescription (sbuf);
    atoms = CMFormatDescriptionGetExtension (fmt,
        kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms);
    /* NULL-safe: the extension dictionary or the avcC atom may be absent. */
    if (atoms != NULL) {
      avccKey = CFStringCreateWithCString (NULL, "avcC", kCFStringEncodingUTF8);
      avcc = CFDictionaryGetValue (atoms, avccKey);
      CFRelease (avccKey);
      if (avcc != NULL) {
        codec_data_size = CFDataGetLength (avcc);
        codec_data = g_malloc (codec_data_size);
        CFDataGetBytes (avcc, CFRangeMake (0, codec_data_size), codec_data);
        /* Buffer takes ownership of the g_malloc'd bytes. */
        codec_data_buf = gst_buffer_new_wrapped (codec_data, codec_data_size);
        gst_structure_set (s, "codec_data", GST_TYPE_BUFFER, codec_data_buf,
            NULL);
        gst_buffer_unref (codec_data_buf);
      }
    }
  }

  result = gst_pad_push_event (self->srcpad, gst_event_new_caps (caps));
  gst_caps_unref (caps);

  /* Remember what was advertised so the fast path above can short-circuit. */
  self->caps_width = self->negotiated_width;
  self->caps_height = self->negotiated_height;
  self->caps_fps_n = self->negotiated_fps_n;
  self->caps_fps_d = self->negotiated_fps_d;

  return result;
}
void vtCallback(void *outputCallbackRefCon, void *sourceFrameRefCon, OSStatus status, VTEncodeInfoFlags infoFlags, CMSampleBufferRef sampleBuffer ) { CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sampleBuffer); CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, false); CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer); bool isKeyframe = false; if(attachments != NULL) { CFDictionaryRef attachment; CFBooleanRef dependsOnOthers; attachment = (CFDictionaryRef)CFArrayGetValueAtIndex(attachments, 0); dependsOnOthers = (CFBooleanRef)CFDictionaryGetValue(attachment, kCMSampleAttachmentKey_DependsOnOthers); isKeyframe = (dependsOnOthers == kCFBooleanFalse); } if(isKeyframe) { // Send the SPS and PPS. CMFormatDescriptionRef format = CMSampleBufferGetFormatDescription(sampleBuffer); size_t spsSize, ppsSize; size_t parmCount; const uint8_t* sps, *pps; CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 0, &sps, &spsSize, &parmCount, nullptr ); CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 1, &pps, &ppsSize, &parmCount, nullptr ); std::unique_ptr<uint8_t[]> sps_buf (new uint8_t[spsSize + 4]) ; std::unique_ptr<uint8_t[]> pps_buf (new uint8_t[ppsSize + 4]) ; memcpy(&sps_buf[4], sps, spsSize); spsSize+=4 ; memcpy(&sps_buf[0], &spsSize, 4); memcpy(&pps_buf[4], pps, ppsSize); ppsSize += 4; memcpy(&pps_buf[0], &ppsSize, 4); ((H264Encode*)outputCallbackRefCon)->compressionSessionOutput((uint8_t*)sps_buf.get(),spsSize, pts.value); ((H264Encode*)outputCallbackRefCon)->compressionSessionOutput((uint8_t*)pps_buf.get(),ppsSize, pts.value); } char* bufferData; size_t size; CMBlockBufferGetDataPointer(block, 0, NULL, &size, &bufferData); ((H264Encode*)outputCallbackRefCon)->compressionSessionOutput((uint8_t*)bufferData,size, pts.value); }
/* VTCompressionSession output callback (mediastreamer2 flavor).
 * Converts the AVCC sample into NAL units, prepends SPS/PPS on keyframes,
 * and packs everything per RFC 3984 into ctx->queue.
 *
 * Fix over the original: the arguments of
 * CMVideoFormatDescriptionGetH264ParameterSetAtIndex were encoding-garbled
 * ("&para;" entities instead of "&para…" address-of expressions), which did
 * not compile; the intended &parameter_set / &parameter_set_size /
 * &parameter_set_count are restored. */
static void h264_enc_output_cb(VTH264EncCtx *ctx, void *sourceFrameRefCon, OSStatus status, VTEncodeInfoFlags infoFlags, CMSampleBufferRef sampleBuffer) {
	MSQueue nalu_queue;
	CMBlockBufferRef block_buffer;
	size_t read_size, frame_size;
	bool_t is_keyframe = FALSE;
	mblk_t *nalu;
	int i;

	/* Per the VideoToolbox contract the buffer may be NULL on failure. */
	if(sampleBuffer == NULL || status != noErr) {
		ms_error("VideoToolbox: could not encode frame: error %d", status);
		return;
	}

	ms_mutex_lock(&ctx->mutex);
	if(ctx->is_configured) {
		ms_queue_init(&nalu_queue);
		block_buffer = CMSampleBufferGetDataBuffer(sampleBuffer);
		frame_size = CMBlockBufferGetDataLength(block_buffer);

		/* Walk every (possibly non-contiguous) chunk of the block buffer and
		 * split the AVCC stream into individual NAL units. */
		for(i=0, read_size=0; read_size < frame_size; i++) {
			char *chunk;
			size_t chunk_size;
			int idr_count;
			CMBlockBufferGetDataPointer(block_buffer, i, &chunk_size, NULL, &chunk);
			ms_h264_stream_to_nalus((uint8_t *)chunk, chunk_size, &nalu_queue, &idr_count);
			if(idr_count) is_keyframe = TRUE;
			read_size += chunk_size;
		}

		if(is_keyframe) {
			/* Prepend all parameter sets (SPS, PPS, ...) before the first NALU.
			 * parameter_set_count is written by the first call, so the do-while
			 * is safe: index 0 (the SPS) always exists for H.264. */
			mblk_t *insertion_point = ms_queue_peek_first(&nalu_queue);
			const uint8_t *parameter_set;
			size_t parameter_set_size;
			size_t parameter_set_count;
			CMFormatDescriptionRef format_desc = CMSampleBufferGetFormatDescription(sampleBuffer);
			i=0;
			do {
				CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format_desc, i, &parameter_set, &parameter_set_size, &parameter_set_count, NULL);
				nalu = allocb(parameter_set_size, 0);
				memcpy(nalu->b_wptr, parameter_set, parameter_set_size);
				nalu->b_wptr += parameter_set_size;
				ms_queue_insert(&nalu_queue, insertion_point, nalu);
				i++;
			} while(i < parameter_set_count);
		}

		/* 90 kHz RTP clock derived from the ticker time (ms * 90). */
		rfc3984_pack(&ctx->packer_ctx, &nalu_queue, &ctx->queue, (uint32_t)(ctx->f->ticker->time * 90));
	}
	ms_mutex_unlock(&ctx->mutex);
}
//Extract the H.264 parameter sets from the sample buffer and store them as
//the encoder's extradata (m_pExtraData / m_iExtraDataSize).
//Returns 0 on success, -1 on failure.
//
//Fixes over the original: a NULL sampleBuffer is rejected up front (matching
//GetLengthCodeSize); any previously stored extradata is released before
//reallocation; and on CopyParamSets failure the freshly allocated buffer is
//freed and the size reset, instead of leaking and leaving stale state.
int CVideoEncodeVt::SetExtraData(CMSampleBufferRef sampleBuffer)
{
    if(NULL == sampleBuffer)
    {
        return -1;
    }

    //Get the format description from the sample buffer
    CMVideoFormatDescriptionRef videoFmtDesc;
    videoFmtDesc = CMSampleBufferGetFormatDescription(sampleBuffer);
    if(NULL == videoFmtDesc)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CMSampleBufferGetFormatDescription failed!");
        assert(false);
        return -1;
    }

    //Get the total size of the parameter sets
    int iParamsSize = 0;
    int ret = GetParamSize(videoFmtDesc, &iParamsSize);
    if(ret < 0)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "GetParamSize failed!");
        assert(false);
        return -1;
    }

    //Release any extradata stored by a previous call before reallocating
    av_freep(&m_pExtraData);
    m_iExtraDataSize = 0;

    //Allocate a zeroed buffer to hold the parameter sets
    m_pExtraData = (uint8_t*)av_mallocz(iParamsSize);
    if(NULL == m_pExtraData)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "m_pExtraData alloc failed!");
        assert(false);
        return -1;
    }
    m_iExtraDataSize = iParamsSize;

    //Copy the parameter sets into the buffer
    //(the <= 0 check is preserved from the original; CopyParamSets apparently
    // reports the number of bytes written — TODO confirm its contract)
    ret = CopyParamSets(videoFmtDesc, m_pExtraData, m_iExtraDataSize);
    if(ret <= 0)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CopyParamSets failed!");
        //Do not leak the buffer or leave a stale size on the failure path
        av_freep(&m_pExtraData);
        m_iExtraDataSize = 0;
        assert(false);
        return -1;
    }

    return 0;
}
//Convert an encoded VideoToolbox sample buffer into an AVPacket:
//replaces the AVCC length prefixes with start codes, prepends the SPS/PPS
//header on keyframes, and fills in pts/dts.
//Returns 0 on success, -1 on failure (the packet is freed on failure).
//NOTE(review): "Pakcet" in the name is a typo kept for caller compatibility.
int CVideoEncodeVt::CopySampleBufferToAVPakcet(CMSampleBufferRef sampleBuffer, AVPacket* apPacket)
{
    //Get the AVCC length-code size (bytes per NALU length prefix)
    int iLengthCodeSize = 0;
    int ret = GetLengthCodeSize(sampleBuffer, &iLengthCodeSize);
    if(ret < 0)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "GetLengthCodeSize failed!");
        assert(false);
        return -1;
    }

    int iHeaderSize = 0;
    bool bIsKeyFrame = IsKeyFrame(sampleBuffer);
    //Only keyframes get the parameter-set header prepended
    bool bAddHeader = bIsKeyFrame;

    CMVideoFormatDescriptionRef videoFmtDesc = NULL;

    //A header needs to be added
    if (bAddHeader)
    {
        videoFmtDesc = CMSampleBufferGetFormatDescription(sampleBuffer);
        if (NULL == videoFmtDesc)
        {
            CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CMSampleBufferGetFormatDescription failed!");
            assert(false);
            return -1;
        }

        //Get the header (parameter sets) size
        ret = GetParamSize(videoFmtDesc, &iHeaderSize);
        if(ret < 0)
        {
            CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "GetParamSize failed!");
            assert(false);
            return -1;
        }
    }

    //Number of NALUs in the sample
    int iNaluCnt = 0;
    ret = GetNaluCnt(sampleBuffer, iLengthCodeSize, &iNaluCnt);
    if(ret < 0)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "GetNaluCnt failed!");
        assert(false);
        return -1;
    }

    int iSampleSize = (int)CMSampleBufferGetTotalSampleSize(sampleBuffer);
    //Packet = header + payload, adjusted for swapping each length prefix
    //for a start code (sizeof(StartCode) - iLengthCodeSize per NALU)
    int iPacketSize = iHeaderSize + iSampleSize + iNaluCnt * ((int)sizeof(StartCode) - (int)iLengthCodeSize);

    //Drop any previous payload before allocating the new one
    av_free_packet(apPacket);
    ret = av_new_packet(apPacket, iPacketSize);
    if(ret < 0)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "av_new_packet failed!");
        assert(false);
        return -1;
    }

    do
    {
        //Add the header
        if(bAddHeader)
        {
            //Copy the parameter sets into the header region
            //NOTE(review): the capacity passed is the whole packet size, not
            //iHeaderSize — presumably CopyParamSets writes exactly iHeaderSize
            //bytes; confirm against its implementation.
            ret = CopyParamSets(videoFmtDesc, apPacket->data, iPacketSize);
            if(ret < 0)
            {
                CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CopyParamSets failed!");
                assert(false);
                break;
            }
        }

        //Copy each NALU (start-code form) after the header
        ret = CopyNalus(sampleBuffer, iLengthCodeSize, apPacket->data + iHeaderSize, apPacket->size - iHeaderSize);
        if(ret < 0)
        {
            CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CopyNalus failed!");
            assert(false);
            break;
        }

        if (bIsKeyFrame)
        {
            apPacket->flags |= AV_PKT_FLAG_KEY;
        }

        //Set timestamps
        CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
        CMTime dts = CMSampleBufferGetDecodeTimeStamp(sampleBuffer);
        //Without B-frames dts can safely mirror pts; with B-frames a missing
        //dts is a hard error
        if (CMTIME_IS_INVALID(dts))
        {
            if (!m_bHasBFrames)
            {
                dts = pts;
            }
            else
            {
                CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "dts is invalid!");
                assert(false);
                break;
            }
        }

        int64_t iDtsDelta = m_iDtsDelta >= 0 ? m_iDtsDelta : 0;
        //NOTE(review): dividing CMTime.value by the frame rate assumes the
        //timestamps were created with timescale == m_iFrameRate (frame index
        //units) — confirm against where the session timestamps are built.
        apPacket->pts = pts.value / m_iFrameRate;
        apPacket->dts = dts.value / m_iFrameRate - iDtsDelta;
        apPacket->size = iPacketSize;

        return 0;
    }while(0);

    //Failure inside the do-block: release the packet before reporting
    av_free_packet(apPacket);
    return -1;
}