/*
 * Push one video frame into the VideoToolbox compression session.
 *
 * `data` actually carries a CVPixelBufferRef (cast back below); `size` is
 * unused here.  metadata.timestampDelta is in milliseconds.
 *
 * Fixes vs. original:
 *  - CFDictionaryCreateMutable() can return NULL; calling
 *    CFDictionaryAddValue() on NULL would crash, so the result is checked.
 *  - The CMTime timescale is an int32_t; pass the integer literal 1000
 *    instead of the double literal 1000. (implicit narrowing conversion).
 */
void H264Encode::pushBuffer(const uint8_t *const data, size_t size, videocore::IMetadata &metadata)
{
#if VERSION_OK
    if(m_compressionSession) {
        m_encodeMutex.lock();
        VTCompressionSessionRef session = (VTCompressionSessionRef)m_compressionSession;

        // timestamp is in ms, hence the 1000 Hz timescale.
        CMTime pts = CMTimeMake(metadata.timestampDelta, 1000);
        CMTime dur = CMTimeMake(1, m_fps);
        VTEncodeInfoFlags flags;

        CFMutableDictionaryRef frameProps = NULL;

        if(m_forceKeyframe) {
            s_forcedKeyframePTS = pts.value;

            frameProps = CFDictionaryCreateMutable(kCFAllocatorDefault, 1,
                                                   &kCFTypeDictionaryKeyCallBacks,
                                                   &kCFTypeDictionaryValueCallBacks);
            // Guard against allocation failure before adding to the dictionary.
            if(frameProps != NULL) {
                CFDictionaryAddValue(frameProps, kVTEncodeFrameOptionKey_ForceKeyFrame, kCFBooleanTrue);
            }
        }

        VTCompressionSessionEncodeFrame(session, (CVPixelBufferRef)data, pts, dur,
                                        frameProps, NULL, &flags);

        if(m_forceKeyframe) {
            if(frameProps != NULL) {
                CFRelease(frameProps);
            }
            m_forceKeyframe = false;
        }

        m_encodeMutex.unlock();
    }
#endif
}
// Helper to fill in a timestamp structure. static CMSampleTimingInfo TimingInfoFromSample(MediaRawData* aSample) { CMSampleTimingInfo timestamp; timestamp.duration = CMTimeMake(aSample->mDuration, USECS_PER_S); timestamp.presentationTimeStamp = CMTimeMake(aSample->mTime, USECS_PER_S); timestamp.decodeTimeStamp = CMTimeMake(aSample->mTimecode, USECS_PER_S); return timestamp; }
// Helper to fill in a timestamp structure. static CMSampleTimingInfo TimingInfoFromSample(mp4_demuxer::MP4Sample* aSample) { CMSampleTimingInfo timestamp; timestamp.duration = CMTimeMake(aSample->duration, USECS_PER_S); timestamp.presentationTimeStamp = CMTimeMake(aSample->composition_timestamp, USECS_PER_S); timestamp.decodeTimeStamp = CMTimeMake(aSample->decode_timestamp, USECS_PER_S); return timestamp; }
/* Wrap a GstBuffer into a CMSampleBufferRef suitable for
 * VTDecompressionSessionDecodeFrame().  Returns NULL (and posts an element
 * error) on failure.  The caller owns the returned sample buffer. */
static CMSampleBufferRef
cm_sample_buffer_from_gst_buffer (GstVtdec * vtdec, GstBuffer * buf)
{
  OSStatus status;
  CMBlockBufferRef block = NULL;
  CMSampleBufferRef sample = NULL;
  CMSampleTimingInfo timing[1];

  g_return_val_if_fail (vtdec->format_description, NULL);

  /* wrap the GstBuffer payload into a CMBlockBuffer */
  block = cm_block_buffer_from_gst_buffer (buf, GST_MAP_READ);
  if (block == NULL) {
    GST_ELEMENT_ERROR (vtdec, RESOURCE, FAILED, (NULL),
        ("failed creating CMBlockBuffer"));
    return NULL;
  }

  /* GStreamer times are in nanoseconds, hence the GST_SECOND timescale;
   * invalid fields map to kCMTimeInvalid */
  timing[0].duration = GST_BUFFER_DURATION_IS_VALID (buf) ?
      CMTimeMake (GST_BUFFER_DURATION (buf), GST_SECOND) : kCMTimeInvalid;
  timing[0].presentationTimeStamp = GST_BUFFER_PTS_IS_VALID (buf) ?
      CMTimeMake (GST_BUFFER_PTS (buf), GST_SECOND) : kCMTimeInvalid;
  timing[0].decodeTimeStamp = GST_BUFFER_DTS_IS_VALID (buf) ?
      CMTimeMake (GST_BUFFER_DTS (buf), GST_SECOND) : kCMTimeInvalid;

  /* one sample, one timing entry, no per-sample sizes */
  status = CMSampleBufferCreate (NULL, block, TRUE, 0, 0,
      vtdec->format_description, 1, 1, timing, 0, NULL, &sample);
  CFRelease (block);
  if (status != noErr) {
    GST_ELEMENT_ERROR (vtdec, RESOURCE, FAILED, (NULL),
        ("CMSampleBufferCreate returned %d", (int) status));
    return NULL;
  }

  return sample;
}
/* Build a CMSampleBufferRef around the payload of a GstBuffer.
 *
 * Fix vs. original: on the first error path `bbuf` is still NULL, and
 * CFRelease(NULL) is a hard crash (unlike free(NULL)); the release at the
 * `beach` label is now guarded.
 */
static CMSampleBufferRef
gst_vtdec_sample_buffer_from (GstVTDec * self, GstBuffer * buf)
{
  OSStatus status;
  CMBlockBufferRef bbuf = NULL;
  CMSampleBufferRef sbuf = NULL;
  GstMapInfo map;
  CMSampleTimingInfo sample_timing;
  CMSampleTimingInfo time_array[1];

  g_assert (self->fmt_desc != NULL);

  gst_buffer_map (buf, &map, GST_MAP_READ);
  /* NOTE(review): kCFAllocatorNull means the block buffer references
   * map.data without copying, yet the buffer is unmapped immediately after.
   * This relies on the mapped pointer staying valid for the lifetime of
   * `buf` (true for plain malloc-backed GstMemory, not guaranteed in
   * general) — confirm upstream memory types, or copy the data instead. */
  status = CMBlockBufferCreateWithMemoryBlock (NULL, map.data,
      (gint64) map.size, kCFAllocatorNull, NULL, 0, (gint64) map.size,
      FALSE, &bbuf);
  gst_buffer_unmap (buf, &map);
  if (status != noErr)
    goto error;

  /* timestamps are passed through in nanoseconds (timescale 1) */
  sample_timing.duration = CMTimeMake (GST_BUFFER_DURATION (buf), 1);
  sample_timing.presentationTimeStamp = CMTimeMake (GST_BUFFER_PTS (buf), 1);
  sample_timing.decodeTimeStamp = CMTimeMake (GST_BUFFER_DTS (buf), 1);
  time_array[0] = sample_timing;

  status = CMSampleBufferCreate (NULL, bbuf, TRUE, 0, 0, self->fmt_desc,
      1, 1, time_array, 0, NULL, &sbuf);
  if (status != noErr)
    goto error;

beach:
  /* CFRelease(NULL) crashes; bbuf is still NULL when block-buffer creation
   * failed above. */
  if (bbuf != NULL)
    CFRelease (bbuf);
  return sbuf;

error:
  GST_ERROR_OBJECT (self, "err %d", status);
  goto beach;
}
/* Feed one input buffer to the VideoToolbox compression session and push any
 * resulting encoded buffers downstream.
 *
 * Fixes vs. original:
 *  - the per-plane stride was assigned twice in the plane loop (duplicate
 *    statement removed);
 *  - the inner `size_t i` shadowed the outer `guint i` (renamed plane_idx);
 *  - the push loop declared `GstBuffer *buf`, shadowing the parameter that
 *    had already been unreffed (renamed outbuf).
 */
static GstFlowReturn
gst_vtenc_encode_frame (GstVTEnc * self, GstBuffer * buf)
{
  GstVTApi *vt = self->ctx->vt;
  CMTime ts, duration;
  GstCoreMediaMeta *meta;
  CVPixelBufferRef pbuf = NULL;
  VTStatus vt_status;
  GstFlowReturn ret = GST_FLOW_OK;
  guint i;

  self->cur_inbuf = buf;

  /* millisecond timescale for the compression session */
  ts = CMTimeMake (GST_TIME_AS_MSECONDS (GST_BUFFER_TIMESTAMP (buf)), 1000);
  duration =
      CMTimeMake (GST_TIME_AS_MSECONDS (GST_BUFFER_DURATION (buf)), 1000);

  /* reuse an existing pixel buffer when the input already carries one */
  meta = gst_buffer_get_core_media_meta (buf);
  if (meta != NULL) {
    pbuf = gst_core_media_buffer_get_pixel_buffer (buf);
  }

  if (pbuf == NULL) {
    GstVTEncFrame *frame;
    CVReturn cv_ret;

    frame = gst_vtenc_frame_new (buf, &self->video_info);
    if (!frame)
      goto cv_error;

    {
      const size_t num_planes = GST_VIDEO_FRAME_N_PLANES (&frame->videoframe);
      void *plane_base_addresses[GST_VIDEO_MAX_PLANES];
      size_t plane_widths[GST_VIDEO_MAX_PLANES];
      size_t plane_heights[GST_VIDEO_MAX_PLANES];
      size_t plane_bytes_per_row[GST_VIDEO_MAX_PLANES];
      OSType pixel_format_type;
      size_t plane_idx;

      for (plane_idx = 0; plane_idx < num_planes; plane_idx++) {
        plane_base_addresses[plane_idx] =
            GST_VIDEO_FRAME_PLANE_DATA (&frame->videoframe, plane_idx);
        plane_widths[plane_idx] =
            GST_VIDEO_FRAME_COMP_WIDTH (&frame->videoframe, plane_idx);
        plane_heights[plane_idx] =
            GST_VIDEO_FRAME_COMP_HEIGHT (&frame->videoframe, plane_idx);
        plane_bytes_per_row[plane_idx] =
            GST_VIDEO_FRAME_COMP_STRIDE (&frame->videoframe, plane_idx);
      }

      switch (GST_VIDEO_INFO_FORMAT (&self->video_info)) {
        case GST_VIDEO_FORMAT_I420:
          pixel_format_type = kCVPixelFormatType_420YpCbCr8Planar;
          break;
        case GST_VIDEO_FORMAT_NV12:
          pixel_format_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
          break;
        default:
          goto cv_error;
      }

      /* wrap the mapped frame planes; gst_pixel_buffer_release_cb frees
       * `frame` once VideoToolbox is done with the pixel buffer */
      cv_ret = CVPixelBufferCreateWithPlanarBytes (NULL,
          self->negotiated_width, self->negotiated_height,
          pixel_format_type, frame,
          GST_VIDEO_FRAME_SIZE (&frame->videoframe),
          num_planes, plane_base_addresses, plane_widths, plane_heights,
          plane_bytes_per_row, gst_pixel_buffer_release_cb, frame, NULL,
          &pbuf);
      if (cv_ret != kCVReturnSuccess) {
        gst_vtenc_frame_free (frame);
        goto cv_error;
      }
    }
  }

  GST_OBJECT_LOCK (self);

  self->expect_keyframe = CFDictionaryContainsKey (self->options,
      *(vt->kVTEncodeFrameOptionKey_ForceKeyFrame));
  if (self->expect_keyframe)
    gst_vtenc_clear_cached_caps_downstream (self);

  vt_status = self->ctx->vt->VTCompressionSessionEncodeFrame (self->session,
      pbuf, ts, duration, self->options, NULL, NULL);

  if (vt_status != 0) {
    GST_WARNING_OBJECT (self,
        "VTCompressionSessionEncodeFrame returned %d", vt_status);
  }

  /* drain the encoder synchronously so output buffers are collected below */
  self->ctx->vt->VTCompressionSessionCompleteFrames (self->session,
      kCMTimeInvalid);

  GST_OBJECT_UNLOCK (self);

  CVPixelBufferRelease (pbuf);
  self->cur_inbuf = NULL;
  gst_buffer_unref (buf);

  if (self->cur_outbufs->len > 0) {
    meta =
        gst_buffer_get_core_media_meta (g_ptr_array_index (self->cur_outbufs,
            0));
    if (!gst_vtenc_negotiate_downstream (self, meta->sample_buf))
      ret = GST_FLOW_NOT_NEGOTIATED;
  }

  for (i = 0; i != self->cur_outbufs->len; i++) {
    GstBuffer *outbuf = g_ptr_array_index (self->cur_outbufs, i);
    if (ret == GST_FLOW_OK) {
      ret = gst_pad_push (self->srcpad, outbuf);
    } else {
      gst_buffer_unref (outbuf);
    }
  }
  g_ptr_array_set_size (self->cur_outbufs, 0);

  return ret;

cv_error:
  {
    self->cur_inbuf = NULL;
    gst_buffer_unref (buf);
    return GST_FLOW_ERROR;
  }
}
/* Submit one media buffer to the VideoToolbox decompression session and emit
 * any reordered frames that have become displayable.
 *
 * The lock/unlock dance around emit_frame() at the bottom is deliberate:
 * the list is mutated under vtbd_mutex, but emit_frame() is called with the
 * mutex dropped — restyling the ordering here would be risky, so the code is
 * left byte-identical and only annotated. */
static void
vtb_decode(struct media_codec *mc, struct video_decoder *vd,
           struct media_queue *mq, struct media_buf *mb, int reqsize)
{
  vtb_decoder_t *vtbd = mc->opaque;
  VTDecodeInfoFlags infoflags;
  int flags = kVTDecodeFrame_EnableAsynchronousDecompression |
    kVTDecodeFrame_EnableTemporalProcessing;
  OSStatus status;
  CMBlockBufferRef block_buf;
  CMSampleBufferRef sample_buf;

  vtbd->vtbd_vd = vd;

  /* Wrap mb_data without copying (kCFAllocatorNull) — the media_buf must
   * outlive the decode call. */
  status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,
                                              mb->mb_data, mb->mb_size,
                                              kCFAllocatorNull,
                                              NULL, 0, mb->mb_size, 0,
                                              &block_buf);
  if(status) {
    TRACE(TRACE_ERROR, "VTB", "Data buffer allocation error %d", status);
    return;
  }

  /* Timestamps are in microseconds (1000000 Hz timescale). */
  CMSampleTimingInfo ti;
  ti.duration              = CMTimeMake(mb->mb_duration, 1000000);
  ti.presentationTimeStamp = CMTimeMake(mb->mb_pts, 1000000);
  ti.decodeTimeStamp       = CMTimeMake(mb->mb_dts, 1000000);

  status = CMSampleBufferCreate(kCFAllocatorDefault, block_buf, TRUE,
                                0, 0, vtbd->vtbd_fmt, 1, 1, &ti, 0, NULL,
                                &sample_buf);

  CFRelease(block_buf);
  if(status) {
    TRACE(TRACE_ERROR, "VTB", "Sample buffer allocation error %d", status);
    return;
  }

  /* Per-frame metadata lives in a fixed-size reorder ring; the slot pointer
   * is handed to the decoder as the frame's opaque cookie. */
  void *frame_opaque = &vd->vd_reorder[vd->vd_reorder_ptr];
  copy_mbm_from_mb(frame_opaque, mb);
  vd->vd_reorder_ptr = (vd->vd_reorder_ptr + 1) & VIDEO_DECODER_REORDER_MASK;

  if(mb->mb_skip)
    flags |= kVTDecodeFrame_DoNotOutputFrame;

  status = VTDecompressionSessionDecodeFrame(vtbd->vtbd_session, sample_buf,
                                             flags, frame_opaque, &infoflags);
  CFRelease(sample_buf);
  if(status) {
    TRACE(TRACE_ERROR, "VTB", "Decoding error %d", status);
  }

  hts_mutex_lock(&vtbd->vtbd_mutex);

  /* Flush: emit every queued frame with PTS <= vtbd_flush_to.  The mutex is
   * released around emit_frame() (which may block on the output queue) and
   * re-taken before touching the list again; the frame was already unlinked,
   * so this is safe.  NOTE(review): vf_buf is released *after* emit_frame —
   * presumably emit_frame copies/retains what it needs; confirm before
   * changing. */
  if(vtbd->vtbd_flush_to != PTS_UNSET) {
    vtb_frame_t *vf;
    while((vf = LIST_FIRST(&vtbd->vtbd_frames)) != NULL) {
      if(vtbd->vtbd_flush_to < vf->vf_mbm.mbm_pts)
	break;
      LIST_REMOVE(vf, vf_link);
      hts_mutex_unlock(&vtbd->vtbd_mutex);
      emit_frame(vtbd, vf, mq);
      hts_mutex_lock(&vtbd->vtbd_mutex);
      CFRelease(vf->vf_buf);
      free(vf);
    }
  }
  hts_mutex_unlock(&vtbd->vtbd_mutex);
}
int CVideoEncodeVt::EncodeFrame(const AVFrame* apFrame, VTCompressionSessionRef aSession) { int ret = 0; CVPixelBufferRef pixelBuffer = NULL; CFDictionaryRef frameDict = NULL; do { //创建pixel buffer ret = CreateCVPixelBuffer(apFrame, &pixelBuffer, aSession); if(ret < 0) { CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CreateCVPixelBuffer failed!"); //assert(false); break; } //判断是否关键帧 if (apFrame->pict_type == AV_PICTURE_TYPE_I) { const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame }; const void *vals[] = { kCFBooleanTrue }; frameDict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL); if(NULL == frameDict) { CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CFDictionaryCreate failed!"); assert(false); break; } } //时间戳 CMTime time = CMTimeMake(apFrame->pts, m_iFrameRate); //编码 ret = VTCompressionSessionEncodeFrame(aSession, pixelBuffer, time, kCMTimeInvalid, frameDict, NULL, NULL); if(ret < 0) { CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "VTCompressionSessionEncodeFrame failed!"); assert(false); break; } }while(0); if(NULL != pixelBuffer) { CFRelease(pixelBuffer); pixelBuffer = NULL; } if(NULL != frameDict) { CFRelease(frameDict); frameDict = NULL; } return ret; }
/* MSFilter process callback of the VideoToolbox H264 decoder: unpack RTP,
 * track SPS/PPS, feed NALUs (length-prefixed) to the decompression session,
 * and push decoded frames out.
 *
 * Fix vs. original: discarded NALUs are mblk_t chunks produced by the
 * rfc3984 unpacker and must be released with freemsg(), not ms_free()
 * (wrong deallocator for reference-counted message blocks). */
static void h264_dec_process(MSFilter *f) {
	VTH264DecCtx *ctx = (VTH264DecCtx *)f->data;
	mblk_t *pkt;
	mblk_t *nalu;
	mblk_t *pixbuf;
	MSQueue q_nalus;
	MSQueue q_nalus2;
	CMBlockBufferRef stream = NULL;
	CMSampleBufferRef sample = NULL;
	CMSampleTimingInfo timing_info;
	MSPicture pixbuf_desc;
	OSStatus status;
	MSList *parameter_sets = NULL;
	bool_t unpacking_failed;

	ms_queue_init(&q_nalus);
	ms_queue_init(&q_nalus2);

	// unpack RTP packet
	unpacking_failed = FALSE;
	while((pkt = ms_queue_get(f->inputs[0]))) {
		unpacking_failed |= (rfc3984_unpack(&ctx->unpacker, pkt, &q_nalus) != 0);
	}
	if(unpacking_failed) {
		ms_error("VideoToolboxDecoder: error while unpacking RTP packets");
		goto fail;
	}

	// Pull out SPSs and PPSs and put them into the filter context if necessary
	while((nalu = ms_queue_get(&q_nalus))) {
		MSH264NaluType nalu_type = ms_h264_nalu_get_type(nalu);
		if(nalu_type == MSH264NaluTypeSPS || nalu_type == MSH264NaluTypePPS) {
			parameter_sets = ms_list_append(parameter_sets, nalu);
		} else if(ctx->format_desc || parameter_sets) {
			ms_queue_put(&q_nalus2, nalu);
		} else {
			/* FIX: nalu is an mblk_t; release it with freemsg(), not ms_free() */
			freemsg(nalu);
		}
	}
	if(parameter_sets) {
		CMFormatDescriptionRef last_format = ctx->format_desc ? CFRetain(ctx->format_desc) : NULL;
		h264_dec_update_format_description(ctx, parameter_sets);
		parameter_sets = ms_list_free_with_data(parameter_sets, (void (*)(void *))freemsg);
		if(ctx->format_desc == NULL) goto fail;
		if(last_format) {
			CMVideoDimensions last_vsize = CMVideoFormatDescriptionGetDimensions(last_format);
			CMVideoDimensions vsize = CMVideoFormatDescriptionGetDimensions(ctx->format_desc);
			/* a resolution change requires a brand new decoding session */
			if(last_vsize.width != vsize.width || last_vsize.height != vsize.height) {
				ms_message("VideoToolboxDecoder: new encoded video size %dx%d -> %dx%d",
						   (int)last_vsize.width, (int)last_vsize.height, (int)vsize.width, (int)vsize.height);
				ms_message("VideoToolboxDecoder: destroying decoding session");
				VTDecompressionSessionInvalidate(ctx->session);
				CFRelease(ctx->session);
				ctx->session = NULL;
			}
			CFRelease(last_format);
		}
	}

	/* Stops proccessing if no IDR has been received yet */
	if(ctx->format_desc == NULL) {
		ms_warning("VideoToolboxDecoder: no IDR packet has been received yet");
		goto fail;
	}

	/* Initializes the decoder if it has not be done yet or reconfigure it when the size of the encoded video change */
	if(ctx->session == NULL) {
		if(!h264_dec_init_decoder(ctx)) {
			ms_error("VideoToolboxDecoder: failed to initialized decoder");
			goto fail;
		}
	}

	// Pack all nalus in a VTBlockBuffer: each NALU is prefixed with its
	// 4-byte big-endian length, as required by the AVCC bitstream format.
	CMBlockBufferCreateEmpty(NULL, 0, kCMBlockBufferAssureMemoryNowFlag, &stream);
	while((nalu = ms_queue_get(&q_nalus2))) {
		CMBlockBufferRef nalu_block;
		size_t nalu_block_size = msgdsize(nalu) + H264_NALU_HEAD_SIZE;
		uint32_t nalu_size = htonl(msgdsize(nalu));

		CMBlockBufferCreateWithMemoryBlock(NULL, NULL, nalu_block_size, NULL, NULL, 0, nalu_block_size, kCMBlockBufferAssureMemoryNowFlag, &nalu_block);
		CMBlockBufferReplaceDataBytes(&nalu_size, nalu_block, 0, H264_NALU_HEAD_SIZE);
		CMBlockBufferReplaceDataBytes(nalu->b_rptr, nalu_block, H264_NALU_HEAD_SIZE, msgdsize(nalu));
		CMBlockBufferAppendBufferReference(stream, nalu_block, 0, nalu_block_size, 0);
		CFRelease(nalu_block);
		freemsg(nalu);
	}
	if(!CMBlockBufferIsEmpty(stream)) {
		timing_info.duration = kCMTimeInvalid;
		timing_info.presentationTimeStamp = CMTimeMake(f->ticker->time, 1000);
		timing_info.decodeTimeStamp = CMTimeMake(f->ticker->time, 1000);
		CMSampleBufferCreate(
			NULL, stream, TRUE, NULL, NULL,
			ctx->format_desc, 1, 1, &timing_info,
			0, NULL, &sample);

		status = VTDecompressionSessionDecodeFrame(ctx->session, sample, 0, NULL, NULL);
		CFRelease(sample);
		if(status != noErr) {
			CFRelease(stream);
			ms_error("VideoToolboxDecoder: error while passing encoded frames to the decoder: %d", status);
			if(status == kVTInvalidSessionErr) {
				h264_dec_uninit_decoder(ctx);
			}
			goto fail;
		}
	}
	CFRelease(stream);
	goto put_frames_out;

fail:
	ms_filter_notify_no_arg(f, MS_VIDEO_DECODER_DECODING_ERRORS);
	ms_filter_lock(f);
	if(ctx->enable_avpf) {
		ms_message("VideoToolboxDecoder: sending PLI");
		ms_filter_notify_no_arg(f, MS_VIDEO_DECODER_SEND_PLI);
	}
	ms_filter_unlock(f);

put_frames_out:
	// Transfer decoded frames in the output queue
	ms_mutex_lock(&ctx->mutex);
	while((pixbuf = ms_queue_get(&ctx->queue))) {
		ms_mutex_unlock(&ctx->mutex);
		ms_yuv_buf_init_from_mblk(&pixbuf_desc, pixbuf);
		ms_filter_lock(f);
		if(pixbuf_desc.w != ctx->vsize.width || pixbuf_desc.h != ctx->vsize.height) {
			ctx->vsize = (MSVideoSize){ pixbuf_desc.w , pixbuf_desc.h };
		}
		ms_average_fps_update(&ctx->fps, (uint32_t)f->ticker->time);
		if(ctx->first_image) {
			ms_filter_notify_no_arg(f, MS_VIDEO_DECODER_FIRST_IMAGE_DECODED);
			ctx->first_image = FALSE;
		}
		ms_filter_unlock(f);
		ms_queue_put(f->outputs[0], pixbuf);
		ms_mutex_lock(&ctx->mutex);
	}
	ms_mutex_unlock(&ctx->mutex);

	// Cleaning
	ms_queue_flush(&q_nalus);
	ms_queue_flush(&q_nalus2);
	ms_queue_flush(f->inputs[0]);
	return;
}
/* MSFilter process callback of the VideoToolbox H264 encoder: copy each
 * input YUV frame into a CVPixelBuffer, build per-frame encoder options
 * (fps/bitrate updates, forced keyframes) and submit the frame.
 *
 * Fix vs. original: every CFNumberCreate() result was leaked — the
 * dictionaries were created with NULL callbacks (no retain/release) and the
 * numbers were never released.  The dictionaries are now created with
 * kCFTypeDictionary callbacks (so they retain their contents) and our own
 * reference is released right after insertion. */
static void h264_enc_process(MSFilter *f) {
	VTH264EncCtx *ctx = (VTH264EncCtx *)f->data;
	mblk_t *frame;
	OSStatus err;
	CMTime p_time = CMTimeMake(f->ticker->time, 1000);

	if(!ctx->is_configured) {
		ms_queue_flush(f->inputs[0]);
		return;
	}

#if 0 && TARGET_OS_IPHONE
	CVPixelBufferPoolRef pixbuf_pool = VTCompressionSessionGetPixelBufferPool(ctx->session);
	if(pixbuf_pool == NULL) {
		ms_error("VideoToolbox: fails to get the pixel buffer pool");
		return;
	}
#endif

	while((frame = ms_queue_get(f->inputs[0]))) {
		YuvBuf src_yuv_frame, dst_yuv_frame = {0};
		CVPixelBufferRef pixbuf;
		CFMutableDictionaryRef enc_param = NULL;
		int i, pixbuf_fmt = kCVPixelFormatType_420YpCbCr8Planar;
		CFNumberRef value;
		CFMutableDictionaryRef pixbuf_attr;

		ms_yuv_buf_init_from_mblk(&src_yuv_frame, frame);

#if 0 && TARGET_OS_IPHONE
		CVPixelBufferPoolCreatePixelBuffer(NULL, pixbuf_pool, &pixbuf);
#else
		/* CFType callbacks so the dictionary retains the number; release our
		 * own reference right after (the original leaked it) */
		pixbuf_attr = CFDictionaryCreateMutable(NULL, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
		value = CFNumberCreate(NULL, kCFNumberIntType, &pixbuf_fmt);
		CFDictionarySetValue(pixbuf_attr, kCVPixelBufferPixelFormatTypeKey, value);
		CFRelease(value);
		CVPixelBufferCreate(NULL, ctx->conf.vsize.width, ctx->conf.vsize.height, kCVPixelFormatType_420YpCbCr8Planar, pixbuf_attr, &pixbuf);
		CFRelease(pixbuf_attr);
#endif

		/* copy the source frame into the pixel buffer's planes */
		CVPixelBufferLockBaseAddress(pixbuf, 0);
		dst_yuv_frame.w = (int)CVPixelBufferGetWidth(pixbuf);
		dst_yuv_frame.h = (int)CVPixelBufferGetHeight(pixbuf);
		for(i=0; i<3; i++) {
			dst_yuv_frame.planes[i] = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
			dst_yuv_frame.strides[i] = (int)CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
		}
		ms_yuv_buf_copy(src_yuv_frame.planes, src_yuv_frame.strides, dst_yuv_frame.planes, dst_yuv_frame.strides, (MSVideoSize){dst_yuv_frame.w, dst_yuv_frame.h});
		CVPixelBufferUnlockBaseAddress(pixbuf, 0);
		freemsg(frame);

		/* build per-frame encoder options under the filter lock */
		ms_filter_lock(f);
		if(ctx->fps_changed || ctx->bitrate_changed || ctx->vfu_requested) {
			CFNumberRef value;
			enc_param = CFDictionaryCreateMutable(NULL, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
			if(ctx->fps_changed) {
				value = CFNumberCreate(NULL, kCFNumberFloatType, &ctx->conf.fps);
				CFDictionaryAddValue(enc_param, kVTCompressionPropertyKey_ExpectedFrameRate, value);
				CFRelease(value);
				ctx->fps_changed = FALSE;
			}
			if(ctx->bitrate_changed) {
				value = CFNumberCreate(NULL, kCFNumberIntType, &ctx->conf.required_bitrate);
				CFDictionaryAddValue(enc_param, kVTCompressionPropertyKey_AverageBitRate, value);
				CFRelease(value);
				ctx->bitrate_changed = FALSE;
			}
			if(ctx->vfu_requested) {
				/* NOTE(review): the documented value type for
				 * kVTEncodeFrameOptionKey_ForceKeyFrame is CFBoolean; a
				 * CFNumber is kept here to preserve existing behavior —
				 * confirm against the VideoToolbox headers. */
				int force_keyframe = 1;
				value = CFNumberCreate(NULL, kCFNumberIntType, &force_keyframe);
				CFDictionaryAddValue(enc_param, kVTEncodeFrameOptionKey_ForceKeyFrame, value);
				CFRelease(value);
				ctx->vfu_requested = FALSE;
			}
		}
		ms_filter_unlock(f);

		/* without AVPF, periodically force keyframes via the video starter */
		if(!ctx->enable_avpf) {
			if(ctx->first_frame) {
				ms_video_starter_first_frame(&ctx->starter, f->ticker->time);
			}
			if(ms_video_starter_need_i_frame(&ctx->starter, f->ticker->time)) {
				if(enc_param == NULL) enc_param = CFDictionaryCreateMutable(NULL, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
				if(CFDictionaryGetValue(enc_param, kVTEncodeFrameOptionKey_ForceKeyFrame) == NULL) {
					int force_keyframe = 1;
					CFNumberRef value = CFNumberCreate(NULL, kCFNumberIntType, &force_keyframe);
					CFDictionaryAddValue(enc_param, kVTEncodeFrameOptionKey_ForceKeyFrame, value);
					CFRelease(value);
				}
			}
		}

		if((err = VTCompressionSessionEncodeFrame(ctx->session, pixbuf, p_time, kCMTimeInvalid, enc_param, NULL, NULL)) != noErr) {
			ms_error("VideoToolbox: could not pass a pixbuf to the encoder: error code %d", err);
		}
		CVPixelBufferRelease(pixbuf);

		ctx->first_frame = FALSE;

		if(enc_param) CFRelease(enc_param);
	}

	/* hand encoded frames produced by the output callback to the next filter */
	ms_mutex_lock(&ctx->mutex);
	while ((frame = ms_queue_get(&ctx->queue))) {
		ms_mutex_unlock(&ctx->mutex);
		ms_queue_put(f->outputs[0], frame);
		ms_mutex_lock(&ctx->mutex);
	}
	ms_mutex_unlock(&ctx->mutex);
}