Example #1
int CVideoEncodeVt::CopyParamSets(CMVideoFormatDescriptionRef avideoFmtDesc, uint8_t* apBuf, int aiBufSize)
{
    if(NULL == avideoFmtDesc || NULL == apBuf || aiBufSize <= 0)
    {
        return -1;
    }
    
    size_t nTotalSize = 0;
    size_t nParamCnt = 0;
    bool bCannotGetCnt = false;
    
    //Get the number of parameter sets
    int ret = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(avideoFmtDesc, 0, NULL, NULL, &nParamCnt, NULL);
    if(0 != ret)
    {
        bCannotGetCnt = true;
        nParamCnt = 0;
    }
    
    //Get each parameter set and copy it into the output buffer, prefixed with a start code
    for(int i = 0; i < nParamCnt || bCannotGetCnt; i++)
    {
        const uint8_t* pParam = NULL;
        size_t nParamSize = 0;
        ret = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(avideoFmtDesc, i, &pParam, &nParamSize, NULL, NULL);
        if(0 != ret)
        {
            //If the parameter-set count could not be queried up front, failing after at
            //least one successful copy only means we ran past the last index, which is
            //not an error.
            if(i > 0 && bCannotGetCnt)
            {
                ret = 0;
            }
            
            break;
        }

        if ((size_t)aiBufSize < nTotalSize + sizeof(StartCode) + nParamSize)
        {
            CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "bufsize too small! %d < %zu", aiBufSize, nTotalSize + sizeof(StartCode) + nParamSize);
            
            assert(false);
            return -1;
        }
        
        memcpy(apBuf + nTotalSize, StartCode, sizeof(StartCode));
        nTotalSize += sizeof(StartCode);
        
        memcpy(apBuf + nTotalSize, pParam, nParamSize);
        nTotalSize += nParamSize;
    }

    if(0 != ret)
    {
        return -1;
    }
    
    return (int)nTotalSize;
}
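A minimal sketch of how the two helpers above might be driven by a caller: GetParamSize() from Example #3 reports the bytes needed, and CopyParamSets() then fills a buffer of exactly that size with start-code-prefixed SPS/PPS data. The free function, its parameter names, and the use of std::vector are illustrative assumptions; only GetParamSize() and CopyParamSets() come from the original examples.

#include <vector>
#include <cstdint>

//Hypothetical caller, assuming the CVideoEncodeVt class from Examples #1 and #3
//and the CoreMedia headers are available.
static int BuildAnnexBExtraData(CVideoEncodeVt& aEncoder,
                                CMVideoFormatDescriptionRef avideoFmtDesc,
                                std::vector<uint8_t>& aExtraData)
{
    int iSize = 0;
    if(0 != aEncoder.GetParamSize(avideoFmtDesc, &iSize) || iSize <= 0)
    {
        return -1;
    }
    
    aExtraData.resize(iSize);
    
    //CopyParamSets() returns the number of bytes actually written, or -1 on failure.
    int iCopied = aEncoder.CopyParamSets(avideoFmtDesc, aExtraData.data(), iSize);
    if(iCopied < 0)
    {
        return -1;
    }
    
    aExtraData.resize(iCopied);
    return 0;
}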
Example #2
    void vtCallback(void *outputCallbackRefCon,
                    void *sourceFrameRefCon,
                    OSStatus status,
                    VTEncodeInfoFlags infoFlags,
                    CMSampleBufferRef sampleBuffer )
    {
        CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sampleBuffer);
        CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, false);
        CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
        
        //A sample is a keyframe when it does not depend on other samples
        //(kCMSampleAttachmentKey_DependsOnOthers is kCFBooleanFalse).
        bool isKeyframe = false;
        if(attachments != NULL) {
            CFDictionaryRef attachment;
            CFBooleanRef dependsOnOthers;
            attachment = (CFDictionaryRef)CFArrayGetValueAtIndex(attachments, 0);
            dependsOnOthers = (CFBooleanRef)CFDictionaryGetValue(attachment, kCMSampleAttachmentKey_DependsOnOthers);
            isKeyframe = (dependsOnOthers == kCFBooleanFalse);
        }
        
        if(isKeyframe) {

            // Send the SPS and PPS.
            CMFormatDescriptionRef format = CMSampleBufferGetFormatDescription(sampleBuffer);
            size_t spsSize, ppsSize;
            size_t parmCount;
            const uint8_t* sps, *pps;
            
            CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 0, &sps, &spsSize, &parmCount, nullptr );
            CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 1, &pps, &ppsSize, &parmCount, nullptr );
            
            std::unique_ptr<uint8_t[]> sps_buf(new uint8_t[spsSize + 4]);
            std::unique_ptr<uint8_t[]> pps_buf(new uint8_t[ppsSize + 4]);
            
            //Prefix each parameter set with a 4-byte length. The size is bumped by 4
            //before being memcpy'd, so the prefix is written in host byte order and
            //counts the prefix bytes as well as the payload.
            memcpy(&sps_buf[4], sps, spsSize);
            spsSize += 4;
            memcpy(&sps_buf[0], &spsSize, 4);
            memcpy(&pps_buf[4], pps, ppsSize);
            ppsSize += 4;
            memcpy(&pps_buf[0], &ppsSize, 4);
            
            ((H264Encode*)outputCallbackRefCon)->compressionSessionOutput((uint8_t*)sps_buf.get(),spsSize, pts.value);
            ((H264Encode*)outputCallbackRefCon)->compressionSessionOutput((uint8_t*)pps_buf.get(),ppsSize, pts.value);
        }
       
        char* bufferData;
        size_t size;
        CMBlockBufferGetDataPointer(block, 0, NULL, &size, &bufferData);

        ((H264Encode*)outputCallbackRefCon)->compressionSessionOutput((uint8_t*)bufferData,size, pts.value);
        
    }
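Note that Example #2 builds the 4-byte prefix by copying the low bytes of a size_t after adding 4 to it, so the prefix ends up in host byte order and counts the prefix bytes as well as the payload. If the consumer instead expects AVCC-style big-endian length prefixes that cover only the NAL unit payload, a helper along these lines could be used; it is a sketch for illustration, not part of the original callback.

#include <cstdint>
#include <cstring>

//Hypothetical helper: write a 4-byte big-endian length prefix followed by the
//NAL unit payload. dst must have room for naluSize + 4 bytes.
static void WriteLengthPrefixedNalu(uint8_t* dst, const uint8_t* nalu, size_t naluSize)
{
    dst[0] = (uint8_t)((naluSize >> 24) & 0xFF);
    dst[1] = (uint8_t)((naluSize >> 16) & 0xFF);
    dst[2] = (uint8_t)((naluSize >> 8) & 0xFF);
    dst[3] = (uint8_t)(naluSize & 0xFF);
    std::memcpy(dst + 4, nalu, naluSize);
}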
Example #3
int CVideoEncodeVt::GetParamSize(CMVideoFormatDescriptionRef avideoFmtDesc, int* aiSize)
{
    if(NULL == avideoFmtDesc)
    {
        return -1;
    }
    
    size_t nTotalSize = 0;
    size_t nParamCnt = 0;
    bool bCannotGetCnt = false;
    
    //Get the number of parameter sets
    int ret = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(avideoFmtDesc, 0, NULL, NULL, &nParamCnt, NULL);
    if(0 != ret)
    {
        bCannotGetCnt = true;
        nParamCnt = 0;
    }
    
    //Compute the total size of the parameter sets, including start codes
    for(int i = 0; i < nParamCnt || bCannotGetCnt; i++)
    {
        size_t nParamSize = 0;
        ret = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(avideoFmtDesc, i, NULL, &nParamSize, NULL, NULL);
        if(0 != ret)
        {
            if(i > 0 && bCannotGetCnt)
            {
                ret = 0;
            }
            
            break;
        }
        
        nTotalSize += nParamSize + sizeof(StartCode);
    }
    
    if(0 != ret)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed!");
        
        assert(false);
        return -1;
    }
        
    *aiSize = (int)nTotalSize;
    
    return 0;
}
Example #4
int CVideoEncodeVt::GetLengthCodeSize(CMSampleBufferRef sampleBuffer, int* aiSize)
{
    if(NULL == sampleBuffer)
    {
        return -1;
    }
    
    int iSize = 0;
    CMVideoFormatDescriptionRef videoFmtDesc = NULL;
    
    videoFmtDesc = CMSampleBufferGetFormatDescription(sampleBuffer);
    if (NULL == videoFmtDesc)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CMSampleBufferGetFormatDescription failed!");
        return -1;
    }
    
    //Get the NAL unit header (length-code) size
    int ret = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(videoFmtDesc, 0, NULL, NULL, NULL, &iSize);
    if (0 != ret)
    {
        CLog::GetInstance().Log(ENUM_LOG_LEVEL::enum_Log_Level5, "CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed!");
        assert(false);
        return -1;
    }
    
    *aiSize = iSize;
    
    return 0;
}
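The value reported here is the length-code size that VideoToolbox puts in front of every NAL unit in the sample's block buffer (normally 4). Below is a hedged sketch of how that size might be used to walk the length-prefixed NAL units of an encoded sample; the function, its parameter names, and the callback signature are illustrative assumptions.

#include <cstdint>
#include <cstddef>

//Hypothetical sketch: iterate the NAL units inside a raw sample buffer whose
//units are each preceded by a big-endian length field of iLengthCodeSize bytes.
static void ForEachNalu(const uint8_t* apData, size_t aDataSize, int iLengthCodeSize,
                        void (*aOnNalu)(const uint8_t* pNalu, size_t nNaluSize))
{
    size_t nOffset = 0;
    while(nOffset + (size_t)iLengthCodeSize <= aDataSize)
    {
        //Read the big-endian NAL unit length.
        size_t nNaluSize = 0;
        for(int i = 0; i < iLengthCodeSize; i++)
        {
            nNaluSize = (nNaluSize << 8) | apData[nOffset + i];
        }
        nOffset += (size_t)iLengthCodeSize;
        
        if(nNaluSize > aDataSize - nOffset)
        {
            break;  //Malformed length; stop rather than read past the buffer.
        }
        
        aOnNalu(apData + nOffset, nNaluSize);
        nOffset += nNaluSize;
    }
}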
Example #5
static void h264_enc_output_cb(VTH264EncCtx *ctx, void *sourceFrameRefCon, OSStatus status, VTEncodeInfoFlags infoFlags, CMSampleBufferRef sampleBuffer) {
	MSQueue nalu_queue;
	CMBlockBufferRef block_buffer;
	size_t read_size, frame_size;
	bool_t is_keyframe = FALSE;
	mblk_t *nalu;
	int i;

	if(sampleBuffer == NULL || status != noErr) {
		ms_error("VideoToolbox: could not encode frame: error %d", status);
		return;
	}

	ms_mutex_lock(&ctx->mutex);
	if(ctx->is_configured) {
		ms_queue_init(&nalu_queue);
		block_buffer = CMSampleBufferGetDataBuffer(sampleBuffer);
		frame_size = CMBlockBufferGetDataLength(block_buffer);
		/* Walk every contiguous chunk of the encoded sample: the offset passed to
		 * CMBlockBufferGetDataPointer() is the number of bytes already consumed,
		 * and chunk_size receives the length of the contiguous run at that offset. */
		for(i=0, read_size=0; read_size < frame_size; i++) {
			char *chunk;
			size_t chunk_size;
			int idr_count;
			CMBlockBufferGetDataPointer(block_buffer, read_size, &chunk_size, NULL, &chunk);
			ms_h264_stream_to_nalus((uint8_t *)chunk, chunk_size, &nalu_queue, &idr_count);
			if(idr_count) is_keyframe = TRUE;
			read_size += chunk_size;
		}

		if(is_keyframe) {
			mblk_t *insertion_point = ms_queue_peek_first(&nalu_queue);
			const uint8_t *parameter_set;
			size_t parameter_set_size;
			size_t parameter_set_count;
			CMFormatDescriptionRef format_desc = CMSampleBufferGetFormatDescription(sampleBuffer);
			i=0;
			do {
				CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format_desc, i, &parameter_set, &parameter_set_size, &parameter_set_count, NULL);
				nalu = allocb(parameter_set_size, 0);
				memcpy(nalu->b_wptr, parameter_set, parameter_set_size);
				nalu->b_wptr += parameter_set_size;
				ms_queue_insert(&nalu_queue, insertion_point, nalu);
				i++;
			} while(i < parameter_set_count);
		}

		rfc3984_pack(&ctx->packer_ctx, &nalu_queue, &ctx->queue, (uint32_t)(ctx->f->ticker->time * 90));
	}
	ms_mutex_unlock(&ctx->mutex);
}
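The last argument of rfc3984_pack() converts the ticker time, which mediastreamer2 keeps in milliseconds, to the 90 kHz RTP clock used for H.264. A standalone illustration of that conversion (the function name and the sample value in the comment are hypothetical):

#include <cstdint>

//A 90 kHz RTP clock advances 90 ticks per millisecond, so a millisecond
//timestamp is simply multiplied by 90 (e.g. 1000 ms -> 90000 RTP ticks).
static uint32_t MsToRtp90kHz(uint64_t timeMs)
{
    return (uint32_t)(timeMs * 90);
}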