Example #1
0
// Worker-thread entry point: encodes one video + one audio chunk each time
// the producer posts encodeReady, and answers with fillReady. Exits when
// stillEncoding is cleared (StopRecording posts encodeReady to wake us).
void Movie::EncodeThread()
{
	av->video_counter = 0;
	av->audio_counter = 0;
	for (;;)
	{
		// block until the producer has a frame ready (or wants us to quit)
		SDL_SemWait(encodeReady);
		if (stillEncoding)
		{
			// encode the pending video and audio data (no flush)
			EncodeVideo(false);
			EncodeAudio(false);
			// hand the buffer back to the producer
			SDL_SemPost(fillReady);
			continue;
		}
		// quit requested: release the producer one last time, then exit
		SDL_SemPost(fillReady);
		return;
	}
}
Example #2
0
/*
 * Encoder. Runs in a loop off the record thread until we run out of frames
 * (probably because stop was called)
 */
/*
 * Encoder. Runs in a loop off the record thread until we run out of frames
 * (probably because stop was called)
 */
void
MediaRecorder::Encode()
{
    /* Alright, so we are multiplexing audio with video. We first fetch 1 frame
     * of video from the pipe, encode it and then follow up with packets of
     * audio. We want the audio and video packets to be as close to each other
     * as possible (timewise) to make playback synchronized.
     *
     * The generic formula to decide how many bytes of audio we must write per
     * frame of video will be: (SAMPLE_RATE / FPS) * AUDIO_FRAME_SIZE;
     *
     * As an example, this works out to 8820 bytes of 22050Hz audio
     * (2205 frames) per frame of video @ 10 fps.
     *
     * The really tricky part here is that most webcams will not return
     * frames at the rate we request. Movement and brightness changes will
     * often result in wildy varying fps, and we must compensate for that
     * by either dropping or duplicating frames to meet the audio stream
     * and the theora header (we commit to 15fps by default).
     *
     * TODO: Figure out if PR_Calloc will be more efficient if we call it for
     * storing more than just 1 frame at a time. For instance, we can wait
     * a second and encode 10 frames of video and 88200 bytes of audio per
     * run of the loop? Possible answer: No, because the timing might go
     * awry, we are better off processing timestamps per frame of video.
     */
    nsresult rv;
    PRUint32 rd;
    int v_fps = FPS_N / FPS_D;
    int a_frame_num = FRAMES_BUFFER;
    if (v_rec) {
        /* audio frames per video frame: rate / fps */
        v_fps = vState->backend->GetFPSN() / vState->backend->GetFPSD();
        a_frame_num = params->rate/(v_fps);
    }
    int a_frame_size = aState->backend->GetFrameSize();
    int a_frame_total = a_frame_num * a_frame_size;
    PRFloat64 v_frame_time_length = (PRFloat64)1.0 / static_cast<PRFloat64>(v_fps);
    
    PRUint8 *v_frame = NULL;
    PRInt16 *a_frames = NULL;
    /* Non-owning alias of v_frame: last video frame that arrived within
     * TOLERANCE of the audio clock; reused to duplicate frames. Never
     * freed directly -- ownership stays with v_frame. */
    PRUint8 *v_frame_most_recent = NULL;
    
    PRInt32 vlen;
    PRBool should_end = PR_FALSE;
    PRFloat64 atime, delta, current_audio_time = 0, vtime = 0;
    
    if (v_rec && a_rec) {
        /* Check if audio or video started first, and set that as baseline */
        rv = aState->aPipeIn->Read((char *)&atime, sizeof(PRFloat64), &rd);
        fprintf(stderr, "Audio stream started at %f\n", atime);
        if (!(v_frame = GetVideoPacket(&vlen, &vtime))) {
            fprintf(stderr, "GetVideoPacket returned NULL\n");
            goto finish;
        }
        fprintf(stderr, "Video stream started at %f\n", vtime);
        
        while (vtime > atime) {
            /* Fast forward audio to catch up with video */
            PR_Free(GetAudioPacket(a_frame_total));
            atime += v_frame_time_length;
        }
        current_audio_time = atime;
        
multiplex:
        if (!(a_frames = GetAudioPacket(a_frame_total))) {
            /* Audio ran dry (stop was called): flush one last video frame,
             * then exit via finish so buffers are released. */
            should_end = PR_TRUE;
            fprintf(stderr, "GetAudioPacket returned NULL\n");
            goto video;
        } else {
            if (EncodeAudio(a_frames, a_frame_total) == PR_FALSE) {
                goto finish;
            }
            current_audio_time += v_frame_time_length;
        }
        PR_Free(a_frames);
        a_frames = NULL;
        
        /* Experience also suggests that the audio stream is more or less
         * consistent; so the question really is if we need to duplicate or
         * drop video packets to match the FPS that we committed to. First
         * we drop all packets until we reach timestamp indicated by audio.
         */
video:
        delta = vtime - current_audio_time;
        while (delta < 0) {
            if (v_frame) {
                /* FIX: v_frame_most_recent may alias the buffer we are about
                 * to free; clear it so we never encode a dangling pointer.
                 * (If the replacement frame is also late we simply drop,
                 * which is the documented fallback below.) */
                if (v_frame_most_recent == v_frame) {
                    v_frame_most_recent = NULL;
                }
                PR_Free(v_frame); v_frame = NULL;
            }
            if (!(v_frame = GetVideoPacket(&vlen, &vtime))) {
                fprintf(stderr, "GetVideoPacket returned NULL\n");
                goto finish;
            }
            delta = vtime - current_audio_time;
        }
        
        if (delta < TOLERANCE) {
            /* This video frame appeared right after the audio frame, but
             * within our tolerance levels, so we encode it and keep a
             * copy in case we need to duplicate it on the next run
             */
            v_frame_most_recent = v_frame;
        }
        /* Frame we got was too late, so re-use our old one. If none is
         * available, just drop.
         * NOTE(review): vlen here is from the most recent GetVideoPacket;
         * this assumes all raw video packets are the same size -- confirm.
         */
        if (v_frame_most_recent) {
            if (EncodeVideo(v_frame_most_recent, vlen) == PR_FALSE) {
                goto finish;
            }
        }
        
        if (should_end)
            goto finish; /* FIX: was 'return', which leaked v_frame */
        goto multiplex;
        
    } else if (v_rec && !a_rec) {
        /* Video only: encode every frame as it arrives until stop. */
        while (!v_stp) {
            if (!(v_frame = GetVideoPacket(&vlen, &vtime))) {
                continue;
            } else {
                if (EncodeVideo(v_frame, vlen) == PR_FALSE) {
                    PR_Free(v_frame); v_frame = NULL;
                    continue;
                }
            }
            PR_Free(v_frame); v_frame = NULL;
        }
    } else if (a_rec && !v_rec) {
        /* Audio only: consume the start timestamp, then encode until stop. */
        rv = aState->aPipeIn->Read((char *)&atime, sizeof(PRFloat64), &rd);
        while (!a_stp) {
            if (!(a_frames = GetAudioPacket(a_frame_total))) {
                continue;
            } else {
                if (EncodeAudio(a_frames, a_frame_total) == PR_FALSE) {
                    PR_Free(a_frames); a_frames = NULL;
                    continue;
                }
            }
            PR_Free(a_frames); a_frames = NULL;
        }
    }
    
finish:
    /* Single exit point: release whatever is still owned. */
    if (v_frame) PR_Free(v_frame);
    if (a_frames) PR_Free(a_frames);
    
}
Example #3
0
void Movie::StopRecording()
{
	if (encodeThread)
	{
		stillEncoding = false;
		SDL_SemPost(encodeReady);
		SDL_WaitThread(encodeThread, NULL);
		encodeThread = NULL;
	}
	if (encodeReady)
	{
		SDL_DestroySemaphore(encodeReady);
		encodeReady = NULL;
	}
	if (fillReady)
	{
		SDL_DestroySemaphore(fillReady);
		fillReady = NULL;
	}
	if (temp_surface)
	{
		SDL_FreeSurface(temp_surface);
		temp_surface = NULL;
	}
    
    if (av->inited)
    {
        // flush video and audio
        EncodeVideo(true);
        EncodeAudio(true);
        avcodec_flush_buffers(av->fmt_ctx->streams[av->audio_stream_idx]->codec);
        avcodec_flush_buffers(av->fmt_ctx->streams[av->video_stream_idx]->codec);
        av_write_trailer(av->fmt_ctx);
        av->inited = false;
    }
    
    if (av->audio_fifo)
    {
        av_fifo_free(av->audio_fifo);
        av->audio_fifo = NULL;
    }
    if (av->audio_data)
    {
        av_free(av->audio_data);
        av->audio_data = NULL;
    }
    if (av->audio_frame)
    {
        av_free(av->audio_frame);
        av->audio_frame = NULL;
    }
    
    if (av->video_buf)
    {
        av_free(av->video_buf);
        av->video_buf = NULL;
    }
    if (av->video_data)
    {
        av_free(av->video_data);
        av->video_data = NULL;
    }
    if (av->video_frame)
    {
        av_free(av->video_frame);
        av->video_frame = NULL;
    }
    
    if (av->sws_ctx)
    {
        av_free(av->sws_ctx);
        av->sws_ctx = NULL;
    }

    if (av->fmt_ctx)
    {
        for (int i = 0; i < av->fmt_ctx->nb_streams; i++)
        {
            avcodec_close(av->fmt_ctx->streams[i]->codec);
        }
        avio_close(av->fmt_ctx->pb);
        avformat_free_context(av->fmt_ctx);
        av->fmt_ctx = NULL;
    }

	moviefile = "";
	SDL_PauseAudio(false);
}
Example #4
0
// Demo driver: captures desktop video + audio for 10 seconds, feeds each
// frame through the encoder (recording to D:\desktop_live.mp4), then tears
// everything down and dumps CRT leak statistics.
int main(int argc, char **argv)
{
	int ret = -1;
	uint8_t *data = NULL;
	unsigned long size = 0;
	int times = 0;                 // number of video frames processed
	int width = 0;
	int height = 0;
	char *dest = NULL;
	unsigned long dest_size = 0;
	long long pts = 0;
	long long dts = 0;
	AUDIOPACKET ap[30] = {0};
	int ap_len = 0;
	int i = 0;
	CAPTURECONFIG captureConfig;
	PCAPTURECONFIG pCaptureConfig = &captureConfig;
	ENCODECONFIG encodeConfig;
	PENCODECONFIG pEncodeConfig = &encodeConfig;
	PENCODER pEncoder;
	PCAPTURE pCapture;
	DWORD start_time, end_time;
	// capture settings: 5 fps screen grab, 48 kHz 16-bit stereo audio
	pCaptureConfig->fps = 5;
	pCaptureConfig->channels = 2;
	pCaptureConfig->bits_per_sample = 16;
	pCaptureConfig->samples_per_sec = 48000;
	pCaptureConfig->avg_bytes_per_sec = 48000;

	// encoder settings must match the capture format
	pEncodeConfig->fps = 5;
	pEncodeConfig->width = 1366;
	pEncodeConfig->height = 768;
	pEncodeConfig->bit_rate = 400000;
	pEncodeConfig->channels = 2;
	pEncodeConfig->bits_per_sample = 16;
	pEncodeConfig->sample_rate = 48000;
	pEncodeConfig->avg_bytes_per_sec = 48000;
	pEncodeConfig->record = 1;

	// sizeof includes the terminating NUL (19 chars + 1 = 20 bytes)
	memcpy(pEncodeConfig->record_file, "D:\\desktop_live.mp4",
	       sizeof("D:\\desktop_live.mp4"));

	InitLog(LOG_DEBUG, OUT_FILE);

	pCapture = InitCapture(pCaptureConfig);
	if (NULL == pCapture)
	{
		printf("init capture failed\n");
		FreeLog();                 // FIX: release log on early exit
		return -1;
	}

	pEncoder = InitEncoder(pEncodeConfig);
	if (NULL == pEncoder)
	{
		printf("init encoder failed\n");
		FreeCapture(pCapture);     // FIX: release capture on early exit
		FreeLog();
		return -1;
	}

	ret = StartCapture(pCapture);
	if (SECCESS != ret)
	{
		printf("start capture failed\n");
		FreeEncoder(pEncoder);     // FIX: release encoder + capture on early exit
		FreeCapture(pCapture);
		FreeLog();
		return -1;
	}

	// run the capture/encode loop for 10 seconds of wall-clock time
	start_time = end_time = timeGetTime();
	while (10*1000 > (end_time - start_time))
	{
		if (SECCESS == GetVideoFrame(pCapture, &data, &size, &width, &height))
		{
			ret = EncodeVideo(pEncoder, data, width, height, &dest, &dest_size, &pts, &dts);
			if (ret == SECCESS)
			{
				free(dest);
			}

			times++;
			// FIX: size is unsigned long -- %d is undefined behavior on LP64
			printf("video data size = %lu\n", size);
			free(data);
		}

		if (SECCESS == GetAudioFrame(pCapture, &data, &size))
		{
			ap_len = 0;
			ret = EncodeAudio(pEncoder, data, size, ap, &ap_len);
			if (ret == SECCESS)
			{
				// the encoder hands back up to 30 packets; free each payload
				for (i = 0; i < ap_len; i++)
				{
					free(ap[i].data);
				}
			}

			// FIX: size is unsigned long -- %d is undefined behavior on LP64
			printf("audio data size = %lu\n", size);
			free(data);
		}

		end_time = timeGetTime();
	}

	StopCapture(pCapture);
	FreeCapture(pCapture);

	// drain any frames still buffered inside the encoder, then release it
	FflushEncoder(pEncoder);
	FreeEncoder(pEncoder);

	FreeLog();
	_CrtDumpMemoryLeaks();
	return 0;
}