Beispiel #1
0
/* called by video capture every 10 sec for checking disk free space*/
gboolean 
FreeDiskCheck_timer(gpointer data)
{
    struct ALL_DATA * all_data = (struct ALL_DATA *) data;
    struct vdIn *videoIn = all_data->videoIn;
    struct GLOBAL *global = all_data->global;
    struct GWIDGET *gwidget = all_data->gwidget;

    /* read the capture flag under the video mutex */
    __LOCK_MUTEX(__VMUTEX);
        gboolean capVid = videoIn->capVid;
    __UNLOCK_MUTEX(__VMUTEX);

    /* not capturing any more: nothing to supervise */
    if (!capVid)
        return (FALSE); /*destroys the timer*/

    /* disk still has room: keep polling */
    if (DiskSupervisor(data))
        return (TRUE); /*keeps the timer*/

    /* disk is (nearly) full: stop the video capture */
    g_printerr("Stopping video Capture\n");
    if (global->debug)
        g_print("setting video toggle to FALSE\n");

    if (!global->no_display)
    {
        /* untoggle the capture button - its handler stops the capture */
        gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON(gwidget->CapVidButt), FALSE);
        gdk_flush();
    }
    else
        capture_vid(NULL, all_data);

    return (FALSE); /*destroys the timer*/
}
Beispiel #2
0
/*
 * compress and write one audio frame to the open output file
 * args:
 *   all_data - pointer to all data structures
 * returns: result of compress_audio_frame (0 = OK)
 */
static int write_audio_frame (struct ALL_DATA *all_data)
{
	struct paRecordData *pdata = all_data->pdata;
	struct GLOBAL *global = all_data->global;
	struct VideoFormatData *videoF = all_data->videoF;

	int ret = 0;

	if (global->VidFormat == AVI_FORMAT)
	{
		ret = compress_audio_frame(all_data);
	}
	else if (global->VidFormat == WEBM_FORMAT || global->VidFormat == MKV_FORMAT)
	{
		/* matroska needs the chunk pts: take it from the ring buffer
		 * slot currently being read (indexes guarded by the audio mutex) */
		__LOCK_MUTEX( __AMUTEX );
		videoF->apts = pdata->audio_buff[pdata->br_ind][pdata->r_ind].time_stamp;
		ret = compress_audio_frame(all_data);
		__UNLOCK_MUTEX( __AMUTEX );
	}
	/* any other format: nothing to write */

	return (ret);
}
Beispiel #3
0
/*
 * write a compressed video packet to the open output file
 * args:
 *   all_data - pointer to all data structures
 *   buff     - packet payload
 *   size     - payload size in bytes (nothing is written if <= 0)
 * returns: muxer error code (0 = OK)
 */
static int write_video_data(struct ALL_DATA *all_data, BYTE *buff, int size)
{
	struct VideoFormatData *videoF = all_data->videoF;
	struct GLOBAL *global = all_data->global;

	int ret = 0;

	/* serialize all file writes */
	__LOCK_MUTEX( __FMUTEX );

	if (size > 0)
	{
		if (global->VidFormat == AVI_FORMAT)
			ret = avi_write_packet(videoF->avi, 0, buff, size, videoF->vdts, videoF->vblock_align, videoF->vflags);
		else if (global->VidFormat == WEBM_FORMAT || global->VidFormat == MKV_FORMAT)
			ret = mkv_write_packet(videoF->mkv, 0, buff, size, videoF->vduration, videoF->vpts, videoF->vflags);
	}

	__UNLOCK_MUTEX( __FMUTEX );

	return (ret);
}
Beispiel #4
0
/*
 * mux one encoded audio frame into the output container
 * args:
 *   encoder_ctx - pointer to encoder context
 *
 * asserts:
 *   encoder_ctx is not null;
 *
 * returns: error code (-1 if there is no audio data to write)
 */
int encoder_write_audio_data(encoder_context_t *encoder_ctx)
{
	/*assertions*/
	assert(encoder_ctx != NULL);

	encoder_audio_context_t *enc_audio_ctx = encoder_ctx->enc_audio_ctx;

	/* no audio context or no audio channels: nothing to mux */
	if(!enc_audio_ctx || encoder_ctx->audio_channels <= 0)
		return -1;

	if(verbosity > 3)
		printf("ENCODER: writing %i bytes of audio data\n", enc_audio_ctx->outbuf_coded_size);

	/* empty payload: nothing to write */
	if(enc_audio_ctx->outbuf_coded_size <= 0)
		return -1;

	int ret = 0;
	int align = 1;

	encoder_codec_data_t *codec_data = (encoder_codec_data_t *) enc_audio_ctx->codec_data;
	if(codec_data != NULL)
		align = codec_data->codec_context->block_align;

	__LOCK_MUTEX( __PMUTEX );

	if(encoder_ctx->muxer_id == ENCODER_MUX_AVI)
	{
		/* stream index 1 = audio */
		ret = avi_write_packet(
				avi_ctx,
				1,
				enc_audio_ctx->outbuf,
				enc_audio_ctx->outbuf_coded_size,
				enc_audio_ctx->dts,
				align,
				enc_audio_ctx->flags);
	}
	else if(encoder_ctx->muxer_id == ENCODER_MUX_MKV ||
	        encoder_ctx->muxer_id == ENCODER_MUX_WEBM)
	{
		ret = mkv_write_packet(
				mkv_ctx,
				1,
				enc_audio_ctx->outbuf,
				enc_audio_ctx->outbuf_coded_size,
				enc_audio_ctx->duration,
				enc_audio_ctx->pts,
				enc_audio_ctx->flags);
	}

	__UNLOCK_MUTEX( __PMUTEX );

	return (ret);
}
Beispiel #5
0
/*
 * mux one encoded video frame into the output container
 * args:
 *   encoder_ctx - pointer to encoder context
 *
 * asserts:
 *   encoder_ctx is not null;
 *
 * returns: error code (-1 if there is no coded data to write)
 */
int encoder_write_video_data(encoder_context_t *encoder_ctx)
{
	/*assertions*/
	assert(encoder_ctx);

	encoder_video_context_t *enc_video_ctx = encoder_ctx->enc_video_ctx;
	assert(enc_video_ctx);

	/* empty payload: nothing to write */
	if(enc_video_ctx->outbuf_coded_size <= 0)
		return -1;

	enc_video_ctx->framecount++;

	int ret = 0;
	int align = 1;

	encoder_codec_data_t *codec_data = (encoder_codec_data_t *) enc_video_ctx->codec_data;
	if(codec_data != NULL)
		align = codec_data->codec_context->block_align;

	__LOCK_MUTEX( __PMUTEX );

	if(encoder_ctx->muxer_id == ENCODER_MUX_AVI)
	{
		/* stream index 0 = video */
		ret = avi_write_packet(
				avi_ctx,
				0,
				enc_video_ctx->outbuf,
				enc_video_ctx->outbuf_coded_size,
				enc_video_ctx->dts,
				align,
				enc_video_ctx->flags);
	}
	else if(encoder_ctx->muxer_id == ENCODER_MUX_MKV ||
	        encoder_ctx->muxer_id == ENCODER_MUX_WEBM)
	{
		ret = mkv_write_packet(
				mkv_ctx,
				0,
				enc_video_ctx->outbuf,
				enc_video_ctx->outbuf_coded_size,
				enc_video_ctx->duration,
				enc_video_ctx->pts,
				enc_video_ctx->flags);
	}

	__UNLOCK_MUTEX( __PMUTEX );

	return (ret);
}
Beispiel #6
0
/*
 * stop audio capture and free all audio buffer allocations
 * args:
 *   pdata - pointer to audio record data
 * returns: error code from the api specific close call (0 = OK)
 */
int
close_sound (struct paRecordData *pdata)
{
	int err = 0;
	int i= 0, j= 0;

	/* flag the audio callback/thread to stop capturing */
	pdata->capVid = 0;

	/* close the stream on the active audio api */
	switch(pdata->api)
	{
#ifdef PULSEAUDIO
		case PULSE:
			err = pulse_close_audio(pdata);
			break;
#endif
		case PORT:
		default:
			err = port_close_audio(pdata);
			break;
	}

	pdata->flush = 0;
	pdata->delay = 0; /*reset the audio delay*/

	/* ---------------------------------------------------------------------
	 * make sure no operations are performed on the buffers  */
	__LOCK_MUTEX(__AMUTEX);
		if(pdata->lavc_data)
			clean_lavc_audio(&(pdata->lavc_data));
		pdata->lavc_data = NULL;
		/*free primary buffer*/
		g_free( pdata->recordedSamples );
		pdata->recordedSamples=NULL;
		if(pdata->audio_buff)
		{
			/* free every frame of every ring buffer segment */
			for(j=0; j< AUDBUFF_NUM; j++)
			{
				for(i=0; i<AUDBUFF_SIZE; i++)
				{
					g_free(pdata->audio_buff[j][i].frame);
				}
				g_free(pdata->audio_buff[j]);
				pdata->audio_buff[j] = NULL;
			}
		}
		/* g_free(NULL) is a no-op, so no guard is needed here */
		g_free(pdata->pcm_sndBuff);
		pdata->pcm_sndBuff = NULL;
	__UNLOCK_MUTEX(__AMUTEX);

	return (err);
}
/*video capture can only start after buffer allocation*/
/*
 * (re)allocate the video ring buffer sized for ~1.5 seconds of video
 * args:
 *   all_data - pointer to all data structures
 */
static void alloc_videoBuff(struct ALL_DATA *all_data)
{
	struct GLOBAL *global = all_data->global;
	int i = 0;
	int framesize = global->height*global->width*2; /*yuyv (maximum size)*/

	/* remember the size the existing ring buffer was allocated with:
	 * the free loop below must use this count, NOT the recomputed one,
	 * otherwise a changed fps would leak frames (new < old) or read
	 * past the old allocation (new > old) */
	int old_buff_size = global->video_buff_size;

	if((global->fps > 0) && (global->fps_num > 0))
	    global->video_buff_size = (global->fps * 3) / (global->fps_num * 2); /* 1,5 seconds of video in buffer*/
	if (global->video_buff_size < 16) global->video_buff_size = 16; /*keep at least 16 frames*/

	/*alloc video ring buffer*/
	__LOCK_MUTEX(__GMUTEX);
		if (global->videoBuff != NULL)
		{
			/*free the old video frames (old size, see above)*/
			for(i=0;i<old_buff_size;i++)
			{
				g_free(global->videoBuff[i].frame);
				global->videoBuff[i].frame = NULL;
			}
			g_free(global->videoBuff);
			global->videoBuff = NULL;
		}

		/*alloc video frames to videoBuff*/
		global->videoBuff = g_new0(VidBuff, global->video_buff_size);
		for(i=0;i<global->video_buff_size;i++)
		{
			global->videoBuff[i].frame = g_new0(BYTE,framesize);
			global->videoBuff[i].used = FALSE;
		}

		//reset indexes
		global->r_ind=0;
		global->w_ind=0;
	__UNLOCK_MUTEX(__GMUTEX);
}
Beispiel #8
0
/* called by video capture from start timer */
gboolean
timer_callback(gpointer data)
{
    struct ALL_DATA * all_data = (struct ALL_DATA *) data;
    struct vdIn *videoIn = all_data->videoIn;
    struct GLOBAL *global = all_data->global;
    struct GWIDGET *gwidget = all_data->gwidget;

    gboolean is_capturing = FALSE;

    /* read the capture flag under the video mutex */
    __LOCK_MUTEX(__VMUTEX);
        is_capturing = videoIn->capVid;
    __UNLOCK_MUTEX(__VMUTEX);

    if (is_capturing)
    {
        /*stop video capture*/
        if (global->debug)
            g_print("setting video toggle to FALSE\n");

        if (global->no_display)
        {
            capture_vid(NULL, all_data);
        }
        else
        {
            /* untoggling the button triggers its stop handler */
            gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON(gwidget->CapVidButt), FALSE);
            gdk_flush();
        }

        global->Capture_time = 0;

        //if exit_on_close then shutdown
        if (global->exit_on_close)
            shutd (0, data);
    }

    return (FALSE); /* one-shot: destroys the timer */
}
Beispiel #9
0
/*
 * video loop - run in a thread (SDL overlay)
 * grabs frames from the v4l2 device, applies the selected filters,
 * hands frames to the image/video capture paths and renders the
 * frame to an SDL YUV overlay; also drives the software autofocus
 * and handles SDL key/resize/quit events
 * args:
 *   data - pointer to struct ALL_DATA (cast from void*)
 * returns: NULL (thread exit value)
 */
void *main_loop(void *data)
{
    struct ALL_DATA *all_data = (struct ALL_DATA *) data;

    struct VidState *s = all_data->s;
    struct paRecordData *pdata = all_data->pdata;
    struct GLOBAL *global = all_data->global;
    struct focusData *AFdata = all_data->AFdata;
    struct vdIn *videoIn = all_data->videoIn;

    struct particle* particles = NULL; //for the particles video effect

    SDL_Event event;
    /*the main SDL surface*/
    SDL_Surface *pscreen = NULL;
    SDL_Overlay *overlay = NULL;
    SDL_Rect drect;

    /* local copies - refreshed only on resolution change */
    int width = global->width;
    int height = global->height;
    int format = global->format;

    SAMPLE vuPeak[2];  // The maximum vuLevel seen recently
    int vuPeakFreeze[2]; // The vuPeak values will be frozen for this many frames.
    vuPeak[0] = vuPeak[1] = 0;
    vuPeakFreeze[0] = vuPeakFreeze[1] = 0;

    BYTE *p = NULL; /* points into the overlay pixel plane */

    Control *focus_control = NULL;
    int last_focus = 0;

    if (global->AFcontrol)
    {
        focus_control = get_ctrl_by_id(s->control_list, AFdata->id);
        get_ctrl(videoIn->fd, s->control_list, AFdata->id, all_data);
        last_focus = focus_control->value;
        /*make sure we wait for focus to settle on first check*/
        if (last_focus < 0) last_focus = AFdata->f_max;
    }

    gboolean capVid = FALSE;
    gboolean signalquit = FALSE;

    /*------------------------------ SDL init video ---------------------*/
    if(!global->no_display)
    {
        overlay = video_init(data, &(pscreen));

        if(overlay == NULL)
        {
            g_print("FATAL: Couldn't create yuv overlay - please disable hardware accelaration\n");
            signalquit = TRUE; /*exit video thread*/
        }
        else
        {
            p = (unsigned char *) overlay->pixels[0];

            drect.x = 0;
            drect.y = 0;
            drect.w = pscreen->w;
            drect.h = pscreen->h;
        }
    }

    /* main loop: grab -> filter -> capture -> display, until signalquit is set */
    while (!signalquit)
    {
        /* sample the shared flags once per iteration */
        __LOCK_MUTEX(__VMUTEX);
            capVid = videoIn->capVid;
            signalquit = videoIn->signalquit;
        __UNLOCK_MUTEX(__VMUTEX);

        /*-------------------------- Grab Frame ----------------------------------*/
        if (uvcGrab(videoIn, format, width, height, &global->fps, &global->fps_num) < 0)
        {
            g_printerr("Error grabbing image \n");
            continue;
        }
        else
        {
            /* a zero timestamp marks an invalid frame - skip it */
            if(!videoIn->timestamp)
            {
                global->skip_n++; //skip this frame
            }

            if(capVid)
            {
                if(global->framecount < 1)
                {
					/*reset video start time to first frame capture time */
					global->Vidstarttime = videoIn->timestamp;
					/** set current time for audio ts(0) reference (MONOTONIC)
					 *  only used if we have no audio capture before video
					 */
					__LOCK_MUTEX(__AMUTEX);
						pdata->ts_ref = ns_time_monotonic();
					__UNLOCK_MUTEX(__AMUTEX);
					//printf("video ts ref: %llu audio ts_ ref: %llu\n",global->Vidstarttime, pdata->ts_ref);
					global->v_ts = 0;
                }
                else
                {
                    /* video pts is relative to the first captured frame */
                    global->v_ts = videoIn->timestamp - global->Vidstarttime;
                    /*always use the last frame time stamp for video stop time*/
                    global->Vidstoptime = videoIn->timestamp;
                }
            }

            if (global->FpsCount && !global->no_display)
            {/* sets fps count in window title bar */
                global->frmCount++;
                if (global->DispFps>0)
                { /*set every 2 sec*/
                    g_snprintf(global->WVcaption,24,"GUVCVideo - %3.2f fps",global->DispFps);
                    SDL_WM_SetCaption(global->WVcaption, NULL);

                    global->frmCount=0;/*resets*/
                    global->DispFps=0;
                }
            }

            /*---------------- autofocus control ------------------*/

            if (global->AFcontrol && (global->autofocus || AFdata->setFocus))
            { /*AFdata = NULL if no focus control*/
                if (AFdata->focus < 0)
                {
                    /*starting autofocus*/
                    AFdata->focus = AFdata->left; /*start left*/
                    focus_control->value = AFdata->focus;
                    if (set_ctrl (videoIn->fd, s->control_list, AFdata->id) != 0)
                        g_printerr("ERROR: couldn't set focus to %d\n", AFdata->focus);
                    /*number of frames until focus is stable*/
                    /*1.4 ms focus time - every 1 step*/
                    AFdata->focus_wait = (int) abs(AFdata->focus-last_focus)*1.4/(1000/global->fps)+1;
                    last_focus = AFdata->focus;
                }
                else
                {
                    if (AFdata->focus_wait == 0)
                    {
                        /* focus has settled - evaluate sharpness and pick next position */
                        AFdata->sharpness=getSharpness (videoIn->framebuffer, width, height, 5);
                        if (global->debug)
                            g_print("sharp=%d focus_sharp=%d foc=%d right=%d left=%d ind=%d flag=%d\n",
                                AFdata->sharpness,AFdata->focus_sharpness,
                                AFdata->focus, AFdata->right, AFdata->left,
                                AFdata->ind, AFdata->flag);
                        AFdata->focus=getFocusVal (AFdata);
                        if ((AFdata->focus != last_focus))
                        {
                            focus_control->value = AFdata->focus;
                            if (set_ctrl (videoIn->fd, s->control_list, AFdata->id) != 0)
                                g_printerr("ERROR: couldn't set focus to %d\n",
                                    AFdata->focus);
                            /*number of frames until focus is stable*/
                            /*1.4 ms focus time - every 1 step*/
                            AFdata->focus_wait = (int) abs(AFdata->focus-last_focus)*1.4/(1000/global->fps)+1;
                        }
                        last_focus = AFdata->focus;
                    }
                    else
                    {
                        AFdata->focus_wait--;
                        if (global->debug) g_print("Wait Frame: %d\n",AFdata->focus_wait);
                    }
                }
            }
        }
        /*------------------------- Filter Frame ---------------------------------*/
        __LOCK_MUTEX(__GMUTEX);
        if(global->Frame_Flags>0)
        {
            if((global->Frame_Flags & YUV_PARTICLES)==YUV_PARTICLES)
                particles = particles_effect(videoIn->framebuffer, width, height, 20, 4, particles);

            if((global->Frame_Flags & YUV_MIRROR)==YUV_MIRROR)
                yuyv_mirror(videoIn->framebuffer, width, height);

            if((global->Frame_Flags & YUV_UPTURN)==YUV_UPTURN)
                yuyv_upturn(videoIn->framebuffer, width, height);

            if((global->Frame_Flags & YUV_NEGATE)==YUV_NEGATE)
                yuyv_negative (videoIn->framebuffer, width, height);

            if((global->Frame_Flags & YUV_MONOCR)==YUV_MONOCR)
                yuyv_monochrome (videoIn->framebuffer, width, height);

            if((global->Frame_Flags & YUV_PIECES)==YUV_PIECES)
                pieces (videoIn->framebuffer, width, height, 16 );

        }
        __UNLOCK_MUTEX(__GMUTEX);
        /*-------------------------capture Image----------------------------------*/
        if (videoIn->capImage)
        {
            /*
             * format and resolution can change(enabled) while capturing the frame
             * but you would need to be speedy gonzalez to press two buttons
             * at almost the same time :D
             */
            int ret = 0;
            if((ret=store_picture(all_data)) < 0)
                g_printerr("saved image to:%s ...Failed \n",videoIn->ImageFName);
            else if (!ret && global->debug) g_print("saved image to:%s ...OK \n",videoIn->ImageFName);

            videoIn->capImage=FALSE;
        }
        /*---------------------------capture Video---------------------------------*/
        if (capVid && !(global->skip_n))
        {
            __LOCK_MUTEX(__VMUTEX);
                if(videoIn->VidCapStop) videoIn->VidCapStop = FALSE;
            __UNLOCK_MUTEX(__VMUTEX);
            int res=0;

			/* format and resolution don't change(disabled) while capturing video
			 * store_video_frame may sleep if needed to avoid buffer overrun
			 */
            if((res=store_video_frame(all_data))<0) g_printerr("WARNING: droped frame (%i)\n",res);

        } /*video and audio capture have stopped */
        else
        {
            __LOCK_MUTEX(__VMUTEX);
                if(!(videoIn->VidCapStop)) videoIn->VidCapStop=TRUE;
            __UNLOCK_MUTEX(__VMUTEX);
        }

        /* decrease skip frame count */
        if (global->skip_n > 0)
        {
            if (global->debug && capVid) g_print("skiping frame %d...\n", global->skip_n);
            global->skip_n--;
        }

        /* keep the audio skip count in sync with video */
        __LOCK_MUTEX( __AMUTEX );
            if (global->Sound_enable && capVid) pdata->skip_n = global->skip_n;
        __UNLOCK_MUTEX( __AMUTEX );

        /*------------------------- Display Frame --------------------------------*/
        if(!global->no_display)
        {
			if (global->osdFlags && pdata->audio_buff[0])
			{
				draw_vu_meter(width, height, vuPeak, vuPeakFreeze, data);
			}
            SDL_LockYUVOverlay(overlay);
            /* yuyv: 2 bytes per pixel */
            memcpy(p, videoIn->framebuffer, width * height * 2);
            SDL_UnlockYUVOverlay(overlay);
            SDL_DisplayYUVOverlay(overlay, &drect);

            /*------------------------- Read Key events ------------------------------*/
            /* Poll for events */
            while( SDL_PollEvent(&event) )
            {
                //printf("event type:%i  event key:%i\n", event.type, event.key.keysym.scancode);
                if(event.type==SDL_KEYDOWN)
                {
                    if (videoIn->PanTilt)
                    {
                        switch( event.key.keysym.sym )
                        {
                            /* Keyboard event */
                            /* Pass the event data onto PrintKeyInfo() */
                            case SDLK_DOWN:
                                /*Tilt Down*/
                                uvcPanTilt (videoIn->fd, s->control_list, 0, 1);
                                break;

                            case SDLK_UP:
                                /*Tilt UP*/
                                uvcPanTilt (videoIn->fd, s->control_list, 0, -1);
                                break;

                            case SDLK_LEFT:
                                /*Pan Left*/
                                uvcPanTilt (videoIn->fd, s->control_list, 1, 1);
                                break;

                            case SDLK_RIGHT:
                                /*Pan Right*/
                                uvcPanTilt (videoIn->fd, s->control_list, 1, -1);
                                break;
                            default:
                                break;
                        }
                    }
                    switch( event.key.keysym.scancode )
                    {
                        case 220: /*webcam button*/
                            //gdk_threads_enter();
                            /* default_action selects image (0) or video capture */
                           	if (all_data->global->default_action == 0)
                           		g_main_context_invoke(NULL, image_capture_callback, (gpointer) all_data);
							else
                            	g_main_context_invoke(NULL, video_capture_callback, (gpointer) all_data);
                       
                            break;
                    }
                    switch( event.key.keysym.sym )
                    {
                        case SDLK_q:
                            //shutDown
                            g_timeout_add(200, shutd_timer, all_data);
                            g_print("q pressed - Quiting...\n");
                            break;
                        case SDLK_SPACE:
							{
                            if(global->AFcontrol > 0)
                                setfocus_clicked(NULL, all_data);
							}
                            break;
                        case SDLK_i:
							g_main_context_invoke(NULL, image_capture_callback, (gpointer) all_data);
							break;
						case SDLK_v:
							g_main_context_invoke(NULL, video_capture_callback, (gpointer) all_data);
							break;
                        default:
                            break;
                    }
                }
                if(event.type==SDL_VIDEORESIZE)
                {
                    pscreen =
                        SDL_SetVideoMode(event.resize.w,
                                 event.resize.h,
                                 global->bpp,
                                 SDL_VIDEO_Flags);
                    drect.w = event.resize.w;
                    drect.h = event.resize.h;
                }
                if(event.type==SDL_QUIT)
                {
                    //shutDown
                    g_timeout_add(200, shutd_timer, all_data);
                }
            }
        }
        /* if set make the thread sleep - default no sleep (full throttle)*/
        if(global->vid_sleep) sleep_ms(global->vid_sleep);

        /*------------------------------------------*/
        /*  restart video (new resolution/format)   */
        /*------------------------------------------*/
        if (global->change_res)
        {
            g_print("setting new resolution (%d x %d)\n", global->width, global->height);
            /*clean up */

            if(particles) g_free(particles);
            particles = NULL;

            if (global->debug) g_print("cleaning buffer allocations\n");
            fflush(NULL);//flush all output buffers

            if(!global->no_display)
            {
                SDL_FreeYUVOverlay(overlay);
                overlay = NULL;
            }
            /*init device*/
            restart_v4l2(videoIn, global);
            /*set new resolution for video thread*/
            width = global->width;
            height = global->height;
            format = global->format;
            /* restart SDL with new values*/
            if(!global->no_display)
            {
                overlay = video_init(data, &(pscreen));
                if(overlay == NULL)
                {
                    g_print("FATAL: Couldn't create yuv overlay - please disable hardware accelaration\n");
                    signalquit = TRUE; /*exit video thread*/
                }
                else
                {
                    if (global->debug) g_print("yuv overlay created (%ix%i).\n", overlay->w, overlay->h);
                    p = (unsigned char *) overlay->pixels[0];

                    drect.x = 0;
                    drect.y = 0;
                    drect.w = pscreen->w;
                    drect.h = pscreen->h;

                    global->change_res = FALSE;
                }
            }
            else global->change_res = FALSE;
        }

    }/*loop end*/

    __LOCK_MUTEX(__VMUTEX);
        capVid = videoIn->capVid;
    __UNLOCK_MUTEX(__VMUTEX);
    /*check if thread exited while in Video capture mode*/
    if (capVid)
    {
        /*stop capture*/
        if (global->debug) g_print("stoping Video capture\n");
        //global->Vidstoptime = ns_time_monotonic(); /*this is set in IO thread*/
        videoIn->VidCapStop=TRUE;
        capVid = FALSE;
        __LOCK_MUTEX(__VMUTEX);
            videoIn->capVid = capVid;
        __UNLOCK_MUTEX(__VMUTEX);
        __LOCK_MUTEX(__AMUTEX);
            pdata->capVid = capVid;
        __UNLOCK_MUTEX(__AMUTEX);
        /*join IO thread*/
        if (global->debug) g_print("Shuting Down IO Thread\n");
        __THREAD_JOIN( all_data->IO_thread );
        if (global->debug) g_print("IO Thread finished\n");
    }

    if (global->debug) g_print("Thread terminated...\n");
    p = NULL;
    if(particles) g_free(particles);
    particles=NULL;

    if (global->debug) g_print("cleaning Thread allocations: 100%%\n");
    fflush(NULL);//flush all output buffers

    if(!global->no_display)
    {
        if(overlay)
            SDL_FreeYUVOverlay(overlay);
        //SDL_FreeSurface(pscreen);

        SDL_Quit();
    }

    if (global->debug) g_print("Video thread completed\n");

    global = NULL;
    AFdata = NULL;
    videoIn = NULL;
    return ((void *) 0);
}
/*
 * open the output video file and flag video (and audio) capture start
 * args:
 *   all_data - pointer to all data structures
 * returns: 0 on success, -1 on failure (capture flags are cleared)
 */
static int initVideoFile(struct ALL_DATA *all_data)
{
	struct GWIDGET *gwidget = all_data->gwidget;
	struct paRecordData *pdata = all_data->pdata;
	struct GLOBAL *global = all_data->global;
	struct vdIn *videoIn = all_data->videoIn;
	struct VideoFormatData *videoF = all_data->videoF;
	
	const char *compression= get_vid4cc(global->VidCodec);
	videoF->vcodec = get_vcodec_id(global->VidCodec);
	videoF->acodec = CODEC_ID_NONE;
	videoF->keyframe = 0;
	int ret = 0;
	
	__LOCK_MUTEX(__VMUTEX);
		gboolean capVid = videoIn->capVid;
	__UNLOCK_MUTEX(__VMUTEX);
	
	/*alloc video ring buffer*/
	alloc_videoBuff(all_data);
	
	switch (global->VidFormat)
	{
		case AVI_FORMAT:
			/* (re)allocate the avi output context */
			if(videoF->AviOut != NULL)
			{
				g_free(videoF->AviOut);
				videoF->AviOut = NULL;
			}
			videoF->AviOut = g_new0(struct avi_t, 1);
			
			if(AVI_open_output_file(videoF->AviOut, videoIn->VidFName)<0) 
			{
				g_printerr("Error: Couldn't create Video.\n");
				capVid = FALSE; /*don't start video capture*/
				__LOCK_MUTEX(__VMUTEX);
					videoIn->capVid = capVid;
				__UNLOCK_MUTEX(__VMUTEX);
				pdata->capVid = capVid;
				return(-1);
			} 
			else 
			{
				AVI_set_video(videoF->AviOut, global->width, global->height, 
					global->fps, compression);
		  
				/* start video capture*/
				capVid = TRUE;
				__LOCK_MUTEX(__VMUTEX);
					videoIn->capVid = capVid;
				__UNLOCK_MUTEX(__VMUTEX);
				pdata->capVid = capVid;
				
				/* start sound capture*/
				if(global->Sound_enable > 0) 
				{
					/*get channels and sample rate*/
					set_sound(global, pdata);
					/*set audio header for avi*/
					AVI_set_audio(videoF->AviOut, global->Sound_NumChan, 
						global->Sound_SampRate,
						get_aud_bit_rate(get_ind_by4cc(global->Sound_Format)), /*bit rate*/
						get_aud_bits(get_ind_by4cc(global->Sound_Format)),     /*sample size - only used for PCM*/
						global->Sound_Format);
					/* Initialize sound (open stream)*/
					if(init_sound (pdata)) 
					{
						g_printerr("Audio initialization error\n");
						global->Sound_enable=0;
						if(!(global->no_display))
						{
							gdk_threads_enter();
							gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(gwidget->SndEnable),0);
							gdk_flush();
							gdk_threads_leave();
						}
						else
							capture_vid(NULL, all_data);
					} 
				}
			}
			break;
			
		/* WebM is matroska (MKV) with restricted codecs (+ WebVTT for META
		 * data capture, still TODO) - both formats share exactly the same
		 * initialization, so handle them in a single case instead of
		 * duplicating the code */
		case WEBM_FORMAT:
		case MKV_FORMAT:
			if(global->Sound_enable > 0) 
			{
				/*set channels, sample rate and allocate buffers*/
				set_sound(global, pdata);
			}
			if(init_FormatContext((void *) all_data)<0)
			{
				/* container init failed: make sure capture stays off */
				capVid = FALSE;
				__LOCK_MUTEX(__VMUTEX);
					videoIn->capVid = capVid;
				__UNLOCK_MUTEX(__VMUTEX);
				pdata->capVid = capVid;
				return (-1);
			}
			
			/*reset stream timestamps*/
			videoF->old_apts = 0;
			videoF->apts = 0;
			videoF->vpts = 0;
			
			/* start video capture*/
			capVid = TRUE;
			__LOCK_MUTEX(__VMUTEX);
				videoIn->capVid = capVid;
			__UNLOCK_MUTEX(__VMUTEX);
			pdata->capVid = capVid;
			
			/* start sound capture*/
			if(global->Sound_enable > 0) 
			{
				/* Initialize sound (open stream)*/
				if(init_sound (pdata)) 
				{
					g_printerr("Audio initialization error\n");
					global->Sound_enable=0;
					if(!(global->no_display))
					{
						/*will this work with the checkbox disabled?*/
						gdk_threads_enter();
						gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(gwidget->SndEnable),0);
						gdk_flush();
						gdk_threads_leave();
					}
					else
						capture_vid(NULL, all_data);
				}
			}
			break;
			
		default:
			
			break;
	}
	
	return (ret);
}
/*
 * compress and write one audio frame to the output file,
 * splitting the AVI to a new file when its size limit is reached
 * args:
 *   all_data - pointer to all data structures
 * returns: 0
 *   NOTE(review): compress_audio_frame errors are stored in ret but the
 *   function always returns 0 - confirm callers don't need the real code
 */
static int write_audio_frame (struct ALL_DATA *all_data)
{
	struct paRecordData *pdata = all_data->pdata;
	struct GLOBAL *global = all_data->global;
	struct VideoFormatData *videoF = all_data->videoF;
	struct GWIDGET *gwidget = all_data->gwidget;
	
	int ret =0;
	__THREAD_TYPE press_butt_thread;
	
	switch (global->VidFormat)
	{
		case AVI_FORMAT:
			if(!(global->VidButtPress)) //if this is set AVI reached it's limit size
				ret = compress_audio_frame(all_data);
		
			if (ret) 
			{	
				if (AVI_getErrno () == AVI_ERR_SIZELIM) 
				{
					/* only the first writer to hit the limit starts the split */
					if (!(global->VidButtPress))
					{
						global->VidButtPress = TRUE;

						/*avi file limit reached - must end capture close file and start new one*/
						if( __THREAD_CREATE(&press_butt_thread, split_avi, all_data))  //should be created detachable
						{
							/*thread failed to start - stop video capture   */
							/*can't restart since we need IO thread to stop */
							g_printerr("split avi: thread creation failed\n");
							
							printf("stoping video capture\n");
							if(!(global->no_display))
                            {
							    gdk_threads_enter();
							    gtk_toggle_button_set_active (GTK_TOGGLE_BUTTON(gwidget->CapVidButt), FALSE);
							    gdk_flush();
							    gdk_threads_leave();
							}
							else
							    capture_vid(NULL, all_data);
						}
					
						//split_avi(all_data);/*blocking call*/
						g_print("AVI file size limit reached - restarted capture on new file\n");
					}
				} 
				else 
				{
					g_printerr ("write error on avi out \n");
				}
					
			}
			break;
		case MKV_FORMAT:
			__LOCK_MUTEX( __AMUTEX ); //why do we need this ???
				/*set pts*/
				videoF->apts = pdata->audio_buff[pdata->br_ind][pdata->r_ind].time_stamp;
				/*write audio chunk*/
				ret = compress_audio_frame(all_data);
			__UNLOCK_MUTEX( __AMUTEX );
			break;
			
		default:
			
			break;
	}
	return (0);
}
/*
 * Stop video/audio capture and finalize the current video file.
 *
 * Clears both capture flags (each under its own mutex), waits for the video
 * thread to acknowledge the stop, frees the video ring buffer, closes the
 * container (AVI or MKV) and resets the capture timing counters.
 *
 * @param all_data  global application state
 */
static void closeVideoFile(struct ALL_DATA *all_data)
{
	struct GLOBAL *global = all_data->global;
	struct vdIn *videoIn = all_data->videoIn;
	struct paRecordData *pdata = all_data->pdata;

	/* flag both capture threads to stop recording frames */
	__LOCK_MUTEX(__VMUTEX);
		videoIn->capVid = FALSE;
	__UNLOCK_MUTEX(__VMUTEX);
	__LOCK_MUTEX(__AMUTEX);
		pdata->capVid = FALSE;
	__UNLOCK_MUTEX(__AMUTEX);

	/* wait for the video thread to signal it stopped: poll
	 * videoIn->VidCapStop every 10 ms, up to 200 times */
	int stall = wait_ms(&(videoIn->VidCapStop), TRUE, __VMUTEX, 10, 200);
	if (stall <= 0)
	{
		g_printerr("video capture stall on exit(%d) - timeout\n",
			videoIn->VidCapStop);
	}

	/* release the video ring buffer and reset its indexes */
	__LOCK_MUTEX(__GMUTEX);
		global->r_ind = 0;
		global->w_ind = 0;
		if (global->videoBuff != NULL)
		{
			int idx;
			for (idx = 0; idx < global->video_buff_size; idx++)
			{
				g_free(global->videoBuff[idx].frame);
				global->videoBuff[idx].frame = NULL;
			}
			g_free(global->videoBuff);
			global->videoBuff = NULL;
		}
	__UNLOCK_MUTEX(__GMUTEX);

	/* finalize the container for the active format */
	if (global->VidFormat == AVI_FORMAT)
	{
		aviClose(all_data);
	}
	else if (global->VidFormat == MKV_FORMAT)
	{
		if (clean_FormatContext((void *) all_data))
			g_printerr("matroska close returned a error\n");
	}

	/* reset capture timing and frame counters */
	global->Vidstoptime = 0;
	global->Vidstarttime = 0;
	global->framecount = 0;
}
Beispiel #13
0
static int initVideoFile(struct ALL_DATA *all_data, void* lav_data)
{
	//struct GWIDGET *gwidget = all_data->gwidget;
	struct paRecordData *pdata = all_data->pdata;
	struct GLOBAL *global = all_data->global;
	struct vdIn *videoIn = all_data->videoIn;
	struct VideoFormatData *videoF = all_data->videoF;
	
	io_Stream *vstream, *astream;
	struct lavcData **lavc_data = (struct lavcData **) lav_data;

	videoF->vcodec = get_vcodec_id(global->VidCodec);
	videoF->acodec = get_acodec_id(global->AudCodec);
	videoF->keyframe = 0;
	int ret = 0;

	__LOCK_MUTEX(__VMUTEX);
		gboolean capVid = videoIn->capVid;
	__UNLOCK_MUTEX(__VMUTEX);

	/*alloc video ring buffer*/
	alloc_videoBuff(all_data);

	if(isLavcCodec(global->VidCodec))
		*lavc_data = init_lavc(global->width, global->height, global->fps_num, global->fps, global->VidCodec);


	switch (global->VidFormat)
	{
		case AVI_FORMAT:
			if(videoF->avi != NULL)
			{
				avi_destroy_context(videoF->avi);
				videoF->avi = NULL;
			}
			videoF->avi = avi_create_context(videoIn->VidFName);

			if(!videoF->avi)
			{
				g_printerr("Error: Couldn't create AVI context.\n");
				capVid = FALSE; /*don't start video capture*/
				__LOCK_MUTEX(__VMUTEX);
					videoIn->capVid = capVid;
				__UNLOCK_MUTEX(__VMUTEX);
				pdata->capVid = capVid;
				return(-1);
			}

			vstream = avi_add_video_stream(videoF->avi,
								global->width,
								global->height,
								global->fps,
								videoF->vcodec,
								get_vid4cc(global->VidCodec));

			if(videoF->vcodec == AV_CODEC_ID_THEORA)
			{
				vstream->extra_data = (BYTE*) (*lavc_data)->codec_context->extradata;
				vstream->extra_data_size = (*lavc_data)->codec_context->extradata_size;
			}

			if(global->Sound_enable > 0)
			{
				/*get channels and sample rate*/
				set_sound(global, pdata);

				/*sample size - only used for PCM*/
				int32_t a_bits = get_aud_bits(global->AudCodec);
				/*bit rate (compressed formats)*/
				int32_t b_rate = get_aud_bit_rate(global->AudCodec);

				astream = avi_add_audio_stream(videoF->avi,
								global->Sound_NumChan,
								global->Sound_SampRate,
								a_bits,
								b_rate,
								videoF->acodec,
								global->Sound_Format);

				if(videoF->acodec == AV_CODEC_ID_VORBIS)
				{
						astream->extra_data = (BYTE*) pdata->lavc_data->codec_context->extradata;
						astream->extra_data_size = pdata->lavc_data->codec_context->extradata_size;
				}

			}
			/* add first riff header */
			avi_add_new_riff(videoF->avi);

			/* start video capture*/
			capVid = TRUE;
			__LOCK_MUTEX(__VMUTEX);
				videoIn->capVid = capVid;
			__UNLOCK_MUTEX(__VMUTEX);
			pdata->capVid = capVid;

			/* start sound capture*/
			if(global->Sound_enable > 0 && init_sound (pdata))
			{
				//FIXME: enable capture button
				g_printerr("Audio initialization error\n");
				global->Sound_enable=0;
			}
			break;

		case WEBM_FORMAT:
		case MKV_FORMAT:
			if(videoF->mkv != NULL)
			{
				mkv_destroy_context(videoF->mkv);
				videoF->mkv = NULL;
			}
			videoF->mkv = mkv_create_context(videoIn->VidFName, global->VidFormat);

			if(!videoF->mkv)
			{
				g_printerr("Error: Couldn't create MKV context.\n");
				capVid = FALSE; /*don't start video capture*/
				__LOCK_MUTEX(__VMUTEX);
					videoIn->capVid = capVid;
				__UNLOCK_MUTEX(__VMUTEX);
				pdata->capVid = capVid;
				return(-1);
			}

			vstream = mkv_add_video_stream(videoF->mkv,
									global->width,
									global->height,
									videoF->vcodec);


			vstream->extra_data_size = set_mkvCodecPriv(global->VidCodec, global->width, global->height, *lavc_data);
			if(vstream->extra_data_size > 0)
				vstream->extra_data = get_mkvCodecPriv(global->VidCodec);

			if(global->Sound_enable > 0)
			{
				/*get channels and sample rate*/
				set_sound(global, pdata);

				/*sample size - only used for PCM*/
				int32_t a_bits = get_aud_bits(global->AudCodec);
				/*bit rate (compressed formats)*/
				int32_t b_rate = get_aud_bit_rate(global->AudCodec);

				astream = mkv_add_audio_stream(
								videoF->mkv,
								pdata->channels,
								pdata->samprate,
								a_bits,
								b_rate,
								videoF->acodec,
								global->Sound_Format);

				astream->extra_data_size = set_mkvACodecPriv(
								global->AudCodec,
								pdata->samprate,
								pdata->channels,
								pdata->lavc_data);

				if(astream->extra_data_size > 0)
					astream->extra_data = get_mkvACodecPriv(global->AudCodec);


			}

			/** write the file header */
			mkv_write_header(videoF->mkv);

			/* start video capture*/
			capVid = TRUE;
			__LOCK_MUTEX(__VMUTEX);
				videoIn->capVid = capVid;
			__UNLOCK_MUTEX(__VMUTEX);
			pdata->capVid = capVid;

			/* start sound capture*/
			if(global->Sound_enable > 0 && init_sound (pdata))
			{
				//FIXME: enable capture button
				g_printerr("Audio initialization error\n");
				global->Sound_enable=0;
			}
			break;

		default:

			break;
	}

	return (ret);
}
Beispiel #14
0
/*
 * Move a completed block of recorded samples into the audio ring buffer.
 *
 * Called from the sound callback after each sample is appended to
 * pdata->recordedSamples. Does nothing until sampleIndex reaches
 * aud_numSamples; then it computes the frame timestamp (a_ts), tracks
 * audio drift against the capture timestamp 'ts', and copies the block
 * into audio_buff[bw_ind][w_ind] if that ring buffer is READY or IN_USE.
 *
 * @param pdata  audio record data (buffers, indexes, timestamps)
 * @param ts     monotonic timestamp (ns) of the first sample of the
 *               current callback buffer
 * @return 0 on success (or nothing to do), -1 if the block was dropped
 *         because the target ring buffer was not available
 */
int fill_audio_buffer(struct paRecordData *pdata, UINT64 ts)
{
	int ret =0;
	UINT64 buffer_length;

	if(pdata->sampleIndex >= pdata->aud_numSamples)
	{
		/* duration of one full audio block in nanoseconds */
		buffer_length = (G_NSEC_PER_SEC * pdata->aud_numSamples)/(pdata->samprate * pdata->channels);

		/*first frame time stamp*/
		if(pdata->a_ts < 0)
		{
			/* if sound begin time > first video frame ts then sync audio to video
			 * else set audio ts to aprox. the video ts */
			if((pdata->ts_ref > 0) && (pdata->ts_ref < pdata->snd_begintime))
				pdata->a_ts = pdata->snd_begintime - pdata->ts_ref;
			else
				pdata->a_ts = 0;
		}
		else /*increment time stamp for audio frame*/
			pdata->a_ts += buffer_length;

		/* check audio drift through timestamps */
		if (ts > pdata->snd_begintime)
			ts -= pdata->snd_begintime;
		else
			ts = 0;
		if (ts > buffer_length)
			ts -= buffer_length;
		else
			ts = 0;
		pdata->ts_drift = ts - pdata->a_ts;

		pdata->sampleIndex = 0; /*reset*/

		/* snapshot the current ring buffer's state flag under the mutex */
		__LOCK_MUTEX( __AMUTEX );
			int flag = pdata->audio_buff_flag[pdata->bw_ind];
		__UNLOCK_MUTEX( __AMUTEX );

		if(  flag == AUD_READY || flag == AUD_IN_USE )
		{
			if(flag == AUD_READY)
			{
				/*flag as IN_USE*/
				__LOCK_MUTEX( __AMUTEX );
					pdata->audio_buff_flag[pdata->bw_ind] = AUD_IN_USE;
				__UNLOCK_MUTEX( __AMUTEX );
			}
			/*copy data to audio buffer*/
			memcpy(pdata->audio_buff[pdata->bw_ind][pdata->w_ind].frame, pdata->recordedSamples, pdata->aud_numBytes);
			pdata->audio_buff[pdata->bw_ind][pdata->w_ind].time_stamp = pdata->a_ts + pdata->delay;
			pdata->audio_buff[pdata->bw_ind][pdata->w_ind].used = TRUE;

			/* remember last written slot (consumed elsewhere on flush) */
			pdata->blast_ind = pdata->bw_ind;
			pdata->last_ind  = pdata->w_ind;

			/*doesn't need locking as it's only used in the callback*/
			NEXT_IND(pdata->w_ind, AUDBUFF_SIZE);

			if(pdata->w_ind == 0)
			{
				/* reached end of current ring buffer
				 * flag it as AUD_PROCESS
				 * move to next one and flag it as AUD_IN_USE (if READY)
				 */
				pdata->audio_buff_flag[pdata->bw_ind] = AUD_PROCESS;

				__LOCK_MUTEX( __AMUTEX );
					NEXT_IND(pdata->bw_ind, AUDBUFF_NUM);

					if(pdata->audio_buff_flag[pdata->bw_ind] != AUD_READY)
					{
						/* consumer is behind: keep writing anyway (flag
						 * unchanged) - data in this buffer may be clobbered */
						g_printf("AUDIO: next buffer is not yet ready\n");
					}
					else
					{
						pdata->audio_buff_flag[pdata->bw_ind] = AUD_IN_USE;
					}
				__UNLOCK_MUTEX( __AMUTEX );

			}
		}
		else
		{
			/*drop audio data*/
			ret = -1;
			g_printerr("AUDIO: dropping audio data\n");
		}
	}

	return ret;
}
Beispiel #15
0
/*
 * Configure audio capture parameters for a new recording.
 *
 * Resolves sample rate and channel count (device defaults when the user
 * selected "auto"), sizes the sample/output buffers for the chosen audio
 * codec (initializing the lavc audio encoder when needed), resets all ring
 * buffer indexes and timestamps, and computes the audio delay used to sync
 * audio to video.
 *
 * Fix: pdata->delay is now reset to 0 at the start of the delay computation.
 * Previously it was only conditionally assigned before 'delay += Sound_delay',
 * so the predefined delay accumulated on every new capture.
 *
 * @param global  global settings (sound device, codec, fps, delays)
 * @param pdata   audio record data to initialize (buffers are set up later
 *                by init_sound())
 */
void
set_sound (struct GLOBAL *global, struct paRecordData* pdata)
{
	if(global->Sound_SampRateInd==0)
		global->Sound_SampRate=global->Sound_IndexDev[global->Sound_UseDev].samprate;/*using default*/

	if(global->Sound_NumChanInd==0)
	{
		/*using default if channels <3 or stereo(2) otherwise*/
		global->Sound_NumChan=(global->Sound_IndexDev[global->Sound_UseDev].chan < 3) ?
			global->Sound_IndexDev[global->Sound_UseDev].chan : 2;
	}

	pdata->api = global->Sound_API;
	pdata->audio_buff[0] = NULL;
	pdata->recordedSamples = NULL;

	pdata->samprate = global->Sound_SampRate;
	pdata->channels = global->Sound_NumChan;
	__LOCK_MUTEX( __AMUTEX );
		pdata->skip_n = global->skip_n; /*initial video frames to skip*/
	__UNLOCK_MUTEX( __AMUTEX );
	if(global->debug)
	{
		g_print("using audio codec: 0x%04x\n",global->Sound_Format );
		g_print("\tchannels: %d  samplerate: %d\n", pdata->channels, pdata->samprate);
	}

	switch (global->Sound_Format)
	{
		case PA_FOURCC: /* raw PCM */
		{
			pdata->aud_numSamples = MPG_NUM_SAMP * pdata->channels;
			//outbuffer size in bytes (max value is for pcm 2 bytes per sample)
			pdata->outbuf_size = pdata->aud_numSamples * 2; //a good value is 240000;
			break;
		}
		default: /* compressed formats go through lavc */
		{
			//outbuffer size in bytes (max value is for pcm 2 bytes per sample)
			pdata->outbuf_size = MPG_NUM_SAMP * pdata->channels * 2; //a good value is 240000;

			/*initialize lavc data*/
			if(!(pdata->lavc_data))
			{
				pdata->lavc_data = init_lavc_audio(pdata, global->AudCodec);
			}
			/*use lavc audio codec frame size to determine samples*/
			pdata->aud_numSamples = (pdata->lavc_data)->codec_context->frame_size * pdata->channels;
			if(pdata->aud_numSamples <= 0)
			{
				pdata->aud_numSamples = MPG_NUM_SAMP * pdata->channels;
			}
			break;
		}
	}

	pdata->aud_numBytes = pdata->aud_numSamples * sizeof(SAMPLE);
	pdata->input_type = PA_SAMPLE_TYPE;
	pdata->mp2Buff = NULL;

	pdata->sampleIndex = 0;

	fprintf(stderr, "AUDIO: samples(%d)\n", pdata->aud_numSamples);
	pdata->flush = 0;
	pdata->a_ts= -1; /* negative marks "first audio frame not stamped yet" */
	pdata->ts_ref = 0;

	pdata->stream = NULL;

	/* reset delay before recomputing it - otherwise the '+=' below would
	 * accumulate Sound_delay across successive captures */
	pdata->delay = 0;
	/* some drivers, e.g. GSPCA, don't set fps( guvcview sets it to 1/1 )
	 * so we can't obtain the proper delay for H.264 (2 video frames)
	 * if set, use the codec properties fps value */
	int fps_num = 1;
	int fps_den = get_enc_fps(global->VidCodec); /*if set use encoder fps */
	if(!fps_den) /*if not set use video combobox fps*/
	{
		fps_num = global->fps_num;
		fps_den = global->fps;
	}
	if((get_vcodec_id(global->VidCodec) == CODEC_ID_H264) && (fps_den >= 5))
		pdata->delay = (UINT64) 2*(fps_num * G_NSEC_PER_SEC / fps_den); /*2 frame delay in nanosec*/
	pdata->delay += global->Sound_delay; /*add predefined delay - def = 0*/

	/*reset the indexes*/
	pdata->r_ind     = 0;
	pdata->w_ind     = 0;
	pdata->bw_ind    = 0;
	pdata->br_ind    = 0;
	pdata->blast_ind = 0;
	pdata->last_ind  = 0;
	/*buffer for video PCM 16 bits*/
	pdata->pcm_sndBuff=NULL;


	/*set audio device id to use (portaudio)*/
	pdata->device_id = global->Sound_IndexDev[global->Sound_UseDev].id; /* input device */


	/*set audio device to use (pulseudio)*/
	strncpy(pdata->device_name, global->Sound_IndexDev[global->Sound_UseDev].name, 511);
	/* strncpy does not NUL-terminate when the source is >= 511 chars;
	 * the 511 bound implies device_name is 512 bytes - TODO confirm */
	pdata->device_name[511] = '\0';
}
Beispiel #16
0
/*--------------------------- sound callback ------------------------------*/
/*
 * Audio capture callback: copy numSamples interleaved samples from
 * inputBuffer into pdata->recordedSamples, stamping and flushing complete
 * blocks through fill_audio_buffer().
 *
 * While initial video frames are being skipped (skip_n > 0) no audio is
 * stored and snd_begintime keeps being reset.
 *
 * Fix: the skip_n stop path previously called __LOCK_MUTEX(__AMUTEX) twice
 * (the second call should have been UNLOCK), deadlocking the audio thread
 * when capture stopped during the skip phase.
 *
 * @param inputBuffer  interleaved samples from the driver (NULL = silence)
 * @param numSamples   total samples in inputBuffer (frames * channels)
 * @param userData     (struct paRecordData *) audio record data
 * @return 0 to keep capturing, -1 when capture has stopped
 */
int
record_sound ( const void *inputBuffer, unsigned long numSamples, void *userData )
{
	struct paRecordData *pdata = (struct paRecordData*)userData;

	/* snapshot shared state under the audio mutex */
	__LOCK_MUTEX( __AMUTEX );
		gboolean capVid = pdata->capVid;
		int channels = pdata->channels;
		int skip_n = pdata->skip_n;
	__UNLOCK_MUTEX( __AMUTEX );

	const SAMPLE *rptr = (const SAMPLE*) inputBuffer;
	unsigned long i; /* unsigned to match numSamples in the loop below */

	UINT64 numFrames = numSamples / channels;
	/* buffer ends at timestamp "now", calculate beginning timestamp */
	UINT64 nsec_per_frame = G_NSEC_PER_SEC / pdata->samprate;

	UINT64 ts = ns_time_monotonic() - numFrames * nsec_per_frame;

	if (skip_n > 0) /*skip audio while we're skipping video frames*/
	{
		if(capVid)
		{
			__LOCK_MUTEX( __AMUTEX );
				pdata->snd_begintime = ns_time_monotonic(); /*reset first time stamp*/
			__UNLOCK_MUTEX( __AMUTEX );
			return (0); /*still capturing*/
		}
		else
		{
			__LOCK_MUTEX( __AMUTEX );
				pdata->streaming=FALSE;
			__UNLOCK_MUTEX( __AMUTEX ); /* BUGFIX: was a second LOCK (deadlock) */
			return (-1); /*capture has stopped*/
		}
	}

	for( i=0; i<numSamples; i++ )
	{
		/* NULL inputBuffer means no data: record silence */
		pdata->recordedSamples[pdata->sampleIndex] = inputBuffer ? *rptr++ : 0;
		pdata->sampleIndex++;

		fill_audio_buffer(pdata, ts);

		/* increment timestamp accordingly while copying
		 * (once per frame, i.e. every 'channels' samples) */
		if (i % channels == 0)
			ts += nsec_per_frame;
	}

	if(capVid) return (0); /*still capturing*/
	else
	{
		__LOCK_MUTEX( __AMUTEX );
			pdata->streaming=FALSE;
			/* mark current buffer as ready to process */
			pdata->audio_buff_flag[pdata->bw_ind] = AUD_PROCESS;
		__UNLOCK_MUTEX( __AMUTEX );
	}

	return(-1); /* audio capture stopped*/
}