Esempio n. 1
0
////////////////////////////////////////////////////////////////
// Write one AVI video frame (plus any audio that is due)
//
// Arg:     wh      window handle to capture from
// Return:  bool    true: success  false: failure
////////////////////////////////////////////////////////////////
bool AVI6::AVIWriteFrame( HWINDOW wh )
{
#ifndef NOAVI
	if( !wh ) return false;
	
	cCritical::Lock();

	int xx = OSD_GetWindowWidth(wh);
	int yy = OSD_GetWindowHeight(wh);

	// If the window size changed mid-recording, stop recording and bail.
	if(xx != video_st.tmp_frame->width || yy != video_st.tmp_frame->height){
		StopAVI();
		cCritical::UnLock();
		return false;
	}

	Sbuf.resize(xx * yy * ABPP / 4);

	VRect ss;
	ss.x = 0;
	ss.y = 0;
	ss.w = xx;
	ss.h = yy;
	if( !OSD_GetWindowImage( wh, (void **)&Sbuf, &ss ) ){
		// BUGFIX: this failure path previously returned while still
		// holding the critical section, which would deadlock every
		// subsequent caller of cCritical::Lock().
		cCritical::UnLock();
		return false;
	}

	// Encode the captured video frame, then drain audio until the audio
	// stream has caught up with the video timestamp (av_compare_ts picks
	// whichever stream is behind).
	int encode_video = 1, encode_audio = 1;
	while (encode_video || encode_audio) {
		/* select the stream to encode */
		if (encode_video &&
				(!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
												audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
			WriteVideoFrame(oc, &video_st, &Sbuf[0]);
			encode_video = 0;
		} else {
			// WriteAudioFrame() is presumed to return nonzero once no
			// more audio is due -- TODO(review) confirm against its
			// definition.
			encode_audio = !WriteAudioFrame(oc, &audio_st, this);
		}
	}
	cCritical::UnLock();
	return true;
#else
	return false;
#endif
}
Esempio n. 2
0
// Serialize buffered frames into one new Matroska/WebM Cluster element.
//
// pvf_stop:  exclusive upper bound -- the first video frame that must NOT
//            go into this cluster; NULL means "write all extant frames".
//
// Video frames (and, when an audio stream exists, audio frames interleaved
// in timecode order) are consumed from the stream queues and written to
// m_file via WriteVideoFrame/WriteAudioFrame.  The cluster's size field is
// written as a placeholder first and back-patched at the end, once the
// payload length is known.
void Context::CreateNewCluster(const StreamVideo::VideoFrame* pvf_stop)
{
#if 0
    // Disabled debug trace of the stop frame's timecode.
    odbgstream os;
    os << "\nCreateNewCluster: pvf_stop=";

    if (pvf_stop == 0)
        os << "NULL";
    else
        os << pvf_stop->GetTimecode();

    os << endl;
#endif

    assert(m_pVideo);

    // A cluster is only created when at least one video frame is pending.
    const StreamVideo::frames_t& vframes = m_pVideo->GetFrames();
    assert(!vframes.empty());

    clusters_t& cc = m_clusters;

    //const Cluster* const pPrevCluster = cc.empty() ? 0 : &cc.back();

    cc.push_back(Cluster());
    Cluster& c = cc.back();

    // Remember where the cluster starts so its size can be patched later.
    c.m_pos = m_file.GetPosition();

    {
        // The cluster timecode is the smallest timecode it will contain:
        // the first video frame's, or the first audio frame's if smaller.
        const StreamVideo::VideoFrame* const pvf = vframes.front();
        assert(pvf);
        assert(pvf != pvf_stop);

        const ULONG vt = pvf->GetTimecode();

        if ((m_pAudio == 0) || m_pAudio->GetFrames().empty())
            c.m_timecode = vt;
        else
        {
            const StreamAudio::frames_t& aframes = m_pAudio->GetFrames();
            const StreamAudio::AudioFrame* const paf = aframes.front();
            const ULONG at = paf->GetTimecode();

            c.m_timecode = (at <= vt) ? at : vt;
        }
    }

    m_file.WriteID4(0x1F43B675);  //Cluster ID

#if 0
    m_file.Write4UInt(0);         //patch size later, during close
#elif 0
    m_file.SetPosition(4, STREAM_SEEK_CUR);
#else
    // Placeholder 4-byte size; the real payload size is back-patched at
    // the bottom of this function.
    m_file.Serialize4UInt(0x1FFFFFFF);
#endif

    // Cluster Timecode element (ID 0xE7) with a 4-byte payload.
    m_file.WriteID1(0xE7);
    m_file.Write1UInt(4);
    m_file.Serialize4UInt(c.m_timecode);

    // Cluster offset relative to the segment payload (the 12 presumably
    // covers the segment ID + size header -- TODO(review) confirm).
    const __int64 off = c.m_pos - m_segment_pos - 12;
    assert(off >= 0);

#if 0
    //TODO: disable until we're sure this is allowed per the Webm std
    m_file.WriteID1(0xA7);        //Position ID
    m_file.Write1UInt(8);         //payload size is 8 bytes
    m_file.Serialize8UInt(off);   //payload

    if (pPrevCluster)
    {
        const __int64 size = c.m_pos - pPrevCluster->m_pos;
        assert(size > 0);

        m_file.WriteID1(0xAB);        //PrevSize ID
        m_file.Write1UInt(8);         //payload size is 8 bytes
        m_file.Serialize8UInt(size);  //payload
    }
#endif

    ULONG cFrames = 0;     // count of frames written into this cluster
    LONG vtc_prev  = -1;   // timecode of the previously written video frame

    StreamVideo::frames_t& rframes = m_pVideo->GetKeyFrames();

    // Interleave loop.  NOTE(review): the loop relies on
    // WriteVideoFrame/WriteAudioFrame popping frames off the queues so
    // that vframes eventually empties -- confirm against their bodies.
    while (!vframes.empty())
    {
        typedef StreamVideo::frames_t::const_iterator video_iter_t;

        video_iter_t video_iter = vframes.begin();
        const video_iter_t video_iter_end = vframes.end();

        const StreamVideo::VideoFrame* const pvf = *video_iter++;
        assert(pvf);

        // Reached the exclusive stop frame: this cluster is complete.
        if (pvf == pvf_stop)
            break;

        const StreamVideo::VideoFrame* const pvf_next =
            (video_iter == video_iter_end) ? 0 : *video_iter;

        //const bool bLastVideo = (pvf_next == pvf_stop);

        const ULONG vt = pvf->GetTimecode();
        assert(vt >= c.m_timecode);
        assert((pvf_stop == 0) || (vt < pvf_stop->GetTimecode()));

        // No audio pending: write the video frame straight away.
        if ((m_pAudio == 0) || m_pAudio->GetFrames().empty())
        {
            // Keep the key-frame queue in step with the frame queue.
            if (!rframes.empty() && (pvf == rframes.front()))
                rframes.pop_front();

            const ULONG vtc = pvf->GetTimecode();

            WriteVideoFrame(c, cFrames, pvf_stop, pvf_next, vtc_prev);

            vtc_prev = vtc;

            continue;
        }

        const StreamAudio::frames_t& aframes = m_pAudio->GetFrames();
        typedef StreamAudio::frames_t::const_iterator audio_iter_t;

        audio_iter_t i = aframes.begin();
        const audio_iter_t j = aframes.end();

        const StreamAudio::AudioFrame* const paf = *i++;  //1st audio frame
        assert(paf);

        const ULONG at = paf->GetTimecode();
        assert(at >= c.m_timecode);

        // Video strictly precedes the pending audio: video goes first.
        if (vt < at)
        {
            if (!rframes.empty() && (pvf == rframes.front()))
                rframes.pop_front();

            const ULONG vtc = pvf->GetTimecode();

            WriteVideoFrame(c, cFrames, pvf_stop, pvf_next, vtc_prev);

            vtc_prev = vtc;

            continue;
        }

        //At this point, we have (at least) one audio frame,
        //and (at least) one video frame.  They could have an
        //equal timecode, or the audio might be smaller than
        //the video.  Our desire is that the largest audio
        //frame less than the pvf_stop go on the next cluster,
        //which means any video frames greater than the audio
        //frame will also go on the next cluster.

        if (pvf_stop == 0)  //means write all extant frames
        {
            //We know that this audio frame is less or equal to
            //the video frame, so write it now.

            WriteAudioFrame(c, cFrames);
            continue;
        }

        //At this point, we still have an audio frame and a
        //video frame, neither of which has been written yet.

        const ULONG vt_stop = pvf_stop->GetTimecode();

        if (at >= vt_stop)  //weird
            break;

        if (i == j)  //weird
            break;

        const StreamAudio::AudioFrame* const paf_stop = *i;  //2nd audio frame
        assert(paf_stop);

        const ULONG at_stop = paf_stop->GetTimecode();

        // If the NEXT audio frame already reaches the stop timecode, the
        // current audio frame is the largest one below the stop point and
        // belongs on the next cluster -- stop without writing it.
        if (at_stop >= vt_stop)
            break;

        WriteAudioFrame(c, cFrames);   //write 1st audio frame
    }

    // Back-patch the cluster's size field (8 = 4-byte ID + 4-byte size
    // header written above).
    const __int64 pos = m_file.GetPosition();

    const __int64 size_ = pos - c.m_pos - 8;
    assert(size_ <= ULONG_MAX);

    const ULONG size = static_cast<ULONG>(size_);

    m_file.SetPosition(c.m_pos + 4);
    m_file.Write4UInt(size);

    m_file.SetPosition(pos);
}
Esempio n. 3
0
/*
 * Upload a YUV420p overlay image to the camera firmware.
 *
 * ch               target video channel (must be < NUM_MUX_VID_CHANNELS)
 * params           overlay placement/size/alpha parameters (must be non-NULL)
 * yuv420p_filename path of the raw YUV420p image to upload
 *
 * Returns the last libusb transfer result (>= 0 on success), -1 on any
 * validation, file or USB error.
 *
 * Flow: validate arguments, send LOGO_CMD_INIT with the geometry, convert
 * the file's planes to the firmware tile layout, then bulk-send the Y and
 * UV planes followed by their respective UPDATE_DONE control transfers.
 */
int mxuvc_overlay_add_image(video_channel_t ch, overlay_image_params_t* params, char* yuv420p_filename)
{
	int ret = 0;
	int YUVformat = 0;
	int startframe = 0;
	int ysize = 0;
	int uvsize = 0;
	unsigned char *y = NULL;   /* tiled luma plane, malloc'd by WriteVideoFrame */
	unsigned char *uv = NULL;  /* tiled chroma plane, malloc'd by WriteVideoFrame */
	FILE *ifile = NULL;

	int data[7];

	if(ch >= NUM_MUX_VID_CHANNELS){
		TRACE("ch should be less than value %d\n", NUM_MUX_VID_CHANNELS);
		return -1;
	}

	if (camera == NULL) {
		TRACE("Unitialised camera handle\n");
		return -1;
	}

	if (params == NULL) {
		TRACE("params cannot be NULL\n");
		return -1;
	}

	/* BUGFIX: validate params BEFORE opening the file, so the early
	 * returns below can no longer leak the FILE handle. */
	if (params->idx >= NUM_OVERLAY_IMAGE_IDX)
	{
		fprintf(stderr, "idx exceeds maximum supported value\n");
		return -1;
	}

	if (params->width > 320)
	{
		fprintf(stderr, "Maximum picture width supported is 320\n");
		return -1;
	}

	if (params->height > 240)
	{
		fprintf(stderr, "Maximum picture height supported is 240\n");
		return -1;
	}

	ifile = fopen(yuv420p_filename, "r");
	if (ifile == NULL)
	{
		fprintf(stderr, "Could not open %s for reading\n", yuv420p_filename);
		return -1;
	}

	/* Pack the geometry for the firmware: channel, overlay slot,
	 * offsets, dimensions, and the low byte of the alpha value. */
	data[0] = (int)ch;
	data[1] = params->idx;

	data[2] = params->xoff;
	data[3] = params->yoff;

	data[4] = params->width;
	data[5] = params->height;
	data[6] = params->alpha & 0x000000FF;

	ret = libusb_control_transfer(camera,
				/* bmRequestType */
		(LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR |
			LIBUSB_RECIPIENT_INTERFACE),
				/* bRequest      */ LOGO_CMD_INIT,
				/* wValue        */ 0,
				/* MSB 4 bytes   */
				/* wIndex        */ 0,
				/* Data          */ (unsigned char *)&data[0],
				/* wLength       */ sizeof(data),
				/* timeout*/     0 
		);
	if (ret < 0)
		fclose(ifile);  /* BUGFIX: don't leak the file on the error return */
	CHECK_ERROR(ret < 0, -1, "LOGO_INIT failed");

	/* Convert the YUV planes to the firmware tile layout; allocates y/uv. */
	WriteVideoFrame(ifile, params->width, params->height, params->width, &y, &uv, &ysize, &uvsize, YUVformat, startframe );

	fclose(ifile);

	if(ysize != 0 && y != NULL) {
		ret = usb_send_buffer(camera, y, ysize, FWPACKETSIZE, LOGO_CMD_Y_UPDATE);

		ret = libusb_control_transfer(camera,
				/* bmRequestType */
			(LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_VENDOR |
				LIBUSB_RECIPIENT_INTERFACE),
				/* bRequest      */ LOGO_CMD_Y_UPDATE_DONE,
				/* wValue        */ 0,
				/* MSB 4 bytes   */
				/* wIndex        */ 0,
				/* Data          */ NULL,
				/* wLength       */ 0,
				/* timeout*/     0 
			);
		if (ret < 0) {
			/* BUGFIX: release the planes before CHECK_ERROR returns */
			free(y);
			free(uv);
		}
		CHECK_ERROR(ret < 0, -1, "LOGO_LUMA_UPDATE_DONE failed");
	}
	if(uvsize != 0 && uv != NULL) {
		ret = usb_send_buffer(camera, uv, uvsize, FWPACKETSIZE, LOGO_CMD_UV_UPDATE);

		ret = libusb_control_transfer(camera,
				/* bmRequestType */
			(LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_VENDOR |
				LIBUSB_RECIPIENT_INTERFACE),
				/* bRequest      */ LOGO_CMD_UV_UPDATE_DONE,
				/* wValue        */ 0,
				/* MSB 4 bytes   */
				/* wIndex        */ 0,
				/* Data          */ NULL,
				/* wLength       */ 0,
				/* timeout*/ 	  0 
			);
		if (ret < 0) {
			/* BUGFIX: release the planes before CHECK_ERROR returns */
			free(y);
			free(uv);
		}
		CHECK_ERROR(ret < 0, -1, "LOGO_UV_UPDATE_DONE failed");
	}
	free(y);
	free(uv);

	// wait for cmd to execute at fw
	usleep(50000);
	return ret;
}