void BMPVideoEncoder::SetAudioFormat(const tWAVEFORMATEX *fmt)
{
  if(params.CaptureAudio && !intn->wave)
  {
    char filename[512];
    strcpy_s(filename,prefix);
    strcat_s(filename,".wav");

    delete intn->wfx;
    intn->wfx = CopyFormat(fmt);
    intn->wave = fopen(filename,"wb");
    
    if(intn->wave)
    {
      static unsigned char header[] = "RIFF\0\0\0\0WAVEfmt ";
      static unsigned char data[] = "data\0\0\0\0";

      fwrite(header,1,sizeof(header)-1,intn->wave);
      // legacy PCM formats omit the trailing 2-byte cbSize field
      DWORD len = fmt->cbSize ? sizeof(WAVEFORMATEX)+fmt->cbSize : sizeof(WAVEFORMATEX)-2;
      fwrite(&len,1,sizeof(DWORD),intn->wave);
      fwrite(fmt,1,len,intn->wave);
      fwrite(data,1,sizeof(data)-1,intn->wave);
    }

    // fill already written frames with no sound
    unsigned char *buffer = new unsigned char[fmt->nBlockAlign * 1024];
    int sampleFill = MulDiv(frame,fmt->nSamplesPerSec*frameRateDenom,frameRateScaled);

    memset(buffer,0,fmt->nBlockAlign * 1024);
    for(int samplePos=0;samplePos<sampleFill;samplePos+=1024)
      WriteAudioFrame(buffer,min(sampleFill-samplePos,1024));

    delete[] buffer;
  }
}
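The RIFF and data chunk sizes in the header above are written as zero placeholders, so whatever closes the capture has to seek back and patch them once the total payload is known. A minimal sketch of that close step, assuming the caller remembered the file offset of the "data" tag (the helper name and its parameter are hypothetical):

static void FinishWaveFile(FILE *wave, long dataChunkPos)
{
    long end = ftell(wave);                        // total bytes written

    // RIFF chunk size covers everything after the 8-byte "RIFF"+size header
    DWORD riffSize = (DWORD)(end - 8);
    fseek(wave, 4, SEEK_SET);
    fwrite(&riffSize, 1, sizeof(DWORD), wave);

    // data chunk size covers the raw samples after the "data"+size header
    DWORD dataSize = (DWORD)(end - (dataChunkPos + 8));
    fseek(wave, dataChunkPos + 4, SEEK_SET);
    fwrite(&dataSize, 1, sizeof(DWORD), wave);

    fclose(wave);
}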
Example #2
AVWRAP_DECL int AVWrapper_Close()
{
    int ret;
    // output buffered frames
    if (g_pVCodec->capabilities & AV_CODEC_CAP_DELAY)
    {
        do
            ret = WriteFrame(NULL);
        while (ret > 0);
        if (ret < 0)
            return ret;
    }
    // output any remaining audio
    do
    {
        ret = WriteAudioFrame();
    }
    while(ret > 0);
    if (ret < 0)
        return ret;

    // write the trailer, if any.
    av_write_trailer(g_pContainer);

    // close the output file
    if (!(g_pFormat->flags & AVFMT_NOFILE))
        avio_close(g_pContainer->pb);

    // free everything
    if (g_pVStream)
    {
        avcodec_close(g_pVideo);
        av_free(g_pVideo);
        av_free(g_pVStream);
        av_frame_free(&g_pVFrame);
    }
    if (g_pAStream)
    {
        avcodec_close(g_pAudio);
        av_free(g_pAudio);
        av_free(g_pAStream);
        av_frame_free(&g_pAFrame);
        av_free(g_pSamples);
        fclose(g_pSoundFile);
    }

    av_free(g_pContainer);
    return 0;
}
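On newer libavcodec (FFmpeg 3.1 and later) the same drain can be expressed with the send/receive API: passing NULL to avcodec_send_frame() puts the encoder into draining mode, and packets are pulled until AVERROR_EOF. A hedged sketch of that alternative, not part of the wrapper above:

static int FlushEncoder(AVFormatContext* oc, AVCodecContext* enc, AVStream* st)
{
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    int ret = avcodec_send_frame(enc, NULL);      // enter draining mode
    while (ret >= 0)
    {
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
            return 0;                             // fully drained
        if (ret < 0)
            return ret;                           // real error
        av_packet_rescale_ts(&pkt, enc->time_base, st->time_base);
        pkt.stream_index = st->index;
        ret = av_interleaved_write_frame(oc, &pkt);  // takes ownership of pkt
    }
    return ret;
}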
Example #3
////////////////////////////////////////////////////////////////
// Write out one AVI frame
//
// Argument:	wh		window handle
// Return:	bool	true: success / false: failure
////////////////////////////////////////////////////////////////
bool AVI6::AVIWriteFrame( HWINDOW wh )
{
#ifndef NOAVI
	if( !wh ) return false;
	
	cCritical::Lock();

	int xx = OSD_GetWindowWidth(wh);
	int yy = OSD_GetWindowHeight(wh);

	// Abort recording if the window size changed mid-capture
	if(xx != video_st.tmp_frame->width || yy != video_st.tmp_frame->height){
		StopAVI();
		cCritical::UnLock();
		return false;
	}

	Sbuf.resize(xx * yy * ABPP / 4);

	VRect ss;
	ss.x = 0;
	ss.y = 0;
	ss.w = xx;
	ss.h = yy;
	if( !OSD_GetWindowImage( wh, (void **)&Sbuf, &ss ) ){
		cCritical::UnLock();
		return false;
	}

	int encode_video = 1, encode_audio = 1;
	while (encode_video || encode_audio) {
		/* select the stream to encode */
		if (encode_video &&
				(!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
												audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
			WriteVideoFrame(oc, &video_st, &Sbuf[0]);
			encode_video = 0;
		} else {
			encode_audio = !WriteAudioFrame(oc, &audio_st, this);
		}
	}
	cCritical::UnLock();
	return true;
#else
	return false;
#endif
}
Example #4
static int WriteFrame(AVFrame* pFrame)
{
    double AudioTime, VideoTime;
    int ret;
    // write interleaved audio frame
    if (g_pAStream)
    {
        VideoTime = (double)g_pVFrame->pts * g_pVStream->time_base.num/g_pVStream->time_base.den;
        do
        {
            AudioTime = (double)g_pAFrame->pts * g_pAStream->time_base.num/g_pAStream->time_base.den;
            ret = WriteAudioFrame();
        }
        while (AudioTime < VideoTime && ret > 0);
        if (ret < 0)
            return ret;
    }

    if (!g_pVStream)
        return 0;

    AVPacket Packet;
    av_init_packet(&Packet);
    Packet.data = NULL;
    Packet.size = 0;

    g_pVFrame->pts++;
#if LIBAVCODEC_VERSION_MAJOR < 58
    if (g_pFormat->flags & AVFMT_RAWPICTURE)
    {
        /* raw video case. The API will change slightly in the near
           future for that. */
        Packet.flags |= AV_PKT_FLAG_KEY;
        Packet.stream_index = g_pVStream->index;
        Packet.data = (uint8_t*)pFrame;
        Packet.size = sizeof(AVPicture);

        if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
            return FatalError("Error while writing video frame");
        return 0;
    }
    else
#endif
    {
#if LIBAVCODEC_VERSION_MAJOR >= 54
        int got_packet;
        if (avcodec_encode_video2(g_pVideo, &Packet, pFrame, &got_packet) < 0)
            return FatalError("avcodec_encode_video2 failed");
        if (!got_packet)
            return 0;

        av_packet_rescale_ts(&Packet, g_pVideo->time_base, g_pVStream->time_base);
#else
        Packet.size = avcodec_encode_video(g_pVideo, g_OutBuffer, OUTBUFFER_SIZE, pFrame);
        if (Packet.size < 0)
            return FatalError("avcodec_encode_video failed");
        if (Packet.size == 0)
            return 0;

        if( g_pVideo->coded_frame->pts != AV_NOPTS_VALUE)
            Packet.pts = av_rescale_q(g_pVideo->coded_frame->pts, g_pVideo->time_base, g_pVStream->time_base);
        if( g_pVideo->coded_frame->key_frame )
            Packet.flags |= AV_PKT_FLAG_KEY;
        Packet.data = g_OutBuffer;
#endif
        // write the compressed frame in the media file
        Packet.stream_index = g_pVStream->index;
        if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
            return FatalError("Error while writing video frame");

        return 1;
    }
}
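The manual num/den arithmetic above is what libavutil's av_q2d() computes; the same comparison could be written as below, or done entirely in integers with av_compare_ts() as Example #3 does, which avoids floating-point rounding:

    double VideoTime = g_pVFrame->pts * av_q2d(g_pVStream->time_base);
    double AudioTime = g_pAFrame->pts * av_q2d(g_pAStream->time_base);

    // exact integer comparison: negative means audio is still behind
    // video, so another audio frame should be written
    if (av_compare_ts(g_pAFrame->pts, g_pAStream->time_base,
                      g_pVFrame->pts, g_pVStream->time_base) < 0)
        ret = WriteAudioFrame();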
Example #5
void Context::CreateNewClusterAudioOnly()
{
    assert(m_pAudio);

    const StreamAudio::frames_t& aframes = m_pAudio->GetFrames();
    assert(!aframes.empty());

    const StreamAudio::AudioFrame* const paf_first = aframes.front();
    assert(paf_first);

    const StreamAudio::AudioFrame& af_first = *paf_first;

    const ULONG af_first_time = af_first.GetTimecode();

    clusters_t& cc = m_clusters;
    assert(cc.empty() || (af_first_time > cc.back().m_timecode));

    //const Cluster* const pPrevCluster = cc.empty() ? 0 : &cc.back();

    cc.push_back(Cluster());
    Cluster& c = cc.back();

    c.m_pos = m_file.GetPosition();
    c.m_timecode = af_first_time;

    m_file.WriteID4(0x1F43B675);  //Cluster ID

#if 0
    m_file.Write4UInt(0);         //patch size later, during close
#else
    m_file.SetPosition(4, STREAM_SEEK_CUR);
#endif

    m_file.WriteID1(0xE7);
    m_file.Write1UInt(4);
    m_file.Serialize4UInt(c.m_timecode);

    const __int64 off = c.m_pos - m_segment_pos - 12;
    assert(off >= 0);

#if 0
    //disable this until we're sure it's allowed per the WebM std
    m_file.WriteID1(0xA7);        //Position ID
    m_file.Write1UInt(8);         //payload size is 8 bytes
    m_file.Serialize8UInt(off);   //payload

    if (pPrevCluster)
    {
        const __int64 size = c.m_pos - pPrevCluster->m_pos;
        assert(size > 0);

        m_file.WriteID1(0xAB);        //PrevSize ID
        m_file.Write1UInt(8);         //payload size is 8 bytes
        m_file.Serialize8UInt(size);  //payload
    }
#endif

    ULONG cFrames = 0;   //TODO: must write cues for audio

    while (!aframes.empty())
    {
        const StreamAudio::AudioFrame* const paf = aframes.front();
        assert(paf);

        const ULONG t = paf->GetTimecode();
        assert(t >= c.m_timecode);

        const LONG dt = LONG(t) - LONG(c.m_timecode);

        if (dt > 1000)
            break;

        WriteAudioFrame(c, cFrames);
    }

    const __int64 pos = m_file.GetPosition();

    const __int64 size_ = pos - c.m_pos - 8;
    assert(size_ <= ULONG_MAX);

    const ULONG size = static_cast<ULONG>(size_);

    m_file.SetPosition(c.m_pos + 4);
    m_file.Write4UInt(size);

    m_file.SetPosition(pos);
}
Example #6
void Context::CreateNewCluster(const StreamVideo::VideoFrame* pvf_stop)
{
#if 0
    odbgstream os;
    os << "\nCreateNewCluster: pvf_stop=";

    if (pvf_stop == 0)
        os << "NULL";
    else
        os << pvf_stop->GetTimecode();

    os << endl;
#endif

    assert(m_pVideo);

    const StreamVideo::frames_t& vframes = m_pVideo->GetFrames();
    assert(!vframes.empty());

    clusters_t& cc = m_clusters;

    //const Cluster* const pPrevCluster = cc.empty() ? 0 : &cc.back();

    cc.push_back(Cluster());
    Cluster& c = cc.back();

    c.m_pos = m_file.GetPosition();

    {
        const StreamVideo::VideoFrame* const pvf = vframes.front();
        assert(pvf);
        assert(pvf != pvf_stop);

        const ULONG vt = pvf->GetTimecode();

        if ((m_pAudio == 0) || m_pAudio->GetFrames().empty())
            c.m_timecode = vt;
        else
        {
            const StreamAudio::frames_t& aframes = m_pAudio->GetFrames();
            const StreamAudio::AudioFrame* const paf = aframes.front();
            const ULONG at = paf->GetTimecode();

            c.m_timecode = (at <= vt) ? at : vt;
        }
    }

    m_file.WriteID4(0x1F43B675);  //Cluster ID

#if 0
    m_file.Write4UInt(0);         //patch size later, during close
#elif 0
    m_file.SetPosition(4, STREAM_SEEK_CUR);
#else
    m_file.Serialize4UInt(0x1FFFFFFF);
#endif

    m_file.WriteID1(0xE7);
    m_file.Write1UInt(4);
    m_file.Serialize4UInt(c.m_timecode);

    const __int64 off = c.m_pos - m_segment_pos - 12;
    assert(off >= 0);

#if 0
    //TODO: disable until we're sure this is allowed per the WebM std
    m_file.WriteID1(0xA7);        //Position ID
    m_file.Write1UInt(8);         //payload size is 8 bytes
    m_file.Serialize8UInt(off);   //payload

    if (pPrevCluster)
    {
        const __int64 size = c.m_pos - pPrevCluster->m_pos;
        assert(size > 0);

        m_file.WriteID1(0xAB);        //PrevSize ID
        m_file.Write1UInt(8);         //payload size is 8 bytes
        m_file.Serialize8UInt(size);  //payload
    }
#endif

    ULONG cFrames = 0;
    LONG vtc_prev  = -1;

    StreamVideo::frames_t& rframes = m_pVideo->GetKeyFrames();

    while (!vframes.empty())
    {
        typedef StreamVideo::frames_t::const_iterator video_iter_t;

        video_iter_t video_iter = vframes.begin();
        const video_iter_t video_iter_end = vframes.end();

        const StreamVideo::VideoFrame* const pvf = *video_iter++;
        assert(pvf);

        if (pvf == pvf_stop)
            break;

        const StreamVideo::VideoFrame* const pvf_next =
            (video_iter == video_iter_end) ? 0 : *video_iter;

        //const bool bLastVideo = (pvf_next == pvf_stop);

        const ULONG vt = pvf->GetTimecode();
        assert(vt >= c.m_timecode);
        assert((pvf_stop == 0) || (vt < pvf_stop->GetTimecode()));

        if ((m_pAudio == 0) || m_pAudio->GetFrames().empty())
        {
            if (!rframes.empty() && (pvf == rframes.front()))
                rframes.pop_front();

            const ULONG vtc = pvf->GetTimecode();

            WriteVideoFrame(c, cFrames, pvf_stop, pvf_next, vtc_prev);

            vtc_prev = vtc;

            continue;
        }

        const StreamAudio::frames_t& aframes = m_pAudio->GetFrames();
        typedef StreamAudio::frames_t::const_iterator audio_iter_t;

        audio_iter_t i = aframes.begin();
        const audio_iter_t j = aframes.end();

        const StreamAudio::AudioFrame* const paf = *i++;  //1st audio frame
        assert(paf);

        const ULONG at = paf->GetTimecode();
        assert(at >= c.m_timecode);

        if (vt < at)
        {
            if (!rframes.empty() && (pvf == rframes.front()))
                rframes.pop_front();

            const ULONG vtc = pvf->GetTimecode();

            WriteVideoFrame(c, cFrames, pvf_stop, pvf_next, vtc_prev);

            vtc_prev = vtc;

            continue;
        }

        //At this point, we have (at least) one audio frame,
        //and (at least) one video frame.  They could have an
        //equal timecode, or the audio might be smaller than
        //the video.  Our desire is that the largest audio
        //frame less than the pvf_stop go on the next cluster,
        //which means any video frames greater than the audio
        //frame will also go on the next cluster.

        if (pvf_stop == 0)  //means write all extant frames
        {
            //We know that this audio frame is less or equal to
            //the video frame, so write it now.

            WriteAudioFrame(c, cFrames);
            continue;
        }

        //At this point, we still have an audio frame and a
        //video frame, neither of which has been written yet.

        const ULONG vt_stop = pvf_stop->GetTimecode();

        if (at >= vt_stop)  //weird
            break;

        if (i == j)  //weird
            break;

        const StreamAudio::AudioFrame* const paf_stop = *i;  //2nd audio frame
        assert(paf_stop);

        const ULONG at_stop = paf_stop->GetTimecode();

        if (at_stop >= vt_stop)
            break;

        WriteAudioFrame(c, cFrames);   //write 1st audio frame
    }

    const __int64 pos = m_file.GetPosition();

    const __int64 size_ = pos - c.m_pos - 8;
    assert(size_ <= ULONG_MAX);

    const ULONG size = static_cast<ULONG>(size_);

    m_file.SetPosition(c.m_pos + 4);
    m_file.Write4UInt(size);

    m_file.SetPosition(pos);
}
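The 0x1FFFFFFF placeholder written for the cluster size is the EBML "unknown size" marker for a 4-byte length field: a 4-byte EBML variable-length integer starts with the bit pattern 0001, leaving 28 data bits, and the all-ones data value is reserved to mean "size unknown". A minimal sketch of the encoding that Write4UInt presumably performs when the real size is patched in at the end (an assumption about its internals):

// Encode a payload size as a 4-byte EBML variable-length integer.
static void EncodeEbmlSize4(unsigned long size, unsigned char out[4])
{
    assert(size < 0x0FFFFFFF);   // the all-ones value is reserved for "unknown"
    out[0] = (unsigned char)(0x10 | (size >> 24));  // 0001 marker + top 4 data bits
    out[1] = (unsigned char)(size >> 16);
    out[2] = (unsigned char)(size >> 8);
    out[3] = (unsigned char)(size);
}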