Code Example #1
File: EncoderFFmpeg.cpp Project: Gemini88/xbmc-1
bool CEncoderFFmpeg::Close()
{
    if (m_Format) {
        /* if there is anything still in the buffer */
        if (m_BufferSize > 0) {
            /* zero the unused space so we dont encode random junk */
            memset(&m_Buffer[m_BufferSize], 0, m_NeededBytes - m_BufferSize);
            /* write any remaining data */
            WriteFrame();
        }

        /* write the eof flag */
        delete[] m_Buffer;
        m_Buffer = NULL;
        WriteFrame();

        /* write the trailer */
        m_dllAvFormat.av_write_trailer(m_Format);
        FlushStream();
        FileClose();

        /* cleanup */
        m_dllAvCodec.avcodec_close(m_CodecCtx);
        m_dllAvUtil.av_freep(&m_Stream    );
        m_dllAvUtil.av_freep(&m_Format->pb);
        m_dllAvUtil.av_freep(&m_Format    );
    }

    m_BufferSize = 0;

    m_dllAvFormat.Unload();
    m_dllAvUtil  .Unload();
    m_dllAvCodec .Unload();
    return true;
}
Code Example #2
File: animation.cpp Project: SteveShaw/povray
void Animation::AppendFrame(Image *image) // writing only - NOTE: This method reserves the right to *modify* the image passed to it!!! [trf]
{
    if(writeOptions.blurradius > 0.0f)
    {
        boost::scoped_ptr<Image> mask(Image::Create(image->GetWidth(), image->GetHeight(), Image::Bit_Map));
        float r, g, b, f, t;

        mask->FillBitValue(false);

        if(writeOptions.bluredgethreshold < 1.0f)
            ComputeBlurMask(*image, *mask.get());

        for(int y = 0; y < image->GetHeight(); y++)
        {
            for(int x = 0; x < image->GetWidth(); x++)
            {
                if(mask->GetBitValue(x, y) == true)
                {
                    image->GetRGBFTValue(x, y, r, g, b, f, t);
                    GetBlurredPixel(*image, x, y, r, g, b);
                    image->SetRGBFTValue(x, y, r, g, b, f, t);
                }
            }
        }
    }

    WriteFrame(outFile, image);

    totalFrames++;
}
Code Example #3
TInt CCommFrameWriterAo::Write(CCsyMsgBufBpFrame* aBpFrame,
							   TBool aHighPriority)
/**
 * This method is called to transmit a frame to the baseband.
 *
 * @param aBpFrame - Pointer to frame
 * @param aHighPriority - Flag to indicate a high priority frame
 */
    {
	_LOG_L4C3(">>CCommFrameWriterAo::Write [aBpFrame=0x%x, aHighPriority=%d]",
		aBpFrame,aHighPriority);

	TInt ret = KErrNone;

	// 1st check if we are already transmitting a frame
	if (!IsActive())
		{
		_LOG_L4C1("Not currently writing a frame");
		ret = WriteFrame(aBpFrame);
		}
	else
		{
		// add frame to the list of frames that need to be sent to the BP
		_LOG_L2C1("Already writing a frame - add to queue");
		AddToWaitingToSendList(aBpFrame, aHighPriority);
		}

	_LOG_L4C2("<<CCommFrameWriterAo::Write [ret=%d]",ret);
	return ret;
	}
Code Example #4
File: FifoPlayer.cpp Project: delroth/dolphin
CPU::State FifoPlayer::AdvanceFrame()
{
  if (m_CurrentFrame >= m_FrameRangeEnd)
  {
    if (!m_Loop)
      return CPU::State::PowerDown;
    // If there are zero frames in the range then sleep instead of busy spinning
    if (m_FrameRangeStart >= m_FrameRangeEnd)
      return CPU::State::Stepping;

    // When looping, reload the contents of all the BP/CP/CF registers.
    // This ensures that each time the first frame is played back, the state of the
    // GPU is the same for each playback loop.
    m_CurrentFrame = m_FrameRangeStart;
    LoadRegisters();
    LoadTextureMemory();
    FlushWGP();
  }

  if (m_FrameWrittenCb)
    m_FrameWrittenCb();

  if (m_EarlyMemoryUpdates && m_CurrentFrame == m_FrameRangeStart)
    WriteAllMemoryUpdates();

  WriteFrame(m_File->GetFrame(m_CurrentFrame), m_FrameInfo[m_CurrentFrame]);

  ++m_CurrentFrame;
  return CPU::State::Running;
}
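
The CPU::State value returned by AdvanceFrame() is meant to drive an outer emulation loop. A hypothetical driver is sketched below; the PlayFifoLog name, the loop, and the sleep are illustration-only assumptions (Dolphin's real caller is its CPU loop, compare Code Example #9), and only FifoPlayer::AdvanceFrame() and CPU::State come from the excerpt above.

#include <chrono>
#include <thread>

// Hypothetical driver: pump AdvanceFrame() until it requests power-down.
void PlayFifoLog(FifoPlayer& player)
{
  while (true)
  {
    const CPU::State state = player.AdvanceFrame();
    if (state == CPU::State::PowerDown)
      break;                                        // end of playback, not looping
    if (state == CPU::State::Stepping)              // empty frame range: avoid busy spinning
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
}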
Code Example #5
File: avwrapper.c Project: kotofos/hw
AVWRAP_DECL int AVWrapper_WriteFrame(uint8_t* pY, uint8_t* pCb, uint8_t* pCr)
{
    g_pVFrame->data[0] = pY;
    g_pVFrame->data[1] = pCb;
    g_pVFrame->data[2] = pCr;
    return WriteFrame(g_pVFrame);
}
Code Example #6
File: StatsFile.cpp Project: 1vanK/AHRUnrealEngine
void FStatsWriteFile::NewFrame( int64 TargetFrame )
{
	SCOPE_CYCLE_COUNTER( STAT_StreamFile );

	// Currently raw stat files are limited to 120 frames.
	enum
	{
		MAX_NUM_RAWFRAMES = 120,
	};
	if( Header.bRawStatsFile )
	{
		if( FCommandStatsFile::FirstFrame == -1 )
		{
			FCommandStatsFile::FirstFrame = TargetFrame;
		}
		else if( TargetFrame > FCommandStatsFile::FirstFrame + MAX_NUM_RAWFRAMES )
		{
			FCommandStatsFile::Stop();
			FCommandStatsFile::FirstFrame = -1;
			return;
		}
	}

	WriteFrame( TargetFrame );
	SendTask();
}
Code Example #7
File: movie.cpp Project: eighttails/PC6001VX
static int WriteAudioFrame(AVFormatContext *oc, OutputStream *ost, AVI6 *avi)
{
	AVCodecContext *c = NULL;
	AVPacket pkt = { 0 };
	AVFrame *frame = NULL;
	int ret = 0;
	int got_packet = 0;
	int dst_nb_samples = 0;

	av_init_packet(&pkt);
	c = ost->st->codec;

	frame = GetAudioFrame(ost, avi);

	if (frame) {
		// determine the number of samples after format conversion
		dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, frame->sample_rate) + frame->nb_samples,
										c->sample_rate, c->sample_rate, AV_ROUND_UP);
		//av_assert0(dst_nb_samples == frame->nb_samples);

		// make the frame writable
		ret = av_frame_make_writable(ost->frame);
		if (ret < 0)
			exit(1);

		// convert the audio format
		ret = swr_convert(ost->swr_ctx,
						  ost->frame->data, dst_nb_samples,
						  (const uint8_t **)frame->data, frame->nb_samples);
		if (ret < 0) {
			fprintf(stderr, "Error while converting\n");
			return 0;
		}
		frame = ost->frame;

		frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
		ost->samples_count += dst_nb_samples;

		ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
		if (ret < 0) {
			fprintf(stderr, "Error encoding audio frame: %s\n", MakeErrorString(ret));
			return 0;
		}

		if (got_packet) {
			ret = WriteFrame(oc, &c->time_base, ost->st, &pkt);
			if (ret < 0) {
				fprintf(stderr, "Error while writing audio frame: %s\n",
						MakeErrorString(ret));
				return 0;
			}
		}
	}

	return (frame || got_packet) ? 0 : 1;
}
Code Example #8
File: movie.cpp Project: eighttails/PC6001VX
static int WriteVideoFrame(AVFormatContext *oc, OutputStream *ost, BYTE* src_img)
{
	int ret = 0;
	AVCodecContext *c = NULL;
	AVFrame *frame = NULL;
	int got_packet = 0;

	c = ost->st->codec;

	frame = GetVideoFrame(ost, src_img);

	if (oc->oformat->flags & AVFMT_RAWPICTURE) {
		/* a hack to avoid data copy with some raw video muxers */
		AVPacket pkt;
		av_init_packet(&pkt);

		if (!frame)
			return 1;

		pkt.flags        |= AV_PKT_FLAG_KEY;
		pkt.stream_index  = ost->st->index;
		pkt.data          = (uint8_t *)frame;
		pkt.size          = sizeof(AVPicture);

		pkt.pts = pkt.dts = frame->pts;
		av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);

		ret = av_interleaved_write_frame(oc, &pkt);
	} else {
		AVPacket pkt = { 0 };
		av_init_packet(&pkt);

		// encode the video frame
		ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
		if (ret < 0) {
			fprintf(stderr, "Error encoding video frame: %s\n", MakeErrorString(ret));
			return 0;
		}

		if (got_packet) {
			ret = WriteFrame(oc, &c->time_base, ost->st, &pkt);
		} else {
			ret = 0;
		}
	}

	if (ret < 0) {
		fprintf(stderr, "Error while writing video frame: %s\n", MakeErrorString(ret));
		return 0;
	}

	return (frame || got_packet) ? 0 : 1;
}
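
Both WriteAudioFrame() and WriteVideoFrame() above hand their encoded packets to a WriteFrame(oc, &c->time_base, ost->st, &pkt) helper that is not part of this excerpt. Below is a minimal sketch in the style of FFmpeg's muxing example; its body is an assumption, not the project's actual implementation.

static int WriteFrame(AVFormatContext *fmt_ctx, const AVRational *time_base,
					  AVStream *st, AVPacket *pkt)
{
	// rescale packet timestamps from the codec time base to the stream time base
	av_packet_rescale_ts(pkt, *time_base, st->time_base);
	pkt->stream_index = st->index;

	// interleave and write the compressed packet into the output media file
	return av_interleaved_write_frame(fmt_ctx, pkt);
}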
Code Example #9
File: FifoPlayer.cpp Project: NINI1988/dolphin
bool FifoPlayer::Play()
{
	if (!m_File)
		return false;

	if (m_File->GetFrameCount() == 0)
		return false;

	IsPlayingBackFifologWithBrokenEFBCopies = m_File->HasBrokenEFBCopies();

	m_CurrentFrame = m_FrameRangeStart;

	LoadMemory();

	// This loop replaces the CPU loop that occurs when a game is run
	while (PowerPC::GetState() != PowerPC::CPU_POWERDOWN)
	{
		if (PowerPC::GetState() == PowerPC::CPU_RUNNING)
		{
			if (m_CurrentFrame >= m_FrameRangeEnd)
			{
				if (m_Loop)
				{
					m_CurrentFrame = m_FrameRangeStart;

					PowerPC::ppcState.downcount = 0;
					CoreTiming::Advance();
				}
				else
				{
					PowerPC::Stop();
					Host_Message(WM_USER_STOP);
				}
			}
			else
			{
				if (m_FrameWrittenCb)
					m_FrameWrittenCb();

				if (m_EarlyMemoryUpdates && m_CurrentFrame == m_FrameRangeStart)
					WriteAllMemoryUpdates();

				WriteFrame(m_File->GetFrame(m_CurrentFrame), m_FrameInfo[m_CurrentFrame]);

				++m_CurrentFrame;
			}
		}
	}

	IsPlayingBackFifologWithBrokenEFBCopies = false;

	return true;
}
Code Example #10
File: avwrapper.c Project: kotofos/hw
AVWRAP_DECL int AVWrapper_Close()
{
    int ret;
    // output buffered frames
    if (g_pVCodec->capabilities & AV_CODEC_CAP_DELAY)
    {
        do
            ret = WriteFrame(NULL);
        while (ret > 0);
        if (ret < 0)
            return ret;
    }
    // output any remaining audio
    do
    {
        ret = WriteAudioFrame();
    }
    while(ret > 0);
    if (ret < 0)
        return ret;

    // write the trailer, if any.
    av_write_trailer(g_pContainer);

    // close the output file
    if (!(g_pFormat->flags & AVFMT_NOFILE))
        avio_close(g_pContainer->pb);

    // free everything
    if (g_pVStream)
    {
        avcodec_close(g_pVideo);
        av_free(g_pVideo);
        av_free(g_pVStream);
        av_frame_free(&g_pVFrame);
    }
    if (g_pAStream)
    {
        avcodec_close(g_pAudio);
        av_free(g_pAudio);
        av_free(g_pAStream);
        av_frame_free(&g_pAFrame);
        av_free(g_pSamples);
        fclose(g_pSoundFile);
    }

    av_free(g_pContainer);
    return 0;
}
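
The drain loop above (do ret = WriteFrame(NULL); while (ret > 0);) implies that WriteFrame() returns a positive value while the delayed encoder still emits packets, zero once drained, and a negative value on error. The sketch below is consistent with that contract and reuses the globals visible in this file (g_pVideo, g_pVStream, g_pContainer); it is based on the legacy avcodec_encode_video2() API and is an assumption, not the project's actual code.

static int WriteFrame(AVFrame* pFrame)
{
    AVPacket pkt = { 0 };
    int got_packet = 0;
    int ret;

    av_init_packet(&pkt);

    // pFrame == NULL flushes frames buffered inside the encoder (AV_CODEC_CAP_DELAY)
    ret = avcodec_encode_video2(g_pVideo, &pkt, pFrame, &got_packet);
    if (ret < 0)
        return ret;
    if (!got_packet)
        return 0;                                // encoder fully drained

    av_packet_rescale_ts(&pkt, g_pVideo->time_base, g_pVStream->time_base);
    pkt.stream_index = g_pVStream->index;
    ret = av_interleaved_write_frame(g_pContainer, &pkt);
    return (ret < 0) ? ret : 1;                  // positive while output keeps coming
}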
Code Example #11
File: track.cpp Project: KonDiter42/ffms2
void FFMS_Track::Write(ZipFile &stream) const {
    stream.Write<uint8_t>(TT);
    stream.Write(TB.Num);
    stream.Write(TB.Den);
    stream.Write<int32_t>(MaxBFrames);
    stream.Write<uint8_t>(UseDTS);
    stream.Write<uint8_t>(HasTS);
    stream.Write<uint64_t>(size());

    if (empty()) return;

    FrameInfo temp{};
    for (size_t i = 0; i < size(); ++i)
        WriteFrame(stream, Frames[i], i == 0 ? temp : Frames[i - 1], TT);
}
Code Example #12
File: actionthread.cpp Project: KDE/kipi-plugins
void ActionThread::processItem(int upperBound, MagickImage* const img, MagickImage* const imgNext, Action action)
{
/*
    // need to reimplement using appsrc plugin of gstreamer
    if(action == TYPE_IMAGE)
    {
        if(d->item->EffectName() == EFFECT_NONE)
            upperBound = 1;
    }
*/

    for (int n = 0; n < upperBound && d->running; n++)
    {
        Frame* const frm = getFrame(d->item, img, imgNext, n, action);
        ProcessFrame(frm);
        WriteFrame(frm);
        delete frm;
    }
}
Code Example #13
File: avwrapper.c Project: TriptychCrypto/hw
AVWRAP_DECL int AVWrapper_WriteFrame(uint8_t *buf)
{
    int x, y, stride = g_Width * 4;
    uint8_t *data[3];

    // copy pointers, prepare source
    memcpy(data, g_pVFrame->data, sizeof(data));
    buf += (g_Height - 1) * stride;

    // convert to YUV 4:2:0
    for (y = 0; y < g_Height; y++) {
        for (x = 0; x < g_Width; x++) {
            int r = buf[x * 4 + 0];
            int g = buf[x * 4 + 1];
            int b = buf[x * 4 + 2];

            int luma = (int)(0.299f * r +  0.587f * g + 0.114f * b);
            data[0][x] = av_clip_uint8(luma);

            if (!(x & 1) && !(y & 1)) {
                int r = (buf[x * 4 + 0]          + buf[(x + 1) * 4 + 0] +
                         buf[x * 4 + 0 + stride] + buf[(x + 1) * 4 + 0 + stride]) / 4;
                int g = (buf[x * 4 + 1]          + buf[(x + 1) * 4 + 1] +
                         buf[x * 4 + 1 + stride] + buf[(x + 1) * 4 + 1 + stride]) / 4;
                int b = (buf[x * 4 + 2]          + buf[(x + 1) * 4 + 2] +
                         buf[x * 4 + 2 + stride] + buf[(x + 1) * 4 + 2 + stride]) / 4;

                int cr = (int)(-0.14713f * r - 0.28886f * g + 0.436f   * b);
                int cb = (int)( 0.615f   * r - 0.51499f * g - 0.10001f * b);
                data[1][x / 2] = av_clip_uint8(128 + cr);
                data[2][x / 2] = av_clip_uint8(128 + cb);
            }
        }
        buf += -stride;
        data[0] += g_pVFrame->linesize[0];
        if (y & 1) {
            data[1] += g_pVFrame->linesize[1];
            data[2] += g_pVFrame->linesize[2];
        }
    }

    return WriteFrame(g_pVFrame);
}
Code Example #14
File: main.cpp Project: Amos-zq/ispc
int main(int argc, char** argv) {
    if (argc < 2) {
        printf("usage: deferred_shading <input_file (e.g. data/pp1280x720.bin)> [tasks iterations] [serial iterations]\n");
        return 1;
    }
    static unsigned int test_iterations[] = {5, 3, 500}; //last value is for nframes, it is scale.
    if (argc == 5) {
        for (int i = 0; i < 3; i++) {
            test_iterations[i] = atoi(argv[2 + i]);
        }
    }

    InputData *input = CreateInputDataFromFile(argv[1]);
    if (!input) {
        printf("Failed to load input file \"%s\"!\n", argv[1]);
        return 1;
    }

    Framebuffer framebuffer(input->header.framebufferWidth,
                            input->header.framebufferHeight);

    int nframes = test_iterations[2];
    double ispcCycles = 1e30;
    for (int i = 0; i < test_iterations[0]; ++i) {
        framebuffer.clear();
        reset_and_start_timer();
        for (int j = 0; j < nframes; ++j)
            ispc::RenderStatic(&input->header, &input->arrays,
                               VISUALIZE_LIGHT_COUNT,
                               framebuffer.r, framebuffer.g, framebuffer.b);
        double msec = get_elapsed_msec() / nframes;
        printf("@time of ISPC + TASKS run:\t\t\t[%.3f] msec [%.3f fps]\n", msec, 1.0e3/msec);
        ispcCycles = std::min(ispcCycles, msec);
    }
    printf("[ispc static + tasks]:\t\t[%.3f] msec to render "
           "%d x %d image\n", ispcCycles,
           input->header.framebufferWidth, input->header.framebufferHeight);
    WriteFrame("deferred-ispc-static.ppm", input, framebuffer);

    DeleteInputData(input);

    return 0;
}
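
Here WriteFrame("deferred-ispc-static.ppm", input, framebuffer) dumps the rendered framebuffer to a PPM image. A standalone sketch with the same intent is shown below; the separate uint8_t channel planes mirror how the example fills framebuffer.r/g/b, while the WritePPM name and signature are assumptions for illustration rather than the benchmark's actual WriteFrame().

#include <cstdint>
#include <cstdio>

// Write an 8-bit RGB image stored as three separate channel planes to a binary PPM file.
static void WritePPM(const char* filename, int width, int height,
                     const uint8_t* r, const uint8_t* g, const uint8_t* b)
{
    FILE* f = fopen(filename, "wb");
    if (!f)
        return;
    fprintf(f, "P6\n%d %d\n255\n", width, height);
    for (int i = 0; i < width * height; ++i) {
        fputc(r[i], f);
        fputc(g[i], f);
        fputc(b[i], f);
    }
    fclose(f);
}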
Code Example #15
File: EncoderFFmpeg.cpp Project: Gemini88/xbmc-1
int CEncoderFFmpeg::Encode(int nNumBytesRead, BYTE* pbtStream)
{
    while(nNumBytesRead > 0)
    {
        unsigned int space = m_NeededBytes - m_BufferSize;
        unsigned int copy  = (unsigned int)nNumBytesRead > space ? space : nNumBytesRead;

        memcpy(&m_Buffer[m_BufferSize], pbtStream, copy);
        m_BufferSize  += copy;
        pbtStream     += copy;
        nNumBytesRead -= copy;

        /* only write full packets */
        if (m_BufferSize == m_NeededBytes)
            if (!WriteFrame()) return 0;
    }

    return 1;
}
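
Encode() buffers incoming PCM and emits a packet through WriteFrame() each time m_NeededBytes have accumulated, while Close() (Code Example #1) flushes the remainder and writes the trailer. A hypothetical calling sequence is sketched below; the chunk size, the PCM source and the EncodeAll name are assumptions for illustration, and only Encode() and Close() come from the excerpts.

#include <algorithm>

// Hypothetical caller: feed PCM data to the encoder in chunks, then finalize the file.
static void EncodeAll(CEncoderFFmpeg& encoder, BYTE* pcm, int totalBytes)
{
    const int chunk = 4096;
    for (int offset = 0; offset < totalBytes; offset += chunk)
    {
        int toWrite = std::min(chunk, totalBytes - offset);
        if (!encoder.Encode(toWrite, pcm + offset))   /* Encode() returns 0 when WriteFrame() fails */
            break;
    }
    encoder.Close();                                  /* drain the buffer, write the trailer, clean up */
}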
Code Example #16
File: FifoPlayer.cpp Project: Zombiebest/Dolphin
bool FifoPlayer::Play()
{
	if (!m_File)
		return false;

	if (m_File->GetFrameCount() == 0)
		return false;

	m_CurrentFrame = m_FrameRangeStart;

	LoadMemory();

	// This loop replaces the CPU loop that occurs when a game is run
	while (PowerPC::GetState() != PowerPC::CPU_POWERDOWN)
	{
		if (PowerPC::GetState() == PowerPC::CPU_RUNNING)
		{
			if (m_CurrentFrame >= m_FrameRangeEnd)
			{
				m_CurrentFrame = m_FrameRangeStart;

				CoreTiming::downcount = 0;
				CoreTiming::Advance();
			}
			else
			{
				if (m_FrameWrittenCb)
					m_FrameWrittenCb();

				if (m_EarlyMemoryUpdates && m_CurrentFrame == m_FrameRangeStart)
					WriteAllMemoryUpdates();

				WriteFrame(m_File->GetFrame(m_CurrentFrame), m_FrameInfo[m_CurrentFrame]);		

				++m_CurrentFrame;
			}
		}
	}

	return true;
}
Code Example #17
File: FifoPlayer.cpp Project: LordNed/dolphin
int FifoPlayer::AdvanceFrame()
{
	if (m_CurrentFrame >= m_FrameRangeEnd)
	{
		if (!m_Loop)
			return CPU::CPU_POWERDOWN;
		// If there are zero frames in the range then sleep instead of busy spinning
		if (m_FrameRangeStart >= m_FrameRangeEnd)
			return CPU::CPU_STEPPING;

		m_CurrentFrame = m_FrameRangeStart;
	}

	if (m_FrameWrittenCb)
		m_FrameWrittenCb();

	if (m_EarlyMemoryUpdates && m_CurrentFrame == m_FrameRangeStart)
		WriteAllMemoryUpdates();

	WriteFrame(m_File->GetFrame(m_CurrentFrame), m_FrameInfo[m_CurrentFrame]);

	++m_CurrentFrame;
	return CPU::CPU_RUNNING;
}
Code Example #18
File: avilib.cpp Project: ljshj/KindergartenParent
void AVIWriter::writeFrame(unsigned char *framepre,long sizepre,unsigned char *frame,long size,bool bKeyframe)
{
	WriteFrame(framepre, sizepre, frame, size, true, bKeyframe);
	counter++;
}
Code Example #19
File: main.cpp Project: UIKit0/ispc
int main(int argc, char** argv) {
    if (argc != 2) {
        printf("usage: deferred_shading <input_file (e.g. data/pp1280x720.bin)>\n");
        return 1;
    }

    InputData *input = CreateInputDataFromFile(argv[1]);
    if (!input) {
        printf("Failed to load input file \"%s\"!\n", argv[1]);
        return 1;
    }

    Framebuffer framebuffer(input->header.framebufferWidth,
                            input->header.framebufferHeight);

    InitDynamicC(input);
#ifdef __cilk
    InitDynamicCilk(input);
#endif // __cilk

    int nframes = 5;
    double ispcCycles = 1e30;
    for (int i = 0; i < 5; ++i) {
        framebuffer.clear();
        reset_and_start_timer();
        for (int j = 0; j < nframes; ++j)
            ispc::RenderStatic(input->header, input->arrays,
                               VISUALIZE_LIGHT_COUNT,
                               framebuffer.r, framebuffer.g, framebuffer.b);
        double mcycles = get_elapsed_mcycles() / nframes;
        ispcCycles = std::min(ispcCycles, mcycles);
    }
    printf("[ispc static + tasks]:\t\t[%.3f] million cycles to render "
           "%d x %d image\n", ispcCycles,
           input->header.framebufferWidth, input->header.framebufferHeight);
    WriteFrame("deferred-ispc-static.ppm", input, framebuffer);

#ifdef __cilk
    double dynamicCilkCycles = 1e30;
    for (int i = 0; i < 5; ++i) {
        framebuffer.clear();
        reset_and_start_timer();
        for (int j = 0; j < nframes; ++j)
            DispatchDynamicCilk(input, &framebuffer);
        double mcycles = get_elapsed_mcycles() / nframes;
        dynamicCilkCycles = std::min(dynamicCilkCycles, mcycles);
    }
    printf("[ispc + Cilk dynamic]:\t\t[%.3f] million cycles to render image\n", 
           dynamicCilkCycles);
    WriteFrame("deferred-ispc-dynamic.ppm", input, framebuffer);
#endif // __cilk

    double serialCycles = 1e30;
    for (int i = 0; i < 5; ++i) {
        framebuffer.clear();
        reset_and_start_timer();
        for (int j = 0; j < nframes; ++j)
            DispatchDynamicC(input, &framebuffer);
        double mcycles = get_elapsed_mcycles() / nframes;
        serialCycles = std::min(serialCycles, mcycles);
    }
    printf("[C++ serial dynamic, 1 core]:\t[%.3f] million cycles to render image\n", 
           serialCycles);
    WriteFrame("deferred-serial-dynamic.ppm", input, framebuffer);

#ifdef __cilk
    printf("\t\t\t\t(%.2fx speedup from static ISPC, %.2fx from Cilk+ISPC)\n", 
           serialCycles/ispcCycles, serialCycles/dynamicCilkCycles);
#else
    printf("\t\t\t\t(%.2fx speedup from ISPC + tasks)\n", serialCycles/ispcCycles);
#endif // __cilk

    DeleteInputData(input);

    return 0;
}
Code Example #20
void GSDrawScanlineCodeGenerator::Generate()
{
	// TODO: on linux/mac rsi, rdi, xmm6-xmm15 are all caller saved

	push(rbx);
	push(rsi);
	push(rdi);
	push(rbp);
	push(r12);
	push(r13);

	sub(rsp, 8 + 10 * 16);
	
	for(int i = 6; i < 16; i++)
	{
		vmovdqa(ptr[rsp + (i - 6) * 16], Xmm(i));
	}

	mov(r10, (size_t)&m_test[0]);
	mov(r11, (size_t)&m_local);
	mov(r12, (size_t)m_local.gd);
	mov(r13, (size_t)m_local.gd->vm);

	Init();

	// rcx = steps
	// rsi = fza_base
	// rdi = fza_offset
	// r10 = &m_test[0]
	// r11 = &m_local
	// r12 = m_local->gd
	// r13 = m_local->gd.vm
	// xmm7 = vf (sprite && ltf)
	// xmm8 = z
	// xmm9 = f
	// xmm10 = s
	// xmm11 = t
	// xmm12 = q
	// xmm13 = rb
	// xmm14 = ga 
	// xmm15 = test

	if(!m_sel.edge)
	{
		align(16);
	}

L("loop");

	TestZ(xmm5, xmm6);

	// ebp = za

	if(m_sel.mmin)
	{
		SampleTextureLOD();
	}
	else
	{
		SampleTexture();
	}

	// ebp = za
	// xmm2 = rb
	// xmm3 = ga

	AlphaTFX();

	// ebp = za
	// xmm2 = rb
	// xmm3 = ga

	ReadMask();

	// ebp = za
	// xmm2 = rb
	// xmm3 = ga
	// xmm4 = fm
	// xmm5 = zm

	TestAlpha();

	// ebp = za
	// xmm2 = rb
	// xmm3 = ga
	// xmm4 = fm
	// xmm5 = zm

	ColorTFX();

	// ebp = za
	// xmm2 = rb
	// xmm3 = ga
	// xmm4 = fm
	// xmm5 = zm

	Fog();

	// ebp = za
	// xmm2 = rb
	// xmm3 = ga
	// xmm4 = fm
	// xmm5 = zm

	ReadFrame();

	// ebx = fa
	// ebp = za
	// xmm2 = rb
	// xmm3 = ga
	// xmm4 = fm
	// xmm5 = zm
	// xmm6 = fd

	TestDestAlpha();

	// ebx = fa
	// ebp = za
	// xmm2 = rb
	// xmm3 = ga
	// xmm4 = fm
	// xmm5 = zm
	// xmm6 = fd

	WriteMask();

	// ebx = fa
	// edx = fzm
	// ebp = za
	// xmm2 = rb
	// xmm3 = ga
	// xmm4 = fm
	// xmm5 = zm
	// xmm6 = fd

	WriteZBuf();

	// ebx = fa
	// edx = fzm
	// xmm2 = rb
	// xmm3 = ga
	// xmm4 = fm
	// xmm6 = fd

	AlphaBlend();

	// ebx = fa
	// edx = fzm
	// xmm2 = rb
	// xmm3 = ga
	// xmm4 = fm
	// xmm6 = fd

	WriteFrame();

L("step");

	// if(steps <= 0) break;

	if(!m_sel.edge)
	{
		test(rcx, rcx);

		jle("exit", T_NEAR);

		Step();

		jmp("loop", T_NEAR);
	}

L("exit");

	for(int i = 6; i < 16; i++)
	{
		vmovdqa(Xmm(i), ptr[rsp + (i - 6) * 16]);
	}

	add(rsp, 8 + 10 * 16);

	pop(r13);
	pop(r12);
	pop(rbp);
	pop(rdi);
	pop(rsi);
	pop(rbx);

	ret();
}
Code Example #21
void GSDrawScanlineCodeGenerator::Generate()
{
    push(ebx);
    push(esi);
    push(edi);
    push(ebp);

    const int params = 16;

    Init(params);

    if(!m_sel.edge)
    {
        align(16);
    }

    L("loop");

    // ecx = steps
    // esi = fzbr
    // edi = fzbc
    // xmm0 = z/zi
    // xmm2 = u (tme)
    // xmm3 = v (tme)
    // xmm5 = rb (!tme)
    // xmm6 = ga (!tme)
    // xmm7 = test

    bool tme = m_sel.tfx != TFX_NONE;

    TestZ(tme ? xmm5 : xmm2, tme ? xmm6 : xmm3);

    // ecx = steps
    // esi = fzbr
    // edi = fzbc
    // - xmm0
    // xmm2 = u (tme)
    // xmm3 = v (tme)
    // xmm5 = rb (!tme)
    // xmm6 = ga (!tme)
    // xmm7 = test

    SampleTexture();

    // ecx = steps
    // esi = fzbr
    // edi = fzbc
    // ebp = za
    // - xmm2
    // - xmm3
    // - xmm4
    // xmm5 = rb
    // xmm6 = ga
    // xmm7 = test

    AlphaTFX();

    // ecx = steps
    // esi = fzbr
    // edi = fzbc
    // ebp = za
    // xmm2 = gaf (TFX_HIGHLIGHT || TFX_HIGHLIGHT2 && !tcc)
    // xmm5 = rb
    // xmm6 = ga
    // xmm7 = test

    if(m_sel.fwrite)
    {
        movdqa(xmm3, xmmword[&m_env.fm]);
    }

    if(m_sel.zwrite)
    {
        movdqa(xmm4, xmmword[&m_env.zm]);
    }

    // ecx = steps
    // esi = fzbr
    // edi = fzbc
    // ebp = za
    // xmm2 = gaf (TFX_HIGHLIGHT || TFX_HIGHLIGHT2 && !tcc)
    // xmm3 = fm
    // xmm4 = zm
    // xmm5 = rb
    // xmm6 = ga
    // xmm7 = test

    TestAlpha();

    // ecx = steps
    // esi = fzbr
    // edi = fzbc
    // ebp = za
    // xmm2 = gaf (TFX_HIGHLIGHT || TFX_HIGHLIGHT2 && !tcc)
    // xmm3 = fm
    // xmm4 = zm
    // xmm5 = rb
    // xmm6 = ga
    // xmm7 = test

    ColorTFX();

    // ecx = steps
    // esi = fzbr
    // edi = fzbc
    // ebp = za
    // xmm3 = fm
    // xmm4 = zm
    // xmm5 = rb
    // xmm6 = ga
    // xmm7 = test

    Fog();

    // ecx = steps
    // esi = fzbr
    // edi = fzbc
    // ebp = za
    // xmm3 = fm
    // xmm4 = zm
    // xmm5 = rb
    // xmm6 = ga
    // xmm7 = test

    ReadFrame();

    // ecx = steps
    // esi = fzbr
    // edi = fzbc
    // ebp = za
    // xmm2 = fd
    // xmm3 = fm
    // xmm4 = zm
    // xmm5 = rb
    // xmm6 = ga
    // xmm7 = test

    TestDestAlpha();

    // fm |= test;
    // zm |= test;

    if(m_sel.fwrite)
    {
        por(xmm3, xmm7);
    }

    if(m_sel.zwrite)
    {
        por(xmm4, xmm7);
    }

    // int fzm = ~(fm == GSVector4i::xffffffff()).ps32(zm == GSVector4i::xffffffff()).mask();

    pcmpeqd(xmm1, xmm1);

    if(m_sel.fwrite && m_sel.zwrite)
    {
        movdqa(xmm0, xmm1);
        pcmpeqd(xmm1, xmm3);
        pcmpeqd(xmm0, xmm4);
        packssdw(xmm1, xmm0);
    }
    else if(m_sel.fwrite)
    {
        pcmpeqd(xmm1, xmm3);
        packssdw(xmm1, xmm1);
    }
    else if(m_sel.zwrite)
    {
        pcmpeqd(xmm1, xmm4);
        packssdw(xmm1, xmm1);
    }

    pmovmskb(edx, xmm1);
    not(edx);

    // ebx = fa
    // ecx = steps
    // edx = fzm
    // esi = fzbr
    // edi = fzbc
    // ebp = za
    // xmm2 = fd
    // xmm3 = fm
    // xmm4 = zm
    // xmm5 = rb
    // xmm6 = ga

    WriteZBuf();

    // ebx = fa
    // ecx = steps
    // edx = fzm
    // esi = fzbr
    // edi = fzbc
    // - ebp
    // xmm2 = fd
    // xmm3 = fm
    // - xmm4
    // xmm5 = rb
    // xmm6 = ga

    AlphaBlend();

    // ebx = fa
    // ecx = steps
    // edx = fzm
    // esi = fzbr
    // edi = fzbc
    // xmm2 = fd
    // xmm3 = fm
    // xmm5 = rb
    // xmm6 = ga

    WriteFrame(params);

    L("step");

    // if(steps <= 0) break;

    if(!m_sel.edge)
    {
        test(ecx, ecx);
        jle("exit", T_NEAR);

        Step();

        jmp("loop", T_NEAR);
    }

    L("exit");

    pop(ebp);
    pop(edi);
    pop(esi);
    pop(ebx);

    ret(8);
}
Code Example #22
void CCommFrameWriterAo::RunL()
/**
 * This method is called when a write to the LDD completes.
 */
	{
	_LOG_L4C1(" ");
	_LOG_L4C2(">>CCommFrameWriterAo::RunL [iStatus=%d] - written to LDD",
		iStatus.Int());

	if (iStatus.Int())
		{
		_LOG_L1C2("** Error writing to LDD ** [iStatus=%d]",iStatus.Int());

		if (!iCompleteWhenSent)
			{
			// The frame being sent was not the last or only one for this dlc, other
			// frames exist

			// go through list and remove other frames to send for this dlc
			RemoveAnyDlcFramesOnWriteList(iDlcNum, EFalse);

			iCompleteWhenSent = ETrue;
			}
		}

	if (iCompleteWhenSent)
		{
		iCompleteWhenSent = EFalse;

		_LOG_L3C2("Complete write [iDlcNum=%d]",iDlcNum);
		CompleteWrite(iDlcNum,iStatus.Int());
		}

	// check for another message that needs to be sent to the baseband
	CCsyMsgBufBpFrame* bpFrame = GetFrameToWrite();
	if (bpFrame)
		{
		TInt ret = KErrNone;
		do
			{
			ret = WriteFrame(bpFrame);
			if (ret)
				{
				_LOG_L1C2("** Write frame failed [ret=%d] **",ret);
				if (!iCompleteWhenSent)
					{
					// go through list and remove other frames to send for this dlc
					RemoveAnyDlcFramesOnWriteList(iDlcNum, EFalse);
					}
				}

			// Loop around if there is an error and try and send the next frame
			}
			while (ret);
		}
	else
		{
		_LOG_L3C1("Finished all writes - nothing more to send to LDD");
		}

	_LOG_L4C1("<<CCommFrameWriterAo::RunL");
	_LOG_L3C1(" "); // please leave this separator in
	}
Code Example #23
File: xyuv_header_run.cpp Project: Catuna/xyuv
void XYUVHeader::Run(const ::options & options) {

    // If help has been requested, print it and quit.
    if (options.print_help) {
        PrintHelp();
        return;
    }

    // Otherwise do something useful.
#ifdef INSTALL_FORMATS_PATH
    // Load base formats from installation path
    config_manager_.load_configurations(INSTALL_FORMATS_PATH);
#endif

    // Load all additional formats supplied on the command line.
    for (const auto & path : options.additional_config_directories) {
        config_manager_.load_configurations(path);
    }

    // If a list of all formats has been requested, print it and quit.
    if (options.list_all_formats) {
        PrintAllFormats();
        return;
    }

    // Try to acquire each output stream.
    bool detect_concatinate = options.concatinate;
    std::vector<std::string> output_names;

    if (options.writeout) {
        // Make a set of output names to do argument validation.
        std::unordered_set<std::string> out_name_set;

        if ( options.output_name.size() == 0 ) {
            // If concatinating, use first file name as output.
            if (detect_concatinate) {
                std::string out_path = strip_suffix(options.input_files[0]) + ".xyuv";
                out_name_set.emplace(out_path);
                output_names.emplace_back(out_path);
            }
            else {
                for (const auto & in_file : options.input_files ) {
                    std::string out_path = strip_suffix(in_file) + ".xyuv";

                    out_name_set.insert(out_path);
                    output_names.emplace_back(out_path);
                }
            }
        }
        else {
            for (const auto & out_path : options.output_name ) {
                out_name_set.insert(out_path);
                output_names.emplace_back(out_path);
            }
        }

        // Check that no two output files overwrite one another.
        if (out_name_set.size() != output_names.size()) {
            throw std::invalid_argument("The same output name is given more than once. This is illegal for one invocation of the program.");
        }

        // Check that no input file is the same as an output file.
        // We don't account for relative paths etc here, but we won't care for now.
        for (const auto & in_path : options.input_files) {
            if (out_name_set.find(in_path) != out_name_set.end()) {
                throw std::invalid_argument("File '" + in_path + "' given as both an input and an output file, this is illegal.");
            }
        }

        // Check that the number of output files matches the number of input files
        // or one.
        if (output_names.size() != 1 && output_names.size() != options.input_files.size() ) {
            throw std::invalid_argument("If supplied, the number of output files must exactly match the number of input files or one.");
        }

        if (output_names.size() == 1 && options.input_files.size() > 1) {
            detect_concatinate = true;
        }
    }

    // Check validity of formats:
    std::vector<xyuv::format_template> format_templates;
    std::vector<xyuv::chroma_siting> sitings;
    std::vector<xyuv::conversion_matrix> matrices;

    for (const auto & format_template : options.output_formats ) {
        try {
            // Try to load the format-template from file.
            format_templates.push_back(xyuv::config_manager::load_format_template(format_template));
        } catch (std::runtime_error & e) {
            try {
                // If that failed look it up in the config manager.
                format_templates.push_back(config_manager_.get_format_template(format_template));
            } catch (std::exception e2) {
                std::string err_msg = std::string("Could not load format template ") + format_template +
                        ": " + e.what() + " and " + e2.what() + ". Please check the spelling of the argument.";
                throw std::invalid_argument(std::string(err_msg));
            }
        }
    }
    if (format_templates.size() != 1 && format_templates.size() != options.input_files.size()) {
        throw std::invalid_argument("The number of format templates must exactly match the number of input files or one (which implies all input files have the same format).");
    }

    for (const auto & siting : options.output_siting ) {
        try {
            // Try to load the siting from file.
            sitings.push_back(xyuv::config_manager::load_chroma_siting(siting));
        } catch (std::runtime_error & e) {
            try {
                // If that failed look it up in the config manager.
                sitings.push_back(config_manager_.get_chroma_siting(siting));
            } catch (std::exception e2) {
                std::string err_msg = std::string("Could not load chroma siting ") + siting +
                                      ": " + e.what() + " and " + e2.what() + ". Please check the spelling of the argument.";
                throw std::invalid_argument(std::string(err_msg));
            }
        }
    }
    if (sitings.size() != 1 && sitings.size() != options.input_files.size()) {
        throw std::invalid_argument("The number of chroma sitings must exactly match the number of input files or one (which implies all input files have the same chroma siting).");
    }

    for (const auto & matrix : options.output_matrix ) {
        try {
            // Try to load the matrix from file.
            matrices.push_back(xyuv::config_manager::load_conversion_matrix(matrix));
        } catch (std::runtime_error & e) {
            try {
                // If that failed look it up in the config manager.
                matrices.push_back(config_manager_.get_conversion_matrix(matrix));
            } catch (std::exception e2) {
                std::string err_msg = std::string("Could not load conversion matrix ") + matrix +
                                      ": " + e.what() + " and " + e2.what() + ". Please check the spelling of the argument.";
                throw std::invalid_argument(std::string(err_msg));
            }
        }
    }
    if (matrices.size() != 1 && matrices.size() != options.input_files.size()) {
        throw std::invalid_argument("The number of conversion matrices must exactly match the number of input files or one (which implies all input files have the same conversion matrix).");
    }

    // Do some extra checking for the chroma siting, which must match the subsampling mode of the format.
    if (sitings.size() == 1 && format_templates.size() > 1) {
        for (std::size_t i = 0; i < format_templates.size(); i++) {
            if (!(sitings[0].subsampling == format_templates[i].subsampling)) {
                throw std::invalid_argument("Sub-sampling mismatch, for format template #"
                                            + std::to_string(i) + "'" + options.output_formats[i] + "'"
                                            + " corresponding chroma siting expects subsampling mode: "
                                            + std::to_string(sitings[0].subsampling.macro_px_w) + "x" + std::to_string(sitings[0].subsampling.macro_px_h)
                                            + " got: "
                                            + std::to_string(format_templates[i].subsampling.macro_px_w) + "x" + std::to_string(format_templates[i].subsampling.macro_px_h)
                );
            }
        }
    }

    if (options.display && options.input_files.size() != 1 ) {
        throw std::invalid_argument("--display only supported for a single input.");
    }

    if (options.input_files.size() == 0) {
        throw std::logic_error("Missing input files.");
    }

    if (options.input_files.size() > 0 && options.image_w * options.image_h == 0) {
        throw std::logic_error("Image size must be non-zero.");
    }

    std::unique_ptr<std::ofstream> fout;
    if (detect_concatinate) {
        fout.reset(new std::ofstream(output_names[0], std::ios::binary | std::ios::app ));
        if (!(*fout)) {
            throw std::runtime_error("Could not open output file: '" + output_names[0] + "' for writing");
        }
    }

    // At this point everything looks good :) Lets load some formats.
    for ( std::size_t i = 0; i < options.input_files.size(); i++) {
        xyuv::format target_format = xyuv::create_format(
                options.image_w,
                options.image_h,
                format_templates.size() == 1 ? format_templates[0] : format_templates[i],
                matrices.size() == 1 ? matrices[0] : matrices[i],
                sitings.size() == 1 ? sitings[0] : sitings[i]
        );

        xyuv::frame frame = LoadConvertFrame(target_format, options.input_files[i]);

        // If --flip-y is set then change the image origin to the inverse.
        if (options.flip_y) {
            switch (frame.format.origin) {
                case xyuv::image_origin::UPPER_LEFT:
                    frame.format.origin = xyuv::image_origin::LOWER_LEFT;
                    break;
                case xyuv::image_origin::LOWER_LEFT:
                    frame.format.origin = xyuv::image_origin::UPPER_LEFT;
                    break;
                default:
                    break;
            }
        }

        if (options.writeout) {
            if (options.concatinate) {
                // Append file at end of concatinated string.
                xyuv::write_frame(*fout, frame);
            }
            else {
                try {
                    WriteFrame(frame, output_names[i]);
                } catch (std::exception & e) {
                    std::cout << "[Warning] Error occured while writing file '" <<
                                 output_names[i] << "'\n   " <<
                                 e.what() << "\n   Skipping file." << std::endl;
                }
            }
        }

        if (options.display) {
            Display(frame);
        }
    }
}
Code Example #24
void newVideoRenderTarget::slotConsumeFrame(const QImage &image, const int frameNumber)
{
    WriteFrame(writer, image);
}
Code Example #25
void GPUDrawScanlineCodeGenerator::Generate()
{
	push(esi);
	push(edi);

	Init();

	align(16);

L("loop");

	// GSVector4i test = m_test[7 + (steps & (steps >> 31))];

	mov(edx, ecx);
	sar(edx, 31);
	and(edx, ecx);
	shl(edx, 4);

	movdqa(xmm7, ptr[edx + (size_t)&m_test[7]]);

	// movdqu(xmm1, ptr[edi]);

	movq(xmm1, qword[edi]);
	movhps(xmm1, qword[edi + 8]);

	// ecx = steps
	// esi = tex (tme)
	// edi = fb
	// xmm1 = fd
	// xmm2 = s
	// xmm3 = t
	// xmm4 = r
	// xmm5 = g
	// xmm6 = b
	// xmm7 = test

	TestMask();

	SampleTexture();

	// xmm1 = fd
	// xmm3 = a
	// xmm4 = r
	// xmm5 = g
	// xmm6 = b
	// xmm7 = test
	// xmm0, xmm2 = free

	ColorTFX();

	AlphaBlend();

	Dither();

	WriteFrame();

L("step");

	// if(steps <= 0) break;

	test(ecx, ecx);
	jle("exit", T_NEAR);

	Step();

	jmp("loop", T_NEAR);

L("exit");

	pop(edi);
	pop(esi);

	ret(8);
}
Code Example #26
File: avilib.cpp Project: ljshj/KindergartenParent
void AVIWriter::writeAudio(unsigned char *frame,long size)
{
	WriteFrame(0, 0, frame, size, false, true);
	audio_counter++;
}