Example #1
0
File: mpeg1dec.c  Project: Cheeseness/ags
/* Decode and display an MPEG stream fed through the user I/O callbacks
 * bound to 'ptr'. Clears 'bmp' to black before playback, installs
 * 'callback' (or the default) as the per-frame hook, and optionally
 * replays the stream from the start while 'loop' is set and decoding
 * keeps finishing cleanly.
 * Returns the last decode_stream() result, or APEG_ERROR if the stream
 * could not be opened. */
int apeg_play_mpg_ex(void *ptr, BITMAP *bmp, int loop, int (*callback)(BITMAP*))
{
	int result;

	Initialize_Decoder();

	apeg_stream = apeg_open_stream_ex(ptr);
	if(!apeg_stream)
		return APEG_ERROR;

	/* Start from a black frame so no stale pixels show through. */
	if(bmp)
		clear_to_color(bmp, makecol(0, 0, 0));

	/* Install the per-frame callback, falling back to the default. */
	callback_proc = callback ? callback : default_callback;

	/* Keep decoding from the top while looping is requested and the
	 * previous pass completed without error. */
	do {
		result = decode_stream((APEG_LAYER*)apeg_stream, bmp);
		if(loop && result == APEG_OK)
			apeg_reset_stream(apeg_stream);
	} while(loop && result == APEG_OK);

	apeg_close_stream(apeg_stream);
	apeg_stream = NULL;

	return result;
}
Example #2
0
File: mpeg1dec.c  Project: Cheeseness/ags
/* Like apeg_play_mpg_ex(), but plays an already-opened stream that the
 * caller owns. The stream is NOT closed on return — only the module's
 * current-stream pointer is cleared — so the caller remains responsible
 * for apeg_close_stream(). Returns the last decode_stream() result. */
int apeg_play_apeg_stream(APEG_STREAM *stream_to_play, BITMAP *bmp, int loop, int (*callback)(BITMAP*tempBuffer))
{
	int result = APEG_OK;

	apeg_stream = stream_to_play;

	Initialize_Decoder();

	/* Blank the target bitmap before the first frame is drawn. */
	if(bmp)
		clear_to_color(bmp, makecol(0, 0, 0));

	/* Install the per-frame callback, falling back to the default. */
	callback_proc = callback ? callback : default_callback;

	/* Decode repeatedly while looping is requested and each pass
	 * finishes cleanly. */
	do {
		result = decode_stream((APEG_LAYER*)apeg_stream, bmp);
		if(loop && result == APEG_OK)
			apeg_reset_stream(apeg_stream);
	} while(loop && result == APEG_OK);

	apeg_stream = NULL;

	return result;
}
Example #3
0
File: mpeg1dec.c  Project: Cheeseness/ags
/* Open a stream whose data is pulled through user-supplied callbacks;
 * 'ptr' is the opaque handle handed back to those callbacks.
 * Returns the new stream, or NULL on failure. */
APEG_STREAM *apeg_open_stream_ex(void *ptr)
{
	APEG_LAYER *lay;

	Initialize_Decoder();

	lay = new_apeg_stream();
	if(!lay)
		return NULL;

	/* Any apeg_error_jump() during setup lands here: tear the
	 * half-built stream down and report failure. */
	if(setjmp(jmp_buffer))
	{
		apeg_close_stream((APEG_STREAM*)lay);
		return NULL;
	}

	/* Wire up the user-buffer callback table. */
	lay->ext_data.ptr = ptr;
	lay->ext_data.init = _init_func;
	lay->ext_data.request = _request_func;
	lay->ext_data.skip = _skip_func;

	lay->pf = pack_fopen_vtable(&ext_vtable, lay);
	if(!lay->pf)
		apeg_error_jump("Could not open stream");
	lay->buffer_type = USER_BUFFER;

	setup_stream(lay);

	return (APEG_STREAM*)lay;
}
Example #4
0
File: mpeg1dec.c  Project: Cheeseness/ags
/* Open a stream that decodes straight out of a caller-provided memory
 * block of 'data_len' bytes. The buffer is not copied; it must stay
 * valid for the stream's lifetime. Returns NULL on failure. */
APEG_STREAM *apeg_open_memory_stream(void *mpeg_data, int data_len)
{
	APEG_LAYER *lay;

	Initialize_Decoder();

	lay = new_apeg_stream();
	if(!lay)
		return NULL;

	/* apeg_error_jump() unwinds to here: release and bail out. */
	if(setjmp(jmp_buffer))
	{
		apeg_close_stream((APEG_STREAM*)lay);
		return NULL;
	}

	/* Point the packfile layer at the caller's memory block. */
	lay->mem_data.buf = mpeg_data;
	lay->mem_data.bytes_left = data_len;
	lay->pf = pack_fopen_vtable(&mem_vtable, lay);
	if(!lay->pf)
		apeg_error_jump("Could not open stream");

	lay->buffer_type = MEMORY_BUFFER;
	lay->buffer_size = data_len;

	setup_stream(lay);

	return (APEG_STREAM*)lay;
}
Example #5
0
File: mpeg1dec.c  Project: Cheeseness/ags
// Open an MPEG or Ogg file. Returns a structure for stream manipulation.
/* Open an MPEG or Ogg file by name. Returns a structure for stream
 * manipulation, or NULL if the stream could not be created. */
APEG_STREAM *apeg_open_stream(const char *filename)
{
	APEG_LAYER *lay;

	/* Make sure the decoder tables exist. */
	Initialize_Decoder();

	/* Allocate a fresh stream structure. */
	lay = new_apeg_stream();
	if(!lay)
		return NULL;

	/* If the decoder faults anywhere during opening it longjmps back
	 * here; clean up the partial stream and fail. */
	if(setjmp(jmp_buffer))
	{
		apeg_close_stream((APEG_STREAM*)lay);
		return NULL;
	}

	/* Keep a private copy of the filename so the file can be
	 * (re)opened later when the buffer is initialized. */
	lay->fname = strdup(filename);
	if(!lay->fname)
		apeg_error_jump("Couldn't duplicate filename");

	/* Disk-backed buffering with the standard file-buffer size. */
	lay->buffer_type = DISK_BUFFER;
	lay->buffer_size = F_BUF_SIZE;

	setup_stream(lay);

	return (APEG_STREAM*)lay;
}
Example #6
0
///////////////////////////////////////////////////////////////////////
// CheckInputType: Check if the input type can be done
///////////////////////////////////////////////////////////////////////
HRESULT CAACSBREnhanced::CheckInputType(const CMediaType *mtIn)
{
    // Only audio media types are candidates.
    if (*mtIn->Type() != MEDIATYPE_Audio)
    {
        return VFW_E_TYPE_NOT_ACCEPTED;
    }


    // Only the enhanced AAC+ subtype is handled by this filter.
    if (mtIn->subtype != MEDIASUBTYPE_EAACPLUS)
    {
        RETAILMSG(1,(L"\n[EAAC PLUS]Check MediaSubType again..\n"));
        return VFW_E_TYPE_NOT_ACCEPTED;
    }


    WAVEFORMATEX* wfe = (WAVEFORMATEX*) mtIn->Format();


    if(wfe!=NULL)
    {
        uiSampleRate = wfe->nSamplesPerSec;
        uiChannels = wfe->nChannels;
        bitsPerSample = wfe->wBitsPerSample;

        // Mono/stereo only: anything else falls back to stereo 48kHz/16-bit
        // defaults and the codec-config copy below is skipped (wfe=NULL).
        if(uiChannels>2 || uiChannels<1)
        {
            param_size=0;
            uiChannels=2;
            uiSampleRate=48000;
            bitsPerSample=16;
            wfe=NULL;
        }

        if(wfe!=NULL)
        {
            // (Re)allocate the codec-config buffer for THIS format's size.
            // The old code allocated only once, so a later call with a
            // larger cbSize would overflow the stale buffer in the memcpy
            // below; it also never checked the malloc result.
            free(param);
            param = (BYTE*)malloc(wfe->cbSize + 1);
            if(param==NULL)
            {
                // Don't memcpy into NULL on allocation failure.
                param_size = 0;
                return E_OUTOFMEMORY;
            }
            // Codec-specific data immediately follows the WAVEFORMATEX
            // header, hence wfe+1.
            memcpy(param, wfe+1, wfe->cbSize);
            param_size = wfe->cbSize;

            Initialize_Decoder(2048,param, param_size, &bytes_left);
        }


    }

    return S_OK;
}
Example #7
0
///////////////////////////////////////////////////////////////////////
// Transform (for CTransformFitler)
//
// Copy the input sample into the output sample - then transform the
// output sample 'in place'. If we have all keyframes, then we should
// not do a copy. If we have cinepak or indeo and are decompressing
// frame N it needs frame decompressed frame N-1 available to calculate
// it, unless we are at a keyframe. So with keyframed codecs, you can't
// get away with applying the transform to change the frames in place,
// because you'll mess up the next frames decompression. The runtime
// MPEG decoder does not have keyframes in the same way so it can be
// done in place. We know if a sample is key frame as we transform
// because the sync point property will be set on the sample
///////////////////////////////////////////////////////////////////////
// Decode one input media sample of EAAC+ audio into PCM in the output
// sample. Two paths, selected by param_size:
//  - param_size != 0: out-of-band codec config was captured in
//    CheckInputType(), and each input sample is decoded with a single
//    AACDecode_Ittiam() call.
//  - param_size == 0: config is in-band; input bytes accumulate in the
//    member buffer InBuf and are drained frame-by-frame in a loop.
// NOTE(review): neither path checks the output buffer capacity before
// copying frameInfo.samples*2 bytes — assumes pOut was sized large
// enough at pin negotiation; confirm against DecideBufferSize.
HRESULT CAACSBREnhanced::Transform(IMediaSample *pIn, IMediaSample *pOut)
{
    unsigned char *pBufferIn;
    unsigned char *pBufferOut;
    SsbSipAudioAacDecoderFrameInfo_t frameInfo;
    AM_MEDIA_TYPE *mtout;
    int iBytesConsumed=0;
    unsigned int ActualOutLength=0;
    short End;
    unsigned int decoded_bytes=0;   // NOTE(review): never used in this function
    // 'once' gates the one-time SBR sample-rate renegotiation below;
    // it is re-armed each time first_time is observed.
    static int once=1;
    frameInfo.sbrEnabled=0;


    HRESULT hr;
#if 0
    if (first_time == 1)
        RETAILMSG(1,(L"\n[EAAC PLUS] Transform....\n"));

    {
        LONGLONG  tStart, tEnd;
        pIn->GetMediaTime(&tStart, &tEnd);
        printf("\n\t*** Time = %u, %u", (unsigned int) tStart, (unsigned int) tEnd);
    }
#endif
    memset(&frameInfo,0,sizeof(SsbSipAudioAacDecoderFrameInfo_t));
    // Map the input and output sample buffers.
    hr= pIn->GetPointer(&pBufferIn);
    if(FAILED(hr))
    {
        return E_FAIL;
    }

    hr= pOut->GetPointer(&pBufferOut);
    if(FAILED(hr))
    {
        return E_FAIL;
    }

    int srcBufLength= pIn->GetActualDataLength();

    // ---- Path 1: out-of-band config already given to the decoder ----
    if(param_size!=0)
    {

        if(first_time==1)
        {
            // After a Stop() the decoder must be re-initialized with the
            // saved codec config before decoding resumes.
            if(stopped_once==1)
            {
                Initialize_Decoder(2048,param, param_size,&bytes_left);
            }
            first_time=0;
            once=1;
        }
        memcpy(InBuf, pBufferIn, srcBufLength);
        End=AACDecode_Ittiam(AACDecoder,InBuf,OutBuf,&frameInfo);

        if(frameInfo.error)
        {
            // Flush what we have (nothing yet) and stop the stream.
            // NOTE(review): execution continues past Stop() into the
            // memcpy below with possibly stale frameInfo.samples —
            // confirm this is intended.
            RETAILMSG(1,(L"\nError in decoding : %d\n", frameInfo.error));
            pOut->SetActualDataLength(ActualOutLength);
            Stop();
        }
        // First frame reporting SBR: the effective output sample rate
        // doubles, so renegotiate the output media type downstream.
        if(frameInfo.sbrEnabled && once)
        {
            uiSampleRate *=2;
            once=0;
            CMediaType temp;

            GetMediaType(0,&temp);

            mtout=(AM_MEDIA_TYPE*)&temp;


            WAVEFORMATEXTENSIBLE *fmt = (WAVEFORMATEXTENSIBLE*)mtout->pbFormat;

            fmt->Format.nChannels = uiChannels;
            fmt->Format.nSamplesPerSec =uiSampleRate ;
            fmt->Format.nBlockAlign = fmt->Format.wBitsPerSample / 8 * fmt->Format.nChannels;
            fmt->Format.nAvgBytesPerSec = fmt->Format.nSamplesPerSec * fmt->Format.nBlockAlign;

            hr = m_pOutput->QueryAccept((AM_MEDIA_TYPE*)&temp);

            if(hr==S_OK)
                hr=pOut->SetMediaType(mtout);

        }
        iBytesConsumed =frameInfo.bytesconsumed;

        // samples*2: decoded output is 16-bit PCM, two bytes per sample.
        memcpy(pBufferOut, (void*)OutBuf, frameInfo.samples*2);
        ActualOutLength=frameInfo.samples*2;
        pOut->SetActualDataLength(ActualOutLength);


        RETAILMSG(0,(L"\n[EAAC PLUS] Decode  (in=%d)(out=%d)....\n", srcBufLength, ActualOutLength));
    }

    // ---- Path 2: in-band config; accumulate input and drain frames ----
    else
    {

        iBytesConsumed=0;
        if(first_time==1)
        {
            // First sample carries the config: initialize the decoder
            // directly from the input buffer. iBytesConsumed is set to
            // how much of it the initializer consumed.
            hr=Initialize_Decoder(srcBufLength,pBufferIn,srcBufLength,&iBytesConsumed);
            if(hr!= S_OK)
            {
                Stop();
                return E_FAIL;
            }

            first_time=0;
            once=1;

        }
        else
        {
            // Append the new sample after any leftover undecoded bytes.
            memcpy(InBuf+bytes_left, pBufferIn, srcBufLength);
        }

        bytes_left+=srcBufLength;
        ActualOutLength=0;

        // Drain as many whole frames as possible from InBuf.
        while(1)
        {

            if(iBytesConsumed >0)
            {

                // Shift the unconsumed tail to the front of InBuf.
                bytes_left-=iBytesConsumed;
                memmove(InBuf,InBuf+iBytesConsumed,bytes_left);

                // Fewer than 500 bytes left: presumably below the
                // minimum frame size, wait for the next input sample.
                // TODO(review): confirm 500 against the decoder's
                // actual minimum frame requirement.
                if(bytes_left<500)
                {
                    break;
                }
            }

            End=AACDecode_Ittiam(AACDecoder,InBuf,OutBuf,&frameInfo);

            // Same one-time SBR renegotiation as in the first path.
            if(frameInfo.sbrEnabled && once)
            {
                uiSampleRate *=2;
                once=0;
                CMediaType temp;

                GetMediaType(0,&temp);

                mtout=(AM_MEDIA_TYPE*)&temp;


                WAVEFORMATEXTENSIBLE *fmt = (WAVEFORMATEXTENSIBLE*)mtout->pbFormat;

                fmt->Format.nChannels = uiChannels;
                fmt->Format.nSamplesPerSec =uiSampleRate ;
                fmt->Format.nBlockAlign = fmt->Format.wBitsPerSample / 8 * fmt->Format.nChannels;
                fmt->Format.nAvgBytesPerSec = fmt->Format.nSamplesPerSec * fmt->Format.nBlockAlign;

                hr = m_pOutput->QueryAccept((AM_MEDIA_TYPE*)&temp);

                if(hr==S_OK)
                    hr=pOut->SetMediaType(mtout);

            }

            if(frameInfo.error)
            {
                // NOTE(review): as above, the loop continues past Stop()
                // and copies frameInfo.samples*2 bytes — confirm intended.
                RETAILMSG(1,(L"\nError in decoding : %d\n", frameInfo.error));
                pOut->SetActualDataLength(ActualOutLength);
                Stop();
            }



            iBytesConsumed =frameInfo.bytesconsumed;

            // Append this frame's 16-bit PCM and advance the write cursor.
            memcpy(pBufferOut, (void*)OutBuf, frameInfo.samples*2);
            ActualOutLength+=frameInfo.samples*2;
            pBufferOut+=frameInfo.samples*2;

        }

        pOut->SetActualDataLength(ActualOutLength);

    }

    return S_OK;

}