示例#1
0
std::string CDVDDemuxClient::GetStreamCodecName(int iStreamId)
{
  CDemuxStream *stream = GetStream(iStreamId);
  std::string strName;
  if (stream)
  {
    if (stream->codec == AV_CODEC_ID_AC3)
      strName = "ac3";
    else if (stream->codec == AV_CODEC_ID_MP2)
      strName = "mp2";
    else if (stream->codec == AV_CODEC_ID_AAC)
      strName = "aac";
    else if (stream->codec == AV_CODEC_ID_DTS)
      strName = "dca";
    else if (stream->codec == AV_CODEC_ID_MPEG2VIDEO)
      strName = "mpeg2video";
    else if (stream->codec == AV_CODEC_ID_H264)
      strName = "h264";
    else if (stream->codec == AV_CODEC_ID_EAC3)
      strName = "eac3";
  }
  return strName;
}
// Open the stream bound to port nPort: record the requested stream
// format, open the stream and initialise its video/audio codecs.
// Returns S_OK on success, S_FALSE on failure (the port's nLastError
// is set to the failure reason). nMode is currently unused.
unsigned long	GS_OpenStream(unsigned long nPort,unsigned long nStreamFormat,unsigned long nMode)
{
	// Open the codec and allocate the required memory.
	PSTREAMCONFIG pm = GetStream(nPort);
	if(pm==NULL)
	{
		return S_FALSE;
	}
	pm->nStreamFormat = nStreamFormat;
	if(OpenStream(nStreamFormat,pm->pContrlConfig)==S_FALSE)
	{
		pm->nLastError = GENTEK_NOT_SUPPORT_STREAM;
		return S_FALSE;
	}
	if(InitStreamCodec(pm->pCodecConfig,pm->nVideoCodec,pm->nAudioCodec)==S_FALSE)
	{
		pm->nLastError = GENTEK_OPEN_CODEC_FAILED;
		return S_FALSE;
	}
	char str[128];
	// Fix: "%d" with an unsigned long argument is undefined behavior on
	// platforms where long is wider than int; use "%lu" and bound the
	// write with snprintf.
	snprintf(str,sizeof(str)," Open GentekPlatformStream %lu\n",nPort);
	OutputDebugStringA(str);
	return S_OK;
}
示例#3
0
// Signal end-of-input: mark the associated stream's output as ended so
// downstream consumers know no further data will be produced.
void
MoonVDADecoder::InputEnded ()
{
	GetStream ()->SetOutputEnded (true);
}
示例#4
0
// Returns the sample rate (Hz) configured on this encoder's codec context.
unsigned int AudioEncoder::GetSampleRate() {
	return GetStream()->codec->sample_rate;
}
示例#5
0
// Returns the sample format configured on this encoder's codec context.
AVSampleFormat AudioEncoder::GetSampleFormat() {
	return GetStream()->codec->sample_fmt;
}
示例#6
0
// Encode one audio frame (frame == NULL flushes the encoder) and hand
// any produced packets to the muxer.
// Returns true while the encoder can still accept/produce data, false
// once the stream has ended or no packet was produced (legacy paths).
// Throws LibavException on encoder errors.
// Three compile-time paths cover the libav/ffmpeg API generations:
// avcodec_send_frame/receive_packet, avcodec_encode_audio2, and the
// legacy avcodec_encode_audio.
bool AudioEncoder::EncodeFrame(AVFrameWrapper* frame) {

	// Sanity-check that the frame matches the codec configuration.
	if(frame != NULL) {
#if SSR_USE_AVFRAME_NB_SAMPLES
		assert((unsigned int) frame->GetFrame()->nb_samples == GetFrameSize());
#endif
#if SSR_USE_AVFRAME_CHANNELS
		assert(frame->GetFrame()->channels == GetCodecContext()->channels);
#endif
#if SSR_USE_AVFRAME_SAMPLE_RATE
		assert(frame->GetFrame()->sample_rate == GetCodecContext()->sample_rate);
#endif
#if SSR_USE_AVFRAME_FORMAT
		assert(frame->GetFrame()->format == GetCodecContext()->sample_fmt);
#endif
	}

#if SSR_USE_AVCODEC_SEND_RECEIVE

	// send a frame
	// Release() transfers ownership out of the wrapper; the AVFrame is
	// freed here whether or not the send succeeds.
	AVFrame *avframe = (frame == NULL)? NULL : frame->Release();
	try {
		if(avcodec_send_frame(GetCodecContext(), avframe) < 0) {
			Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Sending of audio frame failed!"));
			throw LibavException();
		}
	} catch(...) {
		av_frame_free(&avframe);
		throw;
	}
	av_frame_free(&avframe);

	// try to receive a packet
	for( ; ; ) {
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());
		int res = avcodec_receive_packet(GetCodecContext(), packet->GetPacket());
		if(res == 0) { // we have a packet, send the packet to the muxer
			GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
			IncrementPacketCounter();
		} else if(res == AVERROR(EAGAIN)) { // we have no packet
			return true;
		} else if(res == AVERROR_EOF) { // this is the end of the stream
			return false;
		} else {
			Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Receiving of audio packet failed!"));
			throw LibavException();
		}
	}

#elif SSR_USE_AVCODEC_ENCODE_AUDIO2

	// allocate a packet
	std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());

	// encode the frame
	int got_packet;
	if(avcodec_encode_audio2(GetCodecContext(), packet->GetPacket(), (frame == NULL)? NULL : frame->GetFrame(), &got_packet) < 0) {
		Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of audio frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(got_packet) {

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#else

	// encode the frame
	short *data = (frame == NULL)? NULL : (short*) frame->GetFrame()->data[0];
	int bytes_encoded = avcodec_encode_audio(GetCodecContext(), m_temp_buffer.data(), m_temp_buffer.size(), data);
	if(bytes_encoded < 0) {
		Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of audio frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(bytes_encoded > 0) {

		// allocate a packet
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper(bytes_encoded));

		// copy the data
		memcpy(packet->GetPacket()->data, m_temp_buffer.data(), bytes_encoded);

		// set the timestamp
		// note: pts will be rescaled and stream_index will be set by Muxer
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->pts != (int64_t) AV_NOPTS_VALUE)
			packet->GetPacket()->pts = GetCodecContext()->coded_frame->pts;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#endif

}
// Recompute m_bDeleteChannel: set when the firmware is at least version
// 5.2 and the stream is configured for a single channel.
// NOTE(review): presumably newer firmware delivers an extra channel that
// must be dropped for mono streams — confirm against sensor firmware docs.
void XnAudioProcessor::CalcDeleteChannel()
{
	m_bDeleteChannel = (m_pHelper->GetFirmwareVersion() >= XN_SENSOR_FW_VER_5_2 && GetStream()->GetNumberOfChannels() == 1);
}
示例#8
0
// Fetch a resource stream by name and class id from the KEY archive.
DataStream* KEYImporter::GetResource(const char* resname, SClass_ID type)
{
	//the word masking is a hack for synonyms, currently used for bcs==bs
	return GetStream(resname, type&0xFFFF);
}
示例#9
0
// Begin encoding a BMP of the given pixel size: build and emit the
// BITMAPFILEHEADER and BITMAPINFOHEADER (plus palette / bitfield masks),
// then pre-size the stream for the pixel rows WriteLineRaw will fill in.
void BMPEncoder::Start(Size size)
{
	BMPHeader header;
	Zero(header);
	header.biSize = sizeof(BMP_INFOHEADER);
	header.biWidth = size.cx;
	header.biHeight = size.cy;
	header.biBitCount = bpp;
	header.biPlanes = 1;
	header.biCompression = 0;
	int ncolors = 0;
	// Pick the raster format and palette size for the requested bit depth.
	switch(bpp) {
	case 1:  format.Set1mf(); ncolors = 2; break;
	case 4:  format.Set4mf(); ncolors = 16; break;
	case 8:  format.Set8(); ncolors = 256; break;
	case 16: format.Set16le(0xf800, 0x07E0, 0x001f); break;
	case 32: format.Set32le(0xff0000, 0x00ff00, 0x0000ff); break;
	default: format.Set24le(0xff0000, 0x00ff00, 0x0000ff); break;
	}
	if(ncolors) {
		if(grayscale)
			for(int i = 0; i < ncolors; i++) {
				BMP_RGB& p = header.palette[i];
				p.rgbRed = p.rgbGreen = p.rgbBlue = 255 * i / (ncolors - 1);
			}
		else {
			const RGBA *palette = GetPalette();
			for(int i = 0; i < ncolors; i++) {
				BMP_RGB& p = header.palette[i];
				p.rgbRed = palette[i].r;
				p.rgbGreen = palette[i].g;
				p.rgbBlue = palette[i].b;
			}
		}
	}
	if(bpp == 16) {
		// 16 bpp uses BI_BITFIELDS: three dword channel masks occupy the
		// space where the palette would otherwise be.
		dword *bitfields = reinterpret_cast<dword *>(header.palette);
		bitfields[2] = 0x001f;
		bitfields[1] = 0x07E0;
		bitfields[0] = 0xf800;
		header.biCompression = 3/* BI_BITFIELDS */;
		ncolors = 3;
	}
	// BMP scanlines are padded to a multiple of 4 bytes.
	row_bytes = (format.GetByteCount(size.cx) + 3) & ~3;
	scanline.Alloc(row_bytes);
	header.biSizeImage = size.cy * row_bytes;
	Size dots = GetDots();
	if(dots.cx && dots.cy) {
		header.biXPelsPerMeter = fround(header.biWidth  * (1000.0 / 25.4 * 600.0) / dots.cx);
		header.biYPelsPerMeter = fround(header.biHeight * (1000.0 / 25.4 * 600.0) / dots.cy);
	}
	BMP_FILEHEADER bmfh;
	Zero(bmfh);
	bmfh.bfType = 'B' + 256 * 'M';
	bmfh.bfOffBits = sizeof(bmfh) + sizeof(BMP_INFOHEADER) + sizeof(BMP_RGB) * ncolors;
	// Fix: bfSize previously added the bare color count instead of the
	// palette's byte size; per the BMP spec it must equal
	// bfOffBits + biSizeImage.
	bmfh.bfSize = sizeof(bmfh) + sizeof(BMP_INFOHEADER) + sizeof(BMP_RGB) * ncolors + header.biSizeImage;
	bmfh.SwapEndian();
	GetStream().Put(&bmfh, sizeof(bmfh));
	header.SwapEndian();
	int h = sizeof(BMP_INFOHEADER) + sizeof(BMP_RGB) * ncolors;
	GetStream().Put(&header, h);
	soff = GetStream().GetPos();
	GetStream().SetSize(sizeof(bmfh) + h + size.cy * row_bytes);
	linei = size.cy;
	linebytes = format.GetByteCount(size.cx);
}
示例#10
0
// Play Stream
void HamurAudioManager::PlayStream(const string& strStreamName)
{
	GetStream(strStreamName)->PlayStream();
}
示例#11
0
// Handle an incoming DV header packet from a D-Plus peer.
// If no stream with this id is open yet: validate the target module,
// identify the sending client (flagging DExtra dongles and learning the
// client's module), open a new packet stream and keep its handle.
// If the stream is already open: tickle it and drop the packet.
// Returns true when a new stream was opened.
// NOTE(review): Header is deleted here only in the already-open branch;
// presumably OpenStream takes ownership otherwise — confirm.
bool CDplusProtocol::OnDvHeaderPacketIn(CDvHeaderPacket *Header, const CIp &Ip)
{
    bool newstream = false;
    
    // find the stream
    CPacketStream *stream = GetStream(Header->GetStreamId());
    if ( stream == NULL )
    {
        // no stream open yet, open a new one
        CCallsign via(Header->GetRpt1Callsign());
        
        // first, check module is valid
        if ( g_Reflector.IsValidModule(Header->GetRpt1Module()) )
        {
            // find this client
            CClient *client = g_Reflector.GetClients()->FindClient(Ip, PROTOCOL_DPLUS);
            if ( client != NULL )
            {
                // now we know if it's a dextra dongle or a genuine dplus node
                if ( Header->GetRpt2Callsign().HasSameCallsignWithWildcard(CCallsign("XRF*"))  )
                {
                    client->SetDextraDongle();
                }
                // now we know its module, let's update it
                if ( !client->HasModule() )
                {
                    client->SetModule(Header->GetRpt1Module());
                }
                // get client callsign
                via = client->GetCallsign();
                // and try to open the stream
                if ( (stream = g_Reflector.OpenStream(Header, client)) != NULL )
                {
                    // keep the handle
                    m_Streams.push_back(stream);
                    newstream = true;
                }
            }
            // release
            g_Reflector.ReleaseClients();
            
            // update last heard
            g_Reflector.GetUsers()->Hearing(Header->GetMyCallsign(), via, Header->GetRpt2Callsign());
            g_Reflector.ReleaseUsers();
        }
        else
        {
            std::cout << "DPlus node " << via << " link attempt on non-existing module" << std::endl;
        }
    }
    else
    {
        // stream already open
        // skip packet, but tickle the stream
        stream->Tickle();
        // and delete packet
        delete Header;
    }
    
    // done
    return newstream;
}
示例#12
0
  // TODO: SA: this TV() should be merged with TV() in DeviceX and DeviceZ!
  // TODO: SA: just a draft - a lot more needs to be done...
  // GDL TV procedure (PostScript device): draw a byte image at the given
  // data coordinates. Accepts rank-2 arrays (or rank-3 with TRUE=1..3,
  // currently rejected); XSIZE/YSIZE keywords are mandatory for now.
  // Fix in this revision: corrected the "cobination" typo in the
  // mapping-not-available error message.
  void TV( EnvT* e)
  {
    SizeT nParam=e->NParam( 1); 

    GDLGStream* actStream = GetStream();

    // TODO: use it is XSIZE and YSIZE is not specified!
    //DLong xsize = (*static_cast<DLongGDL*>( dStruct->GetTag( xSTag, 0)))[0];
    //DLong ysize = (*static_cast<DLongGDL*>( dStruct->GetTag( ySTag, 0)))[0];

    DLong pos=0; // TODO: handle it!
    DDouble xmin, ymin;
    {
      DDouble null;
      lib::gdlGetCurrentAxisRange("X", xmin, null);
      lib::gdlGetCurrentAxisRange("Y", ymin, null);
    }
    if (nParam == 2) {
      e->AssureLongScalarPar( 1, pos);
    } else if (nParam >= 3) {
      if (e->KeywordSet("NORMAL")) 
      {
        e->Throw("NORMAL keyword not supported yet");
	//e->AssureDoubleScalarPar( 1, xmin);
	//e->AssureDoubleScalarPar( 2, ymin);
	//xLL = (DLong) rint(xLLf * xsize);
	//yLL = (DLong) rint(yLLf * ysize);
      } 
      else if (e->KeywordSet("DEVICE")) 
      {
        e->Throw("DEVICE keyword not supported yet");
      }
      else // aka DATA
      {
	e->AssureDoubleScalarPar( 1, xmin);
	e->AssureDoubleScalarPar( 2, ymin);
      }
    }

    DByteGDL* p0B = e->GetParAs<DByteGDL>( 0);
    SizeT rank = p0B->Rank();

    // Determine image dimensions from the array rank and the TRUE keyword
    // (TRUE selects which dimension carries the color interleave).
    int width, height;
    DLong tru=0;
    e->AssureLongScalarKWIfPresent( "TRUE", tru);
    if (rank == 2) 
      {
	if (tru != 0)
	  e->Throw( "Array must have 3 dimensions: "+
		    e->GetParString(0));
	width  = p0B->Dim(0);
	height = p0B->Dim(1);
      } 
    else if( rank == 3) 
      {
	if (tru == 1) {
	  width = p0B->Dim(1);
	  height = p0B->Dim(2);
	} else if (tru == 2) {
	  width = p0B->Dim(0);
	  height = p0B->Dim(2);
	} else if (tru == 3) {
	  width = p0B->Dim(0);
	  height = p0B->Dim(1);
	} else {
	  e->Throw( "TRUE must be between 1 and 3");
	}
      } else {
	e->Throw( "Image array must have rank 2 or 3");
      }
    if (tru != 0) e->Throw("Decomposed images not supported yet with PostScript + TV() (FIXME)"); // TODO!

    /* TODO...
    if( width + xLL > xsize || height + yLL > ysize)
      e->Throw( "Value of image coordinates is out of allowed range.");
    */

    // RAII holder for the plplot 2-D grid used by imagefr.
    class grid2d {
      public: PLFLT** data;
      private: GDLGStream *pls;
      private: int w, h;
      public: grid2d(GDLGStream *actStream, int w, int h) 
        : pls(actStream), w(w), h(h) { pls->Alloc2dGrid(&data, w, h); }
      public: ~grid2d() { pls->Free2dGrid(data, w, h); }
    } idata(actStream, width, height);
    for (int x=0; x < width; ++x)
      for (int y=0; y < height; ++y)
        idata.data[x][y] = (*p0B)[x + y * width]; 

    PLFLT xmax, ymax;
    if (e->KeywordSet("XSIZE")) 
    {
      DDouble tmp;
      e->AssureDoubleScalarKW("XSIZE", tmp);
      xmax = xmin + tmp;
    }
    else e->Throw("Specification of XSIZE is mandatory for PostScript/TV() (FIXME!)"); // TODO!
    if (e->KeywordSet("YSIZE")) 
    {
      DDouble tmp;
      e->AssureDoubleScalarKW("YSIZE", tmp);
      ymax = ymin + tmp;
    }
    else e->Throw("Specification of YSIZE is mandatory for PostScript/TV() (FIXME!)"); // TODO!

    // TODO: map projection (via the last two arguments - same as was done in CONTOUR e.g.)
    bool mapSet = false;
#ifdef USE_LIBPROJ4
    //get_mapset(mapSet);
#endif
    if (mapSet) e->Throw("PostScript + TV() + mapping combination not available yet (FIXME!)");

    actStream->imagefr(idata.data, width, height, xmin, xmax, ymin, ymax, 0., 255., 0., 255., NULL, NULL); 
  }
示例#13
0
// Returns the frame rate in frames per second. Only meaningful when the
// codec time base is 1/fps, which the assert enforces.
unsigned int VideoEncoder::GetFrameRate() {
	assert(GetStream()->codec->time_base.num == 1);
	return GetStream()->codec->time_base.den;
}
示例#14
0
// Returns the frame height (pixels) configured on the codec context.
unsigned int VideoEncoder::GetHeight() {
	return GetStream()->codec->height;
}
示例#15
0
// Returns the frame width (pixels) configured on the codec context.
unsigned int VideoEncoder::GetWidth() {
	return GetStream()->codec->width;
}
示例#16
0
// DirectShow pin media-type enumeration: delegate to the underlying
// stream, which knows which output types it can deliver.
HRESULT 
BridgeSourceOutput::GetMediaType(int iPosition, CMediaType* pmt)
{
	return GetStream()->EnumOutputType(iPosition, pmt);
}
示例#17
0
// Quality-control notification from downstream: forward to the stream
// so it can adapt its delivery rate.
STDMETHODIMP 
BridgeSourceOutput::Notify(IBaseFilter * pSender, Quality q)
{
	return GetStream()->NotifyQuality(pSender, q);
}
示例#18
0
// Write one raw scanline. BMP stores rows bottom-up, so the row index
// is pre-decremented and the stream is positioned backwards from the
// end of the pixel area; the row is emitted padded to row_bytes.
void BMPEncoder::WriteLineRaw(const byte *s)
{
	GetStream().Seek(soff + row_bytes * --linei);
	memcpy(scanline, s, linebytes);
	GetStream().Put(scanline, row_bytes);
}
示例#19
0
// Overload taking a ResourceDesc: look up the stream using the
// descriptor's key-file type id.
DataStream* KEYImporter::GetResource(const char* resname, const ResourceDesc &type)
{
	return GetStream(resname, type.GetKeyType());
}
// IInArchive::Extract implementation: extract (or, when _aTestMode is
// non-zero, just test) the requested items. numItems == (UInt32)-1
// means "all items". Totals are reported up front, progress via
// CLocalProgress, and each item's result via the extract callback.
STDMETHODIMP CHandler::Extract(const UInt32* indices, UInt32 numItems,
                               Int32 _aTestMode, IArchiveExtractCallback *extractCallback)
{
    COM_TRY_BEGIN
    bool testMode = (_aTestMode != 0);
    bool allFilesMode = (numItems == UInt32(-1));
    if (allFilesMode)
        numItems = _db.Refs.Size();
    if (numItems == 0)
        return S_OK;
    UInt32 i;
    UInt64 totalSize = 0;
    // First pass: sum the unpacked sizes for the progress total.
    for(i = 0; i < numItems; i++)
    {
        const CItem &item = _db.Items[_db.Refs[allFilesMode ? i : indices[i]].Did];
        if (!item.IsDir())
            totalSize += item.Size;
    }
    RINOK(extractCallback->SetTotal(totalSize));

    UInt64 totalPackSize;
    totalSize = totalPackSize = 0;

    NCompress::CCopyCoder *copyCoderSpec = new NCompress::CCopyCoder();
    CMyComPtr<ICompressCoder> copyCoder = copyCoderSpec;

    CLocalProgress *lps = new CLocalProgress;
    CMyComPtr<ICompressProgressInfo> progress = lps;
    lps->Init(extractCallback, false);

    // Second pass: copy each item to the callback-provided output stream.
    for (i = 0; i < numItems; i++)
    {
        lps->InSize = totalPackSize;
        lps->OutSize = totalSize;
        RINOK(lps->SetCur());
        Int32 index = allFilesMode ? i : indices[i];
        const CItem &item = _db.Items[_db.Refs[index].Did];

        CMyComPtr<ISequentialOutStream> outStream;
        Int32 askMode = testMode ?
                        NArchive::NExtract::NAskMode::kTest :
                        NArchive::NExtract::NAskMode::kExtract;
        RINOK(extractCallback->GetStream(index, &outStream, askMode));

        if (item.IsDir())
        {
            RINOK(extractCallback->PrepareOperation(askMode));
            RINOK(extractCallback->SetOperationResult(NArchive::NExtract::NOperationResult::kOK));
            continue;
        }

        totalPackSize += _db.GetItemPackSize(item.Size);
        totalSize += item.Size;

        // In extract mode a NULL outStream means the caller skipped this item.
        if (!testMode && (!outStream))
            continue;
        RINOK(extractCallback->PrepareOperation(askMode));
        Int32 res = NArchive::NExtract::NOperationResult::kDataError;
        CMyComPtr<ISequentialInStream> inStream;
        HRESULT hres = GetStream(index, &inStream);
        if (hres == S_FALSE)
            res = NArchive::NExtract::NOperationResult::kDataError;
        else if (hres == E_NOTIMPL)
            res = NArchive::NExtract::NOperationResult::kUnSupportedMethod;
        else
        {
            RINOK(hres);
            if (inStream)
            {
                RINOK(copyCoder->Code(inStream, outStream, NULL, NULL, progress));
                // Only report kOK when the copied byte count matches the
                // item's recorded size.
                if (copyCoderSpec->TotalSize == item.Size)
                    res = NArchive::NExtract::NOperationResult::kOK;
            }
        }
        outStream.Release();
        RINOK(extractCallback->SetOperationResult(res));
    }
    return S_OK;
    COM_TRY_END
}
示例#21
0
// Tear down the audio processor: close the raw-audio dump file and
// unregister the channel-count change callback from the stream property.
XnAudioProcessor::~XnAudioProcessor()
{
	xnDumpFileClose(m_AudioInDump);
	GetStream()->NumberOfChannelsProperty().OnChangeEvent().Unregister(m_hNumChannelsCallback);
}
示例#22
0
  // GDL TV procedure for this device: display a byte image at the given
  // position/coordinates. Accepts rank-2 (grayscale) or rank-3
  // (true-color, TRUE=1..3 selecting the interleave dimension) arrays.
  // With three parameters, NORMAL coordinates are scaled to device
  // pixels; otherwise the lower-left corner is taken as device pixels.
  void TV( EnvT* e)
  {
    //    Graphics* actDevice = Graphics::GetDevice();
    SizeT nParam=e->NParam( 1); 

    GDLGStream* actStream = GetStream();

    //    actStream->NextPlot( false); // JMG

    // Device raster size from the !D system-variable struct.
    DLong xsize = (*static_cast<DLongGDL*>( dStruct->GetTag( xSTag, 0)))[0];
    DLong ysize = (*static_cast<DLongGDL*>( dStruct->GetTag( ySTag, 0)))[0];

    DLong xLL=0, yLL=0, pos=0;
    if (nParam == 2) {
      e->AssureLongScalarPar( 1, pos);
    } else if (nParam >= 3) {
      DDouble xLLf, yLLf;
      if (e->KeywordSet(1)) { // NORMAL
	e->AssureDoubleScalarPar( 1, xLLf);
	e->AssureDoubleScalarPar( 2, yLLf);
	xLL = (DLong) rint(xLLf * xsize);
	yLL = (DLong) rint(yLLf * ysize);
      } else {
	e->AssureLongScalarPar( 1, xLL);
	e->AssureLongScalarPar( 2, yLL);
      }
    }

    // Window the full viewport in device pixels, offset by the
    // image's lower-left corner.
    actStream->vpor( 0, 1.0, 0, 1.0);
    actStream->wind( 1-xLL, xsize-xLL, 1-yLL, ysize-yLL);

    DByteGDL* p0B = e->GetParAs<DByteGDL>( 0);
    SizeT rank = p0B->Rank();

    // Determine image dimensions from the array rank and the TRUE keyword.
    int width, height;
    DLong tru=0;
    e->AssureLongScalarKWIfPresent( "TRUE", tru);
    if (rank == 2) 
      {
	if (tru != 0)
	  e->Throw( "Array must have 3 dimensions: "+
		    e->GetParString(0));
	width  = p0B->Dim(0);
	height = p0B->Dim(1);
      } 
    else if( rank == 3) 
      {
	if (tru == 1) {
	  width = p0B->Dim(1);
	  height = p0B->Dim(2);
	} else if (tru == 2) {
	  width = p0B->Dim(0);
	  height = p0B->Dim(2);
	} else if (tru == 3) {
	  width = p0B->Dim(0);
	  height = p0B->Dim(1);
	} else {
	  e->Throw( "TRUE must be between 1 and 3");
	}
      } else {
	e->Throw( "Image array must have rank 2 or 3");
      }

    if( width + xLL > xsize || height + yLL > ysize)
      e->Throw( "Value of image coordinates is out of allowed range.");

    DLong chan = 0;
    plimage_gdl(&(*p0B)[0], width, height, xLL, yLL, tru, chan);
  }
示例#23
0
// Encode one audio frame (frame == NULL flushes the encoder) and pass
// any produced packet to the muxer. Returns true when a packet was
// emitted, false otherwise. Throws LibavException on encoder failure.
// Two compile-time paths cover the avcodec_encode_audio2 and legacy
// avcodec_encode_audio API generations.
bool AudioEncoder::EncodeFrame(AVFrame* frame) {

	// Sanity-check that the frame matches the stream's codec configuration.
	if(frame != NULL) {
#if SSR_USE_AVFRAME_NB_SAMPLES
		assert((unsigned int) frame->nb_samples == GetFrameSize());
#endif
#if SSR_USE_AVFRAME_CHANNELS
		assert(frame->channels == GetStream()->codec->channels);
#endif
#if SSR_USE_AVFRAME_SAMPLE_RATE
		assert(frame->sample_rate == GetStream()->codec->sample_rate);
#endif
#if SSR_USE_AVFRAME_FORMAT
		assert(frame->format == GetStream()->codec->sample_fmt);
#endif
	}

#if SSR_USE_AVCODEC_ENCODE_AUDIO2

	// allocate a packet
	std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());

	// encode the frame
	int got_packet;
	if(avcodec_encode_audio2(GetStream()->codec, packet->GetPacket(), frame, &got_packet) < 0) {
		Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of audio frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(got_packet) {

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		return true;

	} else {
		return false;
	}

#else

	// encode the frame
	short *data = (frame == NULL)? NULL : (short*) frame->data[0];
	int bytes_encoded = avcodec_encode_audio(GetStream()->codec, m_temp_buffer.data(), m_temp_buffer.size(), data);
	if(bytes_encoded < 0) {
		Logger::LogError("[AudioEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of audio frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(bytes_encoded > 0) {

		// allocate a packet
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper(bytes_encoded));

		// copy the data
		memcpy(packet->GetPacket()->data, m_temp_buffer.data(), bytes_encoded);

		// set the timestamp
		// note: pts will be rescaled and stream_index will be set by Muxer
		if(GetStream()->codec->coded_frame != NULL && GetStream()->codec->coded_frame->pts != (int64_t) AV_NOPTS_VALUE)
			packet->GetPacket()->pts = GetStream()->codec->coded_frame->pts;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		return true;

	} else {
		return false;
	}

#endif

}
示例#24
0
// Route a message to the log stream matching the given level.
// The level name is normalized to upper case before the lookup.
void Logger::Log(const std::string& level, const std::string& msg) {
    const std::string upperLevel = boost::to_upper_copy(level);
    GetStream(upperLevel)->Output(this, msg);
}
示例#25
0
// Returns the channel count configured on this encoder's codec context.
unsigned int AudioEncoder::GetChannels() {
	return GetStream()->codec->channels;
}
示例#26
0
// Run the stream's libav parser over a demux packet to recover codec
// parameters the client demuxer did not supply: extradata (via the
// parser's split callback), profile/level, and audio channel count /
// sample rate or video dimensions. Lazily creates the stream's codec
// context on first use. Returns true when any stream property changed.
bool CDVDDemuxClient::ParsePacket(DemuxPacket* pkt)
{
  bool change = false;

  CDemuxStream* st = GetStream(pkt->iStreamId);
  if (st == nullptr)
    return change;

  // Nothing to do once extradata is known, or if the codec never needs it.
  if (st->ExtraSize || !CodecHasExtraData(st->codec))
    return change;

  CDemuxStreamClientInternal* stream = dynamic_cast<CDemuxStreamClientInternal*>(st);

  if (stream == nullptr ||
     stream->m_parser == nullptr)
    return change;

  // Lazily create the codec context needed by the parser.
  if (stream->m_context == nullptr)
  {
    AVCodec *codec = avcodec_find_decoder(st->codec);
    if (codec == nullptr)
    {
      CLog::Log(LOGERROR, "%s - can't find decoder", __FUNCTION__);
      stream->DisposeParser();
      return change;
    }

    stream->m_context = avcodec_alloc_context3(codec);
    if (stream->m_context == nullptr)
    {
      CLog::Log(LOGERROR, "%s - can't allocate context", __FUNCTION__);
      stream->DisposeParser();
      return change;
    }
    stream->m_context->time_base.num = 1;
    stream->m_context->time_base.den = DVD_TIME_BASE;
  }

  // One-shot extradata extraction via the parser's split callback.
  if (stream->m_parser_split && stream->m_parser->parser->split)
  {
    int len = stream->m_parser->parser->split(stream->m_context, pkt->pData, pkt->iSize);
    if (len > 0 && len < FF_MAX_EXTRADATA_SIZE)
    {
      if (st->ExtraData)
        delete[] st->ExtraData;
      st->changes++;
      st->disabled = false;
      st->ExtraSize = len;
      st->ExtraData = new uint8_t[len+AV_INPUT_BUFFER_PADDING_SIZE];
      memcpy(st->ExtraData, pkt->pData, len);
      memset(st->ExtraData + len, 0 , AV_INPUT_BUFFER_PADDING_SIZE);
      stream->m_parser_split = false;
      change = true;
      CLog::Log(LOGDEBUG, "CDVDDemuxClient::ParsePacket - split extradata");
    }
  }


  uint8_t *outbuf = nullptr;
  int outbuf_size = 0;
  int len = av_parser_parse2(stream->m_parser,
                             stream->m_context, &outbuf, &outbuf_size,
                             pkt->pData, pkt->iSize,
                             (int64_t)(pkt->pts * DVD_TIME_BASE),
                             (int64_t)(pkt->dts * DVD_TIME_BASE),
                             0);
  // our parse is setup to parse complete frames, so we don't care about outbufs
  if (len >= 0)
  {
    // Propagate any parameters the parser discovered into the stream,
    // bumping the change counter so players re-open the stream.
    if (stream->m_context->profile != st->profile &&
        stream->m_context->profile != FF_PROFILE_UNKNOWN)
    {
      CLog::Log(LOGDEBUG, "CDVDDemuxClient::ParsePacket - ({}) profile changed from {} to {}", st->uniqueId, st->profile, stream->m_context->profile);
      st->profile = stream->m_context->profile;
      st->changes++;
      st->disabled = false;
    }

    if (stream->m_context->level != st->level &&
        stream->m_context->level != FF_LEVEL_UNKNOWN)
    {
      CLog::Log(LOGDEBUG, "CDVDDemuxClient::ParsePacket - ({}) level changed from {} to {}", st->uniqueId, st->level, stream->m_context->level);
      st->level = stream->m_context->level;
      st->changes++;
      st->disabled = false;
    }

    switch (st->type)
    {
      case STREAM_AUDIO:
      {
        CDemuxStreamClientInternalTpl<CDemuxStreamAudio>* sta = static_cast<CDemuxStreamClientInternalTpl<CDemuxStreamAudio>*>(st);
        if (stream->m_context->channels != sta->iChannels &&
            stream->m_context->channels != 0)
        {
          CLog::Log(LOGDEBUG, "CDVDDemuxClient::ParsePacket - ({}) channels changed from {} to {}", st->uniqueId, sta->iChannels, stream->m_context->channels);
          sta->iChannels = stream->m_context->channels;
          sta->changes++;
          sta->disabled = false;
        }
        if (stream->m_context->sample_rate != sta->iSampleRate &&
            stream->m_context->sample_rate != 0)
        {
          CLog::Log(LOGDEBUG, "CDVDDemuxClient::ParsePacket - ({}) samplerate changed from {} to {}", st->uniqueId, sta->iSampleRate, stream->m_context->sample_rate);
          sta->iSampleRate = stream->m_context->sample_rate;
          sta->changes++;
          sta->disabled = false;
        }
        break;
      }
      case STREAM_VIDEO:
      {
        CDemuxStreamClientInternalTpl<CDemuxStreamVideo>* stv = static_cast<CDemuxStreamClientInternalTpl<CDemuxStreamVideo>*>(st);
        if (stream->m_context->width != stv->iWidth &&
            stream->m_context->width != 0)
        {
          CLog::Log(LOGDEBUG, "CDVDDemuxClient::ParsePacket - ({}) width changed from {} to {}", st->uniqueId, stv->iWidth, stream->m_context->width);
          stv->iWidth = stream->m_context->width;
          stv->changes++;
          stv->disabled = false;
        }
        if (stream->m_context->height != stv->iHeight &&
            stream->m_context->height != 0)
        {
          CLog::Log(LOGDEBUG, "CDVDDemuxClient::ParsePacket - ({}) height changed from {} to {}", st->uniqueId, stv->iHeight, stream->m_context->height);
          stv->iHeight = stream->m_context->height;
          stv->changes++;
          stv->disabled = false;
        }
        break;
      }

      default:
        break;
    }
  }
  else
    CLog::Log(LOGDEBUG, "%s - parser returned error %d", __FUNCTION__, len);

  return change;
}
示例#27
0
// Record m_syncEvent on the current compute stream so later waits can
// synchronize against all work submitted to it so far.
void GranularGPUDataTransferer::RecordComputeStreamSyncPoint()
{
    PrepareDevice(m_deviceId);
    // Fix: error message said "cude" instead of "cuda".
    cudaEventRecord(m_syncEvent, GetStream()) || "cudaEventRecord failed";
}
示例#28
0
	// Enable or disable trace stream N. Compiles to a no-op (parameters
	// ignored) unless OM_TRACE_ENABLE is defined.
	void EnableStream(size_t N, bool active)
	{
#ifdef OM_TRACE_ENABLE
		GetStream(N).Enable(active);
#endif
	}
示例#29
0
/*
 * Dump the index'th element of `storage` to `out`.
 * A plain stream is read in chunks and written directly; a sub-storage
 * is first copied into a temporary delete-on-release docfile on disk,
 * whose raw bytes are then written out.
 * Returns 1 on success, 0 on failure.
 */
int
SaveStream(IStorage* storage, int index, FILE* out)
{
    int res; ULONG bytesread;
    STATSTG st;

    IStream* stream;

    IStorage* temporary; IStorage* source;
    FILE* in;

    /* zero offset used to seek back to the start of the stream */
    LARGE_INTEGER li;
    (void)memset(&li, 0, sizeof(li));

    res = GetStreamType(storage,index,&st.type);
    if (res == 0)
        return 0;

    if (st.type == STGTY_STREAM) {
        fprintf(stderr, "Fetching stream\n");
        res = GetStream(storage, index, &stream);
    } else if (st.type == STGTY_STORAGE) {
        fprintf(stderr, "Fetching store\n");
        res = GetStorage(storage, index, &source);
    } else {
        fprintf(stderr, "Fetching unknown %x\n", st.type);
        res = 0;
    }
    if (res == 0)
        return 0;

    switch(st.type) {
    case STGTY_STREAM:
        fprintf(stderr, "Seeking to beginning of stream\n");
        (HRESULT)stream->lpVtbl->Seek(stream, li, STREAM_SEEK_SET, NULL);

        fprintf(stderr, "Writing to file\n");
        /* write stream to file */
        for (;;) {
            res = stream->lpVtbl->Read(stream, g_TransferBuffer, sizeof(g_TransferBuffer), &bytesread);
            if (res != S_OK) { res = 1; goto stream_release; }
            (size_t)fwrite(g_TransferBuffer, 1, bytesread, out);
            /* a short read means the stream is exhausted */
            if (bytesread < sizeof(g_TransferBuffer))
                break;
        }
        res = 1;
        fprintf(stderr, "Done\n");

stream_release:
        (ULONG)stream->lpVtbl->Release(stream);
        break;

    case STGTY_STORAGE:
        fprintf(stderr, "Creating temporary storage on disk\n");
        res = StgCreateStorageEx(NULL, STGM_CREATE|STGM_READWRITE|STGM_SHARE_EXCLUSIVE|STGM_DELETEONRELEASE, STGFMT_STORAGE, 0, NULL, NULL, &IID_IStorage, &temporary);
        if (res != S_OK) { res = 0; goto source_release; }

        fprintf(stderr, "Copying store to temporary storage\n");
        res = source->lpVtbl->CopyTo(source, 0, NULL, NULL, temporary);
        if (res != S_OK) { res = 0; goto temporary_release; }

        res = source->lpVtbl->Commit(source, STGC_DEFAULT);
        if (res != S_OK) { res = 0; goto temporary_release; }

        fprintf(stderr, "Getting temporary storage filename\n");
        res = temporary->lpVtbl->Stat(temporary, &st, STATFLAG_DEFAULT);
        if (res != S_OK) { res = 0; goto temporary_release; }

        fprintf(stderr, "Opening temporary storage %S\n", st.pwcsName);
        in = _wfopen(st.pwcsName, L"rb");
        if (in == NULL) { res = 0; goto temporary_release; }

        fprintf(stderr, "Writing to file\n");
        for (;;) {
            bytesread = fread(&g_TransferBuffer, 1, sizeof(g_TransferBuffer), in);
            (size_t)fwrite(g_TransferBuffer, 1, bytesread, out);
            if (bytesread < sizeof(g_TransferBuffer))
                break;
        }
        fclose(in);

        res = 1;
        fprintf(stderr, "Done\n");

temporary_release:
        temporary->lpVtbl->Release(temporary);
source_release:
        source->lpVtbl->Release(source);
        break;
    }
    return res;
}
示例#30
0
// DirectShow pin type negotiation: accept the media type if the
// underlying stream can deliver it.
HRESULT 
BridgeSourceOutput::CheckMediaType(const CMediaType* pmt)
{
	return GetStream()->CanDeliverType(pmt);
}