Example #1
  void unpack(StreamPtr input,
              StreamFactoryPtr out,
              Progress *prog = NULL,
              const FileList *list = NULL)
  {
    StreamPtr s;

    out->open("test1.dummy.empty");
    s = out->open("test2.txt");
    assert(s);
    s->write("Hello dolly\n", 12);
    s.reset();
    s = out->open("dir1/");
    //assert(!s);
    s = out->open("dir2/test3.txt");
    s->write("This is Louis, Dolly\n", 21);
  }
Example #2
Future<unit> write_all(Reactor& reactor, StreamPtr out, Buffer data) {
    std::shared_ptr<Buffer> datap = std::make_shared<Buffer>(data);
    Completer<unit> completer;

    auto write_ready = [completer, datap, out]() {
        if(datap->size == 0) return;

        size_t len = out->write(*datap);
        *datap = datap->slice(len);

        if(datap->size == 0)
            completer.result(unit());
    };

    out->set_on_write_ready(write_ready);

    reactor.schedule(write_ready);

    return completer.future();
}
Example #3
void CZegoRoomModel::AddStream(StreamPtr stream)
{
	if (stream == nullptr) { return; }

	std::string strStreamId = stream->GetId();

	if (stream->IsPrimary())
	{
		if (stream->IsPlaying())
		{
			auto iter = std::find_if(m_playingPrimaryStreams.begin(), m_playingPrimaryStreams.end(),
				[&strStreamId](const StreamPtr& elem) { return elem->GetId() == strStreamId; });
			if (iter == m_playingPrimaryStreams.end())
				m_playingPrimaryStreams.push_back(stream);
		}
		else
		{
			auto iter = std::find_if(m_pendingPrimaryStreams.begin(), m_pendingPrimaryStreams.end(),
				[&strStreamId](const StreamPtr& elem) { return elem->GetId() == strStreamId; });
			if (iter == m_pendingPrimaryStreams.end())
				m_pendingPrimaryStreams.push_back(stream);
		}
	}
	else
	{
		if (stream->IsPlaying())
		{
			auto iter = std::find_if(m_playingStudentStreams.begin(), m_playingStudentStreams.end(),
				[&strStreamId](const StreamPtr& elem) { return elem->GetId() == strStreamId; });
			if (iter == m_playingStudentStreams.end())
				m_playingStudentStreams.push_back(stream);
		}
		else
		{
			auto iter = std::find_if(m_pendingStudentStreams.begin(), m_pendingStudentStreams.end(),
				[&strStreamId](const StreamPtr& elem) { return elem->GetId() == strStreamId; });
			if (iter == m_pendingStudentStreams.end())
				m_pendingStudentStreams.push_back(stream);
		}
	}
}
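A minimal refactoring sketch, not part of the original source: the four nearly identical branches above could share one helper that appends a stream only when its id is not already present. It assumes the m_*Streams members are std::vector<StreamPtr> and that <algorithm>, <string> and <vector> are available.

static void AddStreamIfAbsent(std::vector<StreamPtr>& streams, const StreamPtr& stream)
{
	// Append only when no stream with the same id is present yet.
	const std::string strStreamId = stream->GetId();
	auto iter = std::find_if(streams.begin(), streams.end(),
		[&strStreamId](const StreamPtr& elem) { return elem->GetId() == strStreamId; });
	if (iter == streams.end())
		streams.push_back(stream);
}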
Example #4
      /// <summary>Determines whether a stream uses X3:TC file encryption.</summary>
      /// <param name="s">Stream to examine.</param>
      /// <returns>True if the stream appears to use X3:TC encryption, otherwise false.</returns>
      bool  EncryptedX3Stream::IsEncrypted(StreamPtr s)
      {
         // Verify length
         if (s->GetLength() < 3)
            return false;
         
         // Prepare
         DWORD origin = s->GetPosition();
         WORD  header, key;

         // Generate key: XOR first byte with 0xC8
         s->Seek(0, SeekOrigin::Begin);
         s->Read(reinterpret_cast<byte*>(&key), 1);
         key ^= DECRYPT_SEED;

         // Generate WORD from first byte
         reinterpret_cast<byte*>(&key)[1] = reinterpret_cast<byte*>(&key)[0];

         // Read the encrypted 2-byte header, then reset the position
         s->Read(reinterpret_cast<BYTE*>(&header), 2);
         s->Seek(origin, SeekOrigin::Begin);

         // Check for encrypted GZip header 
         return (header ^ key) == 0x8b1f;
      }
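For comparison, a standalone, endianness-independent sketch of the same check over a raw byte buffer; it is not part of the original class and assumes DECRYPT_SEED is 0xC8, as the comment above indicates.

      // Hypothetical free function; 0xC8 stands in for DECRYPT_SEED.
      bool LooksLikeEncryptedGZip(const unsigned char* data, size_t length)
      {
         // Too short to contain a key byte plus a 2-byte header
         if (length < 3)
            return false;

         // Derive the key byte from the first byte of the stream
         unsigned char key = static_cast<unsigned char>(data[0] ^ 0xC8);

         // The decrypted GZip magic (0x1f 0x8b) should appear at offsets 1..2
         return static_cast<unsigned char>(data[1] ^ key) == 0x1f
             && static_cast<unsigned char>(data[2] ^ key) == 0x8b;
      }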
Example #5
 bool CgDxShader::initialize(D3D11GraphicDevice* device, const ResourcePtr& resource, const ShaderDesc& desc) {
     mDesc = desc;
     
     StreamPtr stream = resource->readIntoMemory();
     if(!stream)
         return false;
     MemoryStream* memStream = static_cast<MemoryStream*>(stream.get());
     std::string content(memStream->data(), memStream->data() + memStream->size());
     
     CGprofile profile = _d3d_feature_level_to_cgprofile(device->getDeviceFeatureLevel(), desc.type);
     mProgram = cgCreateProgram(mContext, 
         CG_SOURCE, 
         content.c_str(), 
         profile, 
         desc.entry.c_str(), 
         cgD3D11GetOptimalOptions(profile));
     if(_check_error(mContext) &&
         D3D11Debug::CHECK_RESULT( cgD3D11LoadProgram(mProgram, 0))) {
             return true;
     }
     return false;
 }
Example #6
void test(StreamPtr inp)
{
  cout << "Size: " << inp->size() << endl;
  cout << "Pos: " << inp->tell() << endl;
  cout << "Eof: " << inp->eof() << endl;
  char data[6];

  while(!inp->eof())
    {
      memset(data, 0, 6);
      cout << "\nReading " << inp->read(data, 5) << " bytes\n";
      cout << "Result: '" << data << "'\n";
      cout << "Pos: " << inp->tell() << endl;
      cout << "Eof: " << inp->eof() << endl;
    }
}
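A minimal driver sketch for the function above, reusing StringStream::Open from Example #13 below; it is not part of the original source.

int main()
{
  // Feed test() a small in-memory stream so its read loop hits EOF quickly.
  test(StringStream::Open("hello world!"));
  return 0;
}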
Example #7
  /*
    input = stream to copy
    ADD = each read increment (for streams without size())
   */
  BufferStream(StreamPtr input, size_t ADD = 32*1024)
    {
      assert(input);

      // Allocate memory, read the stream into it. Then call set()
      if(input->hasSize)
        {
          // We assume that we can get the position as well
          assert(input->hasPosition);

          // Calculate how much is left of the stream
          size_t left = input->size() - input->tell();

          // Allocate the buffer and fill it
          buffer.resize(left);
          input->read(&buffer[0], left);
        }
      else
        {
          // We DON'T know how big the stream is. We'll have to read
          // it in increments.
          size_t len=0, newlen;

          while(!input->eof())
            {
              // Read one block
              newlen = len + ADD;
              buffer.resize(newlen);
              size_t read = input->read(&buffer[len], ADD);

              // Increase the total length
              len += read;

              // If we read less than expected, we should be at the
              // end of the stream
              assert(read == ADD || (read < ADD && input->eof()));
            }

          // Downsize to match the real length
          buffer.resize(len);
        }

      // After the buffer has been filled, set up our MemoryStream
      // ancestor to reference it. Guard against an empty buffer, for
      // which &buffer[0] would be undefined.
      set(buffer.size() ? &buffer[0] : NULL, buffer.size());
    }
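A minimal usage sketch, again borrowing StringStream::Open from Example #13 below and not part of the original source; it assumes BufferStream exposes the MemoryStream interface (such as size()) that the closing comment refers to.

  // Pull an arbitrary stream into memory, then query it like a buffer.
  StreamPtr src = StringStream::Open("hello world!");
  BufferStream buffered(src);
  cout << "Buffered " << buffered.size() << " bytes\n";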
Example #8
int main(int argc, char **argv)
{
    // Init FFMPEG
    av::init();
    av::setFFmpegLoggingLevel(AV_LOG_DEBUG);

#if 0
    FilterGraphPtr     graph(new FilterGraph);
    string             filters;
    FilterInOutListPtr inputs;
    FilterInOutListPtr outputs;

    //filters = "yadif=0:-1:0, scale=400:226, drawtext=fontfile=/usr/share/fonts/TTF/DroidSans.ttf:"
    //          "text='tod- %X':x=(w-text_w)/2:y=H-60 :fontcolor=white :box=1:boxcolor=0x00000000@1";
    filters = "[0]scale=iw/2:ih/2,pad=2*iw:ih[left];[1]scale=iw/2:ih/2[right];[left][right]overlay=main_w/2:0";

    if (graph->parse(filters, inputs, outputs) < 0)
    {
        clog << "Failed to parse graph" << endl;
        return 1;
    }

    if (graph->config() >= 0)
        graph->dump();

    if (inputs)
    {
        clog << "Inputs count: " << inputs->getCount() << endl;
        FilterInOutList::iterator it = inputs->begin();
        while (it != inputs->end())
        {
            FilterInOut inout = *it;

            clog << "Input: " << inout.getPadIndex() << ", name: " << inout.getName() << ", filter: " << inout.getFilterContext()->getName() << endl;
            ++it;
        }
    }

    if (outputs)
    {
        clog << "Outputs count: " << outputs->getCount() << endl;
        FilterInOutList::iterator it = outputs->begin();
        while (it != outputs->end())
        {
            FilterInOut inout = *it;

            clog << "Output: " << inout.getPadIndex() << ", name: " << inout.getName() << ", filter: " << inout.getFilterContext()->getName() << endl;
            ++it;
        }
    }

    for (unsigned i = 0; i < graph->getFiltersCount(); ++i)
    {
        clog << "Filter: " << graph->getFilter(i)->getName() << endl;
    }

#else

    string srcUri;
    string dstUri;
    string filterDescription = "";

    if (argc < 3)
    {
        usage(argv[0]);
        ::exit(1);
    }

    srcUri = string(argv[1]);
    dstUri = string(argv[2]);

    if (argc > 3)
    {
        filterDescription = string(argv[3]);
    }

    //
    // Prepare input
    //
    ContainerPtr in(new Container());

    if (in->openInput(srcUri.c_str()))
    {
        cout << "Open success\n";
    }
    else
    {
        cout << "Open fail\n";
        return 1;
    }

    int streamCount = in->getStreamsCount();
    cout << "Streams: " << streamCount << endl;

    set<int> audio;
    set<int> video;
    map<int, StreamCoderPtr> decoders;

    int inW = -1;
    int inH = -1;
    PixelFormat inPF;
    Rational inTimeBase;
    Rational inSampleAspectRatio;
    Rational inFrameRate;

    int            inSampleRate    = 0;
    int            inChannels      = 0;
    SampleFormat   inSampleFmt     = AV_SAMPLE_FMT_S16;
    uint64_t       inChannelLayout = 0;


    for (int i = 0; i < streamCount; ++i)
    {
        StreamPtr st = in->getStream(i);
        if (st->getMediaType() == AVMEDIA_TYPE_VIDEO ||
            st->getMediaType() == AVMEDIA_TYPE_AUDIO)
        {
            if (st->getMediaType() == AVMEDIA_TYPE_VIDEO)
                video.insert(i);
            else if (st->getMediaType() == AVMEDIA_TYPE_AUDIO)
                audio.insert(i);

            StreamCoderPtr coder(new StreamCoder(st));
            coder->open();

            if (st->getMediaType() == AVMEDIA_TYPE_VIDEO)
            {
                inW        = coder->getWidth();
                inH        = coder->getHeight();
                inPF       = coder->getPixelFormat();
                inTimeBase = coder->getTimeBase();
                inFrameRate = coder->getFrameRate();
                //inSampleAspectRatio = coder->getAVCodecContext()->sample_aspect_ratio;
                inSampleAspectRatio = st->getSampleAspectRatio();

                if (inSampleAspectRatio == Rational(0, 1))
                    inSampleAspectRatio = coder->getAVCodecContext()->sample_aspect_ratio;

                clog << "Aspect ratio: " << inSampleAspectRatio << endl;
            }
            else
            {
                inSampleRate = coder->getSampleRate();
                inChannels   = coder->getChannels();
                inChannelLayout = coder->getChannelLayout();
                inSampleFmt = coder->getSampleFormat();
            }

            clog << "In: TimeBases coder:" << coder->getTimeBase() << ", stream:" << st->getTimeBase()  << endl;

            decoders[i] = coder;
        }
    }

    //
    // Writing
    //
    ContainerFormatPtr writeFormat(new ContainerFormat());
    writeFormat->setOutputFormat(0, dstUri.c_str(), 0);
    if (!writeFormat->isOutput())
    {
        clog << "Fallback to MPEGTS output format" << endl;
        writeFormat->setOutputFormat("mpegts", 0, 0);
    }

    ContainerPtr writer(new Container());
    writer->setFormat(writeFormat);

    int         outW      = 640;
    int         outH      = 480;
    PixelFormat outPixFmt = PIX_FMT_YUV420P;

    int            outSampleRate    = inSampleRate;
    //int            outSampleRate    = 44100;
    int            outChannels      = inChannels;
    SampleFormat   outSampleFmt     = inSampleFmt;
    uint64_t       outChannelLayout = inChannelLayout;
    int            outFrameSize     = 0;

    map<int, StreamCoderPtr> encoders;
    map<int, int>            streamMapping;

    map<int, StreamCoderPtr>::iterator it;
    int i = 0;
    for (it = decoders.begin(); it != decoders.end(); ++it)
    {
        int originalIndex = it->first;

        CodecPtr codec;
        if (video.count(originalIndex) > 0)
        {
            codec = Codec::findEncodingCodec(writeFormat->getOutputDefaultVideoCodec());
            //continue;
        }
        else
        {
            codec = Codec::findEncodingCodec(writeFormat->getOutputDefaultAudioCodec());
            //continue;
        }

        streamMapping[originalIndex] = i;

        StreamPtr st = writer->addNewStream(codec);
        StreamCoderPtr coder(new StreamCoder(st));

        coder->setCodec(codec);

        if (st->getMediaType() == AVMEDIA_TYPE_VIDEO)
        {
            coder->setWidth(outW);
            coder->setHeight(outH);

            outPixFmt = *codec->getAVCodec()->pix_fmts;
            coder->setPixelFormat(outPixFmt);

            coder->setTimeBase(Rational(1,25));
            //st->setTimeBase(Rational(1,25));

            st->setFrameRate(Rational(25,1));
            coder->setBitRate(500000);
        }
        else if (st->getMediaType() == AVMEDIA_TYPE_AUDIO)
        {
            st->getAVStream()->id = 1;

            if (codec->getAVCodec()->supported_samplerates)
                outSampleRate = *codec->getAVCodec()->supported_samplerates;

            coder->setSampleRate(outSampleRate);
            coder->setChannels(outChannels);
            coder->setSampleFormat(outSampleFmt);
            outChannelLayout = coder->getChannelLayout();

            coder->setTimeBase(Rational(1, outSampleRate));
            //st->setTimeBase(Rational(1, outSampleRate));
        }

        if (!coder->open())
        {
            cerr << "Can't open coder" << endl;
        }

        clog << "Out: TimeBases coder:" << coder->getTimeBase() << ", stream:" << st->getTimeBase()  << endl;

        if (st->getMediaType() == AVMEDIA_TYPE_AUDIO)
            outFrameSize = coder->getAVCodecContext()->frame_size;

        encoders[i] = coder;
        ++i;
    }

    writer->openOutput(dstUri.c_str());
    writer->dump();
    writer->writeHeader();
    writer->flush();

    //
    // Transcoding
    //

    PacketPtr pkt(new Packet());
    int stat = 0;

#if 0
    // Audio filter graph
    list<int>            dstSampleRates    = {outSampleRate};
    list<SampleFormat> dstSampleFormats  = {outSampleFmt};
    list<uint64_t>       dstChannelLayouts = {outChannelLayout};
    BufferSrcFilterContextPtr srcAudioFilter;
    BufferSinkFilterContextPtr sinkAudioFilter;
    FilterGraphPtr       audioFilterGraph;

    if (!audio.empty())
    {
         audioFilterGraph = FilterGraph::createSimpleAudioFilterGraph(Rational(1, inSampleRate),
                                                                      inSampleRate, inSampleFmt, inChannelLayout,
                                                                      dstSampleRates, dstSampleFormats, dstChannelLayouts,
                                                                      string());


         sinkAudioFilter = filter_cast<BufferSinkFilterContext>(audioFilterGraph->getSinkFilter());
         srcAudioFilter  = audioFilterGraph->getSrcFilter();

         if (sinkAudioFilter)
         {
             sinkAudioFilter->setFrameSize(outFrameSize);
         }

         audioFilterGraph->dump();
    }


    // Video filter graph
#if 0
    string videoFilterDesc = "movie=http\\\\://camproc1/snapshots/logo.png [watermark]; [in][watermark] overlay=0:0:rgb=1,"
                             "drawtext=fontfile=/home/hatred/fifte.ttf:fontsize=20:"
                             "text='%F %T':x=(w-text_w-5):y=H-20 :fontcolor=white :box=0:boxcolor=0x00000000@1 [out]";
#else
    string &videoFilterDesc = filterDescription;
#endif

    BufferSrcFilterContextPtr  srcVideoFilter;
    BufferSinkFilterContextPtr sinkVideoFilter;
    FilterGraphPtr videoFilterGraph =
            FilterGraph::createSimpleVideoFilterGraph(inTimeBase,
                                                      inSampleAspectRatio,
                                                      inFrameRate,
                                                      inW, inH, inPF,
                                                      outW, outH, outPixFmt,
                                                      videoFilterDesc);
    sinkVideoFilter = videoFilterGraph->getSinkFilter();
    srcVideoFilter  = videoFilterGraph->getSrcFilter();

    videoFilterGraph->dump();

    //return 0;
#endif

    // TODO: this may be faulty
    VideoRescaler videoRescaler {outW, outH, outPixFmt, inW, inH, inPF};

    uint64_t samplesCount = 0;
    packetSync.reset();
    while (in->readNextPacket(pkt) >= 0)
    {
        if (streamMapping.find(pkt->getStreamIndex()) == streamMapping.end())
        {
            continue;
        }

        clog << "Input: "  << pkt->getStreamIndex()
             << ", PTS: "  << pkt->getPts()
             << ", DTS: "  << pkt->getDts()
             << ", TB: "   << pkt->getTimeBase()
             << ", time: " << pkt->getTimeBase().getDouble() * pkt->getDts()
             << endl;

        if (video.count(pkt->getStreamIndex()) > 0)
        {
            VideoFramePtr  frame(new VideoFrame());
            StreamCoderPtr coder = decoders[pkt->getStreamIndex()];

            if (pkt->getPts() == av::NoPts && pkt->getDts() != av::NoPts)
            {
                pkt->setPts(pkt->getDts());
            }

            auto ret = coder->decodeVideo(frame, pkt);
            frame->setStreamIndex(streamMapping[pkt->getStreamIndex()]);

            clog << "decoding ret: " << ret << ", pkt size: " << pkt->getSize() << endl;

            if (frame->isComplete())
            {
                StreamCoderPtr &encoder = encoders[streamMapping[pkt->getStreamIndex()]];

                clog << "Frame: aspect ratio: " << Rational(frame->getAVFrame()->sample_aspect_ratio)
                     << ", size: " << frame->getWidth() << "x" << frame->getHeight()
                     << ", pix_fmt: " << frame->getPixelFormat()
                     << ", time_base: " << frame->getTimeBase()
                     << endl;

#if 0
                stat = srcVideoFilter->addFrame(frame);
                if (stat < 0)
                {
                    clog << "Can't add video frame to filters chain" << endl;
                    continue;
                }

                while (1)
                {
                    FilterBufferRef frameref;

                    stat = sinkVideoFilter->getBufferRef(frameref, 0);

                    if (stat == AVERROR(EAGAIN) || stat == AVERROR_EOF)
                        break;

                    if (stat < 0)
                        break;

                    if (frameref.isValid())
                    {
                        FramePtr outFrame;
                        frameref.copyToFrame(outFrame);
                        if (outFrame)
                        {
                            VideoFramePtr outVideoFrame = std::static_pointer_cast<VideoFrame>(outFrame);
                            outVideoFrame->setStreamIndex(streamMapping[pkt->getStreamIndex()]);
                            outVideoFrame->setTimeBase(encoder->getTimeBase());
                            outVideoFrame->setPts(pkt->getTimeBase().rescale(pkt->getPts(), outVideoFrame->getTimeBase()));

                            stat = encoder->encodeVideo(outVideoFrame, std::bind(formatWriter, writer, std::placeholders::_1));
                        }
                    }
                }
#else
                FramePtr outFrame = frame;
                VideoFramePtr outVideoFrame = std::static_pointer_cast<VideoFrame>(outFrame);
                outVideoFrame->setStreamIndex(streamMapping[pkt->getStreamIndex()]);
                outVideoFrame->setTimeBase(encoder->getTimeBase());
                outVideoFrame->setPts(pkt->getTimeBase().rescale(pkt->getPts(), outVideoFrame->getTimeBase()));

                stat = encoder->encodeVideo(outVideoFrame, std::bind(formatWriter, writer, std::placeholders::_1));
#endif
            }
        }
        else if (audio.count(pkt->getStreamIndex()) > 0)
        {
            AudioSamplesPtr samples(new AudioSamples());
            StreamCoderPtr  coder = decoders[pkt->getStreamIndex()];

            pkt->setPts(av::NoPts);
            pkt->setDts(av::NoPts);

            int size = coder->decodeAudio(samples, pkt);
            samples->setStreamIndex(streamMapping[pkt->getStreamIndex()]);

            //clog << "Packet Size: " << pkt->getSize() << ", encoded bytes: " << size << endl;
            //clog << "Audio Inp PTS: " << pkt->getPts() << ", " << samples->getPts() << endl;

            if (samples->isComplete())
            {
                StreamCoderPtr &encoder = encoders[streamMapping[pkt->getStreamIndex()]];

#if 0
                stat = srcAudioFilter->addFrame(samples);
                if (stat < 0)
                {
                    clog << "Can't add audio samples to filters chain" << endl;
                    continue;
                }

                int count = 0;
                while (1)
                {
                    FilterBufferRef samplesref;

                    stat = sinkAudioFilter->getBufferRef(samplesref, 0);

                    if (stat == AVERROR(EAGAIN) || stat == AVERROR_EOF)
                        break;

                    if (stat < 0)
                        break;

                    if (samplesref.isValid())
                    {
                        const AVFilterBufferRefAudioProps *props = samplesref.getAVFilterBufferRef()->audio;

//                        clog << "Cnt:" << count << ", Ch layout:" << props->channel_layout
//                             << ", nb_samples:" << props->nb_samples
//                             << ", rate:" << props->sample_rate
//                             << endl;

                        FramePtr outFrame;
                        samplesref.copyToFrame(outFrame);
                        if (outFrame)
                        {
                            AudioSamplesPtr outSamples = std::static_pointer_cast<AudioSamples>(outFrame);

                            outSamples->setTimeBase(encoder->getTimeBase());
                            outSamples->setStreamIndex(streamMapping[pkt->getStreamIndex()]);

//                            clog << "Samples: " << outSamples->getSamplesCount()
//                                 << ", ts:" << outSamples->getPts() << " / " << AV_NOPTS_VALUE << ", " << (outSamples->getPts() == AV_NOPTS_VALUE)
//                                 << ", tb:" << outSamples->getTimeBase()
//                                 << endl;

                            stat = encoder->encodeAudio(outSamples, std::bind(formatWriter, writer, std::placeholders::_1));
                        }
                    }
                }
#else
                FramePtr outFrame = samples;
                AudioSamplesPtr outSamples = std::static_pointer_cast<AudioSamples>(outFrame);

                outSamples->setTimeBase(encoder->getTimeBase());
                outSamples->setStreamIndex(streamMapping[pkt->getStreamIndex()]);

                stat = encoder->encodeAudio(outSamples, std::bind(formatWriter, writer, std::placeholders::_1));
#endif
            }
        }
    }

    // Flush buffers
    set<int> allStreams = video;
    allStreams.insert(audio.begin(), audio.end());
    for (set<int>::const_iterator it = allStreams.begin(); it != allStreams.end(); ++it)
    {
        if (streamMapping.find(*it) == streamMapping.end())
        {
            continue;
        }

        clog << "Flush stream: " << *it << endl;

        PacketPtr pkt(new Packet());
        pkt->setStreamIndex(streamMapping[*it]);
        formatWriter(writer, pkt);
    }

    // Write trailer
    writer->flush();
    writer->writeTrailer();

    /*
    // Must be closed before container
    encoders.clear();

    // Container close
    writer->close();
    writer.reset();

    // decoders
    decoders.clear();

    // Input container
    in->close();
    in.reset();
    */

#endif
    return 0;
}
Example #9
 // For fixed-size strings where you already know the size
 const char *getString(int size)
   { return (const char*)inp->getPtr(size); }
Example #10
 /// Get current position
 int ADR_CALL tell()
   { assert(inp->hasPosition); return inp->tell(); }
Example #11
template<class X> const X* getPtr() { return (const X*)inp->getPtr(sizeof(X)); }
Example #12
 void skip(size_t size) { inp->getPtr(size); }
Example #13
int main()
{
  StreamPtr inp = StringStream::Open("hello world!");

  cout << "Size: " << inp->size() << endl;
  cout << "Pos: " << inp->tell() << "\nSeeking...\n";
  inp->seek(3);
  cout << "Pos: " << inp->tell() << endl;
  char data[12];
  memset(data, 0, 12);
  cout << "Reading: " << inp->read(data, 4) << endl;
  cout << "Four bytes: " << data << endl;
  cout << "Eof: " << inp->eof() << endl;
  cout << "Pos: " << inp->tell() << "\nSeeking again...\n";
  inp->seek(33);
  cout << "Pos: " << inp->tell() << endl;
  cout << "Eof: " << inp->eof() << "\nSeek to 6\n";
  inp->seek(6);
  cout << "Eof: " << inp->eof() << endl;
  cout << "Pos: " << inp->tell() << endl;
  cout << "Over-reading: " << inp->read(data, 200) << endl;
  cout << "Result: " << data << endl;
  cout << "Eof: " << inp->eof() << endl;
  cout << "Pos: " << inp->tell() << endl;
  inp->seek(0);
  cout << "Finally, reading the entire string: " << inp->read(data,11) << endl;
  cout << "Result: " << data << endl;
  cout << "Eof: " << inp->eof() << endl;
  cout << "Pos: " << inp->tell() << endl;

  cout << "Entire stream from pointer: " << (char*)inp->getPtr() << endl;
  
  return 0;
}
Example #14
bool AkVCam::PluginInterface::createDevice(const std::string &deviceId,
                                           const std::wstring &description,
                                           const std::vector<VideoFormat> &formats)
{
    AkLoggerLog("AkVCam::PluginInterface::createDevice");

    StreamPtr stream;

    // Create one device.
    auto pluginRef = reinterpret_cast<CMIOHardwarePlugInRef>(this->d);
    auto device = std::make_shared<Device>(pluginRef);
    device->setDeviceId(deviceId);
    device->connectAddListener(this, &PluginInterface::addListener);
    device->connectRemoveListener(this, &PluginInterface::removeListener);
    this->m_devices.push_back(device);

    // Define device properties.
    device->properties().setProperty(kCMIOObjectPropertyName,
                                     description.c_str());
    device->properties().setProperty(kCMIOObjectPropertyManufacturer,
                                     CMIO_PLUGIN_VENDOR);
    device->properties().setProperty(kCMIODevicePropertyModelUID,
                                     CMIO_PLUGIN_PRODUCT);
    device->properties().setProperty(kCMIODevicePropertyLinkedCoreAudioDeviceUID,
                                     "");
    device->properties().setProperty(kCMIODevicePropertyLinkedAndSyncedCoreAudioDeviceUID,
                                     "");
    device->properties().setProperty(kCMIODevicePropertySuspendedByUser,
                                     UInt32(0));
    device->properties().setProperty(kCMIODevicePropertyHogMode,
                                     pid_t(-1),
                                     false);
    device->properties().setProperty(kCMIODevicePropertyDeviceMaster,
                                     pid_t(-1));
    device->properties().setProperty(kCMIODevicePropertyExcludeNonDALAccess,
                                     UInt32(0));
    device->properties().setProperty(kCMIODevicePropertyDeviceIsAlive,
                                     UInt32(1));
    device->properties().setProperty(kCMIODevicePropertyDeviceUID,
                                     deviceId.c_str());
    device->properties().setProperty(kCMIODevicePropertyTransportType,
                                     UInt32(kIOAudioDeviceTransportTypePCI));
    device->properties().setProperty(kCMIODevicePropertyDeviceIsRunningSomewhere,
                                     UInt32(0));

    if (device->createObject() != kCMIOHardwareNoError)
        goto createDevice_failed;

    stream = device->addStream();

    // Register one stream for this device.
    if (!stream)
        goto createDevice_failed;

    stream->setFormats(formats);
    stream->properties().setProperty(kCMIOStreamPropertyDirection, UInt32(0));

    if (device->registerStreams() != kCMIOHardwareNoError) {
        device->registerStreams(false);

        goto createDevice_failed;
    }

    // Register the device.
    if (device->registerObject() != kCMIOHardwareNoError) {
        device->registerObject(false);
        device->registerStreams(false);

        goto createDevice_failed;
    }

    device->setBroadcasting(this->d->m_ipcBridge.broadcaster(deviceId));
    device->setMirror(this->d->m_ipcBridge.isHorizontalMirrored(deviceId),
                      this->d->m_ipcBridge.isVerticalMirrored(deviceId));
    device->setScaling(this->d->m_ipcBridge.scalingMode(deviceId));
    device->setAspectRatio(this->d->m_ipcBridge.aspectRatioMode(deviceId));
    device->setSwapRgb(this->d->m_ipcBridge.swapRgb(deviceId));

    return true;

createDevice_failed:
    this->m_devices.erase(std::prev(this->m_devices.end()));

    return false;
}
Example #15
	void openFile(StreamPtr stream, IAudioDevicePtr audioDevice)
	{
		FlogI("Trying to load file: " << stream->GetPath());

		int ret;
		this->stream = stream;
		this->audioDevice = audioDevice;
		timeHandler = TimeHandler::Create(audioDevice);
		
		audioDevice->SetPaused(true);

		pFormatCtx = avformat_alloc_context();
		pFormatCtx->pb = stream->GetAVIOContext();

		if((ret = avformat_open_input(&pFormatCtx, stream->GetPath().c_str(), NULL, NULL)) != 0){
			char ebuf[512];
			av_strerror(ret, ebuf, sizeof(ebuf));
			FlogE("couldn't open file");
			FlogE(ebuf);
			throw VideoException(VideoException::EFile);
		}

		/* Get stream information */
		if(avformat_find_stream_info(pFormatCtx, NULL) < 0){
			FlogE("couldn't get stream info");
			throw VideoException(VideoException::EStreamInfo);
		}

		/* Print video format information */
		av_dump_format(pFormatCtx, 0, stream->GetPath().c_str(), 0);

		// If the loader logged something about wmv being DRM protected, give up
		if(drm){
			throw VideoException(VideoException::EStream);
		}

		// find the best audio and video streams
		audioStream = videoStream = AVERROR_STREAM_NOT_FOUND;
		pCodec = 0;

		videoStream = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &pCodec, 0);

		if(videoStream == AVERROR_STREAM_NOT_FOUND){
			FlogE("couldn't find stream");
			throw VideoException(VideoException::EStream);
		}	
		
		if(videoStream == AVERROR_DECODER_NOT_FOUND || !pCodec){
			FlogE("unsupported codec");
			throw VideoException(VideoException::EVideoCodec);
		}

		audioStream = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);

		if(hasAudioStream()){
			audioHandler = AudioHandler::Create(pFormatCtx->streams[audioStream]->codec, audioDevice, timeHandler);
		}else{
			audioHandler = AudioHandlerNoSound::Create(audioDevice, timeHandler);
			FlogD("no audio stream or unsupported audio codec");
		}
		
		/* Get a pointer to the codec context for the video stream */
		pCodecCtx = pFormatCtx->streams[videoStream]->codec;

		// Open codec
		if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0){
			FlogE("unsupported codec");
			throw VideoException(VideoException::EVideoCodec);
		}

		w = pCodecCtx->width;
		h = pCodecCtx->height;

		// limit framequeue memory size
		int frameMemSize = avpicture_get_size((AVPixelFormat)pCodecCtx->pix_fmt, w, h);
		int maxVideoQueueMem = 512 * 1024 * 1024; // 512 MB
		maxFrameQueueSize = maxVideoQueueMem / frameMemSize;

		// cap to 256
		maxFrameQueueSize = std::min(maxFrameQueueSize, 256);

		// tick the video so that firstPts and firstDts are set
		tick(true);
	}
Example #16
 Misc::SliceArray<X> getArrayLen(int num)
   { return Misc::SliceArray<X>((const X*)inp->getPtr(num*sizeof(X)),num); }
Example #17
int main(int argc, char ** argv)
{
    std::string inURI = INPUT;
    std::string outURI = OUTPUT;

    if (argc > 1)
        inURI = std::string(argv[1]);
    
    if (argc > 2)
        outURI = std::string(argv[2]);

    //
    // Prepare in
    //

    ContainerPtr in(new Container());

    if (in->openInput(inURI.c_str()))
    {
        cout << "Open success\n";
    }
    else
    {
        cout << "Open fail\n";
        return 1;
    }

    int streamCount = in->getStreamsCount();
    cout << "Streams: " << streamCount << endl;

    set<int> audio;
    set<int> video;
    map<int, StreamCoderPtr> videoCoders;

    int inW = -1;
    int inH = -1;
    PixelFormat inPF;

    for (int i = 0; i < streamCount; ++i)
    {
        StreamPtr st = in->getStream(i);
        if (st->getAVStream()->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            video.insert(i);

            StreamCoderPtr coder(new StreamCoder(st));
            coder->open();

            inW = coder->getWidth();
            inH = coder->getHeight();
            inPF = coder->getPixelFormat();

            videoCoders[i] = coder;
        }
        else if (st->getAVStream()->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            audio.insert(i);
        }
    }


    //
    // Prepare coder
    //
    ContainerFormatPtr outFormat(new ContainerFormat());
    outFormat->setOutputFormat("flv", 0, 0);

    ContainerPtr out(new Container());
    out->setFormat(outFormat);

    CodecPtr outCodec = Codec::findEncodingCodec(outFormat->getOutputDefaultVideoCodec());

    // add stream
    StreamPtr outStream = out->addNewStream(outCodec);
    StreamCoderPtr outCoder(new StreamCoder(outStream));

    outCoder->setCodec(outCodec);
    outCoder->setTimeBase(Rational(1, 25));
    outCoder->setWidth(384);
    outCoder->setHeight(288);
    outCoder->setPixelFormat(PIX_FMT_YUV420P);
    //outCoder->setPixelFormat(PIX_FMT_YUVJ420P);

    outStream->setFrameRate(Rational(25, 1));

    outCoder->open();

    out->dump();

    //
    // Writing
    //
    ContainerFormatPtr writeFormat(new ContainerFormat());
    writeFormat->setOutputFormat("flv", 0, 0);

    ContainerPtr writer(new Container());
    writer->setFormat(writeFormat);

    CodecPtr writeCodec = Codec::findEncodingCodec(writeFormat->getOutputDefaultVideoCodec());

    // add stream
    StreamPtr writeStream = writer->addNewStream(writeCodec);
    StreamCoderPtr writeCoder(new StreamCoder(writeStream));

    writeCoder->setCodec(writeCodec);
    writeCoder->setTimeBase(Rational(1, 25));
    writeCoder->setWidth(384);
    writeCoder->setHeight(288);
    writeCoder->setPixelFormat(PIX_FMT_YUV420P);

    writeStream->setFrameRate(Rational(25, 1));

    writeCoder->open();

    //writer->openOutput("/tmp/test2.flv");
    MyWriter fwriter(outURI);
    //writer->openOutput(fwriter);
    writer->openOutput(outURI);
    writer->dump();
    writer->writeHeader();
    writer->flush();

    //
    // Transcoding
    //

    PacketPtr pkt(new Packet());
    FramePtr  frame(new Frame());
    int stat = 0;

    vector<PacketPtr> packets;

    VideoResamplerPtr resampler(new VideoResampler(outCoder->getWidth(), outCoder->getHeight(), outCoder->getPixelFormat(),
                                                   inW, inH, inPF));

    while (in->readNextPacket(pkt) >= 0)
    {
        if (video.count(pkt->getStreamIndex()) > 0)
        {
            StreamCoderPtr coder = videoCoders[pkt->getStreamIndex()];

            coder->decodeVideo(frame, pkt);

            if (frame->isComplete())
            {
                //cout << "Complete frame: " << frame->getPts() << ", " << frame->getWidth() << "x" << frame->getHeight() << " / " << frame->getSize() << endl;

                FramePtr outFrame(new Frame(outCoder->getPixelFormat(), outCoder->getWidth(), outCoder->getHeight()));

                resampler->resample(outFrame, frame);

                PacketPtr outPkt(new Packet());
                stat = outCoder->encodeVideo(outPkt, outFrame);
                if (stat < 0)
                {
                    cout << "Encoding error...\n";
                }
                else
                {

#if 0
                    //cout << "Encode packet: " << outPkt->isComplete() << endl;
                    packets.push_back(outPkt);
                    // HACK:
                    if (packets.size() > 100)
                        break;
#endif
                    writer->writePacket(outPkt);
                }
            }
        }
    }


#if 0
    //
    // Writing
    //
    ContainerFormatPtr writeFormat(new ContainerFormat());
    writeFormat->setOutputFormat("flv", 0, 0);

    ContainerPtr writer(new Container());
    writer->setFormat(outFormat);

    CodecPtr writeCodec = Codec::findEncodingCodec(writeFormat->getOutputDefaultVideoCodec());

    // add stream
    StreamPtr writeStream = writer->addNewStream(writeCodec);
    StreamCoderPtr writeCoder(new StreamCoder(writeStream));

    writeCoder->setCodec(writeCodec);
    writeCoder->setTimeBase(Rational(1, 25));
    writeCoder->setWidth(384);
    writeCoder->setHeight(288);
    writeCoder->setPixelFormat(PIX_FMT_YUV420P);

    writeStream->setFrameRate(Rational(25, 1));

    writeCoder->open();

    MyWriter fwriter(outURI);
    //writer->openOutput(fwriter);
    writer->openOutput(outURI);
    writer->dump();
    writer->writeHeader();
    writer->flush();

    for (size_t i = 0; i < packets.size(); ++i)
    {
        writer->writePacket(packets.at(i));
    }

    writer->close();
#endif

    return 0;
}
Example #18
 /// Read 'count' bytes, return bytes successfully read
 int ADR_CALL read(void *buf, int count)
   { return inp->read(buf,count); }