Example #1
bool AVMuxer::Private::prepareStreams()
{
    audio_streams.clear();
    video_streams.clear();
    subtitle_streams.clear();
    AVOutputFormat* fmt = format_ctx->oformat;
    if (venc) {
        AVStream *s = addStream(format_ctx, venc->codecName(), fmt->video_codec);
        if (s) {
            AVCodecContext *c = s->codec;
            c->bit_rate = venc->bitRate();
            c->width = venc->width();
            c->height = venc->height();
            /// MUST be set after the encoder is opened, to ensure the format is valid and matches the encoder
            c->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(venc->pixelFormat());
            video_streams.push_back(s->id);
        }
    }
    if (aenc) {
        AVStream *s = addStream(format_ctx, aenc->codecName(), fmt->audio_codec);
        if (s) {
            AVCodecContext *c = s->codec;
            c->bit_rate = aenc->bitRate();
            /// MUST be set after the encoder is opened, to ensure the format is valid and matches the encoder
            c->sample_rate = aenc->audioFormat().sampleRate();
            c->sample_fmt = (AVSampleFormat)aenc->audioFormat().sampleFormatFFmpeg();
            c->channel_layout = aenc->audioFormat().channelLayoutFFmpeg();
            c->channels = aenc->audioFormat().channels();
            c->bits_per_raw_sample = aenc->audioFormat().bytesPerSample()*8; // may not be needed
            audio_streams.push_back(s->id);
        }
    }
    return !(audio_streams.isEmpty() && video_streams.isEmpty() && subtitle_streams.isEmpty());
}
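Example #1 configures the encoder through AVStream::codec, a field that was deprecated in FFmpeg 3.1 and later removed. A minimal sketch of the equivalent stream setup against the AVCodecParameters API that replaced it; format_ctx and encoder are assumed to exist as in the example above, and the numeric values are hypothetical:

// Sketch only (FFmpeg 3.1+): configure a standalone codec context,
// open it, then copy the opened settings into the stream's codecpar.
AVStream *s = avformat_new_stream(format_ctx, NULL);
AVCodecContext *enc = avcodec_alloc_context3(encoder);
enc->bit_rate  = 400000;
enc->width     = 1280;
enc->height    = 720;
enc->pix_fmt   = AV_PIX_FMT_YUV420P;
enc->time_base = av_make_q(1, 25);
if (avcodec_open2(enc, encoder, NULL) == 0)
    avcodec_parameters_from_context(s->codecpar, enc);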
Example #2
bool AVMuxer::Private::prepareStreams()
{
    audio_streams.clear();
    video_streams.clear();
    subtitle_streams.clear();
    AVOutputFormat* fmt = format_ctx->oformat;
    if (venc && !venc->codecName().isEmpty()) {
        AVCodec *codec = avcodec_find_encoder_by_name(venc->codecName().toUtf8().constData());
        if (codec) // avcodec_find_encoder_by_name() returns NULL for unknown names
            addStream(format_ctx, codec->id);
    } else if (fmt->video_codec != QTAV_CODEC_ID(NONE)) {
        addStream(format_ctx, fmt->video_codec);
    }
    return true;
}
Example #3
EDLL void AtcpRemote::run()
{
    socket=init(5027);
    srvstop=false;
    srvrun=false;
    if(socket!=INVALID_SOCKET)
    {
        int		ciplen=sizeof(SOCKADDR_IN);
        if(listen(socket,SOMAXCONN)!=SOCKET_ERROR)
        {
            srvrun=true;
            while(!srvstop)
            {
                SOCKADDR_IN	cip;
                SOCKET		csock=accept(socket,(struct sockaddr *)&cip, &ciplen);
                if(csock!=INVALID_SOCKET)
                    addStream(csock);
                sleep(1);
            }
        }
        closesocket(socket);
        socket=INVALID_SOCKET;
    }
    WSACleanup();
    srvrun=false;
}
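The init(5027) helper above is not shown; presumably it performs WSAStartup, creates the listening socket, and binds it to the given port. A hedged sketch of what such a helper typically looks like on Winsock (everything beyond the Winsock API itself is hypothetical):

SOCKET init(unsigned short port) {
    WSADATA wsa;
    if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0)
        return INVALID_SOCKET;                     // Winsock unavailable
    SOCKET s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
    if (s == INVALID_SOCKET)
        return INVALID_SOCKET;
    SOCKADDR_IN addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family      = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_ANY);      // listen on all interfaces
    addr.sin_port        = htons(port);
    if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) == SOCKET_ERROR) {
        closesocket(s);
        return INVALID_SOCKET;
    }
    return s;                                      // caller invokes listen()/accept()
}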
Example #4
void AudioMixerSlave::mixStream(AudioMixerClientData& listenerNodeData, const QUuid& sourceNodeID,
        const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) {
    // only add the stream to the mix if it has a valid position, we won't know how to mix it otherwise
    if (streamToAdd.hasValidPosition()) {
        addStream(listenerNodeData, sourceNodeID, listeningNodeStream, streamToAdd, false);
    }
}
Example #5
//-----------------------------------------------------------------------------------------------
// initializes the Pipe, sets the streams and schedule
//-----------------------------------------------------------------------------------------------
void Pipe::initialize(const QVector<double> &schedule)
{
    m_schedule = schedule;

    clearStreams();
    for(int i = 0; i < m_schedule.size(); ++i) addStream(new Stream(schedule.at(i), 0, 0, 0, 0));

}
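A minimal usage sketch for Pipe::initialize, with hypothetical schedule values (assumes Pipe is default-constructible):

QVector<double> schedule;
schedule << 0.0 << 30.0 << 60.0;  // three entries -> three zero-initialized streams
Pipe pipe;
pipe.initialize(schedule);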
Example #6
void StreamTable::loadFromFile() {
    StreamConfiguration sc;
    sc.load(); // load configured streams
    std::vector<std::pair<wxString, wxString> >::iterator it;
    it = sc.getStreams().begin();
    while(it < sc.getStreams().end()) {
        std::pair<wxString, wxString> p = *it;
        addStream(p.first, p.second);
        it++;
    }
}
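Note that the loop above re-evaluates sc.getStreams() on each iteration; that is only safe if the accessor returns a reference rather than a fresh copy of the vector (comparing iterators obtained from two different temporaries is undefined). Under that same assumption, a C++11 range-based form reads more directly:

for (const std::pair<wxString, wxString> &p : sc.getStreams())
    addStream(p.first, p.second);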
Example #7
    void FtpServerDataConnection::addString(const char *str) {

      std::string *newstr;

      newstr=new std::string(str);
      (*newstr)+="\r\n";

      addStream(
        new StlStringInputStream(
            newstr,true),
            true
          );
    }
Example #8
  void TfStreamServer::alGoalCb(AlServer::GoalHandle gh)
  {
    if (!gh.getGoal())
    {
      gh.setCanceled(AlServer::Result(), "something went wrong, goal canceled");
      return;
    }

    if (gh.getGoal()->update)
      updateStream(gh);
    else
      addStream(gh);
  }
Example #9
/* Listener client attaches to a stream
 */
bool openavbEptSrvrAttachStream(int h,
                            AVBStreamID_t *streamID,
                            openavbSrpLsnrDeclSubtype_t ld)
{
	openavbRC rc = OPENAVB_SUCCESS;
	static U8 emptyMAC[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
	static AVBTSpec_t emptytSpec = {0, 0};

	AVB_TRACE_ENTRY(AVB_TRACE_ENDPOINT);

	clientStream_t *ps = findStream(streamID);
	if (ps && ps->clientHandle != h) {
		AVB_LOGF_ERROR("Error attaching listener: multiple clients for stream %d", streamID->uniqueID);
		AVB_TRACE_EXIT(AVB_TRACE_ENDPOINT);
		return FALSE;
	}

	if (!ps) {
		ps = addStream(h, streamID);
		if (!ps) {
			AVB_LOGF_ERROR("Error attaching listener: unable to add client stream %d", streamID->uniqueID);
			AVB_TRACE_EXIT(AVB_TRACE_ENDPOINT);
			return FALSE;
		}
		ps->role = clientListener;
	}

	if(x_cfg.noSrp) {
		// we are operating in a mode supporting preconfigured streams; SRP is not in use.
		if(ld == openavbSrp_LDSt_Interest) {
			// As a proxy for SRP, which would normally make this call after confirming
			// availability of the stream, call the callback from here
			strmRegCb((void*)ps, openavbSrp_AtTyp_TalkerAdvertise,
					  emptyMAC, // a flag to listener to read info from configuration file
					  &emptytSpec,
					  MAX_AVB_SR_CLASSES, // srClass - value doesn't matter because openavbEptSrvrNotifyLstnrOfSrpCb() throws it away
					  1, // accumLatency
					  NULL); // *failInfo
		}
	} else {
		// Normal SRP Operation so pass to SRP
		rc = openavbSrpAttachStream((void*)ps, streamID, ld);
		if (!IS_OPENAVB_SUCCESS(rc))
			delStream(ps);
	}

	openavbEndPtLogAllStaticStreams();

	AVB_TRACE_EXIT(AVB_TRACE_ENDPOINT);
	return IS_OPENAVB_SUCCESS(rc);
}
Example #10
void PluginView::performURLRequest(URLRequest* request)
{
    // First, check if this is a javascript: url.
    if (protocolIsJavaScript(request->request().url())) {
        performJavaScriptURLRequest(request);
        return;
    }

    if (!request->target().isNull()) {
        performFrameLoadURLRequest(request);
        return;
    }

    // This request is to load a URL and create a stream.
    RefPtr<Stream> stream = PluginView::Stream::create(this, request->requestID(), request->request());
    addStream(stream.get());
    stream->start();
}
Example #11
void AzubuHandler::handleStreams() {

    QNetworkReply *reply = qobject_cast<QNetworkReply*>(QObject::sender());
    QByteArray data = reply->readAll();

    // delete the reply
    reply->deleteLater();

    // strip the leading and trailing wrapper characters before parsing the JSON
    QJsonDocument response(QJsonDocument::fromJson(data.mid(1,data.length()-2)));
    QJsonArray streamsArray = response.array();

    for(int i=0; i<streamsArray.size(); ++i) {

        QJsonObject streamJson = streamsArray[i].toObject();
        AzubuStream stream;

        // populate the stream
        stream.read(streamJson);

        // emit signal
        emit addStream(stream);
    }

}
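addStream is emitted as a Qt signal here, so consumers attach to it with an ordinary connect; the receiver object and slot below are hypothetical:

connect(azubuHandler, SIGNAL(addStream(AzubuStream)),
        streamList,   SLOT(onStreamAdded(AzubuStream)));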
Example #12
bool
ScreenOrientedQuad::initialize (const Ctr::Region2f& screenLocation, bool background)
{    
    setPrimitiveType (Ctr::TriangleStrip);
    setPrimitiveCount (2);
    setVertexCount (4);

    _screenLocation = screenLocation;
    uint32_t width = 1;
    uint32_t height = 1;

    //create vertex streams and vertex declaration information
    std::vector<VertexElement>       vertexElements;
    vertexElements.push_back (VertexElement( 0, 0,  FLOAT4, METHOD_DEFAULT, POSITION, 0));
    vertexElements.push_back (VertexElement( 0, 16, FLOAT2, METHOD_DEFAULT, TEXCOORD, 0));
    vertexElements.push_back (VertexElement(0xFF,0, UNUSED,0,0,0));

    float lhx = _screenLocation.minExtent.x * width;
    float lhy = _screenLocation.minExtent.y * height;
    float screenWidth = _screenLocation.size().x * width;
    float screenHeight = _screenLocation.size().y * height;

    float z = 0;
    float w = 1;

    if (background)
    {
        z = 1;
        w = 1;
    }

    Vector4f vpos[] = {Vector4f(lhx, lhy, z, w), 
                       Vector4f(lhx, lhy + screenHeight, z, w), 
                       Vector4f(lhx+screenWidth, lhy, z, w), 
                       Vector4f(lhx+screenWidth, lhy + screenHeight, z, w) };    

    Vector2f tpos[] = {Vector2f(0, 1), Vector2f(0, 0), 
                           Vector2f(1, 1), Vector2f(1, 0) };    

    if (IVertexDeclaration* vertexDeclaration =
        Ctr::VertexDeclarationMgr::vertexDeclarationMgr()->createVertexDeclaration
        (&VertexDeclarationParameters(vertexElements)))
    {
        setVertexDeclaration (vertexDeclaration);
        _positionStream = new Ctr::VertexStream
            (Ctr::POSITION, 0, 4, 4, (float*)vpos);
        _texCoordStream = new Ctr::VertexStream
            (Ctr::TEXCOORD, 0, 2, 4, (float*)tpos);

        addStream (_positionStream);
        addStream (_texCoordStream);

        if (create())
        {
            return cache();
        }
    }
    else
    {
        safedelete (vertexDeclaration);
        return false;
    }

    return false;
}
Example #13
bool OMXReader::getStreams()
{
    if(!avFormatContext)
        return false;
    
    unsigned int    programID         = UINT_MAX;
    
    ClearStreams();
    
    if (avFormatContext->nb_programs)
    {
        // look for the first non-empty stream and discard non-selected programs
        for (unsigned int i = 0; i < avFormatContext->nb_programs; i++)
        {
            if(programID == UINT_MAX && avFormatContext->programs[i]->nb_stream_indexes > 0)
                programID = i;
            
            if(i != programID)
                avFormatContext->programs[i]->discard = AVDISCARD_ALL;
        }
        if(programID != UINT_MAX)
        {
            // add streams from selected program
            for (unsigned int i = 0; i < avFormatContext->programs[programID]->nb_stream_indexes; i++)
                addStream(avFormatContext->programs[programID]->stream_index[i]);
        }
    }
    
    // if there were no programs or they were all empty, add all streams
    if (programID == UINT_MAX)
    {
        for (unsigned int i = 0; i < avFormatContext->nb_streams; i++)
            addStream(i);
    }
    
    if(videoCount)
        setActiveStreamInternal(OMXSTREAM_VIDEO, 0);
    
    if(audioCount)
        setActiveStreamInternal(OMXSTREAM_AUDIO, 0);
    
    if(subtitleCount)
        setActiveStreamInternal(OMXSTREAM_SUBTITLE, 0);
    
    int i = 0;
    for(i = 0; i < MAX_OMX_CHAPTERS; i++)
    {
        omxChapters[i].name      = "";
        omxChapters[i].seekto_ms = 0;
        omxChapters[i].ts        = 0;
    }
    
    chapterCount = 0;
    
    if(videoIndex != -1)
    {
        //m_current_chapter = 0;
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(52,14,0)
        chapterCount = (avFormatContext->nb_chapters > MAX_OMX_CHAPTERS) ? MAX_OMX_CHAPTERS : avFormatContext->nb_chapters;
        for(i = 0; i < chapterCount; i++)
        {
            if(i > MAX_OMX_CHAPTERS)
                break;
            
            AVChapter *chapter = avFormatContext->chapters[i];
            if(!chapter)
                continue;
            
            omxChapters[i].seekto_ms = ConvertTimestamp(chapter->start, chapter->time_base.den, chapter->time_base.num) / 1000;
            omxChapters[i].ts        = omxChapters[i].seekto_ms / 1000;
            
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(52,83,0)
            AVDictionaryEntry *titleTag = av_dict_get(avFormatContext->chapters[i]->metadata,"title", NULL, 0);
            if (titleTag)
                omxChapters[i].name = titleTag->value;
#else
            if(avFormatContext->chapters[i]->title)
                omxChapters[i].name = avFormatContext->chapters[i]->title;
#endif
            printf("Chapter : \t%d \t%s \t%8.2f\n", i, omxChapters[i].name.c_str(), omxChapters[i].ts);
        }
#endif
    }
    
    return true;
}
Example #14
/*===========================================================================
 * FUNCTION   : addReprocStreamsFromSource
 *
 * DESCRIPTION: add reprocess streams from input source channel
 *
 * PARAMETERS :
 *   @allocator      : stream related buffer allocator
 *   @config         : pp feature configuration
 *   @pSrcChannel    : ptr to input source channel that needs reprocess
 *   @minStreamBufNum: number of stream buffers needed
 *   @burstNum       : number of burst captures needed
 *   @paddingInfo    : padding information
 *   @param          : reference to parameters
 *   @contStream     : continuous streaming mode or burst
 *   @offline        : configure for offline reprocessing
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              non-zero failure code
 *==========================================================================*/
int32_t QCameraReprocessChannel::addReprocStreamsFromSource(QCameraAllocator& allocator,
                                                            cam_pp_feature_config_t &config,
                                                            QCameraChannel *pSrcChannel,
                                                            uint8_t minStreamBufNum,
                                                            uint32_t burstNum,
                                                            cam_padding_info_t *paddingInfo,
                                                            QCameraParameters &param,
                                                            bool contStream,
                                                            bool offline)
{
    int32_t rc = 0;
    QCameraStream *pStream = NULL;
    QCameraHeapMemory *pStreamInfoBuf = NULL;
    cam_stream_info_t *streamInfo = NULL;

    memset(mSrcStreamHandles, 0, sizeof(mSrcStreamHandles));

    for (int i = 0; i < pSrcChannel->getNumOfStreams(); i++) {
        pStream = pSrcChannel->getStreamByIndex(i);
        if (pStream != NULL) {
            if (pStream->isTypeOf(CAM_STREAM_TYPE_METADATA) ||
                pStream->isTypeOf(CAM_STREAM_TYPE_RAW)) {
                // Skip metadata & raw for reprocess now because the PP module cannot handle
                // metadata & raw. May need further discussion if Imaginglib needs metadata
                continue;
            }

            if (pStream->isTypeOf(CAM_STREAM_TYPE_POSTVIEW) ||
                pStream->isTypeOf(CAM_STREAM_TYPE_PREVIEW)) {
                // Skip postview: in the non-ZSL case, we don't want to send
                // the thumbnail through reprocess.
                // Skip preview: for the same reason in the ZSL case
                continue;
            }

            if(pStream->isTypeOf(CAM_STREAM_TYPE_PREVIEW) ||
               pStream->isTypeOf(CAM_STREAM_TYPE_POSTVIEW) ||
               pStream->isOrignalTypeOf(CAM_STREAM_TYPE_PREVIEW) ||
               pStream->isOrignalTypeOf(CAM_STREAM_TYPE_POSTVIEW)) {
                  uint32_t feature_mask = config.feature_mask;

                  if ((feature_mask & ~CAM_QCOM_FEATURE_HDR) == 0
                      && param.isHDREnabled()
                      && !param.isHDRThumbnailProcessNeeded()) {

                      // Skip thumbnail stream reprocessing in HDR
                      // if only hdr is enabled
                      continue;
                  }

                  // skip thumbnail reprocessing if not needed
                  if (!param.needThumbnailReprocess(&feature_mask)) {
                      continue;
                  }

                  //Don't do WNR for thumbnail
                  feature_mask &= ~CAM_QCOM_FEATURE_DENOISE2D;
                  if(!feature_mask) {
                    // Skip thumbnail stream reprocessing since no other
                    //reprocessing is enabled.
                      continue;
                  }
            }

            pStreamInfoBuf = allocator.allocateStreamInfoBuf(CAM_STREAM_TYPE_OFFLINE_PROC);
            if (pStreamInfoBuf == NULL) {
                ALOGE("%s: no mem for stream info buf", __func__);
                rc = NO_MEMORY;
                break;
            }

            streamInfo = (cam_stream_info_t *)pStreamInfoBuf->getPtr(0);
            memset(streamInfo, 0, sizeof(cam_stream_info_t));
            streamInfo->stream_type = CAM_STREAM_TYPE_OFFLINE_PROC;
            rc = pStream->getFormat(streamInfo->fmt);
            rc = pStream->getFrameDimension(streamInfo->dim);
            if ( contStream ) {
                streamInfo->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
                streamInfo->num_of_burst = 0;
            } else {
                streamInfo->streaming_mode = CAM_STREAMING_MODE_BURST;
                streamInfo->num_of_burst = burstNum;
            }

            cam_stream_reproc_config_t rp_cfg;
            memset(&rp_cfg, 0, sizeof(cam_stream_reproc_config_t));
            if (offline) {
                cam_frame_len_offset_t offset;
                memset(&offset, 0, sizeof(cam_frame_len_offset_t));

                rp_cfg.pp_type = CAM_OFFLINE_REPROCESS_TYPE;
                pStream->getFormat(rp_cfg.offline.input_fmt);
                pStream->getFrameDimension(rp_cfg.offline.input_dim);
                pStream->getFrameOffset(offset);
                rp_cfg.offline.input_buf_planes.plane_info = offset;
                rp_cfg.offline.input_type = pStream->getMyType();
                //For input metadata + input buffer
                rp_cfg.offline.num_of_bufs = 2;
            } else {
                rp_cfg.pp_type = CAM_ONLINE_REPROCESS_TYPE;
                rp_cfg.online.input_stream_id = pStream->getMyServerID();
                rp_cfg.online.input_stream_type = pStream->getMyType();
            }
            streamInfo->reprocess_config = rp_cfg;
            streamInfo->reprocess_config.pp_feature_config = config;

            if (!(pStream->isTypeOf(CAM_STREAM_TYPE_SNAPSHOT) ||
                pStream->isOrignalTypeOf(CAM_STREAM_TYPE_SNAPSHOT))) {
                streamInfo->reprocess_config.pp_feature_config.feature_mask &= ~CAM_QCOM_FEATURE_CAC;
                //Don't do WNR for thumbnail
                streamInfo->reprocess_config.pp_feature_config.feature_mask &= ~CAM_QCOM_FEATURE_DENOISE2D;

                if (param.isHDREnabled()
                  && !param.isHDRThumbnailProcessNeeded()){
                    streamInfo->reprocess_config.pp_feature_config.feature_mask
                      &= ~CAM_QCOM_FEATURE_HDR;
                }
            }

            if (streamInfo->reprocess_config.pp_feature_config.feature_mask & CAM_QCOM_FEATURE_ROTATION) {
                if (streamInfo->reprocess_config.pp_feature_config.rotation == ROTATE_90 ||
                    streamInfo->reprocess_config.pp_feature_config.rotation == ROTATE_270) {
                    // rotated by 90 or 270, need to switch width and height
                    int32_t temp = streamInfo->dim.height;
                    streamInfo->dim.height = streamInfo->dim.width;
                    streamInfo->dim.width = temp;
                }
            }

            if (param.isZSLMode() &&
                (streamInfo->reprocess_config.online.input_stream_type == CAM_STREAM_TYPE_SNAPSHOT)) {
                // ZSL mode snapshot need reprocess to do the flip
                int flipMode =
                    param.getFlipMode(streamInfo->reprocess_config.online.input_stream_type);
                if (flipMode > 0) {
                    streamInfo->reprocess_config.pp_feature_config.feature_mask |= CAM_QCOM_FEATURE_FLIP;
                    streamInfo->reprocess_config.pp_feature_config.flip = flipMode;
                }
            }

            if(streamInfo->reprocess_config.pp_feature_config.feature_mask & CAM_QCOM_FEATURE_SCALE){
                // we only scale the snapshot frame
                if(pStream->isTypeOf(CAM_STREAM_TYPE_SNAPSHOT)){
                    //also check whether rotation is needed
                    if((streamInfo->reprocess_config.pp_feature_config.feature_mask & CAM_QCOM_FEATURE_ROTATION) &&
                       (streamInfo->reprocess_config.pp_feature_config.rotation == ROTATE_90 ||
                        streamInfo->reprocess_config.pp_feature_config.rotation == ROTATE_270)){
                        //need swap
                        streamInfo->dim.width = streamInfo->reprocess_config.pp_feature_config.scale_param.output_height;
                        streamInfo->dim.height = streamInfo->reprocess_config.pp_feature_config.scale_param.output_width;
                    }else{
                        streamInfo->dim.width = streamInfo->reprocess_config.pp_feature_config.scale_param.output_width;
                        streamInfo->dim.height = streamInfo->reprocess_config.pp_feature_config.scale_param.output_height;
                    }
                }
                CDBG_HIGH("%s: stream width=%d, height=%d.", __func__, streamInfo->dim.width, streamInfo->dim.height);
            }

            // save source stream handler
            mSrcStreamHandles[m_numStreams] = pStream->getMyHandle();

            // add reprocess stream
            rc = addStream(allocator,
                           pStreamInfoBuf, minStreamBufNum,
                           paddingInfo,
                           NULL, NULL, false);
            if (rc != NO_ERROR) {
                ALOGE("%s: add reprocess stream failed, ret = %d", __func__, rc);
                break;
            }
        }
    }

    if (rc == NO_ERROR) {
        m_pSrcChannel = pSrcChannel;
    }
    return rc;
}
Example #15
void PeerConnection00::addStream(PassRefPtr<MediaStream> stream, const Dictionary& mediaStreamHints, ExceptionCode& ec)
{
    // FIXME: When the spec says what the mediaStreamHints should look like use it.
    addStream(stream, ec);
}
Example #16
bool StAVVideoMuxer::save(const StString& theFile) {
    if(myCtxListSrc.isEmpty()
    || theFile.isEmpty()) {
        return false;
    }

    StString aFormatName = myCtxListSrc[0]->iformat->name;
    const char* aFormatStr = formatToMetadata(myStereoFormat);

    std::vector<StRemuxContext> aSrcCtxList;
    //StArrayList<StRemuxContext> aSrcCtxList;
    unsigned int aStreamCount = 0;

    StAVOutContext aCtxOut;
    if(!aCtxOut.findFormat(NULL, theFile.toCString())) {
        signals.onError(StString("Unable to find a suitable output format for '") + theFile + "'.");
        return false;
    } else if(!aCtxOut.create(theFile)) {
        signals.onError(StString("Could not create output context."));
        return false;
    }

    for(size_t aCtxId = 0; aCtxId < myCtxListSrc.size(); ++aCtxId) {
        StRemuxContext aCtxSrc;
        aCtxSrc.Context = myCtxListSrc[aCtxId];
        if(aCtxId == 0) {
            av_dict_copy(&aCtxOut.Context->metadata, aCtxSrc.Context->metadata, AV_DICT_DONT_OVERWRITE);
            av_dict_set(&aCtxOut.Context->metadata, "STEREO_MODE", aFormatStr, 0);
        }
        for(unsigned int aStreamId = 0; aStreamId < aCtxSrc.Context->nb_streams; ++aStreamId) {
            aCtxSrc.Streams.add((unsigned int )-1);
            AVStream* aStreamSrc = aCtxSrc.Context->streams[aStreamId];
            if(aStreamSrc->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                if(addStream(aCtxOut.Context, aStreamSrc)) {
                    aCtxSrc.Streams[aStreamId] = aStreamCount++;
                }
            }
        }
        aSrcCtxList.push_back(aCtxSrc);
    }

    // add audio streams after video
    for(size_t aCtxId = 0; aCtxId < myCtxListSrc.size(); ++aCtxId) {
        StRemuxContext& aCtxSrc = aSrcCtxList[aCtxId];
        for(unsigned int aStreamId = 0; aStreamId < aCtxSrc.Context->nb_streams; ++aStreamId) {
            AVStream* aStreamSrc = aCtxSrc.Context->streams[aStreamId];
            if(aStreamSrc->codec->codec_type == AVMEDIA_TYPE_AUDIO
            && addStream(aCtxOut.Context, aStreamSrc)) {
                aCtxSrc.Streams[aStreamId] = aStreamCount++;
            }
        }
    }

    // add other streams (subtitles) at the end
    for(size_t aCtxId = 0; aCtxId < myCtxListSrc.size(); ++aCtxId) {
        StRemuxContext& aCtxSrc = aSrcCtxList[aCtxId];
        for(unsigned int aStreamId = 0; aStreamId < aCtxSrc.Context->nb_streams; ++aStreamId) {
            AVStream* aStreamSrc = aCtxSrc.Context->streams[aStreamId];
            if(aStreamSrc->codec->codec_type != AVMEDIA_TYPE_VIDEO
            && aStreamSrc->codec->codec_type != AVMEDIA_TYPE_AUDIO
            && addStream(aCtxOut.Context, aStreamSrc)) {
                aCtxSrc.Streams[aStreamId] = aStreamCount++;
            }
        }
    }

    av_dump_format(aCtxOut.Context, 0, theFile.toCString(), 1);
    if(!(aCtxOut.Context->oformat->flags & AVFMT_NOFILE)) {
        const int aState = avio_open2(&aCtxOut.Context->pb, theFile.toCString(), AVIO_FLAG_WRITE, NULL, NULL);
        if(aState < 0) {
            signals.onError(StString("Could not open output file '") + theFile + "' (" + stAV::getAVErrorDescription(aState) + ")");
            return false;
        }
    }

    int aState = avformat_write_header(aCtxOut.Context, NULL);
    if(aState < 0) {
        signals.onError(StString("Error occurred when opening output file (") + stAV::getAVErrorDescription(aState) + ").");
        return false;
    }

    AVPacket aPacket;
    for(;;) {
        size_t aNbEmpty = 0;
        for(size_t aCtxId = 0; aCtxId < aSrcCtxList.size(); ++aCtxId) {
            StRemuxContext& aCtxSrc = aSrcCtxList[aCtxId];
            if(!aCtxSrc.State) {
                ++aNbEmpty;
                continue;
            }

            if(av_read_frame(aCtxSrc.Context, &aPacket) < 0) {
                aCtxSrc.State = false;
                ++aNbEmpty;
                continue;
            }

            unsigned int aStreamOutIndex = aCtxSrc.Streams[aPacket.stream_index];
            if(aStreamOutIndex == (unsigned int )-1) {
                av_free_packet(&aPacket); // release packets from unmapped streams to avoid leaking them
                continue;
            }

            AVStream* aStreamIn  = aCtxSrc.Context->streams[aPacket.stream_index];
            AVStream* aStreamOut = aCtxOut.Context->streams[aStreamOutIndex];

        #ifdef ST_LIBAV_FORK
            const AVRounding aRoundParams = AV_ROUND_NEAR_INF;
        #else
            const AVRounding aRoundParams = AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
        #endif
            aPacket.pts      = av_rescale_q_rnd(aPacket.pts, aStreamIn->time_base, aStreamOut->time_base, aRoundParams);
            aPacket.dts      = av_rescale_q_rnd(aPacket.dts, aStreamIn->time_base, aStreamOut->time_base, aRoundParams);
            aPacket.duration = static_cast<int >(av_rescale_q(aPacket.duration, aStreamIn->time_base, aStreamOut->time_base));
            aPacket.pos      = -1;

            aState = av_interleaved_write_frame(aCtxOut.Context, &aPacket);
            if(aState < 0) {
                signals.onError(StString("Error muxing packet (") + stAV::getAVErrorDescription(aState) + ").");
                return false;
            }
            av_free_packet(&aPacket);
        }
        if(aNbEmpty == aSrcCtxList.size()) {
            break;
        }
    }
    av_write_trailer(aCtxOut.Context);
    return true;
}
Example #17
int EncoderFfmpegCore::initEncoder(int bitrate, int samplerate) {

#ifndef avformat_alloc_output_context2
    qDebug() << "EncoderFfmpegCore::initEncoder: Old Style initialization";
    m_pEncodeFormatCtx = avformat_alloc_context();
#endif

    m_lBitrate = bitrate * 1000;
    m_lSampleRate = samplerate;

#if LIBAVCODEC_VERSION_INT > 3544932
    if (m_SCcodecId == AV_CODEC_ID_MP3) {
#else
    if (m_SCcodecId == CODEC_ID_MP3) {
#endif // LIBAVCODEC_VERSION_INT > 3544932
        qDebug() << "EncoderFfmpegCore::initEncoder: Codec MP3";
#ifdef avformat_alloc_output_context2
        avformat_alloc_output_context2(&m_pEncodeFormatCtx, NULL, NULL, "output.mp3");
#else
        m_pEncoderFormat = av_guess_format(NULL, "output.mp3", NULL);
#endif // avformat_alloc_output_context2

#if LIBAVCODEC_VERSION_INT > 3544932
    } else if (m_SCcodecId == AV_CODEC_ID_AAC) {
#else
    } else if (m_SCcodecId == CODEC_ID_AAC) {
#endif // LIBAVCODEC_VERSION_INT > 3544932
        qDebug() << "EncoderFfmpegCore::initEncoder: Codec M4A";
#ifdef avformat_alloc_output_context2
        avformat_alloc_output_context2(&m_pEncodeFormatCtx, NULL, NULL, "output.m4a");
#else
        m_pEncoderFormat = av_guess_format(NULL, "output.m4a", NULL);
#endif // avformat_alloc_output_context2

    } else {
        qDebug() << "EncoderFfmpegCore::initEncoder: Codec OGG/Vorbis";
#ifdef avformat_alloc_output_context2
        avformat_alloc_output_context2(&m_pEncodeFormatCtx, NULL, NULL, "output.ogg");
        m_pEncodeFormatCtx->oformat->audio_codec=AV_CODEC_ID_VORBIS;
#else
        m_pEncoderFormat = av_guess_format(NULL, "output.ogg", NULL);
#if LIBAVCODEC_VERSION_INT > 3544932
        m_pEncoderFormat->audio_codec=AV_CODEC_ID_VORBIS;
#else
        m_pEncoderFormat->audio_codec=CODEC_ID_VORBIS;
#endif // LIBAVCODEC_VERSION_INT > 3544932
#endif // avformat_alloc_output_context2
    }

#ifdef avformat_alloc_output_context2
    m_pEncoderFormat = m_pEncodeFormatCtx->oformat;
#else
    m_pEncodeFormatCtx->oformat = m_pEncoderFormat;
#endif // avformat_alloc_output_context2

    m_pEncoderAudioStream = addStream(m_pEncodeFormatCtx, &m_pEncoderAudioCodec,
                                      m_pEncoderFormat->audio_codec);

    openAudio(m_pEncoderAudioCodec, m_pEncoderAudioStream);

    // qDebug() << "jepusti";

    return 0;
}

// Private methods

int EncoderFfmpegCore::writeAudioFrame(AVFormatContext *formatctx,
                                       AVStream *stream) {
    AVCodecContext *l_SCodecCtx = NULL;
    AVPacket l_SPacket;
    AVFrame *l_SFrame = avcodec_alloc_frame();
    int l_iGotPacket;
    int l_iRet;
#ifdef av_make_error_string
    char l_strErrorBuff[256];
#endif // av_make_error_string

    av_init_packet(&l_SPacket);
    l_SPacket.size = 0;
    l_SPacket.data = NULL;

    // Calculate correct DTS for FFMPEG
    m_lDts = round(((double)m_lRecordedBytes / (double)44100 / (double)2. *
                    (double)m_pEncoderAudioStream->time_base.den));
    m_lPts = m_lDts;

    l_SCodecCtx = stream->codec;
#ifdef av_make_error_string
    memset(l_strErrorBuff, 0x00, 256);
#endif // av_make_error_string

    l_SFrame->nb_samples = m_iAudioInputFrameSize;
    // Mixxx uses float (32 bit) samples..
    l_SFrame->format = AV_SAMPLE_FMT_FLT;
#ifndef __FFMPEGOLDAPI__
    l_SFrame->channel_layout = l_SCodecCtx->channel_layout;
#endif // __FFMPEGOLDAPI__

    l_iRet = avcodec_fill_audio_frame(l_SFrame,
                                      l_SCodecCtx->channels,
                                      AV_SAMPLE_FMT_FLT,
                                      (const uint8_t *)m_pFltSamples,
                                      m_iFltAudioCpyLen,
                                      1);

    if (l_iRet != 0) {
#ifdef av_make_error_string
        qDebug() << "Can't fill FFMPEG frame: error " << l_iRet << "String '" <<
                 av_make_error_string(l_strErrorBuff, 256, l_iRet) << "'" <<
                 m_iFltAudioCpyLen;
#endif // av_make_error_string
        qDebug() << "Can't refill 1st FFMPEG frame!";
        return -1;
    }

    // If we have something other than AV_SAMPLE_FMT_FLT we have to convert it
    // to a sample format the codec accepts.
    if (l_SCodecCtx->sample_fmt != AV_SAMPLE_FMT_FLT) {

        reSample(l_SFrame);
        // After we have converted our samples to the destination
        // format we must re-allocate l_SFrame; it's easier like this.
#if LIBAVCODEC_VERSION_INT > 3544932
        avcodec_free_frame(&l_SFrame);
#else
        av_free(l_SFrame);
#endif // LIBAVCODEC_VERSION_INT > 3544932
        l_SFrame = NULL;
        l_SFrame = avcodec_alloc_frame();
        l_SFrame->nb_samples = m_iAudioInputFrameSize;
        l_SFrame->format = l_SCodecCtx->sample_fmt;
#ifndef __FFMPEGOLDAPI__
        l_SFrame->channel_layout = m_pEncoderAudioStream->codec->channel_layout;
#endif // __FFMPEGOLDAPI__

        l_iRet = avcodec_fill_audio_frame(l_SFrame, l_SCodecCtx->channels,
                                          l_SCodecCtx->sample_fmt,
                                          (const uint8_t *)m_pResample->getBuffer(),
                                          m_iAudioCpyLen,
                                          1);

        if (l_iRet != 0) {
#ifdef av_make_error_string
            qDebug() << "Can't refill FFMPEG frame: error " << l_iRet << "String '" <<
                     av_make_error_string(l_strErrorBuff, 256,
                                          l_iRet) << "'" <<  m_iAudioCpyLen <<
                     " " <<  av_samples_get_buffer_size(
                         NULL, 2,
                         m_iAudioInputFrameSize,
                         m_pEncoderAudioStream->codec->sample_fmt,
                         1) << " " << m_pOutSize;
#endif // av_make_error_string
            qDebug() << "Can't refill 2nd FFMPEG frame!";
            return -1;
        }
    }

    //qDebug() << "!!" << l_iRet;
    l_iRet = avcodec_encode_audio2(l_SCodecCtx, &l_SPacket, l_SFrame,
                                   &l_iGotPacket);

    if (l_iRet < 0) {
        qDebug() << "Error encoding audio frame";
        return -1;
    }

    if (!l_iGotPacket) {
        // qDebug() << "No packet! Can't encode audio!!";
        return -1;
    }

    l_SPacket.stream_index = stream->index;

    // Calculate DTS/PTS and hand it to FFmpeg;
    // then codecs like OGG/Vorbis work correctly.
    l_SPacket.dts = m_lDts;
    l_SPacket.pts = m_lDts;

    // Sometimes den is zero, and dividing by it
    // is undefined, so patch it up
    if (m_pEncoderAudioStream->pts.den == 0) {
        qDebug() << "Time hack!";
        m_pEncoderAudioStream->pts.den = 1;
    }

    // Write the compressed frame to the media file.
    l_iRet = av_interleaved_write_frame(formatctx, &l_SPacket);

    if (l_iRet != 0) {
        qDebug() << "Error while writing audio frame";
        return -1;
    }

    av_free_packet(&l_SPacket);
    av_destruct_packet(&l_SPacket);
    av_free(l_SFrame);

    return 0;
}
Example #18
static int svs_core_Camera_init(svs_core_Camera *self, PyObject *args, PyObject *kwds) {
    static char *kwlist[] = {
        "ip", "source_ip", "buffer_count", "packet_size", "queue_length", NULL
    };

    const char *ip = NULL;
    const char *source_ip = NULL;
    unsigned int buffer_count = 10;
    unsigned int packet_size = 9000;
    uint32_t ip_num, source_ip_num;
    char *manufacturer, *model;
    int ret;

    self->main_thread = PyGILState_GetThisThreadState();
    self->ready = NOT_READY;
    TAILQ_INIT(&self->images);
    self->images_length = 0;
    self->images_max = 50;

    /*
     * This means the definition is:
     * def __init__(self, ip, source_ip, buffer_count=10, packet_size=9000,
     *              queue_length=50):
     */
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "ss|III", kwlist,
                &ip, &source_ip, &buffer_count, &packet_size,
                &self->images_max)) {
        return -1;
    }

    ip_num = ip_string_to_int(ip);
    source_ip_num = ip_string_to_int(source_ip);

    ret = openCamera(&self->handle, ip_num, source_ip_num, HEARTBEAT_TIMEOUT);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    self->ready = CONNECTED;

    manufacturer = strdup(Camera_getManufacturerName(self->handle));
    if (!manufacturer) {
        PyErr_SetString(PyExc_MemoryError, "Unable to allocate name");
        return -1;
    }

    model = strdup(Camera_getModelName(self->handle));
    if (!model) {
        free(manufacturer);
        PyErr_SetString(PyExc_MemoryError, "Unable to allocate name");
        return -1;
    }

    self->name = PyBytes_FromFormat("%s %s", manufacturer, model);
    free(manufacturer);
    free(model);
    if (!self->name) {
        return -1;
    }

    self->ready = NAME_ALLOCATED;

    ret = Camera_getTimestampTickFrequency(self->handle, &self->tick_frequency);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    ret = Camera_getImagerWidth(self->handle, &self->width);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    ret = Camera_getImagerHeight(self->handle, &self->height);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    /* 12-bit pixel depth */
    self->depth = 12;
    ret = Camera_setPixelDepth(self->handle, SVGIGE_PIXEL_DEPTH_12);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    /* Image buffer size in bytes */
    ret = Camera_getBufferSize(self->handle, &self->buffer_size);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    /* Open stream */
    ret = addStream(self->handle, &self->stream, &self->stream_ip,
                    &self->stream_port, self->buffer_size, buffer_count,
                    packet_size, PACKET_RESEND_TIMEOUT,
                    svs_core_Camera_stream_callback, self);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    ret = enableStream(self->stream, 1);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    self->ready = READY;

    return 0;
}
Example #19
StreamsBrowsePage::StreamsBrowsePage(QWidget *p)
    : SinglePageWidget(p)
    , settings(0)
{
    importAction = new Action(Icon("document-import"), i18n("Import Streams Into Favorites"), this);
    exportAction = new Action(Icon("document-export"), i18n("Export Favorite Streams"), this);
    addAction = ActionCollection::get()->createAction("addstream", i18n("Add New Stream To Favorites"), Icons::self()->addRadioStreamIcon);
    editAction = new Action(Icons::self()->editIcon, i18n("Edit"), this);
    searchAction = new Action(Icons::self()->searchIcon, i18n("Search For Streams"), this);
    connect(searchAction, SIGNAL(triggered()), this, SIGNAL(searchForStreams()));
//     connect(view, SIGNAL(itemsSelected(bool)), addToPlaylist, SLOT(setEnabled(bool)));
    connect(view, SIGNAL(doubleClicked(const QModelIndex &)), this, SLOT(itemDoubleClicked(const QModelIndex &)));
    connect(view, SIGNAL(itemsSelected(bool)), SLOT(controlActions()));
    connect(addAction, SIGNAL(triggered()), this, SLOT(addStream()));
    connect(StreamsModel::self()->addBookmarkAct(), SIGNAL(triggered()), this, SLOT(addBookmark()));
    connect(StreamsModel::self()->configureDiAct(), SIGNAL(triggered()), this, SLOT(configureDi()));
    connect(StreamsModel::self()->reloadAct(), SIGNAL(triggered()), this, SLOT(reload()));
    connect(editAction, SIGNAL(triggered()), this, SLOT(edit()));
    connect(importAction, SIGNAL(triggered()), this, SLOT(importXml()));
    connect(exportAction, SIGNAL(triggered()), this, SLOT(exportXml()));
    connect(StreamsModel::self(), SIGNAL(error(const QString &)), this, SIGNAL(error(const QString &)));
    connect(StreamsModel::self(), SIGNAL(loading()), view, SLOT(showSpinner()));
    connect(StreamsModel::self(), SIGNAL(loaded()), view, SLOT(hideSpinner()));
    connect(StreamsModel::self(), SIGNAL(categoriesChanged()), view, SLOT(closeSearch()));
    connect(StreamsModel::self(), SIGNAL(favouritesLoaded()), SLOT(expandFavourites()));
    connect(StreamsModel::self(), SIGNAL(addedToFavourites(QString)), SLOT(addedToFavourites(QString)));
    connect(DigitallyImported::self(), SIGNAL(loginStatus(bool,QString)), SLOT(updateDiStatus()));
    connect(DigitallyImported::self(), SIGNAL(updated()), SLOT(updateDiStatus()));
    connect(view, SIGNAL(headerClicked(int)), SLOT(headerClicked(int)));
    StreamsModel::self()->configureDiAct()->setEnabled(false);

    proxy.setSourceModel(StreamsModel::self());
    view->setModel(&proxy);
    view->setDeleteAction(StdActions::self()->removeAction);
    view->setSearchResetLevel(1);
    view->alwaysShowHeader();

    Configuration config(metaObject()->className());
    view->setMode(ItemView::Mode_DetailedTree);
    view->load(config);

    MenuButton *menuButton=new MenuButton(this);
    Action *configureAction=new Action(Icons::self()->configureIcon, i18n("Configure"), this);
    connect(configureAction, SIGNAL(triggered()), SLOT(configure()));
    menuButton->addAction(createViewMenu(QList<ItemView::Mode>()  << ItemView::Mode_BasicTree << ItemView::Mode_SimpleTree
                                                                  << ItemView::Mode_DetailedTree << ItemView::Mode_List));
    menuButton->addAction(configureAction);
    menuButton->addAction(StreamsModel::self()->configureDiAct());
    menuButton->addSeparator();
    menuButton->addAction(addAction);
    menuButton->addAction(StdActions::self()->removeAction);
    menuButton->addAction(editAction);
    menuButton->addAction(StreamsModel::self()->reloadAct());
    menuButton->addSeparator();
    menuButton->addAction(importAction);
    menuButton->addAction(exportAction);

    diStatusLabel=new ServiceStatusLabel(this);
    diStatusLabel->setText("DI", i18nc("Service name", "Digitally Imported"));
    connect(diStatusLabel, SIGNAL(clicked()), SLOT(diSettings()));
    updateDiStatus();  
    ToolButton *searchButton=new ToolButton(this);
    searchButton->setDefaultAction(searchAction);
    init(ReplacePlayQueue, QList<QWidget *>() << menuButton << diStatusLabel, QList<QWidget *>() << searchButton);

    view->addAction(editAction);
    view->addAction(StdActions::self()->removeAction);
    view->addAction(StreamsModel::self()->addToFavouritesAct());
    view->addAction(StreamsModel::self()->addBookmarkAct());
    view->addAction(StreamsModel::self()->reloadAct());
}
Example #20
/* Talker client registers a stream
 */
bool openavbEptSrvrRegisterStream(int h,
                              AVBStreamID_t *streamID,
                              U8 destAddr[],
                              AVBTSpec_t *tSpec,
                              U8 srClass,
                              U8 srRank,
                              U32 latency)
{
	openavbRC rc = OPENAVB_SUCCESS;

	AVB_TRACE_ENTRY(AVB_TRACE_ENDPOINT);

	clientStream_t *ps = findStream(streamID);
	
	if (ps && ps->clientHandle != h) {
		AVB_LOGF_ERROR("Error registering talker; multiple clients for stream %d", streamID->uniqueID);
		AVB_TRACE_EXIT(AVB_TRACE_ENDPOINT);
		return FALSE;
	}

	ps = addStream(h, streamID);
	if (!ps) {
		AVB_LOGF_ERROR("Error registering talker; unable to add client stream %d", streamID->uniqueID);
		AVB_TRACE_EXIT(AVB_TRACE_ENDPOINT);
		return FALSE;
	}
	ps->role = clientTalker;
	ps->tSpec = *tSpec;
	ps->srClass = (SRClassIdx_t)srClass;
	ps->srRank  = srRank;
	ps->latency = latency;
	ps->fwmark = INVALID_FWMARK;

	if (memcmp(ps->destAddr, destAddr, ETH_ALEN) == 0) {
		// no client-supplied address, use MAAP
		struct ether_addr addr;
		ps->hndMaap = openavbMaapAllocate(1, &addr);
		if (ps->hndMaap) {
			memcpy(ps->destAddr, addr.ether_addr_octet, ETH_ALEN);
			strmAttachCb((void*)ps, openavbSrp_LDSt_Stream_Info);		// Inform talker about MAAP
		}
		else {
			AVB_LOG_ERROR("Error registering talker: MAAP failed to allocate MAC address");
			AVB_TRACE_EXIT(AVB_TRACE_ENDPOINT);
			delStream(ps);
			return FALSE;
		}
	}
	else {
		// client-supplied destination MAC address
		memcpy(ps->destAddr, destAddr, ETH_ALEN);
		ps->hndMaap = NULL;
	}

	// Do SRP talker register
	AVB_LOGF_DEBUG("REGISTER: ps=%p, streamID=%d, tspec=%d,%d, srClass=%d, srRank=%d, latency=%d, da="ETH_FORMAT"",
				   ps, streamID->uniqueID,
				   tSpec->maxFrameSize, tSpec->maxIntervalFrames,
				   ps->srClass, ps->srRank, ps->latency,
				   ETH_OCTETS(ps->destAddr));


	if(x_cfg.noSrp) {
		// we are operating in a mode supporting preconfigured streams; SRP is not in use,
		// so, as a proxy for SRP, which would normally make this call after establishing
		// the stream, call the callback from here
		strmAttachCb((void*)ps, openavbSrp_LDSt_Ready);
	} else {
		// normal SRP operation
		rc = openavbSrpRegisterStream((void*)ps, &ps->streamID,
		                          ps->destAddr, &ps->tSpec,
		                          ps->srClass, ps->srRank,
		                          ps->latency);
		if (!IS_OPENAVB_SUCCESS(rc)) {
			if (ps->hndMaap)
				openavbMaapRelease(ps->hndMaap);
			delStream(ps);
		}
	}

	openavbEndPtLogAllStaticStreams();
	
	AVB_TRACE_EXIT(AVB_TRACE_ENDPOINT);
	return IS_OPENAVB_SUCCESS(rc);
}
Example #21
command_stream* evaluatePostfix(token* finalTokenStream)
{
	/*
	notes on structures and types declared above
	typedef enum
	{
		WORD_TOKEN, //ls foo
		SEMICOLON_TOKEN, // ;
		PIPE_TOKEN, // |
		AND_TOKEN, // &&
		OR_TOKEN, // ||
		LEFT_PAREN_TOKEN, // ( //not relevant at this point
		RIGHT_PAREN_TOKEN, // )//not relevant at this point
		GREATER_TOKEN, // >
		LESS_TOKEN, // <
		NEWLINE_TOKEN, // \n //not relevant at this point
		NULL_TOKEN, // //not relevant at this point
	} tokentype;
	
	enum command_type
	{
		AND_COMMAND,         // A && B
		SEQUENCE_COMMAND,    // A ; B
		OR_COMMAND,          // A || B
		PIPE_COMMAND,        // A | B
		SIMPLE_COMMAND,      // a simple command
		SUBSHELL_COMMAND,    // ( A )
	};
	  
	typedef struct command_stream
	{
		struct command_stream* next;
		struct command_stream* prev;
		struct command* root;
		
	} command_stream;
	
	struct command
	{
		command* prev;
		command* next; //for linked list before tree conversion
		enum command_type type;
		// Exit status, or -1 if not known (e.g., because it has not exited yet).
		int status;
		// I/O redirections, or 0 if none.
		char *input;
		char *output;
		union
		{
			// for AND_COMMAND, SEQUENCE_COMMAND, OR_COMMAND, PIPE_COMMAND:
			struct command *command[2];
			// for SIMPLE_COMMAND:
			char **word;
			// for SUBSHELL_COMMAND:
			struct command *subshell_command;
		} u;
	};
	*/
	command* cmd_start = NULL;
	command* cmd_end = NULL;
	
	command_stream* str_start = NULL;
	command_stream* str_end = NULL;
	
	command_stream* firststream = malloc(sizeof(command_stream));
	addStream(&str_start, &str_end, firststream);
	
	token* curr;
	for (curr = finalTokenStream; curr != NULL; curr = curr->next)
		if (curr->t == WORD_TOKEN) //add simple command to command stack
		{
			command* newcommand = malloc(sizeof(command));
			newcommand->type = SIMPLE_COMMAND;
			newcommand->status = -1;
			newcommand->input = newcommand->output = 0;
			newcommand->u.word = curr->words;
			if (curr->prev && curr->prev->sub > 1 && curr->sub == 1) //if we just ended a subshell, then make what was there a subshell command on itself
			{
				command* prevcmd = malloc(sizeof(command));
				prevcmd->type = SUBSHELL_COMMAND;
				prevcmd->input = prevcmd->output = 0;
				prevcmd->status = -1;
				prevcmd->u.subshell_command = popCommand(&cmd_start, &cmd_end);
				addCommand(&cmd_start, &cmd_end, prevcmd);
			}
			
			addCommand(&cmd_start, &cmd_end, newcommand);
		}
		else if (curr->t == SEMICOLON_TOKEN && curr->sub == 1) //stop working on the current stream and instead make a new stream if no subshell, otherwise make a sequence command
		{
			if (curr->prev && curr->prev->sub > 1) //if we just ended a subshell, then make cmd_start a subshell command on itself
			{
				command* newcommand = malloc(sizeof(command));
				newcommand->type = SUBSHELL_COMMAND;
				newcommand->input = newcommand->output = 0;
				newcommand->status = -1;
				newcommand->u.subshell_command = cmd_start;
				cmd_start = newcommand;
			}
			str_end->root=cmd_start;
			cmd_start = cmd_end = NULL;
			command_stream* newstream = malloc(sizeof(command_stream));
			addStream(&str_start, &str_end, newstream);
		}
		else if (curr->t == LESS_TOKEN || curr->t == GREATER_TOKEN) //change the input based on what command you are redirecting
		{
			//remove two last commands and make them a subtree with correct depth level and add subtree back to command list
			command* dest = popCommand(&cmd_start, &cmd_end); //where to redirect
			command* operation = popCommand(&cmd_start, &cmd_end); //what to do, assume always simple command
			
			command* newcommand = malloc(sizeof(command));
			newcommand->type = SIMPLE_COMMAND;
			newcommand->status = -1;
			if (curr->t == LESS_TOKEN)
			{
				newcommand->input = concat(dest->u.word[0], dest->u.word[1]);
				newcommand->output= operation->output;
			}
			else
			{
				newcommand->output= concat(dest->u.word[0], dest->u.word[1]);
				newcommand->input = operation->input;
			}
			newcommand->u.word = operation->u.word;
			addCommand(&cmd_start, &cmd_end, newcommand);
		}
		else //an operation that needs to be made into a tree
		{
			command* operand2 = popCommand(&cmd_start, &cmd_end), *operand1 = popCommand(&cmd_start, &cmd_end);
			
			if ((curr->prev && curr->prev->sub > 1 && curr->sub == 1) || operand2->type == SEQUENCE_COMMAND) //if we just ended a subshell, then make operand2 a subshell command on itself
			{
				command* newcommand = malloc(sizeof(command));
				newcommand->type = SUBSHELL_COMMAND;
				newcommand->input = newcommand->output = 0;
				newcommand->status = -1;
				newcommand->u.subshell_command = operand2;
				operand2 = newcommand;
			}
			if (operand1->type == SEQUENCE_COMMAND) //convert operand1 into subshell if it was a sequence, as sequences are only possible in subshells
			{
				command* newcommand = malloc(sizeof(command));
				newcommand->type = SUBSHELL_COMMAND;
				newcommand->input = newcommand->output = 0;
				newcommand->status = -1;
				newcommand->u.subshell_command = operand1;
				operand1 = newcommand;
			}
			command* newcommand = malloc(sizeof(command));
			newcommand->type = tokenToCommandType(curr->t);
			newcommand->status = -1;
			newcommand->input = newcommand->output = 0;
			newcommand->u.command[0] = operand1;
			newcommand->u.command[1] = operand2;
			addCommand(&cmd_start, &cmd_end, newcommand);
		}
	
	str_end->root=cmd_start;
	
	return str_start;
}
Example #22
void FFMPEGInvoker::send(const SendRequest& req) {
    SendRequest reqCopy = req;

    if (iequals(req.name, "render.start")) {
        // create a new encoding context
        int ret;
        EncodingContext* ctx = new EncodingContext();
        tthread::lock_guard<tthread::recursive_mutex> lock(ctx->mutex);

        std::string context;
        Event::getParam(req.params, "context", context);

        ctx->extension = "mpeg";
        Event::getParam(req.params, "format", ctx->extension);

        Event::getParam(req.params, "width", ctx->width);
        Event::getParam(req.params, "height", ctx->height);

        if (!ctx->width || !ctx->height)
            return;

        ctx->filename = URL::getTmpFilename();

        /* allocate the output media context */
        avformat_alloc_output_context2(&ctx->formatCtx, NULL, ctx->extension.c_str(), ctx->filename.c_str());
        if (!ctx->formatCtx) {
            printf("Could not deduce output format from file extension: using MPEG.\n");
            avformat_alloc_output_context2(&ctx->formatCtx, NULL, "mpeg", ctx->filename.c_str());
        }
        if (!ctx->formatCtx) {
            return;
        }
        ctx->format = ctx->formatCtx->oformat;

        /* Add the audio and video streams using the default format codecs
         * and initialize the codecs. */
        ctx->videoStream = NULL;

        if (ctx->format->video_codec != AV_CODEC_ID_NONE) {
            ctx->videoStream = addStream(ctx, ctx->formatCtx, &ctx->videoCodec, ctx->format->video_codec);
        }

        /* Now that all the parameters are set, we can open the audio and
         * video codecs and allocate the necessary encode buffers. */
        if (ctx->videoStream)
            openVideo(ctx, ctx->formatCtx, ctx->videoCodec, ctx->videoStream);

        /* open the output file, if needed */
        if (!(ctx->format->flags & AVFMT_NOFILE)) {
            ret = avio_open(&ctx->formatCtx->pb, ctx->filename.c_str(), AVIO_FLAG_WRITE);
            if (ret < 0) {
                // fprintf(stderr, "Could not open '%s': %s\n", ctx->filename.c_str(),
                //        av_err2str(ret));
                return;
            }
        }

        /* Write the stream header, if any. */
        ret = avformat_write_header(ctx->formatCtx, NULL);
        if (ret < 0) {
            // fprintf(stderr, "Error occurred when opening output file: %s\n",
            //        av_err2str(ret));
            return;
        }

        if (ctx->frame)
            ctx->frame->pts = 0;

        _encoders[context] = ctx;
    } else if(iequals(req.name, "render.frame")) {
        _workQueue.push(req);
    } else if(iequals(req.name, "render.end")) {
        _workQueue.push(req);
    }
}
Example #23
foreach (QString stream, streams) {
    addStream(stream);
}
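The foreach macro above takes an (implicitly shared) copy of the container before iterating. On Qt 5.7+ the equivalent range-for, using qAsConst to avoid detaching the container, would be:

for (const QString &stream : qAsConst(streams))
    addStream(stream);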
Example #24
void PeerConnection00::addStream(PassRefPtr<MediaStream> stream, ExceptionCode& ec)
{
    String emptyHints;
    return addStream(stream, emptyHints, ec);
}
Example #25
bool AkVCam::PluginInterface::createDevice(const std::string &deviceId,
                                           const std::wstring &description,
                                           const std::vector<VideoFormat> &formats)
{
    AkLoggerLog("AkVCam::PluginInterface::createDevice");

    StreamPtr stream;

    // Create one device.
    auto pluginRef = reinterpret_cast<CMIOHardwarePlugInRef>(this->d);
    auto device = std::make_shared<Device>(pluginRef);
    device->setDeviceId(deviceId);
    device->connectAddListener(this, &PluginInterface::addListener);
    device->connectRemoveListener(this, &PluginInterface::removeListener);
    this->m_devices.push_back(device);

    // Define device properties.
    device->properties().setProperty(kCMIOObjectPropertyName,
                                     description.c_str());
    device->properties().setProperty(kCMIOObjectPropertyManufacturer,
                                     CMIO_PLUGIN_VENDOR);
    device->properties().setProperty(kCMIODevicePropertyModelUID,
                                     CMIO_PLUGIN_PRODUCT);
    device->properties().setProperty(kCMIODevicePropertyLinkedCoreAudioDeviceUID,
                                     "");
    device->properties().setProperty(kCMIODevicePropertyLinkedAndSyncedCoreAudioDeviceUID,
                                     "");
    device->properties().setProperty(kCMIODevicePropertySuspendedByUser,
                                     UInt32(0));
    device->properties().setProperty(kCMIODevicePropertyHogMode,
                                     pid_t(-1),
                                     false);
    device->properties().setProperty(kCMIODevicePropertyDeviceMaster,
                                     pid_t(-1));
    device->properties().setProperty(kCMIODevicePropertyExcludeNonDALAccess,
                                     UInt32(0));
    device->properties().setProperty(kCMIODevicePropertyDeviceIsAlive,
                                     UInt32(1));
    device->properties().setProperty(kCMIODevicePropertyDeviceUID,
                                     deviceId.c_str());
    device->properties().setProperty(kCMIODevicePropertyTransportType,
                                     UInt32(kIOAudioDeviceTransportTypePCI));
    device->properties().setProperty(kCMIODevicePropertyDeviceIsRunningSomewhere,
                                     UInt32(0));

    if (device->createObject() != kCMIOHardwareNoError)
        goto createDevice_failed;

    stream = device->addStream();

    // Register one stream for this device.
    if (!stream)
        goto createDevice_failed;

    stream->setFormats(formats);
    stream->properties().setProperty(kCMIOStreamPropertyDirection, UInt32(0));

    if (device->registerStreams() != kCMIOHardwareNoError) {
        device->registerStreams(false);

        goto createDevice_failed;
    }

    // Register the device.
    if (device->registerObject() != kCMIOHardwareNoError) {
        device->registerObject(false);
        device->registerStreams(false);

        goto createDevice_failed;
    }

    device->setBroadcasting(this->d->m_ipcBridge.broadcaster(deviceId));
    device->setMirror(this->d->m_ipcBridge.isHorizontalMirrored(deviceId),
                      this->d->m_ipcBridge.isVerticalMirrored(deviceId));
    device->setScaling(this->d->m_ipcBridge.scalingMode(deviceId));
    device->setAspectRatio(this->d->m_ipcBridge.aspectRatioMode(deviceId));
    device->setSwapRgb(this->d->m_ipcBridge.swapRgb(deviceId));

    return true;

createDevice_failed:
    this->m_devices.erase(std::prev(this->m_devices.end()));

    return false;
}