void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) {

    auto nodeList = DependencyManager::get<NodeList>();

    // The append flag is a small bitmask (START/END bits) packed right after the header.
    // It lets the client know when it has received all stats packets, so it can apply any downstream
    // effects as a group and clear its cache of injector stream stats; this prevents dead audio
    // stream stats from building up in the client.
    quint8 appendFlag = AudioStreamStats::START;

    auto streamsCopy = getAudioStreams();

    // pack and send stream stats packets until all audio streams' stats are sent
    int numStreamStatsRemaining = int(streamsCopy.size());
    auto it = streamsCopy.cbegin();

    while (numStreamStatsRemaining > 0) {
        auto statsPacket = NLPacket::create(PacketType::AudioStreamStats);

        int numStreamStatsRoomFor = (int)(statsPacket->size() - sizeof(quint8) - sizeof(quint16)) / sizeof(AudioStreamStats);

        // calculate the number of stream stats to follow
        quint16 numStreamStatsToPack = std::min(numStreamStatsRemaining, numStreamStatsRoomFor);

        // is this the terminal packet?
        if (numStreamStatsRemaining <= numStreamStatsToPack) {
            appendFlag |= AudioStreamStats::END;
        }

        // pack the append flag in this packet
        statsPacket->writePrimitive(appendFlag);
        appendFlag = 0;

        // pack the number of stream stats to follow
        statsPacket->writePrimitive(numStreamStatsToPack);

        // pack the calculated number of stream stats
        for (int i = 0; i < numStreamStatsToPack; i++) {
            PositionalAudioStream* stream = it->second.get();

            stream->perSecondCallbackForUpdatingStats();

            AudioStreamStats streamStats = stream->getAudioStreamStats();
            statsPacket->writePrimitive(streamStats);

            ++it;
        }

        numStreamStatsRemaining -= numStreamStatsToPack;

        // send the current packet
        nodeList->sendPacket(std::move(statsPacket), *destinationNode);
    }
}
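The START/END bits above form a small grouping protocol: everything between a packet carrying START and a packet carrying END belongs to one batch. A minimal receive-side sketch, assuming a readPrimitive() that mirrors writePrimitive() and hypothetical pendingInjectorStats / commitInjectorStats helpers (these names are illustrative, not the actual client code):

// Hypothetical receive-side sketch for the START/END append-flag scheme.
void handleAudioStreamStatsPacket(NLPacket& packet) {
    quint8 appendFlag = 0;
    quint16 numStreamStats = 0;
    packet.readPrimitive(&appendFlag);
    packet.readPrimitive(&numStreamStats);

    if (appendFlag & AudioStreamStats::START) {
        // first packet of a new group: drop cached stats so dead streams disappear
        pendingInjectorStats.clear();
    }

    for (quint16 i = 0; i < numStreamStats; i++) {
        AudioStreamStats stats;
        packet.readPrimitive(&stats);
        pendingInjectorStats.push_back(stats);
    }

    if (appendFlag & AudioStreamStats::END) {
        // group complete: apply the whole batch at once
        commitInjectorStats(pendingInjectorStats);
    }
}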
void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) {

    auto nodeList = DependencyManager::get<NodeList>();

    // The append flag is a boolean value packed right after the header. The first packet sent
    // by this method has the flag set to 0, while every subsequent packet has it set to 1.
    // Its sole purpose is to let the client clear its map of injected audio stream stats when it
    // receives a packet with an appendFlag of 0, preventing the buildup of dead audio stream stats in the client.
    quint8 appendFlag = 0;

    auto streamsCopy = getAudioStreams();

    // pack and send stream stats packets until all audio streams' stats are sent
    int numStreamStatsRemaining = int(streamsCopy.size());
    auto it = streamsCopy.cbegin();

    while (numStreamStatsRemaining > 0) {
        auto statsPacket = NLPacket::create(PacketType::AudioStreamStats);

        // pack the append flag in this packet
        statsPacket->writePrimitive(appendFlag);
        appendFlag = 1;

        int numStreamStatsRoomFor = (int)(statsPacket->size() - sizeof(quint8) - sizeof(quint16)) / sizeof(AudioStreamStats);

        // calculate and pack the number of stream stats to follow
        quint16 numStreamStatsToPack = std::min(numStreamStatsRemaining, numStreamStatsRoomFor);
        statsPacket->writePrimitive(numStreamStatsToPack);

        // pack the calculated number of stream stats
        for (int i = 0; i < numStreamStatsToPack; i++) {
            PositionalAudioStream* stream = it->second.get();

            stream->perSecondCallbackForUpdatingStats();

            AudioStreamStats streamStats = stream->getAudioStreamStats();
            statsPacket->writePrimitive(streamStats);

            ++it;
        }

        numStreamStatsRemaining -= numStreamStatsToPack;

        // send the current packet
        nodeList->sendPacket(std::move(statsPacket), *destinationNode);
    }
}
FFmpegPlayer::~FFmpegPlayer()
{
    av_log(NULL, AV_LOG_INFO, "Destructing FFmpegPlayer...");

    quit(true);

    av_log(NULL, AV_LOG_INFO, "Have done quit");

    // release the audio streams to make sure that the decoder doesn't retain any external
    // references.
    getAudioStreams().clear(); // todo: I guess these objects should be deleted before clear()

    delete m_commands;

    delete m_vodd; // deleting a null pointer is a no-op, so no guard is needed

    av_log(NULL, AV_LOG_INFO, "Destructed FFmpegPlayer");
}
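The TODO above only matters if the stream container owns raw pointers; with reference-counted elements (e.g. osg::ref_ptr), clear() already releases the streams. A sketch of the raw-pointer case, assuming a hypothetical std::vector<osg::AudioStream*> behind getAudioStreams():

// Hypothetical: only needed if getAudioStreams() held owning raw pointers,
// in which case clear() alone would leak the stream objects.
for (osg::AudioStream* stream : getAudioStreams()) {
    delete stream;
}
getAudioStreams().clear();

// Preferred fix: store smart pointers so clear() releases ownership itself:
// std::vector<osg::ref_ptr<osg::AudioStream>> m_audioStreams;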
Example #4
FFmpegImageStream::~FFmpegImageStream()
{
    OSG_INFO<<"Destructing FFmpegImageStream..."<<std::endl;

    quit(true);
    
    OSG_INFO<<"Have done quit"<<std::endl;

    // release the audio streams to make sure that the decoder doesn't retain any external
    // references.
    getAudioStreams().clear();

    // destroy the decoder and associated threads
    m_decoder = 0;


    delete m_commands;

    OSG_INFO<<"Destructed FFMpegImageStream."<<std::endl;
}
Example #5
bool FFmpegImageStream::open(const std::string & filename, FFmpegParameters* parameters)
{
    setFileName(filename);

    if (! m_decoder->open(filename, parameters))
        return false;

    setImage(
        m_decoder->video_decoder().width(), m_decoder->video_decoder().height(), 1, GL_RGBA, GL_BGRA, GL_UNSIGNED_BYTE,
        const_cast<unsigned char *>(m_decoder->video_decoder().image()), NO_DELETE
    );


    setPixelAspectRatio(m_decoder->video_decoder().pixelAspectRatio());

    OSG_NOTICE<<"ffmpeg::open("<<filename<<") size("<<s()<<", "<<t()<<") aspect ratio "<<m_decoder->video_decoder().pixelAspectRatio()<<std::endl;

#if 1
    // swscale reports errors and then crashes when rescaling video smaller than 10 by 10.
    if (s()<=10 || t()<=10) return false;
#endif

    m_decoder->video_decoder().setUserData(this);
    m_decoder->video_decoder().setPublishCallback(publishNewFrame);

    if (m_decoder->audio_decoder().validContext())
    {
        OSG_NOTICE<<"Attaching FFmpegAudioStream"<<std::endl;

        getAudioStreams().push_back(new FFmpegAudioStream(m_decoder.get()));
    }

    _status = PAUSED;
    applyLoopingMode();

    start(); // start thread

    return true;
}
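setUserData(this) plus setPublishCallback(publishNewFrame) is the classic C-style callback-with-context pattern: the decoder thread cannot call a member function directly, so a static trampoline recovers the object from the opaque pointer. A sketch of what publishNewFrame could look like under that assumption (the real signature is declared by the decoder):

// Hypothetical trampoline matching the setUserData()/setPublishCallback() pairing;
// the decoder's actual callback signature may differ.
void FFmpegImageStream::publishNewFrame(FFmpegDecoderVideo& decoder, void* userData)
{
    // recover the stream instance registered via setUserData(this)
    FFmpegImageStream* imageStream = static_cast<FFmpegImageStream*>(userData);

    // republish the decoder's frame buffer; setImage() marks the osg::Image dirty
    imageStream->setImage(
        decoder.width(), decoder.height(), 1, GL_RGBA, GL_BGRA, GL_UNSIGNED_BYTE,
        const_cast<unsigned char*>(decoder.image()), NO_DELETE
    );
}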
void MPlayerMediaWidget::readStandardOutput()
{
	QByteArray data = process.readAllStandardOutput();
	standardError.write(data); // forward
	standardError.flush();

	if ((data == "\n") || (data.indexOf("\n\n") >= 0)) {
		process.write("pausing_keep_force get_property path\n");
	}

	bool videoPropertiesChanged = false;
	QStringList audioStreams = getAudioStreams();
	bool audioStreamsChanged = false;
	QStringList subtitles = getSubtitles();
	bool subtitlesChanged = false;

	foreach (const QByteArray &line, data.split('\n')) {
		if (line.startsWith("VO: ")) {
			videoPropertiesChanged = true;
			continue;
		}

		if (line.startsWith("audio stream: ")) {
			int begin = 14;
			int end = line.indexOf(' ', begin);

			if (end < 0) {
				end = line.size();
			}

			int audioStreamIndex = line.mid(begin, end - begin).toInt();

			while (audioStreams.size() < audioStreamIndex) {
				audioStreams.append(QString::number(audioStreams.size() + 1));
			}

			while (audioIds.size() < audioStreamIndex) {
				audioIds.append(-1);
			}

			audioStreams.erase(audioStreams.begin() + audioStreamIndex,
				audioStreams.end());
			audioIds.erase(audioIds.begin() + audioStreamIndex, audioIds.end());
			QString audioStream;
			begin = line.indexOf("language: ");

			if (begin >= 0) {
				begin += 10;
				end = line.indexOf(' ', begin);

				if (end < 0) {
					end = line.size();
				}

				audioStream = line.mid(begin, end - begin);
			}

			if (audioStream.isEmpty()) {
				audioStream = QString::number(audioStreams.size() + 1);
			}

			int audioId = -1;
			begin = line.indexOf("aid: ");

			if (begin >= 0) {
				begin += 5;
				end = line.indexOf('.', begin);

				if (end < 0) {
					end = line.size();
				}

				audioId = line.mid(begin, end - begin).toInt();
			}

			audioStreams.append(audioStream);
			audioIds.append(audioId);
			audioStreamsChanged = true;
			continue;
		}

		if (line.startsWith("subtitle ")) {
			int begin = line.indexOf("( sid ): ");

			if (begin < 0) {
				continue;
			}

			begin += 9;
			int end = line.indexOf(' ', begin);

			if (end < 0) {
				end = line.size();
			}

			int subtitleIndex = line.mid(begin, end - begin).toInt();

			while (subtitles.size() < subtitleIndex) {
				subtitles.append(QString::number(subtitles.size() + 1));
			}

			subtitles.erase(subtitles.begin() + subtitleIndex, subtitles.end());
			QString subtitle;
			begin = line.indexOf("language: ");

			if (begin >= 0) {
				begin += 10;
				end = line.indexOf(' ', begin);

				if (end < 0) {
					end = line.size();
				}

				subtitle = line.mid(begin, end - begin);
			}

			if (subtitle.isEmpty()) {
				subtitle = QString::number(subtitles.size() + 1);
			}

			subtitles.append(subtitle);
			subtitlesChanged = true;
			continue;
		}

		if (line == "ANS_path=(null)") {
			switch (getPlaybackStatus()) {
			case MediaWidget::Idle:
				break;
			case MediaWidget::Playing:
			case MediaWidget::Paused:
				playbackFinished();
				break;
			}

			resetState();
			continue;
		}

		if (line.startsWith("ANS_length=")) {
			int totalTime = (line.mid(11).toFloat() * 1000 + 0.5);
			updateCurrentTotalTime(getCurrentTime(), totalTime);
			continue;
		}

		if (line.startsWith("ANS_time_pos=")) {
			int currentTime = (line.mid(13).toFloat() * 1000 + 0.5);
			updateCurrentTotalTime(currentTime, getTotalTime());
			continue;
		}

		if (line.startsWith("ANS_width=")) {
			videoWidth = line.mid(10).toInt();

			if (videoWidth < 0) {
				videoWidth = 0;
			}

			continue;
		}

		if (line.startsWith("ANS_height=")) {
			videoHeight = line.mid(11).toInt();

			if (videoHeight < 0) {
				videoHeight = 0;
			}

			continue;
		}

		if (line.startsWith("ANS_aspect=")) {
			videoAspectRatio = line.mid(11).toFloat();

			if ((videoAspectRatio <= 0.01) || (videoAspectRatio >= 100)) {
				// reported aspect ratio is implausible; fall back to width / height
				videoAspectRatio = (videoWidth / float(videoHeight));

				if ((videoAspectRatio <= 0.01) || (videoAspectRatio >= 100)) {
					videoAspectRatio = 1;
				}
			}

			updateVideoWidgetGeometry();
			continue;
		}

		if (line.startsWith("ANS_switch_audio=")) {
			int audioId = line.mid(17).toInt();
			updateCurrentAudioStream(audioIds.indexOf(audioId));
			continue;
		}

		if (line.startsWith("ANS_sub=")) {
			int currentSubtitle = line.mid(8).toInt();
			updateCurrentSubtitle(currentSubtitle);
			continue;
		}
	}

	if (videoPropertiesChanged) {
		process.write("pausing_keep_force get_property width\n"
			"pausing_keep_force get_property height\n"
			"pausing_keep_force get_property aspect\n");
	}

	if (audioStreamsChanged) {
		updateAudioStreams(audioStreams);
		process.write("pausing_keep_force get_property switch_audio\n");
	}

	if (subtitlesChanged) {
		updateSubtitles(subtitles);
		process.write("pausing_keep_force get_property sub\n");
	}
}
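Every parsing branch above repeats the same indexOf()/mid() sequence to pull the token that follows a key. A small helper (hypothetical, not part of MPlayerMediaWidget) could centralize that parsing:

// Hypothetical helper factoring out the repeated indexOf()/mid() pattern:
// returns the token following `key` up to `delimiter` (or end of line),
// or an empty QByteArray when the key is absent.
static QByteArray valueAfterKey(const QByteArray &line, const QByteArray &key, char delimiter = ' ')
{
	int begin = line.indexOf(key);

	if (begin < 0) {
		return QByteArray();
	}

	begin += key.size();
	int end = line.indexOf(delimiter, begin);

	if (end < 0) {
		end = line.size();
	}

	return line.mid(begin, end - begin);
}

// usage: int audioId = valueAfterKey(line, "aid: ", '.').toInt();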
bool FFmpegPlayer::open(const std::string & filename, FFmpegParameters* parameters, VideoOutputDevice * pVOD)
{
    av_log(NULL, AV_LOG_INFO, "FFmpeg plugin release version: %d", JAZZROS_FFMPEG_LIBRARY_RELEASE_VERSION_INT);
    av_log(NULL, AV_LOG_INFO, "OS physical RAM size: %d MB", getMemorySize() / 1000000);

    setFileName(filename);

    m_pVOD = pVOD;

    m_vodd = m_pVOD->CreateData();

    if (m_fileHolder.open(filename, parameters, m_vodd->getFFPixelFormat()) < 0)
        return false;

    /**
     * We need to initialize the video output device after the holder has been opened
     * and before opening the streamer.
     */
    if (m_fileHolder.videoIndex() >= 0) {
        if (m_pVOD->Initialize() == 0) {

            if (m_vodd->Initialize(m_pVOD, m_fileHolder.getFrameSize()) != 0) {

                av_log(NULL, AV_LOG_ERROR, "ERROR: Cannot initialize video output device");
                m_fileHolder.close();
                return false;
            }

            pVOD->SetCurrentData (m_vodd);
        }
    }

    if (m_streamer.open(& m_fileHolder, this) < 0)
    {
        m_fileHolder.close();
        return false;
    }
    // If video exists...
    if (m_fileHolder.videoIndex() >= 0)
    {
        m_pVOD->render(m_vodd, m_streamer.getFrame());

        setPixelAspectRatio(m_fileHolder.pixelAspectRatio());

        av_log(NULL, AV_LOG_INFO, "File( %s ) size(%d, %d) aspect ratio %f",
               filename.c_str(),
               s(),t(),
               m_fileHolder.pixelAspectRatio());

        // swscale reports errors and then crashes when rescaling video smaller than 10 by 10.
        if (s()<=10 || t()<=10)
            return false;
    }
    // If audio exists...
    if (m_fileHolder.isHasAudio())
    {
        av_log(NULL, AV_LOG_INFO, "Attaching FFmpegAudioStream");

        getAudioStreams().push_back(new FFmpegAudioStream(& m_fileHolder, & m_streamer));
    }

    _status = PAUSED;
    applyLoopingMode();

    start(); // start thread

    return true;
}
QJsonObject AudioMixerClientData::getAudioStreamStats() {
    QJsonObject result;

    QJsonObject downstreamStats;
    AudioStreamStats streamStats = _downstreamAudioStreamStats;
    downstreamStats["desired"] = streamStats._desiredJitterBufferFrames;
    downstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
    downstreamStats["available"] = (double) streamStats._framesAvailable;
    downstreamStats["unplayed"] = (double) streamStats._unplayedMs;
    downstreamStats["starves"] = (double) streamStats._starveCount;
    downstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
    downstreamStats["overflows"] = (double) streamStats._overflowCount;
    downstreamStats["lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
    downstreamStats["lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
    downstreamStats["min_gap"] = formatUsecTime(streamStats._timeGapMin);
    downstreamStats["max_gap"] = formatUsecTime(streamStats._timeGapMax);
    downstreamStats["avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
    downstreamStats["min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
    downstreamStats["max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
    downstreamStats["avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);

    result["downstream"] = downstreamStats;

    AvatarAudioStream* avatarAudioStream = getAvatarAudioStream();

    if (avatarAudioStream) {
        QJsonObject upstreamStats;

        AudioStreamStats streamStats = avatarAudioStream->getAudioStreamStats();
        upstreamStats["mic.desired"] = streamStats._desiredJitterBufferFrames;
        upstreamStats["desired_calc"] = avatarAudioStream->getCalculatedJitterBufferFrames();
        upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
        upstreamStats["available"] = (double) streamStats._framesAvailable;
        upstreamStats["unplayed"] = (double) streamStats._unplayedMs;
        upstreamStats["starves"] = (double) streamStats._starveCount;
        upstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
        upstreamStats["overflows"] = (double) streamStats._overflowCount;
        upstreamStats["silents_dropped"] = (double) streamStats._framesDropped;
        upstreamStats["lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
        upstreamStats["lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
        upstreamStats["min_gap"] = formatUsecTime(streamStats._timeGapMin);
        upstreamStats["max_gap"] = formatUsecTime(streamStats._timeGapMax);
        upstreamStats["avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
        upstreamStats["min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
        upstreamStats["max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
        upstreamStats["avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);

        result["upstream"] = upstreamStats;
    } else {
        result["upstream"] = "mic unknown";
    }

    QJsonArray injectorArray;
    auto streamsCopy = getAudioStreams();
    for (auto& injectorPair : streamsCopy) {
        if (injectorPair.second->getType() == PositionalAudioStream::Injector) {
            QJsonObject upstreamStats;

            AudioStreamStats streamStats = injectorPair.second->getAudioStreamStats();
            upstreamStats["inj.desired"]  = streamStats._desiredJitterBufferFrames;
            upstreamStats["desired_calc"] = injectorPair.second->getCalculatedJitterBufferFrames();
            upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
            upstreamStats["available"] = (double) streamStats._framesAvailable;
            upstreamStats["unplayed"] = (double) streamStats._unplayedMs;
            upstreamStats["starves"] = (double) streamStats._starveCount;
            upstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
            upstreamStats["overflows"] = (double) streamStats._overflowCount;
            upstreamStats["silents_dropped"] = (double) streamStats._framesDropped;
            upstreamStats["lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
            upstreamStats["lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
            upstreamStats["min_gap"] = formatUsecTime(streamStats._timeGapMin);
            upstreamStats["max_gap"] = formatUsecTime(streamStats._timeGapMax);
            upstreamStats["avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
            upstreamStats["min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
            upstreamStats["max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
            upstreamStats["avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);

            injectorArray.push_back(upstreamStats);
        }
    }

    result["injectors"] = injectorArray;

    return result;
}
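The downstream, upstream, and injector blocks above fill the same set of keys from an AudioStreamStats; a shared helper (hypothetical, not existing in AudioMixerClientData) would keep the three call sites in sync:

// Hypothetical helper consolidating the repeated stats-to-JSON population;
// callers add only the keys that differ ("desired", "mic.desired", "silents_dropped", ...).
static void fillStreamStats(QJsonObject& obj, const AudioStreamStats& stats) {
    obj["available_avg_10s"] = stats._framesAvailableAverage;
    obj["available"] = (double) stats._framesAvailable;
    obj["unplayed"] = (double) stats._unplayedMs;
    obj["starves"] = (double) stats._starveCount;
    obj["not_mixed"] = (double) stats._consecutiveNotMixedCount;
    obj["overflows"] = (double) stats._overflowCount;
    obj["lost%"] = stats._packetStreamStats.getLostRate() * 100.0f;
    obj["lost%_30s"] = stats._packetStreamWindowStats.getLostRate() * 100.0f;
    obj["min_gap"] = formatUsecTime(stats._timeGapMin);
    obj["max_gap"] = formatUsecTime(stats._timeGapMax);
    obj["avg_gap"] = formatUsecTime(stats._timeGapAverage);
    obj["min_gap_30s"] = formatUsecTime(stats._timeGapWindowMin);
    obj["max_gap_30s"] = formatUsecTime(stats._timeGapWindowMax);
    obj["avg_gap_30s"] = formatUsecTime(stats._timeGapWindowAverage);
}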