void AudioMixerSlave::mix(const SharedNodePointer& node) {
    // check that the node is valid
    AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData());
    if (data == nullptr) {
        return;
    }

    if (node->isUpstream()) {
        return;
    }

    // check that the stream is valid
    auto avatarStream = data->getAvatarAudioStream();
    if (avatarStream == nullptr) {
        return;
    }

    // send mute packet, if necessary
    if (AudioMixer::shouldMute(avatarStream->getQuietestFrameLoudness()) || data->shouldMuteClient()) {
        sendMutePacket(node, *data);
    }

    // send audio packets, if necessary
    if (node->getType() == NodeType::Agent && node->getActiveSocket()) {
        ++stats.sumListeners;

        // mix the audio
        bool mixHasAudio = prepareMix(node);

        // send audio packet
        if (mixHasAudio || data->shouldFlushEncoder()) {
            QByteArray encodedBuffer;
            if (mixHasAudio) {
                // encode the audio
                QByteArray decodedBuffer(reinterpret_cast<char*>(_bufferSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
                data->encode(decodedBuffer, encodedBuffer);
            } else {
                // time to flush (resets shouldFlush until the next encode)
                data->encodeFrameOfZeros(encodedBuffer);
            }

            sendMixPacket(node, *data, encodedBuffer);
        } else {
            ++stats.sumListenersSilent;
            sendSilentPacket(node, *data);
        }

        // send environment packet
        sendEnvironmentPacket(node, *data);

        // send stats packet (about every second)
        const unsigned int NUM_FRAMES_PER_SEC = static_cast<unsigned int>(ceil(AudioConstants::NETWORK_FRAMES_PER_SEC));
        if (data->shouldSendStats(_frame % NUM_FRAMES_PER_SEC)) {
            data->sendAudioStreamStatsPackets(node);
        }
    }
}
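
A note on the flush branch above: encodeFrameOfZeros() pushes one frame of silence through the codec so its internal state drains cleanly, and clears the flag that shouldFlushEncoder() reports. The sketch below is a minimal model of that bookkeeping, assuming internals this listing does not show; the codec call and the frame size are stand-ins, not the real AudioMixerClientData.

#include <QByteArray>

class EncoderFlushTracker {
public:
    void encode(const QByteArray& decoded, QByteArray& encoded) {
        encoded = decoded;    // stand-in for the real codec's encode call
        _shouldFlush = true;  // real audio went through, so a flush is now owed
    }

    void encodeFrameOfZeros(QByteArray& encoded) {
        // one frame of silence drains the codec's internal state
        encoded = QByteArray(FRAME_BYTES, '\0');
        _shouldFlush = false; // flush delivered; nothing owed until the next encode
    }

    bool shouldFlushEncoder() const { return _shouldFlush; }

private:
    static const int FRAME_BYTES = 960; // placeholder for NETWORK_FRAME_BYTES_STEREO
    bool _shouldFlush { false };
};
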
Example #2
ORTHANC_PLUGINS_API int32_t DecodeImage(OrthancPluginRestOutput* output,
                                        const char* url,
                                        const OrthancPluginHttpRequest* request)
{
  std::string instance(request->groups[0]);
  std::string outputFormat(request->groups[1]);
  OrthancContext::GetInstance().LogWarning("Using GDCM to decode instance " + instance);

  // Download the requested DICOM instance from Orthanc into a memory buffer
  std::string dicom;
  OrthancContext::GetInstance().GetDicomForInstance(dicom, instance);

  // Prepare a memory stream over the DICOM instance
  std::stringstream stream(dicom);

  // Parse the DICOM instance using GDCM
  gdcm::ImageReader imageReader;
  imageReader.SetStream(stream);
  if (!imageReader.Read())
  {
    OrthancContext::GetInstance().LogError("GDCM cannot extract an image from this DICOM instance");
    AnswerUnsupportedImage(output);
    return 0;
  }

  gdcm::Image& image = imageReader.GetImage();


  // Log information about the decoded image
  char tmp[1024];
  snprintf(tmp, sizeof(tmp), "Image format: %ux%u %s with %d color channel(s)", image.GetRows(), image.GetColumns(), 
           image.GetPixelFormat().GetScalarTypeAsString(), image.GetPixelFormat().GetSamplesPerPixel());
  OrthancContext::GetInstance().LogWarning(tmp);


  // Convert planar configuration
  gdcm::ImageChangePlanarConfiguration planar;
  if (image.GetPlanarConfiguration() != 0 && 
      image.GetPixelFormat().GetSamplesPerPixel() != 1)
  {
    OrthancContext::GetInstance().LogWarning("Converting planar configuration to interleaved");
    planar.SetInput(imageReader.GetImage());
    planar.Change();
    image = planar.GetOutput();
  }


  // Create a read-only accessor to the bitmap decoded by GDCM
  Orthanc::PixelFormat format;
  if (!GetOrthancPixelFormat(format, image))
  {
    OrthancContext::GetInstance().LogError("This sample plugin does not support this image format");
    AnswerUnsupportedImage(output);
    return 0;
  }

  Orthanc::ImageAccessor decodedImage;
  std::vector<char> decodedBuffer(image.GetBufferLength());

  if (!decodedBuffer.empty())
  {
    image.GetBuffer(&decodedBuffer[0]);
    unsigned int pitch = image.GetColumns() * ::Orthanc::GetBytesPerPixel(format);
    decodedImage.AssignWritable(format, image.GetColumns(), image.GetRows(), pitch, &decodedBuffer[0]);
  }
  else
  {
    // Empty image
    decodedImage.AssignWritable(format, 0, 0, 0, nullptr);
  }


  // Convert the pixel format from GDCM to the format requested by the REST query
  Orthanc::ImageBuffer converted;
  converted.SetWidth(decodedImage.GetWidth());
  converted.SetHeight(decodedImage.GetHeight());

  if (outputFormat == "preview")
  {
    if (format == Orthanc::PixelFormat_RGB24 ||
        format == Orthanc::PixelFormat_RGBA32)
    {
      // Do not rescale color image
      converted.SetFormat(Orthanc::PixelFormat_RGB24);
    }
    else
    {
      converted.SetFormat(Orthanc::PixelFormat_Grayscale8);

      // Rescale the image to the [0,255] range
      int64_t a, b;
      Orthanc::ImageProcessing::GetMinMaxValue(a, b, decodedImage);

      // guard against a constant image, where b == a would divide by zero
      float offset = static_cast<float>(-a);
      float scaling = (b > a) ? 255.0f / static_cast<float>(b - a) : 1.0f;
      Orthanc::ImageProcessing::ShiftScale(decodedImage, offset, scaling);
    }
  }
  else
  {
    if (format == Orthanc::PixelFormat_RGB24 ||
        format == Orthanc::PixelFormat_RGBA32)
    {
      // Do not convert color images to grayscale (this is the Orthanc convention)
      AnswerUnsupportedImage(output);
      return 0;
    }

    if (outputFormat == "image-uint8")
    {
      converted.SetFormat(Orthanc::PixelFormat_Grayscale8);
    }
    else if (outputFormat == "image-uint16")
    {
      converted.SetFormat(Orthanc::PixelFormat_Grayscale16);
    }
    else if (outputFormat == "image-int16")
    {
      converted.SetFormat(Orthanc::PixelFormat_SignedGrayscale16);
    }
    else
    {
      OrthancContext::GetInstance().LogError("Unknown output format: " + outputFormat);
      AnswerUnsupportedImage(output);
      return 0;
    }
  }

  Orthanc::ImageAccessor convertedAccessor(converted.GetAccessor());
  Orthanc::ImageProcessing::Convert(convertedAccessor, decodedImage);

  // Compress the converted image as a PNG file
  OrthancContext::GetInstance().CompressAndAnswerPngImage(output, convertedAccessor);

  return 0;  // Success
}
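
For context, a REST callback like this gets registered against a URI regular expression when the plugin loads; the capture groups of that regex are what arrive as request->groups[0] and request->groups[1] above. Below is a plausible registration sketch: OrthancPluginRegisterRestCallback is the standard plugin SDK entry point, but the OrthancContext::Initialize() helper and the exact URI pattern are assumptions based on the usage in DecodeImage().

ORTHANC_PLUGINS_API int32_t OrthancPluginInitialize(OrthancPluginContext* context)
{
  // Assumed helper that binds the singleton wrapper used throughout DecodeImage()
  OrthancContext::GetInstance().Initialize(context);

  // The two capture groups map to request->groups[0] (instance ID) and
  // request->groups[1] (output format) inside DecodeImage()
  OrthancPluginRegisterRestCallback(
    context,
    "/instances/([^/]+)/(preview|image-uint8|image-uint16|image-int16)",
    DecodeImage);

  return 0;
}
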
Example #3
void AudioMixer::broadcastMixes() {
    auto nodeList = DependencyManager::get<NodeList>();

    auto nextFrameTimestamp = p_high_resolution_clock::now();
    auto timeToSleep = std::chrono::microseconds(0);

    const int TRAILING_AVERAGE_FRAMES = 100;
    int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;

    int currentFrame { 1 };
    int numFramesPerSecond { static_cast<int>(ceil(AudioConstants::NETWORK_FRAMES_PER_SEC)) };

    while (!_isFinished) {
        const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
        const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;

        const float RATIO_BACK_OFF = 0.02f;

        const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
        const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;

        if (timeToSleep.count() < 0) {
            timeToSleep = std::chrono::microseconds(0);
        }

        // exponential moving average of the fraction of each frame spent sleeping
        _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio)
            + (timeToSleep.count() * CURRENT_FRAME_RATIO / (float) AudioConstants::NETWORK_FRAME_USECS);

        float lastCutoffRatio = _performanceThrottlingRatio;
        bool hasRatioChanged = false;

        if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
            if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
                // we're struggling, so raise the minimum required loudness to shed some load
                _performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));

                qDebug() << "Mixer is struggling, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
                // we've recovered and can back off the required loudness
                _performanceThrottlingRatio = _performanceThrottlingRatio - RATIO_BACK_OFF;

                if (_performanceThrottlingRatio < 0) {
                    _performanceThrottlingRatio = 0;
                }

                qDebug() << "Mixer is recovering, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            }

            if (hasRatioChanged) {
                // set our minimum audibility threshold from the new ratio
                _minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
                qDebug() << "Minimum audibility required to be mixed is now" << _minAudibilityThreshold;

                framesSinceCutoffEvent = 0;
            }
        }

        if (!hasRatioChanged) {
            ++framesSinceCutoffEvent;
        }

        nodeList->eachNode([&](const SharedNodePointer& node) {

            if (node->getLinkedData()) {
                AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());

                // this function will attempt to pop a frame from each audio stream;
                // a pointer to the popped data is stored as a member in InboundAudioStream,
                // and that is how the popped audio data is read for mixing (but only if the pop succeeded)
                nodeData->checkBuffersBeforeFrameSend();

                // if the stream should be muted, send mute packet
                if (nodeData->getAvatarAudioStream()
                    && shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())) {
                    auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
                    nodeList->sendPacket(std::move(mutePacket), *node);
                }

                if (node->getType() == NodeType::Agent && node->getActiveSocket()
                    && nodeData->getAvatarAudioStream()) {

                    bool mixHasAudio = prepareMixForListeningNode(node.data());

                    std::unique_ptr<NLPacket> mixPacket;

                    if (mixHasAudio) {
                        int mixPacketBytes = sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE 
                                                             + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
                        mixPacket = NLPacket::create(PacketType::MixedAudio, mixPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // write the codec
                        QString codecInPacket = nodeData->getCodecName();
                        mixPacket->writeString(codecInPacket);

                        QByteArray decodedBuffer(reinterpret_cast<char*>(_clampedSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
                        QByteArray encodedBuffer;
                        nodeData->encode(decodedBuffer, encodedBuffer);

                        // pack mixed audio samples
                        mixPacket->write(encodedBuffer.constData(), encodedBuffer.size());
                    } else {
                        int silentPacketBytes = sizeof(quint16) + sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE;
                        mixPacket = NLPacket::create(PacketType::SilentAudioFrame, silentPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // write the codec
                        QString codecInPacket = nodeData->getCodecName();
                        mixPacket->writeString(codecInPacket);

                        // pack number of silent audio samples
                        quint16 numSilentSamples = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
                        mixPacket->writePrimitive(numSilentSamples);
                    }

                    // Send audio environment
                    sendAudioEnvironmentPacket(node);

                    // send mixed audio packet
                    nodeList->sendPacket(std::move(mixPacket), *node);
                    nodeData->incrementOutgoingMixedAudioSequenceNumber();

                    // send an audio stream stats packet to the client approximately every second
                    ++currentFrame;
                    currentFrame %= numFramesPerSecond;

                    if (nodeData->shouldSendStats(currentFrame)) {
                        nodeData->sendAudioStreamStatsPackets(node);
                    }

                    ++_sumListeners;
                }
            }
        });

        ++_numStatFrames;

        // since we're in a while loop, we need to help Qt's event processing along
        QCoreApplication::processEvents();

        if (_isFinished) {
            // at this point the audio-mixer is done
            // check if we have a deferred delete event to process (which we should once finished)
            QCoreApplication::sendPostedEvents(this, QEvent::DeferredDelete);
            break;
        }

        // advance the next-frame timestamp to when the next frame should be sent
        nextFrameTimestamp += std::chrono::microseconds(AudioConstants::NETWORK_FRAME_USECS);

        // sleep until the next frame is due, if there's any time left
        auto now = p_high_resolution_clock::now();
        timeToSleep = std::chrono::duration_cast<std::chrono::microseconds>(nextFrameTimestamp - now);

        std::this_thread::sleep_for(timeToSleep);
    }
}
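
The tail of the loop uses the standard fixed-rate scheduling trick: the deadline is advanced by exactly one frame period each pass, so a frame that runs long simply shortens the next sleep instead of letting timing error accumulate. Here is a standalone sketch of just that pattern; the frame period and the per-frame work are placeholders, not the real constants.

#include <chrono>
#include <thread>

void runAtFixedRate(volatile bool& finished) {
    using clock = std::chrono::high_resolution_clock;
    constexpr std::chrono::microseconds FRAME_PERIOD(10000); // placeholder 10 ms period

    auto nextFrameTimestamp = clock::now();
    while (!finished) {
        // ... one frame of work goes here ...

        // advance an absolute deadline rather than sleeping a fixed amount,
        // so a long frame shortens the following sleep instead of drifting
        nextFrameTimestamp += FRAME_PERIOD;
        auto timeToSleep = nextFrameTimestamp - clock::now();
        if (timeToSleep > std::chrono::microseconds::zero()) {
            std::this_thread::sleep_for(timeToSleep);
        }
    }
}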