void cSemaineEmotionSender::sendDimensionsFSRE_I( cComponentMessage *_msg )
{
  // range check:
  //if (_msg->floatData[0] < 0) _msg->floatData[0] = 0.0;
  //if (_msg->floatData[0] > 1) _msg->floatData[0] = 1.0;
  //--

  char strtmp[50];

  sprintf(strtmp,"%lld",(long long)smileTimeToSemaineTime(_msg->userTime1));
  std::string startTm(strtmp);
  sprintf(strtmp,"%lld",(long long)round((_msg->userTime2 - _msg->userTime1)*1000.0));
  std::string duration(strtmp);

  // bound the copy: msgname may be longer than the local buffer
  snprintf(strtmp,sizeof(strtmp),"%s",(const char *)(_msg->msgname));
  std::string codername(strtmp);

  // Create and fill a simple EMMA EmotionML document
  XERCESC_NS::DOMDocument * document = XMLTool::newDocument(EMMA::E_EMMA, EMMA::namespaceURI, EMMA::version);
  XMLTool::setPrefix(document->getDocumentElement(), "emma");

  XERCESC_NS::DOMElement * interpretation = XMLTool::appendChildElement(document->getDocumentElement(), EMMA::E_INTERPRETATION);
  XMLTool::setAttribute(interpretation, EMMA::A_OFFSET_TO_START, startTm);
  XMLTool::setAttribute(interpretation, EMMA::A_DURATION, duration);
  XMLTool::setAttribute(interpretation, EMMA::A_CONFIDENCE, "1.0");
  XMLTool::setPrefix(interpretation, "emma");
  
  XERCESC_NS::DOMElement * emotion = XMLTool::appendChildElement(interpretation, EmotionML::E_EMOTION, EmotionML::namespaceURI);
  XMLTool::setAttribute(emotion, EmotionML::A_DIMENSION_VOCABULARY, EmotionML::VOC_FSRE_DIMENSION_DEFINITION);
  XMLTool::setAttribute(emotion, EmotionML::A_MODALITY, "voice");
  XMLTool::setPrefix(emotion, "emotion");

  int i; int intIdx = -1;
  sClassifierResults * re = ((sClassifierResults*)(_msg->custData));
  for (i=0; i<re->nFilled; i++) {
    if (!strcmp(re->resnameA[i],intensityStr)) {
      intIdx = i; continue;
    }

    char strtmp[50];
    if (!strcmp(re->resnameA[i],unpredictabilityStr)) {
      re->res[i] = (1.0 - re->res[i])/2.0;   // hack: unpredictability is predicted on an inverted scale
    } else {
      re->res[i] = (re->res[i] + 1.0)/2.0;
    } 
    if (re->res[i] < 0.0) re->res[i] = 0.0;
    if (re->res[i] > 1.0) re->res[i] = 1.0;

    sprintf(strtmp,"%.2f",re->res[i]);
    std::string dimVal(strtmp);
    sprintf(strtmp,"%s",re->resnameA[i]);
    std::string dimStr(strtmp);

    XERCESC_NS::DOMElement * dimension = XMLTool::appendChildElement(emotion, EmotionML::E_DIMENSION, EmotionML::namespaceURI);
    XMLTool::setAttribute(dimension, EmotionML::A_NAME, dimStr); // dimensionStr
    XMLTool::setAttribute(dimension, EmotionML::A_VALUE, dimVal);
    XMLTool::setPrefix(dimension, "emotion");
  }

  XERCESC_NS::DOMElement * info = XMLTool::appendChildElement(emotion, EmotionML::E_INFO);
  XERCESC_NS::DOMElement * coder = XMLTool::appendChildElement(info, "predictor");
  XMLTool::setAttribute(coder, "value", codername);
  XMLTool::setPrefix(coder, "emotion");
  XMLTool::setPrefix(info, "emotion");

  if (intIdx >= 0) {
    XERCESC_NS::DOMElement * intensity = XMLTool::appendChildElement(interpretation, EmotionML::E_EMOTION, EmotionML::namespaceURI);
    XMLTool::setAttribute(intensity, EmotionML::A_DIMENSION_VOCABULARY, EmotionML::VOC_SEMAINE_INTENSITY_DIMENSION_DEFINITON);
    XMLTool::setAttribute(intensity, EmotionML::A_MODALITY, "voice");
    XMLTool::setPrefix(intensity, "emotion");

    sprintf(strtmp,"%.2f",re->res[intIdx]);
    std::string dimVal(strtmp);
    sprintf(strtmp,"%s",re->resnameA[intIdx]);
    std::string dimStr(strtmp);

    XERCESC_NS::DOMElement * idim = XMLTool::appendChildElement(intensity, EmotionML::E_DIMENSION, EmotionML::namespaceURI);
    XMLTool::setAttribute(idim, EmotionML::A_NAME, dimStr); // dimensionStr
    XMLTool::setAttribute(idim, EmotionML::A_VALUE, dimVal);
    XMLTool::setPrefix(idim, "emotion");

    XERCESC_NS::DOMElement * iinfo = XMLTool::appendChildElement(intensity, EmotionML::E_INFO);
    XERCESC_NS::DOMElement * icoder = XMLTool::appendChildElement(iinfo, "predictor");
    XMLTool::setAttribute(icoder, "value", codername);
    XMLTool::setPrefix(icoder, "emotion");
    XMLTool::setPrefix(iinfo, "emotion");
  }

  sendDocument(document);
}
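The loop above maps classifier outputs from [-1,1] into EmotionML's [0,1] dimension range, inverting the unpredictability axis. A minimal standalone sketch of that scaling (hypothetical helper, not part of the original sender):

#include <algorithm>
#include <cstring>

// Maps a classifier output in [-1,1] to [0,1]; the axis named by
// unpredictabilityName is inverted, mirroring the hack in the loop above.
static double scaleToUnitRange(const char *name, const char *unpredictabilityName, double v) {
    if (strcmp(name, unpredictabilityName) == 0)
        v = (1.0 - v) / 2.0;   // inverted axis
    else
        v = (v + 1.0) / 2.0;
    return std::max(0.0, std::min(1.0, v));   // clamp to [0,1], as the loop does
}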
Example #2
 system_clock::time_point system_clock::from_time_t(std::time_t t)
 {
     return time_point(duration(static_cast<system_clock::rep>(t) * 1000000000));
 }
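This from_time_t assumes a clock whose rep counts nanoseconds, so seconds are scaled by 1e9. A quick usage sketch with the standard std::chrono::system_clock (whose own rep/period may differ):

#include <chrono>
#include <cstdio>
#include <ctime>

int main() {
    std::time_t t = std::time(nullptr);
    auto tp = std::chrono::system_clock::from_time_t(t);
    // Round-trips back to the same second.
    std::printf("%ld\n", static_cast<long>(std::chrono::system_clock::to_time_t(tp)));
}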
Example #3
int vrpn_Tng3::syncDatastream (double seconds) {

    struct timeval miniDelay;
    miniDelay.tv_sec = 0;
    miniDelay.tv_usec = 50000;

    unsigned long maxDelay = 1000000L * (long) seconds;
    struct timeval start_time;
    vrpn_gettimeofday(&start_time, NULL);

    int loggedOn = 0;
    int numRead;

    if (serial_fd < 0) {
	return 0;
    }

    // ensure that the packet start byte is valid
    if ( bDataPacketStart != 0x55 && bDataPacketStart != 0xAA ) {
      bDataPacketStart = 0x55;
    }

    vrpn_flush_input_buffer(serial_fd);

//    vrpn_write_characters(serial_fd, (const unsigned char *)"E", 1);
    pause (0.01);

    while (!loggedOn) {
	struct timeval current_time;
	vrpn_gettimeofday(&current_time, NULL);
	if (duration(current_time, start_time) > maxDelay ) {
	    // if we've timed out, go back unhappy
	    fprintf(stderr,"vrpn_Tng3::syncDatastream timeout expired: %d secs\n", (int)seconds);
	    return 0;  // go back unhappy
	}
	
	// get a byte
        if (1 != vrpn_read_available_characters(serial_fd, _buffer, 1, &miniDelay)) {
	    continue;
        }
	// if not a record start, skip
        if (_buffer[0] != bDataPacketStart) {
	    continue;
        }
	// invert the packet start byte for the next test
	bDataPacketStart ^= 0xFF;
	
	// get an entire report
	numRead = vrpn_read_available_characters(serial_fd, 
		    _buffer, DATA_RECORD_LENGTH, &miniDelay);

        if (numRead < DATA_RECORD_LENGTH) {
	    continue;
        }

	// get the start byte for the next packet
        if (1 != vrpn_read_available_characters(serial_fd, _buffer, 1, &miniDelay)) {
	    continue;
        }

	// if not the anticipated record start, things are not yet sync'd
        if (_buffer[0] != bDataPacketStart) {
	    continue;
        }

	// invert the packet start byte in anticipation of normal operation
	bDataPacketStart ^= 0xFF;

	// get an entire report
	numRead = vrpn_read_available_characters(serial_fd, 
			_buffer, DATA_RECORD_LENGTH, &miniDelay);

        if (numRead < DATA_RECORD_LENGTH) {
	    continue;
        }

	return 1;
    }
    return 0;
}
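duration() here is used as the elapsed microseconds between two timevals (it is compared against maxDelay = 1000000 * seconds). A sketch of a helper with that assumed signature:

#include <sys/time.h>

// Assumed semantics of duration(now, then): microseconds elapsed from then to now.
static long duration(const struct timeval &now, const struct timeval &then) {
    return (now.tv_sec - then.tv_sec) * 1000000L + (now.tv_usec - then.tv_usec);
}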
Example #4
void MediaPlayerPrivateAVFoundation::updateStates()
{
    MediaPlayer::NetworkState oldNetworkState = m_networkState;
    MediaPlayer::ReadyState oldReadyState = m_readyState;

    LOG(Media, "MediaPlayerPrivateAVFoundation::updateStates(%p) - entering with networkState = %i, readyState = %i", 
        this, static_cast<int>(m_networkState), static_cast<int>(m_readyState));

    if (m_loadingMetadata)
        m_networkState = MediaPlayer::Loading;
    else {
        // -loadValuesAsynchronouslyForKeys:completionHandler: has invoked its handler; test status of keys and determine state.
        AVAssetStatus avAssetStatus = assetStatus();
        ItemStatus itemStatus = playerItemStatus();
        
        m_assetIsPlayable = (avAssetStatus == MediaPlayerAVAssetStatusPlayable);
        if (m_readyState < MediaPlayer::HaveMetadata && avAssetStatus > MediaPlayerAVAssetStatusLoading) {
            if (m_assetIsPlayable) {
                if (itemStatus == MediaPlayerAVPlayerItemStatusUnknown) {
                    if (avAssetStatus == MediaPlayerAVAssetStatusFailed || m_preload > MediaPlayer::MetaData) {
                        // We may have a playable asset that doesn't support inspection prior to playback; go ahead 
                        // and create the AVPlayerItem now. When the AVPlayerItem becomes ready to play, we will 
                        // have access to its metadata. Or we may have been asked to become ready to play immediately.
                        m_networkState = MediaPlayer::Loading;
                        prepareToPlay();
                    } else
                        m_networkState = MediaPlayer::Idle;
                }
                if (avAssetStatus == MediaPlayerAVAssetStatusLoaded)
                    m_readyState = MediaPlayer::HaveMetadata;
            } else {
                // FIX ME: fetch the error associated with the @"playable" key to distinguish between format 
                // and network errors.
                m_networkState = MediaPlayer::FormatError;
            }
        }
        
        if (avAssetStatus >= MediaPlayerAVAssetStatusLoaded && itemStatus > MediaPlayerAVPlayerItemStatusUnknown) {
            if (seeking())
                m_readyState = m_readyState >= MediaPlayer::HaveMetadata ? MediaPlayer::HaveMetadata : MediaPlayer::HaveNothing;
            else {
                float maxLoaded = maxTimeLoaded();
                switch (itemStatus) {
                case MediaPlayerAVPlayerItemStatusUnknown:
                    break;
                case MediaPlayerAVPlayerItemStatusFailed:
                    m_networkState = MediaPlayer::DecodeError;
                    break;
                case MediaPlayerAVPlayerItemStatusPlaybackLikelyToKeepUp:
                    m_readyState = MediaPlayer::HaveEnoughData;
                    break;
                case MediaPlayerAVPlayerItemStatusReadyToPlay:
                case MediaPlayerAVPlayerItemStatusPlaybackBufferEmpty:
                case MediaPlayerAVPlayerItemStatusPlaybackBufferFull:
                    if (maxLoaded > currentTime())
                        m_readyState = MediaPlayer::HaveFutureData;
                    else
                        m_readyState = MediaPlayer::HaveCurrentData;
                    break;
                }

                if (itemStatus >= MediaPlayerAVPlayerItemStatusReadyToPlay)
                    m_networkState = (maxLoaded == duration()) ? MediaPlayer::Loaded : MediaPlayer::Loading;
            }
        }
    }

    if (isReadyForVideoSetup() && currentRenderingMode() != preferredRenderingMode())
        setUpVideoRendering();

    if (m_networkState != oldNetworkState)
        m_player->networkStateChanged();

    if (m_readyState != oldReadyState)
        m_player->readyStateChanged();

    LOG(Media, "MediaPlayerPrivateAVFoundation::updateStates(%p) - exiting with networkState = %i, readyState = %i", 
        this, static_cast<int>(m_networkState), static_cast<int>(m_readyState));
}
Example #5
void RGBMatrix::write(MasterTimer* timer, QList<Universe *> universes)
{
    Q_UNUSED(timer);

    {
        QMutexLocker algorithmLocker(&m_algorithmMutex);
        if (m_group == NULL)
        {
            // No fixture group to control
            stop(FunctionParent::master());
            return;
        }

        // No time to do anything.
        if (duration() == 0)
            return;

        // Invalid/nonexistent script
        if (m_algorithm == NULL || m_algorithm->apiVersion() == 0)
            return;

        if (isPaused() == false)
        {
            // Get a new map every time elapsed is reset to zero
            if (elapsed() < MasterTimer::tick())
            {
                if (tempoType() == Beats)
                    m_stepBeatDuration = beatsToTime(duration(), timer->beatTimeDuration());

                //qDebug() << "RGBMatrix step" << m_stepHandler->currentStepIndex() << ", color:" << QString::number(m_stepHandler->stepColor().rgb(), 16);
                RGBMap map = m_algorithm->rgbMap(m_group->size(), m_stepHandler->stepColor().rgb(), m_stepHandler->currentStepIndex());
                updateMapChannels(map, m_group);
            }
        }
    }

    // Run the generic fader that takes care of fading in/out individual channels
    m_fader->write(universes, isPaused());

    if (isPaused() == false)
    {
        // Increment the ms elapsed time
        incrementElapsed();

        /* Check if we need to change direction, stop completely or go to next step
         * The cases are:
         * 1- time tempo type: act normally, on ms elapsed time
         * 2- beat tempo type, beat occurred: check if the elapsed beats is a multiple of
         *    the step beat duration. If so, proceed to the next step
         * 3- beat tempo type, not beat: if the ms elapsed time reached the step beat
         *    duration in ms, and the ms time to the next beat is not less than 1/16 of
         *    the step beat duration in ms, then proceed to the next step. If the ms time to the
         *    next beat is less than 1/16 of the step beat duration in ms, then defer the step
         *    change to case #2, to resync the matrix to the next beat
         */
        if (tempoType() == Time && elapsed() >= duration())
        {
            roundCheck();
        }
        else if (tempoType() == Beats)
        {
            if (timer->isBeat())
            {
                incrementElapsedBeats();
                qDebug() << "Elapsed beats:" << elapsedBeats() << ", time elapsed:" << elapsed() << ", step time:" << m_stepBeatDuration;
                if (elapsedBeats() % duration() == 0)
                {
                    roundCheck();
                    resetElapsed();
                }
            }
            else if (elapsed() >= m_stepBeatDuration && (uint)timer->timeToNextBeat() > m_stepBeatDuration / 16)
            {
                qDebug() << "Elapsed exceeded";
                roundCheck();
            }
        }
    }
}
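A standalone sketch of the beat-tempo step rule in case #3 of the comment above (assumed semantics): advance when the elapsed ms reach the step duration, unless the next beat is imminent (within 1/16 of the step), in which case wait and resync on the beat.

static bool shouldStepNow(unsigned elapsedMs, unsigned stepMs, unsigned msToNextBeat) {
    return elapsedMs >= stepMs && msToNextBeat > stepMs / 16;
}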
Example #6
char* ServerMediaSession::generateSDPDescription() {
  AddressString ipAddressStr(ourIPAddress(envir()));
  unsigned ipAddressStrSize = strlen(ipAddressStr.val());

  // For an SSM session, we also need an "a=source-filter: incl ..." line:
  char* sourceFilterLine;
  if (fIsSSM) {
    char const* const sourceFilterFmt =
      "a=source-filter: incl IN IP4 * %s\r\n"
      "a=rtcp-unicast: reflection\r\n";
    unsigned const sourceFilterFmtSize = strlen(sourceFilterFmt) + ipAddressStrSize + 1;

    sourceFilterLine = new char[sourceFilterFmtSize];
    sprintf(sourceFilterLine, sourceFilterFmt, ipAddressStr.val());
  } else {
    sourceFilterLine = strDup("");
  }

  char* rangeLine = NULL; // for now
  char* sdp = NULL; // for now

  do {
    // Count the lengths of each subsession's media-level SDP lines.
    // (We do this first, because the call to "subsession->sdpLines()"
    // causes correct subsession 'duration()'s to be calculated later.)
    unsigned sdpLength = 0;
    ServerMediaSubsession* subsession;
    for (subsession = fSubsessionsHead; subsession != NULL;
	 subsession = subsession->fNext) {
      char const* sdpLines = subsession->sdpLines();
      if (sdpLines == NULL) continue; // the media's not available
      sdpLength += strlen(sdpLines);
    }
    if (sdpLength == 0) break; // the session has no usable subsessions

    // Unless subsessions have differing durations, we also have a "a=range:" line:
    float dur = duration();
    if (dur == 0.0) {
      rangeLine = strDup("a=range:npt=0-\r\n");
    } else if (dur > 0.0) {
      char buf[100];
      sprintf(buf, "a=range:npt=0-%.3f\r\n", dur);
      rangeLine = strDup(buf);
    } else { // subsessions have differing durations, so "a=range:" lines go there
      rangeLine = strDup("");
    }

    char const* const sdpPrefixFmt =
      "v=0\r\n"
      "o=- %ld%06ld %d IN IP4 %s\r\n"
      "s=%s\r\n"
      "i=%s\r\n"
      "t=0 0\r\n"
      "a=tool:%s%s\r\n"
      "a=type:broadcast\r\n"
      "a=control:*\r\n"
      "%s"
      "%s"
      "a=x-qt-text-nam:%s\r\n"
      "a=x-qt-text-inf:%s\r\n"
      "%s";
    sdpLength += strlen(sdpPrefixFmt)
      + 20 + 6 + 20 + ipAddressStrSize
      + strlen(fDescriptionSDPString)
      + strlen(fInfoSDPString)
      + strlen(libNameStr) + strlen(libVersionStr)
      + strlen(sourceFilterLine)
      + strlen(rangeLine)
      + strlen(fDescriptionSDPString)
      + strlen(fInfoSDPString)
      + strlen(fMiscSDPLines);
    sdpLength += 1000; // in case the length of the "subsession->sdpLines()" calls below change
    sdp = new char[sdpLength];
    if (sdp == NULL) break;

    // Generate the SDP prefix (session-level lines):
    snprintf(sdp, sdpLength, sdpPrefixFmt,
	     fCreationTime.tv_sec, fCreationTime.tv_usec, // o= <session id>
	     1, // o= <version> // (needs to change if params are modified)
	     ipAddressStr.val(), // o= <address>
	     fDescriptionSDPString, // s= <description>
	     fInfoSDPString, // i= <info>
	     libNameStr, libVersionStr, // a=tool:
	     sourceFilterLine, // a=source-filter: incl (if a SSM session)
	     rangeLine, // a=range: line
	     fDescriptionSDPString, // a=x-qt-text-nam: line
	     fInfoSDPString, // a=x-qt-text-inf: line
	     fMiscSDPLines); // miscellaneous session SDP lines (if any)

    // Then, add the (media-level) lines for each subsession:
    char* mediaSDP = sdp;
    for (subsession = fSubsessionsHead; subsession != NULL;
	 subsession = subsession->fNext) {
      unsigned mediaSDPLength = strlen(mediaSDP);
      mediaSDP += mediaSDPLength;
      sdpLength -= mediaSDPLength;
      if (sdpLength <= 1) break; // the SDP has somehow become too long

      char const* sdpLines = subsession->sdpLines();
      if (sdpLines != NULL) snprintf(mediaSDP, sdpLength, "%s", sdpLines);
    }
  } while (0);

  delete[] rangeLine; delete[] sourceFilterLine;
  return sdp;
}
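strDup() above is assumed to duplicate a C string with new[] (so the final delete[] is well-formed); a minimal sketch under that assumption:

#include <cstring>

char* strDup(char const* str) {
    if (str == NULL) return NULL;
    size_t len = strlen(str) + 1;    // include the terminating NUL
    char* copy = new char[len];
    memcpy(copy, str, len);
    return copy;
}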
Example #7
cf_clock::time_point cf_clock::from_time_t(const time_t &__t) noexcept
{
    return time_point(duration(CFAbsoluteTime(__t) - kCFAbsoluteTimeIntervalSince1970));
}
Example #8
quint32 Function::totalDuration()
{
    // fall back to duration in case a
    // subclass doesn't provide this method
    return duration();
}
Example #9
void AVPlayer::seekBackward()
{
    demuxer_thread->seekBackward();
    qDebug("seek %f%%", clock->value()/duration()*100.0);
}
Example #10
double GameTime::GetTotalElapsed(void)
{
	boost::posix_time::time_duration duration(m_latestTime - m_startTime);
	return double(duration.total_nanoseconds()*0.000000001);
}
Example #11
double GameTime::GetTotalElapsedNow(void)
{
	boost::posix_time::time_duration duration(boost::posix_time::microsec_clock::local_time() - m_startTime);
	return double(duration.total_nanoseconds()*0.000000001);
}
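These two getters convert a Boost time_duration to fractional seconds via total_nanoseconds(). An equivalent sketch using std::chrono instead of Boost (assumed drop-in semantics):

#include <chrono>

static double elapsedSeconds(std::chrono::steady_clock::time_point start) {
    std::chrono::duration<double> d = std::chrono::steady_clock::now() - start;
    return d.count();   // seconds as double, same value the nanosecond math yields
}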
Example #12
void MplVideoPlayerBackend::durationIsKnown()
{
    emit durationChanged(duration());
}
Example #13
void media_input::open(const std::vector<std::string> &urls, const device_request &dev_request)
{
    assert(urls.size() > 0);

    // Open media objects
    _is_device = dev_request.is_device();
    _media_objects.resize(urls.size());
    for (size_t i = 0; i < urls.size(); i++)
    {
        _media_objects[i].open(urls[i], dev_request);
    }

    // Construct id for this input
    _id = basename(_media_objects[0].url());
    for (size_t i = 1; i < _media_objects.size(); i++)
    {
        _id += '/';
        _id += basename(_media_objects[i].url());
    }

    // Gather metadata
    for (size_t i = 0; i < _media_objects.size(); i++)
    {
        // Note that we may have multiple identical tag names in our metadata
        for (size_t j = 0; j < _media_objects[i].tags(); j++)
        {
            _tag_names.push_back(_media_objects[i].tag_name(j));
            _tag_values.push_back(_media_objects[i].tag_value(j));
        }
    }

    // Gather streams and stream names
    for (size_t i = 0; i < _media_objects.size(); i++)
    {
        for (int j = 0; j < _media_objects[i].video_streams(); j++)
        {
            _video_stream_names.push_back(_media_objects[i].video_frame_template(j).format_info());
        }
    }
    if (_video_stream_names.size() > 1)
    {
        for (size_t i = 0; i < _video_stream_names.size(); i++)
        {
            _video_stream_names[i].insert(0,
                    std::string(1, '#') + str::from(i + 1) + '/'
                    + str::from(_video_stream_names.size()) + ": ");
        }
    }
    for (size_t i = 0; i < _media_objects.size(); i++)
    {
        for (int j = 0; j < _media_objects[i].audio_streams(); j++)
        {
            _audio_stream_names.push_back(_media_objects[i].audio_blob_template(j).format_info());
        }
    }
    if (_audio_stream_names.size() > 1)
    {
        for (size_t i = 0; i < _audio_stream_names.size(); i++)
        {
            _audio_stream_names[i].insert(0,
                    std::string(1, '#') + str::from(i + 1) + '/'
                    + str::from(_audio_stream_names.size()) + ": ");
        }
    }
    for (size_t i = 0; i < _media_objects.size(); i++)
    {
        for (int j = 0; j < _media_objects[i].subtitle_streams(); j++)
        {
            _subtitle_stream_names.push_back(_media_objects[i].subtitle_box_template(j).format_info());
        }
    }
    if (_subtitle_stream_names.size() > 1)
    {
        for (size_t i = 0; i < _subtitle_stream_names.size(); i++)
        {
            _subtitle_stream_names[i].insert(0,
                    std::string(1, '#') + str::from(i + 1) + '/'
                    + str::from(_subtitle_stream_names.size()) + ": ");
        }
    }

    // Set duration information
    _duration = std::numeric_limits<int64_t>::max();
    for (size_t i = 0; i < _media_objects.size(); i++)
    {
        for (int j = 0; j < _media_objects[i].video_streams(); j++)
        {
            int64_t d = _media_objects[i].video_duration(j);
            if (d < _duration)
            {
                _duration = d;
            }
        }
        for (int j = 0; j < _media_objects[i].audio_streams(); j++)
        {
            int64_t d = _media_objects[i].audio_duration(j);
            if (d < _duration)
            {
                _duration = d;
            }
        }
        // Ignore subtitle stream duration; it seems unreliable and is not important anyway.
    }

    // Skip advertisement in 3dtv.at movies. Only works for single media objects.
    // XXX: Disable this since seeking in WMV files is notoriously bad with FFmpeg.
    //try { _initial_skip = str::to<int64_t>(tag_value("StereoscopicSkip")) / 10; } catch (...) { }

    // Find stereo layout and set active video stream(s)
    _supports_stereo_layout_separate = false;
    if (video_streams() == 2)
    {
        int o0, o1, v0, v1;
        get_video_stream(0, o0, v0);
        get_video_stream(1, o1, v1);
        video_frame t0 = _media_objects[o0].video_frame_template(v0);
        video_frame t1 = _media_objects[o1].video_frame_template(v1);
        if (t0.width == t1.width
                && t0.height == t1.height
                && (t0.aspect_ratio <= t1.aspect_ratio && t0.aspect_ratio >= t1.aspect_ratio)
                && t0.layout == t1.layout
                && t0.color_space == t1.color_space
                && t0.value_range == t1.value_range
                && t0.chroma_location == t1.chroma_location)
        {
            _supports_stereo_layout_separate = true;
        }
    }
    if (_supports_stereo_layout_separate)
    {
        _active_video_stream = 0;
        int o, s;
        get_video_stream(_active_video_stream, o, s);
        _video_frame = _media_objects[o].video_frame_template(s);
        _video_frame.stereo_layout = parameters::layout_separate;
    }
    else if (video_streams() > 0)
    {
        _active_video_stream = 0;
        int o, s;
        get_video_stream(_active_video_stream, o, s);
        _video_frame = _media_objects[o].video_frame_template(s);
    }
    else
    {
        _active_video_stream = -1;
    }
    if (_active_video_stream >= 0)
    {
        select_video_stream(_active_video_stream);
    }

    // Set active audio stream
    _active_audio_stream = (audio_streams() > 0 ? 0 : -1);
    if (_active_audio_stream >= 0)
    {
        int o, s;
        get_audio_stream(_active_audio_stream, o, s);
        _audio_blob = _media_objects[o].audio_blob_template(s);
        select_audio_stream(_active_audio_stream);
    }

    // Set active subtitle stream
    _active_subtitle_stream = -1;       // no subtitles by default

    // Print summary
    msg::inf(_("Input:"));
    for (int i = 0; i < video_streams(); i++)
    {
        int o, s;
        get_video_stream(i, o, s);
        msg::inf(4, _("Video %s: %s"), video_stream_name(i).c_str(),
                _media_objects[o].video_frame_template(s).format_name().c_str());
    }
    if (video_streams() == 0)
    {
        msg::inf(4, _("No video."));
    }
    for (int i = 0; i < audio_streams(); i++)
    {
        int o, s;
        get_audio_stream(i, o, s);
        msg::inf(4, _("Audio %s: %s"), audio_stream_name(i).c_str(),
                _media_objects[o].audio_blob_template(s).format_name().c_str());
    }
    if (audio_streams() == 0)
    {
        msg::inf(4, _("No audio."));
    }
    for (int i = 0; i < subtitle_streams(); i++)
    {
        int o, s;
        get_subtitle_stream(i, o, s);
        msg::inf(4, _("Subtitle %s: %s"), subtitle_stream_name(i).c_str(),
                _media_objects[o].subtitle_box_template(s).format_name().c_str());
    }
    if (subtitle_streams() == 0)
    {
        msg::inf(4, _("No subtitle."));
    }
    msg::inf(4, _("Duration: %g seconds"), duration() / 1e6f);
    if (video_streams() > 0)
    {
        msg::inf(4, _("Stereo Video layout: %s"), parameters::stereo_layout_to_string(
                    video_frame_template().stereo_layout, video_frame_template().stereo_layout_swap).c_str());
    }
}
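The stream-name prefixing above produces labels like "#2/3: ..." whenever there is more than one stream of a kind. A standalone sketch of that formatting (hypothetical helper; the original uses its own str::from instead of std::to_string):

#include <string>
#include <vector>

static void prefixStreamNames(std::vector<std::string> &names) {
    if (names.size() <= 1) return;   // only disambiguate when there is a choice
    for (size_t i = 0; i < names.size(); i++)
        names[i].insert(0, "#" + std::to_string(i + 1) + "/"
                + std::to_string(names.size()) + ": ");
}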
Example #14
void cSemaineEmotionSender::sendInterestC( cComponentMessage *_msg )
{
  char strtmp[50];
  sprintf(strtmp,"%.2f",_msg->floatData[0]);
  std::string interestStr(strtmp);

  // Create and fill a simple EMMA EmotionML document
  XERCESC_NS::DOMDocument * document = XMLTool::newDocument(EMMA::E_EMMA, EMMA::namespaceURI, EMMA::version);
  XMLTool::setPrefix(document->getDocumentElement(), "emma");

  sprintf(strtmp,"%ld",smileTimeToSemaineTime(_msg->userTime1));
  std::string startTm(strtmp);
  sprintf(strtmp,"%ld",(long long)round((_msg->userTime2 - _msg->userTime1)*1000.0));
  std::string duration(strtmp);

  XERCESC_NS::DOMElement * oneof = XMLTool::appendChildElement(document->getDocumentElement(), EMMA::E_ONEOF);
  XMLTool::setAttribute(oneof, EMMA::A_OFFSET_TO_START, startTm);
  XMLTool::setAttribute(oneof, EMMA::A_DURATION, duration);
  XMLTool::setPrefix(oneof, "emma");

  double v = 0.0;
  //TODO: update this to new classification result message!  check that probEstim is not NULL....!
  XERCESC_NS::DOMElement * interpretation0 = XMLTool::appendChildElement(oneof, EMMA::E_INTERPRETATION);

  if (_msg->custData != NULL) v = ((double*)(_msg->custData))[0];
  sprintf(strtmp,"%.3f",v);
  std::string conf0(strtmp);
  XMLTool::setAttribute(interpretation0, EMMA::A_CONFIDENCE, conf0);
  XMLTool::setPrefix(interpretation0, "emma");

  XERCESC_NS::DOMElement * emotion0 = XMLTool::appendChildElement(interpretation0, EmotionML::E_EMOTION, EmotionML::namespaceURI);
  XMLTool::setAttribute(emotion0, EmotionML::A_CATEGORY_VOCABULARY , EmotionML::VOC_SEMAINE_INTEREST_CATEGORY_DEFINITION);
  XMLTool::setPrefix(emotion0, "emotion");
  
  XERCESC_NS::DOMElement * category0 = XMLTool::appendChildElement(emotion0, EmotionML::E_CATEGORY, EmotionML::namespaceURI);
  XMLTool::setAttribute(category0, EmotionML::A_NAME, "bored");
  XMLTool::setPrefix(category0, "emotion");

  XERCESC_NS::DOMElement * interpretation1 = XMLTool::appendChildElement(oneof, EMMA::E_INTERPRETATION);
  if (_msg->custData != NULL) v = ((double*)(_msg->custData))[1];
  else v = 0;
  sprintf(strtmp,"%.3f",v);
  std::string conf1(strtmp);
  XMLTool::setAttribute(interpretation1, EMMA::A_CONFIDENCE, conf1);
  XMLTool::setPrefix(interpretation1, "emma");

  XERCESC_NS::DOMElement * emotion1 = XMLTool::appendChildElement(interpretation1, EmotionML::E_EMOTION, EmotionML::namespaceURI);
  XMLTool::setAttribute(emotion1, EmotionML::A_CATEGORY_VOCABULARY , EmotionML::VOC_SEMAINE_INTEREST_CATEGORY_DEFINITION);
  XMLTool::setPrefix(emotion1, "emotion");
  XERCESC_NS::DOMElement * category1 = XMLTool::appendChildElement(emotion1, EmotionML::E_CATEGORY, EmotionML::namespaceURI);
  XMLTool::setAttribute(category1, EmotionML::A_NAME, "neutral");
  XMLTool::setPrefix(category1, "emotion");
  
  XERCESC_NS::DOMElement * interpretation2 = XMLTool::appendChildElement(oneof, EMMA::E_INTERPRETATION);
  if (_msg->custData != NULL) v = ((double*)(_msg->custData))[2];
  else v = 0;
  sprintf(strtmp,"%.3f",v);
  std::string conf2(strtmp);
  XMLTool::setAttribute(interpretation2, EMMA::A_CONFIDENCE, conf2);
  XMLTool::setPrefix(interpretation2, "emma");

  XERCESC_NS::DOMElement * emotion2 = XMLTool::appendChildElement(interpretation2, EmotionML::E_EMOTION, EmotionML::namespaceURI);
  XMLTool::setAttribute(emotion2, EmotionML::A_CATEGORY_VOCABULARY , EmotionML::VOC_SEMAINE_INTEREST_CATEGORY_DEFINITION);
  XMLTool::setPrefix(emotion2, "emotion");
  XERCESC_NS::DOMElement * category2 = XMLTool::appendChildElement(emotion2, EmotionML::E_CATEGORY, EmotionML::namespaceURI);
  XMLTool::setAttribute(category2, EmotionML::A_NAME, "interested");
  XMLTool::setPrefix(category2, "emotion");

  // Now send it
  sendDocument(document);
}
Example #15
void RGBMatrix::roundCheck(const QSize& size)
{
    QMutexLocker algorithmLocker(&m_algorithmMutex);
    if (m_algorithm == NULL)
        return;

    if (runOrder() == PingPong)
    {
        if (m_direction == Forward && (m_step + 1) == m_algorithm->rgbMapStepCount(size))
        {
            m_direction = Backward;
            m_step = m_algorithm->rgbMapStepCount(size) - 2;
            if (m_endColor.isValid())
                m_stepColor = m_endColor;

            updateStepColor(m_step);
        }
        else if (m_direction == Backward && (m_step - 1) < 0)
        {
            m_direction = Forward;
            m_step = 1;
            m_stepColor = m_startColor;
            updateStepColor(m_step);
        }
        else
        {
            if (m_direction == Forward)
                m_step++;
            else
                m_step--;
            updateStepColor(m_step);
        }
    }
    else if (runOrder() == SingleShot)
    {
        if (m_direction == Forward)
        {
            if (m_step >= m_algorithm->rgbMapStepCount(size) - 1)
                stop();
            else
            {
                m_step++;
                updateStepColor(m_step);
            }
        }
        else
        {
            if (m_step <= 0)
                stop();
            else
            {
                m_step--;
                updateStepColor(m_step);
            }
        }
    }
    else
    {
        if (m_direction == Forward)
        {
            if (m_step >= m_algorithm->rgbMapStepCount(size) - 1)
            {
                m_step = 0;
                m_stepColor = m_startColor;
            }
            else
            {
                m_step++;
                updateStepColor(m_step);
            }
        }
        else
        {
            if (m_step <= 0)
            {
                m_step = m_algorithm->rgbMapStepCount(size) - 1;
                if (m_endColor.isValid())
                    m_stepColor = m_endColor;
            }
            else
            {
                m_step--;
                updateStepColor(m_step);
            }
        }
    }

    m_roundTime->restart();
    roundElapsed(duration());
}
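The PingPong branch above flips direction one step before the boundary so the end frames are not rendered twice. A standalone sketch of that stepping rule (hypothetical helper; assumes at least two steps):

static int pingPongNextStep(int step, int stepCount, bool &forward) {
    if (forward && step + 1 == stepCount) { forward = false; return stepCount - 2; }
    if (!forward && step - 1 < 0)         { forward = true;  return 1; }
    return forward ? step + 1 : step - 1;
}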
Example #16
static int create_ffaudiofileformats(JNIEnv *env, AVFormatContext *format_context, jobjectArray *array, jstring url) {
    int res = 0;
    jlong duration_in_microseconds = -1;
    jfloat frame_rate = -1;
    jobject vbr = NULL;
    jboolean big_endian = 1;
    jobject audio_format = NULL;
    jint frame_size = -1;
    jint sample_size = 0;
    int audio_stream_count = 0;
    int audio_stream_number = 0;

    // count possible audio streams
    int i;
    for (i=0; i<format_context->nb_streams; i++) {
        AVStream* stream = format_context->streams[i];
        if (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_stream_count++;
        }
    }

#ifdef DEBUG
    fprintf(stderr, "Found %i audio streams.\n", audio_stream_count);
#endif

    // create output array
    *array = (*env)->NewObjectArray(env, audio_stream_count, (*env)->FindClass(env, "javax/sound/sampled/AudioFileFormat"), NULL);
    if (*array == NULL) {
        goto bail;
    }

#ifdef DEBUG
    fprintf(stderr, "Created audio file format array.\n");
#endif

    // iterate over audio streams
    for (i=0; i<format_context->nb_streams; i++) {
        AVStream* stream = format_context->streams[i];
        if (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            res = ff_open_stream(env, stream);
            if (res) {
                goto bail;
            }

            // create object
            duration_in_microseconds = duration(format_context, stream);
            frame_rate = get_frame_rate(stream, duration_in_microseconds);
            big_endian = ff_big_endian(stream->codec->codec_id);
            if (is_pcm(stream->codec->codec_id)) {
                frame_size = (stream->codec->bits_per_coded_sample / 8) * stream->codec->channels;
            }
            // TODO: Support VBR.

            sample_size = stream->codec->bits_per_coded_sample
                ? stream->codec->bits_per_coded_sample
                : stream->codec->bits_per_raw_sample;

            #ifdef DEBUG
                fprintf(stderr, "stream->codec->bits_per_coded_sample: %i\n", stream->codec->bits_per_coded_sample);
                fprintf(stderr, "stream->codec->bits_per_raw_sample  : %i\n", stream->codec->bits_per_raw_sample);
                fprintf(stderr, "stream->codec->bit_rate             : %i\n", stream->codec->bit_rate);
                fprintf(stderr, "format_context->packet_size         : %i\n", format_context->packet_size);
                fprintf(stderr, "frames     : %" PRId64 "\n", stream->nb_frames);
                fprintf(stderr, "sample_rate: %i\n", stream->codec->sample_rate);
                fprintf(stderr, "sampleSize : %i\n", stream->codec->bits_per_coded_sample);
                fprintf(stderr, "channels   : %i\n", stream->codec->channels);
                fprintf(stderr, "frame_size : %i\n", (int)frame_size);
                fprintf(stderr, "codec_id   : %i\n", stream->codec->codec_id);
                fprintf(stderr, "duration   : %" PRId64 "\n", (int64_t)duration_in_microseconds);
                fprintf(stderr, "frame_rate : %f\n", frame_rate);
                if (big_endian) {
                    fprintf(stderr, "big_endian  : true\n");
                } else {
                    fprintf(stderr, "big_endian  : false\n");
                }
            #endif
            audio_format = create_ffaudiofileformat(env, url,
                                                           stream->codec->codec_id,
                                                           (jfloat)stream->codec->sample_rate,
                                                           sample_size,
                                                           stream->codec->channels,
                                                           frame_size,
                                                           frame_rate,
                                                           big_endian,
                                                           duration_in_microseconds,
                                                           stream->codec->bit_rate,
                                                           vbr);

            (*env)->SetObjectArrayElement(env, *array, audio_stream_number, audio_format);
            audio_stream_number++;

            // clean up
            if (stream && stream->codec) {
                avcodec_close(stream->codec);
            }
        }
    }

bail:
    return res;
}
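get_frame_rate() is assumed to derive frames per second from the stream's frame count and the duration computed above; a minimal sketch under that assumption:

#include <stdint.h>

static float estimate_frame_rate(int64_t nb_frames, int64_t duration_in_microseconds) {
    if (duration_in_microseconds <= 0 || nb_frames <= 0)
        return -1; /* unknown, matching the -1 default above */
    return (float)(nb_frames / (duration_in_microseconds / 1000000.0));
}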
Example #17
  void display()
  {
    std::vector<cv::Point2f> pointsColor, pointsIr;
    cv::Mat color, ir, irGrey, depth;
    cv::Mat colorDisp, irDisp;
    bool foundColor = false;
    bool foundIr = false;
    bool save = false;
    bool running = true;

    std::chrono::milliseconds duration(1);
    while(!update && ros::ok())
    {
      std::this_thread::sleep_for(duration);
    }

    for(; ros::ok() && running;)
    {
      if(update)
      {
        lock.lock();
        color = this->color;
        ir = this->ir;
        irGrey = this->irGrey;
        depth = this->depth;
        foundColor = this->foundColor;
        foundIr = this->foundIr;
        pointsColor = this->pointsColor;
        pointsIr = this->pointsIr;
        update = false;
        lock.unlock();

        if(mode == COLOR || mode == SYNC)
        {
          cv::cvtColor(color, colorDisp, CV_GRAY2BGR);
          cv::drawChessboardCorners(colorDisp, boardDims, pointsColor, foundColor);
          //cv::resize(colorDisp, colorDisp, cv::Size(), 0.5, 0.5);
          //cv::flip(colorDisp, colorDisp, 1);
        }
        if(mode == IR || mode == SYNC)
        {
          cv::cvtColor(irGrey, irDisp, CV_GRAY2BGR);
          cv::drawChessboardCorners(irDisp, boardDims, pointsIr, foundIr);
          //cv::resize(irDisp, irDisp, cv::Size(), 0.5, 0.5);
          //cv::flip(irDisp, irDisp, 1);
        }
      }

      switch(mode)
      {
      case COLOR:
        cv::imshow("color", colorDisp);
        break;
      case IR:
        cv::imshow("ir", irDisp);
        break;
      case SYNC:
        cv::imshow("color", colorDisp);
        cv::imshow("ir", irDisp);
        break;
      }

      int key = cv::waitKey(10);
      switch(key & 0xFF)
      {
      case ' ':
      case 's':
        save = true;
        break;
      case 27:
      case 'q':
        running = false;
        break;
      case '1':
        minIr = std::max(0, minIr - 100);
        break;
      case '2':
        minIr = std::min(maxIr - 1, minIr + 100);
        break;
      case '3':
        maxIr = std::max(minIr + 1, maxIr - 100);
        break;
      case '4':
        maxIr = std::min(0xFFFF, maxIr + 100);
        break;
      case 'l':
        minIr = std::max(0, minIr - 100);
        maxIr = std::max(minIr + 1, maxIr - 100);
        break;
      case 'h':
        maxIr = std::min(0x7FFF, maxIr + 100);
        minIr = std::min(maxIr - 1, minIr + 100);
        break;
      }

      if(save && ((mode == COLOR && foundColor) || (mode == IR && foundIr) || (mode == SYNC && foundColor && foundIr)))
      {
        store(color, ir, irGrey, depth, pointsColor, pointsIr);
        save = false;
      }
    }
    cv::destroyAllWindows();
    cv::waitKey(100);
  }
Example #18
 duration operator- (timestamp that) const
 {
     return duration(m_ticks - that.m_ticks);
 }
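A minimal sketch of the surrounding types this operator assumes: a tick-count timestamp whose difference yields a duration in the same tick unit.

struct duration {
    long long ticks;
    explicit duration(long long t) : ticks(t) {}
};

struct timestamp {
    long long m_ticks;
    duration operator- (timestamp that) const { return duration(m_ticks - that.m_ticks); }
};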
Example #19
cf_clock::time_point cf_clock::now() noexcept
{
    return time_point(duration(CFAbsoluteTimeGetCurrent()));
}
Example #20
/* Caution: This function is executed on all sorts of strange threads, which should
 * not be excessively delayed, deadlocked, or used for anything GUI-related. Primarily,
 * we want to emit signals (which will result in queued slot calls) or do queued method
 * invocation to handle GUI updates. */
GstBusSyncReply VideoPlayerBackend::busHandler(GstBus *bus, GstMessage *msg)
{
    Q_UNUSED(bus);

    switch (GST_MESSAGE_TYPE(msg))
    {
    case GST_MESSAGE_BUFFERING:
        {
            gint percent = 0;
            gst_message_parse_buffering(msg, &percent);
            qDebug() << "gstreamer: buffering" << percent << "%";
            emit bufferingStatus(percent);
        }
        break;

    case GST_MESSAGE_STATE_CHANGED:
        {
            if (m_state == PermanentError)
                break;

            GstState oldState, newState;
            gst_message_parse_state_changed(msg, &oldState, &newState, 0);
            VideoState vpState = m_state;

            switch (newState)
            {
            case GST_STATE_VOID_PENDING:
            case GST_STATE_NULL:
                if (m_state == Error)
                    break;
            case GST_STATE_READY:
                vpState = Stopped;
                break;
            case GST_STATE_PAUSED:
                vpState = Paused;
                emit durationChanged(duration());
                break;
            case GST_STATE_PLAYING:
                vpState = Playing;
                emit durationChanged(duration());
                break;
            }

            if (vpState != m_state)
            {
                VideoState old = m_state;
                m_state = vpState;
                emit stateChanged(m_state, old);
            }
        }
        break;

    case GST_MESSAGE_DURATION:
        emit durationChanged(duration());
        break;

    case GST_MESSAGE_EOS:
        {
            qDebug("gstreamer: end of stream");
            VideoState old = m_state;
            m_state = Done;
            emit stateChanged(m_state, old);
            emit endOfStream();
        }
        break;

    case GST_MESSAGE_ERROR:
        {
            gchar *debug;
            GError *error;

            gst_message_parse_error(msg, &error, &debug);
            qDebug() << "gstreamer: Error:" << error->message;
            qDebug() << "gstreamer: Debug:" << debug;

            /* Set the error message, but don't move to the error state, because that will stop playback,
             * possibly incorrectly. */
            m_errorMessage = QString::fromLatin1(error->message);

            g_free(debug);
            g_error_free(error);
        }
        break;

    case GST_MESSAGE_WARNING:
        {
            gchar *debug;
            GError *error;

            gst_message_parse_warning(msg, &error, &debug);
            qDebug() << "gstreamer: Warning:" << error->message;
            qDebug() << "gstreamer:   Debug:" << debug;

            g_free(debug);
            g_error_free(error);
        }
        break;

    default:
        break;
    }

    return GST_BUS_PASS;
}
Example #21
int main(int argc, char** argv) {
    CommandLineFlags::SetUsage("Converts skottie to an mp4");
    CommandLineFlags::Parse(argc, argv);

    if (FLAGS_input.count() == 0) {
        SkDebugf("-i input_file.json argument required\n");
        return -1;
    }

    SkString assetPath;
    if (FLAGS_assetPath.count() > 0) {
        assetPath.set(FLAGS_assetPath[0]);
    } else {
        assetPath = SkOSPath::Dirname(FLAGS_input[0]);
    }
    SkDebugf("assetPath %s\n", assetPath.c_str());

    auto animation = skottie::Animation::Builder()
        .setResourceProvider(skottie_utils::FileResourceProvider::Make(assetPath))
        .makeFromFile(FLAGS_input[0]);
    if (!animation) {
        SkDebugf("failed to load %s\n", FLAGS_input[0]);
        return -1;
    }

    SkISize dim = animation->size().toRound();
    double duration = animation->duration();
    int fps = FLAGS_fps;
    if (fps < 1) {
        fps = 1;
    } else if (fps > 240) {
        fps = 240;
    }

    if (FLAGS_verbose) {
        SkDebugf("size %dx%d duration %g\n", dim.width(), dim.height(), duration, fps);
    }

    SkVideoEncoder encoder;
    const int frames = SkScalarRoundToInt(duration * fps);

    while (FLAGS_loop) {
        double start = SkTime::GetSecs();
        encoder.beginRecording(dim, fps);
        for (int i = 0; i <= frames; ++i) {
            animation->seek(i * 1.0 / frames);  // normalized time
            animation->render(encoder.beginFrame());
            encoder.endFrame();
        }
        (void)encoder.endRecording();
        if (FLAGS_verbose) {
            double dur = SkTime::GetSecs() - start;
            SkDebugf("%d frames, %g secs, %d fps\n",
                     frames, dur, (int)floor(frames / dur + 0.5));
        }
    }

    encoder.beginRecording(dim, fps);

    for (int i = 0; i <= frames; ++i) {
        double ts = i * 1.0 / fps;
        if (FLAGS_verbose) {
            SkDebugf("rendering frame %d ts %g\n", i, ts);
        }
        animation->seek(i * 1.0 / frames);  // normalized time
        animation->render(encoder.beginFrame());
        encoder.endFrame();
    }

    if (FLAGS_output.count() == 0) {
        SkDebugf("missing -o output_file.mp4 argument\n");
        return 0;
    }

    auto data = encoder.endRecording();
    SkFILEWStream ostream(FLAGS_output[0]);
    if (!ostream.isValid()) {
        SkDebugf("Can't create output file %s\n", FLAGS_output[0]);
        return -1;
    }
    ostream.write(data->data(), data->size());
    return 0;
}
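The render loops seek by normalized time: with frames = duration * fps, frame i is i/frames of the timeline, which equals i/fps seconds. A sketch of the two mappings:

// frame index -> normalized [0,1] timeline position (what Animation::seek takes)
static double frameToNormalized(int i, int frames) { return i * 1.0 / frames; }

// frame index -> seconds (what the verbose log prints as ts)
static double frameToSeconds(int i, int fps) { return i * 1.0 / fps; }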
Example #22
void
Vehicle_pickDeliver::insert(const Order &order) {
    invariant();
    pgassert(!has_order(order));

    auto pick_pos(position_limits(order.pickup()));
    auto deliver_pos(position_limits(order.delivery()));
#ifndef NDEBUG
    std::ostringstream err_log;
    err_log << "\n\tpickup limits (low, high) = ("
        << pick_pos.first << ", "
        << pick_pos.second << ") "
        << "\n\tdeliver limits (low, high) = ("
        << deliver_pos.first << ", "
        << deliver_pos.second << ") "
        << "\noriginal" << tau();
#endif

    if (pick_pos.second < pick_pos.first) {
        /* pickup generates twv (time window violations) everywhere,
         *  so put the order as last */
        push_back(order);
        return;
    }

    if (deliver_pos.second < deliver_pos.first) {
        /* delivery generates twv everywhere,
         *  so put the order as last */
        push_back(order);
        return;
    }
    /*
     * Because delivery positions were estimated without
     * the pickup:
     *   - increase the upper limit position estimation
     */
    ++deliver_pos.second;


    auto d_pos_backup(deliver_pos);
    auto best_pick_pos = m_path.size();
    auto best_deliver_pos = m_path.size() + 1;
    auto current_duration(duration());
    auto min_delta_duration = (std::numeric_limits<double>::max)();
    auto found(false);
    pgassertwm(!has_order(order), err_log.str());
    while (pick_pos.first <= pick_pos.second) {
#ifndef NDEBUG
        err_log << "\n\tpickup cycle limits (low, high) = ("
            << pick_pos.first << ", "
            << pick_pos.second << ") ";
#endif
        Vehicle::insert(pick_pos.first, order.pickup());
#ifndef NDEBUG
        err_log << "\npickup inserted: " << tau();
#endif

        while (deliver_pos.first <= deliver_pos.second) {
            Vehicle::insert(deliver_pos.first, order.delivery());
            orders_in_vehicle.insert(order.id());
            pgassertwm(has_order(order), err_log.str());
#ifndef NDEBUG
            err_log << "\ndelivery inserted: " << tau();
#endif
            if (is_feasable()) {
                pgassert(is_feasable());
                auto delta_duration = duration()-current_duration;
                if (delta_duration < min_delta_duration) {
#ifndef NDEBUG
                    err_log << "\nsuccess" << tau();
#endif
                    min_delta_duration = delta_duration;
                    best_pick_pos = pick_pos.first;
                    best_deliver_pos = deliver_pos.first;
                    found = true;
                }
            }
            Vehicle::erase(deliver_pos.first);
#ifndef NDEBUG
            err_log << "\ndelivery erased: " << tau();
#endif
            ++deliver_pos.first;
        }
        Vehicle::erase(pick_pos.first);
#ifndef NDEBUG
        err_log << "\npickup erased: " << tau();
#endif
        orders_in_vehicle.erase(order.id());
        pgassertwm(!has_order(order), err_log.str());

        deliver_pos = d_pos_backup;
#ifndef NDEBUG
        err_log << "\n\trestoring deliver limits (low, high) = ("
            << deliver_pos.first << ", "
            << deliver_pos.second << ") ";
#endif
        ++pick_pos.first;
    }
    pgassertwm(!has_order(order), err_log.str());
    if (!found) {
        /* order causes twv
         *  so put the order as last */
        push_back(order);
        return;
    }
    Vehicle::insert(best_pick_pos, order.pickup());
    Vehicle::insert(best_deliver_pos, order.delivery());

    orders_in_vehicle.insert(order.id());
    pgassertwm(is_feasable(), err_log.str());
    pgassertwm(has_order(order), err_log.str());
    pgassertwm(!has_cv(), err_log.str());
    invariant();
}
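Note on the search above: it tries every pickup position in its limits and, for each, every delivery position after it, inserting and erasing on the actual path, so one order insertion costs O(n²) trial insertions; the feasible pair with the smallest added duration() wins, and orders that are infeasible everywhere are appended at the end.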
Example #23
static int64_t currentTimeInMills() {
    boost::posix_time::ptime time = boost::posix_time::microsec_clock::local_time();
    boost::posix_time::time_duration duration(time.time_of_day());

    return duration.total_milliseconds();
}
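An equivalent sketch with std::chrono; note it yields milliseconds within the current UTC day, whereas the Boost version above uses local time-of-day:

#include <chrono>
#include <cstdint>

static int64_t currentTimeInMillsChrono() {
    using namespace std::chrono;
    auto since_epoch = system_clock::now().time_since_epoch();
    return duration_cast<milliseconds>(since_epoch).count() % 86400000; // ms per day
}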
Example #24
void SplineBase::computeIndex(float s, int& i, float& u) const {
    int N = time.size();
    debugAssertM(N > 0, "No control points");
    float t0 = time[0];
    float tn = time[N - 1];
    
    if (N < 2) {
        // No control points to work with
        i = 0;
        u = 0.0;
    } else if (extrapolationMode == SplineExtrapolationMode::CYCLIC) {
        float fi = getFinalInterval();
        
        // Cyclic spline
        if ((s < t0) || (s >= tn + fi)) {
            // Cyclic, off the bottom or top
            
            // Compute offset and reduce to the in-bounds case

            float d = duration();
            // Number of times we wrapped around the cyclic array
            int wraps = iFloor((s - t0) / d);
            
            debugAssert(s - d * wraps >= t0);
            debugAssert(s - d * wraps < tn + getFinalInterval());

            computeIndex(s - d * wraps, i, u);
            i += wraps * N;
            
        } else if (s >= tn) {
            debugAssert(s < tn + fi);
            // Cyclic, off the top but before the end of the last interval
            i = N - 1;
            u = (s - tn) / fi;
            
        } else {
            // Cyclic, in bounds
            computeIndexInBounds(s, i, u);
        }
        
    } else {
        // Non-cyclic
        
        if (s < t0) {
            // Non-cyclic, off the bottom.  Assume points are spaced
            // following the first time interval.
            
            float dt = time[1] - t0;
            float x = (s - t0) / dt;
            i = iFloor(x);
            u = x - i;
            
        } else if (s >= tn) {
            // Non-cyclic, off the top.  Assume points are spaced following
            // the last time interval.
            
            float dt = tn - time[N - 2];
            float x = N - 1 + (s - tn) / dt;
            i = iFloor(x);
            u = x - i;  
            
        } else {
            // In bounds, non-cyclic.  Assume a regular
            // distribution (which gives O(1) for uniform spacing)
            // and then binary search to handle the general case
            // efficiently.
            computeIndexInBounds(s, i, u);
            
        } // if in bounds
    } // extrapolation Mode
}
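Worked example of the cyclic wrap above: with t0 = 0, duration() d = 10 and s = 23.5, wraps = floor(23.5 / 10) = 2; the recursive call uses s - d * wraps = 3.5, and the resulting index is shifted by wraps * N, so callers see a monotonically increasing index across cycles.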
Example #25
qreal QNumberStyleAnimation::currentValue() const
{
    qreal step = qreal(currentTime() - delay()) / (duration() - delay());
    return _start + qMax(qreal(0), step) * (_end - _start);
}
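The formula interpolates linearly once the delay has elapsed (step is clamped below at 0 but, as in the Qt source above, not above 1). A standalone sketch with hypothetical names:

#include <algorithm>

static double animatedValue(double t, double delay, double duration,
                            double start, double end) {
    double step = (t - delay) / (duration - delay);   // assumes duration > delay
    return start + std::max(0.0, step) * (end - start);
}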
Example #26
void
ProgressBar::paintEvent( QPaintEvent* e )
{
    StylableWidget::paintEvent( e );

    QPainter p( this );

    StopWatch* sw = ScrobbleService::instance().stopWatch();

    if ( !m_track.isNull() )
    {
        QFont timeFont = font();
        timeFont.setPixelSize( 10 );
        setFont( timeFont );

        p.setPen( QColor( 0x333333 ) );

        if ( m_track.extra( "playerId" ) != "spt" )
        {
            if ( m_track.duration() >= 30 )
            {
                QString format( "m:ss" );

                QTime duration( 0, 0 );
                duration = duration.addMSecs( m_track.duration() * 1000 );
                QTime progress( 0, 0 );
                progress = progress.addMSecs( m_frame );

                if ( duration.hour() > 0 )
                    format = "h:mm:ss";

                QTextOption timeTextOption;
                timeTextOption.setAlignment( Qt::AlignVCenter | Qt::AlignLeft );

                QString timeText;

                if ( m_track.source() == Track::LastFmRadio )
                    timeText = QString( "%1 / %2" ).arg( progress.toString( format ), duration.toString( format ) );
                else
                    timeText = QString( "%1" ).arg( progress.toString( format ) );

                p.setPen( QColor( 0x333333 ) );
                p.drawText( rect().adjusted( 6, 0, 0, 0 ), timeText, timeTextOption );

                QFontMetrics fm( font() );
                int indent = fm.width( timeText ) + ( 2 * 6 ) + 2;

                int width = this->width() - indent;

                p.setPen( QColor( 0xbdbdbd ));
                p.drawLine( QPoint( indent - 2, rect().top() ),
                            QPoint( indent - 2, rect().bottom() - 1 ) );

                p.setPen( QColor( 0xe6e6e6 ) );
                p.drawLine( QPoint( indent - 1, rect().top() ),
                            QPoint( indent - 1, rect().bottom() - 1 ) );


                // draw the chunk
                p.setPen( Qt::transparent );
                p.setBrush( m_chunk );
                p.drawRect( rect().adjusted( indent, 0, ((m_frame * width) / (m_track.duration() * 1000)) - width, -1) );

                //bool scrobblingOn = unicorn::UserSettings().value( "scrobblingOn", true ).toBool();
                bool scrobblingOn = ScrobbleService::instance().scrobblableTrack( m_track );

                if ( scrobblingOn ||
                     ( !scrobblingOn && m_track.scrobbleStatus() != Track::Null ) )
                {
                    if ( !scrobblingOn && m_track.scrobbleStatus() != Track::Null )
                    {
                        QTextOption textOption;
                        textOption.setAlignment( Qt::AlignVCenter | Qt::AlignRight );
                        p.drawText( rect().adjusted( 0, 0, -6, 0 ), tr( "Scrobbling off" ), textOption );
                    }

                    uint scrobblePoint = sw->scrobblePoint() * 1000;

                    int scrobbleMarker = indent + (scrobblePoint * width) / ( m_track.duration() * 1000 );

                    p.setPen( QPen( QColor( 0xbdbdbd ), 1, Qt::DotLine) );
                    p.drawLine( QPoint( scrobbleMarker - 1, rect().top() ),
                                QPoint( scrobbleMarker - 1, rect().bottom() ) );

                    p.setPen( QPen( QColor( 0xe6e6e6 ), 1, Qt::DotLine) );
                    p.drawLine( QPoint( scrobbleMarker, rect().top() ),
                                QPoint( scrobbleMarker, rect().bottom() ) );

                    // Draw the 'as'!
                    // if the scrobble marker is too close to the left draw the 'as' on the right hand side
                    QPoint asPoint;
                    QImage as( m_track.scrobbleStatus() != Track::Null ? m_scrobbleMarkerOn : m_scrobbleMarkerOff );

                    if ( ( as.width() + 10 ) > scrobbleMarker - indent )
                        asPoint = QPoint ( scrobbleMarker + 5 , (rect().height() / 2) - (as.height() / 2) );
                    else
                        asPoint = QPoint ( scrobbleMarker - as.width() - 5, (rect().height() / 2) - (as.height() / 2) );

                    p.drawImage( asPoint, as );

                }
                else
                {
                    QString offMessage = tr( "Not scrobbling" );

                    if ( unicorn::UserSettings().value( "scrobblingOn", true ).toBool() )
                    {
                        if ( m_track.isVideo() )
                            offMessage = tr( "Not scrobbling - not a music video" );
                        else if ( !unicorn::UserSettings().value( "podcasts", true ).toBool() && m_track.isPodcast() )
                            offMessage = tr( "Not scrobbling - podcasts disabled" );
                        else if ( m_track.artist().isNull() )
                            offMessage = tr( "Not scrobbling - missing aritst" );
                    }
                    else
                    {
                        offMessage = tr( "Not scrobbling - scrobbling disabled" );
                    }

                    p.setPen( QColor( 0x333333 ) );
                    QTextOption textOption;
                    textOption.setAlignment( Qt::AlignVCenter | Qt::AlignRight );
                    p.drawText( rect().adjusted( 0, 0, -6, 0 ), offMessage, textOption );
                }
            }
            else
            {
                QTextOption textOption;
                textOption.setAlignment( Qt::AlignVCenter | Qt::AlignRight );
                p.drawText( rect().adjusted( 0, 0, -6, 0 ), tr( "Not scrobbling - track too short" ), textOption );
            }
        }
        else
        {
            QTextOption textOption;
            textOption.setAlignment( Qt::AlignVCenter | Qt::AlignRight );
            p.drawText( rect().adjusted( 0, 0, -6, 0 ), tr("Enable scrobbling in Spotify's preferences!"), textOption );
        }
    }
}
Example #27
void MediaPlayerPrivateAVFoundation::updateStates()
{
    if (m_ignoreLoadStateChanges)
        return;

    MediaPlayer::NetworkState oldNetworkState = m_networkState;
    MediaPlayer::ReadyState oldReadyState = m_readyState;

    LOG(Media, "MediaPlayerPrivateAVFoundation::updateStates(%p) - entering with networkState = %i, readyState = %i", 
        this, static_cast<int>(m_networkState), static_cast<int>(m_readyState));

    if (m_loadingMetadata)
        m_networkState = MediaPlayer::Loading;
    else {
        // -loadValuesAsynchronouslyForKeys:completionHandler: has invoked its handler; test status of keys and determine state.
        AssetStatus assetStatus = this->assetStatus();
        ItemStatus itemStatus = playerItemStatus();
        
        m_assetIsPlayable = (assetStatus == MediaPlayerAVAssetStatusPlayable);
        if (m_readyState < MediaPlayer::HaveMetadata && assetStatus > MediaPlayerAVAssetStatusLoading) {
            if (m_assetIsPlayable) {
                if (assetStatus >= MediaPlayerAVAssetStatusLoaded)
                    m_readyState = MediaPlayer::HaveMetadata;
                if (itemStatus <= MediaPlayerAVPlayerItemStatusUnknown) {
                    if (assetStatus == MediaPlayerAVAssetStatusFailed || m_preload > MediaPlayer::MetaData || isLiveStream()) {
                        // The asset is playable but doesn't support inspection prior to playback (e.g. streaming files),
                        // or we are supposed to prepare for playback immediately, so create the player item now.
                        m_networkState = MediaPlayer::Loading;
                        prepareToPlay();
                    } else
                        m_networkState = MediaPlayer::Idle;
                }
            } else {
                // FIXME: fetch the error associated with the @"playable" key to distinguish between format
                // and network errors.
                m_networkState = MediaPlayer::FormatError;
            }
        }
        
        if (assetStatus >= MediaPlayerAVAssetStatusLoaded && itemStatus > MediaPlayerAVPlayerItemStatusUnknown) {
            switch (itemStatus) {
            case MediaPlayerAVPlayerItemStatusDoesNotExist:
            case MediaPlayerAVPlayerItemStatusUnknown:
            case MediaPlayerAVPlayerItemStatusFailed:
                break;

            case MediaPlayerAVPlayerItemStatusPlaybackLikelyToKeepUp:
            case MediaPlayerAVPlayerItemStatusPlaybackBufferFull:
                // If the status becomes PlaybackBufferFull, loading stops and the status will not
                // progress to LikelyToKeepUp. Set the readyState to HAVE_ENOUGH_DATA, on the
                // presumption that if the playback buffer is full, playback will probably not stall.
                m_readyState = MediaPlayer::HaveEnoughData;
                break;

            case MediaPlayerAVPlayerItemStatusReadyToPlay:
                // If the readyState is already HaveEnoughData, don't go lower because of this state change.
                if (m_readyState == MediaPlayer::HaveEnoughData)
                    break;
                // Otherwise deliberately fall through: the buffer-empty case below picks a
                // readyState based on how much data has been loaded.

            case MediaPlayerAVPlayerItemStatusPlaybackBufferEmpty:
                if (maxTimeLoaded() > currentTime())
                    m_readyState = MediaPlayer::HaveFutureData;
                else
                    m_readyState = MediaPlayer::HaveCurrentData;
                break;
            }

            if (itemStatus == MediaPlayerAVPlayerItemStatusPlaybackBufferFull)
                m_networkState = MediaPlayer::Idle;
            else if (itemStatus == MediaPlayerAVPlayerItemStatusFailed)
                m_networkState = MediaPlayer::DecodeError;
            else if (itemStatus != MediaPlayerAVPlayerItemStatusPlaybackBufferFull && itemStatus >= MediaPlayerAVPlayerItemStatusReadyToPlay)
                m_networkState = (maxTimeLoaded() == duration()) ? MediaPlayer::Loaded : MediaPlayer::Loading;
        }
    }

    if (isReadyForVideoSetup() && currentRenderingMode() != preferredRenderingMode())
        setUpVideoRendering();

    if (!m_haveReportedFirstVideoFrame && m_cachedHasVideo && hasAvailableVideoFrame()) {
        if (m_readyState < MediaPlayer::HaveCurrentData)
            m_readyState = MediaPlayer::HaveCurrentData;
        m_haveReportedFirstVideoFrame = true;
        m_player->firstVideoFrameAvailable();
    }

    if (m_networkState != oldNetworkState)
        m_player->networkStateChanged();

    if (m_readyState != oldReadyState)
        m_player->readyStateChanged();

    if (m_playWhenFramesAvailable && hasAvailableVideoFrame()) {
        m_playWhenFramesAvailable = false;
        platformPlay();
    }

    LOG(Media, "MediaPlayerPrivateAVFoundation::updateStates(%p) - exiting with networkState = %i, readyState = %i", 
        this, static_cast<int>(m_networkState), static_cast<int>(m_readyState));
}
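The ordered comparisons on m_readyState above rely on the HTML media element's readyState ordering, HAVE_NOTHING through HAVE_ENOUGH_DATA. A sketch of that ordering for reference (illustrative; WebCore's actual declaration lives in MediaPlayer.h):

// Each state implies all the earlier ones, which is what makes ordered
// comparisons like m_readyState < MediaPlayer::HaveCurrentData meaningful.
enum ReadyState { HaveNothing, HaveMetadata, HaveCurrentData, HaveFutureData, HaveEnoughData };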
Example #28
// Convenience overload: computes the duration of _t by delegating to the
// two-argument duration(_t, _duration) overload and returning the result.
// The enclosing template <typename T> declaration is assumed here; the
// snippet as captured did not include it.
template <typename T>
typename functor::Duration<T>::duration_type duration(const T& _t)
{
  typename functor::Duration<T>::duration_type _duration = 0.0;
  duration(_t, _duration);
  return _duration;
}
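The two-argument overload this wrapper delegates to is not part of the snippet; a minimal sketch of the shape it could take, assuming functor::Duration<T> is a callable traits class (hypothetical, for illustration only):

// Hypothetical companion overload; the real signature lives elsewhere in the
// library. Writes the computed duration into the out-parameter.
template <typename T>
void duration(const T& _t, typename functor::Duration<T>::duration_type& _duration)
{
  _duration = functor::Duration<T>()(_t);   // assumes Duration<T> is callable
}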
Example #29
frame_builder::frame_builder(const config& cfg,const std::string& frame_string) :
	duration_(1),
	image_(cfg[frame_string + "image"]),
	image_diagonal_(cfg[frame_string + "image_diagonal"]),
	image_mod_(cfg[frame_string + "image_mod"]),
	halo_(cfg[frame_string + "halo"]),
	halo_x_(cfg[frame_string + "halo_x"]),
	halo_y_(cfg[frame_string + "halo_y"]),
	halo_mod_(cfg[frame_string + "halo_mod"]),
	sound_(cfg[frame_string + "sound"]),
	text_(cfg[frame_string + "text"]),
	text_color_(0),
	blend_with_(0),
	blend_ratio_(cfg[frame_string + "blend_ratio"]),
	highlight_ratio_(cfg[frame_string + "alpha"]),
	offset_(cfg[frame_string + "offset"]),
	submerge_(cfg[frame_string + "submerge"]),
	x_(cfg[frame_string + "x"]),
	y_(cfg[frame_string + "y"]),
	directional_x_(cfg[frame_string + "directional_x"]),
	directional_y_(cfg[frame_string + "directional_y"]),
	auto_vflip_(t_unset),
	auto_hflip_(t_unset),
	primary_frame_(t_unset),
	drawing_layer_(cfg[frame_string + "layer"])
{
	if(!cfg.has_attribute(frame_string + "auto_vflip")) {
		auto_vflip_ = t_unset;
	} else if(cfg[frame_string + "auto_vflip"].to_bool()) {
		auto_vflip_ = t_true;
	} else {
		auto_vflip_ = t_false;
	}
	if(!cfg.has_attribute(frame_string + "auto_hflip")) {
		auto_hflip_ = t_unset;
	} else if(cfg[frame_string + "auto_hflip"].to_bool()) {
		auto_hflip_ = t_true;
	} else {
		auto_hflip_ = t_false;
	}
	if(!cfg.has_attribute(frame_string + "primary")) {
		primary_frame_ = t_unset;
	} else if(cfg[frame_string + "primary"].to_bool()) {
		primary_frame_ = t_true;
	} else {
		primary_frame_ = t_false;
	}
	std::vector<std::string> color = utils::split(cfg[frame_string + "text_color"]);
	if (color.size() == 3) {
		text_color_ = display::rgb(atoi(color[0].c_str()),
			atoi(color[1].c_str()), atoi(color[2].c_str()));
	}

	if (const config::attribute_value *v = cfg.get(frame_string + "duration")) {
		duration(*v);
	} else if (!cfg.get(frame_string + "end")) {
		int halo_duration = (progressive_string(halo_,1)).duration();
		int image_duration = (progressive_image(image_,1)).duration();
		int image_diagonal_duration = (progressive_image(image_diagonal_,1)).duration();
		duration(std::max(std::max(image_duration,image_diagonal_duration),halo_duration));
	} else {
		duration(cfg[frame_string + "end"].to_int() - cfg[frame_string + "begin"].to_int());
	}
	duration_ = std::max(duration_,1);

	color = utils::split(cfg[frame_string + "blend_color"]);
	if (color.size() == 3) {
		blend_with_ = display::rgb(atoi(color[0].c_str()),
			atoi(color[1].c_str()), atoi(color[2].c_str()));
	}
}
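A hedged usage sketch: building a single frame from a WML config. The keys follow the frame_string-prefix scheme read by the constructor above; the concrete file name and values are invented for illustration:

// Hypothetical frame definition (frame_string == "frame_" here).
config cfg;
cfg["frame_image"] = "units/elf-archer.png";    // read as image_
cfg["frame_duration"] = 150;                    // milliseconds; clamped to >= 1
cfg["frame_text_color"] = "255,255,255";        // parsed into text_color_
frame_builder builder(cfg, "frame_");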
Example #30
/* Conversion to system time */
static
time_point to_system_time(const time_point& t) {
	// Cached offset (ns) from the TSC clock to the system clock, plus the TSC
	// timestamp of the last calibration; the flag serializes re-calibration.
	static std::atomic<int64_t> ns_offset(0);
	static std::atomic<int64_t> offset_timestamp(0);
	static std::atomic_flag lock = ATOMIC_FLAG_INIT;

	static const duration OFFSET_TIMEOUT (15 * (int64_t)1E9, time_unit::NSEC);
	static const duration CLOSE_DISTANCE (15 * (int64_t)1E3, time_unit::NSEC);
	static const uint64_t MAX_UPDATE_TRIES (100);

	time_point current_tsc_time = tsc_clock::now();
	time_unit tsc_unit = current_tsc_time.time_since_epoch().unit();

	int64_t offset_ts = offset_timestamp.load(std::memory_order_acquire);

	if (offset_ts == 0 ||
			current_tsc_time.time_since_epoch() - duration(offset_ts, tsc_unit) > OFFSET_TIMEOUT
		)
	{
		if (!lock.test_and_set(std::memory_order_acquire)) {
			time_point cycles_start, cycles_end;
			time_point current_system_time;

			// Try to bracket a system-clock sample between two TSC samples; the
			// pair is usable only if the bracket is tight (within CLOSE_DISTANCE).
			bool close_pair_found = false;
			for (uint64_t update_try = 0; update_try < MAX_UPDATE_TRIES; ++update_try) {
				cycles_start = tsc_clock::now();
				current_system_time = system_clock::now();
				cycles_end = tsc_clock::now();

				if (cycles_end - cycles_start < CLOSE_DISTANCE) {
					close_pair_found = true;
					break;
				}
			}

			if (close_pair_found) {
				time_point cycles_middle = cycles_start + (cycles_end - cycles_start) / 2;
				int64_t new_offset =
					duration::convert_to(
						time_unit::NSEC,
						current_system_time.time_since_epoch() - cycles_middle.time_since_epoch()
					)
					.count();

				ns_offset.store(new_offset, std::memory_order_release);
				offset_timestamp.store(cycles_middle.time_since_epoch().count(), std::memory_order_release);
			}

			lock.clear(std::memory_order_release);
		}
	}

	return
		time_point(
			duration::convert_to(
				time_unit::NSEC,
				t.time_since_epoch() + duration(ns_offset.load(std::memory_order_acquire), time_unit::NSEC)
			),
			clock_type::SYSTEM
		);
}
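A minimal usage sketch under the same assumptions (tsc_clock, time_point, duration and time_unit are the surrounding library's types; nothing new is introduced):

// Convert a raw TSC reading into an approximate system-clock time_point,
// expressed in nanoseconds since the system epoch.
time_point tsc_now = tsc_clock::now();
time_point wall_time = to_system_time(tsc_now);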