void VideoSource::toggleMute()
{
    session->getVPMSession()->enableSource( ssrc, isMuted() );
    enableRendering = !isMuted();
    if ( isMuted() )
    {
        baseBColor.R = 1.0f;
        baseBColor.G = 0.1f;
        baseBColor.B = 0.15f;
        // see resetColor for why we check for selected here
        if ( !selected )
            setColor( baseBColor );
        setSecondaryColor( baseBColor );
    }
    else
    {
        resetColor();
        destSecondaryColor.R = 0.0f;
        destSecondaryColor.G = 0.0f;
        destSecondaryColor.B = 0.0f;
        destSecondaryColor.A = 0.0f;
        setSecondaryColor( destSecondaryColor );
    }
}

void TelegramGui::sendNotify(quint64 msg_id)
{
    QStringList actions;
    if( desktopSession() != Enums::Unity )
    {
        actions << QString("%1:%2").arg(NOTIFY_ACT_SHOW).arg(msg_id) << tr("Show");
        actions << QString("%1:%2").arg(NOTIFY_ACT_MUTE).arg(msg_id) << tr("Mute");
//        actions << QString("%1:%2").arg(NOTIFY_ACT_RMND).arg(msg_id) << tr("Mute & Remind");
    }

    int to_id = p->tg->messageToId(msg_id);
    int from_id = p->tg->messageFromId(msg_id);
    if( from_id == p->tg->me() )
        return;
    if( isMuted(to_id) || isMuted(from_id) )
        return;

    QString title = p->tg->messageFromName(msg_id);
    QString icon = p->tg->getPhotoPath(from_id);
    QString body = p->tg->messageBody(msg_id);
    if( p->tg->dialogIsChat(to_id) )
        title += tr("at %1").arg(p->tg->dialogTitle(to_id));
    else if( p->tg->dialogIsChat(from_id) )
        title += tr("at %1").arg(p->tg->dialogTitle(from_id));

    p->notify->sendNotify( title, body, icon, 0, 3000, actions );
}

bool LLAudioSource::play(const LLUUID &audio_uuid)
{
    // Special abuse of play(); don't play a sound, but kill it.
    if (audio_uuid.isNull())
    {
        if (getChannel())
        {
            getChannel()->setSource(NULL);
            setChannel(NULL);
            if (!isMuted())
            {
                mCurrentDatap = NULL;
            }
        }
        return false;
    }

    // Reset our age timeout if someone attempts to play the source.
    mAgeTimer.reset();

    if (!gAudiop)
    {
        LL_WARNS("AudioEngine") << "LLAudioEngine instance doesn't exist!" << LL_ENDL;
        return false;
    }

    LLAudioData *adp = gAudiop->getAudioData(audio_uuid);
    addAudioData(adp);

    if (isMuted())
    {
        return false;
    }

    bool has_buffer = gAudiop->updateBufferForData(adp, audio_uuid);
    if (!has_buffer)
    {
        // Don't bother trying to set up a channel or anything; we don't have an audio buffer.
        return false;
    }

    if (!setupChannel())
    {
        return false;
    }

    if (isSyncSlave())
    {
        // A sync slave doesn't start playing until it's synced up with the master.
        // Flag this channel as waiting for sync, and return true.
        getChannel()->setWaiting(true);
        return true;
    }

    getChannel()->play();
    return true;
}

bool LLAudioSource::play(const LLUUID &audio_uuid)
{
    // Special abuse of play(); don't play a sound, but kill it.
    if (audio_uuid.isNull())
    {
        if (getChannel())
        {
            llassert(this == getChannel()->getSource());
            getChannel()->setSource(NULL);
            if (!isMuted())
            {
                mCurrentDatap = NULL;
            }
        }
        return false;
    }

    // <edit>
    if(mType != LLAudioEngine::AUDIO_TYPE_UI) //&& mSourceID.notNull())
        logSoundPlay(this, audio_uuid);
    // </edit>

    // Reset our age timeout if someone attempts to play the source.
    mAgeTimer.reset();

    LLAudioData *adp = gAudiop->getAudioData(audio_uuid);
    addAudioData(adp);

    if (isMuted())
    {
        return false;
    }

    bool has_buffer = gAudiop->updateBufferForData(adp, audio_uuid);
    if (!has_buffer)
    {
        // Don't bother trying to set up a channel or anything; we don't have an audio buffer.
        return false;
    }

    if (!setupChannel())
    {
        return false;
    }

    if (isSyncSlave())
    {
        // A sync slave doesn't start playing until it's synced up with the master.
        // Flag this channel as waiting for sync, and return true.
        getChannel()->setWaiting(true);
        return true;
    }

    getChannel()->play();
    return true;
}

BOOL LLMuteList::autoRemove(const LLUUID& agent_id, const EAutoReason reason,
                            const std::string& first_name, const std::string& last_name)
{
    BOOL removed = FALSE;

    if (isMuted(agent_id))
    {
        LLMute automute(agent_id, LLStringUtil::null, LLMute::AGENT);
        removed = TRUE;
        remove(automute);

        if (first_name.empty() && last_name.empty())
        {
            std::string cache_first, cache_last;
            if (gCacheName->getName(agent_id, cache_first, cache_last))
            {
                // name in cache, call callback directly
                notify_automute_callback(agent_id, cache_first, cache_last, FALSE, (void *)reason);
            }
            else
            {
                // not in cache, so kick off an asynchronous name lookup
                gCacheName->get(agent_id, FALSE, notify_automute_callback, (void *)reason);
            }
        }
        else
        {
            // call callback directly
            notify_automute_callback(agent_id, first_name, last_name, FALSE, (void *)reason);
        }
    }

    return removed;
}

void eDVBVolumecontrol::volumeToggleMute()
{
    if (isMuted())
        volumeUnMute();
    else
        volumeMute();
}

BOOL LLMuteList::autoRemove(const LLUUID& agent_id, const EAutoReason reason)
{
    BOOL removed = FALSE;

    if (isMuted(agent_id))
    {
        LLMute automute(agent_id, LLStringUtil::null, LLMute::AGENT);
        removed = TRUE;
        remove(automute);

        std::string full_name;
        if (gCacheName->getFullName(agent_id, full_name))
        {
            // name in cache, call callback directly
            notify_automute_callback(agent_id, full_name, false, reason);
        }
        else
        {
            // not in cache, so kick off an asynchronous name lookup
            gCacheName->get(agent_id, false,
                            boost::bind(&notify_automute_callback, _1, _2, _3, reason));
        }
    }

    return removed;
}

void LLAudioSource::updatePriority()
{
    if (isAmbient())
    {
        mPriority = 1.f;
    }
    else if (isMuted())
    {
        mPriority = 0.f;
    }
    else
    {
        // Priority is based on distance
        LLVector3 dist_vec;
        dist_vec.setVec(getPositionGlobal());
        if (gAudiop)
        {
            dist_vec -= gAudiop->getListenerPos();
        }
        F32 dist_squared = llmax(1.f, dist_vec.magVecSquared());
        mPriority = mGain / dist_squared;
    }
}

void VideoSource::setRendering( bool r )
{
    // muting is related to rendering - enableRendering shouldn't be true when
    // the source is muted, so stop it from changing while muted
    if ( !isMuted() )
    {
        enableRendering = r;
        if ( !enableRendering )
        {
            baseBColor.R = 0.05f;
            baseBColor.G = 0.1f;
            baseBColor.B = 1.0f;
            // see resetColor for why we check for selected here
            if ( !selected )
                setColor( baseBColor );
            setSecondaryColor( baseBColor );
        }
        else
        {
            resetColor();
            destSecondaryColor.R = 0.0f;
            destSecondaryColor.G = 0.0f;
            destSecondaryColor.B = 0.0f;
            destSecondaryColor.A = 0.0f;
            setSecondaryColor( destSecondaryColor );
        }
    }
}

void Pattern::saveSettings( QDomDocument & _doc, QDomElement & _this )
{
    _this.setAttribute( "type", m_patternType );
    _this.setAttribute( "name", name() );
    // as the target of a copied/dragged pattern is always an existing
    // pattern, we must not store the actual position; instead we store -1,
    // which tells loadSettings() not to mess around with the position
    if( _this.parentNode().nodeName() == "clipboard" ||
        _this.parentNode().nodeName() == "dnddata" )
    {
        _this.setAttribute( "pos", -1 );
    }
    else
    {
        _this.setAttribute( "pos", startPosition() );
    }
    _this.setAttribute( "len", length() );
    _this.setAttribute( "muted", isMuted() );
    _this.setAttribute( "steps", m_steps );

    // now save settings of all notes
    for( NoteVector::Iterator it = m_notes.begin(); it != m_notes.end(); ++it )
    {
        if( ( *it )->length() )
        {
            ( *it )->saveState( _doc, _this );
        }
    }
}

void InstrumentTrack::processAudioBuffer( sampleFrame* buf, const fpp_t frames, NotePlayHandle* n )
{
    // we must not play the sound if this InstrumentTrack is muted...
    if( isMuted() || ( n && n->isBbTrackMuted() ) || ! m_instrument )
    {
        return;
    }

    // Test for silent input data if instrument provides a single stream only (i.e. driven by InstrumentPlayHandle)
    // We could do that in all other cases as well but the overhead for silence test is bigger than
    // what we potentially save. While playing a note, a NotePlayHandle-driven instrument will produce sound in
    // 99 of 100 cases so that test would be a waste of time.
    if( m_instrument->flags().testFlag( Instrument::IsSingleStreamed ) &&
        MixHelpers::isSilent( buf, frames ) )
    {
        // at least pass one silent buffer to allow
        if( m_silentBuffersProcessed )
        {
            // skip further processing
            return;
        }
        m_silentBuffersProcessed = true;
    }
    else
    {
        m_silentBuffersProcessed = false;
    }

    // if effects "went to sleep" because there was no input, wake them up
    // now
    m_audioPort.effects()->startRunning();

    // get volume knob data
    static const float DefaultVolumeRatio = 1.0f / DefaultVolume;
    /*ValueBuffer * volBuf = m_volumeModel.valueBuffer();
    float v_scale = volBuf
        ? 1.0f
        : getVolume() * DefaultVolumeRatio;*/

    // instruments using instrument-play-handles will call this method
    // without any knowledge about notes, so they pass NULL for n, which
    // is no problem for us since we just bypass the envelopes+LFOs
    if( m_instrument->flags().testFlag( Instrument::IsSingleStreamed ) == false &&
        n != NULL )
    {
        const f_cnt_t offset = n->noteOffset();
        m_soundShaping.processAudioBuffer( buf + offset, frames - offset, n );
        const float vol = ( (float) n->getVolume() * DefaultVolumeRatio );
        const panning_t pan = qBound( PanningLeft, n->getPanning(), PanningRight );
        stereoVolumeVector vv = panningToVolumeVector( pan, vol );
        for( f_cnt_t f = offset; f < frames; ++f )
        {
            for( int c = 0; c < 2; ++c )
            {
                buf[f][c] *= vv.vol[c];
            }
        }
    }

    m_audioPort.setNextFxChannel( m_effectChannelModel.value() );
}

void InstrumentTrack::processAudioBuffer( sampleFrame* buf, const fpp_t frames, NotePlayHandle* n )
{
    // we must not play the sound if this InstrumentTrack is muted...
    if( isMuted() || ( n && n->isBbTrackMuted() ) )
    {
        return;
    }

    // Test for silent input data if instrument provides a single stream only (i.e. driven by InstrumentPlayHandle)
    // We could do that in all other cases as well but the overhead for silence test is bigger than
    // what we potentially save. While playing a note, a NotePlayHandle-driven instrument will produce sound in
    // 99 of 100 cases so that test would be a waste of time.
    if( m_instrument->flags().testFlag( Instrument::IsSingleStreamed ) &&
        MixHelpers::isSilent( buf, frames ) )
    {
        // at least pass one silent buffer to allow
        if( m_silentBuffersProcessed )
        {
            // skip further processing
            return;
        }
        m_silentBuffersProcessed = true;
    }
    else
    {
        m_silentBuffersProcessed = false;
    }

    // if effects "went to sleep" because there was no input, wake them up
    // now
    m_audioPort.effects()->startRunning();

    float v_scale = (float) getVolume() / DefaultVolume;

    // instruments using instrument-play-handles will call this method
    // without any knowledge about notes, so they pass NULL for n, which
    // is no problem for us since we just bypass the envelopes+LFOs
    if( m_instrument->flags().testFlag( Instrument::IsSingleStreamed ) == false &&
        n != NULL )
    {
        m_soundShaping.processAudioBuffer( buf, frames, n );
        v_scale *= ( (float) n->getVolume() / DefaultVolume );
    }

    m_audioPort.setNextFxChannel( m_effectChannelModel.value() );

    int framesToMix = frames;
    int offset = 0;
    int panning = m_panningModel.value();

    if( n )
    {
        framesToMix = qMin<f_cnt_t>( n->framesLeftForCurrentPeriod(), framesToMix );
        offset = n->offset();
        panning += n->getPanning();
        panning = tLimit<int>( panning, PanningLeft, PanningRight );
    }

    engine::mixer()->bufferToPort( buf, framesToMix, offset,
                                   panningToVolumeVector( panning, v_scale ),
                                   &m_audioPort );
}

void S60AudioMediaRecorderControl::setMuted(bool muted)
{
    if (muted != isMuted()) {
        TRACE("S60AudioMediaRecorderControl::setMuted" << qtThisPtr()
              << "muted" << muted);
        m_session->mute(muted);
    }
}

void AutomationPattern::saveSettings( QDomDocument & _doc, QDomElement & _this )
{
    _this.setAttribute( "pos", startPosition() );
    _this.setAttribute( "len", length() );
    _this.setAttribute( "name", name() );
    _this.setAttribute( "prog", QString::number( progressionType() ) );
    _this.setAttribute( "tens", QString::number( getTension() ) );
    _this.setAttribute( "mute", QString::number( isMuted() ) );

    for( timeMap::const_iterator it = m_timeMap.begin(); it != m_timeMap.end(); ++it )
    {
        QDomElement element = _doc.createElement( "time" );
        element.setAttribute( "pos", it.key() );
        element.setAttribute( "value", it.value() );
        _this.appendChild( element );
    }

    for( objectVector::const_iterator it = m_objects.begin(); it != m_objects.end(); ++it )
    {
        if( *it )
        {
            QDomElement element = _doc.createElement( "object" );
            element.setAttribute( "id", ProjectJournal::idToSave( ( *it )->id() ) );
            _this.appendChild( element );
        }
    }
}

QObject *SoundManager::playSoundByName(const QString &soundName)
{
    if (isMuted())
        return nullptr;

    auto file = m_configuration->deprecatedApi()->readEntry("Sounds", soundName + "_sound");
    return playFile(file);
}

void Logger::logReturn(NPAPI_Action action, DWORD dwRet)
{
    if (isMuted(action))
        return;

    char msg[512];
    sprintf(msg, "---Return: %d\r\n", dwRet);
    logMessage(msg);
}

void QDeclarativeAudio::setMuted(bool muted)
{
    if (isMuted() == muted)
        return;

    if (m_complete) {
        m_player->setMuted(muted);
    } else {
        m_muted = muted;
        emit mutedChanged();
    }
}

void pattern::loadSettings( const QDomElement & _this )
{
    unfreeze();

    m_patternType = static_cast<PatternTypes>( _this.attribute( "type" ).toInt() );
    setName( _this.attribute( "name" ) );

    if( _this.attribute( "pos" ).toInt() >= 0 )
    {
        movePosition( _this.attribute( "pos" ).toInt() );
    }
    changeLength( MidiTime( _this.attribute( "len" ).toInt() ) );

    if( _this.attribute( "muted" ).toInt() != isMuted() )
    {
        toggleMute();
    }

    clearNotes();

    QDomNode node = _this.firstChild();
    while( !node.isNull() )
    {
        if( node.isElement() &&
            !node.toElement().attribute( "metadata" ).toInt() )
        {
            note * n = new note;
            n->restoreState( node.toElement() );
            m_notes.push_back( n );
        }
        node = node.nextSibling();
    }

    m_steps = _this.attribute( "steps" ).toInt();
    if( m_steps == 0 )
    {
        m_steps = MidiTime::stepsPerTact();
    }

    ensureBeatNotes();
    checkType();
/*	if( _this.attribute( "frozen" ).toInt() )
    {
        freeze();
    }*/

    emit dataChanged();
    updateBBTrack();
}

void bbTCO::saveSettings( QDomDocument & _doc, QDomElement & _this )
{
    _this.setAttribute( "name", name() );
    if( _this.parentNode().nodeName() == "clipboard" )
    {
        _this.setAttribute( "pos", -1 );
    }
    else
    {
        _this.setAttribute( "pos", startPosition() );
    }
    _this.setAttribute( "len", length() );
    _this.setAttribute( "muted", isMuted() );
    _this.setAttribute( "color", m_color );
}

void InstrumentTrack::processAudioBuffer( sampleFrame * _buf, const fpp_t _frames,
                                          notePlayHandle * _n )
{
    // we must not play the sound if this instrumentTrack is muted...
    if( isMuted() || ( _n && _n->bbTrackMuted() ) )
    {
        return;
    }

    // if effects "went to sleep" because there was no input, wake them up
    // now
    m_audioPort.effects()->startRunning();

    float v_scale = (float) getVolume() / DefaultVolume;

    // instruments using instrument-play-handles will call this method
    // without any knowledge about notes, so they pass NULL for _n, which
    // is no problem for us since we just bypass the envelopes+LFOs
    if( _n != NULL )
    {
        m_soundShaping.processAudioBuffer( _buf, _frames, _n );
        v_scale *= ( (float) _n->getVolume() / DefaultVolume );
    }
    else
    {
        if( getVolume() < DefaultVolume && m_instrument->isMidiBased() )
        {
            v_scale = 1;
        }
    }

    m_audioPort.setNextFxChannel( m_effectChannelModel.value() );

    int panning = m_panningModel.value();
    if( _n != NULL )
    {
        panning += _n->getPanning();
        panning = tLimit<int>( panning, PanningLeft, PanningRight );
    }

    engine::getMixer()->bufferToPort( _buf,
            ( _n != NULL ) ? qMin<f_cnt_t>( _n->framesLeftForCurrentPeriod(), _frames ) : _frames,
            ( _n != NULL ) ? _n->offset() : 0,
            panningToVolumeVector( panning, v_scale ),
            &m_audioPort );
}

void AvatarInputs::update()
{
    if (!Menu::getInstance()) {
        return;
    }

    AI_UPDATE(mirrorVisible, Menu::getInstance()->isOptionChecked(MenuOption::Mirror)
        && !qApp->isHMDMode()
        && !Menu::getInstance()->isOptionChecked(MenuOption::FullscreenMirror));
    AI_UPDATE(cameraEnabled, !Menu::getInstance()->isOptionChecked(MenuOption::NoFaceTracking));
    AI_UPDATE(cameraMuted, Menu::getInstance()->isOptionChecked(MenuOption::MuteFaceTracking));

    auto audioIO = DependencyManager::get<AudioClient>();
    const float CLIPPING_INDICATOR_TIME = 1.0f;
    const float AUDIO_METER_AVERAGING = 0.5;
    const float LOG2 = log(2.0f);
    const float METER_LOUDNESS_SCALE = 2.8f / 5.0f;
    const float LOG2_LOUDNESS_FLOOR = 11.0f;
    float audioLevel = 0.0f;
    auto audio = DependencyManager::get<AudioClient>();
    float loudness = audio->getLastInputLoudness() + 1.0f;

    _trailingAudioLoudness = AUDIO_METER_AVERAGING * _trailingAudioLoudness + (1.0f - AUDIO_METER_AVERAGING) * loudness;
    float log2loudness = logf(_trailingAudioLoudness) / LOG2;

    if (log2loudness <= LOG2_LOUDNESS_FLOOR) {
        audioLevel = (log2loudness / LOG2_LOUDNESS_FLOOR) * METER_LOUDNESS_SCALE;
    } else {
        audioLevel = (log2loudness - (LOG2_LOUDNESS_FLOOR - 1.0f)) * METER_LOUDNESS_SCALE;
    }
    if (audioLevel > 1.0) {
        audioLevel = 1.0;
    }
    AI_UPDATE_FLOAT(audioLevel, audioLevel, 0.01);
    AI_UPDATE(audioClipping, ((audioIO->getTimeSinceLastClip() > 0.0f) && (audioIO->getTimeSinceLastClip() < 1.0f)));
    AI_UPDATE(audioMuted, audioIO->isMuted());

    //// Make muted icon pulsate
    //static const float PULSE_MIN = 0.4f;
    //static const float PULSE_MAX = 1.0f;
    //static const float PULSE_FREQUENCY = 1.0f; // in Hz
    //qint64 now = usecTimestampNow();
    //if (now - _iconPulseTimeReference > (qint64)USECS_PER_SECOND) {
    //    // Prevents t from getting too big, which would diminish glm::cos precision
    //    _iconPulseTimeReference = now - ((now - _iconPulseTimeReference) % USECS_PER_SECOND);
    //}
    //float t = (float)(now - _iconPulseTimeReference) / (float)USECS_PER_SECOND;
    //float pulseFactor = (glm::cos(t * PULSE_FREQUENCY * 2.0f * PI) + 1.0f) / 2.0f;
    //iconColor = PULSE_MIN + (PULSE_MAX - PULSE_MIN) * pulseFactor;
}

void Sounds::play(const char *id)
{
    if (isMuted()) {
        qmlInfo(this) << "not playing sounds while muted";
        return;
    }

    if (!m_ctx) {
        qmlInfo(this) << "not connected to pulse audio";
        return;
    }

    pa_operation *o = pa_context_play_sample(m_ctx, id, NULL, playbackVolume(), NULL, NULL);
    if (o) {
        pa_operation_unref(o);
    }
}

// play _frames frames of the given TCO, starting at _start
bool bbTrack::play( const midiTime & _start, const fpp_t _frames,
                    const f_cnt_t _offset, Sint16 _tco_num )
{
    if( isMuted() )
    {
        return( false );
    }

    if( _tco_num >= 0 )
    {
        return( engine::getBBTrackContainer()->play( _start, _frames, _offset,
                                                     s_infoMap[this] ) );
    }

    tcoVector tcos;
    getTCOsInRange( tcos, _start,
                    _start + static_cast<int>( _frames / engine::framesPerTick() ) );
    if( tcos.size() == 0 )
    {
        return( false );
    }

    midiTime lastPosition;
    midiTime lastLen;
    for( tcoVector::iterator it = tcos.begin(); it != tcos.end(); ++it )
    {
        if( !( *it )->isMuted() &&
            ( *it )->startPosition() >= lastPosition )
        {
            lastPosition = ( *it )->startPosition();
            lastLen = ( *it )->length();
        }
    }

    if( _start - lastPosition < lastLen )
    {
        return( engine::getBBTrackContainer()->play( _start - lastPosition, _frames,
                                                     _offset, s_infoMap[this] ) );
    }
    return( false );
}

void LLParticipantList::LLParticipantListMenu::show(LLView* spawning_view,
                                                    const uuid_vec_t& uuids, S32 x, S32 y)
{
    if (uuids.size() == 0)
        return;

    LLListContextMenu::show(spawning_view, uuids, x, y);

    const LLUUID& speaker_id = mUUIDs.front();
    BOOL is_muted = isMuted(speaker_id);

    // only show the moderation entry that applies to the speaker's current state
    if (is_muted)
    {
        LLMenuGL::sMenuContainer->getChildView("ModerateVoiceMuteSelected")->setVisible(false);
    }
    else
    {
        LLMenuGL::sMenuContainer->getChildView("ModerateVoiceUnMuteSelected")->setVisible(false);
    }
}

void bbTCO::loadSettings( const QDomElement & _this )
{
    setName( _this.attribute( "name" ) );
    if( _this.attribute( "pos" ).toInt() >= 0 )
    {
        movePosition( _this.attribute( "pos" ).toInt() );
    }
    changeLength( _this.attribute( "len" ).toInt() );
    if( _this.attribute( "muted" ).toInt() != isMuted() )
    {
        toggleMute();
    }

    if( _this.attribute( "color" ).toUInt() != 0 )
    {
        m_color = _this.attribute( "color" ).toUInt();
    }
}

void LLParticipantList::LLParticipantListMenu::moderateVoice(const LLSD& userdata)
{
    if (!gAgent.getRegion())
        return;

    bool moderate_selected = userdata.asString() == "selected";

    if (moderate_selected)
    {
        const LLUUID& selected_avatar_id = mUUIDs.front();
        bool is_muted = isMuted(selected_avatar_id);
        moderateVoiceParticipant(selected_avatar_id, is_muted);
    }
    else
    {
        bool unmute_all = userdata.asString() == "unmute_all";
        moderateVoiceAllParticipants(unmute_all);
    }
}

StkFrames& NxSoundMediaFileVSTi::Update( StkFrames& Frame )
{
    if( !Frame.size() )
        Frame.resize( SND_BUFFER_SIZE, GetNumChannels() );

    if( isMuted() || isStopped() )
    {
        for( unsigned int i = 0; i < Frame.size(); i++ )
        {
            Frame[i] = 0.0f;
        }
        return Frame;
    }

    if( VSTInstrument )
    {
        VstInt32 numinputs = VSTInstrument->GetNumInputs();
        VstInt32 numoutputs = VSTInstrument->GetNumOutputs();

        if( ( VSTInstrument->GetEffect()->flags & effFlagsCanReplacing ) != 0 )
        {
            try
            {
                VSTInstrument->GetEffect()->processReplacing( VSTInstrument->GetEffect(),
                        NULL, outBufs, VSTInstrument->GetBlockSize() );
            }
            catch (...)
            {
                Log( "NxSound : Error processing NxSoundMediaFileVSTi::Update" );
            }

            for( unsigned int i = 0; i < VSTInstrument->GetBlockSize(); i++ )
            {
                float * buffer = outBufs[0]; //VSTInstrument->GetOutputBuffer(0)
                Frame( i, 0 ) = buffer[i];
            }
            for( unsigned int i = 0; i < VSTInstrument->GetBlockSize(); i++ )
            {
                float * buffer = outBufs[1]; //VSTInstrument->GetOutputBuffer(1)
                Frame( i, 1 ) = buffer[i];
            }
        }
    }
    return Frame;
}

void Logger::logCall(NPAPI_Action action, DWORD dw1, DWORD dw2, DWORD dw3,
                     DWORD dw4, DWORD dw5, DWORD dw6, DWORD dw7)
{
    if (isMuted(action))
        return;

    std::string log;
    LogItemStruct * lis = makeLogItemStruct(action, dw1, dw2, dw3, dw4, dw5, dw6, dw7);
    formatLogItem(lis, &log, TRUE);
    freeLogItemStruct(lis);

    if (bToConsole)
        printf("%s", log.c_str());
    if (bToFile)
        filer.write(log);
    if (bToWindow)
        dumpStringToMainWindow(log);
}

void SampleTCO::saveSettings( QDomDocument & _doc, QDomElement & _this )
{
    if( _this.parentNode().nodeName() == "clipboard" )
    {
        _this.setAttribute( "pos", -1 );
    }
    else
    {
        _this.setAttribute( "pos", startPosition() );
    }
    _this.setAttribute( "len", length() );
    _this.setAttribute( "muted", isMuted() );
    _this.setAttribute( "src", sampleFile() );
    if( sampleFile() == "" )
    {
        QString s;
        _this.setAttribute( "data", m_sampleBuffer->toBase64( s ) );
    }
    // TODO: start- and end-frame
}

void BBTCO::loadSettings( const QDomElement & element )
{
    setName( element.attribute( "name" ) );
    if( element.attribute( "pos" ).toInt() >= 0 )
    {
        movePosition( element.attribute( "pos" ).toInt() );
    }
    changeLength( element.attribute( "len" ).toInt() );
    if( element.attribute( "muted" ).toInt() != isMuted() )
    {
        toggleMute();
    }

    if( element.hasAttribute( "color" ) )
    {
        setColor( QColor( element.attribute( "color" ).toUInt() ) );
    }

    if( element.hasAttribute( "usestyle" ) )
    {
        m_useStyleColor = ( element.attribute( "usestyle" ).toUInt() == 1 );
    }
    else
    {
        // old or older default color
        m_useStyleColor = ( m_color.rgb() == qRgb( 128, 182, 175 ) ||
                            m_color.rgb() == qRgb( 64, 128, 255 ) );
    }
}