Example #1
/**
 * Message handler.
 * @param[in] nMessage Message ID
 * @param[in] nParameter Message parameter
 * @return Result
 */
INTPTR CExternalPsg::Message(UINT nMessage, INTPTR nParameter)
{
	switch (nMessage)
	{
		case kMute:
			Mute(nParameter != 0);
			break;
	}
	return 0;
}
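A minimal caller sketch for the dispatcher above (hypothetical; it assumes psg is a CExternalPsg instance and kMute is the message ID handled in the switch). A non-zero parameter mutes the PSG, zero unmutes it:

psg.Message(kMute, 1);  // forwards to Mute(true)
psg.Message(kMute, 0);  // forwards to Mute(false)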
Example #2
GlobalShortcuts::GlobalShortcuts(QObject *parent)
  : QObject(parent),
    gnome_backend_(NULL),
    system_backend_(NULL),
    use_gnome_(false),
    rating_signals_mapper_(new QSignalMapper(this))
{
  settings_.beginGroup(kSettingsGroup);

  // Create actions
  AddShortcut("play", tr("Play"), SIGNAL(Play()));
  AddShortcut("pause", tr("Pause"), SIGNAL(Pause()));
  AddShortcut("play_pause", tr("Play/Pause"), SIGNAL(PlayPause()), QKeySequence(Qt::Key_MediaPlay));
  AddShortcut("stop", tr("Stop"), SIGNAL(Stop()), QKeySequence(Qt::Key_MediaStop));
  AddShortcut("stop_after", tr("Stop playing after current track"), SIGNAL(StopAfter()));
  AddShortcut("next_track", tr("Next track"), SIGNAL(Next()), QKeySequence(Qt::Key_MediaNext));
  AddShortcut("prev_track", tr("Previous track"), SIGNAL(Previous()), QKeySequence(Qt::Key_MediaPrevious));
  AddShortcut("inc_volume", tr("Increase volume"), SIGNAL(IncVolume()));
  AddShortcut("dec_volume", tr("Decrease volume"), SIGNAL(DecVolume()));
  AddShortcut("mute", tr("Mute"), SIGNAL(Mute()));
  AddShortcut("seek_forward", tr("Seek forward"), SIGNAL(SeekForward()));
  AddShortcut("seek_backward", tr("Seek backward"), SIGNAL(SeekBackward()));
  AddShortcut("show_hide", tr("Show/Hide"), SIGNAL(ShowHide()));
  AddShortcut("show_osd", tr("Show OSD"), SIGNAL(ShowOSD()));
  AddShortcut("toggle_pretty_osd", tr("Toggle Pretty OSD"), SIGNAL(TogglePrettyOSD())); // Toggling possible only for pretty OSD
  AddShortcut("shuffle_mode", tr("Change shuffle mode"), SIGNAL(CycleShuffleMode()));
  AddShortcut("repeat_mode", tr("Change repeat mode"), SIGNAL(CycleRepeatMode()));
  AddShortcut("toggle_last_fm_scrobbling", tr("Enable/disable Last.fm scrobbling"), SIGNAL(ToggleScrobbling()));
  AddShortcut("global_search_popup", tr("Show Global Search Popup"), SIGNAL(ShowGlobalSearch()));

  AddRatingShortcut("rate_zero_star", tr("Rate the current song 0 stars"), rating_signals_mapper_, 0);
  AddRatingShortcut("rate_one_star", tr("Rate the current song 1 star"), rating_signals_mapper_, 1);
  AddRatingShortcut("rate_two_star", tr("Rate the current song 2 stars"), rating_signals_mapper_, 2);
  AddRatingShortcut("rate_three_star", tr("Rate the current song 3 stars"), rating_signals_mapper_, 3);
  AddRatingShortcut("rate_four_star", tr("Rate the current song 4 stars"), rating_signals_mapper_, 4);
  AddRatingShortcut("rate_five_star", tr("Rate the current song 5 stars"), rating_signals_mapper_, 5);

  connect(rating_signals_mapper_, SIGNAL(mapped(int)), SIGNAL(RateCurrentSong(int)));

  // Create backends - these do the actual shortcut registration
  gnome_backend_ = new GnomeGlobalShortcutBackend(this);

#ifndef Q_OS_DARWIN
  system_backend_ = new QxtGlobalShortcutBackend(this);
#else
  system_backend_ = new MacGlobalShortcutBackend(this);
#endif

  ReloadSettings();
}
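The constructor above only declares the shortcut actions and their signals; wiring them to a player happens elsewhere. A hypothetical usage sketch in the same Qt4-style connect syntax, written inside a QObject-derived class (the player_ object and its slots are assumptions, not part of this example):

GlobalShortcuts* shortcuts = new GlobalShortcuts(this);
connect(shortcuts, SIGNAL(Mute()), player_, SLOT(Mute()));
connect(shortcuts, SIGNAL(PlayPause()), player_, SLOT(PlayPause()));
connect(shortcuts, SIGNAL(RateCurrentSong(int)), player_, SLOT(RateCurrentSong(int)));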
Example #3
GlobalShortcuts::GlobalShortcuts(QObject *parent)
    : QObject(parent),
    backend(nullptr) {

    // Create actions
    AddShortcut("play", tr("Play"), SIGNAL(Play()));
    AddShortcut("pause", tr("Pause"), SIGNAL(Pause()));
    AddShortcut("play_pause", tr("Play/Pause"), SIGNAL(PlayPause()), QKeySequence(Qt::Key_MediaPlay));
    AddShortcut("stop", tr("Stop"), SIGNAL(Stop()), QKeySequence(Qt::Key_MediaStop));
    AddShortcut("stop_after", tr("Stop playing after current track"), SIGNAL(StopAfter()));
    AddShortcut("next_track", tr("Next track"), SIGNAL(Next()), QKeySequence(Qt::Key_MediaNext));
    AddShortcut("prev_track", tr("Previous track"), SIGNAL(Previous()), QKeySequence(Qt::Key_MediaPrevious));
    AddShortcut("inc_volume", tr("Increase volume"), SIGNAL(IncVolume()));
    AddShortcut("dec_volume", tr("Decrease volume"), SIGNAL(DecVolume()));
    AddShortcut("mute", tr("Mute"), SIGNAL(Mute()));
    AddShortcut("seek_forward", tr("Seek forward"), SIGNAL(SeekForward()));
    AddShortcut("seek_backward", tr("Seek backward"), SIGNAL(SeekBackward()));

}
Example #4
int FG::qt_metacall(QMetaObject::Call _c, int _id, void **_a)
{
    _id = QMainWindow::qt_metacall(_c, _id, _a);
    if (_id < 0)
        return _id;
    if (_c == QMetaObject::InvokeMetaMethod) {
        switch (_id) {
        case 0: receiveValue((*reinterpret_cast< double(*)>(_a[1]))); break;
        case 1: receiveValue((*reinterpret_cast< QString(*)>(_a[1]))); break;
        case 2: receiveValue((*reinterpret_cast< uint(*)>(_a[1]))); break;
        case 3: callLineEditInput((*reinterpret_cast< int(*)>(_a[1]))); break;
        case 4: Beep(); break;
        case 5: Mute(); break;
        case 6: on_AWGBox_clicked(); break;
        case 7: on_modeBox_currentIndexChanged((*reinterpret_cast< int(*)>(_a[1]))); break;
        case 8: on_calibrate_clicked(); break;
        case 9: on_appBckPSoC_clicked(); break;
        case 10: on_fineCoarse_clicked(); break;
        case 11: on_leftBut_clicked(); break;
        case 12: on_rightBut_clicked(); break;
        case 13: on_exit_clicked(); break;
        case 14: on_upBut_clicked(); break;
        case 15: on_downBut_clicked(); break;
        case 16: on_GateBut_clicked(); break;
        case 17: on_EXTBut_clicked(); break;
        case 18: on_INTBut_clicked(); break;
        case 19: on_MANBut_clicked(); break;
        case 20: on_RUNBut_clicked(); break;
        case 21: on_hiZBut_clicked(); break;
        case 22: on_triangleBut_clicked(); break;
        case 23: on_rampBut_clicked(); break;
        case 24: on_squareBut_clicked(); break;
        case 25: on_sineBut_clicked(); break;
        case 26: on_singleBut_clicked(); break;
        case 27: on_burstBut_clicked(); break;
        case 28: on_continuousBut_clicked(); break;
        default: ;
        }
        _id -= 29;
    }
    return _id;
}
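The switch above is moc-generated: each case index corresponds to a signal or slot of FG in declaration order, and name-based invocation via QMetaObject::invokeMethod() is routed through this dispatcher. A hypothetical invocation by name (assuming fg points to an FG instance):

QMetaObject::invokeMethod(fg, "Mute", Qt::QueuedConnection);         // reaches case 5
QMetaObject::invokeMethod(fg, "receiveValue", Q_ARG(double, 3.14));  // reaches case 0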
Example #5
void
AudioContext::Shutdown()
{
  mIsShutDown = true;

  // We mute rather than suspending, because the delay between the ::Shutdown
  // call and the CC would make us overbuffer in the MediaStreamGraph.
  // See bug 936784 for details.
  if (!mIsOffline) {
    Mute();
  }

  // Release references to active nodes.
  // Active AudioNodes don't unregister in destructors, at which point the
  // Node is already unregistered.
  mActiveNodes.Clear();

  // For offline contexts, we can destroy the MediaStreamGraph at this point.
  if (mIsOffline && mDestination) {
    mDestination->OfflineShutdown();
  }
}
Example #6
void MpvHandler::SetProperties()
{
    Volume(volume);
    Speed(speed);
    Mute(mute);
}
Example #7
File: rx.c  Project: gegel/pairphone
//*****************************************************************************
//receiving loop: grab 48KHz baseband samples from Line,
//demodulate, decrypt, decode, play 8KHz voice over Speaker
int rx(int typing)
{
 //input: -1 if no characters were typed, 1 if some characters are in the input buffer
 //output: 0 - no job was done, non-zero - some job was done
 int i;
 float f;
 int job=0; //flag: some job was done
 char lag_flag=0; //block lag is locked (modem synchronization complete)
 //char lock_flag=0; //phase of carrier (1333Hz, 6 samples per period) is locked
 //char sync_flag=0; //the difference of frequency transmitter-to-receiver sampling rate is locked
 //char current_lag=0;  //block lag (0-90, aligned to last bit position, the 6 samples for bit)

 char info[8]={0}; //call info

 //regularly play Speaker's buffer
 job=playjit(); //the first try to play a tail of samples in buffer
 //check whether we have enough samples for demodulation
 if(cnt<180*6) //not enough unprocessed samples yet
 {
  //move tail to start of receiving buffer
  if(samples>speech) //check for tail
  {
   for(i=0; i<cnt; i++) speech[i]=samples[i]; //move tail to start of buffer
   samples=speech; //set pointer to start of buffer
  }
  //record
  i=_soundgrab((char*)(samples+cnt), 180*6);  //try to grab new 48KHz samples from Line

  if((i>0)&&(i<=(180*6))) //some samples grabbed
  {
   cnt+=i;  //add grabbed samples to the count
   job+=4;  //set job
  }
 }
 else //we have enough samples for processing
 {
  i=Demodulate(samples, buf); //process samples: 36*6 (35-37)*6 samples
  samples+=i; //move pointer to next samples (with frequency adjusting)
  cnt-=i; //decrease the number of unprocessed samples
  if(0x80&buf[11]) //check the flag: an output data block is ready
  {
   //check for sync and average the BER
   lag_flag=!(!(buf[11]&0x40)); //block lag is locked (synchronization complete)
   //lock_flag=!(!(buf[11]&0x20)); //phase of carrier (1333Hz, 6 samples per period) is locked
   //sync_flag=!(!(buf[11]&0x10)); //the transmitter-to-receiver sampling-rate difference is locked
   //current_lag=buf[10]>>1;  //block lag (0-90, aligned to last bit position, the 6 samples for bit)
   if(lag_flag) //check modem sync
   {
    //average the BER
    i=(0x0F&buf[11]); //count of symbol errors (only 1 error per 9-bit symbol can be detected)
    fber*=0.99; //fber in range 0-900
    fber+=i;  //in range 0-9 errored bits per 90 bits received
   }
   //output statistics
   if(typing<0) //output call's info if no characters were typed by user
   {
    f=Mute(0);   //get packets counter value
    i=State(0); //get current connection step * vad flag 
	
	//notification of state and voice output 
    if(!i) strcpy(info, (char*)"IDLE"); 
    else if(abs(i)<8) strcpy(info, (char*)"CALL");  
    else if(f<=0) strcpy(info, (char*)"MUTE");
    else if(i<0) strcpy(info, (char*)"PAUS");
    else strcpy(info, (char*)"TALK");

    if(f<0) f=-f; //absolute value
    i=f*0.0675; //compute total call time in seconds: each packet is 67.5 ms

    f=fau/4-100; //compute the authentication level in %
    if(f<0) f=0; //only positive values are meaningful
    //current state notification
    if(lag_flag) printf("%s %dmin %dsec BER:%0.02f AU:%d%%\r", info, i/60, i%60, fber/90, (int)f);
    else printf("%s %dmin %dsec BER:---- AU:%d%%\r", info, i/60, i%60, (int)f); //loss of modem sync
   }

   //process received packet detects voice/silence type
   buf[11]=0xFE; //default the flag to a silence descriptor
   if(lag_flag) //check modem sync
   {
    i=ProcessPkt(buf);  //decode received packet
    if(i>=0) //received packet is a control type
    {
     fau*=0.99;  //fau in range 0-800 (400 for random data)
     fau+=i; //average the authentication level
    }
    else if(i==-3) 
	{
	 buf[11]=0xFF; //set flag for voice data received 
	}
   } //end of sync ok, packets processing
  } //end of data block received
 } //end of processing a portion of samples

 //check we have received data and output buffer is empty for decoding
 if((0x0E&buf[11])&&(l_jit_buf<=180))
 {
  //decode voice data or output silence
  job+=16; //set job
  if(1&buf[11]) //this is a voice frame, decode it
  {
   melpe_s(sp, buf); //decode 81 bits in 11 bytes to 540 8KHz samples 
  }
  else memset(sp, 0, 1080); //or output 67.5 mS of silence
  buf[11]=0; //clears flag: data buffer is processed

  //computes average playing delay
  i=getdelay()+l_jit_buf; //total number of unplayed samples in buffers
  fdelay*=0.9; //averages
  fdelay+=i;
  //compute the optimal resampling ratio for the target delay
  f=fabs(fdelay/10-720)/10000000; //correction rate proportional to the deviation
  if(i<360) qff-=f;  //adjust current ratio
  else if(i>1080) qff+=f;
  if(qff<0.888) qff=0.888; //restrictions
  else if(qff>1.142) qff=1.142;

  //resample and play to Headset
  if(l_jit_buf>180) l_jit_buf=0; //prevent overflow
  l_jit_buf+=resample(sp, jit_buf+l_jit_buf, qff); //resample buffer for playing
  playjit(); //immediately try to play buffer
 }
 return job;
}
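The playout-rate control near the end of rx() can be read in isolation: the unplayed-sample count is smoothed exponentially, the resampling ratio qff is nudged by a step proportional to the distance from the 720-sample target, and the ratio is clamped to a fixed band (0.888..1.142). A minimal sketch of that idea under the same constants (names here are illustrative, not from the project):

#include <math.h>

static float fdelay = 7200.0f; /* smoothed delay accumulator (~10x the average count) */
static float qff = 1.0f;       /* current resampling ratio */

void adjust_playout_ratio(int unplayed_samples)
{
 fdelay = fdelay*0.9f + unplayed_samples;          /* exponential averaging, as in rx() */
 float step = fabsf(fdelay/10.0f - 720.0f)/1e7f;   /* distance from the 720-sample target */
 if(unplayed_samples < 360) qff -= step;           /* too few buffered samples: lower the ratio */
 else if(unplayed_samples > 1080) qff += step;     /* too many buffered samples: raise the ratio */
 if(qff < 0.888f) qff = 0.888f;                    /* clamp to the same bounds as rx() */
 else if(qff > 1.142f) qff = 1.142f;
}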
Example #8
	~GimicIf() {
		Log("YMF288::Reset()\n");
		Mute();
	}
Example #9
File: tNASBar.cpp  Project: dulton/53_hero
//-----------------------------------------------------------------------------
//! Create all the menu actions.
//-----------------------------------------------------------------------------
void tNASBar::CreateActions()
{
    TRACE_FUNCTION;
    if ( m_FusionClientAgent.IsPowerButtonAllowed() )
    {
        // Some audio servers support a power button
        m_pPowerAction = new tAction( tr("Power"), this );
        m_pPowerAction->setCheckable(true);
        m_pPowerAction->setChecked( m_FusionClientAgent.GetPowerState() != tFusionClient::ePS_Off );
        Connect( m_pPowerAction , SIGNAL( triggered() ), this, SLOT( PowerClicked() ));
    }
    else
    {
        if (m_pPowerAction != NULL)
        {
            delete m_pPowerAction;
            m_pPowerAction = 0;
        }
    }
    if ( m_FusionClientAgent.IsOnAndInitialized() )
    {
        // Connect firmware progress message
        Connect( &m_FusionClientAgent, SIGNAL(UpdateFirmwareProgress( quint8 )), this, SLOT(OnUpdateFirmwareProgress( quint8 )));

        // Volume action
        m_pVolumeAction = new tSliderAction(
            tr( "Volume", "sound level for SonicHub/FUSION-Link" ),
            m_FusionClientAgent.MinVolumeLevel(),
            m_FusionClientAgent.MaxVolumeLevel(),
            0,  // current volume is set when the Audio zone or Sonic hub Fader actions are created
            true,
            "%v",
            this
            );

        // Mute action
        m_pMuteAction = new tAction( tr("Mute"), this );
        m_pMuteAction->setCheckable( true );
        m_pMuteAction->setChecked( m_FusionClientAgent.IsMuted() );
        Connect( m_pMuteAction, SIGNAL( toggled( bool ) ), this, SLOT( SetMute( bool ) ) );
        Connect( &m_FusionClientAgent, SIGNAL( Mute( tFusionClient::eState ) ), this, SLOT( OnMute( tFusionClient::eState ) ) );
        Connect( &m_FusionClientAgent, SIGNAL( Mute( tFusionClient::eState ) ), m_pVolumeWidget, SLOT( OnMute( tFusionClient::eState ) ) );

        // Audio sources.
        m_pAudioSourceAction = new tAudioSourceAction( m_FusionClientAgent, m_MySourceId, this );

        // Virtual head - the text is given as an empty string; the device bars themselves will fill it in.
        // If you don't give it an empty string, it comes up as a dummy action, doesn't draw correctly on the softkey bar,
        // and is also disabled in the menus. It's less work to just set it as an empty string now.
        m_pVirtualHeadAction = new tAction( "", this );
        Connect( m_pVirtualHeadAction , SIGNAL( triggered() ), this, SLOT( ActivateVirtualHead() ), Qt::QueuedConnection );

        m_pBluetoothDevicesAction = new tAction( tr( "Bluetooth devices" ), this );
        Connect( m_pBluetoothDevicesAction , SIGNAL( triggered() ), this, SLOT( ShowBluetoothDialog() ), Qt::QueuedConnection );

        m_pMixerActions = tMixerActions::Create( m_FusionClientAgent, this );
        m_pZoneSetupAction = m_pMixerActions->MixerAction();
        Connect( m_pMixerActions, SIGNAL( MasterVolumeChanged( int ) ), m_pVolumeWidget, SLOT( SetValue( int ) ) );
        Connect( m_pMixerActions, SIGNAL( MasterVolumeChanged( int ) ), m_pVolumeAction, SLOT( SetValue( int ) ) );
        Connect( m_pVolumeAction, SIGNAL( ValueChanged( int ) ), m_pMixerActions, SLOT( RequestVolumeLevel( int ) ) );
        m_pMixerActions->UpdateMasterVolume();
    }
Example #10
File: tx.c  Project: gegel/pairphone
//*****************************************************************************
//transmission loop: grab 8KHz speech samples from the microphone,
//resample, collect frame (540 in 67.5 mS), encode
//encrypt, modulate, play 48KHz baseband signal into Line
int tx(int job)
{
 int i,j;

 //loop 1: try to play unplayed samples
 job+=_playjit(); //the first try to play a tail of samples in buffer

 //loop 2: try to grab next 180 samples
 //check for number of grabbed samples
 if(spcnt<540) //we don't have enough samples for the melpe encoder
 {
  i=soundgrab((char*)spraw, 180); //grab up to 180 samples
  if((i>0)&&(i<=180)) //if some samples were grabbed
  {
   //Since different audio devices are used on the headset and line sides,
   //the sampling rates of the grabbing and playing devices can differ
   //slightly from the nominal 48/8 ratio depending on the hardware,
   //so one of the rates must be adjusted to keep the grabbing and playing processes synchronized.
   //The line side is more sensitive (the baseband requirements are stricter),
   //which is why the grabbed stream (slave) is resampled to match the rate of the playing stream (master).
   //The adjustment converges iteratively and needs several seconds to adapt,
   //possibly losing some 67.5 mS speech frames in the process.

   //compute the estimated rate from the recording delay observed when the last block was modulated
   j=8000-(_fdelay-27000)/50; //compute the sample rate from the optimal delay and the adjustment sensitivity
   if(j>9000) j=9000; //restrict resulting samplerate
   if(j<7000) j=7000;

   //change the rate of the grabbed samples to synchronize the grabbing and playing loops
   i=_resample(spraw, spbuf+spcnt, i, j); //resample and collect speech samples
   spcnt+=i; //the number of samples in the buffer for processing
   tgrab+=i; //the total difference between grabbed speech and played baseband samples;
                //this is effectively the recording delay and must average about 270 samples
                //to protect against jitter (PC multi-threading etc.)

   job+=32; //set job
  }
 }
 //check whether we have enough grabbed samples for processing
 if(spcnt>=540) //we have enough samples for the melpe encoder
 {
  if(Mute(0)>0)
  {
   i=vad2(spbuf+10, &vad);  //check frame is speech (by VAD)
   i+=vad2(spbuf+100,&vad);
   i+=vad2(spbuf+190,&vad);
   i+=vad2(spbuf+280,&vad);
   i+=vad2(spbuf+370,&vad);
   i+=vad2(spbuf+460,&vad);
  }
  else i=0;
  
  txbuf[11]=0xFF;   //set defaults flag for voiced frame
  if(i) //frame is voiced: compress it
  {
   melpe_a(txbuf, spbuf); //encode the speech frame
   i=State(1); //set VAD flag
  }
  else //unvoiced frame: a sync packet will be sent
  {
   txbuf[11]=0xFE; //or set silence flag for control blocks
   i=State(-1); //clears VAD flag
  }

  spcnt-=540; //remaining samples
  if(spcnt) memcpy((char*)spbuf, (char*)(spbuf+540), 2*spcnt);  //move tail to start of buffer
  job+=64;
 }

 //Loop 3: playing
//get number of unplayed samples in buffer 
 i=_getdelay();
//prevent the audio output from freezing after an underrun or overrun
 if(i>540*3*6)
 {
  _soundflush1();
  i=_getdelay();
 }   
//check whether the delay allows playing the next portion of samples
 if(i<720*6) 
 {
  if(l__jit_buf) return job; //unplayed samples remain in the local buffer; don't play now
  MakePkt(txbuf); //encrypt voice or get actual control packet
  l__jit_buf=Modulate(txbuf, _jit_buf); //modulate block
  txbuf[11]=0; //clear tx buffer (processed)
  _playjit();  //immediately play baseband into Line
 
  //estimate the rate change for the grabbed samples to keep grabbing and playing in sync
  _fdelay*=0.99; //smoothing coefficient
  _fdelay+=tgrab;   //average the recording delay
  tgrab-=540;  //decrease counter of grabbed samples

  job+=128;
 }

 return job;
}
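The sample-rate estimate above, j = 8000-(_fdelay-27000)/50, has a simple reading: _fdelay is smoothed with coefficient 0.99, so its steady-state value is roughly 100 times the average recording delay; the 270-sample target from the comments therefore corresponds to _fdelay of about 27000, where the estimate is exactly 8000 Hz, and every 50 units of smoothed error shift it by 1 Hz before clamping to 7000..9000 Hz. A minimal sketch of the same calculation (the function name is illustrative, not from the project):

/* hypothetical restatement of the adaptive-rate estimate used in tx() */
int estimate_grab_rate(float smoothed_delay)
{
 int rate = 8000 - (int)((smoothed_delay - 27000.0f)/50.0f);
 if(rate > 9000) rate = 9000; /* clamp, as in tx() */
 if(rate < 7000) rate = 7000;
 return rate;
}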