Ejemplo n.º 1
0
bool Track::SyncLockAdjust(double oldT1, double newT1)
{
   if (newT1 > oldT1) {
      // Insert space within the track

      if (oldT1 > GetEndTime())
         return true;

      Track *tmp;
      bool ret;

      ret = Cut(oldT1, GetEndTime(), &tmp);
      if (!ret) return false;

      ret = Paste(newT1, tmp);
      wxASSERT(ret);

      delete tmp;
      return ret;
   }
   else if (newT1 < oldT1) {
      // Remove from the track
      return Clear(newT1, oldT1);
   }

   // fall-through: no change
   return true;
}
Ejemplo n.º 2
0
// Remove the audio between t0 and t1 from this clip and collapse the
// envelope over the same span. Cutlines whose position falls inside the
// region are destroyed; cutlines to the right are shifted left by the
// clamped width of the removed region.
// Returns false if the underlying sequence delete fails (clip unchanged).
bool WaveClip::Clear(double t0, double t1)
{
   sampleCount s0, s1;

   // Convert absolute times into sample offsets within this clip.
   TimeToSamplesClip(t0, &s0);
   TimeToSamplesClip(t1, &s1);

   if (GetSequence()->Delete(s0, s1-s0))
   {
      // msmeyer
      //
      // Delete all cutlines that are within the given area, if any.
      //
      // Note that when cutlines are active, two functions are used:
      // Clear() and ClearAndAddCutLine(). ClearAndAddCutLine() is called
      // whenever the user directly calls a command that removes some audio, e.g.
      // "Cut" or "Clear" from the menu. This command takes care about recursive
      // preserving of cutlines within clips. Clear() is called when internal
      // operations want to remove audio. In the latter case, it is the right
      // thing to just remove all cutlines within the area.
      //
      // Clamp the cleared region to the clip's own extent; the clamped
      // width (clip_t1 - clip_t0) is the amount remaining cutlines shift.
      double clip_t0 = t0;
      double clip_t1 = t1;
      if (clip_t0 < GetStartTime())
         clip_t0 = GetStartTime();
      if (clip_t1 > GetEndTime())
         clip_t1 = GetEndTime();

      WaveClipList::compatibility_iterator nextIt;

      // Walk with a saved 'next' node because the current node may be
      // deleted during iteration.
      for (WaveClipList::compatibility_iterator it = mCutLines.GetFirst(); it; it=nextIt)
      {
         nextIt = it->GetNext();
         WaveClip* clip = it->GetData();
         // Cutline offsets are relative to this clip; make them absolute.
         double cutlinePosition = mOffset + clip->GetOffset();
         if (cutlinePosition >= t0 && cutlinePosition <= t1)
         {
            // This cutline is within the area, delete it
            delete clip;
            mCutLines.DeleteNode(it);
         } else
         if (cutlinePosition >= t1)
         {
            // Right of the cleared region: shift left by the removed width.
            clip->Offset(clip_t0-clip_t1);
         }
      }

      // Collapse envelope
      GetEnvelope()->CollapseRegion(t0, t1);
      // If the region extended left of the clip, pull the clip start back.
      if (t0 < GetStartTime())
         Offset(-(GetStartTime() - t0));

      MarkChanged();
      return true;
   }

   return false;
}
// Per-tick update for the shader-parameter task: while the blend-in is
// running, interpolate from each material's previous value toward m_Value;
// once blend-in is done and the task is close enough to its end time, blend
// back out to the previous values (only when m_RestoreValueOnEnd is set).
void UAblSetShaderParameterTask::OnTaskTick(const TWeakObjectPtr<const UAblAbilityContext>& Context, float deltaTime) const
{
	Super::OnTaskTick(Context, deltaTime);

	UAblSetShaderParameterTaskScratchPad* ScratchPad = Cast<UAblSetShaderParameterTaskScratchPad>(Context->GetScratchPadForTask(this));
	check(ScratchPad);

	if (!ScratchPad->BlendIn.IsComplete())
	{
		// Advance the blend-in and push the interpolated value to every
		// tracked dynamic material.
		ScratchPad->BlendIn.Update(deltaTime);

		// Materials and their saved previous values are parallel arrays.
		verify(ScratchPad->DynamicMaterials.Num() == ScratchPad->PreviousValues.Num());
		for (int32 i = 0; i < ScratchPad->DynamicMaterials.Num(); ++i)
		{
			InternalSetShaderValue(ScratchPad->DynamicMaterials[i].Get(), m_Value, ScratchPad->PreviousValues[i].Get(), ScratchPad->BlendIn.GetBlendedValue());
		}
	}
	else if (m_RestoreValueOnEnd && !ScratchPad->BlendOut.IsComplete())
	{
		// If we're within range to start blending out, go ahead and start that process.
		if (GetEndTime() - Context->GetCurrentTime() < ScratchPad->BlendOut.GetBlendTime())
		{
			ScratchPad->BlendOut.Update(deltaTime);
			
			verify(ScratchPad->DynamicMaterials.Num() == ScratchPad->PreviousValues.Num());
			for (int32 i = 0; i < ScratchPad->DynamicMaterials.Num(); ++i)
			{
				// Note the swapped source/target: blend from m_Value back
				// to the stored previous value.
				InternalSetShaderValue(ScratchPad->DynamicMaterials[i].Get(), ScratchPad->PreviousValues[i].Get(), m_Value, ScratchPad->BlendOut.GetBlendedValue());
			}
		}
	}
}
Ejemplo n.º 4
0
// Export the current project to an MP3 file. A non-positive bitrate means
// "use the current preference"; otherwise the requested bitrate is written
// to prefs for the duration of the export and restored afterwards.
// Returns false if the project is empty or the export fails.
bool BatchCommands::WriteMp3File( const wxString Name, int bitrate )
{
   // Export as mono when the project is mono, stereo otherwise.
   const int numChannels = IsMono() ? 1 : 2;

   const double endTime = GetEndTime();
   if (endTime <= 0.0)
      return false;

   AudacityProject *project = GetActiveProject();

   if (bitrate <= 0)
   {
      // 'No' bitrate given, use the current default.
      // Use Mp3Stereo to control if export is to a stereo or mono file
      return mExporter.Process(project, numChannels, wxT("MP3"), Name, false, 0.0, endTime);
   }

   // Temporarily install the requested bitrate, export, then restore the
   // previous preference and flush it to storage.
   const long prevBitRate = gPrefs->Read(wxT("/FileFormats/MP3Bitrate"), 128);
   gPrefs->Write(wxT("/FileFormats/MP3Bitrate"), bitrate);
   const bool rc = mExporter.Process(project, numChannels, wxT("MP3"), Name, false, 0.0, endTime);
   gPrefs->Write(wxT("/FileFormats/MP3Bitrate"), prevBitRate);
   gPrefs->Flush();
   return rc;
}
// Finalize the thread's status: record the end time, refresh the UI row of
// the last task in the list, and signal overall completion.
void ThreadProc::StatusEndProc ()
{
    GetEndTime ();
    UIFreshFoo (theTaskList.length () - 1);
    GameOver ();
}
Ejemplo n.º 6
0
// Body of the dedicated audio thread: initialize the audio stream, then
// pump decoded audio (or silence across gaps) into the hardware until
// playback stops, finally draining and cleaning up.
void
AudioSink::AudioLoop()
{
  AssertOnAudioThread();
  SINK_LOG("AudioLoop started");

  if (NS_FAILED(InitializeAudioStream())) {
    NS_WARNING("Initializing AudioStream failed.");
    mStateMachine->DispatchOnAudioSinkError();
    return;
  }

  while (1) {
    {
      // Block until there is audio to play or playback ends; the monitor
      // is only held for this check, not while pushing samples.
      ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
      WaitForAudioToPlay();
      if (!IsPlaybackContinuing()) {
        break;
      }
    }
    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware, so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio in numbers of
    // samples.
    NS_ASSERTION(AudioQueue().GetSize() > 0, "Should have data to play");
    CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);

    // Calculate the number of frames that have been pushed onto the audio hardware.
    CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) + mWritten;

    // Positive difference = a gap between what we've written and the next
    // chunk's start; CheckedInt64 flags arithmetic overflow.
    CheckedInt64 missingFrames = sampleTime - playedFrames;
    if (!missingFrames.isValid() || !sampleTime.isValid()) {
      NS_WARNING("Int overflow adding in AudioLoop");
      break;
    }

    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio chunk begins some time after the end of the last chunk
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio chunk begins playback at the correct
      // time.
      // Clamp to UINT32_MAX since PlaySilence takes a uint32_t count.
      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
      mWritten += PlaySilence(static_cast<uint32_t>(missingFrames.value()));
    } else {
      mWritten += PlayFromAudioQueue();
    }
    // Publish the new end time (-1 means "unknown", so skip the dispatch).
    int64_t endTime = GetEndTime();
    if (endTime != -1) {
      mOnAudioEndTimeUpdateTask->Dispatch(endTime);
    }
  }
  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
  MOZ_ASSERT(mStopAudioThread || AudioQueue().AtEndOfStream());
  // Drain buffered audio only on natural end-of-stream while still playing.
  if (!mStopAudioThread && mPlaying) {
    Drain();
  }
  SINK_LOG("AudioLoop complete");
  Cleanup();
  SINK_LOG("AudioLoop exit");
}
Ejemplo n.º 7
0
// Look up the averaged value recorded for iAtTime, scaled by fMultiplier.
// The time must lie inside the buffered [start, end) window.
C4Graph::ValueType C4TableGraph::GetValue(TimeType iAtTime) const {
  // must be inside buffer
  assert(Inside(iAtTime, GetStartTime(), GetEndTime() - 1));
  // Ring-buffer index; the offset cannot be negative for in-range times.
  const auto iSlot = (iAtTime - iInitialStartTime) % iBackLogLength;
  return pAveragedValues[iSlot] * fMultiplier;
}
Ejemplo n.º 8
0
// Periodic guild-war tick: when the war's end time has passed, flag it
// ended and notify the core server; otherwise report an absent guild
// master on either side (TRUE = declaring guild, FALSE = accepting guild).
void CGuildWar::Process( CTime & time )
{
	// Only wars currently in progress need processing.
	if( m_nFlag != WF_WARTIME )
		return;

	if( GetEndTime() < time )
	{
		// Duration elapsed: end the war and tell the core server.
		m_nFlag	= WF_END;
		g_DPCoreClient.SendWarTimeout( m_idWar );
		TRACE( "SendWarTimeout, %d\n", m_idWar );
		return;
	}

	CGuild* pDecl	= g_GuildMng.GetGuild( m_Decl.idGuild );
	CGuild* pAcpt	= g_GuildMng.GetGuild( m_Acpt.idGuild );
	if( !pDecl || !pAcpt )
		return;

	// Check each side's guild master for presence/validity.
	CUser* pMaster	= g_UserMng.GetUserByPlayerID( pDecl->m_idMaster );
	if( IsInvalidObj( pMaster ) )
		g_DPCoreClient.SendWarMasterAbsent( m_idWar, TRUE );

	pMaster	= g_UserMng.GetUserByPlayerID( pAcpt->m_idMaster );
	if( IsInvalidObj( pMaster ) )
		g_DPCoreClient.SendWarMasterAbsent( m_idWar, FALSE );
}
// Split this section at SplitTime: this section keeps [start, SplitTime]
// and a duplicated section covers [SplitTime, old end]. Returns the new
// section, or nullptr if SplitTime is outside the section or the section
// cannot be modified (transaction rejected).
UMovieSceneSection* UMovieSceneSection::SplitSection(float SplitTime)
{
	if (!IsTimeWithinSection(SplitTime))
	{
		return nullptr;
	}

	// Make the change undoable.
	SetFlags(RF_Transactional);

	if (TryModify())
	{
		// Remember the original end before trimming.
		float SectionEndTime = GetEndTime();
				
		// Trim off the right
		SetEndTime(SplitTime);

		// Create a new section
		UMovieSceneTrack* Track = CastChecked<UMovieSceneTrack>(GetOuter());
		Track->Modify();

		UMovieSceneSection* NewSection = DuplicateObject<UMovieSceneSection>(this, Track);
		check(NewSection);

		// The duplicate covers the trimmed-off right-hand range.
		NewSection->SetStartTime(SplitTime);
		NewSection->SetEndTime(SectionEndTime);
		Track->AddSection(*NewSection);

		return NewSection;
	}

	return nullptr;
}
Ejemplo n.º 10
0
// Adjust this track when a sync-locked neighbour changes length: growing
// moves the tail after oldT1 rightwards to newT1; shrinking removes the
// [newT1, oldT1) span. Equal boundaries are a no-op.
void Track::SyncLockAdjust(double oldT1, double newT1)
{
   if (newT1 == oldT1)
      return;

   if (newT1 < oldT1) {
      // Shrinking: remove the span between the boundaries.
      Clear(newT1, oldT1);
      return;
   }

   // Growing: cut everything past oldT1 and paste it back at newT1.
   const double end = GetEndTime();
   if (oldT1 > end)
      return;

   auto tail = Cut(oldT1, end);
   Paste(newT1, tail.get());
}
Ejemplo n.º 11
0
	// Rescale this emitter's timeline so its end time becomes e_fNewTime:
	// the start time, target end time, and (when a path is present) the
	// path curve are all scaled by the same factor.
	// NOTE(review): divides by GetEndTime() with no zero check — presumably
	// callers guarantee a non-zero end time; confirm before relying on it.
	void	cParticleEmiterWithShowPosition::RearrangeTime(float e_fNewTime)
	{
		// Ratio between the requested and current end time.
		float	l_fPercent = e_fNewTime/GetEndTime();
		this->m_fStartTime *= l_fPercent;
		this->m_EndTimeCounter.SetTargetTime(l_fPercent*m_EndTimeCounter.fTargetTime);
		if( m_ePathType != ePT_NO_PATH )
		{
			// Keep the movement curve in sync with the new duration.
			this->m_pCurveWithTime->RearrangeTime(l_fPercent*m_EndTimeCounter.fTargetTime);
		}
	}
Ejemplo n.º 12
0
//Trim trims within a clip, rather than trimming everything.
//If a bound is outside a clip, it trims everything.
// Keeps only the audio between t0 and t1. When a bound falls inside a
// clip, only that clip is trimmed; when a bound falls between clips, all
// audio on that side of the bound is cleared. Returns false if any Clear
// fails.
bool WaveTrack::Trim (double t0, double t1)
{
   bool inside0 = false;
   bool inside1 = false;
   //Keeps track of the offset of the first clip greater than
   // the left selection t0.
   double firstGreaterOffset = -1;

   WaveClipList::Node * it;
   for(it = GetClipIterator(); it; it = it->GetNext())
      {
            
         WaveClip * clip = it->GetData();

         //Find the first clip greater than the offset.
         //If we end up clipping the entire track, this is useful.
         if(firstGreaterOffset < 0 && 
            clip->GetStartTime() >= t0)
            firstGreaterOffset = clip->GetStartTime();

         // Right bound inside this clip: trim the clip's tail.
         if(t1 > clip->GetStartTime() && t1 < clip->GetEndTime())
            {
               if (!clip->Clear(t1,clip->GetEndTime()))
                  return false;
               inside1 = true;
            }

         // Left bound inside this clip: trim the clip's head and anchor
         // the clip at t0.
         if(t0 > clip->GetStartTime() && t0 < clip->GetEndTime())
            {
               if (!clip->Clear(clip->GetStartTime(),t0))
                  return false;
               clip->SetOffset(t0);
               inside0 = true;
            }
      }

   //if inside1 is false, then the right selector was between
   //clips, so delete everything to its right.
   if(false == inside1)
      {
         if (!Clear(t1,GetEndTime()))
            return false;
      }

   if(false == inside0)
      {
         if (!Clear(0,t0))
            return false;
         //Reset the track offset to be at the point of the first remaining clip. 
         // NOTE(review): if no clip starts at or after t0, firstGreaterOffset
         // is still -1 here and SetOffset(-1) is applied — looks suspicious;
         // confirm whether callers guarantee a clip beyond t0.
         SetOffset(firstGreaterOffset );
      }
   
   return true;
}
Ejemplo n.º 13
0
	// Draw the "Profiled Information" section starting at the given top
	// y-coordinate: a section header, column headers (ID / Time(ns) /
	// Processor), then one row per profile that has a recorded log, all
	// over a translucent background sized to the row count.
	void ProfilerViewer_Imp::DrawProfiledInfoSection(int top)
	{
		DrawTextSprite(
			Vector2DF(COLUMN_HEADER_OFFSET, top),
			SECTION_HEADER_COLOR,
			ToAString("Profiled Information"));

		DrawTextSprite(
			Vector2DF(COLUMN1_OFFSET, top + ROW_HEIGHT),
			HEADER_COLOR,
			ToAString("ID"));

		DrawTextSprite(
			Vector2DF(COLUMN2_OFFSET, top + ROW_HEIGHT),
			HEADER_COLOR,
			ToAString("Time(ns)"));

		DrawTextSprite(
			Vector2DF(COLUMN3_OFFSET, top + ROW_HEIGHT),
			HEADER_COLOR,
			ToAString("Processor"));

		// Body rows begin below the two header rows.
		int bodyTop = top + ROW_HEIGHT * 2;
		int index = 0;
		for (auto& profile : m_profiler->GetProfiles())
		{
			auto perf = profile->GetLastLog();
			if (perf == nullptr)
			{
				// Profile has not recorded anything yet; skip its row.
				continue;
			}

			// Elapsed time of the most recent measurement.
			int time = perf->GetEndTime() - perf->GetStartTime();

			DrawTextSprite(
				Vector2DF(COLUMN1_OFFSET, bodyTop + index * ROW_HEIGHT),
				CONTENT_COLOR,
				ToAString(to_string(profile->GetID()).c_str()));

			DrawTextSprite(
				Vector2DF(COLUMN2_OFFSET, bodyTop + index * ROW_HEIGHT),
				CONTENT_COLOR,
				ToAString(to_string(time).c_str()));

			DrawTextSprite(
				Vector2DF(COLUMN3_OFFSET, bodyTop + index * ROW_HEIGHT),
				CONTENT_COLOR,
				ToAString(to_string(perf->GetProcessorNumber()).c_str()));

			++index;
		}

		// Background panel behind headers and all drawn rows.
		DrawSprite(RectF(0, top, m_windowSize.X, (index + 2)*ROW_HEIGHT + SECTION_SPAN), Color(0, 64, 64, 128), 0);
	}
// Collect snap times for this animation section: the base-class snap times
// (section borders, when requested) plus every animation-loop boundary that
// falls within the section's [start, end] range.
void UMovieSceneAnimationSection::GetSnapTimes(TArray<float>& OutSnapTimes, bool bGetSectionBorders) const
{
	Super::GetSnapTimes(OutSnapTimes, bGetSectionBorders);

	const float Duration = GetAnimationDuration();

	// BUGFIX: a zero or negative duration would leave CurrentTime stuck and
	// spin the loop below forever; there are no loop boundaries to add then.
	if (Duration <= 0.f)
	{
		return;
	}

	// Step through successive loop start times and keep those inside the
	// visible section range.
	float CurrentTime = GetAnimationStartTime();
	while (CurrentTime <= GetEndTime())
	{
		if (CurrentTime >= GetStartTime())
		{
			OutSnapTimes.Add(CurrentTime);
		}
		CurrentTime += Duration;
	}
}
Ejemplo n.º 15
0
// Format the appointment's end time into strEndTime using szFormat (or a
// default "MM/dd/yyyy hh:mm:ss tt" picture). Returns TRUE on success,
// FALSE if the underlying SYSTEMTIME lookup fails.
BOOL CMAPIAppointment::GetEndTime(CString& strEndTime, LPCTSTR szFormat)
{
	SYSTEMTIME tm;
	if(GetEndTime(tm)) 
	{
		TCHAR szTime[256];
		if(!szFormat) szFormat=_T("MM/dd/yyyy hh:mm:ss tt");
		// First pass formats the date portion into szTime.
		GetDateFormat(LOCALE_SYSTEM_DEFAULT, 0, &tm, szFormat, szTime, 256);
		// NOTE(review): szTime is passed as BOTH the format picture and the
		// output buffer here — GetTimeFormat does not document in-place use,
		// and reusing the date output as a time format looks suspicious;
		// verify this actually yields the intended combined string.
		GetTimeFormat(LOCALE_SYSTEM_DEFAULT, 0, &tm, szTime, szTime, 256);
		strEndTime=szTime;
		return TRUE;
	}
	return FALSE;
}
Ejemplo n.º 16
0
// Fetch the event's end time converted to local time. On any failure the
// output structure is zeroed and false is returned.
bool CEventInfo::GetEndTimeLocal(SYSTEMTIME *pTime) const
{
	if (pTime == nullptr)
		return false;

	SYSTEMTIME stEnd;

	// Get the EPG end time, then convert it into the caller's buffer.
	if (GetEndTime(&stEnd)) {
		if (EpgTimeToLocalTime(&stEnd, pTime))
			return true;
	}

	// Failure path: make sure the caller never sees stale data.
	::ZeroMemory(pTime, sizeof(SYSTEMTIME));

	return false;
}
Ejemplo n.º 17
0
bool Track::SyncLockAdjust(double oldT1, double newT1)
{
   if (newT1 > oldT1) {
      // Insert space within the track

      if (oldT1 > GetEndTime())
         return true;

      auto tmp = Cut(oldT1, GetEndTime());
      if (!tmp) return false;

      bool ret = Paste(newT1, tmp.get());
      wxASSERT(ret); // TODO: handle this.

      return ret;
   }
   else if (newT1 < oldT1) {
      // Remove from the track
      return Clear(newT1, oldT1);
   }

   // fall-through: no change
   return true;
}
Ejemplo n.º 18
0
// Try to put this actor's NPC into the generic scene schedule for the given
// choreo event, locking it there for the event's (padded) duration.
// Returns true if the NPC is now (or already was) running the scene
// schedule; false if it has no NPC or cannot be interrupted.
bool CFlex::EnterSceneSequence( CChoreoScene *scene, CChoreoEvent *event, bool bRestart )
{
	CAI_NPC *myNpc = MyNPCPointer( );

	if (!myNpc)
		return false;

	// 2 seconds past current event, or 0.2 seconds past end of scene, whichever is shorter
	float flDuration = MIN( 2.0, MIN( event->GetEndTime() - scene->GetTime() + 2.0, scene->FindStopTime() - scene->GetTime() + 0.2 ) );

	// Already in the scene schedule: just extend the lock.
	if (myNpc->IsCurSchedule( SCHED_SCENE_GENERIC ))
	{
		myNpc->AddSceneLock( flDuration );
		return true;
	}

	// for now, don't interrupt sequences that don't understand being interrupted
	if (myNpc->GetCurSchedule())
	{
		CAI_ScheduleBits testBits;
		myNpc->GetCurSchedule()->GetInterruptMask( &testBits );

		// COND_PROVOKED alone doesn't count as interruptible.
		testBits.Clear( COND_PROVOKED );

		if (testBits.IsAllClear()) 
		{
			return false;
		}
	}

	if (myNpc->IsInterruptable())
	{
		// Bail out of any scripted sequence before taking over.
		if (myNpc->Get_m_hCine())
		{
			// Assert( !(myNpc->GetFlags() & FL_FLY ) );
			myNpc->ExitScriptedSequence( );
		}

		myNpc->OnStartScene();
		myNpc->SetSchedule( SCHED_SCENE_GENERIC );
		myNpc->AddSceneLock( flDuration );
		return true;
	}

	return false;
}
Ejemplo n.º 19
0
/* Fill in any missing start/end time options from the input's own time
   range, warning the user for each bound that had to be defaulted, and
   abort with an error if the resulting range is reversed. */
void ReadOptionsDirectTimeRange(struct options_direct *opts,struct input *X)
{
  /* Default the lower bound from the input when the caller omitted it. */
  if (!opts->t_start_flag) {
    opts->t_start = GetStartTime(X);
    opts->t_start_flag = 1;
    mexWarnMsgIdAndTxt("STAToolkit:ReadOptionsTimeRange:missingParameter","Missing parameter start_time. Extracting from input: %f.\n",opts->t_start);
  }

  /* Likewise for the upper bound. */
  if (!opts->t_end_flag) {
    opts->t_end = GetEndTime(X);
    opts->t_end_flag = 1;
    mexWarnMsgIdAndTxt("STAToolkit:ReadOptionsTimeRange:missingParameter","Missing parameter end_time. Extracting from input: %f.\n",opts->t_end);
  }

  /* A reversed range is a hard error. */
  if (opts->t_start > opts->t_end)
    mexErrMsgIdAndTxt("STAToolkit:ReadOptionsTimeRange:badRange","Lower limit greater than upper limit for start_time and end_time.\n");
}
Ejemplo n.º 20
0
// Compute a CRC32 over this tag's identifying data: the phoneme text, its
// numeric code, and the start/end timestamps (in that order).
unsigned int CPhonemeTag::ComputeDataCheckSum()
{
	CRC32_t crc;
	CRC32_Init( &crc );

	// Text portion.
	CRC32_ProcessBuffer( &crc, m_szPhoneme, Q_strlen( m_szPhoneme ) );

	// Numeric phoneme code.
	int phonemeCode = GetPhonemeCode();
	CRC32_ProcessBuffer( &crc, &phonemeCode, sizeof( int ) );

	// Timestamps, start then end.
	float startTime = GetStartTime();
	CRC32_ProcessBuffer( &crc, &startTime, sizeof( float ) );

	float endTime = GetEndTime();
	CRC32_ProcessBuffer( &crc, &endTime, sizeof( float ) );

	CRC32_Final( &crc );
	return ( unsigned int )crc;
}
Ejemplo n.º 21
0
// Dump the current timeframe of the graph as tab-separated "t\tv" rows to
// the named file. When fAppend is set, tries to append to an existing file
// first, falling back to creating it (with a header) if that fails.
// Returns false if there is no data or the file cannot be opened.
bool C4TableGraph::DumpToFile(const StdStrBuf &rszFilename,
                              bool fAppend) const {
  assert(!!rszFilename);
  // nothing to write?
  if (!fWrapped && !iBackLogPos) return false;
  // try append if desired; create if unsuccessful
  CStdFile out;
  if (fAppend)
    if (!out.Append(rszFilename.getData())) fAppend = false;
  if (!fAppend) {
    if (!out.Create(rszFilename.getData())) return false;
    // print header
    out.WriteString("t\tv\n\r");
  }
  // write out current timeframe
  int iEndTime = GetEndTime();
  StdStrBuf buf;
  for (int iWriteTime = GetStartTime(); iWriteTime < iEndTime; ++iWriteTime) {
    buf.Format("%d\t%d\n\r", (int)iWriteTime, (int)GetValue(iWriteTime));
    out.WriteString(buf.getData());
  }
  return true;
}
Ejemplo n.º 22
0
// Export the current project to an MP3 file. A non-positive bitrate means
// "use the current preference"; otherwise the requested bitrate is written
// to prefs for the duration of the export and restored afterwards.
// Returns false if the project is empty or the export fails.
bool BatchCommands::WriteMp3File( const wxString Name, int bitrate )
{
   // Stereo export unless the project is mono.
   const bool Mp3Stereo = !IsMono();

   const double endTime = GetEndTime();
   if (endTime <= 0.0)
      return false;

   AudacityProject *project = GetActiveProject();

   if (bitrate <= 0)
   {
      // 'No' bitrate given, use the current default.
      // Use Mp3Stereo to control if export is to a stereo or mono file
      return ::ExportMP3(project, Mp3Stereo, Name, false, 0.0, endTime);
   }

   // Temporarily install the requested bitrate, export, then restore.
   const long prevBitRate = gPrefs->Read(wxT("/FileFormats/MP3Bitrate"), 128);
   gPrefs->Write(wxT("/FileFormats/MP3Bitrate"), bitrate);
   // Use Mp3Stereo to control if export is to a stereo or mono file
   const bool rc = ::ExportMP3(project, Mp3Stereo, Name, false, 0.0, endTime);
   gPrefs->Write(wxT("/FileFormats/MP3Bitrate"), prevBitRate);
   return rc;
}
Ejemplo n.º 23
0
	// Produce the display text for one column (identified by ID) of this
	// reservation row into pszText (at most MaxLength chars, always
	// NUL-initialized). Returns false for an unknown column ID.
	bool CReserveListView::CReserveListItem::GetText(int ID,LPTSTR pszText,int MaxLength) const
	{
		// Default to an empty string so unknown/empty fields stay blank.
		pszText[0]=_T('\0');

		switch (ID) {
		case COLUMN_TITLE:
			if (!m_pReserveData->title.empty())
				::lstrcpyn(pszText,m_pReserveData->title.c_str(),MaxLength);
			break;

		case COLUMN_TIME:
			{
				// "start 〜 end" where the end is derived from start +
				// duration and shown as time-of-day only.
				SYSTEMTIME EndTime;
				TCHAR szStartTime[64],szEndTime[64];

				GetEndTime(m_pReserveData->startTime,m_pReserveData->durationSecond,&EndTime);
				FormatSystemTime(m_pReserveData->startTime,szStartTime, _countof(szStartTime),
								 SYSTEMTIME_FORMAT_TIME | SYSTEMTIME_FORMAT_SECONDS);
				FormatSystemTime(EndTime,szEndTime, _countof(szEndTime),
								 SYSTEMTIME_FORMAT_TIMEONLY | SYSTEMTIME_FORMAT_SECONDS);
				FormatString(pszText,MaxLength,TEXT("%s 〜 %s"),szStartTime,szEndTime);
			}
			break;

		case COLUMN_SERVICE:
			if (!m_pReserveData->stationName.empty())
				::lstrcpyn(pszText,m_pReserveData->stationName.c_str(),MaxLength);
			break;

		case COLUMN_STATUS:
			{
				// Status text: "disabled" when recording is off, otherwise
				// derived from the overlap (tuner conflict) state.
				LPCTSTR pszStatus=NULL;

				if (m_pReserveData->recSetting.recMode==RECMODE_NO) {
					pszStatus=TEXT("無効");
				} else {
					switch (m_pReserveData->overlapMode) {
					case 0:	pszStatus=TEXT("正常");				break;
					case 1:	pszStatus=TEXT("一部実行");			break;
					case 2:	pszStatus=TEXT("チューナー不足");	break;
					}
				}
				if (pszStatus!=NULL)
					::lstrcpyn(pszText,pszStatus,MaxLength);
			}
			break;

		case COLUMN_RECMODE:
			{
				// Human-readable name for the recording mode.
				LPCTSTR pszRecMode;

				switch (m_pReserveData->recSetting.recMode) {
				case RECMODE_ALL:
					pszRecMode=TEXT("全サービス");
					break;
				case RECMODE_SERVICE:
					pszRecMode=TEXT("指定サービス");
					break;
				case RECMODE_ALL_NOB25:
					pszRecMode=TEXT("全サービス(スクランブル解除なし)");
					break;
				case RECMODE_SERVICE_NOB25:
					pszRecMode=TEXT("指定サービス(スクランブル解除なし)");
					break;
				case RECMODE_VIEW:
					pszRecMode=TEXT("視聴");
					break;
				case RECMODE_NO:
					pszRecMode=TEXT("無効");
					break;
				case RECMODE_EPG:
					pszRecMode=TEXT("EPG取得");
					break;
				default:
					pszRecMode=NULL;
				}
				if (pszRecMode!=NULL)
					::lstrcpyn(pszText,pszRecMode,MaxLength);
			}
			break;

		case COLUMN_PRIORITY:
			FormatInt(m_pReserveData->recSetting.priority,pszText,MaxLength);
			break;

		default:
			return false;
		}

		return true;
	}
Ejemplo n.º 24
0
// Run a stereo pair of tracks through SoundTouch: feed interleaved blocks
// of [start, end) through the processor, collect the output into fresh
// tracks, then splice those back over [mCurT0, mCurT1] of the originals.
// Returns false if the user cancels via the progress dialog.
bool EffectSoundTouch::ProcessStereo(WaveTrack* leftTrack, WaveTrack* rightTrack,
                                     sampleCount start, sampleCount end)
{
   mSoundTouch->setSampleRate((unsigned int)(leftTrack->GetRate()+0.5));

   auto outputLeftTrack = mFactory->NewWaveTrack(leftTrack->GetSampleFormat(),
                                                       leftTrack->GetRate());
   auto outputRightTrack = mFactory->NewWaveTrack(rightTrack->GetSampleFormat(),
                                                        rightTrack->GetRate());

   //Get the length of the buffer (as double). len is
   //used simple to calculate a progress meter, so it is easier
   //to make it a double now than it is to do it later
   double len = (double)(end - start);

   //Initiate a processing buffer.  This buffer will (most likely)
   //be shorter than the length of the track being processed.
   // Make soundTouchBuffer twice as big as MaxBlockSize for each channel,
   // because Soundtouch wants them interleaved, i.e., each
   // Soundtouch sample is left-right pair.
   sampleCount maxBlockSize = leftTrack->GetMaxBlockSize();
   float* leftBuffer = new float[maxBlockSize];
   float* rightBuffer = new float[maxBlockSize];
   float* soundTouchBuffer = new float[maxBlockSize * 2];

   // Go through the track one stereo buffer at a time.
   // sourceSampleCount counts the sample at which the current buffer starts,
   // per channel.
   sampleCount sourceSampleCount = start;
   while (sourceSampleCount < end) {
      //Get a block of samples (smaller than the size of the buffer)
      sampleCount blockSize = leftTrack->GetBestBlockSize(sourceSampleCount);

      //Adjust the block size if it is the final block in the track
      if (sourceSampleCount + blockSize > end)
         blockSize = end - sourceSampleCount;

      // Get the samples from the tracks and put them in the buffers.
      leftTrack->Get((samplePtr)(leftBuffer), floatSample, sourceSampleCount, blockSize);
      rightTrack->Get((samplePtr)(rightBuffer), floatSample, sourceSampleCount, blockSize);

      // Interleave into soundTouchBuffer.
      for (int index = 0; index < blockSize; index++) {
         soundTouchBuffer[index*2]       = leftBuffer[index];
         soundTouchBuffer[(index*2)+1]   = rightBuffer[index];
      }

      //Add samples to SoundTouch
      mSoundTouch->putSamples(soundTouchBuffer, blockSize);

      //Get back samples from SoundTouch
      unsigned int outputCount = mSoundTouch->numSamples();
      if (outputCount > 0)
         this->ProcessStereoResults(outputCount, outputLeftTrack.get(), outputRightTrack.get());

      //Increment sourceSampleCount one blockfull of samples
      sourceSampleCount += blockSize;

      //Update the Progress meter
      // mCurTrackNum is left track. Include right track.
      int nWhichTrack = mCurTrackNum;
      double frac = (sourceSampleCount - start) / len;
      if (frac < 0.5)
         frac *= 2.0; // Show twice as far for each track, because we're doing 2 at once.
      else
      {
         nWhichTrack++;
         frac -= 0.5;
         frac *= 2.0; // Show twice as far for each track, because we're doing 2 at once.
      }
      if (TrackProgress(nWhichTrack, frac)) {
         // BUGFIX: the buffers were previously leaked on user cancel —
         // release them before the early return.
         delete [] leftBuffer;
         delete [] rightBuffer;
         delete [] soundTouchBuffer;
         return false;
      }
   }

   // Tell SoundTouch to finish processing any remaining samples
   mSoundTouch->flush();

   unsigned int outputCount = mSoundTouch->numSamples();
   if (outputCount > 0)
      this->ProcessStereoResults(outputCount, outputLeftTrack.get(), outputRightTrack.get());

   // Flush the output WaveTracks (since they're buffered, too)
   outputLeftTrack->Flush();
   outputRightTrack->Flush();

   // Clean up the buffers.
   delete [] leftBuffer;
   delete [] rightBuffer;
   delete [] soundTouchBuffer;

   // Take the output tracks and insert in place of the original
   // sample data.
   leftTrack->ClearAndPaste(mCurT0, mCurT1, outputLeftTrack.get(), true, false, GetTimeWarper());
   rightTrack->ClearAndPaste(mCurT0, mCurT1, outputRightTrack.get(), true, false, GetTimeWarper());

   // Track the longest result length
   double newLength = outputLeftTrack->GetEndTime();
   m_maxNewLength = wxMax(m_maxNewLength, newLength);
   newLength = outputRightTrack->GetEndTime();
   m_maxNewLength = wxMax(m_maxNewLength, newLength);

   //Return true because the effect processing succeeded.
   return true;
}
Ejemplo n.º 25
0
// When a player (re)joins a battleground that has already finished
// (STATUS_WAIT_LEAVE), block their movement and send them the final PvP
// log plus an in-progress status packet so their client shows the result.
void BattleGround::PlayerAddedToBGCheckIfBGIsRunning(Player* plr)
{
    // Only relevant once the battleground is in its leave-wait phase.
    if (GetStatus() != STATUS_WAIT_LEAVE)
        return;

    BlockMovement(plr);

    WorldPacket data;

    // Final scoreboard.
    sBattleGroundMgr.BuildPvpLogDataPacket(&data, this);
    plr->GetSession()->SendPacket(&data);

    // Status update for this player's queue slot.
    BattleGroundQueueTypeId bgQueueTypeId = BattleGroundMgr::BGQueueTypeId(GetTypeID());
    sBattleGroundMgr.BuildBattleGroundStatusPacket(&data, this, plr->GetBattleGroundQueueIndex(bgQueueTypeId), STATUS_IN_PROGRESS, GetEndTime(), GetStartTime());
    plr->GetSession()->SendPacket(&data);
}
Ejemplo n.º 26
0
// ProcessOne() takes a track, transforms it to bunch of buffer-blocks,
// and calls libsamplerate code on these blocks.
// Resamples [start, end) of the given track by mFactor into a fresh track,
// then splices the result over [mCurT0, mCurT1] with a linear time warper.
// Returns false on null track or user cancel.
bool EffectChangeSpeed::ProcessOne(WaveTrack * track,
                           sampleCount start, sampleCount end)
{
   if (track == NULL)
      return false;

   // initialization, per examples of Mixer::Mixer and
   // EffectSoundTouch::ProcessOne

   auto outputTrack = mFactory->NewWaveTrack(track->GetSampleFormat(),
                                                    track->GetRate());

   //Get the length of the selection (as double). len is
   //used simple to calculate a progress meter, so it is easier
   //to make it a double now than it is to do it later
   auto len = (end - start).as_double();

   // Initiate processing buffers, most likely shorter than
   // the length of the selection being processed.
   auto inBufferSize = track->GetMaxBlockSize();

   Floats inBuffer{ inBufferSize };

   // mFactor is at most 100-fold so this shouldn't overflow size_t
   auto outBufferSize = size_t( mFactor * inBufferSize + 10 );
   Floats outBuffer{ outBufferSize };

   // Set up the resampling stuff for this track.
   Resample resample(true, mFactor, mFactor); // constant rate resampling

   //Go through the track one buffer at a time. samplePos counts which
   //sample the current buffer starts at.
   bool bResult = true;
   auto samplePos = start;
   while (samplePos < end) {
      //Get a blockSize of samples (smaller than the size of the buffer)
      auto blockSize = limitSampleBufferSize(
         track->GetBestBlockSize(samplePos),
         end - samplePos
      );

      //Get the samples from the track and put them in the buffer
      track->Get((samplePtr) inBuffer.get(), floatSample, samplePos, blockSize);

      // The final-block flag tells the resampler to flush its state.
      const auto results = resample.Process(mFactor,
                                    inBuffer.get(),
                                    blockSize,
                                    ((samplePos + blockSize) >= end),
                                    outBuffer.get(),
                                    outBufferSize);
      const auto outgen = results.second;

      if (outgen > 0)
         outputTrack->Append((samplePtr)outBuffer.get(), floatSample,
                             outgen);

      // Increment samplePos
      // (by the number of input samples the resampler consumed).
      samplePos += results.first;

      // Update the Progress meter
      if (TrackProgress(mCurTrackNum, (samplePos - start).as_double() / len)) {
         bResult = false;
         break;
      }
   }

   // Flush the output WaveTrack (since it's buffered, too)
   outputTrack->Flush();

   // Take the output track and insert it in place of the original
   // sample data
   double newLength = outputTrack->GetEndTime();
   if (bResult)
   {
      // Map [mCurT0, mCurT1] linearly onto the new (resampled) duration.
      LinearTimeWarper warper { mCurT0, mCurT0, mCurT1, mCurT0 + newLength };
      bResult = track->ClearAndPaste(
         mCurT0, mCurT1, outputTrack.get(), true, false, &warper);
   }

   if (newLength > mMaxNewLength)
      mMaxNewLength = newLength;

   return bResult;
}
Ejemplo n.º 27
0
// Apply the Paulstretch algorithm to [t0, t1] of the given track: feed the
// selection through a PaulStretch processor, crossfade the first and last
// fade_len samples against the original audio to avoid clicks, and replace
// the selection with the stretched result. Returns false on allocation
// failure, a too-short selection, or user cancel.
bool EffectPaulstretch::ProcessOne(WaveTrack *track,double t0,double t1,int count)
{
   auto badAllocMessage = _("Requested value exceeds memory capacity.");

   const auto stretch_buf_size = GetBufferSize(track->GetRate());
   if (stretch_buf_size == 0) {
      ::Effect::MessageBox( badAllocMessage );
      return false;
   }

   double amount = this->mAmount;

   auto start = track->TimeToLongSamples(t0);
   auto end = track->TimeToLongSamples(t1);
   auto len = end - start;

   // The algorithm needs at least two full windows of audio.
   const auto minDuration = stretch_buf_size * 2 + 1;
   if (minDuration < stretch_buf_size) {
      // overflow!
      ::Effect::MessageBox( badAllocMessage );
      return false;
   }

   if (len < minDuration) {   //error because the selection is too short

      // Largest usable 'Time Resolution' for this selection, in seconds.
      float maxTimeRes = log( len.as_double() ) / log(2.0);
      maxTimeRes = pow(2.0, floor(maxTimeRes) + 0.5);
      maxTimeRes = maxTimeRes / track->GetRate();

      if (this->IsPreviewing()) {
         double defaultPreviewLen;
         gPrefs->Read(wxT("/AudioIO/EffectsPreviewLen"), &defaultPreviewLen, 6.0);

         /* i18n-hint: 'Time Resolution' is the name of a control in the Paulstretch effect.*/
         if ((minDuration / mProjectRate) < defaultPreviewLen) {
            ::Effect::MessageBox (wxString::Format(_("Audio selection too short to preview.\n\n"
                                               "Try increasing the audio selection to at least %.1f seconds,\n"
                                               "or reducing the 'Time Resolution' to less than %.1f seconds."),
                                             (minDuration / track->GetRate()) + 0.05, // round up to 1/10 s.
                                             floor(maxTimeRes * 10.0) / 10.0),
                            wxOK | wxICON_EXCLAMATION);
         }
         else {
            /* i18n-hint: 'Time Resolution' is the name of a control in the Paulstretch effect.*/
            ::Effect::MessageBox (wxString::Format(_("Unable to Preview.\n\n"
                                               "For the current audio selection, the maximum\n"
                                               "'Time Resolution' is %.1f seconds."),
                                             floor(maxTimeRes * 10.0) / 10.0),
                            wxOK | wxICON_EXCLAMATION);
         }
      }
      else {
         /* i18n-hint: 'Time Resolution' is the name of a control in the Paulstretch effect.*/
         ::Effect::MessageBox (wxString::Format(_("The 'Time Resolution' is too long for the selection.\n\n"
                                            "Try increasing the audio selection to at least %.1f seconds,\n"
                                            "or reducing the 'Time Resolution' to less than %.1f seconds."),
                                          (minDuration / track->GetRate()) + 0.05, // round up to 1/10 s.
                                          floor(maxTimeRes * 10.0) / 10.0),
                         wxOK | wxICON_EXCLAMATION);
      }

      return false;
   }


   // Compensate for the two window-lengths the algorithm effectively
   // consumes so the overall stretch matches the requested amount.
   auto dlen = len.as_double();
   double adjust_amount = dlen /
      (dlen - ((double)stretch_buf_size * 2.0));
   amount = 1.0 + (amount - 1.0) * adjust_amount;

   auto outputTrack = mFactory->NewWaveTrack(track->GetSampleFormat(),track->GetRate());

   try {
      // This encloses all the allocations of buffers, including those in
      // the constructor of the PaulStretch object

      PaulStretch stretch(amount, stretch_buf_size, track->GetRate());

      auto nget = stretch.get_nsamples_for_fill();

      auto bufsize = stretch.poolsize;
      Floats buffer0{ bufsize };
      float *bufferptr0 = buffer0.get();
      bool first_time = true;

      const auto fade_len = std::min<size_t>(100, bufsize / 2 - 1);
      bool cancelled = false;

      {
         Floats fade_track_smps{ fade_len };
         decltype(len) s=0;

         while (s < len) {
            track->Get((samplePtr)bufferptr0, floatSample, start + s, nget);
            stretch.process(buffer0.get(), nget);

            // Prime the processor once so the first output block is valid.
            if (first_time) {
               stretch.process(buffer0.get(), 0);
            };

            s += nget;

            if (first_time){//blend the the start of the selection
               track->Get((samplePtr)fade_track_smps.get(), floatSample, start, fade_len);
               first_time = false;
               for (size_t i = 0; i < fade_len; i++){
                  float fi = (float)i / (float)fade_len;
                  stretch.out_buf[i] =
                     stretch.out_buf[i] * fi + (1.0 - fi) * fade_track_smps[i];
               }
            }
            if (s >= len){//blend the end of the selection
               track->Get((samplePtr)fade_track_smps.get(), floatSample, end - fade_len, fade_len);
               for (size_t i = 0; i < fade_len; i++){
                  float fi = (float)i / (float)fade_len;
                  auto i2 = bufsize / 2 - 1 - i;
                  stretch.out_buf[i2] =
                     stretch.out_buf[i2] * fi + (1.0 - fi) *
                     fade_track_smps[fade_len - 1 - i];
               }
            }

            outputTrack->Append((samplePtr)stretch.out_buf.get(), floatSample, stretch.out_bufsize);

            nget = stretch.get_nsamples();
            if (TrackProgress(count,
               s.as_double() / len.as_double()
            )) {
               cancelled = true;
               break;
            }
         }
      }

      if (!cancelled){
         outputTrack->Flush();

         // Replace the selection with the stretched audio and record the
         // new end of the affected region.
         track->Clear(t0,t1);
         track->Paste(t0, outputTrack.get());
         m_t1 = mT0 + outputTrack->GetEndTime();
      }
      
      return !cancelled;
   }
   catch ( const std::bad_alloc& ) {
      ::Effect::MessageBox( badAllocMessage );
      return false;
   }
};
Ejemplo n.º 28
0
// Remove the audio in [t0, t1] from this clip, preserving it (together with
// any cutlines that fell inside the region) as a new cutline clip.
// Returns true on success; returns false (with no change to the cutline
// list) if the copy or the sequence deletion fails.
bool WaveClip::ClearAndAddCutLine(double t0, double t1)
{
   // Region entirely outside this clip: nothing to remove, report success.
   if (t0 > GetEndTime() || t1 < GetStartTime())
      return true; // time out of bounds

   WaveClip *newClip = new WaveClip(mSequence->GetDirManager(),
                                    mSequence->GetSampleFormat(),
                                    mRate);

   // Clamp the requested region to the clip's own extent before copying.
   double clip_t0 = t0;
   double clip_t1 = t1;
   if (clip_t0 < GetStartTime())
      clip_t0 = GetStartTime();
   if (clip_t1 > GetEndTime())
      clip_t1 = GetEndTime();

   if (!newClip->CreateFromCopy(clip_t0, clip_t1, this))
   {
      // BUGFIX: newClip was leaked here before; mirror the delete done on
      // the Delete() failure path below.
      delete newClip;
      return false;
   }
   newClip->SetOffset(clip_t0-mOffset);

   // Sort out cutlines that belong to the new cutline: any existing cutline
   // positioned inside [t0, t1] is re-parented onto newClip; cutlines past
   // t1 are shifted left by the length of the removed region.
   WaveClipList::compatibility_iterator nextIt;

   for (WaveClipList::compatibility_iterator it = mCutLines.GetFirst(); it; it=nextIt)
   {
      nextIt = it->GetNext();           // grab next first; we may unlink 'it'
      WaveClip* clip = it->GetData();
      double cutlinePosition = mOffset + clip->GetOffset();
      if (cutlinePosition >= t0 && cutlinePosition <= t1)
      {
         // Cutline lies inside the removed region: move it into newClip,
         // re-expressing its offset relative to newClip.
         clip->SetOffset(cutlinePosition - newClip->GetOffset() - mOffset);
         newClip->mCutLines.Append(clip);
         mCutLines.DeleteNode(it);
      } else
      if (cutlinePosition >= t1)
      {
         // Cutline lies after the removed region: shift left by its length.
         clip->Offset(clip_t0-clip_t1);
      }
   }

   // Clear actual audio data
   sampleCount s0, s1;

   TimeToSamplesClip(t0, &s0);
   TimeToSamplesClip(t1, &s1);

   if (GetSequence()->Delete(s0, s1-s0))
   {
      // Collapse envelope over the removed span, and keep the clip's start
      // pinned if the deletion reached before it.
      GetEnvelope()->CollapseRegion(t0, t1);
      if (t0 < GetStartTime())
         Offset(-(GetStartTime() - t0));

      MarkChanged();

      // Ownership of newClip passes to the cutline list.
      mCutLines.Append(newClip);
      return true;
   } else
   {
      delete newClip;
      return false;
   }
}
Ejemplo n.º 29
0
	// Remaining playback time on this curve: the curve's end time minus the
	// current position (m_fCurrentTime).
	float	cCurveWithTime::GetRestTime()
	{
		return GetEndTime() - this->m_fCurrentTime;
	}
Ejemplo n.º 30
0
// TIDY-ME: Get rid of special commands and make them part of the
// 'menu' system (but not showing on the menu)
//
// ======= IMPORTANT ========
// Special Commands are a KLUDGE whilst we wait for a better system to handle the menu
// commands from batch mode.
//
// Really we should be using a similar (or same) system to that used for effects
// so that parameters can be passed to the commands.  Many of the menu
// commands take a selection as their parameter.
//
// If you find yourself adding lots of existing commands from the menus here, STOP
// and think again.  
// ======= IMPORTANT ========
// CLEANSPEECH remnant
// Dispatch one of the hard-coded "special" batch commands (mostly exports)
// that are not routed through the normal menu/effect system.
// 'iCommand' is accepted but unused; 'command' selects the action by name;
// 'params' is only consulted by ReportAndSkip. Returns true on success (or
// when the command is deliberately a no-op), false on failure or when the
// command name is unrecognized.
bool BatchCommands::ApplySpecialCommand(int WXUNUSED(iCommand), const wxString command,const wxString params)
{
   // In "report" mode, just log the command instead of executing it.
   if (ReportAndSkip(command, params))
      return true;

   AudacityProject *project = GetActiveProject();

   int numChannels = 1;    //used to switch between mono and stereo export
   if (IsMono()) {
      numChannels = 1;  //export in mono
   } else {
      numChannels = 2;  //export in stereo
   }

   // Pick the output extension from the command name so the generated
   // "clean" filename matches the format being exported; default is mp3.
   wxString filename;
   wxString extension; // required for correct message
   if (command == wxT("ExportWAV"))
      extension = wxT(".wav");
   else if (command == wxT("ExportOgg"))
      extension = wxT(".ogg");
   else if (command == wxT("ExportFLAC"))
      extension = wxT(".flac");
   else extension = wxT(".mp3");

   // Base the output name on the chain's file if one was set, otherwise on
   // the project's own file name.
   if (mFileName.IsEmpty()) {   
      filename = project->BuildCleanFileName(project->GetFileName(), extension);
   }
   else {
      filename = project->BuildCleanFileName(mFileName, extension);
   }

   // We have a command index, but we don't use it!
   // TODO: Make this special-batch-command code use the menu item code....
   // FIXME: No error reporting on write file failure in batch mode.
   if (command == wxT("NoAction")) {
      return true;
   } else if (!mFileName.IsEmpty() && command == wxT("Import")) {
      // historically this was in use, now ignored if there
      return true;
   } else if (command == wxT("ExportMP3_56k_before")) {
      // CLEANSPEECH-era presets: 56 kbps MP3 snapshots before/after cleaning,
      // distinguished only by the filename prefix.
      filename.Replace(wxT("cleaned/"), wxT("cleaned/MasterBefore_"), false);
      return WriteMp3File(filename, 56);
   } else if (command == wxT("ExportMP3_56k_after")) {
      filename.Replace(wxT("cleaned/"), wxT("cleaned/MasterAfter_"), false);
      return WriteMp3File(filename, 56);
   } else if (command == wxT("StereoToMono")) {
      // StereoToMono is an effect masquerading as a menu item.
      Effect * f = EffectManager::Get().GetEffectByIdentifier(wxT("StereoToMono"));
      if (f != NULL) {
         return ApplyEffectCommand(f, command, params);
      }
      wxMessageBox(_("Stereo to Mono Effect not found"));
      return false;
   } else if (command == wxT("ExportMP3")) {
      return WriteMp3File(filename, 0); // 0 bitrate means use default/current
   } else if (command == wxT("ExportWAV")) {
      // filename was built with the right extension above, but any literal
      // ".mp3" embedded in the path is also rewritten here.
      filename.Replace(wxT(".mp3"), wxT(".wav"), false);
      double endTime = GetEndTime();
      if (endTime <= 0.0f) {
         // Nothing to export (empty/invalid time range).
         return false;
      }
      return mExporter.Process(project, numChannels, wxT("WAV"), filename, false, 0.0, endTime);
   } else if (command == wxT("ExportOgg")) {
#ifdef USE_LIBVORBIS
      filename.Replace(wxT(".mp3"), wxT(".ogg"), false);
      double endTime = GetEndTime();
      if (endTime <= 0.0f) {
         return false;
      }
      return mExporter.Process(project, numChannels, wxT("OGG"), filename, false, 0.0, endTime);
#else
      // Build-time feature gate: Ogg export compiled out.
      wxMessageBox(_("Ogg Vorbis support is not included in this build of Audacity"));
      return false;
#endif
   } else if (command == wxT("ExportFLAC")) {
#ifdef USE_LIBFLAC
      filename.Replace(wxT(".mp3"), wxT(".flac"), false);
      double endTime = GetEndTime();
      if (endTime <= 0.0f) {
         return false;
      }
      return mExporter.Process(project, numChannels, wxT("FLAC"), filename, false, 0.0, endTime);
#else
      // Build-time feature gate: FLAC export compiled out.
      wxMessageBox(_("FLAC support is not included in this build of Audacity"));
      return false;
#endif
   } 
   // Unrecognized command name: report and fail.
   wxMessageBox(wxString::Format(_("Command %s not implemented yet"),command.c_str()));
   return false;
}