Code example #1
File: SBSMSEffect.cpp  Project: GYGit/Audacity
bool EffectSBSMS::Process()
{
   bool bGoodResult = true;

   //Iterate over each track
   //Track::All is needed because this effect needs to introduce silence in the group tracks to keep sync
   this->CopyInputTracks(Track::All); // Set up mOutputTracks.
   TrackListIterator iter(mOutputTracks);
   Track* t;
   mCurTrackNum = 0;

   double maxDuration = 0.0;

   // Must sync if selection length will change
   bool mustSync = (rateStart != rateEnd);
   Slide rateSlide(rateSlideType,rateStart,rateEnd);
   Slide pitchSlide(pitchSlideType,pitchStart,pitchEnd);
   mTotalStretch = rateSlide.getTotalStretch();

   t = iter.First();
   while (t != NULL) {
      if (t->GetKind() == Track::Label &&
            (t->GetSelected() || (mustSync && t->IsSyncLockSelected())) )
      {
         if (!ProcessLabelTrack(t)) {
            bGoodResult = false;
            break;
         }
      }
      else if (t->GetKind() == Track::Wave && t->GetSelected() )
      {
         WaveTrack* leftTrack = (WaveTrack*)t;

         //Get start and end times from track
         mCurT0 = leftTrack->GetStartTime();
         mCurT1 = leftTrack->GetEndTime();

         //Clip the current bounds to the selection: use the later
         //of the two start times and the earlier of the two end times
         mCurT0 = wxMax(mT0, mCurT0);
         mCurT1 = wxMin(mT1, mCurT1);

         // Process only if the right marker is to the right of the left marker
         if (mCurT1 > mCurT0) {
            sampleCount start;
            sampleCount end;
            start = leftTrack->TimeToLongSamples(mCurT0);
            end = leftTrack->TimeToLongSamples(mCurT1);

            WaveTrack* rightTrack = NULL;
            if (leftTrack->GetLinked()) {
               double t;
               rightTrack = (WaveTrack*)(iter.Next());

               //Adjust the bounds by the right track's markers
               t = rightTrack->GetStartTime();
               t = wxMax(mT0, t);
               mCurT0 = wxMin(mCurT0, t);
               t = rightTrack->GetEndTime();
               t = wxMin(mT1, t);
               mCurT1 = wxMax(mCurT1, t);

               //Transform the marker timepoints to samples
               start = leftTrack->TimeToLongSamples(mCurT0);
               end = leftTrack->TimeToLongSamples(mCurT1);

               mCurTrackNum++; // Increment for rightTrack, too.
            }
            sampleCount trackStart = leftTrack->TimeToLongSamples(leftTrack->GetStartTime());
            sampleCount trackEnd = leftTrack->TimeToLongSamples(leftTrack->GetEndTime());

            // SBSMS has a fixed sample rate - we just convert to its sample rate and then convert back
            float srTrack = leftTrack->GetRate();
            float srProcess = bLinkRatePitch?srTrack:44100.0;

            // the resampler needs a callback to supply its samples
            ResampleBuf rb;
            sampleCount maxBlockSize = leftTrack->GetMaxBlockSize();
            rb.blockSize = maxBlockSize;
            rb.buf = (audio*)calloc(rb.blockSize,sizeof(audio));
            rb.leftTrack = leftTrack;
            rb.rightTrack = rightTrack?rightTrack:leftTrack;
            rb.leftBuffer = (float*)calloc(maxBlockSize,sizeof(float));
            rb.rightBuffer = (float*)calloc(maxBlockSize,sizeof(float));

            // Samples in selection
            sampleCount samplesIn = end-start;

            // Samples for SBSMS to process after resampling
            sampleCount samplesToProcess = (sampleCount) ((float)samplesIn*(srProcess/srTrack));

            SlideType outSlideType;
            SBSMSResampleCB outResampleCB;

            sampleCount processPresamples = 0;
            sampleCount trackPresamples = 0;

            if(bLinkRatePitch) {
              rb.bPitch = true;
              outSlideType = rateSlideType;
              outResampleCB = resampleCB;
              rb.offset = start;
              rb.end = end;
              rb.iface = new SBSMSInterfaceSliding(&rateSlide,&pitchSlide,
                                                       bPitchReferenceInput,
                                                       samplesToProcess,0,
                                                       NULL);
            } else {
              rb.bPitch = false;
              outSlideType = (srProcess==srTrack?SlideIdentity:SlideConstant);
              outResampleCB = postResampleCB;
              rb.ratio = srProcess/srTrack;
              rb.quality = new SBSMSQuality(&SBSMSQualityStandard);
              rb.resampler = new Resampler(resampleCB, &rb, srProcess==srTrack?SlideIdentity:SlideConstant);
              rb.sbsms = new SBSMS(rightTrack?2:1,rb.quality,true);
              rb.SBSMSBlockSize = rb.sbsms->getInputFrameSize();
              rb.SBSMSBuf = (audio*)calloc(rb.SBSMSBlockSize,sizeof(audio));

              processPresamples = wxMin(rb.quality->getMaxPresamples(),
                                        (long)((float)(start-trackStart)*(srProcess/srTrack)));
              trackPresamples = wxMin(start-trackStart,
                                      (long)((float)(processPresamples)*(srTrack/srProcess)));
              rb.offset = start - trackPresamples;
              rb.end = trackEnd;
              rb.iface = new SBSMSEffectInterface(rb.resampler,
                                                      &rateSlide,&pitchSlide,
                                                      bPitchReferenceInput,
                                                      samplesToProcess,processPresamples,
                                                      rb.quality);
            }

            Resampler resampler(outResampleCB,&rb,outSlideType);

            audio outBuf[SBSMSOutBlockSize];
            float outBufLeft[2*SBSMSOutBlockSize];
            float outBufRight[2*SBSMSOutBlockSize];

            // Samples in output after SBSMS
            sampleCount samplesToOutput = rb.iface->getSamplesToOutput();

            // Samples in output after resampling back
            sampleCount samplesOut = (sampleCount) ((float)samplesToOutput * (srTrack/srProcess));

            // Duration in track time
            double duration =  (mCurT1-mCurT0) * mTotalStretch;

            if(duration > maxDuration)
               maxDuration = duration;

            TimeWarper *warper = createTimeWarper(mCurT0,mCurT1,maxDuration,rateStart,rateEnd,rateSlideType);
            SetTimeWarper(warper);

            rb.outputLeftTrack = mFactory->NewWaveTrack(leftTrack->GetSampleFormat(),
                                                        leftTrack->GetRate());
            if(rightTrack)
               rb.outputRightTrack = mFactory->NewWaveTrack(rightTrack->GetSampleFormat(),
                                                            rightTrack->GetRate());
            long pos = 0;
            long outputCount = -1;

            // process
            while(pos<samplesOut && outputCount) {
               long frames;
               if(pos+SBSMSOutBlockSize>samplesOut) {
                  frames = samplesOut - pos;
               } else {
                  frames = SBSMSOutBlockSize;
               }
               outputCount = resampler.read(outBuf,frames);
               for(int i = 0; i < outputCount; i++) {
                  outBufLeft[i] = outBuf[i][0];
                  if(rightTrack)
                     outBufRight[i] = outBuf[i][1];
               }
               pos += outputCount;
               rb.outputLeftTrack->Append((samplePtr)outBufLeft, floatSample, outputCount);
               if(rightTrack)
                  rb.outputRightTrack->Append((samplePtr)outBufRight, floatSample, outputCount);

               double frac = (double)pos/(double)samplesOut;
               int nWhichTrack = mCurTrackNum;
               if(rightTrack) {
                  nWhichTrack = 2*(mCurTrackNum/2);
                  if (frac < 0.5)
                     frac *= 2.0; // Show twice as far for each track, because we're doing 2 at once.
                  else {
                     nWhichTrack++;
                     frac -= 0.5;
                     frac *= 2.0; // Show twice as far for each track, because we're doing 2 at once.
                  }
               }
               if (TrackProgress(nWhichTrack, frac))
                  return false;
            }
            rb.outputLeftTrack->Flush();
            if(rightTrack)
               rb.outputRightTrack->Flush();

            bool bResult =
               leftTrack->ClearAndPaste(mCurT0, mCurT1, rb.outputLeftTrack,
                                          true, false, GetTimeWarper());
            wxASSERT(bResult); // TO DO: Actually handle this.

            if(rightTrack)
            {
               bResult =
                  rightTrack->ClearAndPaste(mCurT0, mCurT1, rb.outputRightTrack,
                                             true, false, GetTimeWarper());
               wxASSERT(bResult); // TO DO: Actually handle this.
            }
         }
         mCurTrackNum++;
      }
      else if (mustSync && t->IsSyncLockSelected())
      {
         t->SyncLockAdjust(mCurT1, mCurT0 + (mCurT1 - mCurT0) * mTotalStretch);
      }
      //Iterate to the next track
      t = iter.Next();
   }

   if (bGoodResult)
      ReplaceProcessedTracks(bGoodResult);

   // Update selection
   mT0 = mCurT0;
   mT1 = mCurT0 + maxDuration;

   return bGoodResult;
}
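The key arithmetic in the example above is the conversion of sample counts between the track's native rate (srTrack) and SBSMS's processing rate (srProcess, fixed at 44100 Hz unless rate and pitch slides are linked). Below is a minimal standalone sketch of that round trip, assuming nothing beyond the formulas visible in the snippet; the function names are hypothetical.

#include <cstdint>

// Track rate -> processing rate, mirroring:
//   samplesToProcess = (sampleCount)((float)samplesIn * (srProcess / srTrack));
int64_t toProcessRate(int64_t samplesIn, float srTrack, float srProcess)
{
   return (int64_t)((float)samplesIn * (srProcess / srTrack));
}

// Processing rate -> track rate, mirroring:
//   samplesOut = (sampleCount)((float)samplesToOutput * (srTrack / srProcess));
int64_t toTrackRate(int64_t samplesToOutput, float srTrack, float srProcess)
{
   return (int64_t)((float)samplesToOutput * (srTrack / srProcess));
}

// Example: 5 seconds at 48 kHz is 240000 samples; at the 44.1 kHz processing rate
// that is about 220500 samples, and converting back gives roughly the original
// count (the float truncation above can drop a sample when the ratio is inexact).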
Code example #2
File: Noise.cpp  Project: ruthmagnus/audacity
bool EffectNoise::Process()
{
   if (noiseDuration <= 0.0)
      noiseDuration = sDefaultGenerateLen;

   //Iterate over each track
   int ntrack = 0;
   this->CopyInputWaveTracks(); // Set up mOutputWaveTracks.
   bool bGoodResult = true;
   
#ifdef EXPERIMENTAL_FULL_LINKING
   HandleLinkedTracksOnGenerate(noiseDuration, mT0);
#endif

   TrackListIterator iter(mOutputWaveTracks);
   WaveTrack *track = (WaveTrack *)iter.First();
   while (track) {
      WaveTrack *tmp = mFactory->NewWaveTrack(track->GetSampleFormat(), track->GetRate());
      numSamples = track->TimeToLongSamples(noiseDuration);
      sampleCount i = 0;
      float *data = new float[tmp->GetMaxBlockSize()];
      sampleCount block;

      while ((i < numSamples) && bGoodResult) {
         block = tmp->GetBestBlockSize(i);
         if (block > (numSamples - i))
             block = numSamples - i;

         MakeNoise(data, block, track->GetRate(), noiseAmplitude);

         tmp->Append((samplePtr)data, floatSample, block);
         i += block;

         //Update the Progress meter
         if (TrackProgress(ntrack, (double)i / numSamples))
            bGoodResult = false;
      }
      delete[] data;

      tmp->Flush();
      track->HandleClear(mT0, mT1, false, false);
      track->HandlePaste(mT0, tmp);
      delete tmp;

      if (!bGoodResult)
         break;

      //Iterate to the next track
      ntrack++;
      track = (WaveTrack *)iter.Next();
   }

   if (bGoodResult)
   {
      /*
         Save the last used values.
         Save the duration only when it was not taken from the selection,
         i.e. only when the user explicitly set a value.
      */
      if (mT1 == mT0)
         gPrefs->Write(wxT("/CsPresets/NoiseGen_Duration"), noiseDuration);

      gPrefs->Write(wxT("/CsPresets/NoiseGen_Type"), noiseType);
      gPrefs->Write(wxT("/CsPresets/NoiseGen_Amp"), noiseAmplitude);

      mT1 = mT0 + noiseDuration; // Update selection.
   }

   this->ReplaceProcessedWaveTracks(bGoodResult); 
   return bGoodResult;
}
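The generation loop above follows the pattern also used by the DTMF example further down: ask the destination track for a good block size, clamp it to the samples remaining, fill a buffer, append it, and report progress. A minimal sketch of that pattern, with a std::vector standing in for the temporary WaveTrack and a stand-in generator (both hypothetical, not Audacity API):

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <vector>

// Stand-in generator: fill len samples with white noise in [-amp, amp]
// (MakeNoise() plays this role in the snippet).
static void fillBlock(float *data, int64_t len, float amp)
{
   for (int64_t k = 0; k < len; k++)
      data[k] = amp * (2.0f * std::rand() / RAND_MAX - 1.0f);
}

// Generate numSamples samples in chunks of at most maxBlock, mirroring the
// GetBestBlockSize()/Append() loop in the snippet.
std::vector<float> generateBlocked(int64_t numSamples, int64_t maxBlock, float amp)
{
   std::vector<float> out;
   out.reserve((size_t)numSamples);
   std::vector<float> buf((size_t)maxBlock);

   for (int64_t i = 0; i < numSamples; ) {
      int64_t block = std::min(maxBlock, numSamples - i); // clamp the final chunk
      fillBlock(buf.data(), block, amp);
      out.insert(out.end(), buf.begin(), buf.begin() + block);
      i += block;                                         // progress would be i / numSamples
   }
   return out;
}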
Code example #3
File: ImportFFmpeg.cpp  Project: LBoggino/audacity
int FFmpegImportFileHandle::Import(TrackFactory *trackFactory,
              Track ***outTracks,
              int *outNumTracks,
              Tags *tags)
{

   CreateProgress();

   // Remove stream contexts which are not marked for importing and adjust mScs and mNumStreams accordingly
   for (int i = 0; i < mNumStreams;)
   {
      if (!mScs[i]->m_use)
      {
         delete mScs[i];
         for (int j = i; j < mNumStreams - 1; j++)
         {
            mScs[j] = mScs[j+1];
         }
         mNumStreams--;
      }
      else i++;
   }

   mChannels = new WaveTrack **[mNumStreams];

   for (int s = 0; s < mNumStreams; s++)
   {
      switch (mScs[s]->m_stream->codec->sample_fmt)
      {
         case AV_SAMPLE_FMT_U8:
         case AV_SAMPLE_FMT_S16:
         case AV_SAMPLE_FMT_U8P:
         case AV_SAMPLE_FMT_S16P:
            mScs[s]->m_osamplesize = sizeof(int16_t);
            mScs[s]->m_osamplefmt = int16Sample;
         break;
         default:
            mScs[s]->m_osamplesize = sizeof(float);
            mScs[s]->m_osamplefmt = floatSample;
         break;
      }

      // There is a possibility that the number of channels will change over time, but we do not have WaveTracks for new channels. Remember the initial number of channels and stick to it.
      mScs[s]->m_initialchannels = mScs[s]->m_stream->codec->channels;
      mChannels[s] = new WaveTrack *[mScs[s]->m_stream->codec->channels];
      int c;
      for (c = 0; c < mScs[s]->m_stream->codec->channels; c++)
      {
         mChannels[s][c] = trackFactory->NewWaveTrack(mScs[s]->m_osamplefmt, mScs[s]->m_stream->codec->sample_rate);

         if (mScs[s]->m_stream->codec->channels == 2)
         {
            switch (c)
            {
            case 0:
               mChannels[s][c]->SetChannel(Track::LeftChannel);
               mChannels[s][c]->SetLinked(true);
               break;
            case 1:
               mChannels[s][c]->SetChannel(Track::RightChannel);
               break;
            }
         }
         else
         {
            mChannels[s][c]->SetChannel(Track::MonoChannel);
         }
      }
   }

   // Handles the start_time by creating silence. This may or may not be correct.
   // There is a possibility that we should ignore the first N milliseconds of audio instead. I do not know.
   /// TODO: Nag FFmpeg devs about start_time until they finally say what it is and how to handle it.
   for (int s = 0; s < mNumStreams; s++)
   {
      int64_t stream_delay = 0;
      if (mScs[s]->m_stream->start_time != int64_t(AV_NOPTS_VALUE) && mScs[s]->m_stream->start_time > 0)
      {
         stream_delay = mScs[s]->m_stream->start_time;
         wxLogDebug(wxT("Stream %d start_time = %lld, that would be %f milliseconds."), s, (long long) mScs[s]->m_stream->start_time, double(mScs[s]->m_stream->start_time)/AV_TIME_BASE*1000);
      }
      if (stream_delay != 0)
      {
         for (int c = 0; c < mScs[s]->m_stream->codec->channels; c++)
         {
            WaveTrack *t = mChannels[s][c];
            t->InsertSilence(0,double(stream_delay)/AV_TIME_BASE);
         }
      }
   }
   // This is the heart of the importing process
   // The result of Import() to be returned. It will be nonzero if the user cancelled or an error occurred.
   int res = eProgressSuccess;

#ifdef EXPERIMENTAL_OD_FFMPEG
   mUsingOD = false;
   gPrefs->Read(wxT("/Library/FFmpegOnDemand"), &mUsingOD);
   //at this point we know the file is good and that we have to load the number of channels in mScs[s]->m_stream->codec->channels;
   //so for OD loading we create the tracks and release the modal lock after starting the ODTask.
   if (mUsingOD) {
      std::vector<ODDecodeFFmpegTask*> tasks;
      //append blockfiles to each stream and add an individual ODDecodeTask for each one.
      for (int s = 0; s < mNumStreams; s++) {
         ODDecodeFFmpegTask* odTask=new ODDecodeFFmpegTask(mScs,mNumStreams,mChannels,mFormatContext, s);
         odTask->CreateFileDecoder(mFilename);

         //each stream has a different duration.  We need to know it if seeking is to be allowed
         //(a standalone sketch of this sample-count computation follows the listing).
         sampleCount sampleDuration = 0;
         if (mScs[s]->m_stream->duration > 0)
            sampleDuration = ((sampleCount)mScs[s]->m_stream->duration * mScs[s]->m_stream->time_base.num) *mScs[s]->m_stream->codec->sample_rate / mScs[s]->m_stream->time_base.den;
         else
            sampleDuration = ((sampleCount)mFormatContext->duration *mScs[s]->m_stream->codec->sample_rate) / AV_TIME_BASE;

         //      printf(" OD duration samples %qi, sr %d, secs %d\n",sampleDuration, (int)mScs[s]->m_stream->codec->sample_rate,(int)sampleDuration/mScs[s]->m_stream->codec->sample_rate);

         //for each wavetrack within the stream add coded blockfiles
         for (int c = 0; c < mScs[s]->m_stream->codec->channels; c++) {
            WaveTrack *t = mChannels[s][c];
            odTask->AddWaveTrack(t);

            sampleCount maxBlockSize = t->GetMaxBlockSize();
            //use the maximum blockfile size to divide the sections (about 11secs per blockfile at 44.1khz)
            for (sampleCount i = 0; i < sampleDuration; i += maxBlockSize) {
               sampleCount blockLen = maxBlockSize;
               if (i + blockLen > sampleDuration)
                  blockLen = sampleDuration - i;

               t->AppendCoded(mFilename, i, blockLen, c,ODTask::eODFFMPEG);

               // This only works well for single streams since we assume
               // each stream has the same duration and channel count
               res = mProgress->Update(i+sampleDuration*c+ sampleDuration*mScs[s]->m_stream->codec->channels*s,
                                       sampleDuration*mScs[s]->m_stream->codec->channels*mNumStreams);
               if (res != eProgressSuccess)
                  break;
            }
         }
         tasks.push_back(odTask);
      }
      //Now we add the tasks and let them run, or delete them if the user cancelled
      for(int i=0; i < (int)tasks.size(); i++) {
         if(res==eProgressSuccess)
            ODManager::Instance()->AddNewTask(tasks[i]);
         else
            {
               delete tasks[i];
            }
      }
   } else {
#endif
   streamContext *sc = NULL;

   // Read next frame.
   while ((sc = ReadNextFrame()) != NULL && (res == eProgressSuccess))
   {
      // ReadNextFrame returns 1 if stream is not to be imported
      if (sc != (streamContext*)1)
      {
         // Decode frame until it is not possible to decode any further
         while (sc->m_pktRemainingSiz > 0 && (res == eProgressSuccess || res == eProgressStopped))
         {
            if (DecodeFrame(sc,false) < 0)
               break;

            // If something usable was decoded - write it to mChannels
            if (sc->m_frameValid)
               res = WriteData(sc);
         }

         // Cleanup after frame decoding
         if (sc->m_pktValid)
         {
            av_free_packet(&sc->m_pkt);
            sc->m_pktValid = 0;
         }
      }
   }

   // Flush the decoders.
   if ((mNumStreams != 0) && (res == eProgressSuccess || res == eProgressStopped))
   {
      for (int i = 0; i < mNumStreams; i++)
      {
         if (DecodeFrame(mScs[i], true) == 0)
         {
            WriteData(mScs[i]);

            if (mScs[i]->m_pktValid)
            {
               av_free_packet(&mScs[i]->m_pkt);
               mScs[i]->m_pktValid = 0;
            }
         }
      }
   }
#ifdef EXPERIMENTAL_OD_FFMPEG
   } // end of the !mUsingOD (non-OD) branch
#endif   //EXPERIMENTAL_OD_FFMPEG

   // Something bad happened - destroy everything!
   if (res == eProgressCancelled || res == eProgressFailed)
   {
      for (int s = 0; s < mNumStreams; s++)
      {
         delete[] mChannels[s];
      }
      delete[] mChannels;

      return res;
   }
   //else if (res == 2), we just stop the decoding as if the file has ended

   *outNumTracks = 0;
   for (int s = 0; s < mNumStreams; s++)
   {
      *outNumTracks += mScs[s]->m_initialchannels;
   }

   // Create new tracks
   *outTracks = new Track *[*outNumTracks];

   // Copy audio from mChannels to the newly created tracks (destroying the mChannels elements in the process)
   int trackindex = 0;
   for (int s = 0; s < mNumStreams; s++)
   {
      for(int c = 0; c < mScs[s]->m_initialchannels; c++)
      {
         mChannels[s][c]->Flush();
         (*outTracks)[trackindex++] = mChannels[s][c];
      }
      delete[] mChannels[s];
   }
   delete[] mChannels;

   // Save metadata
   WriteMetadata(tags);

   return res;
}
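In the on-demand (EXPERIMENTAL_OD_FFMPEG) branch above, each stream's length in samples is derived from FFmpeg's time-base arithmetic: a stream duration expressed in time_base units becomes seconds via num/den and samples via the codec sample rate, with the container duration (in AV_TIME_BASE units) as a fallback. A standalone sketch of just that computation, with a minimal stand-in for the FFmpeg fields the snippet reads:

#include <cstdint>

struct TimeBase { int num; int den; };       // stand-in for AVRational
static const int64_t kAVTimeBase = 1000000;  // AV_TIME_BASE: microseconds

// Mirrors the snippet:
//   sampleDuration = streamDuration * time_base.num * sample_rate / time_base.den
// when the stream carries its own duration, otherwise
//   sampleDuration = formatDuration * sample_rate / AV_TIME_BASE.
int64_t streamSampleCount(int64_t streamDuration, TimeBase tb,
                          int64_t formatDuration, int sampleRate)
{
   if (streamDuration > 0)
      return streamDuration * tb.num * sampleRate / tb.den;
   return formatDuration * sampleRate / kAVTimeBase;
}

// Example: a duration of 90000 in a 1/90000 time base is exactly one second,
// so at 44100 Hz this returns 44100 samples.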
Code example #4
File: DtmfGen.cpp  Project: ruthmagnus/audacity
bool EffectDtmf::Process()
{
   if (dtmfDuration <= 0.0)
      return false;

   //Iterate over each track
   this->CopyInputWaveTracks(); // Set up mOutputWaveTracks.
   bool bGoodResult = true;
   int ntrack = 0;
   TrackListIterator iter(mOutputWaveTracks);
   WaveTrack *track = (WaveTrack *)iter.First();
   while (track) {
      // new tmp track, to fill with dtmf sequence
      // we will build the track by adding a tone, then a silence, next tone, and so on...
      WaveTrack *tmp = mFactory->NewWaveTrack(track->GetSampleFormat(), track->GetRate());

      // all dtmf sequence durations in samples from seconds
      numSamplesSequence = (longSampleCount)(dtmfDuration * track->GetRate() + 0.5);
      numSamplesTone = (longSampleCount)(dtmfTone * track->GetRate() + 0.5);
      numSamplesSilence = (longSampleCount)(dtmfSilence * track->GetRate() + 0.5);

      // Recalculate the sum and spread the difference that is due to the approximations.
      // Since diff should be on the order of "some" samples, a plain division (resulting in zero)
      // is not sufficient, so we add the remaining samples to each tone/silence block,
      // one at a time, until none are left.
      int diff = numSamplesSequence - (dtmfNTones*numSamplesTone) - (dtmfNTones-1)*numSamplesSilence;
      if (diff>dtmfNTones) {
         // in this case both of these values change, so it makes sense to recalculate diff;
         // otherwise we just keep the value we already have

         // It should always be the case that dtmfNTones > 1: if it were 0 we would not even start
         // processing, and with 1 there is no difference to spread (no silence slot)...
         wxASSERT(dtmfNTones > 1);
         numSamplesTone += (diff/(dtmfNTones));
         numSamplesSilence += (diff/(dtmfNTones-1));
         diff = numSamplesSequence - (dtmfNTones*numSamplesTone) - (dtmfNTones-1)*numSamplesSilence;
      }
      // this var will be used as extra samples distributor
      int extra=0;

      longSampleCount i = 0;
      longSampleCount j = 0;
      int n=0; // index of the current character in dtmfString
      sampleCount block;
      bool isTone = true;
      float *data = new float[tmp->GetMaxBlockSize()];

      // For the whole dtmf sequence we will be generating either tone or silence
      // according to a bool flag, and this might be done in small chunks of size
      // 'block', as a single tone might sometimes be larger than the block;
      // tone and silence generally have different durations, hence the two generation branches.
      //
      // Note: to overcome a 'clicking' noise introduced by the abrupt transition from/to
      // silence, I added a fade in/out of 1/250th of a second (4 ms). This can still be
      // tweaked but gives excellent results at 44.1 kHz; I haven't tried other sample rates.
      // A problem might arise if the tone duration is very short (<10 ms)... (?)
      //
      // One more problem is dealing with the approximations made when calculating the durations
      // of both tone and silence: in some cases the final sum is not the same as the initial
      // duration. To overcome this, we did a redistribution pass above, and now we spread the
      // remaining samples into every bin in order to achieve the full duration. The test case was
      // an 11-tone DTMF sequence over 4 seconds with DutyCycle=75%: without the redistribution,
      // the generated audio ended up at 3.999 s, i.e. 3 seconds and 44097 samples.
      // (A standalone sketch of the redistribution step follows this listing.)
      //
      while ((i < numSamplesSequence) && bGoodResult) {
         if (isTone)
         // generate tone
         {
            // this statement extracts one sample from the diff bin and
            // adds it to the tone block, until the bin is depleted
            extra=(diff-- > 0?1:0);
            for(j=0; j < numSamplesTone+extra; j+=block) {
               block = tmp->GetBestBlockSize(j);
               if (block > (numSamplesTone+extra - j))
                   block = numSamplesTone+extra - j;

               // generate the tone and append
               MakeDtmfTone(data, block, track->GetRate(), dtmfString[n], j, numSamplesTone, dtmfAmplitude);
               tmp->Append((samplePtr)data, floatSample, block);
            }
            i += numSamplesTone;
            n++;
            if(n>=dtmfNTones)break;
         }
         else
         // generate silence
         {
            // this statement extracts one sample from the diff bin and
            // adds it to the silence block, until the bin is depleted
            extra=(diff-- > 0?1:0);
            for(j=0; j < numSamplesSilence+extra; j+=block) {
               block = tmp->GetBestBlockSize(j);
               if (block > (numSamplesSilence+extra - j))
                   block = numSamplesSilence+extra - j;

               // generate silence and append
               memset(data, 0, sizeof(float)*block);
               tmp->Append((samplePtr)data, floatSample, block);
            }
            i += numSamplesSilence;
         }
         // flip flag
         isTone=!isTone;

         //Update the Progress meter
         if (TrackProgress(ntrack, (double)i / numSamplesSequence))
            bGoodResult = false;
      } // finished the whole dtmf sequence

      delete[] data;

      tmp->Flush();
      track->Clear(mT0, mT1);
      track->Paste(mT0, tmp);
      delete tmp;

      if (!bGoodResult)
         break;

      //Iterate to the next track
      ntrack++;
      track = (WaveTrack *)iter.Next();
   }

   if (bGoodResult)
   {
      /*
         Save the last used values.
         Save the duration only when it was not taken from the selection,
         i.e. only when the user explicitly set a value.
      */
      if (mT1 == mT0)
         gPrefs->Write(wxT("/CsPresets/DtmfGen_SequenceDuration"), dtmfDuration);

      gPrefs->Write(wxT("/CsPresets/DtmfGen_String"), dtmfString);
      gPrefs->Write(wxT("/CsPresets/DtmfGen_DutyCycle"), dtmfDutyCycle);
      gPrefs->Write(wxT("/CsPresets/DtmfGen_Amplitude"), dtmfAmplitude);

      // Update selection: this is not accurate if my calculations are wrong.
      // To validate, once the effect is done, deselect, then select all and
      // check what selection length is reported (in seconds, ms, samples).
      mT1 = mT0 + dtmfDuration;
   }

   this->ReplaceProcessedWaveTracks(bGoodResult); 
   return bGoodResult;
}
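The duration bookkeeping in the example above can be isolated: given rounded per-tone and per-silence sample counts, compute how many samples the sequence falls short, fold whole-slot shares back in if the shortfall exceeds the number of tones, and leave the remainder to be handed out one sample per tone/silence slot during generation. A standalone sketch of that step; the illustrative numbers in the trailing comment are assumptions, not taken from the snippet.

#include <cassert>
#include <cstdint>

struct DtmfPlan {
   int64_t samplesTone;      // samples per tone slot
   int64_t samplesSilence;   // samples per silence slot
   int64_t diff;             // leftover samples, spread one per slot while generating
};

// Mirrors the redistribution step in the snippet: a sequence of nTones tones has
// nTones tone slots and nTones-1 silence slots; diff is whatever rounding left over.
DtmfPlan redistribute(int64_t samplesSequence, int64_t samplesTone,
                      int64_t samplesSilence, int nTones)
{
   assert(nTones > 1);
   int64_t diff = samplesSequence - nTones * samplesTone - (nTones - 1) * samplesSilence;
   if (diff > nTones) {
      samplesTone    += diff / nTones;
      samplesSilence += diff / (nTones - 1);
      diff = samplesSequence - nTones * samplesTone - (nTones - 1) * samplesSilence;
   }
   return { samplesTone, samplesSilence, diff };
}

// Illustrative numbers: an 11-tone sequence of 176400 samples (4 s at 44.1 kHz)
// with 12027-sample tones and 4410-sample silences sums to 176397, so diff = 3;
// those three samples are then added, one each, to the first three slots generated.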