Example #1
float ContrastDialog::GetDB()
{
   // FIXME: what if more than one track?
   float rms = float(0.0);

   AudacityProject *p = GetActiveProject();
   TrackListOfKindIterator iter(Track::Wave, p->GetTracks());
   Track *t = iter.First();
   if(mT0 > mT1)
   {
      wxMessageDialog m(NULL, _("Start time after end time!\nPlease enter reasonable times."), _("Error"), wxOK);
      m.ShowModal();
      return 1234.0; // 'magic number', but the whole +ve dB range will 'almost' never occur
   }
   if(mT0 < t->GetStartTime())
      mT0 = t->GetStartTime();
   if(mT1 > t->GetEndTime())
      mT1 = t->GetEndTime();
   if(mT0 > mT1)
   {
      wxMessageDialog m(NULL, _("Times are not reasonable!\nPlease enter reasonable times."), _("Error"), wxOK);
      m.ShowModal();
      return 1234.0;
   }
   if(mT0 == mT1)
   {
      wxMessageDialog m(NULL, _("Nothing to measure.\nPlease select a section of a track."), _("Error"), wxOK);
      m.ShowModal();
      return 1234.0;
   }
   bool mSelected = false;
   while(t) {  
      if( ((WaveTrack *)t)->GetSelected() )
      {
         if( mSelected == true ) // already measured one track
         {
            wxMessageDialog m(NULL, _("You can only measure one track at a time."), _("Error"), wxOK);
            m.ShowModal();
            return 1234.0;
         }
         else
         {
            ((WaveTrack *)t)->GetRMS(&rms, mT0, mT1);
            mSelected = true;
         }
      }
      t = iter.Next();
   }
   if( mSelected )
   {
      if( rms < 1.0E-30 )
         return -60.0;
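      // Convert the linear RMS value to decibels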
      return 20.0*log10(rms);
   }
   else
   {
      wxMessageDialog m(NULL, _("Please select something to be measured."), _("Error"), wxOK);
      m.ShowModal();
      return 1234.0;
   }
}
Example #2
bool EffectTruncSilence::Process()
{
   // Typical fraction of total time taken by detection (better to guess low)
   const double detectFrac = .4;

   // Copy tracks
   this->CopyInputTracks(Track::All);

   // Lower bound on the amount of silence to find at a time -- this avoids
   // detecting silence repeatedly in low-frequency sounds.
   const double minTruncMs = 0.001;
   double truncDbSilenceThreshold = Enums::Db2Signal[mTruncDbChoiceIndex];

   // Master list of silent regions; it is responsible for deleting them.
   // This list should always be kept in order.
   RegionList silences;
   silences.DeleteContents(true);

   // Start with the whole selection silent
   Region *sel = new Region;
   sel->start = mT0;
   sel->end = mT1;
   silences.push_back(sel);

   // Remove non-silent regions in each track
   SelectedTrackListOfKindIterator iter(Track::Wave, mTracks);
   int whichTrack = 0;
   for (Track *t = iter.First(); t; t = iter.Next())
   {
      WaveTrack *wt = (WaveTrack *)t;

      // Smallest silent region to detect in frames
      sampleCount minSilenceFrames =
            sampleCount(wxMax( mInitialAllowedSilence, minTruncMs) *
                  wt->GetRate());

      //
      // Scan the track for silences
      //
      RegionList trackSilences;
      trackSilences.DeleteContents(true);
      sampleCount blockLen = wt->GetMaxBlockSize();
      sampleCount start = wt->TimeToLongSamples(mT0);
      sampleCount end = wt->TimeToLongSamples(mT1);

      // Allocate buffer
      float *buffer = new float[blockLen];

      sampleCount index = start;
      sampleCount silentFrames = 0;
      bool cancelled = false;

      // Keep position in overall silences list for optimization
      RegionList::iterator rit(silences.begin());

      while (index < end) {
         // Show progress dialog, test for cancellation
         cancelled = TotalProgress(
               detectFrac * (whichTrack + index / (double)end) /
               (double)GetNumWaveTracks());
         if (cancelled)
            break;

         //
         // Optimization: if not in a silent region skip ahead to the next one
         //
         double curTime = wt->LongSamplesToTime(index);
         for ( ; rit != silences.end(); ++rit)
         {
            // Find the first silent region ending after current time
            if ((*rit)->end >= curTime)
               break;
         }

         if (rit == silences.end()) {
            // No more regions -- no need to process the rest of the track
            break;
         }
         else if ((*rit)->start > curTime) {
            // End current silent region, skip ahead
            if (silentFrames >= minSilenceFrames) {
               Region *r = new Region;
               r->start = wt->LongSamplesToTime(index - silentFrames);
               r->end = wt->LongSamplesToTime(index);
               trackSilences.push_back(r);
            }
            silentFrames = 0;

            index = wt->TimeToLongSamples((*rit)->start);
         }
         //
         // End of optimization
         //

         // Limit size of current block if we've reached the end
         sampleCount count = blockLen;
         if ((index + count) > end) {
            count = end - index;
         }

         // Fill buffer
         wt->Get((samplePtr)(buffer), floatSample, index, count);

         // Look for silences in current block
         for (sampleCount i = 0; i < count; ++i) {
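            // Count consecutive samples whose amplitude is below the silence threshold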
            if (fabs(buffer[i]) < truncDbSilenceThreshold) {
               ++silentFrames;
            }
            else {
               if (silentFrames >= minSilenceFrames)
               {
                  // Record the silent region
                  Region *r = new Region;
                  r->start = wt->LongSamplesToTime(index + i - silentFrames);
                  r->end = wt->LongSamplesToTime(index + i);
                  trackSilences.push_back(r);
               }
               silentFrames = 0;
            }
         }

         // Next block
         index += count;
      }

      delete [] buffer;

      // Buffer has been freed, so we're OK to return if cancelled
      if (cancelled)
      {
         ReplaceProcessedTracks(false);
         return false;
      }

      if (silentFrames >= minSilenceFrames)
      {
         // Track ended in silence -- record region
         Region *r = new Region;
         r->start = wt->LongSamplesToTime(index - silentFrames);
         r->end = wt->LongSamplesToTime(index);
         trackSilences.push_back(r);
      }

      // Intersect with the overall silent region list
      Intersect(silences, trackSilences);
      whichTrack++;
   }

   //
   // Now remove the silent regions from all selected / sync-lock selected tracks.
   //

   // Loop over detected regions in reverse (so cuts don't change time values
   // down the line)
   int whichReg = 0;
   RegionList::reverse_iterator rit;
   double totalCutLen = 0.0;  // For cutting selection at the end
   for (rit = silences.rbegin(); rit != silences.rend(); ++rit) {
      Region *r = *rit;

      // Progress dialog and cancellation. Do additional cleanup before return.
      if (TotalProgress(detectFrac + (1 - detectFrac) * whichReg / (double)silences.size()))
      {
         ReplaceProcessedTracks(false);
         return false;
      }

      // Intersection may create regions smaller than allowed; ignore them.
      // Allow one nanosecond extra for consistent results with exact milliseconds of allowed silence.
      if ((r->end - r->start) < (mInitialAllowedSilence - 0.000000001))
         continue;

      // Find new silence length as requested
      double inLength = r->end - r->start;
      double outLength;

      switch (mProcessIndex) {
      case 0:
         outLength = wxMin(mTruncLongestAllowedSilence, inLength);
         break;
      case 1:
         outLength = mInitialAllowedSilence +
                        (inLength - mInitialAllowedSilence) * mSilenceCompressPercent / 100.0;
         break;
      default: // Not currently used.
         outLength = wxMin(mInitialAllowedSilence +
                              (inLength - mInitialAllowedSilence) * mSilenceCompressPercent / 100.0,
                           mTruncLongestAllowedSilence);
      }

      double cutLen = inLength - outLength;
      totalCutLen += cutLen;

      TrackListIterator iterOut(mOutputTracks);
      for (Track *t = iterOut.First(); t; t = iterOut.Next())
      {
         // Don't waste time past the end of a track
         if (t->GetEndTime() < r->start)
            continue;

         if (t->GetKind() == Track::Wave && (
                  t->GetSelected() || t->IsSyncLockSelected()))
         {
            // In WaveTracks, clear with a cross-fade
            WaveTrack *wt = (WaveTrack *)t;
            sampleCount blendFrames = mBlendFrameCount;
            double cutStart = (r->start + r->end - cutLen) / 2;
            double cutEnd = cutStart + cutLen;
            // Round start/end times to frame boundaries
            cutStart = wt->LongSamplesToTime(wt->TimeToLongSamples(cutStart));
            cutEnd = wt->LongSamplesToTime(wt->TimeToLongSamples(cutEnd));

            // Make sure the cross-fade does not affect non-silent frames
            if (wt->LongSamplesToTime(blendFrames) > inLength) {
               blendFrames = wt->TimeToLongSamples(inLength);
            }

            // Perform cross-fade in memory
            float *buf1 = new float[blendFrames];
            float *buf2 = new float[blendFrames];
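            // Centre each blend window on its cut boundary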
            sampleCount t1 = wt->TimeToLongSamples(cutStart) - blendFrames / 2;
            sampleCount t2 = wt->TimeToLongSamples(cutEnd) - blendFrames / 2;

            wt->Get((samplePtr)buf1, floatSample, t1, blendFrames);
            wt->Get((samplePtr)buf2, floatSample, t2, blendFrames);

            for (sampleCount i = 0; i < blendFrames; ++i) {
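               // Linearly ramp from the audio before the cut to the audio after it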
               buf1[i] = ((blendFrames-i) * buf1[i] + i * buf2[i]) /
                         (double)blendFrames;
            }

            // Perform the cut
            wt->Clear(cutStart, cutEnd);

            // Write cross-faded data
            wt->Set((samplePtr)buf1, floatSample, t1, blendFrames);

            delete [] buf1;
            delete [] buf2;
         }
         else if (t->GetSelected() || t->IsSyncLockSelected())
         {
            // Non-wave tracks: just do a sync-lock adjust
            double cutStart = (r->start + r->end - cutLen) / 2;
            double cutEnd = cutStart + cutLen;
            t->SyncLockAdjust(cutEnd, cutStart);
         }
      }
      ++whichReg;
   }

   mT1 -= totalCutLen;

   ReplaceProcessedTracks(true);

   return true;
}
Example #3
double StretchHandle::GetT1(const Track &track, const ViewInfo &viewInfo)
{
   return std::min(track.GetEndTime(), viewInfo.selectedRegion.t1());
}
Example #4
SnapManager::SnapManager(TrackList *tracks, TrackClipArray *exclusions,
                         double zoom, int pixelTolerance, bool noTimeSnap)
{
   int i;

   // Grab time-snapping prefs (unless otherwise requested)
   mSnapToTime = false;

   AudacityProject *p = GetActiveProject();
   wxASSERT(p);
   if (p)
   {
      // Look up the format string
      if (p->GetSnapTo() && !noTimeSnap) {
         mSnapToTime = true;
         mConverter.SetSampleRate(p->GetRate());
         mConverter.SetFormatName(p->GetSelectionFormat());
      }
   }

   mSnapPoints = new SnapPointArray(CompareSnapPoints);
   if (zoom > 0 && pixelTolerance > 0)
      mTolerance = pixelTolerance / zoom;
   else {
      // This shouldn't happen, but we don't want to crash if we get
      // illegal values.  The net effect of this is to never snap.
      mTolerance = 0.0;
   }
   // Two time points closer than this are considered the same
   mEpsilon = 1 / 44100.0;

   // Add a SnapPoint at t=0
   mSnapPoints->Add(new SnapPoint(0.0, NULL));

   TrackListIterator iter(tracks);
   Track *track = iter.First();
   while (track) {
      if (track->GetKind() == Track::Label) {
         LabelTrack *labelTrack = (LabelTrack *)track;
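         // Add snap points at the start and end of every label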
         for(i = 0; i < labelTrack->GetNumLabels(); i++) {
            const LabelStruct *label = labelTrack->GetLabel(i);
            CondListAdd(label->t, labelTrack);
            if (label->t1 != label->t) {
               CondListAdd(label->t1, labelTrack);
            }
         }
      }
      else if (track->GetKind() == Track::Wave) {
         WaveTrack *waveTrack = (WaveTrack *)track;
         WaveClipList::compatibility_iterator it;
         for (it=waveTrack->GetClipIterator(); it; it=it->GetNext()) {
            WaveClip *clip = it->GetData();
            if (exclusions) {
               bool skip = false;
               for(int j=0; j<(int)exclusions->GetCount(); j++) {
                  if ((*exclusions)[j].track == waveTrack &&
                      (*exclusions)[j].clip == clip)
                     skip = true;
               }
               if (skip)
                  continue;
            }
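            // Add snap points at the clip's start and end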
            CondListAdd(clip->GetStartTime(), waveTrack);
            CondListAdd(clip->GetEndTime(), waveTrack);
         }
      }
#ifdef USE_MIDI
      else if (track->GetKind() == Track::Note) {
         CondListAdd(track->GetStartTime(), track);
         CondListAdd(track->GetEndTime(), track);
      }
#endif
      track = iter.Next();
   }
}
Example #5
bool Exporter::ExamineTracks()
{
   // Init
   mNumSelected = 0;
   mNumLeft = 0;
   mNumRight = 0;
   mNumMono = 0;

   // First analyze the selected audio, perform sanity checks, and provide
   // information as appropriate.

   // Tally how many are right, left, mono, and make sure at
   // least one track is selected (if selectedOnly==true)

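   // Start with the bounds swapped so the loop below finds the true extent of the selected audio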
   float earliestBegin = mT1;
   float latestEnd = mT0;

   TrackList *tracks = mProject->GetTracks();
   TrackListIterator iter1(tracks);
   Track *tr = iter1.First();

   while (tr) {
      if (tr->GetKind() == Track::Wave) {
         if (tr->GetSelected() || !mSelectedOnly) {

            mNumSelected++;

            if (tr->GetChannel() == Track::LeftChannel) {
               mNumLeft++;
            }
            else if (tr->GetChannel() == Track::RightChannel) {
               mNumRight++;
            }
            else if (tr->GetChannel() == Track::MonoChannel) {
               // It's a mono channel, but it may be panned
               float pan = ((WaveTrack*)tr)->GetPan();
               
               if (pan == -1.0)
                  mNumLeft++;
               else if (pan == 1.0)
                  mNumRight++;
               else if (pan == 0)
                  mNumMono++;
               else {
                  // Panned partially off-center. Mix as stereo.
                  mNumLeft++;
                  mNumRight++;
               }
            }

            if (tr->GetOffset() < earliestBegin) {
               earliestBegin = tr->GetOffset();
            }

            if (tr->GetEndTime() > latestEnd) {
               latestEnd = tr->GetEndTime();
            }

         }
      }

      tr = iter1.Next();
   }

   if (mSelectedOnly && mNumSelected == 0) {
      wxMessageBox(_("No tracks are selected! Use Ctrl-A (Select All)\nChoose Export... to export all tracks."),
                    _("Unable to export"),
                    wxOK | wxICON_INFORMATION);
      return false;
   }

   if (mT0 < earliestBegin)
      mT0 = earliestBegin;

   if (mT1 > latestEnd)
      mT1 = latestEnd;

   return true;
}
Example #6
bool EffectTruncSilence::DoRemoval
(const RegionList &silences, unsigned iGroup, unsigned nGroups, Track *firstTrack, Track *lastTrack,
 double &totalCutLen)
{
   //
   // Now remove the silent regions from all selected / sync-lock selected tracks.
   //

   // Loop over detected regions in reverse (so cuts don't change time values
   // down the line)
   int whichReg = 0;
   RegionList::const_reverse_iterator rit;
   for (rit = silences.rbegin(); rit != silences.rend(); ++rit)
   {
      const Region &region = *rit;
      const Region *const r = &region;

      // Progress dialog and cancellation. Do additional cleanup before return.
      const double frac = detectFrac +
         (1 - detectFrac) * (iGroup + whichReg / double(silences.size())) / nGroups;
      if (TotalProgress(frac))
      {
         ReplaceProcessedTracks(false);
         return false;
      }

      // Intersection may create regions smaller than allowed; ignore them.
      // Allow one nanosecond extra for consistent results with exact milliseconds of allowed silence.
      if ((r->end - r->start) < (mInitialAllowedSilence - 0.000000001))
         continue;

      // Find NEW silence length as requested
      double inLength = r->end - r->start;
      double outLength;

      switch (mActionIndex)
      {
      case kTruncate:
         outLength = std::min(mTruncLongestAllowedSilence, inLength);
         break;
      case kCompress:
         outLength = mInitialAllowedSilence +
                        (inLength - mInitialAllowedSilence) * mSilenceCompressPercent / 100.0;
         break;
      default: // Not currently used.
         outLength = std::min(mInitialAllowedSilence +
                              (inLength - mInitialAllowedSilence) * mSilenceCompressPercent / 100.0,
                           mTruncLongestAllowedSilence);
      }

      double cutLen = std::max(0.0, inLength - outLength);
      totalCutLen += cutLen;

      TrackListIterator iterOut(mOutputTracks.get());
      bool lastSeen = false;
      for (Track *t = iterOut.StartWith(firstTrack); t && !lastSeen; t = iterOut.Next())
      {
         lastSeen = (t == lastTrack);
         if (!(t->GetSelected() || t->IsSyncLockSelected()))
            continue;

         // Don't waste time past the end of a track
         if (t->GetEndTime() < r->start)
            continue;

         // Don't waste time cutting nothing.
         if( cutLen == 0.0 )
            continue;

         double cutStart = (r->start + r->end - cutLen) / 2;
         double cutEnd = cutStart + cutLen;
         if (t->GetKind() == Track::Wave)
         {
            // In WaveTracks, clear with a cross-fade
            WaveTrack *const wt = static_cast<WaveTrack*>(t);
            auto blendFrames = mBlendFrameCount;
            // Round start/end times to frame boundaries
            cutStart = wt->LongSamplesToTime(wt->TimeToLongSamples(cutStart));
            cutEnd = wt->LongSamplesToTime(wt->TimeToLongSamples(cutEnd));

            // Make sure the cross-fade does not affect non-silent frames
            if (wt->LongSamplesToTime(blendFrames) > inLength)
            {
               // Result is not more than blendFrames:
               blendFrames = wt->TimeToLongSamples(inLength).as_size_t();
            }

            // Perform cross-fade in memory
            Floats buf1{ blendFrames };
            Floats buf2{ blendFrames };
            auto t1 = wt->TimeToLongSamples(cutStart) - blendFrames / 2;
            auto t2 = wt->TimeToLongSamples(cutEnd) - blendFrames / 2;

            wt->Get((samplePtr)buf1.get(), floatSample, t1, blendFrames);
            wt->Get((samplePtr)buf2.get(), floatSample, t2, blendFrames);

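            // Linear cross-fade from the audio before the cut to the audio after it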
            for (decltype(blendFrames) i = 0; i < blendFrames; ++i)
            {
               buf1[i] = ((blendFrames-i) * buf1[i] + i * buf2[i]) /
                         (double)blendFrames;
            }

            // Perform the cut
            wt->Clear(cutStart, cutEnd);

            // Write cross-faded data
            wt->Set((samplePtr)buf1.get(), floatSample, t1, blendFrames);
         }
         else
            // Non-wave tracks: just do a sync-lock adjust
            t->SyncLockAdjust(cutEnd, cutStart);
      }
      ++whichReg;
   }

   return true;
}
Example #7
//TODO-MB: wouldn't it make more sense to delete the time track after 'mix and render'?
bool MixAndRender(TrackList *tracks, TrackFactory *trackFactory,
                  double rate, sampleFormat format,
                  double startTime, double endTime,
                  WaveTrack **newLeft, WaveTrack **newRight)
{
   // This function was formerly known as "Quick Mix".
   WaveTrack **waveArray;
   Track *t;
   int numWaves = 0; /* number of wave tracks in the selection */
   int numMono = 0;  /* number of mono, centre-panned wave tracks in selection*/
   bool mono = false;   /* flag if output can be mono without losing anything */
   bool oneinput = false;  /* flag set to true if there is only one input track
                              (mono or stereo) */
   int w;

   TrackListIterator iter(tracks);
   SelectedTrackListOfKindIterator usefulIter(Track::Wave, tracks);
   // This iterates only over the tracks relevant to this function, i.e.
   // selected WaveTracks. The track list passed in is (confusingly) the list
   // of all tracks in the project.

   t = iter.First();
   while (t) {
      if (t->GetSelected() && t->GetKind() == Track::Wave) {
         numWaves++;
         float pan = ((WaveTrack*)t)->GetPan();
         if (t->GetChannel() == Track::MonoChannel && pan == 0)
            numMono++;
      }
      t = iter.Next();
   }

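   // The mix can be mono only if every selected wave track is mono and centre-panned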
   if (numMono == numWaves)
      mono = true;

   /* the next loop will do two things at once:
    * 1. build an array of all the wave tracks we are trying to process
    * 2. determine when the set of WaveTracks starts and ends, in case we
    *    need to work out for ourselves when to start and stop rendering.
    */

   double mixStartTime = 0.0;    /* start time of first track to start */
   bool gotstart = false;  // flag indicates we have found a start time
   double mixEndTime = 0.0;   /* end time of last track to end */
   double tstart, tend;    // start and end times for one track.

   waveArray = new WaveTrack *[numWaves];
   w = 0;
   t = iter.First();

   while (t) {
      if (t->GetSelected() && t->GetKind() == Track::Wave) {
         waveArray[w++] = (WaveTrack *) t;
         tstart = t->GetStartTime();
         tend = t->GetEndTime();
         if (tend > mixEndTime)
            mixEndTime = tend;
         // Try to get the start time. If the track is empty we will get 0,
         // which is ambiguous: it could mean either that the track starts at
         // the beginning of the project or that the track is empty. The
         // give-away is that an empty track also ends at zero.

         if (tstart != tend) {
            // we don't get empty tracks here
            if (!gotstart) {
               // no previous start, use this one unconditionally
               mixStartTime = tstart;
               gotstart = true;
            } else if (tstart < mixStartTime)
               mixStartTime = tstart;  // have a start, only make it smaller
         }  // end if start and end are different
      }  // end if track is a selected WaveTrack.
      /** @TODO: could we not use a SelectedTrackListOfKindIterator here? */
      t = iter.Next();
   }

   /* create the destination track (new track) */
   if ((numWaves == 1) || ((numWaves == 2) && (usefulIter.First()->GetLink() != NULL)))
      oneinput = true;
   // only one input track (either 1 mono or one linked stereo pair)

   WaveTrack *mixLeft = trackFactory->NewWaveTrack(format, rate);
   if (oneinput)
      mixLeft->SetName(usefulIter.First()->GetName()); /* set name of output track to be the same as the sole input track */
   else
      mixLeft->SetName(_("Mix"));
   mixLeft->SetOffset(mixStartTime);
   WaveTrack *mixRight = 0;
   if (mono) {
      mixLeft->SetChannel(Track::MonoChannel);
   }
   else {
      mixRight = trackFactory->NewWaveTrack(format, rate);
      if (oneinput) {
         if (usefulIter.First()->GetLink() != NULL)   // we have linked track
            mixLeft->SetName(usefulIter.First()->GetLink()->GetName()); /* set name to match input track's right channel!*/
         else
            mixLeft->SetName(usefulIter.First()->GetName());   /* set name to that of sole input channel */
      }
      else
         mixRight->SetName(_("Mix"));
      mixLeft->SetChannel(Track::LeftChannel);
      mixRight->SetChannel(Track::RightChannel);
      mixRight->SetOffset(mixStartTime);
      mixLeft->SetLinked(true);
   }



   int maxBlockLen = mixLeft->GetIdealBlockSize();

   // If the caller didn't specify a time range, use the whole range in which
   // any input track had clips in it.
   if (startTime == endTime) {
      startTime = mixStartTime;
      endTime = mixEndTime;
   }

   Mixer *mixer = new Mixer(numWaves, waveArray, tracks->GetTimeTrack(),
                            startTime, endTime, mono ? 1 : 2, maxBlockLen, false,
                            rate, format);

   ::wxSafeYield();
   ProgressDialog *progress = new ProgressDialog(_("Mix and Render"),
                                                 _("Mixing and rendering tracks"));
   
   int updateResult = eProgressSuccess;
   while(updateResult == eProgressSuccess) {
      sampleCount blockLen = mixer->Process(maxBlockLen);

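      // A zero-length block means there is nothing left to mix, so stop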
      if (blockLen == 0)
         break;

      if (mono) {
         samplePtr buffer = mixer->GetBuffer();
         mixLeft->Append(buffer, format, blockLen);
      }
      else {
         samplePtr buffer;
         buffer = mixer->GetBuffer(0);
         mixLeft->Append(buffer, format, blockLen);
         buffer = mixer->GetBuffer(1);
         mixRight->Append(buffer, format, blockLen);
      }

      updateResult = progress->Update(mixer->MixGetCurrentTime() - startTime, endTime - startTime);
   }

   delete progress;

   mixLeft->Flush();
   if (!mono) 
      mixRight->Flush();
   if (updateResult == eProgressCancelled || updateResult == eProgressFailed)
   {
      delete mixLeft;
      if (!mono) 
         delete mixRight;
   } else {
      *newLeft = mixLeft;
      if (!mono) 
         *newRight = mixRight;

#if 0
   int elapsedMS = wxGetElapsedTime();
   double elapsedTime = elapsedMS * 0.001;
   double maxTracks = totalTime / (elapsedTime / numWaves);

   // Note: these shouldn't be translated - they're for debugging
   // and profiling only.
   printf("      Tracks: %d\n", numWaves);
   printf("  Mix length: %f sec\n", totalTime);
   printf("Elapsed time: %f sec\n", elapsedTime);
   printf("Max number of tracks to mix in real time: %f\n", maxTracks);
#endif
   }

   delete[] waveArray;
   delete mixer;

   return (updateResult == eProgressSuccess || updateResult == eProgressStopped);
}