Code Example #1
bool EffectTruncSilence::FindSilences
   (RegionList &silences, Track *firstTrack, Track *lastTrack)
{
   // Start with the whole selection silent
   silences.push_back(Region(mT0, mT1));

   // Remove non-silent regions in each track
   SelectedTrackListOfKindIterator iter(Track::Wave, mTracks);
   int whichTrack = 0;
   bool lastSeen = false;
   for (Track *t = iter.StartWith(firstTrack); !lastSeen && t; t = iter.Next())
   {
      lastSeen = (t == lastTrack);
      WaveTrack *const wt = static_cast<WaveTrack *>(t);

      // Smallest silent region to detect in frames
      sampleCount minSilenceFrames =
         sampleCount(std::max(mInitialAllowedSilence, DEF_MinTruncMs) * wt->GetRate());

      //
      // Scan the track for silences
      //
      RegionList trackSilences;

      sampleCount index = wt->TimeToLongSamples(mT0);
      sampleCount silentFrame = 0;

      // Detect silences
      bool cancelled = !(Analyze(silences, trackSilences, wt, &silentFrame, &index, whichTrack));

      // Buffer has been freed, so we're OK to return if cancelled
      if (cancelled)
      {
         ReplaceProcessedTracks(false);
         return false;
      }

      if (silentFrame >= minSilenceFrames)
      {
         // Track ended in silence -- record region
         trackSilences.push_back(Region(
            wt->LongSamplesToTime(index - silentFrame),
            wt->LongSamplesToTime(index)
         ));
      }

      // Intersect with the overall silent region list
      Intersect(silences, trackSilences);
      whichTrack++;
   }

   return true;
}
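FindSilences calls an Intersect helper that is not shown in this listing. Below is a minimal, self-contained sketch of the region-intersection idea, using a hypothetical Region struct in place of Audacity's RegionList element type; it is an illustration, not the project's actual Intersect.

#include <algorithm>
#include <vector>

struct Region { double start, end; };

// Keep only the time spans that are silent in both lists.
std::vector<Region> IntersectRegions(const std::vector<Region> &overall,
                                     const std::vector<Region> &track)
{
   std::vector<Region> result;
   for (const Region &a : overall) {
      for (const Region &b : track) {
         const double s = std::max(a.start, b.start);
         const double e = std::min(a.end, b.end);
         if (s < e)
            result.push_back(Region{ s, e });
      }
   }
   return result;
}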
Code Example #2
void TranscriptionToolBar::GetSamples(
   const WaveTrack *t, sampleCount *s0, sampleCount *slen)
{
   // GetSamples translates the start and end selection markers into sample indices.
   // The selection times are doubles.

   AudacityProject *p = GetActiveProject();
   if (!p) {
      return;
   }

   //First, get the current selection. It is part of the mViewInfo, which is
   //part of the project

   const auto &selectedRegion = p->GetViewInfo().selectedRegion;
   double start = selectedRegion.t0();
   double end = selectedRegion.t1();

   auto ss0 = sampleCount( (start - t->GetOffset()) * t->GetRate() );
   auto ss1 = sampleCount( (end - t->GetOffset()) * t->GetRate() );

   if (start < t->GetOffset()) {
      ss0 = 0;
   }

#if 0
   //This adjusts the right samplecount to the maximum sample.
   if (ss1 >= t->GetNumSamples()) {
      ss1 = t->GetNumSamples();
   }
#endif

   if (ss1 < ss0) {
      ss1 = ss0;
   }

   *s0 = ss0;
   *slen = ss1 - ss0;
}
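The conversion above is simply (time - offset) * rate, clamped so that times before the track's own start map to sample 0. A minimal sketch of that arithmetic as a free function, with offset and rate as stand-ins for t->GetOffset() and t->GetRate():

#include <cstdint>

// Times before the track start clamp to 0, as in GetSamples(); the cast
// truncates toward zero, matching the sampleCount() construction above.
int64_t TimeToSample(double time, double offset, double rate)
{
   if (time < offset)
      return 0;
   return static_cast<int64_t>((time - offset) * rate);
}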
Code Example #3
//! Compute the mean, the maximum amplitude in DC, and the maximum amplitude in AC (mean subtracted). NULL can be passed for any value that should be ignored.
void SignalDisplayData::channelAmplitude(unsigned channel, int *mean, int *maxAmplitudeDC, int *maxAmplitudeAC) const
{
	Q_ASSERT(channel < channelCount);

	const signed short *channelSamples = &data[channel * sampleCount()];
	int minValue, maxValue;
	int meanValue = *channelSamples;	// include the first sample in the mean sum

	minValue = maxValue = *channelSamples++;
	for (unsigned i = 1; i < sampleCount(); i++)
	{
		int v = *channelSamples++;
		minValue = std::min(v, minValue);
		maxValue = std::max(v, maxValue);
		meanValue += v;
	}
	meanValue /= sampleCount();
	if (mean)
		*mean = meanValue;
	if (maxAmplitudeDC)
		*maxAmplitudeDC = std::max(maxValue, abs(minValue));
	if (maxAmplitudeAC)
		*maxAmplitudeAC = std::max(maxValue - meanValue, abs(minValue - meanValue));
}
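The same DC/AC amplitude computation, reduced to a self-contained helper over a plain buffer for readers without the SignalDisplayData class at hand. This sketch assumes a non-empty input and uses a wider accumulator; it is an illustration, not the Aseba implementation.

#include <algorithm>
#include <cstdlib>
#include <vector>

struct Amplitudes { int mean, maxDC, maxAC; };

Amplitudes computeAmplitudes(const std::vector<short> &samples)
{
	int minValue = samples[0], maxValue = samples[0];
	long long sum = samples[0];
	for (size_t i = 1; i < samples.size(); ++i) {
		const int v = samples[i];
		minValue = std::min(v, minValue);
		maxValue = std::max(v, maxValue);
		sum += v;
	}
	const int mean = static_cast<int>(sum / static_cast<long long>(samples.size()));
	return Amplitudes{
		mean,                                                 // DC offset
		std::max(maxValue, std::abs(minValue)),               // peak including DC
		std::max(maxValue - mean, std::abs(minValue - mean))  // peak around the mean
	};
}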
Code Example #4
File: CubeTileHandler.cpp  Project: corburn/ISIS
  /**
   * Construct a tile handler. This will determine a good chunk size to put
   *   into the output cube.
   *
   * @param dataFile The file with cube DN data in it
   * @param virtualBandList The mapping from virtual band to physical band, see
   *          CubeIoHandler's description.
   * @param labels The Pvl labels for the cube
   * @param alreadyOnDisk True if the cube is allocated on the disk, false
   *          otherwise
   */
  CubeTileHandler::CubeTileHandler(QFile * dataFile,
      const QList<int> *virtualBandList, const Pvl &labels, bool alreadyOnDisk)
      : CubeIoHandler(dataFile, virtualBandList, labels, alreadyOnDisk) {

    const PvlObject &core = labels.findObject("IsisCube").findObject("Core");

    if(core.hasKeyword("Format")) {
      setChunkSizes(core["TileSamples"], core["TileLines"], 1);
    }
    else {
      // up to 1MB chunks
      int sampleChunkSize =
          findGoodSize(512 * 4 / SizeOf(pixelType()), sampleCount());
      int lineChunkSize =
          findGoodSize(512 * 4 / SizeOf(pixelType()), lineCount());

      setChunkSizes(sampleChunkSize, lineChunkSize, 1);
    }
  }
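A worked check of the "up to 1MB chunks" comment, assuming 4-byte (Real) pixels; findGoodSize is ISIS's own routine and may shrink the edge further to fit the actual cube dimensions.

// Illustrative arithmetic only: with SizeOf(pixelType()) == 4 the requested
// chunk edge is 512 * 4 / 4 = 512, so one uncropped chunk occupies
// 512 * 512 * 4 bytes = 1 MiB.
static_assert(512 * 4 / 4 == 512, "chunk edge for 4-byte pixels");
static_assert(512 * 512 * 4 == 1024 * 1024, "one full chunk is 1 MiB");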
Code Example #5
File: TruncSilence.cpp  Project: jengelh/audacity
bool EffectTruncSilence::Analyze(RegionList& silenceList,
                                 RegionList& trackSilences,
                                 WaveTrack* wt,
                                 sampleCount* silentFrame,
                                 sampleCount* index,
                                 int whichTrack,
                                 double* inputLength /*= NULL*/,
                                 double* minInputLength /*= NULL*/)
{
   // Smallest silent region to detect in frames
   sampleCount minSilenceFrames = sampleCount(std::max( mInitialAllowedSilence, DEF_MinTruncMs) * wt->GetRate());

   double truncDbSilenceThreshold = Enums::Db2Signal[mTruncDbChoiceIndex];
   sampleCount blockLen = wt->GetMaxBlockSize();
   sampleCount start = wt->TimeToLongSamples(mT0);
   sampleCount end = wt->TimeToLongSamples(mT1);
   sampleCount outLength = 0;

   double previewLength;
   gPrefs->Read(wxT("/AudioIO/EffectsPreviewLen"), &previewLength, 6.0);
   // Minimum required length in samples.
   const sampleCount previewLen = previewLength * wt->GetRate();

   // Keep position in overall silences list for optimization
   RegionList::iterator rit(silenceList.begin());

   // Allocate buffer
   float *buffer = new float[blockLen];

   // Loop through current track
   while (*index < end) {
      if (inputLength && ((outLength >= previewLen) || (*index - start > wt->TimeToLongSamples(*minInputLength)))) {
         *inputLength = std::min<double>(*inputLength, *minInputLength);
         if (outLength >= previewLen) {
            *minInputLength = *inputLength;
         }
         return true;
      }

      if (!inputLength) {
         // Show progress dialog, test for cancellation
         bool cancelled = TotalProgress(
               detectFrac * (whichTrack + (*index - start) / (double)(end - start)) /
               (double)GetNumWaveTracks());
         if (cancelled) {
            delete [] buffer;
            return false;
         }
      }

      // Optimization: if not in a silent region skip ahead to the next one

      double curTime = wt->LongSamplesToTime(*index);
      for ( ; rit != silenceList.end(); ++rit) {
         // Find the first silent region ending after current time
         if (rit->end >= curTime) {
            break;
         }
      }

      if (rit == silenceList.end()) {
         // No more regions -- no need to process the rest of the track
         if (inputLength) {
            // Add available samples up to previewLength.
            sampleCount remainingTrackSamples = wt->TimeToLongSamples(wt->GetEndTime()) - *index;
            sampleCount requiredTrackSamples = previewLen - outLength;
            outLength += (remainingTrackSamples > requiredTrackSamples)? requiredTrackSamples : remainingTrackSamples;
         }

         break;
      }
      else if (rit->start > curTime) {
         // End current silent region, skip ahead
         if (*silentFrame >= minSilenceFrames)  {
            trackSilences.push_back(Region(
               wt->LongSamplesToTime(*index - *silentFrame),
               wt->LongSamplesToTime(*index)
            ));
         }
         *silentFrame = 0;
         sampleCount newIndex = wt->TimeToLongSamples(rit->start);
         if (inputLength) {
            sampleCount requiredTrackSamples = previewLen - outLength;
            // Add non-silent sample to outLength
            outLength += ((newIndex - *index) > requiredTrackSamples)? requiredTrackSamples : newIndex - *index;
         }

         *index = newIndex;
      }
      // End of optimization

      // Limit size of current block if we've reached the end
      sampleCount count = blockLen;
      if ((*index + count) > end) {
         count = end - *index;
      }

      // Fill buffer
      wt->Get((samplePtr)(buffer), floatSample, *index, count);

      // Look for silences in the current block
      for (sampleCount i = 0; i < count; ++i) {
         if (inputLength && ((outLength >= previewLen) || (outLength > wt->TimeToLongSamples(*minInputLength)))) {
            *inputLength = wt->LongSamplesToTime(*index + i) - wt->LongSamplesToTime(start);
            break;
         }

         if (fabs(buffer[i]) < truncDbSilenceThreshold) {
            (*silentFrame)++;
         }
         else {
            sampleCount allowed = 0;
            if (*silentFrame >= minSilenceFrames) {
               if (inputLength) {
                  switch (mActionIndex) {
                     case kTruncate:
                        outLength += wt->TimeToLongSamples(mTruncLongestAllowedSilence);
                        break;
                     case kCompress:
                        allowed = wt->TimeToLongSamples(mInitialAllowedSilence);
                        outLength += allowed +
                                       (*silentFrame - allowed) * mSilenceCompressPercent / 100.0;
                        break;
                     // default: // Not currently used.
                  }
               }

               // Record the silent region
               trackSilences.push_back(Region(
                  wt->LongSamplesToTime(*index + i - *silentFrame),
                  wt->LongSamplesToTime(*index + i)
               ));
            }
            else if (inputLength) {   // included as part of non-silence
               outLength += *silentFrame;
            }
            *silentFrame = 0;
            if (inputLength) {
                ++outLength;   // Add non-silent sample to outLength
            }
         }
      }
      // Next block
      *index += count;
   }
   delete [] buffer;

   if (inputLength) {
      *inputLength = std::min<double>(*inputLength, *minInputLength);
      if (outLength >= previewLen) {
         *minInputLength = *inputLength;
      }
   }

   return true;
}
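Enums::Db2Signal above appears to map the chosen dB threshold to a linear amplitude, and a sample counts as silent when its magnitude falls below that value. A minimal sketch of that test using the standard dB-to-amplitude conversion (the lookup table itself is Audacity's and is not reproduced here):

#include <cmath>

// -40 dB corresponds to a linear amplitude of 0.01, for example.
inline double DbToLinear(double db)
{
   return std::pow(10.0, db / 20.0);
}

inline bool IsSilentSample(float sample, double thresholdDb)
{
   return std::fabs(sample) < DbToLinear(thresholdDb);
}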
Code Example #6
File: Modedit.cpp  Project: Sappharad/modizer
// Base code for adding, removing, moving and duplicating samples. Returns new number of samples on success, SAMPLEINDEX_INVALID otherwise.
// The new sample vector can contain SAMPLEINDEX_INVALID for adding new (empty) samples.
// newOrder indices are zero-based, i.e. newOrder[0] will define the contents of the first sample slot.
SAMPLEINDEX CModDoc::ReArrangeSamples(const std::vector<SAMPLEINDEX> &newOrder)
//-----------------------------------------------------------------------------
{
	if(newOrder.size() > m_SndFile.GetModSpecifications().samplesMax)
	{
		return SAMPLEINDEX_INVALID;
	}

	CriticalSection cs;

	const SAMPLEINDEX oldNumSamples = m_SndFile.GetNumSamples(), newNumSamples = static_cast<SAMPLEINDEX>(newOrder.size());

	std::vector<int> sampleCount(oldNumSamples + 1, 0);
	std::vector<ModSample> sampleHeaders(oldNumSamples + 1);
	std::vector<SAMPLEINDEX> newIndex(oldNumSamples + 1, 0);	// One of the new indexes for the old sample
	std::vector<std::string> sampleNames(oldNumSamples + 1);
	std::vector<mpt::PathString> samplePaths(oldNumSamples + 1);

	for(SAMPLEINDEX i = 0; i < newNumSamples; i++)
	{
		const SAMPLEINDEX origSlot = newOrder[i];
		if(origSlot > 0 && origSlot <= oldNumSamples)
		{
			sampleCount[origSlot]++;
			sampleHeaders[origSlot] = m_SndFile.GetSample(origSlot);
			if(!newIndex[origSlot]) newIndex[origSlot] = i + 1;
		}
	}

	// First, delete all samples that will be removed anyway.
	for(SAMPLEINDEX i = 1; i < sampleCount.size(); i++)
	{
		if(sampleCount[i] == 0)
		{
			m_SndFile.DestroySample(i);
			GetSampleUndo().ClearUndo(i);
		}
		sampleNames[i] = m_SndFile.m_szNames[i];
		samplePaths[i] = m_SndFile.GetSamplePath(i);
	}

	// Remove sample data references from now unused slots.
	for(SAMPLEINDEX i = newNumSamples + 1; i <= oldNumSamples; i++)
	{
		m_SndFile.GetSample(i).pSample = nullptr;
		m_SndFile.GetSample(i).nLength = 0;
		strcpy(m_SndFile.m_szNames[i], "");
	}

	// Now, create new sample list.
	m_SndFile.m_nSamples = std::max(m_SndFile.m_nSamples, newNumSamples);	// Avoid assertions when using GetSample()...
	for(SAMPLEINDEX i = 0; i < newNumSamples; i++)
	{
		const SAMPLEINDEX origSlot = newOrder[i];
		ModSample &target = m_SndFile.GetSample(i + 1);
		if(origSlot > 0 && origSlot <= oldNumSamples)
		{
			// Copy an original sample.
			target = sampleHeaders[origSlot];
			if(--sampleCount[origSlot] > 0 && sampleHeaders[origSlot].pSample != nullptr)
			{
				// This sample slot is referenced multiple times, so we have to copy the actual sample.
				target.pSample = ModSample::AllocateSample(target.nLength, target.GetBytesPerSample());
				if(target.pSample != nullptr)
				{
					memcpy(target.pSample, sampleHeaders[origSlot].pSample, target.GetSampleSizeInBytes());
					target.PrecomputeLoops(m_SndFile, false);
				} else
				{
					Reporting::Error("Cannot duplicate sample - out of memory!");
				}
			}
			strcpy(m_SndFile.m_szNames[i + 1], sampleNames[origSlot].c_str());
			m_SndFile.SetSamplePath(i + 1, samplePaths[origSlot]);
		} else
		{
			// Invalid sample reference.
			target.Initialize(m_SndFile.GetType());
			target.pSample = nullptr;
			strcpy(m_SndFile.m_szNames[i + 1], "");
			m_SndFile.ResetSamplePath(i + 1);
		}
	}

	GetSampleUndo().RearrangeSamples(newIndex);

	for(CHANNELINDEX c = 0; c < CountOf(m_SndFile.m_PlayState.Chn); c++)
	{
		ModChannel &chn = m_SndFile.m_PlayState.Chn[c];
		for(SAMPLEINDEX i = 1; i <= oldNumSamples; i++)
		{
			if(chn.pModSample == &m_SndFile.GetSample(i))
			{
				chn.pModSample = &m_SndFile.GetSample(newIndex[i]);
				if(i == 0 || i > newNumSamples)
				{
					chn.Reset(ModChannel::resetTotal, m_SndFile, c);
				}
				break;
			}
		}
	}

	m_SndFile.m_nSamples = newNumSamples;

	if(m_SndFile.GetNumInstruments())
	{
		// Instrument mode: Update sample maps.
		for(INSTRUMENTINDEX i = 0; i <= m_SndFile.GetNumInstruments(); i++)
		{
			ModInstrument *ins = m_SndFile.Instruments[i];
			if(ins == nullptr)
			{
				continue;
			}
			GetInstrumentUndo().RearrangeSamples(i, newIndex);
			for(auto &sample : ins->Keyboard)
			{
				if(sample < newIndex.size())
					sample = newIndex[sample];
				else
					sample = 0;
			}
		}
	} else
	{
		PrepareUndoForAllPatterns(false, "Rearrange Samples");

		std::vector<ModCommand::INSTR> indices(newIndex.size(), 0);
		for(size_t i = 0; i < newIndex.size(); i++)
		{
			indices[i] = newIndex[i];
		}
		m_SndFile.Patterns.ForEachModCommand(RewriteInstrumentReferencesInPatterns(indices));
	}

	return GetNumSamples();
}
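The newIndex table above records, for each old sample slot, the first new slot that references it. A minimal sketch of that mapping with a plain uint16_t standing in for SAMPLEINDEX (illustrative only): positions in newOrder are zero-based, its values are one-based old slots, and 0 marks an empty or dropped slot.

#include <cstdint>
#include <vector>

std::vector<uint16_t> BuildNewIndex(const std::vector<uint16_t> &newOrder,
                                    uint16_t oldNumSamples)
{
	// newIndex[old] = first new one-based slot referencing old sample `old`,
	// or 0 if the old sample is not referenced at all.
	std::vector<uint16_t> newIndex(oldNumSamples + 1, 0);
	for (size_t i = 0; i < newOrder.size(); ++i) {
		const uint16_t origSlot = newOrder[i];
		if (origSlot > 0 && origSlot <= oldNumSamples && newIndex[origSlot] == 0)
			newIndex[origSlot] = static_cast<uint16_t>(i + 1);
	}
	return newIndex;
}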
Code Example #7
File: Mix.cpp  Project: AthiVarathan/audacity
sampleCount Mixer::MixVariableRates(int *channelFlags, WaveTrackCache &cache,
                                    sampleCount *pos, float *queue,
                                    int *queueStart, int *queueLen,
                                    Resample * pResample)
{
   const WaveTrack *const track = cache.GetTrack();
   const double trackRate = track->GetRate();
   const double initialWarp = mRate / mSpeed / trackRate;
   const double tstep = 1.0 / trackRate;
   int sampleSize = SAMPLE_SIZE(floatSample);

   sampleCount out = 0;

   /* time is floating point. Sample rate is integer. The number of samples
    * has to be integer, but the multiplication gives a float result, which we
    * round to get an integer result. TODO: is this always right or can it be
    * off by one sometimes? Can we not get this information directly from the
    * clip (which must know) rather than convert the time?
    *
    * LLL:  Not at this time.  While WaveClips provide methods to retrieve the
    *       start and end sample, they do the same float->sampleCount conversion
    *       to calculate the position.
    */

   // Find the last sample
   double endTime = track->GetEndTime();
   double startTime = track->GetStartTime();
   const bool backwards = (mT1 < mT0);
   const double tEnd = backwards
      ? std::max(startTime, mT1)
      : std::min(endTime, mT1);
   const sampleCount endPos = track->TimeToLongSamples(tEnd);
   // Find the time corresponding to the start of the queue, for use with time track
   double t = (*pos + (backwards ? *queueLen : - *queueLen)) / trackRate;

   while (out < mMaxOut) {
      if (*queueLen < mProcessLen) {
         // Shift pending portion to start of the buffer
         memmove(queue, &queue[*queueStart], (*queueLen) * sampleSize);
         *queueStart = 0;

         int getLen =
            std::min((backwards ? *pos - endPos : endPos - *pos),
                      sampleCount(mQueueMaxLen - *queueLen));

         // Nothing to do if past end of play interval
         if (getLen > 0) {
            if (backwards) {
               auto results = cache.Get(floatSample, *pos - (getLen - 1), getLen);
               memcpy(&queue[*queueLen], results, sizeof(float) * getLen);

               track->GetEnvelopeValues(mEnvValues,
                                        getLen,
                                        (*pos - (getLen- 1)) / trackRate,
                                        tstep);

               *pos -= getLen;
            }
            else {
               auto results = cache.Get(floatSample, *pos, getLen);
               memcpy(&queue[*queueLen], results, sizeof(float) * getLen);

               track->GetEnvelopeValues(mEnvValues,
                                        getLen,
                                        (*pos) / trackRate,
                                        tstep);

               *pos += getLen;
            }

            for (int i = 0; i < getLen; i++) {
               queue[(*queueLen) + i] *= mEnvValues[i];
            }

            if (backwards)
               ReverseSamples((samplePtr)&queue[0], floatSample,
                              *queueLen, getLen);

            *queueLen += getLen;
         }
      }

      sampleCount thisProcessLen = mProcessLen;
      bool last = (*queueLen < mProcessLen);
      if (last) {
         thisProcessLen = *queueLen;
      }

      double factor = initialWarp;
      if (mTimeTrack)
      {
         //TODO-MB: The end time is wrong when the resampler doesn't use all input samples,
         //         as a result of this the warp factor may be slightly wrong, so AudioIO will stop too soon
         //         or too late (resulting in missing sound or inserted silence). This can't be fixed
         //         without changing the way the resampler works, because the number of input samples that will be used
         //         is unpredictable. Maybe it can be compensated later though.
         if (backwards)
            factor *= mTimeTrack->ComputeWarpFactor
               (t - (double)thisProcessLen / trackRate + tstep, t + tstep);
         else
            factor *= mTimeTrack->ComputeWarpFactor
               (t, t + (double)thisProcessLen / trackRate);
      }

      int input_used;
      int outgen = pResample->Process(factor,
                                      &queue[*queueStart],
                                      thisProcessLen,
                                      last,
                                      &input_used,
                                      &mFloatBuffer[out],
                                      mMaxOut - out);

      if (outgen < 0) {
         return 0;
      }

      *queueStart += input_used;
      *queueLen -= input_used;
      out += outgen;
      t += ((backwards ? -input_used : input_used) / trackRate);

      if (last) {
         break;
      }
   }

   for (int c = 0; c < mNumChannels; c++) {
      if (mApplyTrackGains) {
         mGains[c] = track->GetChannelGain(c);
      }
      else {
         mGains[c] = 1.0;
      }
   }

   MixBuffers(mNumChannels,
              channelFlags,
              mGains,
              (samplePtr)mFloatBuffer,
              mTemp,
              out,
              mInterleaved);

   return out;
}
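The long comment near the top of MixVariableRates worries about turning a floating-point time into an integer sample position. A minimal sketch of that conversion, assuming the common round-to-nearest convention (an illustration, not Audacity's TimeToLongSamples):

#include <cmath>
#include <cstdint>

// Rounding rather than truncating is what keeps the position from drifting
// off by one when time * rate lands just under an integer.
int64_t TimeToSamplesRounded(double seconds, double rate)
{
   return static_cast<int64_t>(std::floor(seconds * rate + 0.5));
}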
Code Example #8
double Curve::minX() const {
  if (hasBars() && sampleCount() > 0) {
    return MinX - (MaxX - MinX)/(2*(sampleCount()-1));
  }
  return MinX;
}
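For example, with MinX = 0, MaxX = 10 and sampleCount() = 6, the bar spacing is (10 - 0) / (6 - 1) = 2, so minX() extends the left edge by half a bar to 0 - 2/2 = -1.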
Code Example #9
    void reconfigure()
    {
        if(!self.isReady() || size == Size()) return;

        LOGDEV_GL_VERBOSE("Reconfiguring framebuffer: %s ms:%i")
                << size.asText() << sampleCount();

        // Configure textures for the framebuffer.
        color.setUndefinedImage(size, colorFormat);
        color.setWrap(gl::ClampToEdge, gl::ClampToEdge);
        color.setFilter(gl::Nearest, gl::Linear, gl::MipNone);

        DENG2_ASSERT(color.isReady());

        depthStencil.setDepthStencilContent(size);
        depthStencil.setWrap(gl::ClampToEdge, gl::ClampToEdge);
        depthStencil.setFilter(gl::Nearest, gl::Nearest, gl::MipNone);

        DENG2_ASSERT(depthStencil.isReady());

        try
        {
            // We'd like to use texture attachments for both color and depth/stencil.
            target.configure(&color, &depthStencil);
        }
        catch(GLTarget::ConfigError const &er)
        {
            // Alternatively try without depth/stencil texture (some renderer features
            // will not be available!).
            LOG_GL_WARNING("Texture-based framebuffer failed: %s\n"
                           "Trying fallback without depth/stencil texture")
                    << er.asText();

            target.configure(GLTarget::Color, color, GLTarget::DepthStencil);
        }

        target.clear(GLTarget::ColorDepthStencil);

        if(isMultisampled())
        {
            try
            {
                // Set up the multisampled target with suitable renderbuffers.
                multisampleTarget.configure(size, GLTarget::ColorDepthStencil, sampleCount());
                multisampleTarget.clear(GLTarget::ColorDepthStencil);

                // Actual drawing occurs in the multisampled target that is then
                // blitted to the main target.
                target.setProxy(&multisampleTarget);
            }
            catch(GLTarget::ConfigError const &er)
            {
                LOG_GL_WARNING("Multisampling not supported: %s") << er.asText();
                _samples = 1;
                goto noMultisampling;
            }
        }
        else
        {
noMultisampling:
            multisampleTarget.configure();
        }
    }
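The try/catch blocks above implement a "preferred configuration with fallback" pattern. A generic sketch of that shape, with hypothetical callables standing in for the de::GLTarget configuration calls:

#include <exception>

// Run the preferred configuration; on failure, fall back to a reduced one.
template <typename Preferred, typename Fallback>
void configureWithFallback(Preferred preferred, Fallback fallback)
{
    try {
        preferred();   // e.g. texture-based depth/stencil attachment
    }
    catch (const std::exception &) {
        fallback();    // e.g. renderbuffer-only configuration
    }
}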
Code Example #10
File: TruncSilence.cpp  Project: GYGit/Audacity
bool EffectTruncSilence::Process()
{
   // Typical fraction of total time taken by detection (better to guess low)
   const double detectFrac = .4;

   // Copy tracks
   this->CopyInputTracks(Track::All);

   // Lower bound on the amount of silence to find at a time -- this avoids
   // detecting silence repeatedly in low-frequency sounds.
   const double minTruncMs = 0.001;
   double truncDbSilenceThreshold = Enums::Db2Signal[mTruncDbChoiceIndex];

   // Master list of silent regions; it is responsible for deleting them.
   // This list should always be kept in order.
   RegionList silences;
   silences.DeleteContents(true);

   // Start with the whole selection silent
   Region *sel = new Region;
   sel->start = mT0;
   sel->end = mT1;
   silences.push_back(sel);

   // Remove non-silent regions in each track
   SelectedTrackListOfKindIterator iter(Track::Wave, mTracks);
   int whichTrack = 0;
   for (Track *t = iter.First(); t; t = iter.Next())
   {
      WaveTrack *wt = (WaveTrack *)t;

      // Smallest silent region to detect in frames
      sampleCount minSilenceFrames =
            sampleCount(wxMax( mInitialAllowedSilence, minTruncMs) *
                  wt->GetRate());

      //
      // Scan the track for silences
      //
      RegionList trackSilences;
      trackSilences.DeleteContents(true);
      sampleCount blockLen = wt->GetMaxBlockSize();
      sampleCount start = wt->TimeToLongSamples(mT0);
      sampleCount end = wt->TimeToLongSamples(mT1);

      // Allocate buffer
      float *buffer = new float[blockLen];

      sampleCount index = start;
      sampleCount silentFrames = 0;
      bool cancelled = false;

      // Keep position in overall silences list for optimization
      RegionList::iterator rit(silences.begin());

      while (index < end) {
         // Show progress dialog, test for cancellation
         cancelled = TotalProgress(
               detectFrac * (whichTrack + index / (double)end) /
               (double)GetNumWaveTracks());
         if (cancelled)
            break;

         //
         // Optimization: if not in a silent region skip ahead to the next one
         //
         double curTime = wt->LongSamplesToTime(index);
         for ( ; rit != silences.end(); ++rit)
         {
            // Find the first silent region ending after current time
            if ((*rit)->end >= curTime)
               break;
         }

         if (rit == silences.end()) {
            // No more regions -- no need to process the rest of the track
            break;
         }
         else if ((*rit)->start > curTime) {
            // End current silent region, skip ahead
            if (silentFrames >= minSilenceFrames) {
               Region *r = new Region;
               r->start = wt->LongSamplesToTime(index - silentFrames);
               r->end = wt->LongSamplesToTime(index);
               trackSilences.push_back(r);
            }
            silentFrames = 0;

            index = wt->TimeToLongSamples((*rit)->start);
         }
         //
         // End of optimization
         //

         // Limit size of current block if we've reached the end
         sampleCount count = blockLen;
         if ((index + count) > end) {
            count = end - index;
         }

         // Fill buffer
         wt->Get((samplePtr)(buffer), floatSample, index, count);

         // Look for silences in current block
         for (sampleCount i = 0; i < count; ++i) {
            if (fabs(buffer[i]) < truncDbSilenceThreshold) {
               ++silentFrames;
            }
            else {
               if (silentFrames >= minSilenceFrames)
               {
                  // Record the silent region
                  Region *r = new Region;
                  r->start = wt->LongSamplesToTime(index + i - silentFrames);
                  r->end = wt->LongSamplesToTime(index + i);
                  trackSilences.push_back(r);
               }
               silentFrames = 0;
            }
         }

         // Next block
         index += count;
      }

      delete [] buffer;

      // Buffer has been freed, so we're OK to return if cancelled
      if (cancelled)
      {
         ReplaceProcessedTracks(false);
         return false;
      }

      if (silentFrames >= minSilenceFrames)
      {
         // Track ended in silence -- record region
         Region *r = new Region;
         r->start = wt->LongSamplesToTime(index - silentFrames);
         r->end = wt->LongSamplesToTime(index);
         trackSilences.push_back(r);
      }

      // Intersect with the overall silent region list
      Intersect(silences, trackSilences);
      whichTrack++;
   }

   //
   // Now remove the silent regions from all selected / sync-lock selected tracks.
   //

   // Loop over detected regions in reverse (so cuts don't change time values
   // down the line)
   int whichReg = 0;
   RegionList::reverse_iterator rit;
   double totalCutLen = 0.0;  // For cutting selection at the end
   for (rit = silences.rbegin(); rit != silences.rend(); ++rit) {
      Region *r = *rit;

      // Progress dialog and cancellation. Do additional cleanup before return.
      if (TotalProgress(detectFrac + (1 - detectFrac) * whichReg / (double)silences.size()))
      {
         ReplaceProcessedTracks(false);
         return false;
      }

      // Intersection may create regions smaller than allowed; ignore them.
      // Allow one nanosecond extra for consistent results with exact milliseconds of allowed silence.
      if ((r->end - r->start) < (mInitialAllowedSilence - 0.000000001))
         continue;

      // Find new silence length as requested
      double inLength = r->end - r->start;
      double outLength;

      switch (mProcessIndex) {
      case 0:
         outLength = wxMin(mTruncLongestAllowedSilence, inLength);
         break;
      case 1:
         outLength = mInitialAllowedSilence +
                        (inLength - mInitialAllowedSilence) * mSilenceCompressPercent / 100.0;
         break;
      default: // Not currently used.
         outLength = wxMin(mInitialAllowedSilence +
                              (inLength - mInitialAllowedSilence) * mSilenceCompressPercent / 100.0,
                           mTruncLongestAllowedSilence);
      }

      double cutLen = inLength - outLength;
      totalCutLen += cutLen;

      TrackListIterator iterOut(mOutputTracks);
      for (Track *t = iterOut.First(); t; t = iterOut.Next())
      {
         // Don't waste time past the end of a track
         if (t->GetEndTime() < r->start)
            continue;

         if (t->GetKind() == Track::Wave && (
                  t->GetSelected() || t->IsSyncLockSelected()))
         {
            // In WaveTracks, clear with a cross-fade
            WaveTrack *wt = (WaveTrack *)t;
            sampleCount blendFrames = mBlendFrameCount;
            double cutStart = (r->start + r->end - cutLen) / 2;
            double cutEnd = cutStart + cutLen;
            // Round start/end times to frame boundaries
            cutStart = wt->LongSamplesToTime(wt->TimeToLongSamples(cutStart));
            cutEnd = wt->LongSamplesToTime(wt->TimeToLongSamples(cutEnd));

            // Make sure the cross-fade does not affect non-silent frames
            if (wt->LongSamplesToTime(blendFrames) > inLength) {
               blendFrames = wt->TimeToLongSamples(inLength);
            }

            // Perform cross-fade in memory
            float *buf1 = new float[blendFrames];
            float *buf2 = new float[blendFrames];
            sampleCount t1 = wt->TimeToLongSamples(cutStart) - blendFrames / 2;
            sampleCount t2 = wt->TimeToLongSamples(cutEnd) - blendFrames / 2;

            wt->Get((samplePtr)buf1, floatSample, t1, blendFrames);
            wt->Get((samplePtr)buf2, floatSample, t2, blendFrames);

            for (sampleCount i = 0; i < blendFrames; ++i) {
               buf1[i] = ((blendFrames-i) * buf1[i] + i * buf2[i]) /
                         (double)blendFrames;
            }

            // Perform the cut
            wt->Clear(cutStart, cutEnd);

            // Write cross-faded data
            wt->Set((samplePtr)buf1, floatSample, t1, blendFrames);

            delete [] buf1;
            delete [] buf2;
         }
         else if (t->GetSelected() || t->IsSyncLockSelected())
         {
            // Non-wave tracks: just do a sync-lock adjust
            double cutStart = (r->start + r->end - cutLen) / 2;
            double cutEnd = cutStart + cutLen;
            t->SyncLockAdjust(cutEnd, cutStart);
         }
      }
      ++whichReg;
   }

   mT1 -= totalCutLen;

   ReplaceProcessedTracks(true);

   return true;
}
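The cross-fade above blends buf1 toward buf2 linearly over blendFrames samples. A self-contained sketch of the same formula as a stand-alone helper (illustrative only, not Audacity code):

#include <vector>

void CrossFade(std::vector<float> &buf1, const std::vector<float> &buf2)
{
   const size_t blendFrames = buf1.size();   // assumes buf2.size() == blendFrames
   for (size_t i = 0; i < blendFrames; ++i)
      buf1[i] = static_cast<float>(((blendFrames - i) * buf1[i] + i * buf2[i])
                                   / static_cast<double>(blendFrames));
}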
Code Example #11
//! Return a duration in ms for a given sample value. Floating point version
double SignalDisplayData::sampleToTime(double sample) const
{
	return (sample * (double)duration) / (double)sampleCount();
}
Code Example #12
//! Return a duration in ms for a given sample value. Integer version
unsigned SignalDisplayData::sampleToTime(unsigned sample) const
{
	return (sample * duration) / sampleCount();
}
Code Example #13
//! Return a duration in samples for the given time in ms. Floating point version
double SignalDisplayData::timeToSample(double time) const
{
	return (time * (double)sampleCount()) / (double)duration;
}
Code Example #14
//! Return a duration in samples for the given time in ms. Integer version
unsigned SignalDisplayData::timeToSample(unsigned time) const
{
	return (time * sampleCount()) / duration;
}
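The four conversions above are one linear mapping applied in both directions: time = sample * duration / sampleCount() and sample = time * sampleCount() / duration. A quick self-contained round-trip check with hypothetical values:

#include <cassert>

int main()
{
	const double duration = 2000.0;   // total duration in ms (hypothetical)
	const double samples  = 1000.0;   // sampleCount()        (hypothetical)
	const double t = 500.0 * duration / samples;   // sample 500 -> 1000 ms
	const double s = t * samples / duration;       // and back  -> 500
	assert(s == 500.0);
	return 0;
}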