Example 1
void InsertSilence(COMMAND_T* _ct)
{
	char val[64]="";
	lstrcpyn(val, g_lastSilenceVal[(int)_ct->user], sizeof(val));

	if (PromptUserForString(GetMainHwnd(), SWS_CMD_SHORTNAME(_ct), val, sizeof(val)) && *val)
	{
		double pos = GetCursorPositionEx(NULL), len = 0.0;
		switch ((int)_ct->user)
		{
			case 0: // s
				len = parse_timestr_len(val, pos, 3);
				break;
			case 1: // meas.beat
			{
/* no! would not take tempo markers into account since 'val' is not inserted yet
				len = parse_timestr_len(val, pos, 2);
				break;
*/
				int in_meas = atoi(val);
				double in_beats = 0.0;
				char* p = strchr(val, '.');
				if (p && p[1]) in_beats = atof(p+1);

				double bpm; int num, den;
				TimeMap_GetTimeSigAtTime(NULL, pos, &num, &den, &bpm);
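				// Seconds per beat = 60/bpm; seconds per measure = (240*num/den)/bpm.
				// E.g. at 120 BPM in 4/4: "2.1" (2 measures + 1 beat) -> 2*2.0 + 1*0.5 = 4.5 s.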
				len = in_beats*(60.0/bpm) + in_meas*((240.0*num/den)/bpm);
				break;
			}
			case 2: // smp
				len = parse_timestr_len(val, pos, 4);
				break;
		}

		if (len>0.0) {
			lstrcpyn(g_lastSilenceVal[(int)_ct->user], val, sizeof(val));
			InsertSilence(SWS_CMD_SHORTNAME(_ct), pos, len); // includes an undo point
		}
		else
			MessageBox(GetMainHwnd(), __LOCALIZE("Invalid input!","sws_mbox"), __LOCALIZE("S&M - Error","sws_mbox"), MB_OK);
	}
}
Example 2
SmpLength ResizeSample(modplug::tracker::modsample_t& smp, const SmpLength nNewLength, module_renderer* pSndFile)
//--------------------------------------------------------------------------------------
{
    // Invalid sample size
    if(nNewLength > MAX_SAMPLE_LENGTH || nNewLength == smp.length)
        return smp.length;

    // New sample will be bigger so we'll just use "InsertSilence" as it's already there.
    if(nNewLength > smp.length)
        return InsertSilence(smp, nNewLength - smp.length, smp.length, pSndFile);

    // Else: Shrink sample

    const SmpLength nNewSmpBytes = nNewLength * smp.GetBytesPerSample();

    LPSTR pNewSmp = 0;
    pNewSmp = module_renderer::AllocateSample(nNewSmpBytes);
    if(pNewSmp == 0)
        return smp.length; //Sample allocation failed.

    // Copy over old data and replace sample by the new one
    memcpy(pNewSmp, smp.sample_data, nNewSmpBytes);
    ReplaceSample(smp, pNewSmp, nNewLength, pSndFile);

    // Adjust loops
    if(smp.loop_start > nNewLength)
    {
        smp.loop_start = smp.loop_end = 0;
        smp.flags &= ~CHN_LOOP;
    }
    if(smp.loop_end > nNewLength) smp.loop_end = nNewLength;
    if(smp.sustain_start > nNewLength)
    {
        smp.sustain_start = smp.sustain_end = 0;
        smp.flags &= ~CHN_SUSTAINLOOP;
    }
    if(smp.sustain_end > nNewLength) smp.sustain_end = nNewLength;

    AdjustEndOfSample(smp, pSndFile);

    return smp.length;
}
Example 3
SmpLength ResizeSample(ModSample &smp, const SmpLength nNewLength, CSoundFile &sndFile)
//-------------------------------------------------------------------------------------
{
	// Invalid sample size
	if(nNewLength > MAX_SAMPLE_LENGTH || nNewLength == smp.nLength)
		return smp.nLength;

	// New sample will be bigger so we'll just use "InsertSilence" as it's already there.
	if(nNewLength > smp.nLength)
		return InsertSilence(smp, nNewLength - smp.nLength, smp.nLength, sndFile);

	// Else: Shrink sample

	const SmpLength nNewSmpBytes = nNewLength * smp.GetBytesPerSample();

	void *pNewSmp = ModSample::AllocateSample(nNewLength, smp.GetBytesPerSample());
	if(pNewSmp == nullptr)
		return smp.nLength; //Sample allocation failed.

	// Copy over old data and replace sample by the new one
	memcpy(pNewSmp, smp.pSample, nNewSmpBytes);
	ReplaceSample(smp, pNewSmp, nNewLength, sndFile);

	// Adjust loops
	if(smp.nLoopStart > nNewLength)
	{
		smp.nLoopStart = smp.nLoopEnd = 0;
		smp.uFlags.reset(CHN_LOOP);
	}
	if(smp.nLoopEnd > nNewLength) smp.nLoopEnd = nNewLength;
	if(smp.nSustainStart > nNewLength)
	{
		smp.nSustainStart = smp.nSustainEnd = 0;
		smp.uFlags.reset(CHN_SUSTAINLOOP);
	}
	if(smp.nSustainEnd > nNewLength) smp.nSustainEnd = nNewLength;

	PrecomputeLoops(smp, sndFile);

	return smp.nLength;
}
Example 4
/***********************************************************************
 * SyncAudio
 ***********************************************************************
 *
 **********************************************************************/
static int syncAudioWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                       hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_job_t        * job = pv->job;
    hb_sync_audio_t * sync = &pv->type.audio;
    hb_buffer_t     * buf;
    int64_t start;

    *buf_out = NULL;
    buf = *buf_in;
    *buf_in = NULL;
    /* if the next buffer is an eof send it downstream */
    if ( buf->size <= 0 )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        pv->common->first_pts[sync->index+1] = INT64_MAX - 1;
        return HB_WORK_DONE;
    }

    /* Wait till we can determine the initial pts of all streams */
    if( pv->common->pts_offset == INT64_MIN )
    {
        pv->common->first_pts[sync->index+1] = buf->s.start;
        hb_lock( pv->common->mutex );
        while( pv->common->pts_offset == INT64_MIN )
        {
            // Full fifos will make us wait forever, so get the
            // pts offset from the available streams if full
            if (hb_fifo_is_full(w->fifo_in))
            {
                getPtsOffset( w );
                hb_cond_broadcast( pv->common->next_frame );
            }
            else if ( checkPtsOffset( w ) )
                hb_cond_broadcast( pv->common->next_frame );
            else
                hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 200 );
        }
        hb_unlock( pv->common->mutex );
    }

    /* Wait for start frame if doing point-to-point */
    hb_lock( pv->common->mutex );
    start = buf->s.start - pv->common->audio_pts_slip;
    while ( !pv->common->start_found )
    {
        if ( pv->common->audio_pts_thresh < 0 )
        {
            // I would initialize this in hb_sync_init, but 
            // job->pts_to_start can be modified by reader 
            // after hb_sync_init is called.
            pv->common->audio_pts_thresh = job->pts_to_start;
        }
        if ( buf->s.start < pv->common->audio_pts_thresh )
        {
            hb_buffer_close( &buf );
            hb_unlock( pv->common->mutex );
            return HB_WORK_OK;
        }
        while ( !pv->common->start_found && 
                buf->s.start >= pv->common->audio_pts_thresh )
        {
            hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 10 );
            // There is an unfortunate unavoidable deadlock that can occur.
            // Since we need to wait for a specific frame in syncVideoWork,
            // syncAudioWork can be stalled indefinitely.  The video decoder
            // often drops multiple of the initial frames after starting
            // because they require references that have not been decoded yet.
            // This allows a lot of audio to be queued in the fifo and the
            // audio fifo fills before we get a single video frame.  So we
            // must drop some audio to unplug the pipeline and allow the first
            // video frame to be decoded.
            if ( hb_fifo_is_full(w->fifo_in) )
            {
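                // Drain the input fifo, keeping only the newest buffer in
                // 'buf' and closing the older ones.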
                hb_buffer_t *tmp;
                tmp = buf = hb_fifo_get( w->fifo_in );
                while ( tmp )
                {
                    tmp = hb_fifo_get( w->fifo_in );
                    if ( tmp )
                    {
                        hb_buffer_close( &buf );
                        buf = tmp;
                    }
                }
            }
        }
        start = buf->s.start - pv->common->audio_pts_slip;
    }
    if ( start < 0 )
    {
        hb_buffer_close( &buf );
        hb_unlock( pv->common->mutex );
        return HB_WORK_OK;
    }
    hb_unlock( pv->common->mutex );

    if( job->frame_to_stop && pv->common->count_frames >= job->frame_to_stop )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        return HB_WORK_DONE;
    }

    if( job->pts_to_stop && sync->next_start >= job->pts_to_stop )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        return HB_WORK_DONE;
    }

    // Audio time went backwards.
    // If our output clock is more than a half frame ahead of the
    // input clock, drop this frame to move closer to sync.
    // Otherwise, drop frames until the input clock matches the output clock.
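    // (PTS values are in 90 kHz clock ticks, so 90*15 ticks == 15 ms.)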
    if ( sync->next_start - start > 90*15 )
    {
        // Discard data that's in the past.
        if ( sync->first_drop == 0 )
        {
            sync->first_drop = start;
        }
        ++sync->drop_count;
        hb_buffer_close( &buf );
        return HB_WORK_OK;
    }
    if ( sync->first_drop )
    {
        // we were dropping old data but input buf time is now current
        hb_log( "sync: audio 0x%x time went backwards %d ms, dropped %d frames "
                "(start %"PRId64", next %"PRId64")", w->audio->id,
                (int)( sync->next_start - sync->first_drop ) / 90,
                sync->drop_count, sync->first_drop, (int64_t)sync->next_start );
        sync->first_drop = 0;
        sync->drop_count = 0;
    }
    if ( start - sync->next_start >= (90 * 70) )
    {
        if ( start - sync->next_start > (90000LL * 60) )
        {
            // there's a gap of more than a minute between the last
            // frame and this. assume we got a corrupted timestamp
            // and just drop the next buf.
            hb_log( "sync: %d minute time gap in audio 0x%x - dropping buf"
                    "  start %"PRId64", next %"PRId64,
                    (int)((start - sync->next_start) / (90000*60)),
                    w->audio->id, start, (int64_t)sync->next_start );
            hb_buffer_close( &buf );
            return HB_WORK_OK;
        }
        /*
         * there's a gap of at least 70ms between the last
         * frame we processed & the next. Fill it with silence.
         * Or in the case of DCA, skip some frames from the
         * other streams.
         */
        if ( sync->drop_video_to_sync )
        {
            hb_log( "sync: audio gap %d ms. Skipping frames. Audio 0x%x"
                    "  start %"PRId64", next %"PRId64,
                    (int)((start - sync->next_start) / 90),
                    w->audio->id, start, (int64_t)sync->next_start );
            hb_lock( pv->common->mutex );
            pv->common->audio_pts_slip += (start - sync->next_start);
            pv->common->video_pts_slip += (start - sync->next_start);
            hb_unlock( pv->common->mutex );
            *buf_out = OutputAudioFrame( w->audio, buf, sync );
            return HB_WORK_OK;
        }
        hb_log( "sync: adding %d ms of silence to audio 0x%x"
                "  start %"PRId64", next %"PRId64,
                (int)((start - sync->next_start) / 90),
                w->audio->id, start, (int64_t)sync->next_start );
        InsertSilence( w, start - sync->next_start );
    }

    /*
     * When we get here we've taken care of all the dups and gaps in the
     * audio stream and are ready to inject the next input frame into
     * the output stream.
     */
    *buf_out = OutputAudioFrame( w->audio, buf, sync );
    return HB_WORK_OK;
}
Example 5
void ControlToolBar::OnRecord(wxCommandEvent &evt)
{
   auto doubleClicked = mRecord->IsDoubleClicked();
   mRecord->ClearDoubleClicked();

   if (doubleClicked) {
      // Display a fixed recording head while scrolling the waves continuously.
      // If you overdub, you may want to anticipate some context in existing tracks,
      // so center the head.  If not, put it rightmost to display as much wave as we can.
      const auto project = GetActiveProject();
      bool duplex;
      gPrefs->Read(wxT("/AudioIO/Duplex"), &duplex, true);

      if (duplex) {
         // See if there is really anything being overdubbed
         if (gAudioIO->GetNumPlaybackChannels() == 0)
            // No.
            duplex = false;
      }

      using Mode = AudacityProject::PlaybackScroller::Mode;
      project->GetPlaybackScroller().Activate(duplex ? Mode::Centered : Mode::Right);
      return;
   }

   if (gAudioIO->IsBusy()) {
      if (!CanStopAudioStream() || 0 == gAudioIO->GetNumCaptureChannels())
         mRecord->PopUp();
      else
         mRecord->PushDown();
      return;
   }
   AudacityProject *p = GetActiveProject();

   if( evt.GetInt() == 1 ) // used when called by keyboard shortcut. Default (0) ignored.
      mRecord->SetShift(true);
   if( evt.GetInt() == 2 )
      mRecord->SetShift(false);

   SetRecord(true, mRecord->WasShiftDown());

   if (p) {
      TrackList *trackList = p->GetTracks();
      TrackListIterator it(trackList);
      if(it.First() == NULL)
         mRecord->SetShift(false);
      double t0 = p->GetSel0();
      double t1 = p->GetSel1();
      if (t1 == t0)
         t1 = 1000000000.0;     // record for a long, long time (tens of years)

      /* TODO: set up stereo tracks if that is how the user has set up
       * their preferences, and choose sample format based on prefs */
      WaveTrackArray newRecordingTracks, playbackTracks;
#ifdef EXPERIMENTAL_MIDI_OUT
      NoteTrackArray midiTracks;
#endif
      bool duplex;
      gPrefs->Read(wxT("/AudioIO/Duplex"), &duplex, true);

      if(duplex){
         playbackTracks = trackList->GetWaveTrackArray(false);
#ifdef EXPERIMENTAL_MIDI_OUT
         midiTracks = trackList->GetNoteTrackArray(false);
#endif
      }
      else {
         playbackTracks = WaveTrackArray();
#ifdef EXPERIMENTAL_MIDI_OUT
         midiTracks = NoteTrackArray();
#endif
      }

      // If SHIFT key was down, the user wants append to tracks
      int recordingChannels = 0;
      TrackList tracksCopy{};
      bool tracksCopied = false;
      bool shifted = mRecord->WasShiftDown();
      if (shifted) {
         bool sel = false;
         double allt0 = t0;

         // Find the maximum end time of selected and all wave tracks
         // Find whether any tracks were selected.  (If any are selected,
         // record only into them; else if tracks exist, record into all.)
         for (Track *tt = it.First(); tt; tt = it.Next()) {
            if (tt->GetKind() == Track::Wave) {
               WaveTrack *wt = static_cast<WaveTrack *>(tt);
               if (wt->GetEndTime() > allt0) {
                  allt0 = wt->GetEndTime();
               }

               if (tt->GetSelected()) {
                  sel = true;
                  if (wt->GetEndTime() > t0) {
                     t0 = wt->GetEndTime();
                  }
               }
            }
         }

         // Use end time of all wave tracks if none selected
         if (!sel) {
            t0 = allt0;
         }

         // Pad selected/all wave tracks to make them all the same length
         // Remove recording tracks from the list of tracks for duplex ("overdub")
         // playback.
         for (Track *tt = it.First(); tt; tt = it.Next()) {
            if (tt->GetKind() == Track::Wave && (tt->GetSelected() || !sel)) {
               WaveTrack *wt = static_cast<WaveTrack *>(tt);
               if (duplex) {
                  auto end = playbackTracks.end();
                  auto it = std::find(playbackTracks.begin(), end, wt);
                  if (it != end)
                     playbackTracks.erase(it);
               }
               t1 = wt->GetEndTime();
               if (t1 < t0) {
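                  // This track ends before t0 (the common append point), so pad
                  // it with silence up to t0 so the new recording lines up in
                  // every track.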
                  if (!tracksCopied) {
                     tracksCopied = true;
                     tracksCopy = *trackList;
                  }

                  auto newTrack = p->GetTrackFactory()->NewWaveTrack();
                  newTrack->InsertSilence(0.0, t0 - t1);
                  newTrack->Flush();
                  wt->Clear(t1, t0);
                  bool bResult = wt->Paste(t1, newTrack.get());
                  wxASSERT(bResult); // TO DO: Actually handle this.
                  wxUnusedVar(bResult);
               }
               newRecordingTracks.push_back(wt);
            }
         }

         t1 = 1000000000.0;     // record for a long, long time (tens of years)
      }
      else {
         bool recordingNameCustom, useTrackNumber, useDateStamp, useTimeStamp;
         wxString defaultTrackName, defaultRecordingTrackName;
         int numTracks = 0;

         for (Track *tt = it.First(); tt; tt = it.Next()) {
            if (tt->GetKind() == Track::Wave && !tt->GetLinked())
               numTracks++;
         }
         numTracks++;
         
         recordingChannels = gPrefs->Read(wxT("/AudioIO/RecordChannels"), 2);

         gPrefs->Read(wxT("/GUI/TrackNames/RecordingNameCustom"), &recordingNameCustom, false);
         gPrefs->Read(wxT("/GUI/TrackNames/TrackNumber"), &useTrackNumber, false);
         gPrefs->Read(wxT("/GUI/TrackNames/DateStamp"), &useDateStamp, false);
         gPrefs->Read(wxT("/GUI/TrackNames/TimeStamp"), &useTimeStamp, false);
         /* i18n-hint: The default name for an audio track. */
         gPrefs->Read(wxT("/GUI/TrackNames/DefaultTrackName"),&defaultTrackName, _("Audio Track"));
         gPrefs->Read(wxT("/GUI/TrackNames/RecodingTrackName"), &defaultRecordingTrackName, defaultTrackName);

         wxString baseTrackName = recordingNameCustom? defaultRecordingTrackName : defaultTrackName;

         for (int c = 0; c < recordingChannels; c++) {
            auto newTrack = p->GetTrackFactory()->NewWaveTrack();

            newTrack->SetOffset(t0);
            wxString nameSuffix = wxString(wxT(""));

            if (useTrackNumber) {
               nameSuffix += wxString::Format(wxT("%d"), numTracks + c);
            }

            if (useDateStamp) {
               if (!nameSuffix.IsEmpty()) {
                  nameSuffix += wxT("_");
               }
               nameSuffix += wxDateTime::Now().FormatISODate();
            }

            if (useTimeStamp) {
               if (!nameSuffix.IsEmpty()) {
                  nameSuffix += wxT("_");
               }
               nameSuffix += wxDateTime::Now().FormatISOTime();
            }

            // ISO standard would be nice, but ":" is unsafe for file name.
            nameSuffix.Replace(wxT(":"), wxT("-"));

            if (baseTrackName.IsEmpty()) {
               newTrack->SetName(nameSuffix);
            }
            else if (nameSuffix.IsEmpty()) {
               newTrack->SetName(baseTrackName);
            }
            else {
               newTrack->SetName(baseTrackName + wxT("_") + nameSuffix);
            }

            if (recordingChannels > 2)
              newTrack->SetMinimized(true);

            if (recordingChannels == 2) {
               if (c == 0) {
                  newTrack->SetChannel(Track::LeftChannel);
                  newTrack->SetLinked(true);
               }
               else {
                  newTrack->SetChannel(Track::RightChannel);
               }
            }
            else {
               newTrack->SetChannel( Track::MonoChannel );
            }

            // Let the list hold the track, and keep a pointer to it
            newRecordingTracks.push_back(
               static_cast<WaveTrack*>(
                  trackList->Add(
                     std::move(newTrack))));
         }
      }

      //Automated Input Level Adjustment Initialization
      #ifdef EXPERIMENTAL_AUTOMATED_INPUT_LEVEL_ADJUSTMENT
         gAudioIO->AILAInitialize();
      #endif

      AudioIOStartStreamOptions options(p->GetDefaultPlayOptions());
      int token = gAudioIO->StartStream(playbackTracks,
                                        newRecordingTracks,
#ifdef EXPERIMENTAL_MIDI_OUT
                                        midiTracks,
#endif
                                        t0, t1, options);

      bool success = (token != 0);

      if (success) {
         p->SetAudioIOToken(token);
         mBusyProject = p;
      }
      else {
         if (shifted) {
            // Restore the tracks to remove any inserted silence
            if (tracksCopied)
               *trackList = std::move(tracksCopy);
         }
         else {
            // msmeyer: Delete recently added tracks if opening stream fails
            for (unsigned int i = 0; i < newRecordingTracks.size(); i++) {
               trackList->Remove(newRecordingTracks[i]);
            }
         }

         // msmeyer: Show error message if stream could not be opened
         wxMessageBox(_("Error while opening sound device. Please check the recording device settings and the project sample rate."),
                      _("Error"), wxOK | wxICON_EXCLAMATION, this);

         SetPlay(false);
         SetStop(false);
         SetRecord(false);
      }
   }
   UpdateStatusBar(GetActiveProject());
}