bool loadTracks(const MultiviewTrackList<IndexSet>& index_tracks,
                const std::string& features_format,
                const std::vector<std::string>& views,
                MultiviewTrackList<FeatureSet>& tracks) {
  // Initialize list of empty tracks.
  int num_features = index_tracks.numTracks();
  int num_views = views.size();
  tracks = MultiviewTrackList<FeatureSet>(num_features, num_views);

  // Iterate through time (to avoid loading all features at once).
  MultiViewTimeIterator<IndexSet> iterator(index_tracks);

  while (!iterator.end()) {
    int time = iterator.time();

    for (int view = 0; view < num_views; view += 1) {
      // Get feature indices matched in this frame.
      std::map<int, IndexSet> subset;
      iterator.getView(view, subset);

      // Only load tracks if there were some features.
      if (!subset.empty()) {
        TrackList<SiftPosition> features;

        // Load features in this frame.
        std::string file;
        file = makeFrameFilename(features_format, views[view], time);
        SiftPositionReader reader;
        bool ok = loadTrackList(file, features, reader);
        if (!ok) {
          return false;
        }
        LOG(INFO) << "Loaded " << features.size() << " tracks for (" << view <<
            ", " << time << ")";

        // Iterate through features in this frame.
        std::map<int, IndexSet>::const_iterator it;
        for (it = subset.begin(); it != subset.end(); ++it) {
          int id = it->first;
          const IndexSet& indices = it->second;
          CHECK(id < num_features);

          IndexSet::const_iterator index;
          for (index = indices.begin(); index != indices.end(); ++index) {
            // Copy every point in the track.
            CHECK(*index < int(features.size()));
            const Track<SiftPosition>& track = features[*index];

            Track<SiftPosition>::const_iterator point;
            for (point = track.begin(); point != track.end(); ++point) {
              int t = point->first;
              const SiftPosition& x = point->second;

              tracks.track(id).view(view)[t].push_back(x);
            }
          }
        }
      }
    }

    iterator.next();
  }

  return true;
}
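A minimal sketch of how loadTracks might be invoked, assuming the multiview track types and readers used above; the index tracks, view names and the features_format pattern shown here are hypothetical placeholders, not values from the original project.

// Hypothetical call site for loadTracks (all inputs below are placeholders).
MultiviewTrackList<IndexSet> index_tracks;    // matched feature indices per view and frame
std::vector<std::string> views;               // view names, e.g. read from a file
std::string features_format = "features/%s/%06d.yaml";  // assumed filename pattern

MultiviewTrackList<FeatureSet> tracks;
bool ok = loadTracks(index_tracks, features_format, views, tracks);
CHECK(ok) << "Could not load feature tracks";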
Example 2
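// Records live values into any automation patterns that are currently
// recording, then applies the automated values at timeStart to all other
// automatable models.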
void Song::processAutomations(const TrackList &tracklist, MidiTime timeStart, fpp_t)
{
	AutomatedValueMap values;

	QSet<const AutomatableModel*> recordedModels;

	TrackContainer* container = this;
	int tcoNum = -1;

	switch (m_playMode)
	{
	case Mode_PlaySong:
		break;
	case Mode_PlayBB:
	{
		Q_ASSERT(tracklist.size() == 1);
		Q_ASSERT(tracklist.at(0)->type() == Track::BBTrack);
		auto bbTrack = dynamic_cast<BBTrack*>(tracklist.at(0));
		auto bbContainer = Engine::getBBTrackContainer();
		container = bbContainer;
		tcoNum = bbTrack->index();
	}
		break;
	default:
		return;
	}

	values = container->automatedValuesAt(timeStart, tcoNum);
	TrackList tracks = container->tracks();

	Track::tcoVector tcos;
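	// Collect TCOs from automation tracks up to the current position.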
	for (Track* track : tracks)
	{
		if (track->type() == Track::AutomationTrack) {
			track->getTCOsInRange(tcos, 0, timeStart);
		}
	}

	// Process recording
	for (TrackContentObject* tco : tcos)
	{
		auto p = dynamic_cast<AutomationPattern *>(tco);
		MidiTime relTime = timeStart - p->startPosition();
		if (p->isRecording() && relTime >= 0 && relTime < p->length())
		{
			const AutomatableModel* recordedModel = p->firstObject();
			p->recordValue(relTime, recordedModel->value<float>());

			recordedModels << recordedModel;
		}
	}

	// Apply values
	for (auto it = values.begin(); it != values.end(); it++)
	{
		if (! recordedModels.contains(it.key()))
		{
			it.key()->setAutomatedValue(it.value());
		}
	}
}
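// Loads SIFT descriptor tracks, view names and camera intrinsics, then, for
// every other view, loads the pairwise fundamental matrix and second-camera
// intrinsics and extracts examples over a range of scales and orientations.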
int main(int argc, char** argv) {
  init(argc, argv);

  if (argc != 9) {
    google::ShowUsageWithFlags(argv[0]);
    return 1;
  }

  std::string descriptor_tracks_file = argv[1];
  std::string image_format = argv[2];
  std::string fund_mat_format = argv[3];
  std::string intrinsics_format = argv[4];
  std::string views_file = argv[5];
  int view1 = boost::lexical_cast<int>(argv[6]);
  int time = boost::lexical_cast<int>(argv[7]);
  std::string examples_file = argv[8];

  // Load descriptor tracks.
  TrackList<SiftFeature> features;
  SiftFeatureReader feature_reader;
  bool ok = loadTrackList(descriptor_tracks_file, features, feature_reader);
  CHECK(ok) << "Could not load descriptor tracks";
  LOG(INFO) << "Loaded " << features.size() << " features";

  // Load names of views.
  std::vector<std::string> view_names;
  ok = readLines(views_file, view_names);
  CHECK(ok) << "Could not load view names";
  int num_views = view_names.size();

  // Load intrinsics for main camera.
  CameraProperties camera1;
  std::string camera_file1 = makeViewFilename(intrinsics_format,
      view_names[view1]);
  CameraPropertiesReader camera_reader;
  ok = load(camera_file1, camera1, camera_reader);
  CHECK(ok) << "Could not load intrinsics for main camera";

  // For each view.
  for (int view2 = 0; view2 < num_views; view2 += 1) {
    if (view2 != view1) {
      // Load fundamental matrix.
      cv::Mat F;
      int i = view1;
      int j = view2;
      bool swap = false;
      if (view2 < view1) {
        std::swap(i, j);
        swap = true;
      }
      std::string fund_mat_file = makeViewPairFilename(fund_mat_format,
          view_names[i], view_names[j]);
      MatrixReader matrix_reader;
      ok = load(fund_mat_file, F, matrix_reader);
      CHECK(ok) << "Could not load fundamental matrix";
      if (swap) {
        F = F.t();
      }

      // Load camera properties.
      CameraProperties camera2;
      std::string camera_file2 = makeViewFilename(intrinsics_format,
          view_names[view2]);
      ok = load(camera_file2, camera2, camera_reader);
      CHECK(ok) << "Could not load intrinsics for second camera";

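      // Candidate scales and orientations at which examples will be extracted.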
      std::vector<double> scales;
      scales.push_back(4);
      scales.push_back(8);
      scales.push_back(16);
      scales.push_back(32);
      scales.push_back(64);

      std::vector<double> angles;
      angles.push_back(0 * M_PI / 4.);
      angles.push_back(1 * M_PI / 4.);
      angles.push_back(2 * M_PI / 4.);
      angles.push_back(3 * M_PI / 4.);
      angles.push_back(4 * M_PI / 4.);
      angles.push_back(5 * M_PI / 4.);
      angles.push_back(6 * M_PI / 4.);
      angles.push_back(7 * M_PI / 4.);

      extractExamplesForView(features, F, camera1, camera2, image_format,
          view_names[view2], time, scales, angles);
    }
  }

  return 0;
}
Example 4
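// Renders one mixer period of the current play mode (song, BB or pattern):
// advances the play position tick by tick, honors timeline loop points,
// keeps the VST sync controller up to date, and triggers track playback and
// automation processing at each tick boundary.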
void Song::processNextBuffer()
{
	// if not playing, nothing to do
	if( m_playing == false )
	{
		return;
	}

	TrackList trackList;
	int tcoNum = -1; // track content object number

	// determine the list of tracks to play and the track content object
	// (TCO) number
	switch( m_playMode )
	{
		case Mode_PlaySong:
			trackList = tracks();
			// at song-start we have to reset the LFOs
			if( m_playPos[Mode_PlaySong] == 0 )
			{
				EnvelopeAndLfoParameters::instances()->reset();
			}
			break;

		case Mode_PlayBB:
			if( Engine::getBBTrackContainer()->numOfBBs() > 0 )
			{
				tcoNum = Engine::getBBTrackContainer()->
								currentBB();
				trackList.push_back( BBTrack::findBBTrack(
								tcoNum ) );
			}
			break;

		case Mode_PlayPattern:
			if( m_patternToPlay != NULL )
			{
				tcoNum = m_patternToPlay->getTrack()->
						getTCONum( m_patternToPlay );
				trackList.push_back(
						m_patternToPlay->getTrack() );
			}
			break;

		default:
			return;

	}

	// if we have no tracks to play, nothing to do
	if( trackList.empty() == true )
	{
		return;
	}

	// check for looping-mode and act if necessary
	TimeLineWidget * tl = m_playPos[m_playMode].m_timeLine;
	bool checkLoop =
		tl != NULL && m_exporting == false && tl->loopPointsEnabled();

	if( checkLoop )
	{
		// if looping-mode is enabled and we are outside of the looping
		// range, go to the beginning of the range
		if( m_playPos[m_playMode] < tl->loopBegin() ||
					m_playPos[m_playMode] >= tl->loopEnd() )
		{
			setToTime(tl->loopBegin());
			m_playPos[m_playMode].setTicks(
						tl->loopBegin().getTicks() );
			emit updateSampleTracks();
		}
	}

	f_cnt_t framesPlayed = 0;
	const float framesPerTick = Engine::framesPerTick();

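	// Fill the current mixer period in tick-sized chunks, advancing the
	// play position and handling loop points as tick boundaries are crossed.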
	while( framesPlayed < Engine::mixer()->framesPerPeriod() )
	{
		m_vstSyncController.update();

		float currentFrame = m_playPos[m_playMode].currentFrame();
		// did we play a tick?
		if( currentFrame >= framesPerTick )
		{
			int ticks = m_playPos[m_playMode].getTicks() +
				( int )( currentFrame / framesPerTick );

			m_vstSyncController.setAbsolutePosition( ticks );

			// did we play a whole tact?
			if( ticks >= MidiTime::ticksPerTact() )
			{
				// per default we just continue playing even if
				// there's no more stuff to play
				// (song-play-mode)
				int maxTact = m_playPos[m_playMode].getTact()
									+ 2;

				// then decide whether to go over to next tact
				// or to loop back to first tact
				if( m_playMode == Mode_PlayBB )
				{
					maxTact = Engine::getBBTrackContainer()
							->lengthOfCurrentBB();
				}
				else if( m_playMode == Mode_PlayPattern &&
					m_loopPattern == true &&
					tl != NULL &&
					tl->loopPointsEnabled() == false )
				{
					maxTact = m_patternToPlay->length()
								.getTact();
				}

				// end of played object reached?
				if( m_playPos[m_playMode].getTact() + 1
								>= maxTact )
				{
					// then start from beginning and keep
					// offset
					ticks %= ( maxTact * MidiTime::ticksPerTact() );

					// wrap milli second counter
					setToTimeByTicks(ticks);

					m_vstSyncController.setAbsolutePosition( ticks );
				}
			}
			m_playPos[m_playMode].setTicks( ticks );

			if( checkLoop )
			{
				m_vstSyncController.startCycle(
					tl->loopBegin().getTicks(), tl->loopEnd().getTicks() );

				// if looping-mode is enabled and we have got
				// past the looping range, return to the
				// beginning of the range
				if( m_playPos[m_playMode] >= tl->loopEnd() )
				{
					m_playPos[m_playMode].setTicks( tl->loopBegin().getTicks() );
					setToTime(tl->loopBegin());
				}
				else if( m_playPos[m_playMode] == tl->loopEnd() - 1 )
				{
					emit updateSampleTracks();
				}
			}
			else
			{
				m_vstSyncController.stopCycle();
			}

			currentFrame = fmodf( currentFrame, framesPerTick );
			m_playPos[m_playMode].setCurrentFrame( currentFrame );
		}

		f_cnt_t framesToPlay =
			Engine::mixer()->framesPerPeriod() - framesPlayed;

		f_cnt_t framesLeft = ( f_cnt_t )framesPerTick -
						( f_cnt_t )currentFrame;
		// skip last frame fraction
		if( framesLeft == 0 )
		{
			++framesPlayed;
			m_playPos[m_playMode].setCurrentFrame( currentFrame
								+ 1.0f );
			continue;
		}
		// do we have samples left in this tick but these are less
		// than samples we have to play?
		if( framesLeft < framesToPlay )
		{
			// then set framesToPlay to remaining samples, the
			// rest will be played in next loop
			framesToPlay = framesLeft;
		}

		if( ( f_cnt_t ) currentFrame == 0 )
		{
			processAutomations(trackList, m_playPos[m_playMode], framesToPlay);

			// loop through all tracks and play them
			for( int i = 0; i < trackList.size(); ++i )
			{
				trackList[i]->play( m_playPos[m_playMode],
						framesToPlay,
						framesPlayed, tcoNum );
			}
		}

		// update frame-counters
		framesPlayed += framesToPlay;
		m_playPos[m_playMode].setCurrentFrame( framesToPlay +
								currentFrame );
		m_elapsedMilliSeconds += MidiTime::ticksToMilliseconds( framesToPlay / framesPerTick, getTempo());
		m_elapsedTacts = m_playPos[Mode_PlaySong].getTact();
		m_elapsedTicks = ( m_playPos[Mode_PlaySong].getTicks() % ticksPerTact() ) / 48;
	}
}
Example 5
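// Draws feature tracks over the corresponding image sequence. The track type
// (SIFT, scale-space or plain points) is selected by flags, each track is
// assigned a random color, and every annotated frame can be saved and/or
// displayed.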
int main(int argc, char** argv) {
  init(argc, argv);

  if (argc != 3) {
    google::ShowUsageWithFlags(argv[0]);
    return 1;
  }

  std::string tracks_file = argv[1];
  std::string image_format = argv[2];
  std::string output_format = FLAGS_output_format;

  bool ok;

  TrackList<DrawerPointer> tracks;

  if (FLAGS_similarity) {
    // Load tracks.
    TrackList<SiftPosition> sift_tracks;
    SiftPositionReader feature_reader;
    ok = loadTrackList(tracks_file, sift_tracks, feature_reader);
    CHECK(ok) << "Could not load tracks";

    // Convert SIFT features to generic drawable features.
    siftPositionTracksToDrawers(sift_tracks, tracks);
  } else if (FLAGS_scale) {
    // Load tracks.
    TrackList<ScaleSpacePosition> scale_tracks;
    ScaleSpacePositionReader feature_reader;
    ok = loadTrackList(tracks_file, scale_tracks, feature_reader);
    CHECK(ok) << "Could not load tracks";

    // Convert scale-space features to generic drawable features.
    scaleFeatureTracksToDrawers(scale_tracks, tracks, FLAGS_radius);
  } else {
    // Load tracks.
    TrackList<cv::Point2d> point_tracks;
    ImagePointReader<double> feature_reader;
    ok = loadTrackList(tracks_file, point_tracks, feature_reader);
    CHECK(ok) << "Could not load tracks";

    // Convert point tracks to generic drawable features.
    translationTracksToDrawers(point_tracks, tracks, FLAGS_radius);
  }

  LOG(INFO) << "Loaded " << tracks.size() << " tracks";

  // Make a list of random colors.
  typedef std::vector<cv::Scalar> ColorList;
  ColorList colors;
  for (int i = 0; i < int(tracks.size()); i += 1) {
    colors.push_back(randomColor(BRIGHTNESS, SATURATION));
  }

  // Iterate through the frames in which any track was observed.
  TrackListTimeIterator<DrawerPointer> frame(tracks, 0);

  while (!frame.end()) {
    // Get the current time.
    int t = frame.t();

    // Load the image.
    cv::Mat color_image;
    cv::Mat gray_image;
    ok = readImage(makeFilename(image_format, t), color_image, gray_image);
    CHECK(ok) << "Could not read image";

    // Get the features.
    typedef std::map<int, DrawerPointer> FeatureSet;
    FeatureSet features;
    frame.getPoints(features);

    // Draw each one with its color.
    drawFeatures(color_image, features, colors);

    if (FLAGS_save) {
      std::string output_file = makeFilename(output_format, t);
      ok = cv::imwrite(output_file, color_image);
      CHECK(ok) << "Could not save image";
    }

    if (FLAGS_display) {
      cv::imshow("tracks", color_image);
      cv::waitKey(10);
    }

    ++frame;
  }

  return 0;
}